mirror of https://github.com/RipMeApp/ripme.git synced 2025-08-17 19:26:34 +02:00

dos2unix to have LF line endings.

This commit is contained in:
soloturn
2023-06-15 18:20:46 +02:00
parent ab17fa9822
commit 96c9907ec4
24 changed files with 1644 additions and 1688 deletions


@@ -1,15 +1,15 @@
* Ripme version:
* Java version: <!-- (output of `java -version`) -->
* Operating system: <!-- (if Windows, output of `ver` or `winver`) -->
<!-- Please do not link to content featuring underage characters even if the characters are drawn.
These works are still illegal in many places including much of America -->
* Exact URL you were trying to rip when the problem occurred:
* Please include any additional information about how to reproduce the problem:
## Expected Behavior
Detail the expected behavior here.
## Actual Behavior
Detail the actual (incorrect) behavior here. You can post log snippets or attach log files to your issue report.


@@ -1,27 +1,27 @@
# Category
This change is exactly one of the following (please change `[ ]` to `[x]`) to indicate which:
* [ ] a bug fix (Fix #...)
* [ ] a new Ripper
* [ ] a refactoring
* [ ] a style change/fix
* [ ] a new feature
# Description
Please add details about your change here.
# Testing
Required verification:
* [ ] I've verified that there are no regressions in `mvn test` (there are no new failures or errors).
* [ ] I've verified that this change works as intended.
* [ ] Downloads all relevant content.
* [ ] Downloads content from multiple pages (as necessary or appropriate).
* [ ] Saves content at reasonable file names (e.g. page titles or content IDs) to help easily browse downloaded content.
* [ ] I've verified that this change did not break existing functionality (especially in the Ripper I modified).
Optional but recommended:
* [ ] I've added a unit test to cover my change.


@@ -1,58 +1,58 @@
package com.rarchives.ripme.ripper.rippers;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import org.jsoup.Connection.Response;
import com.rarchives.ripme.utils.Http;
/*
* Ripper for ArtStation's short URL domain.
* Example URL: https://artstn.co/p/JlE15Z
*/
public class ArtstnRipper extends ArtStationRipper {
public URL artStationUrl = null;
public ArtstnRipper(URL url) throws IOException {
super(url);
}
@Override
public boolean canRip(URL url) {
return url.getHost().endsWith("artstn.co");
}
@Override
public String getGID(URL url) throws MalformedURLException {
if (artStationUrl == null) {
// Run only once.
try {
artStationUrl = getFinalUrl(url);
if (artStationUrl == null) {
throw new IOException("Null url received.");
}
} catch (IOException e) {
LOGGER.error("Couldn't resolve URL.", e);
}
}
return super.getGID(artStationUrl);
}
public URL getFinalUrl(URL url) throws IOException {
if (url.getHost().endsWith("artstation.com")) {
return url;
}
LOGGER.info("Checking url: " + url);
Response response = Http.url(url).connection().followRedirects(false).execute();
if (response.statusCode() / 100 == 3 && response.hasHeader("location")) {
return getFinalUrl(new URL(response.header("location")));
} else {
return null;
}
}
}
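The redirect chase in getFinalUrl can be reproduced with plain java.net.HttpURLConnection. A minimal standalone sketch, reusing only the artstn.co short link from the class comment; the class name and structure here are illustrative, not project code:

import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;

public class RedirectResolverSketch {
    // Follows 3xx redirects by hand, mirroring ArtstnRipper.getFinalUrl.
    static URL resolve(URL url) throws IOException {
        while (!url.getHost().endsWith("artstation.com")) {
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            conn.setInstanceFollowRedirects(false); // read the Location header ourselves
            int code = conn.getResponseCode();
            String location = conn.getHeaderField("Location");
            conn.disconnect();
            if (code / 100 != 3 || location == null) {
                return null; // not a redirect, nothing further to resolve
            }
            url = new URL(location);
        }
        return url;
    }

    public static void main(String[] args) throws IOException {
        System.out.println(resolve(new URL("https://artstn.co/p/JlE15Z")));
    }
}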


@@ -1,174 +1,174 @@
package com.rarchives.ripme.ripper.rippers;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;
/**
* @author Tushar
*
*/
public class ComicextraRipper extends AbstractHTMLRipper {
private static final String FILE_NAME = "page";
private Pattern p1 =
Pattern.compile("https:\\/\\/www.comicextra.com\\/comic\\/([A-Za-z0-9_-]+)");
private Pattern p2 = Pattern.compile(
"https:\\/\\/www.comicextra.com\\/([A-Za-z0-9_-]+)\\/([A-Za-z0-9_-]+)(?:\\/full)?");
private UrlType urlType = UrlType.UNKNOWN;
private List<String> chaptersList = null;
private int chapterIndex = -1; // index for the chaptersList, useful in getting the next page.
private int imageIndex = 0; // image index for each chapter images.
public ComicextraRipper(URL url) throws IOException {
super(url);
}
@Override
protected String getDomain() {
return "comicextra.com";
}
@Override
public String getHost() {
return "comicextra";
}
@Override
public String getGID(URL url) throws MalformedURLException {
Matcher m1 = p1.matcher(url.toExternalForm());
if (m1.matches()) {
// URL is of comic( https://www.comicextra.com/comic/the-punisher-frank-castle-max).
urlType = UrlType.COMIC;
return m1.group(1);
}
Matcher m2 = p2.matcher(url.toExternalForm());
if (m2.matches()) {
// URL is of chapter( https://www.comicextra.com/the-punisher-frank-castle-max/chapter-75).
urlType = UrlType.CHAPTER;
return m2.group(1);
}
throw new MalformedURLException(
"Expected comicextra.com url of type: https://www.comicextra.com/comic/some-comic-name\n"
+ " or https://www.comicextra.com/some-comic-name/chapter-001 got " + url
+ " instead");
}
@Override
protected Document getFirstPage() throws IOException {
Document doc = null;
switch (urlType) {
case COMIC:
// For COMIC type URLs we extract the URL of each chapter and store them in chaptersList.
chaptersList = new ArrayList<>();
Document comicPage = Http.url(url).get();
Elements elements = comicPage.select("div.episode-list a");
for (Element e : elements) {
chaptersList.add(getCompleteChapterUrl(e.attr("abs:href")));
}
// Set the first chapter from the chapterList as the doc.
chapterIndex = 0;
doc = Http.url(chaptersList.get(chapterIndex)).get();
break;
case CHAPTER:
doc = Http.url(url).get();
break;
case UNKNOWN:
default:
throw new IOException("Unknown url type encountered.");
}
return doc;
}
@Override
public Document getNextPage(Document doc) throws IOException, URISyntaxException {
if (urlType == UrlType.COMIC) {
++chapterIndex;
imageIndex = 0; // Resetting the imagesIndex so that images prefix within each chapter starts from '001_'.
if (chapterIndex < chaptersList.size()) {
return Http.url(chaptersList.get(chapterIndex)).get();
}
}
return super.getNextPage(doc);
}
@Override
protected List<String> getURLsFromPage(Document page) {
List<String> urls = new ArrayList<>();
if (urlType == UrlType.COMIC || urlType == UrlType.CHAPTER) {
Elements images = page.select("img.chapter_img");
for (Element img : images) {
urls.add(img.attr("src"));
}
}
return urls;
}
@Override
protected void downloadURL(URL url, int index) {
String subdirectory = getSubDirectoryName();
String prefix = getPrefix(++imageIndex);
addURLToDownload(url, subdirectory, null, null, prefix, FILE_NAME, null, Boolean.TRUE);
}
/*
* This function appends /full at the end of the chapter url to get all the images for the
* chapter in the same Document.
*/
private String getCompleteChapterUrl(String chapterUrl) {
if (!chapterUrl.endsWith("/full")) {
chapterUrl = chapterUrl + "/full";
}
return chapterUrl;
}
/*
* This function returns the sub folder name for the current chapter.
*/
private String getSubDirectoryName() {
String subDirectory = "";
if (urlType == UrlType.COMIC) {
Matcher m = p2.matcher(chaptersList.get(chapterIndex));
if (m.matches()) {
subDirectory = m.group(2);
}
}
if (urlType == UrlType.CHAPTER) {
Matcher m = p2.matcher(url.toExternalForm());
if (m.matches()) {
subDirectory = m.group(2);
}
}
return subDirectory;
}
/*
* Enum to classify different types of urls.
*/
private enum UrlType {
COMIC, CHAPTER, UNKNOWN
}
}
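The two URL patterns above can be exercised in isolation. A quick sketch using the example URLs from the comments; note the host dots are escaped here, whereas the originals leave them as the regex wildcard:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ComicextraUrlCheck {
    private static final Pattern COMIC =
            Pattern.compile("https://www\\.comicextra\\.com/comic/([A-Za-z0-9_-]+)");
    private static final Pattern CHAPTER =
            Pattern.compile("https://www\\.comicextra\\.com/([A-Za-z0-9_-]+)/([A-Za-z0-9_-]+)(?:/full)?");

    public static void main(String[] args) {
        String[] urls = {
                "https://www.comicextra.com/comic/the-punisher-frank-castle-max",
                "https://www.comicextra.com/the-punisher-frank-castle-max/chapter-75"
        };
        for (String u : urls) {
            Matcher comic = COMIC.matcher(u);
            Matcher chapter = CHAPTER.matcher(u);
            // Check the more specific /comic/ pattern first, as getGID does.
            if (comic.matches()) {
                System.out.println("COMIC, gid=" + comic.group(1));
            } else if (chapter.matches()) {
                System.out.println("CHAPTER, gid=" + chapter.group(1) + ", chapter=" + chapter.group(2));
            }
        }
    }
}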


@@ -1,55 +1,55 @@
package com.rarchives.ripme.ripper.rippers;
import java.io.IOException;
import java.net.*;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
public class CyberdropRipper extends AbstractHTMLRipper {
public CyberdropRipper(URL url) throws IOException {
super(url);
}
@Override
public String getHost() {
return "cyberdrop";
}
@Override
public String getDomain() {
return "cyberdrop.me";
}
@Override
public String getGID(URL url) throws MalformedURLException {
Pattern p = Pattern.compile("^https?://cyberdrop\\.me/a/([a-zA-Z0-9]+).*?$");
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()) {
return m.group(1);
}
throw new MalformedURLException("Expected cyberdrop.me URL format: " +
"https://cyberdrop.me/a/xxxxxxxx - got " + url + " instead");
}
@Override
public void downloadURL(URL url, int index) {
addURLToDownload(url, getPrefix(index));
}
@Override
protected List<String> getURLsFromPage(Document page) {
ArrayList<String> urls = new ArrayList<>();
for (Element element: page.getElementsByClass("image")) {
urls.add(element.attr("href"));
}
return urls;
}
}
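The href collection in getURLsFromPage is plain jsoup and can be tried offline. A self-contained sketch against invented markup:

import java.util.ArrayList;
import java.util.List;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

public class ImageClassDemo {
    public static void main(String[] args) {
        // Hypothetical markup mirroring what getURLsFromPage expects.
        String html = "<a class=\"image\" href=\"https://example.com/1.jpg\"></a>"
                + "<a class=\"image\" href=\"https://example.com/2.jpg\"></a>";
        Document page = Jsoup.parse(html);
        List<String> urls = new ArrayList<>();
        for (Element element : page.getElementsByClass("image")) {
            urls.add(element.attr("href"));
        }
        System.out.println(urls); // [https://example.com/1.jpg, https://example.com/2.jpg]
    }
}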


@@ -1,66 +1,66 @@
package com.rarchives.ripme.ripper.rippers;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
public class FitnakedgirlsRipper extends AbstractHTMLRipper {
public FitnakedgirlsRipper(URL url) throws IOException {
super(url);
}
@Override
public String getHost() {
return "fitnakedgirls";
}
@Override
public String getDomain() {
return "fitnakedgirls.com";
}
@Override
public String getGID(URL url) throws MalformedURLException {
Pattern p;
Matcher m;
p = Pattern.compile("^.*fitnakedgirls\\.com/gallery/(.+)$");
m = p.matcher(url.toExternalForm());
if (m.matches()) {
return m.group(1);
}
throw new MalformedURLException(
"Expected fitnakedgirls.com gallery format: " + "fitnakedgirls.com/gallery/####" + " Got: " + url);
}
@Override
public List<String> getURLsFromPage(Document doc) {
List<String> imageURLs = new ArrayList<>();
Elements imgs = doc.select("div[class*=wp-tiles-tile-bg] > img");
for (Element img : imgs) {
String imgSrc = img.attr("src");
imageURLs.add(imgSrc);
}
return imageURLs;
}
@Override
public void downloadURL(URL url, int index) {
// Send referrer when downloading images
addURLToDownload(url, getPrefix(index), "", this.url.toExternalForm(), null);
}
}
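The selector here relies on jsoup's substring attribute syntax: div[class*=wp-tiles-tile-bg] > img matches an img inside any div whose class attribute contains that fragment. A sketch against made-up markup:

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

public class TileSelectorDemo {
    public static void main(String[] args) {
        // Invented markup; the extra class shows the substring match still fires.
        String html = "<div class=\"wp-tiles-tile-bg large\"><img src=\"https://example.com/a.jpg\"></div>";
        Document doc = Jsoup.parse(html);
        for (Element img : doc.select("div[class*=wp-tiles-tile-bg] > img")) {
            System.out.println(img.attr("src")); // https://example.com/a.jpg
        }
    }
}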


@@ -1,293 +1,293 @@
package com.rarchives.ripme.ripper.rippers;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.net.*;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.java_websocket.client.WebSocketClient;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import org.java_websocket.handshake.ServerHandshake;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import com.rarchives.ripme.ripper.AbstractJSONRipper;
public class ScrolllerRipper extends AbstractJSONRipper {
public ScrolllerRipper(URL url) throws IOException {
super(url);
}
@Override
public String getHost() {
return "scrolller";
}
@Override
public String getDomain() {
return "scrolller.com";
}
@Override
public String getGID(URL url) throws MalformedURLException {
// Typical URL is: https://scrolller.com/r/subreddit
// Parameters like "filter" and "sort" can be passed (ex: https://scrolller.com/r/subreddit?filter=xxx&sort=yyyy)
Pattern p = Pattern.compile("^https?://scrolller\\.com/r/([a-zA-Z0-9]+).*?$");
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()) {
return m.group(1);
}
throw new MalformedURLException("Expected scrolller.com URL format: " +
"scrolller.com/r/subreddit OR scrolller.com/r/subreddit?filter= - got " + url + " instead");
}
@Override
public void downloadURL(URL url, int index) {
addURLToDownload(url, getPrefix(index));
}
private JSONObject prepareQuery(String iterator, String gid, String sortByString) throws IOException, URISyntaxException {
String QUERY_NOSORT = "query SubredditQuery( $url: String! $filter: SubredditPostFilter $iterator: String ) { getSubreddit(url: $url) { children( limit: 50 iterator: $iterator filter: $filter ) { iterator items { __typename url title subredditTitle subredditUrl redditPath isNsfw albumUrl isFavorite mediaSources { url width height isOptimized } } } } }";
String QUERY_SORT = "subscription SubredditSubscription( $url: String! $sortBy: SubredditSortBy $timespan: SubredditTimespan $iterator: String $limit: Int $filter: SubredditPostFilter ) { fetchSubreddit( url: $url sortBy: $sortBy timespan: $timespan iterator: $iterator limit: $limit filter: $filter ) { __typename ... on Subreddit { __typename url title secondaryTitle description createdAt isNsfw subscribers isComplete itemCount videoCount pictureCount albumCount isFollowing } ... on SubredditPost { __typename url title subredditTitle subredditUrl redditPath isNsfw albumUrl isFavorite mediaSources { url width height isOptimized } } ... on Iterator { iterator } ... on Error { message } } }";
String filterString = convertFilterString(getParameter(this.url,"filter"));
JSONObject variablesObject = new JSONObject().put("url", String.format("/r/%s", gid)).put("sortBy", sortByString.toUpperCase());
JSONObject finalQueryObject = new JSONObject().put("variables", variablesObject).put("query", sortByString.equals("") ? QUERY_NOSORT : QUERY_SORT);
if (iterator != null) {
// Iterator is not present on the first page
variablesObject.put("iterator", iterator);
}
if (!filterString.equals("NOFILTER")) {
variablesObject.put("filter", filterString);
}
return sortByString.equals("") ? getPosts(finalQueryObject) : getPostsSorted(finalQueryObject);
}
public String convertFilterString(String filterParameter) {
// Converts the ?filter= parameter of the URL to one that can be used in the GraphQL query
// I could basically remove the last "s" and call toUpperCase instead of this switch statement but this looks easier to read.
switch (filterParameter.toLowerCase()) {
case "pictures":
return "PICTURE";
case "videos":
return "VIDEO";
case "albums":
return "ALBUM";
case "":
return "NOFILTER";
default:
LOGGER.error(String.format("Invalid filter %s, using no filter", filterParameter));
return "";
}
}
public String getParameter(URL url, String parameter) throws MalformedURLException {
// Gets passed parameters from the URL
String toReplace = String.format("https://scrolller.com/r/%s?",getGID(url));
List<NameValuePair> args= URLEncodedUtils.parse(url.toExternalForm(), Charset.defaultCharset());
for (NameValuePair arg:args) {
// First parameter contains part of the url so we have to remove it
// Ex: for the url https://scrolller.com/r/CatsStandingUp?filter=xxxx&sort=yyyy
// 1) arg.getName() => https://scrolller.com/r/CatsStandingUp?filter
// 2) arg.getName() => sort
if (arg.getName().replace(toReplace,"").toLowerCase().equals((parameter))) {
return arg.getValue();
}
}
return "";
}
private JSONObject getPosts(JSONObject data) {
// The actual GraphQL query call
try {
String url = "https://api.scrolller.com/api/v2/graphql";
URL obj = new URI(url).toURL();
HttpURLConnection conn = (HttpURLConnection) obj.openConnection();
conn.setReadTimeout(5000);
conn.addRequestProperty("Accept-Language", "en-US,en;q=0.8");
conn.addRequestProperty("User-Agent", "Mozilla");
conn.addRequestProperty("Referer", "scrolller.com");
conn.setDoOutput(true);
OutputStreamWriter w = new OutputStreamWriter(conn.getOutputStream(), "UTF-8");
w.write(data.toString());
w.close();
BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream()));
String inputLine;
StringBuffer jsonString = new StringBuffer();
while ((inputLine = in.readLine()) != null) {
jsonString.append(inputLine);
}
in.close();
conn.disconnect();
return new JSONObject(jsonString.toString());
} catch (Exception e) {
e.printStackTrace();
}
return new JSONObject("{}");
}
private JSONObject getPostsSorted(JSONObject data) throws MalformedURLException {
// The actual GraphQL query call (if sort parameter is present)
try {
ArrayList<String> postsJsonStrings = new ArrayList<>();
WebSocketClient wsc = new WebSocketClient(new URI("wss://api.scrolller.com/api/v2/graphql")) {
@Override
public void onOpen(ServerHandshake serverHandshake) {
// As soon as the WebSocket connects send our query
this.send(data.toString());
}
@Override
public void onMessage(String s) {
postsJsonStrings.add(s);
if (new JSONObject(s).getJSONObject("data").getJSONObject("fetchSubreddit").has("iterator")) {
this.close();
}
}
@Override
public void onClose(int i, String s, boolean b) {
}
@Override
public void onError(Exception e) {
LOGGER.error(String.format("WebSocket error, server reported %s", e.getMessage()));
}
};
wsc.connect();
while (!wsc.isClosed()) {
// Posts list is not over until the connection closes.
}
JSONObject finalObject = new JSONObject();
JSONArray posts = new JSONArray();
// Iterator is the last object in the post list; duplicate it in its own object for clarity.
finalObject.put("iterator", new JSONObject(postsJsonStrings.get(postsJsonStrings.size()-1)));
for (String postString : postsJsonStrings) {
posts.put(new JSONObject(postString));
}
finalObject.put("posts", posts);
if (finalObject.getJSONArray("posts").length() == 1 && !finalObject.getJSONArray("posts").getJSONObject(0).getJSONObject("data").getJSONObject("fetchSubreddit").has("mediaSources")) {
// Only iterator, no posts.
return null;
}
return finalObject;
} catch (URISyntaxException ue) {
// Nothing to catch, it's a hardcoded URI.
}
return null;
}
@Override
protected List<String> getURLsFromJSON(JSONObject json) throws JSONException {
boolean sortRequested = json.has("posts");
int bestArea = 0;
String bestUrl = "";
List<String> list = new ArrayList<>();
JSONArray itemsList = sortRequested ? json.getJSONArray("posts") : json.getJSONObject("data").getJSONObject("getSubreddit").getJSONObject("children").getJSONArray("items");
for (Object item : itemsList) {
if (sortRequested && !((JSONObject) item).getJSONObject("data").getJSONObject("fetchSubreddit").has("mediaSources")) {
continue;
}
JSONArray sourcesTMP = sortRequested ? ((JSONObject) item).getJSONObject("data").getJSONObject("fetchSubreddit").getJSONArray("mediaSources") : ((JSONObject) item).getJSONArray("mediaSources");
for (Object sourceTMP : sourcesTMP)
{
int widthTMP = ((JSONObject) sourceTMP).getInt("width");
int heightTMP = ((JSONObject) sourceTMP).getInt("height");
int areaTMP = widthTMP * heightTMP;
if (areaTMP > bestArea) {
bestArea = areaTMP; // track the winning area itself, not just its width
bestUrl = ((JSONObject) sourceTMP).getString("url");
}
}
list.add(bestUrl);
bestUrl = "";
bestArea = 0;
}
return list;
}
@Override
protected JSONObject getFirstPage() throws IOException {
try {
return prepareQuery(null, this.getGID(url), getParameter(url,"sort"));
} catch (URISyntaxException e) {
LOGGER.error(String.format("Error obtaining first page: %s", e.getMessage()));
return null;
}
}
@Override
public JSONObject getNextPage(JSONObject source) throws IOException {
// Every call to the API returns an "iterator" string that we pass back to get the next page.
// org.json represents JSON null as the JSONObject.NULL sentinel rather than a Java null, hence the iterator.toString().equals("null") check below.
Object iterator = null;
if (source.has("iterator")) {
// Sort requested, custom JSON.
iterator = source.getJSONObject("iterator").getJSONObject("data").getJSONObject("fetchSubreddit").get("iterator");
} else {
iterator = source.getJSONObject("data").getJSONObject("getSubreddit").getJSONObject("children").get("iterator");
}
if (!iterator.toString().equals("null")) {
// Need to change page.
try {
return prepareQuery(iterator.toString(), this.getGID(url), getParameter(url,"sort"));
} catch (URISyntaxException e) {
LOGGER.error(String.format("Error changing page: %s", e.getMessage()));
return null;
}
} else {
return null;
}
}
}
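One remark on getPostsSorted: the while (!wsc.isClosed()) loop spins a core until the server closes the socket. A CountDownLatch is one non-spinning alternative; the sketch below keeps the endpoint from the code above and reduces the handlers to comments, so it is a shape, not a drop-in replacement:

import java.net.URI;
import java.net.URISyntaxException;
import java.util.concurrent.CountDownLatch;
import org.java_websocket.client.WebSocketClient;
import org.java_websocket.handshake.ServerHandshake;

public class LatchedSocketSketch {
    public static void main(String[] args) throws URISyntaxException, InterruptedException {
        CountDownLatch closed = new CountDownLatch(1);
        WebSocketClient wsc = new WebSocketClient(new URI("wss://api.scrolller.com/api/v2/graphql")) {
            @Override public void onOpen(ServerHandshake handshake) { /* send the query here */ }
            @Override public void onMessage(String s) { /* collect posts; close() on the iterator */ }
            @Override public void onClose(int code, String reason, boolean remote) {
                closed.countDown(); // wake the waiting thread instead of spinning
            }
            @Override public void onError(Exception e) { closed.countDown(); }
        };
        wsc.connect();
        closed.await(); // blocks without busy-waiting until the socket closes
    }
}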


@@ -1,223 +1,223 @@
package com.rarchives.ripme.ripper.rippers;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.json.JSONObject;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.Connection.Response;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
/**
* For ripping VSCO pictures.
*/
public class VscoRipper extends AbstractHTMLRipper {
int pageNumber = 1;
JSONObject profileJSON;
private static final String DOMAIN = "vsco.co",
HOST = "vsco";
public VscoRipper(URL url) throws IOException{
super(url);
}
/**
* Checks to see if VscoRipper can Rip specified url.
* @param url
* @return True if can rip.
* False if cannot rip.
*/
@Override
public boolean canRip(URL url) {
if (!url.getHost().endsWith(DOMAIN)) {
return false;
}
// Ignores personalized things (e.g. login, feed) and store page
// Allows links to user profiles and links to images.
//@TODO: Add support for journals and collections.
String u = url.toExternalForm();
// Every disallowed segment must be absent, unless it is a direct /media/ link.
return (!u.contains("/store/") &&
!u.contains("/feed/") &&
!u.contains("/login/") &&
!u.contains("/journal/") &&
!u.contains("/collection/") &&
!u.contains("/images/")) ||
u.contains("/media/");
}
@Override
public URL sanitizeURL(URL url) throws MalformedURLException {
//no sanitization needed.
return url;
}
/**
* <p>Gets the direct URL of full-sized image through the <meta> tag.</p>
* When expanding future functionality (e.g. support for journals), put everything into this method.
* @param page
* @return
*/
@Override
public List<String> getURLsFromPage(Document page){
List<String> toRip = new ArrayList<>();
//If user wanted to rip single image
if (url.toString().contains("/media/")){
try {
toRip.add(vscoImageToURL(url.toExternalForm()));
} catch (IOException ex) {
LOGGER.debug("Failed to get image URL from " + url.toString());
}
} else {
String username = getUserName();
String userTkn = getUserTkn(username);
String siteID = getSiteID(userTkn, username);
while (true) {
profileJSON = getProfileJSON(userTkn, username, Integer.toString(pageNumber), siteID);
for (int i = 0; i < profileJSON.getJSONArray("media").length(); i++) {
toRip.add("https://" + profileJSON.getJSONArray("media").getJSONObject(i).getString("responsive_url"));
}
if (pageNumber * 1000 > profileJSON.getInt("total")) {
return toRip;
}
pageNumber++;
}
}
return toRip;
}
private String getUserTkn(String username) {
String userTokenPage = "https://vsco.co/content/Static";
Map<String,String> responseCookies = new HashMap<>();
try {
Response resp = Http.url(userTokenPage).ignoreContentType().response();
responseCookies = resp.cookies();
return responseCookies.get("vs");
} catch (IOException e) {
LOGGER.error("Could not get user tkn");
return null;
}
}
private String getUserName() {
Pattern p = Pattern.compile("^https?://vsco.co/([a-zA-Z0-9-]+)(/gallery)?(/)?");
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()) {
String user = m.group(1);
return user;
}
return null;
}
private JSONObject getProfileJSON(String tkn, String username, String page, String siteId) {
String size = "1000";
String purl = "https://vsco.co/ajxp/" + tkn + "/2.0/medias?site_id=" + siteId + "&page=" + page + "&size=" + size;
Map<String,String> cookies = new HashMap<>();
cookies.put("vs", tkn);
try {
JSONObject j = Http.url(purl).cookies(cookies).getJSON();
return j;
} catch (IOException e) {
LOGGER.error("Could not get profile images");
return null;
}
}
private String getSiteID(String tkn, String username) {
Map<String,String> cookies = new HashMap<>();
cookies.put("vs", tkn);
try {
JSONObject j = Http.url("https://vsco.co/ajxp/" + tkn + "/2.0/sites?subdomain=" + username).cookies(cookies).getJSON();
return Integer.toString(j.getJSONArray("sites").getJSONObject(0).getInt("id"));
} catch (IOException e) {
LOGGER.error("Could not get site id");
return null;
}
}
private String vscoImageToURL(String url) throws IOException{
Document page = Jsoup.connect(url).userAgent(USER_AGENT)
.get();
//create Elements filled only with Elements with the "meta" tag.
Elements metaTags = page.getElementsByTag("meta");
String result = "";
for(Element metaTag : metaTags){
//find URL inside meta-tag with property of "og:image"
if (metaTag.attr("property").equals("og:image")){
String givenURL = metaTag.attr("content");
givenURL = givenURL.replaceAll("\\?h=[0-9]+", "");//replace the "?h=xxx" tag at the end of the URL (where each x is a number)
result = givenURL;
LOGGER.debug("Found image URL: " + givenURL);
break;//immediately stop after getting URL (there should only be 1 image to be downloaded)
}
}
//Means website changed, things need to be fixed.
if (result.isEmpty()){
LOGGER.error("Could not find image URL at: " + url);
}
return result;
}
@Override
public String getHost() {
return HOST;
}
@Override
public String getGID(URL url) throws MalformedURLException {
//Single Image
Pattern p = Pattern.compile("^https?://vsco\\.co/([a-zA-Z0-9-]+)/media/([a-zA-Z0-9]+)");
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()){
// Return the text contained between () in the regex
String user = m.group(1);
String imageNum = m.group(2).substring(0, 5);//first 5 characters should be enough to make each rip unique
return user + "/" + imageNum;
}
//Member profile (usernames should all be different, so this should work).
p = Pattern.compile("^https?://vsco.co/([a-zA-Z0-9-]+)(/gallery)?(/)?");
m = p.matcher(url.toExternalForm());
if (m.matches()){
String user = m.group(1);
return user;
}
throw new MalformedURLException("Expected a URL to a single image or to a member profile, got " + url + " instead");
}
@Override
public String getDomain() {
return DOMAIN;
}
@Override
public void downloadURL(URL url, int index) {
addURLToDownload(url, getPrefix(index));
}
}
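The og:image lookup in vscoImageToURL is a generic meta-tag scrape and can be tested without touching vsco.co. A sketch against invented markup:

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

public class OgImageDemo {
    public static void main(String[] args) {
        // Hypothetical page head with the size hint the ripper strips.
        String html = "<head><meta property=\"og:image\" "
                + "content=\"https://example.com/photo.jpg?h=1136\"></head>";
        Document page = Jsoup.parse(html);
        for (Element metaTag : page.getElementsByTag("meta")) {
            if (metaTag.attr("property").equals("og:image")) {
                // Strip the trailing ?h=NNN size hint, as vscoImageToURL does.
                System.out.println(metaTag.attr("content").replaceAll("\\?h=[0-9]+", ""));
                break;
            }
        }
    }
}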


@@ -1,36 +1,36 @@
package com.rarchives.ripme.ripper.rippers;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class XlecxRipper extends XcartxRipper {
private Pattern p = Pattern.compile("^https?://xlecx\\.org/([a-zA-Z0-9_\\-]+)\\.html");
public XlecxRipper(URL url) throws IOException {
super(url);
}
@Override
public String getHost() {
return "xlecx";
}
@Override
public String getDomain() {
return "xlecx.org";
}
@Override
public String getGID(URL url) throws MalformedURLException {
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()) {
return m.group(1);
}
throw new MalformedURLException("Expected URL format: http://xlecx.org/comic, got: " + url);
}
}


@@ -1,80 +1,80 @@
package com.rarchives.ripme.ripper.rippers.video;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import com.rarchives.ripme.ripper.VideoRipper;
import com.rarchives.ripme.utils.Http;
public class TwitchVideoRipper extends VideoRipper {
private static final String HOST = "twitch";
public TwitchVideoRipper(URL url) throws IOException {
super(url);
}
@Override
public String getHost() {
return HOST;
}
@Override
public boolean canRip(URL url) {
Pattern p = Pattern.compile("^https://clips\\.twitch\\.tv/.*$");
Matcher m = p.matcher(url.toExternalForm());
return m.matches();
}
@Override
public URL sanitizeURL(URL url) throws MalformedURLException {
return url;
}
@Override
public String getGID(URL url) throws MalformedURLException {
Pattern p = Pattern.compile("^https://clips\\.twitch\\.tv/(.*)$");
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()) {
return m.group(m.groupCount());
}
throw new MalformedURLException(
"Expected Twitch.tv format: "
+ "https://clips.twitch.tv/####"
+ " Got: " + url);
}
@Override
public void rip() throws IOException {
LOGGER.info("Retrieving " + this.url);
Document doc = Http.url(url).get();
//Get user friendly filename from page title
String title = doc.title();
Elements script = doc.select("script");
if (script.isEmpty()) {
throw new IOException("Could not find script code at " + url);
}
//Regex assumes highest quality source is listed first
Pattern p = Pattern.compile("\"source\":\"(.*?)\"");
for (Element element : script) {
Matcher m = p.matcher(element.data());
if (m.find()){
String vidUrl = m.group(1);
addURLToDownload(new URL(vidUrl), HOST + "_" + title);
}
}
waitForThreads();
}
}
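The "source" scrape in rip() can be checked against a canned script body. A minimal sketch; the JSON snippet is invented:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ClipSourceDemo {
    public static void main(String[] args) {
        // Stand-in for the data of one <script> element on a clip page.
        String scriptData = "{\"quality\":\"1080\",\"source\":\"https://clips-media.example/clip.mp4\"}";
        Pattern p = Pattern.compile("\"source\":\"(.*?)\"");
        Matcher m = p.matcher(scriptData);
        if (m.find()) {
            System.out.println(m.group(1)); // https://clips-media.example/clip.mp4
        }
    }
}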


@@ -1,99 +1,99 @@
package com.rarchives.ripme.utils;
import java.net.Authenticator;
import java.net.PasswordAuthentication;
import java.util.Map;
import java.util.HashMap;
/**
* Proxy/Socks setter
*/
public class Proxy {
private Proxy() {
}
/**
* Parse proxy server settings from a string in the format
* [user:password]@host[:port].
*
* @param fullproxy the string to parse
* @return HashMap containing proxy server, port, user and password
*/
private static Map<String, String> parseServer(String fullproxy) {
Map<String, String> proxy = new HashMap<String, String>();
if (fullproxy.lastIndexOf("@") != -1) {
int sservli = fullproxy.lastIndexOf("@");
String userpw = fullproxy.substring(0, sservli);
String[] usersplit = userpw.split(":");
proxy.put("user", usersplit[0]);
proxy.put("password", usersplit[1]);
fullproxy = fullproxy.substring(sservli + 1);
}
String[] servsplit = fullproxy.split(":");
if (servsplit.length == 2) {
proxy.put("port", servsplit[1]);
}
proxy.put("server", servsplit[0]);
return proxy;
}
/**
* Set an HTTP proxy.
* WARNING: an authenticated HTTP proxy won't work from JDK 1.8.0_111 onward
* unless java is started with -Djdk.http.auth.tunneling.disabledSchemes=""
* (see https://stackoverflow.com/q/41505219).
*
* @param fullproxy the proxy, using format [user:password]@host[:port]
*/
public static void setHTTPProxy(String fullproxy) {
Map<String, String> proxyServer = parseServer(fullproxy);
if (proxyServer.get("user") != null && proxyServer.get("password") != null) {
Authenticator.setDefault(new Authenticator() {
protected PasswordAuthentication getPasswordAuthentication() {
PasswordAuthentication p = new PasswordAuthentication(proxyServer.get("user"), proxyServer.get("password").toCharArray());
return p;
}
});
System.setProperty("http.proxyUser", proxyServer.get("user"));
System.setProperty("http.proxyPassword", proxyServer.get("password"));
System.setProperty("https.proxyUser", proxyServer.get("user"));
System.setProperty("https.proxyPassword", proxyServer.get("password"));
}
if (proxyServer.get("port") != null) {
System.setProperty("http.proxyPort", proxyServer.get("port"));
System.setProperty("https.proxyPort", proxyServer.get("port"));
}
System.setProperty("http.proxyHost", proxyServer.get("server"));
System.setProperty("https.proxyHost", proxyServer.get("server"));
}
/**
* Set a Socks Proxy Server (globally).
*
* @param fullsocks the socks server, using format [user:password]@host[:port]
*/
public static void setSocks(String fullsocks) {
Map<String, String> socksServer = parseServer(fullsocks);
if (socksServer.get("user") != null && socksServer.get("password") != null) {
Authenticator.setDefault(new Authenticator() {
protected PasswordAuthentication getPasswordAuthentication() {
PasswordAuthentication p = new PasswordAuthentication(socksServer.get("user"), socksServer.get("password").toCharArray());
return p;
}
});
System.setProperty("java.net.socks.username", socksServer.get("user"));
System.setProperty("java.net.socks.password", socksServer.get("password"));
}
if (socksServer.get("port") != null) {
System.setProperty("socksProxyPort", socksServer.get("port"));
}
System.setProperty("socksProxyHost", socksServer.get("server"));
}
}
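
For reference, a minimal usage sketch of the setters above. The host, port, and credentials are made up, and the -D flag shown in the comment is the workaround from the WARNING for authenticated HTTP proxies on newer JDKs.

import com.rarchives.ripme.utils.Proxy;

public class ProxyUsageDemo {
    public static void main(String[] args) {
        // [user:password]@host[:port], per parseServer(); values are hypothetical.
        Proxy.setHTTPProxy("alice:secret@127.0.0.1:8080");
        // A SOCKS proxy without credentials would be e.g.:
        // Proxy.setSocks("127.0.0.1:1080");

        // For an authenticated HTTP proxy on JDK 8u111+, launch with:
        //   java -Djdk.http.auth.tunneling.disabledSchemes="" -jar ripme.jar
        System.out.println(System.getProperty("http.proxyHost")); // 127.0.0.1
    }
}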

@@ -1,59 +1,59 @@
Log = Logi
History = Historia
created = Stworzono
modified = Zmodyfikowano
queue = Kolejka
Configuration = Konfiguracja
# Keys for the Configuration menu
current.version = Obecna Wersja
check.for.updates = Sprawdź dostępność aktualizacji
auto.update = Auto Aktualizacja?
max.download.threads = Maksymalna liczba wątków pobierania:
timeout.mill = Opóźnienie (w milisekundach):
retry.download.count = Liczba ponownych pobrań
overwrite.existing.files = Nadpisać istniejące pliki?
sound.when.rip.completes = Dźwięk po zakończeniu
preserve.order = Zachować porządek
save.logs = Zapisz Logi
notification.when.rip.starts = Powiadomienie przy uruchomieniu pobierania
save.urls.only = Zapisz tylko linki
save.album.titles = Zapisz nazwy albumów
autorip.from.clipboard = Auto pobieranie ze schowka
save.descriptions = Zapisz opis
prefer.mp4.over.gif = Preferuj MP4 od GIF
restore.window.position = Przywróć pozycję okna
remember.url.history = Zapamiętaj historię linków
loading.history.from = Załaduj historię z...
# Misc UI keys
loading.history.from.configuration = Załaduj historię z ustawień
interrupted.while.waiting.to.rip.next.album = Przerwany podczas oczekiwania na zgrywanie następnego albumu
inactive = Nieaktywny
re-rip.checked = Sprawdź pobrane ripy
remove = Usuń
clear = Wyczyść
download.url.list = Pobierz listę linków
select.save.dir = Wybierz ścieżkę zapisu
# Keys for the logs generated by DownloadFileThread
nonretriable.status.code = Nieodwracalny kod statusu
retriable.status.code = Kod statusu umożliwiający ponowienie
server.doesnt.support.resuming.downloads = Serwer nie obsługuje wznowienia pobierania
# A "magic number" can also be called a file signature
was.unable.to.get.content.type.using.magic.number = Nie udało się uzyskać typu zawartości za pomocą magicznej liczby
magic.number.was = Magiczną liczbą była
deleting.existing.file = Usuwanie istniejących plików
request.properties = Właściwości żądania
download.interrupted = Pobieranie przerwane
exceeded.maximum.retries = Przekroczono maksymalną liczbę ponownych prób
http.status.exception = Wyjątek statusu http
exception.while.downloading.file = Wystąpił problem podczas pobierania pliku
failed.to.download = Nie można pobrać pliku
skipping = Pomijanie
file.already.exists = Plik już istnieje


@@ -1,75 +1,75 @@
Log = 日志
History = 历史
created = 创建时间
modified = 修改时间
queue = 队列
Configuration = 配置
open = 打开
# Keys for the Configuration menu
current.version = 当前版本
check.for.updates = 检查更新
auto.update = 自动更新?
max.download.threads = 最大下载线程数:
timeout.mill = 超时(毫秒):
retry.download.count = 重试下载次数
overwrite.existing.files = 覆盖现有文件?
sound.when.rip.completes = 抓取完成时播放声音
preserve.order = 保持顺序
save.logs = 保存日志
notification.when.rip.starts = 通知抓取开始
save.urls.only = 仅保存 URL
save.album.titles = 保存专辑标题
autorip.from.clipboard = 监视剪贴板上的 URL
save.descriptions = 保存描述
prefer.mp4.over.gif = 首选 MP4 而非 GIF
restore.window.position = 恢复窗口位置
remember.url.history = 记住 URL 历史
loading.history.from = 加载历史从
# Queue keys
queue.remove.all = 移除全部
queue.validation = 您确定要移除队列内的全部项目?
queue.remove.selected = 移除所选项目
# History
re-rip.checked = 重新抓取选中的项目
remove = 移除
clear = 清除
history.check.all = 选中全部
history.check.none = 取消选中全部
history.check.selected = 选中所选项目
history.uncheck.selected = 取消选中所选项目
history.load.failed.warning = RipMe 加载位于 historyFile.getAbsolutePath() 的历史文件失败\n\n错误%s\n\n关闭 RipMe 会自动覆盖此文件的内容,\n请在关闭 RipMe 前备份它!
history.load.none = 无可重新抓取的历史条目。请先抓取一些专辑
history.load.none.checked = 未 '选中' 任何历史条目,请通过选中所需 URL 前面的复选框或URL 的右键菜单以选中所需条目
# TrayIcon
tray.show = 显示
tray.hide = 隐藏
tray.autorip = 监视剪贴板上的 URL
tray.exit = 退出
# Misc UI keys
loading.history.from.configuration = 从配置加载历史
interrupted.while.waiting.to.rip.next.album = 等候抓取下一专辑期间发生中断
inactive = 非活动
download.url.list = 下载 URL 列表
select.save.dir = 选择保存目录
# Keys for the logs generated by DownloadFileThread
nonretriable.status.code = 非可重试状态代码
retriable.status.code = 可重试状态代码
server.doesnt.support.resuming.downloads = 服务器不支持继续下载(续传)
# A "magic number" can also be called a file signature
was.unable.to.get.content.type.using.magic.number = 不能使用幻数获取内容类型
magic.number.was = 幻数为
deleting.existing.file = 删除现有文件
request.properties = 请求属性
download.interrupted = 下载中断
exceeded.maximum.retries = 超过最大重试次数
http.status.exception = HTTP 状态异常
exception.while.downloading.file = 下载文件时发生异常
failed.to.download = 下载失败
skipping = 跳过
file.already.exists = 文件已存在


@@ -1,57 +1,57 @@
package com.rarchives.ripme.tst;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import com.rarchives.ripme.utils.Proxy;
import com.rarchives.ripme.utils.Utils;
import com.rarchives.ripme.utils.Http;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertFalse;
public class proxyTest {
// This test will only run on machines where the user has added an entry for proxy.socks
@Test
public void testSocksProxy() throws IOException, URISyntaxException {
// Unset proxy before testing
System.setProperty("http.proxyHost", "");
System.setProperty("https.proxyHost", "");
System.setProperty("socksProxyHost", "");
URL url = new URI("https://icanhazip.com").toURL();
String proxyConfig = Utils.getConfigString("proxy.socks", "");
if (!proxyConfig.equals("")) {
String ip1 = Http.url(url).ignoreContentType().get().text();
Proxy.setSocks(Utils.getConfigString("proxy.socks", ""));
String ip2 = Http.url(url).ignoreContentType().get().text();
assertFalse(ip1.equals(ip2));
} else {
System.out.println("Skipping testSocksProxy");
assert(true);
}
}
// This test will only run on machines where the user has added an entry for proxy.http
@Test
public void testHTTPProxy() throws IOException, URISyntaxException {
// Unset proxy before testing
System.setProperty("http.proxyHost", "");
System.setProperty("https.proxyHost", "");
System.setProperty("socksProxyHost", "");
URL url = new URI("https://icanhazip.com").toURL();
String proxyConfig = Utils.getConfigString("proxy.http", "");
if (!proxyConfig.equals("")) {
String ip1 = Http.url(url).ignoreContentType().get().text();
Proxy.setHTTPProxy(Utils.getConfigString("proxy.http", ""));
String ip2 = Http.url(url).ignoreContentType().get().text();
assertFalse(ip1.equals(ip2));
} else {
System.out.println("Skipping testHTTPProxy");
assert(true);
}
}
}
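
Both tests above are gated on user configuration; a minimal sketch of that gate, using the same Utils/Proxy calls as the tests (the proxy value shown in the comment is hypothetical):

import com.rarchives.ripme.utils.Proxy;
import com.rarchives.ripme.utils.Utils;

public class ProxyConfigGateDemo {
    public static void main(String[] args) {
        // An empty default means "no proxy configured", so the tests skip.
        String socks = Utils.getConfigString("proxy.socks", "");
        if (!socks.isEmpty()) {
            // e.g. a hypothetical config entry: proxy.socks = user:pass@127.0.0.1:1080
            Proxy.setSocks(socks);
        } else {
            System.out.println("proxy.socks not set; skipping proxy setup");
        }
    }
}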

@@ -1,28 +1,28 @@
package com.rarchives.ripme.tst.ripper.rippers;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import com.rarchives.ripme.ripper.rippers.ComicextraRipper;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
public class ComicextraRipperTest extends RippersTest {
@Test
@Tag("flaky")
public void testComicUrl() throws IOException, URISyntaxException {
URL url = new URI("https://www.comicextra.com/comic/karma-police").toURL();
ComicextraRipper ripper = new ComicextraRipper(url);
testRipper(ripper);
}
@Test
@Disabled("no images found error, broken ripper?")
public void testChapterUrl() throws IOException, URISyntaxException {
URL url = new URI("https://www.comicextra.com/v-for-vendetta/chapter-1").toURL();
ComicextraRipper ripper = new ComicextraRipper(url);
testRipper(ripper);
}
}

@@ -1,55 +1,55 @@
package com.rarchives.ripme.tst.ripper.rippers;
import com.rarchives.ripme.ripper.rippers.CyberdropRipper;
import com.rarchives.ripme.utils.Http;
import org.jsoup.nodes.Document;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class CyberdropRipperTest extends RippersTest {
@Test
public void testCyberdropGID() throws IOException, URISyntaxException {
Map<URL, String> testURLs = new HashMap<>();
testURLs.put(new URI("https://cyberdrop.me/a/n4umdBjw").toURL(), "n4umdBjw");
testURLs.put(new URI("https://cyberdrop.me/a/iLtp4BjW").toURL(), "iLtp4BjW");
for (URL url : testURLs.keySet()) {
CyberdropRipper ripper = new CyberdropRipper(url);
ripper.setup();
Assertions.assertEquals(testURLs.get(url), ripper.getGID(ripper.getURL()));
deleteDir(ripper.getWorkingDir());
}
}
@Test
@Tag("flaky")
public void testCyberdropNumberOfFiles() throws IOException, URISyntaxException {
List<URL> testURLs = new ArrayList<URL>();
testURLs.add(new URI("https://cyberdrop.me/a/n4umdBjw").toURL());
testURLs.add(new URI("https://cyberdrop.me/a/iLtp4BjW").toURL());
for (URL url : testURLs) {
Assertions.assertTrue(willDownloadAllFiles(url));
}
}
public boolean willDownloadAllFiles(URL url) throws IOException {
Document doc = Http.url(url).get();
long numberOfLinks = doc.getElementsByClass("image").stream().count();
int numberOfFiles = Integer.parseInt(doc.getElementById("totalFilesAmount").text());
return numberOfLinks == numberOfFiles;
}
}


@@ -1,30 +1,30 @@
package com.rarchives.ripme.tst.ripper.rippers;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import com.rarchives.ripme.ripper.rippers.FolioRipper;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
public class FolioRipperTest extends RippersTest {
/**
* Test for folio.ink ripper
*/
@Test
@Disabled("test or ripper broken")
public void testFolioRip() throws IOException, URISyntaxException {
FolioRipper ripper = new FolioRipper(new URI("https://folio.ink/DmBe6i").toURL());
testRipper(ripper);
}
@Test
public void testGetGID() throws IOException, URISyntaxException {
URL url = new URI("https://folio.ink/DmBe6i").toURL();
FolioRipper ripper = new FolioRipper(url);
Assertions.assertEquals("DmBe6i", ripper.getGID(url));
}
}

@@ -1,53 +1,53 @@
package com.rarchives.ripme.tst.ripper.rippers;
import com.rarchives.ripme.ripper.rippers.GfycatRipper;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
public class GfycatRipperTest extends RippersTest {
/**
* Rips correctly formatted URL directly from Gfycat
*/
@Test
public void testGfycatGoodURL() throws IOException, URISyntaxException {
GfycatRipper ripper = new GfycatRipper(new URI("https://gfycat.com/TemptingExcellentIchthyosaurs").toURL());
testRipper(ripper);
}
/**
* Rips badly formatted URL directly from Gfycat
*/
@Test
public void testGfycatBadURL() throws IOException, URISyntaxException {
GfycatRipper ripper = new GfycatRipper(new URI("https://gfycat.com/gifs/detail/limitedtestyamericancrow").toURL());
testRipper(ripper);
}
/**
* Rips a Gfycat profile
*/
@Test
public void testGfycatProfile() throws IOException, URISyntaxException {
GfycatRipper ripper = new GfycatRipper(new URI("https://gfycat.com/@golbanstorage").toURL());
testRipper(ripper);
}
/**
* Rips a Gfycat amp link
* @throws IOException
*/
@Test
public void testGfycatAmp() throws IOException, URISyntaxException {
GfycatRipper ripper = new GfycatRipper(new URI("https://gfycat.com/amp/TemptingExcellentIchthyosaurs").toURL());
testRipper(ripper);
}
/**
* Rips a Gfycat profile with special characters in username
*/
@Test
public void testGfycatSpecialChar() throws IOException, URISyntaxException {
GfycatRipper ripper = new GfycatRipper(new URI("https://gfycat.com/@rsss.kr").toURL());
testRipper(ripper);
}
}

@@ -1,42 +1,42 @@
package com.rarchives.ripme.tst.ripper.rippers;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import com.rarchives.ripme.ripper.rippers.ListalRipper;
import org.junit.jupiter.api.Test;
public class ListalRipperTest extends RippersTest {
/**
* Test for pictures (folder type) url.
*/
@Test
public void testPictures() throws IOException, URISyntaxException {
ListalRipper ripper =
new ListalRipper(new URI("https://www.listal.com/emma-stone_iii/pictures").toURL());
testRipper(ripper);
}
/**
* Test for list type url.
*/
@Test
public void testRipListType() throws IOException, URISyntaxException {
ListalRipper ripper =
new ListalRipper(new URI("https://www.listal.com/list/evolution-emma-stone").toURL());
testRipper(ripper);
}
/**
* Test for folder type url.
*/
@Test
public void testRipFolderType() throws IOException, URISyntaxException {
ListalRipper ripper =
new ListalRipper(new URI("https://www.listal.com/chet-atkins/pictures").toURL());
testRipper(ripper);
}
}

@@ -1,55 +1,55 @@
package com.rarchives.ripme.tst.ripper.rippers;
import com.rarchives.ripme.ripper.rippers.ScrolllerRipper;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.HashMap;
import java.util.Map;
public class ScrolllerRipperTest extends RippersTest {
@Test
public void testScrolllerGID() throws IOException, URISyntaxException {
Map<URL, String> testURLs = new HashMap<>();
testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp").toURL(), "CatsStandingUp");
testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?filter=pictures").toURL(), "CatsStandingUp");
testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?sort=top&filter=pictures").toURL(), "CatsStandingUp");
testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?filter=pictures&sort=top").toURL(), "CatsStandingUp");
for (URL url : testURLs.keySet()) {
ScrolllerRipper ripper = new ScrolllerRipper(url);
ripper.setup();
Assertions.assertEquals(testURLs.get(url), ripper.getGID(ripper.getURL()));
deleteDir(ripper.getWorkingDir());
}
}
@Test
public void testScrolllerFilterRegex() throws IOException, URISyntaxException {
Map<URL, String> testURLs = new HashMap<>();
testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp").toURL(), "NOFILTER");
testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?filter=pictures").toURL(), "PICTURE");
testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?filter=videos").toURL(), "VIDEO");
testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?filter=albums").toURL(), "ALBUM");
testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?sort=top&filter=pictures").toURL(), "PICTURE");
testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?sort=top&filter=videos").toURL(), "VIDEO");
testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?sort=top&filter=albums").toURL(), "ALBUM");
testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?filter=pictures&sort=top").toURL(), "PICTURE");
testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?filter=videos&sort=top").toURL(), "VIDEO");
testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?filter=albums&sort=top").toURL(), "ALBUM");
for (URL url : testURLs.keySet()) {
ScrolllerRipper ripper = new ScrolllerRipper(url);
ripper.setup();
Assertions.assertEquals(testURLs.get(url), ripper.convertFilterString(ripper.getParameter(ripper.getURL(),"filter")));
deleteDir(ripper.getWorkingDir());
}
}
}
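
The filter test maps the URL's filter query parameter onto PICTURE/VIDEO/ALBUM/NOFILTER. The sketch below is a hypothetical stand-in for the ripper's getParameter/convertFilterString pair, not ScrolllerRipper's actual implementation; it just shows one way to arrive at the expected values above.

import java.net.URI;
import java.net.URISyntaxException;

public class FilterParamDemo {
    // Hypothetical equivalent of ScrolllerRipper.getParameter(url, "filter").
    static String getParameter(URI uri, String key) {
        String query = uri.getQuery();
        if (query == null) return null;
        for (String pair : query.split("&")) {
            String[] kv = pair.split("=", 2);
            if (kv.length == 2 && kv[0].equals(key)) return kv[1];
        }
        return null;
    }

    // Hypothetical equivalent of convertFilterString(...).
    static String convertFilterString(String filter) {
        if (filter == null) return "NOFILTER";
        switch (filter) {
            case "pictures": return "PICTURE";
            case "videos":   return "VIDEO";
            case "albums":   return "ALBUM";
            default:         return "NOFILTER";
        }
    }

    public static void main(String[] args) throws URISyntaxException {
        URI uri = new URI("https://scrolller.com/r/CatsStandingUp?sort=top&filter=videos");
        System.out.println(convertFilterString(getParameter(uri, "filter"))); // VIDEO
    }
}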

@@ -1,52 +1,52 @@
package com.rarchives.ripme.tst.ripper.rippers;
import com.rarchives.ripme.ripper.rippers.VscoRipper;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
public class VscoRipperTest extends RippersTest {
/**
* Testing single image.
*
* @throws IOException
*/
@Test
public void testSingleImageRip() throws IOException, URISyntaxException {
VscoRipper ripper = new VscoRipper(new URI("https://vsco.co/jolly-roger/media/597ce449846079297b3f7cf3").toURL());
testRipper(ripper);
}
/**
* Tests profile rip; prevents Bug #679 from happening again.
* https://github.com/RipMeApp/ripme/issues/679
*
* @throws IOException
*/
@Test
public void testHyphenatedRip() throws IOException, URISyntaxException {
VscoRipper ripper = new VscoRipper(new URI("https://vsco.co/jolly-roger/gallery").toURL());
testRipper(ripper);
}
/**
* Make sure it names the folder something sensible.
*
* @throws IOException
*/
@Test
public void testGetGID() throws IOException, URISyntaxException {
URL url = new URI("https://vsco.co/jolly-roger/media/590359c4ade3041f2658f407").toURL();
VscoRipper ripper = new VscoRipper(url);
Assertions.assertEquals("jolly-roger/59035", ripper.getGID(url), "Failed to get GID");
}
}

@@ -1,19 +1,19 @@
package com.rarchives.ripme.tst.ripper.rippers;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import com.rarchives.ripme.ripper.rippers.XlecxRipper;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
public class XlecxRipperTest extends RippersTest {
@Test
@Disabled("Broken ripper")
public void testAlbum() throws IOException, URISyntaxException {
XlecxRipper ripper = new XlecxRipper(new URI("http://xlecx.com/4274-black-canary-ravished-prey.html").toURL());
testRipper(ripper);
}
}

@@ -1,27 +0,0 @@
echo ""
echo "====================================================="
echo "Tabs are not allowed"
echo "-----------------------------------------------------"
git grep -n -P "\t" -- :/*.java | sed -e "s/\t/\x1b[7m--->\x1b[m/g"
echo "====================================================="
echo ""
echo "====================================================="
echo "Trailing whitespace is not allowed"
echo "-----------------------------------------------------"
git grep -n -P "[ \t]+$" -- :/*.java | sed -e "s/\t/\x1b[7m--->\x1b[m/g" | sed -e "s/ /\x1b[7m.\x1b[m/g" | sed -e "s/$/\x1b[7m$\x1b[m/g"
echo "====================================================="
echo ""
echo "====================================================="
echo "'){' is not allowed. Place a space between ')' and '{', i.e. 'if (a) {'"
echo "-----------------------------------------------------"
git grep -n -P "\)\{" -- :/*.java
echo "====================================================="
echo ""
echo "====================================================="
echo "A space is required after keywords (if|else|for|while|do|try|catch|finally)"
echo "-----------------------------------------------------"
git grep -n -P "(\b(if|for|while|catch)\b[(])|(\b(else|do|try|finally)\b[{])" -- :/*.java | sed -r -e "s/(\b(if|for|while|catch)\b[(])|(\b(else|do|try|finally)\b[{])/\x1b[7m\0\x1b[m/g"
echo "====================================================="


@@ -1,17 +0,0 @@
echo ""
echo "====================================================="
echo "Tabs are not allowed (please manually fix tabs)"
echo "-----------------------------------------------------"
git grep -n -P "\t" -- :/*.java | sed -e "s/\t/\x1b[7m--->\x1b[m/g"
echo "====================================================="
echo "Removing trailing whitespace..."
git grep -l -P "[ \t]+$" -- :/*.java | xargs -I % sed -i -r -e "s/[ \t]+$//g" %
echo "Replacing '){' with ') {'..."
git grep -l -P "\)\{" -- :/*.java | xargs -I % sed -i -r -e "s/\)\{/) {/g" %
echo "Adding space between keywords and punctuation..."
git grep -l -P "(\b(if|for|while|catch)\b[(])" -- :/*.java | xargs -I % sed -i -r -e "s/(\b(if|for|while|catch)\b[(])/\2 (/g" %
git grep -l -P "(\b(else|do|try|finally)\b[{])" -- :/*.java | xargs -I % sed -i -r -e "s/(\b(else|do|try|finally)\b[{])/\2 {/g" %