Mirror of https://github.com/RipMeApp/ripme.git, synced 2025-07-31 02:50:15 +02:00

Merge pull request #2088 from Thedrogon/my-branch

Just a few minor changes here and there (new to open-source contribution). Would certainly contribute more and try to update rippers.

Changes also made by metaprime@users.noreply.github.com
metaprime authored on 2025-02-02 00:04:43 -08:00 (committed by GitHub)
15 changed files with 3 additions and 31 deletions

CfakeRipper.java

@@ -49,8 +49,6 @@ public class CfakeRipper extends AbstractHTMLRipper {
@Override
public Document getNextPage(Document doc) throws IOException {
// Find next page
String nextUrl = "";
// We use comic-nav-next to the find the next page
Element elem = doc.select("td > div.next > a").first();
if (elem == null) {
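For context, getNextPage is the AbstractHTMLRipper pagination hook: it returns the Document of the following page and signals the end of an album by throwing IOException. A minimal sketch of that shape, reusing the selector from the hunk above and RipMe's Http.url(...).get() fetch helper (illustrative only, not the exact body of CfakeRipper):

    // Relies on java.io.IOException, org.jsoup.nodes.Document,
    // org.jsoup.nodes.Element and com.rarchives.ripme.utils.Http,
    // all of which appear in the imports elsewhere in this diff.
    @Override
    public Document getNextPage(Document doc) throws IOException {
        // The "next page" arrow sits inside a td > div.next cell.
        Element elem = doc.select("td > div.next > a").first();
        if (elem == null) {
            // No link means this was the last page.
            throw new IOException("No next page found");
        }
        // abs:href resolves relative links against the page URL; the real
        // ripper may use plain href if the site emits absolute URLs (assumption).
        return Http.url(elem.attr("abs:href")).get();
    }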

ErofusRipper.java

@@ -59,7 +59,6 @@ public class ErofusRipper extends AbstractHTMLRipper {
public List<String> getURLsFromPage(Document page) {
logger.info(page);
List<String> imageURLs = new ArrayList<>();
int x = 1;
if (pageContainsImages(page)) {
logger.info("Page contains images");
ripAlbum(page);
@@ -83,7 +82,6 @@ public class ErofusRipper extends AbstractHTMLRipper {
}
}
return imageURLs;
}
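For orientation, getURLsFromPage is the extraction hook of AbstractHTMLRipper: it receives the parsed page and returns the media URLs to queue for download. A stripped-down sketch of that contract with a generic img selector (ErofusRipper's real body instead delegates to the pageContainsImages/ripAlbum helpers shown above):

    // Relies on java.util.List, java.util.ArrayList and the org.jsoup
    // Document/Element types that the file already imports.
    @Override
    public List<String> getURLsFromPage(Document page) {
        List<String> imageURLs = new ArrayList<>();
        // Collect every image source on the page; a real ripper filters more narrowly.
        for (Element img : page.select("img")) {
            imageURLs.add(img.attr("abs:src"));
        }
        return imageURLs;
    }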

HentaiNexusRipper.java

@@ -60,7 +60,6 @@ public class HentaiNexusRipper extends AbstractJSONRipper {
addURLToDownload(url, getPrefix(index));
}
@Override
protected List<String> getURLsFromJSON(JSONObject json) throws JSONException {
@@ -120,7 +119,7 @@ public class HentaiNexusRipper extends AbstractJSONRipper {
byte[] jsonBytes = Base64.getDecoder().decode(jsonEncodedString);
ArrayList unknownArray = new ArrayList();
ArrayList<Integer> unknownArray = new ArrayList<>();
ArrayList<Integer> indexesToUse = new ArrayList<>();
for (int i = 0x2; unknownArray.size() < 0x10; ++i) {
@@ -176,9 +175,7 @@ public class HentaiNexusRipper extends AbstractJSONRipper {
return decodedJsonString;
}
private static long signedToUnsigned(int signed) {
return (byte) signed & 0xFF;
}
}
}
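The substitution in the @@ -120,7 +119,7 @@ hunk swaps a raw ArrayList for a parameterized ArrayList<Integer>. Runtime behaviour is identical; the gain is compile-time checking. A small self-contained illustration with made-up values (not code from the ripper):

    import java.util.ArrayList;

    public class RawVsGenericExample {
        public static void main(String[] args) {
            // Raw type: unchecked warning, and reads come back as Object.
            ArrayList raw = new ArrayList();
            raw.add(42);
            int a = (Integer) raw.get(0);      // explicit cast required

            // Parameterized type: no warning, no cast, and adding a
            // non-Integer element fails to compile.
            ArrayList<Integer> typed = new ArrayList<>();
            typed.add(42);
            int b = typed.get(0);              // unboxed directly
            System.out.println(a + ", " + b);
        }
    }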

ImagebamRipper.java

@@ -143,8 +143,6 @@ public class ImagebamRipper extends AbstractHTMLRipper {
.get();
// Find image
Elements metaTags = doc.getElementsByTag("meta");
String imgsrc = "";//initialize, so no NullPointerExceptions should ever happen.
Elements elem = doc.select("img[class*=main-image]");
if ((elem != null) && (elem.size() > 0)) {
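Separate from the line this hunk deletes, the surrounding guard deserves a note: jsoup's select() is documented to return an empty Elements collection rather than null, so the null half of the check is redundant. A minimal sketch of an equivalent, tighter guard using the same selector (illustrative only):

    // Elements/Element come from org.jsoup, which ImagebamRipper already uses.
    Elements elem = doc.select("img[class*=main-image]");
    if (!elem.isEmpty()) {
        // first() is safe here because the list is known to be non-empty.
        String imgsrc = elem.first().attr("src");
        // ... hand imgsrc to the download queue ...
    }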

ImagefapRipper.java

@@ -308,7 +308,7 @@ public class ImagefapRipper extends AbstractHTMLRipper {
} else if(duration / 1000 < 300){
logger.debug("Rate limit: " + (rateLimitFiveMinutes - callsMade) + " calls remaining for first 5 minute mark.");
} else if(duration / 1000 < 3600){
logger.debug("Rate limit: " + (RATE_LIMIT_HOUR - callsMade) + " calls remaining for first hour mark.");
logger.debug("Rate limit: " + (rateLimitHour - callsMade) + " calls remaining for first hour mark.");
}
return duration;
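The single substitution here renames RATE_LIMIT_HOUR to rateLimitHour, matching the camelCase rateLimitFiveMinutes used in the branch just above it. By Java convention, UPPER_SNAKE_CASE is reserved for static final constants, so a non-constant limit reads better in camelCase. A schematic of the two styles with made-up values (the real field declarations are not shown in this hunk):

    public class NamingConventionExample {
        // static final constants use UPPER_SNAKE_CASE by convention.
        private static final int MAX_CALLS_PER_HOUR = 100;  // made-up constant
        // Ordinary fields use camelCase, like rateLimitFiveMinutes above.
        private int rateLimitHour = MAX_CALLS_PER_HOUR;     // made-up value
        private int callsMade = 0;

        int remainingThisHour() {
            // Same arithmetic as the debug line above: remaining = limit - calls.
            return rateLimitHour - callsMade;
        }
    }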

ImgboxRipper.java

@@ -12,7 +12,6 @@ import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;
public class ImgboxRipper extends AbstractHTMLRipper {

KingcomixRipper.java

@@ -12,7 +12,6 @@ import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;
public class KingcomixRipper extends AbstractHTMLRipper {

PawooRipper.java

@@ -3,11 +3,6 @@ package com.rarchives.ripme.ripper.rippers;
import java.io.IOException;
import java.net.URL;
import com.rarchives.ripme.utils.Http;
import org.jsoup.nodes.Document;
import org.jsoup.select.Elements;
public class PawooRipper extends MastodonRipper {
public PawooRipper(URL url) throws IOException {
super(url);

PorncomixRipper.java

@@ -12,7 +12,6 @@ import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;
public class PorncomixRipper extends AbstractHTMLRipper {

PorncomixinfoRipper.java

@@ -43,8 +43,6 @@ public class PorncomixinfoRipper extends AbstractHTMLRipper {
@Override
public Document getNextPage(Document doc) throws IOException {
// Find next page
String nextUrl = "";
// We use comic-nav-next to the find the next page
Element elem = doc.select("a.next_page").first();
if (elem == null) {


@@ -1,7 +1,6 @@
package com.rarchives.ripme.ripper.rippers;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

ReadcomicRipper.java

@@ -11,9 +11,6 @@ import java.util.regex.Pattern;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;
public class ReadcomicRipper extends ViewcomicRipper {
public ReadcomicRipper(URL url) throws IOException {


@@ -1,7 +1,6 @@
package com.rarchives.ripme.ripper.rippers;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

YoupornRipper.java

@@ -13,9 +13,6 @@ import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import com.rarchives.ripme.ripper.VideoRipper;
import com.rarchives.ripme.utils.Http;
public class YoupornRipper extends AbstractSingleFileRipper {

YuvutuRipper.java

@@ -12,7 +12,6 @@ import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;
public class YuvutuRipper extends AbstractHTMLRipper {