diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 06283ebf..0c095b28 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -1,15 +1,15 @@ -* Ripme version: -* Java version: -* Operating system: - -* Exact URL you were trying to rip when the problem occurred: -* Please include any additional information about how to reproduce the problem: - -## Expected Behavior - -Detail the expected behavior here. - -## Actual Behavior - -Detail the actual (incorrect) behavior here. You can post log snippets or attach log files to your issue report. +* Ripme version: +* Java version: +* Operating system: + +* Exact URL you were trying to rip when the problem occurred: +* Please include any additional information about how to reproduce the problem: + +## Expected Behavior + +Detail the expected behavior here. + +## Actual Behavior + +Detail the actual (incorrect) behavior here. You can post log snippets or attach log files to your issue report. diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 8810800c..56d0dd29 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,27 +1,27 @@ -# Category - -This change is exactly one of the following (please change `[ ]` to `[x]`) to indicate which: -* [ ] a bug fix (Fix #...) -* [ ] a new Ripper -* [ ] a refactoring -* [ ] a style change/fix -* [ ] a new feature - - -# Description - -Please add details about your change here. - - -# Testing - -Required verification: -* [ ] I've verified that there are no regressions in `mvn test` (there are no new failures or errors). -* [ ] I've verified that this change works as intended. - * [ ] Downloads all relevant content. - * [ ] Downloads content from multiple pages (as necessary or appropriate). - * [ ] Saves content at reasonable file names (e.g. page titles or content IDs) to help easily browse downloaded content. 
-* [ ] I've verified that this change did not break existing functionality (especially in the Ripper I modified). - -Optional but recommended: -* [ ] I've added a unit test to cover my change. +# Category + +This change is exactly one of the following (please change `[ ]` to `[x]`) to indicate which: +* [ ] a bug fix (Fix #...) +* [ ] a new Ripper +* [ ] a refactoring +* [ ] a style change/fix +* [ ] a new feature + + +# Description + +Please add details about your change here. + + +# Testing + +Required verification: +* [ ] I've verified that there are no regressions in `mvn test` (there are no new failures or errors). +* [ ] I've verified that this change works as intended. + * [ ] Downloads all relevant content. + * [ ] Downloads content from multiple pages (as necessary or appropriate). + * [ ] Saves content at reasonable file names (e.g. page titles or content IDs) to help easily browse downloaded content. +* [ ] I've verified that this change did not break existing functionality (especially in the Ripper I modified). + +Optional but recommended: +* [ ] I've added a unit test to cover my change. 
diff --git a/.github/workflows/gradle.yml b/.github/workflows/gradle.yml new file mode 100644 index 00000000..d1d140d1 --- /dev/null +++ b/.github/workflows/gradle.yml @@ -0,0 +1,65 @@ +name: CI + release + +on: + pull_request: + push: + branches: + - '**' + tags: + - '!**' + +jobs: + build: + + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest, windows-latest, macOS-latest] + java: [23] + include: # test old java on one os only, upload from ubuntu java-21 + - os: ubuntu-latest + java: 21 + upload: true + + steps: + + - uses: actions/checkout@v1 + + - name: Set environment CI_ variables + id: ci-env + uses: FranzDiebold/github-env-vars-action@v2 + + - name: Set up java + uses: actions/setup-java@v4.2.1 + with: + java-version: ${{ matrix.java }} + distribution: zulu + cache: gradle + + - name: Build with Gradle + run: gradle clean build -PjavacRelease=${{ matrix.java }} + + - name: SHA256 + if: matrix.upload + run: shasum -a 256 build/libs/*.jar + + - name: upload jar as asset + if: matrix.upload + uses: actions/upload-artifact@v4 + with: + name: zipped-ripme-jar + path: build/libs/*.jar + + - name: create pre-release + id: create-pre-release + if: matrix.upload + uses: "marvinpinto/action-automatic-releases@latest" + with: + repo_token: "${{ secrets.GITHUB_TOKEN }}" + automatic_release_tag: "latest-${{ env.CI_REF_NAME_SLUG }}" + prerelease: true + title: "development build ${{ env.CI_REF_NAME }}" + files: | + build/libs/*.jar + +# vim:set ts=2 sw=2 et: diff --git a/.github/workflows/maven.yml b/.github/workflows/maven.yml deleted file mode 100644 index 93719005..00000000 --- a/.github/workflows/maven.yml +++ /dev/null @@ -1,21 +0,0 @@ -name: Java CI - -on: [push, pull_request] - -jobs: - build: - - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-latest, windows-latest, macOS-latest] - java: [1.8, 1.9] - - steps: - - uses: actions/checkout@v1 - - name: Set up JDK 1.8 - uses: actions/setup-java@v1 - with: - java-version: ${{ 
matrix.java }} - - name: Build with Maven - run: mvn package --file pom.xml diff --git a/.gitignore b/.gitignore index e7813bc7..fe1e80c6 100644 --- a/.gitignore +++ b/.gitignore @@ -80,6 +80,12 @@ buildNumber.properties # Avoid ignoring Maven wrapper jar file (.jar files are usually ignored) !/.mvn/wrapper/maven-wrapper.jar +### gradle ### +/.gradle +/build +# Avoid ignoring gradle wrapper jar file (.jar files are usually ignored) +!/gradle/wrapper/gradle-wrapper.jar + ### Windows ### # Windows thumbnail cache files Thumbs.db @@ -105,6 +111,11 @@ $RECYCLE.BIN/ .vscode .idea .project +local.properties + +### Build files +.gradle/ +build/ ### Ripme ### ripme.log @@ -112,7 +123,6 @@ rips/ .history ripme.jar.update *.swp -*.properties !LabelsBundle*.properties history.json *.iml diff --git a/.project b/.project deleted file mode 100644 index 89407457..00000000 --- a/.project +++ /dev/null @@ -1,23 +0,0 @@ - - - ripme - - - - - - org.eclipse.jdt.core.javabuilder - - - - - org.eclipse.m2e.core.maven2Builder - - - - - - org.eclipse.jdt.core.javanature - org.eclipse.m2e.core.maven2Nature - - diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 0fd1b17f..00000000 --- a/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: java - -matrix: - include: - - jdk: openjdk9 - before_install: - - rm "${JAVA_HOME}/lib/security/cacerts" - - ln -s /etc/ssl/certs/java/cacerts "${JAVA_HOME}/lib/security/cacerts" - - jdk: openjdk8 - -after_success: - - mvn clean test jacoco:report coveralls:report diff --git a/.vscode/settings.json b/.vscode/settings.json deleted file mode 100644 index e26479b6..00000000 --- a/.vscode/settings.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "files.exclude": { - "target/**": true, - "**/.git": true, - "**/.DS_Store": true, - "**/*.class": true, - "**/rips/**": true - }, - "java.configuration.updateBuildConfiguration": "automatic" -} diff --git a/README.md b/README.md index 661b6f87..6334528c 100644 --- a/README.md +++ b/README.md @@ -1,91 +1,146 
@@ -# RipMe [![Licensed under the MIT License](https://img.shields.io/badge/License-MIT-blue.svg)](https://github.com/RipMeApp/ripme/blob/master/LICENSE.txt) [![Join the chat at https://gitter.im/RipMeApp/Lobby](https://badges.gitter.im/RipMeApp/Lobby.svg)](https://gitter.im/RipMeApp/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Subreddit](https://img.shields.io/badge/discuss-on%20reddit-blue.svg)](https://www.reddit.com/r/ripme/) +# RipMe -[![Build Status](https://travis-ci.org/RipMeApp/ripme.svg?branch=master)](https://travis-ci.org/RipMeApp/ripme) -[![Coverage Status](https://coveralls.io/repos/github/RipMeApp/ripme/badge.svg?branch=master)](https://coveralls.io/github/RipMeApp/ripme?branch=master) +[![Licensed under the MIT License](https://img.shields.io/badge/License-MIT-blue.svg)](/LICENSE.txt) +[![Join the chat at https://gitter.im/RipMeApp/Lobby](https://badges.gitter.im/RipMeApp/Lobby.svg)](https://gitter.im/RipMeApp/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Subreddit](https://img.shields.io/badge/discuss-on%20reddit-blue.svg)](https://www.reddit.com/r/ripme/) +![alt Badge Status](https://github.com/ripmeapp2/ripme/actions/workflows/gradle.yml/badge.svg) +[![Coverage Status](https://coveralls.io/repos/github/RipMeApp/ripme/badge.svg?branch=main)](https://coveralls.io/github/RipMeApp/ripme?branch=main) -# Contribute +## Recent development updates -RipMe is maintained with ♥️ and in our limited free time by **[@MetaPrime](https://github.com/metaprime)**, **[@cyian-1756](https://github.com/cyian-1756)** and **[@kevin51jiang](https://github.com/kevin51jiang)**. If you'd like to contribute but aren't good with code, help keep us happy with a small contribution! +- For a while, the ripmeapp/ripme repo was inactive, but development continued at ripmeapp2/ripme. +- Now, maintainers have been updated and development has been rejoined with ripmeapp/ripme where it will continue. 
+- You may find a number of stale issues on ripmeapp/ripme and/or on ripmeapp2/ripme until everything is merged back together and statuses are updated. +- The current active development repo for RipMe is located at [ripmeapp/ripme](https://github.com/ripmeapp/ripme/). -[![Tip with PayPal](https://img.shields.io/badge/PayPal-Buy_us...-lightgrey.svg)](https://www.paypal.me/ripmeapp) -[![Tip with PayPal](https://img.shields.io/badge/coffee-%245-green.svg)](https://www.paypal.com/paypalme/ripmeapp/send?amount=5.00¤cyCode=USD&locale.x=en_US&country.x=US) -[![Tip with PayPal](https://img.shields.io/badge/beer-%2410-yellow.svg)](https://www.paypal.com/paypalme/ripmeapp/send?amount=10.00¤cyCode=USD&locale.x=en_US&country.x=US) -[![Tip with PayPal](https://img.shields.io/badge/lunch-%2420-orange.svg)](https://www.paypal.com/paypalme/ripmeapp/send?amount=20.00¤cyCode=USD&locale.x=en_US&country.x=US) -[![Tip with PayPal](https://img.shields.io/badge/dinner-%2450-red.svg)](https://www.paypal.com/paypalme/ripmeapp/send?amount=50.00¤cyCode=USD&locale.x=en_US&country.x=US) -[![Tip with PayPal](https://img.shields.io/badge/custom_amount-...-lightgrey.svg)](https://www.paypal.me/ripmeapp) +## Maintainers + +RipMe has been maintained with ♥️ and in our limited free time by the following +people, roughly in order from most recent primary developer, with current +activity marked by color of the indicator: + +- **[@soloturn](https://github.com/soloturn)** 🟢, +- **[@cyian-1756](https://github.com/cyian-1756)** 🟥, +- **[@kevin51jiang](https://github.com/kevin51jiang)** 🟥, +- **[@MetaPrime](https://github.com/metaprime)** 🟡, +- and its original creator, **[@4pr0n](https://github.com/4pr0n)** 🟥. + +If you'd like to become a maintainer, ask an active maintainer to be added to the team. 
+ +## Contact + +Chat with the team and community on [gitter](https://gitter.im/RipMeApp/Lobby) and [reddit.com/r/ripme](https://www.reddit.com/r/ripme/) # About -RipMe is an album ripper for various websites. Runs on your computer. Requires Java 8. -RipMe is a cross-platform tool. It has been tested and confirmed working on Windows, Linux and MacOS. +RipMe is an album ripper for various websites. It is a cross-platform tool that runs on your computer, and +requires Java 21 or later to run. RipMe has been tested and is confirmed working on Windows, Linux, and MacOS. ![Screenshot](https://i.imgur.com/UCQNjeg.png) -## [Downloads](https://github.com/ripmeapp/ripme/releases) +## Downloads -Download `ripme.jar` from the [latest release](https://github.com/ripmeapp/ripme/releases). +Download `ripme.jar` from the [latest release](https://github.com/ripmeapp2/ripme/releases). For information about running the `.jar` file, see +[the How To Run wiki](https://github.com/ripmeapp/ripme/wiki/How-To-Run-RipMe). -**Note: If you're currently using version 1.2.x, 1.3.x or 1.7.49, you will not automatically get updates to the newest versions. We recommend downloading the latest version from the link above.** +The version number like `ripme-1.7.94-17-2167aa34-feature_auto_release.jar` contains a release number (`1.7.94`), given by +a person, the number of commits since this version (`17`). The commit SHA (`2167aa34`) uniquely references the +source code ripme was built from. If it is not built from the main branch, the branch name (`feature/auto-release`) is +given. -For information about running the `.jar` file, see [the How To Run wiki](https://github.com/ripmeapp/ripme/wiki/How-To-Run-RipMe). +## Installation -## [Changelog](https://github.com/ripmeapp/ripme/blob/master/ripme.json) (ripme.json) +On macOS, there is a [cask](https://github.com/Homebrew/homebrew-cask/blob/master/Casks/ripme.rb). 
+ +``` +brew install --cask ripme && xattr -d com.apple.quarantine /Applications/ripme.jar +``` + +## Changelog + +[Changelog](/ripme.json) **(ripme.json)** # Features -* Quickly downloads all images in an online album (see supported sites below) -* Easily re-rip albums to fetch new content -* Built in updater -* Skips already downloaded images by default -* Can auto skip e-hentai and nhentai albums containing certain tags [See here for how to enable](https://github.com/RipMeApp/ripme/wiki/Config-options#nhentaiblacklisttags) -* Download a range of urls [See here for how](https://github.com/RipMeApp/ripme/wiki/How-To-Run-RipMe#downloading-a-url-range) +- Quickly downloads all images in an online album. [See supported sites](https://github.com/ripmeapp/ripme/wiki/Supported-Sites) +- Easily re-rip albums to fetch new content +- Built in updater +- Skips already downloaded images by default +- Can auto skip e-hentai and nhentai albums containing certain tags. [See here for how to enable](https://github.com/RipMeApp/ripme/wiki/Config-options#nhentaiblacklisttags) +- Download a range of urls. [See here for how](https://github.com/RipMeApp/ripme/wiki/How-To-Run-RipMe#downloading-a-url-range) -## [List of Supported Sites](https://github.com/ripmeapp/ripme/wiki/Supported-Sites) +## List of Supported Sites -* imgur -* twitter -* tumblr -* instagram -* flickr -* photobucket -* reddit -* gonewild -* motherless -* imagefap -* imagearn -* seenive -* vinebox -* 8muses -* deviantart -* xhamster -* (more) +- imgur +- twitter +- tumblr +- instagram +- flickr +- photobucket +- reddit +- gonewild +- motherless +- imagefap +- imagearn +- seenive +- vinebox +- 8muses +- deviantart +- xhamster +- [(more)](https://github.com/ripmeapp/ripme/wiki/Supported-Sites) ## Not Supported? Request support for more sites by adding a comment to [this Github issue](https://github.com/RipMeApp/ripme/issues/38). 
-If you're a developer, you can add your own Ripper by following the wiki guide +If you're a developer, you can add your own Ripper by following the wiki guide: [How To Create A Ripper for HTML Websites](https://github.com/ripmeapp/ripme/wiki/How-To-Create-A-Ripper-for-HTML-websites). # Compiling & Building -The project uses [Maven](http://maven.apache.org/). -To build the .jar file using Maven, navigate to the root project directory and run: +The project uses [Gradle](https://gradle.org). To build the .jar file, +navigate to the root project directory and run at least the test you +change, e.g. Xhamster. Test execution can also be excluded completely: ```bash -mvn clean compile assembly:single +./gradlew clean build testAll --tests XhamsterRipperTest.testXhamster2Album +./gradlew clean build -x test --warning-mode all ``` -This will include all dependencies in the JAR. +The generated JAR (java archive) in build/libs will include all +dependencies. # Running Tests -After building you can run tests by running the following: +Tests can be tagged as being slow, or flaky. The gradle build reacts to +the following combinations of tags: + +- default is to run all tests without tag. +- testAll runs all tests. +- testFlaky runs tests with tag "flaky". +- testSlow runs tests with tag "slow". +- tests can be run by test class, or single test. Use "testAll" so it does + not matter if a test is tagged or not. ```bash -mvn test +./gradlew test +./gradlew testAll +./gradlew testFlaky +./gradlew testSlow +./gradlew testAll --tests XhamsterRipperTest +./gradlew testAll --tests XhamsterRipperTest.testXhamster2Album ``` -Please note that some tests may fail as sites change and our rippers become out of date. -Start by building and testing a released version of RipMe -and then ensure that any changes you make do not cause more tests to break. +Please note that some tests may fail as sites change and our rippers +become out of date. 
Start by building and testing a released version +of RipMe and then ensure that any changes you make do not cause more +tests to break. + +# New GUI - compose-jb +As Java Swing will go away in future, a new GUI technology should be used. One of the +candidates is [Jetpack Compose for Desktop](https://github.com/JetBrains/compose-jb/). + +The library leverages the compose library for android and provides it for android, +desktop and web. The navigation library is not available for desktop, so Arkadii Ivanov +implemented +[decompose](https://proandroiddev.com/a-comprehensive-hundred-line-navigation-for-jetpack-desktop-compose-5b723c4f256e). diff --git a/build.bat b/build.bat index 7c2aa6c3..f6bf32a6 100755 --- a/build.bat +++ b/build.bat @@ -1,2 +1 @@ -mvn clean compile assembly:single -mvn io.github.zlika:reproducible-build-maven-plugin:0.6:strip-jar \ No newline at end of file +./gradlew clean build -x test diff --git a/build.gradle.kts b/build.gradle.kts new file mode 100644 index 00000000..f1f4e9aa --- /dev/null +++ b/build.gradle.kts @@ -0,0 +1,149 @@ +// the build derives a version with the jgitver plugin out of a tag in the git history. when there is no +// git repo, the jgitver default would be 0.0.0. one can override this version with a parameter. 
also, permit +// to start the build setting the javac release parameter, no parameter means build for java-17: +// gradle clean build -PjavacRelease=21 +// gradle clean build -PcustomVersion=1.0.0-10-asdf +val customVersion = (project.findProperty("customVersion") ?: "") as String +val javacRelease = (project.findProperty("javacRelease") ?: "21") as String + +plugins { + id("fr.brouillard.oss.gradle.jgitver") version "0.9.1" + id("jacoco") + id("java") + id("maven-publish") +} + +repositories { + mavenLocal() + mavenCentral() +} + +dependencies { + implementation("com.lmax:disruptor:3.4.4") + implementation("org.java-websocket:Java-WebSocket:1.5.3") + implementation("org.jsoup:jsoup:1.16.1") + implementation("org.json:json:20211205") + implementation("com.j2html:j2html:1.6.0") + implementation("commons-configuration:commons-configuration:1.10") + implementation("commons-cli:commons-cli:1.5.0") + implementation("commons-io:commons-io:2.13.0") + implementation("org.apache.httpcomponents:httpclient:4.5.14") + implementation("org.apache.httpcomponents:httpmime:4.5.14") + implementation("org.apache.logging.log4j:log4j-api:2.20.0") + implementation("org.apache.logging.log4j:log4j-core:2.20.0") + implementation("com.squareup.okhttp3:okhttp:4.12.0") + implementation("org.graalvm.js:js:22.3.2") + testImplementation(enforcedPlatform("org.junit:junit-bom:5.10.0")) + testImplementation("org.junit.jupiter:junit-jupiter") + testRuntimeOnly("org.junit.platform:junit-platform-launcher") +} + +group = "com.rarchives.ripme" +version = "1.7.94" +description = "ripme" + +jacoco { + toolVersion = "0.8.12" +} + +jgitver { + gitCommitIDLength = 8 + nonQualifierBranches = "main,master" + useGitCommitID = true +} + +afterEvaluate { + if (customVersion != "") { + project.version = customVersion + } +} + +tasks.compileJava { + options.release.set(Integer.parseInt(javacRelease)) +} + +tasks.withType { + duplicatesStrategy = DuplicatesStrategy.INCLUDE + manifest { + attributes["Main-Class"] = 
"com.rarchives.ripme.App" + attributes["Implementation-Version"] = archiveVersion + attributes["Multi-Release"] = "true" + } + + // To add all of the dependencies otherwise a "NoClassDefFoundError" error + from(sourceSets.main.get().output) + + dependsOn(configurations.runtimeClasspath) + from({ + configurations.runtimeClasspath.get().filter { it.name.endsWith("jar") }.map { zipTree(it) } + }) +} + +publishing { + publications { + create("maven") { + from(components["java"]) + } + } +} + +tasks.withType { + options.encoding = "UTF-8" + val compilerArgs = options.compilerArgs + compilerArgs.addAll(listOf("-Xlint:deprecation")) +} + +tasks.test { + testLogging { + showStackTraces = true + } + useJUnitPlatform { + // gradle-6.5.1 not yet allows passing this as parameter, so exclude it + excludeTags("flaky","slow") + includeEngines("junit-jupiter") + includeEngines("junit-vintage") + } + finalizedBy(tasks.jacocoTestReport) // report is always generated after tests run +} + +tasks.register("testAll") { + useJUnitPlatform { + includeTags("any()", "none()") + } +} + +tasks.register("testFlaky") { + useJUnitPlatform { + includeTags("flaky") + } +} + +tasks.register("testSlow") { + useJUnitPlatform { + includeTags("slow") + } +} + +tasks.register("testTagged") { + useJUnitPlatform { + includeTags("any()") + } +} + +// make all archive tasks in the build reproducible +tasks.withType().configureEach { + isPreserveFileTimestamps = false + isReproducibleFileOrder = true +} + +println("Build directory: ${file(layout.buildDirectory)}") + +tasks.jacocoTestReport { + dependsOn(tasks.test) // tests are required to run before generating the report + reports { + xml.required.set(false) + csv.required.set(false) + html.outputLocation.set(file("${file(layout.buildDirectory)}/jacocoHtml")) + } +} + diff --git a/build.sh b/build.sh index 2f044cde..d4dbe3b8 100755 --- a/build.sh +++ b/build.sh @@ -1,4 +1,2 @@ #!/usr/bin/env bash -mvn clean compile assembly:single -# Strip the jar of any 
non-reproducible metadata such as timestamps -mvn io.github.zlika:reproducible-build-maven-plugin:0.6:strip-jar \ No newline at end of file +./gradlew clean build -x test diff --git a/deploy.bat b/deploy.bat deleted file mode 100644 index 388ece44..00000000 --- a/deploy.bat +++ /dev/null @@ -1,2 +0,0 @@ -@echo off -powershell -c ".\deploy.ps1 -source (Join-Path target (Get-Item -Path .\target\* -Filter *.jar)[0].Name) -dest ripme.jar" diff --git a/deploy.ps1 b/deploy.ps1 deleted file mode 100644 index 9124c241..00000000 --- a/deploy.ps1 +++ /dev/null @@ -1,16 +0,0 @@ -Param ( - [Parameter(Mandatory=$True)] - [string]$source, - [Parameter(Mandatory=$True)] - [string]$dest -) - -Copy-Item -Path $source -Destination $dest - -$sourceHash = (Get-FileHash $source -algorithm MD5).Hash -$destHash = (Get-FileHash $dest -algorithm MD5).Hash -if ($sourceHash -eq $destHash) { - Write-Output 'Deployed successfully.' -} else { - Write-Output 'Hash Mismatch: did you close ripme before deploying?' -} diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 00000000..a4b76b95 Binary files /dev/null and b/gradle/wrapper/gradle-wrapper.jar differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 00000000..df97d72b --- /dev/null +++ b/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,7 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-8.10.2-bin.zip +networkTimeout=10000 +validateDistributionUrl=true +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/gradlew b/gradlew new file mode 100755 index 00000000..f5feea6d --- /dev/null +++ b/gradlew @@ -0,0 +1,252 @@ +#!/bin/sh + +# +# Copyright © 2015-2021 the original authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 +# + +############################################################################## +# +# Gradle start up script for POSIX generated by Gradle. +# +# Important for running: +# +# (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is +# noncompliant, but you have some other compliant shell such as ksh or +# bash, then to run this script, type that shell name before the whole +# command line, like: +# +# ksh Gradle +# +# Busybox and similar reduced shells will NOT work, because this script +# requires all of these POSIX shell features: +# * functions; +# * expansions «$var», «${var}», «${var:-default}», «${var+SET}», +# «${var#prefix}», «${var%suffix}», and «$( cmd )»; +# * compound commands having a testable exit status, especially «case»; +# * various built-in commands including «command», «set», and «ulimit». +# +# Important for patching: +# +# (2) This script targets any POSIX shell, so it avoids extensions provided +# by Bash, Ksh, etc; in particular arrays are avoided. +# +# The "traditional" practice of packing multiple parameters into a +# space-separated string is a well documented source of bugs and security +# problems, so this is (mostly) avoided, by progressively accumulating +# options in "$@", and eventually passing that to Java. 
+# +# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, +# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; +# see the in-line comments for details. +# +# There are tweaks for specific operating systems such as AIX, CygWin, +# Darwin, MinGW, and NonStop. +# +# (3) This script is generated from the Groovy template +# https://github.com/gradle/gradle/blob/HEAD/platforms/jvm/plugins-application/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# within the Gradle project. +# +# You can find Gradle at https://github.com/gradle/gradle/. +# +############################################################################## + +# Attempt to set APP_HOME + +# Resolve links: $0 may be a link +app_path=$0 + +# Need this for daisy-chained symlinks. +while + APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path + [ -h "$app_path" ] +do + ls=$( ls -ld "$app_path" ) + link=${ls#*' -> '} + case $link in #( + /*) app_path=$link ;; #( + *) app_path=$APP_HOME$link ;; + esac +done + +# This is normally unused +# shellcheck disable=SC2034 +APP_BASE_NAME=${0##*/} +# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) +APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s +' "$PWD" ) || exit + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD=maximum + +warn () { + echo "$*" +} >&2 + +die () { + echo + echo "$*" + echo + exit 1 +} >&2 + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "$( uname )" in #( + CYGWIN* ) cygwin=true ;; #( + Darwin* ) darwin=true ;; #( + MSYS* | MINGW* ) msys=true ;; #( + NONSTOP* ) nonstop=true ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + + +# Determine the Java command to use to start the JVM. 
+if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD=$JAVA_HOME/jre/sh/java + else + JAVACMD=$JAVA_HOME/bin/java + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD=java + if ! command -v java >/dev/null 2>&1 + then + die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +fi + +# Increase the maximum file descriptors if we can. +if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then + case $MAX_FD in #( + max*) + # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC2039,SC3045 + MAX_FD=$( ulimit -H -n ) || + warn "Could not query maximum file descriptor limit" + esac + case $MAX_FD in #( + '' | soft) :;; #( + *) + # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC2039,SC3045 + ulimit -n "$MAX_FD" || + warn "Could not set maximum file descriptor limit to $MAX_FD" + esac +fi + +# Collect all arguments for the java command, stacking in reverse order: +# * args from the command line +# * the main class name +# * -classpath +# * -D...appname settings +# * --module-path (only if needed) +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. 
+ +# For Cygwin or MSYS, switch paths to Windows format before running java +if "$cygwin" || "$msys" ; then + APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) + CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) + + JAVACMD=$( cygpath --unix "$JAVACMD" ) + + # Now convert the arguments - kludge to limit ourselves to /bin/sh + for arg do + if + case $arg in #( + -*) false ;; # don't mess with options #( + /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath + [ -e "$t" ] ;; #( + *) false ;; + esac + then + arg=$( cygpath --path --ignore --mixed "$arg" ) + fi + # Roll the args list around exactly as many times as the number of + # args, so each arg winds up back in the position where it started, but + # possibly modified. + # + # NB: a `for` loop captures its iteration list before it begins, so + # changing the positional parameters here affects neither the number of + # iterations, nor the values presented in `arg`. + shift # remove old arg + set -- "$@" "$arg" # push replacement arg + done +fi + + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Collect all arguments for the java command: +# * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments, +# and any embedded shellness will be escaped. +# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be +# treated as '${Hostname}' itself on the command line. + +set -- \ + "-Dorg.gradle.appname=$APP_BASE_NAME" \ + -classpath "$CLASSPATH" \ + org.gradle.wrapper.GradleWrapperMain \ + "$@" + +# Stop when "xargs" is not available. +if ! command -v xargs >/dev/null 2>&1 +then + die "xargs is not available" +fi + +# Use "xargs" to parse quoted args. +# +# With -n1 it outputs one arg per line, with the quotes and backslashes removed. 
+# +# In Bash we could simply go: +# +# readarray ARGS < <( xargs -n1 <<<"$var" ) && +# set -- "${ARGS[@]}" "$@" +# +# but POSIX shell has neither arrays nor command substitution, so instead we +# post-process each arg (as a line of input to sed) to backslash-escape any +# character that might be a shell metacharacter, then use eval to reverse +# that process (while maintaining the separation between arguments), and wrap +# the whole thing up as a single "set" statement. +# +# This will of course break if any of these variables contains a newline or +# an unmatched quote. +# + +eval "set -- $( + printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | + xargs -n1 | + sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | + tr '\n' ' ' + )" '"$@"' + +exec "$JAVACMD" "$@" diff --git a/gradlew.bat b/gradlew.bat new file mode 100644 index 00000000..9b42019c --- /dev/null +++ b/gradlew.bat @@ -0,0 +1,94 @@ +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. 
+@rem +@rem SPDX-License-Identifier: Apache-2.0 +@rem + +@if "%DEBUG%"=="" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%"=="" set DIRNAME=. +@rem This is normally unused +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if %ERRORLEVEL% equ 0 goto execute + +echo. 1>&2 +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2 +echo. 1>&2 +echo Please set the JAVA_HOME variable in your environment to match the 1>&2 +echo location of your Java installation. 1>&2 + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. 1>&2 +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2 +echo. 1>&2 +echo Please set the JAVA_HOME variable in your environment to match the 1>&2 +echo location of your Java installation. 
1>&2 + +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* + +:end +@rem End local scope for the variables with windows NT shell +if %ERRORLEVEL% equ 0 goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +set EXIT_CODE=%ERRORLEVEL% +if %EXIT_CODE% equ 0 set EXIT_CODE=1 +if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% +exit /b %EXIT_CODE% + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/patch.py b/patch.py deleted file mode 100644 index aa53755d..00000000 --- a/patch.py +++ /dev/null @@ -1,86 +0,0 @@ -import json -import subprocess -from hashlib import sha256 - -# This script will: -# - read current version -# - increment patch version -# - update version in a few places -# - insert new line in ripme.json with message -# - build ripme -# - add the hash of the latest binary to ripme.json -# - commit all changes -message = input('message: ') - -# Strip any spaces that might've been entered before the message -message.lstrip() - - -def get_ripme_json(): - with open('ripme.json') as dataFile: - ripmeJson = json.load(dataFile) - return ripmeJson - - -def update_hash(current_hash): - ripmeJson = get_ripme_json() - with open('ripme.json', 'w') as dataFile: - ripmeJson["currentHash"] = current_hash - print(ripmeJson["currentHash"]) - json.dump(ripmeJson, dataFile, indent=4) - - -def update_change_list(message): - ripmeJson = get_ripme_json() - with open('ripme.json', 'w') as dataFile: - ripmeJson["changeList"].insert(0, message) - json.dump(ripmeJson, dataFile, indent=4) - - -currentVersion = get_ripme_json()["latestVersion"] - -print('Current version ' + currentVersion) - -versionFields = currentVersion.split('.') 
-patchCur = int(versionFields[2]) -patchNext = patchCur + 1 -majorMinor = versionFields[:2] -majorMinor.append(str(patchNext)) -nextVersion = '.'.join(majorMinor) - -print('Updating to ' + nextVersion) - -substrExpr = 's/' + currentVersion + '/' + nextVersion + '/' -subprocess.call(['sed', '-i', '-e', substrExpr, 'src/main/java/com/rarchives/ripme/ui/UpdateUtils.java']) -subprocess.call(['git', 'grep', 'DEFAULT_VERSION.*' + nextVersion, - 'src/main/java/com/rarchives/ripme/ui/UpdateUtils.java']) - -substrExpr = 's/\\\"latestVersion\\\": \\\"' + currentVersion + '\\\"/\\\"latestVersion\\\": \\\"' + \ - nextVersion + '\\\"/' -subprocess.call(['sed', '-i', '-e', substrExpr, 'ripme.json']) -subprocess.call(['git', 'grep', 'latestVersion', 'ripme.json']) - -substrExpr = 's/' + currentVersion + '/' + nextVersion + '/' -subprocess.call(['sed', '-i', '-e', substrExpr, 'pom.xml']) -subprocess.call(['git', 'grep', '' + nextVersion + '', 'pom.xml']) - -commitMessage = nextVersion + ': ' + message - -update_change_list(commitMessage) - - -print("Building ripme") -subprocess.call(["mvn", "clean", "compile", "assembly:single"]) -print("Stripping jar") -subprocess.call(["mvn", "io.github.zlika:reproducible-build-maven-plugin:0.6:strip-jar"]) -print("Hashing .jar file") -openedFile = open("./target/ripme-{}-jar-with-dependencies.jar".format(nextVersion), "rb") -readFile = openedFile.read() -file_hash = sha256(readFile).hexdigest() -print("Hash is: {}".format(file_hash)) -print("Updating hash") -update_hash(file_hash) -subprocess.call(['git', 'add', '-u']) -subprocess.call(['git', 'commit', '-m', commitMessage]) -subprocess.call(['git', 'tag', nextVersion]) -print("Remember to run `git push origin master` before release.py") diff --git a/pom.xml b/pom.xml deleted file mode 100644 index 9f86678f..00000000 --- a/pom.xml +++ /dev/null @@ -1,169 +0,0 @@ - - 4.0.0 - com.rarchives.ripme - ripme - jar - 1.7.90 - ripme - http://rip.rarchives.com - - UTF-8 - 4.12 - 5.5.0 - 5.5.0 - - - - 
org.junit.jupiter - junit-jupiter-api - ${junit.jupiter.version} - test - - - junit - junit - ${junit.version} - test - - - org.junit.jupiter - junit-jupiter-engine - ${junit.jupiter.version} - test - - - org.junit.vintage - junit-vintage-engine - ${junit.vintage.version} - test - - - - org.jsoup - jsoup - 1.8.1 - - - org.json - json - 20140107 - - - commons-configuration - commons-configuration - 1.7 - - - log4j - log4j - 1.2.17 - - - commons-cli - commons-cli - 1.2 - - - commons-io - commons-io - 1.3.2 - - - org.apache.httpcomponents - httpclient - 4.3.6 - - - org.apache.httpcomponents - httpmime - 4.3.3 - - - - - - org.apache.maven.plugins - maven-site-plugin - 3.7.1 - - - io.github.zlika - reproducible-build-maven-plugin - 0.6 - - - maven-assembly-plugin - - - - com.rarchives.ripme.App - true - true - - - ./config - - - - jar-with-dependencies - - - - - org.apache.maven.plugins - maven-compiler-plugin - 3.1 - - 1.8 - 1.8 - - - - org.eluder.coveralls - coveralls-maven-plugin - 4.3.0 - - - - org.jacoco - jacoco-maven-plugin - 0.8.2 - - - prepare-agent - - prepare-agent - - - - - - maven-surefire-plugin - 2.22.2 - - - slow - - - - - - - - - - - org.apache.maven.plugins - maven-surefire-report-plugin - 3.0.0-M3 - - false - - - - - \ No newline at end of file diff --git a/release.py b/release.py deleted file mode 100755 index ad099bad..00000000 --- a/release.py +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env python3 - -import re - -import os - -import sys -from hashlib import sha256 -from github import Github -import json -import argparse - -parser = argparse.ArgumentParser(description="Make a new ripme release on github") -parser.add_argument("-f", "--file", help="Path to the version of ripme to release") -parser.add_argument("-t", "--token", help="Your github personal access token") -parser.add_argument("-d", "--debug", help="Run in debug mode", action="store_true") -parser.add_argument("-n", "--non-interactive", help="Do not ask for any input from the user", 
action="store_true") -parser.add_argument("--test", help="Perform a dry run (Do everything but upload new release)", action="store_true") -parser.add_argument("--skip-hash-check", help="Skip hash check (This should only be used for testing)", action="store_true") -args = parser.parse_args() - -try: - # This binds input to raw_input on python2, we do this because input acts like eval on python2 - input = raw_input -except NameError: - pass - - -# Make sure the file the user selected is a jar -def isJar(filename): - if debug: - print("Checking if {} is a jar file".format(filename)) - return filename.endswith("jar") - - -# Returns true if last entry to the "changeList" section of ripme.json is in the format of $number.$number.$number: and -# false if not -def isValidCommitMessage(message): - if debug: - print(r"Checking if {} matches pattern ^\d+\.\d+\.\d+:".format(message)) - pattern = re.compile(r"^\d+\.\d+\.\d+:") - return re.match(pattern, message) - - -# Checks if the update has the name ripme.jar, if not it renames the file -def checkAndRenameFile(path): - """Check if path (a string) points to a ripme.jar. Returns the possibly renamed file path""" - if not path.endswith("ripme.jar"): - print("Specified file is not named ripme.jar, renaming") - new_path = os.path.join(os.path.dirname(path), "ripme.jar") - os.rename(path, new_path) - return new_path - return path - - -ripmeJson = json.loads(open("ripme.json").read()) -fileToUploadPath = checkAndRenameFile(args.file) -InNoninteractiveMode = args.non_interactive -commitMessage = ripmeJson.get("changeList")[0] -releaseVersion = ripmeJson.get("latestVersion") -debug = args.debug -accessToken = args.token -repoOwner = "ripmeapp" -repoName = "ripme" - -if not os.path.isfile(fileToUploadPath): - print("[!] Error: {} does not exist".format(fileToUploadPath)) - sys.exit(1) - -if not isJar(fileToUploadPath): - print("[!] 
Error: {} is not a jar file!".format(fileToUploadPath)) - sys.exit(1) - -if not isValidCommitMessage(commitMessage): - print("[!] Error: {} is not a valid commit message as it does not start with a version".format(fileToUploadPath)) - sys.exit(1) - - -if not args.skip_hash_check: - if debug: - print("Reading file {}".format(fileToUploadPath)) - ripmeUpdate = open(fileToUploadPath, mode='rb').read() - - # The actual hash of the file on disk - actualHash = sha256(ripmeUpdate).hexdigest() - - # The hash that we expect the update to have - expectedHash = ripmeJson.get("currentHash") - - # Make sure that the hash of the file we're uploading matches the hash in ripme.json. These hashes not matching will - # cause ripme to refuse to install the update for all users who haven't disabled update hash checking - if expectedHash != actualHash: - print("[!] Error: expected hash of file and actual hash differ") - print("[!] Expected hash is {}".format(expectedHash)) - print("[!] Actual hash is {}".format(actualHash)) - sys.exit(1) -else: - print("[*] WARNING: SKIPPING HASH CHECK") -# Ask the user to review the information before we precede -# This only runs in we're in interactive mode -if not InNoninteractiveMode: - print("File path: {}".format(fileToUploadPath)) - print("Release title: {}".format(commitMessage)) - print("Repo: {}/{}".format(repoOwner, repoName)) - input("\nPlease review the information above and ensure it is correct and then press enter") - -if not args.test: - print("Accessing github using token") - g = Github(accessToken) - - print("Creating release") - release = g.get_user(repoOwner).get_repo(repoName).create_git_release(releaseVersion, commitMessage, "") - - print("Uploading file") - release.upload_asset(fileToUploadPath, "ripme.jar") -else: - print("Not uploading release being script was run with --test flag") diff --git a/ripme.json b/ripme.json index 5d9fa969..c95f9381 100644 --- a/ripme.json +++ b/ripme.json @@ -1,6 +1,30 @@ { - "currentHash": 
"a2fdb180da195c617cff933fc400d16010d049580188a8eae7eb89e11bd0d4ef", + "latestVersion": "2.1.12-7-d0b97acd", + "currentHash": "ac40e5ff60f8e0bc7832874de529283a77f9e07d5a7d4a0e8f81e05d43e2df58", "changeList": [ + "2.1.12-7-d0b97acd, ripme now instead of ripme2 on github, ignore SSL verification option added", + "2.1.11-20-ca96ce88, Commer.party next page, Imgur, E-hentai fixed, set recent language.", + "2.1.10-21-c94a9543, Imagebam, Unify colons in UI, Motherless, right click menu, rgif fixed", + "2.1.9-7-22e915df, HistoryMenuMouseListener right click menu, Imagefap retry logic for getFullSizedImage(), EightmusesRipper fixed", + "2.1.8-1-f5153de8: jpg3 add, java-21 adjustments.", + "2.1.7-29-b080faae: luciousripper fix, java-21 adjustments.", + "2.1.6-1-68189f27: erome fix.", + "2.1.5-8-ba51d7b: ripme running with java-17.", + "2.1.4-38-836a7494: fixed imagefap ripper.", + "2.1.3-15-1b83dc68: relative path now from working dir to subfolder, allowing images to be put in subfolder with same filename, sanatize reddit titles saved as files, additional logging in AbstractHTMLRipper.", + "2.1.2-23-e5438e85: caching of first page, retry sleep time, nhentai fixed", + "2.1.2-3-ea90b172: better sanitize filenames for windows, save config on update value. 
reddit, print exceptions in loops and continue.", + "2.1.1-3-536339dd: java-11+ necessary to run, work around non existing working directory.", + "2.0.4-13-03e32cb7: fix vsco, add danbooru.", + "2.0.3: Check new version against ripme2app.", + "2.0.2: Add greek translation, fixed reddit, redgif.", + "2.0.1: Fixed reddit, tujigu, xhamster, imagebam, erome; marked some tests as flaky.", + "2.0.0: Fixed Zizki, WordpressComics, Imagebam; marked some tests as flaky ", + "1.7.95: Added porncomixinfo.net; Fixed ripper for HentaiNexus; move l option to before r and R; marked some tests as flaky ", + "1.7.94: Added reddit gallery support; Fixed AllporncomicRipper; Fix imagefap ripper; instagramRipper, replaced Nashorn with GraalVM.js", + "1.7.93: Fixed Motherless ripper; Fixed e621 ripper; Updated pt_PT translation; Implemented redgifs Ripper; added missing translation to Korean/KR; Fixed elecx ripper; Added ripper for HentaiNexus", + "1.7.92: Added read-comic.com ripper; Fix Pawoo ripper; Add ChineseSimplified language file; Fixed artstation ripper", + "1.7.91: Fixed luscious ripper. 
Fixed VK ripper; Added Kingcomix ripper", "1.7.90: Added FitnakedgirlsRipper; Fixed VK Album Ripper; Fixed Myreadingmanga Ripper; Fixed windows max file name; Fixed Pornhub Video Ripper; Fixed Motherless Ripper; Fixed Instagram Ripper", "1.7.89: Improved twitter ripper; Fixed xhamster image ripper; Fixed allporncomic ripper; Added Ripper for folio.ink", "1.7.88: Added ripper for Myreadingmanga.info; Added Mastodon rippers; Fix queue count update when queue is 0; Added ripper for listal; Now downloads best video when ripping twitter", @@ -261,6 +285,5 @@ "1.0.4: Fixed spaces-in-directory bug", "1.0.3: Added VK.com ripper", "1.0.1: Added auto-update functionality" - ], - "latestVersion": "1.7.90" -} \ No newline at end of file + ] +} diff --git a/settings.gradle.kts b/settings.gradle.kts new file mode 100644 index 00000000..5528f49d --- /dev/null +++ b/settings.gradle.kts @@ -0,0 +1,9 @@ +pluginManagement { + repositories { + mavenLocal() + gradlePluginPortal() + // TODO: remove after new build of compose-jb is published + maven("https://maven.pkg.jetbrains.space/public/p/compose/dev") + } +} +rootProject.name = "ripme" diff --git a/src/main/java/com/rarchives/ripme/App.java b/src/main/java/com/rarchives/ripme/App.java index 73e0bb66..2c37fdc9 100644 --- a/src/main/java/com/rarchives/ripme/App.java +++ b/src/main/java/com/rarchives/ripme/App.java @@ -1,31 +1,5 @@ package com.rarchives.ripme; -import java.awt.*; -import java.io.File; -import java.io.IOException; -import java.io.BufferedReader; -import java.io.FileReader; -import java.io.FileNotFoundException; - -import java.net.MalformedURLException; -import java.net.URL; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.Arrays; -import java.util.Collections; -import java.util.Date; - -import javax.swing.SwingUtilities; - -import org.apache.commons.cli.BasicParser; -import org.apache.commons.cli.CommandLine; -import org.apache.commons.cli.HelpFormatter; -import 
org.apache.commons.cli.Options; -import org.apache.commons.cli.ParseException; -import org.apache.commons.lang.SystemUtils; -import org.apache.log4j.Logger; - import com.rarchives.ripme.ripper.AbstractRipper; import com.rarchives.ripme.ui.History; import com.rarchives.ripme.ui.HistoryEntry; @@ -35,6 +9,31 @@ import com.rarchives.ripme.utils.Proxy; import com.rarchives.ripme.utils.RipUtils; import com.rarchives.ripme.utils.Utils; +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.DefaultParser; +import org.apache.commons.cli.HelpFormatter; +import org.apache.commons.cli.Options; +import org.apache.commons.cli.ParseException; +import org.apache.commons.lang.SystemUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import javax.swing.*; +import java.awt.*; +import java.io.BufferedReader; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URL; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Arrays; +import java.util.Collections; +import java.util.Date; +import java.util.stream.Stream; + /** * Entry point to application. * This is where all the fun happens, with the main method. @@ -44,7 +43,7 @@ import com.rarchives.ripme.utils.Utils; */ public class App { - public static final Logger logger = Logger.getLogger(App.class); + public static final Logger logger = LogManager.getLogger(App.class); public static String stringToAppendToFoldername = null; private static final History HISTORY = new History(); @@ -54,11 +53,11 @@ public class App { * * @param args Array of command line arguments. 
*/ - public static void main(String[] args) throws MalformedURLException { + public static void main(String[] args) throws IOException { CommandLine cl = getArgs(args); if (args.length > 0 && cl.hasOption('v')){ - logger.info(UpdateUtils.getThisJarVersion()); + System.out.println(UpdateUtils.getThisJarVersion()); System.exit(0); } @@ -113,7 +112,7 @@ public class App { entry.dir = ripper.getWorkingDir().getAbsolutePath(); try { entry.title = ripper.getAlbumTitle(ripper.getURL()); - } catch (MalformedURLException e) { } + } catch (MalformedURLException ignored) { } HISTORY.add(entry); } } @@ -122,7 +121,7 @@ public class App { * For dealing with command-line arguments. * @param args Array of Command-line arguments */ - private static void handleArguments(String[] args) { + private static void handleArguments(String[] args) throws IOException { CommandLine cl = getArgs(args); //Help (list commands) @@ -169,6 +168,12 @@ public class App { Utils.setConfigBoolean("errors.skip404", true); } + //Destination directory + if (cl.hasOption('l')) { + // change the default rips directory + Utils.setConfigString("rips.directory", cl.getOptionValue('l')); + } + //Re-rip all previous albums if (cl.hasOption('r')) { // Re-rip all via command-line @@ -179,7 +184,7 @@ public class App { } for (HistoryEntry entry : HISTORY.toList()) { try { - URL url = new URL(entry.url); + URL url = new URI(entry.url).toURL(); rip(url); } catch (Exception e) { logger.error("[!] Failed to rip URL " + entry.url, e); @@ -208,7 +213,7 @@ public class App { if (entry.selected) { added++; try { - URL url = new URL(entry.url); + URL url = new URI(entry.url).toURL(); rip(url); } catch (Exception e) { logger.error("[!] 
Failed to rip URL " + entry.url, e); @@ -245,17 +250,11 @@ public class App { System.exit(-1); } - //Destination directory - if (cl.hasOption('l')) { - // change the default rips directory - Utils.setConfigString("rips.directory", cl.getOptionValue('l')); - } - //Read URLs from File if (cl.hasOption('f')) { - String filename = cl.getOptionValue('f'); + Path urlfile = Paths.get(cl.getOptionValue('f')); - try (BufferedReader br = new BufferedReader(new FileReader(filename))) { + try (BufferedReader br = Files.newBufferedReader(urlfile)) { String url; while ((url = br.readLine()) != null) { if (url.startsWith("//") || url.startsWith("#")) { @@ -288,11 +287,11 @@ public class App { /** * Attempt to rip targetURL. * @param targetURL URL to rip - * @param saveConfig Whether or not you want to save the config (?) + * @param saveConfig Whether you want to save the config (?) */ private static void ripURL(String targetURL, boolean saveConfig) { try { - URL url = new URL(targetURL); + URL url = new URI(targetURL).toURL(); rip(url); saveHistory(); } catch (MalformedURLException e) { @@ -337,7 +336,7 @@ public class App { * @return CommandLine object containing arguments. */ private static CommandLine getArgs(String[] args) { - BasicParser parser = new BasicParser(); + var parser = new DefaultParser(); try { return parser.parse(getOptions(), args, false); } catch (ParseException e) { @@ -349,19 +348,18 @@ public class App { /** * Loads history from history file into memory. 
- * @see MainWindow.loadHistory */ - private static void loadHistory() { - File historyFile = new File(Utils.getConfigDir() + File.separator + "history.json"); + private static void loadHistory() throws IOException { + Path historyFile = Paths.get(Utils.getConfigDir() + "/history.json"); HISTORY.clear(); - if (historyFile.exists()) { + if (Files.exists(historyFile)) { try { - logger.info("Loading history from " + historyFile.getCanonicalPath()); - HISTORY.fromFile(historyFile.getCanonicalPath()); + logger.info("Loading history from " + historyFile); + HISTORY.fromFile(historyFile.toString()); } catch (IOException e) { logger.error("Failed to load history from file " + historyFile, e); logger.warn( - "RipMe failed to load the history file at " + historyFile.getAbsolutePath() + "\n\n" + + "RipMe failed to load the history file at " + historyFile + "\n\n" + "Error: " + e.getMessage() + "\n\n" + "Closing RipMe will automatically overwrite the contents of this file,\n" + "so you may want to back the file up before closing RipMe!"); @@ -372,16 +370,18 @@ public class App { if (HISTORY.toList().isEmpty()) { // Loaded from config, still no entries. 
// Guess rip history based on rip folder - String[] dirs = Utils.getWorkingDirectory().list((dir, file) -> new File(dir.getAbsolutePath() + File.separator + file).isDirectory()); - for (String dir : dirs) { - String url = RipUtils.urlFromDirectoryName(dir); + Stream stream = Files.list(Utils.getWorkingDirectory()) + .filter(Files::isDirectory); + + stream.forEach(dir -> { + String url = RipUtils.urlFromDirectoryName(dir.toString()); if (url != null) { // We found one, add it to history HistoryEntry entry = new HistoryEntry(); entry.url = url; HISTORY.add(entry); } - } + }); } } } @@ -390,7 +390,7 @@ public class App { * @see MainWindow.saveHistory */ private static void saveHistory() { - Path historyFile = Paths.get(Utils.getConfigDir() + File.separator + "history.json"); + Path historyFile = Paths.get(Utils.getConfigDir() + "/history.json"); try { if (!Files.exists(historyFile)) { Files.createDirectories(historyFile.getParent()); diff --git a/src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java b/src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java index 3e3fdb18..e7b646e5 100644 --- a/src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java @@ -2,42 +2,65 @@ package com.rarchives.ripme.ripper; import java.io.File; import java.io.FileOutputStream; -import java.io.FileWriter; import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; +import java.io.UnsupportedEncodingException; +import java.net.*; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; + import org.jsoup.nodes.Document; import com.rarchives.ripme.ui.RipStatusMessage.STATUS; import com.rarchives.ripme.utils.Utils; import 
com.rarchives.ripme.ui.MainWindow; import com.rarchives.ripme.ui.RipStatusMessage; +import com.rarchives.ripme.utils.Http; /** * Simplified ripper, designed for ripping from sites by parsing HTML. */ public abstract class AbstractHTMLRipper extends AbstractRipper { - private Map itemsPending = Collections.synchronizedMap(new HashMap()); - private Map itemsCompleted = Collections.synchronizedMap(new HashMap()); - private Map itemsErrored = Collections.synchronizedMap(new HashMap()); + private final Map itemsPending = Collections.synchronizedMap(new HashMap<>()); + private final Map itemsCompleted = Collections.synchronizedMap(new HashMap<>()); + private final Map itemsErrored = Collections.synchronizedMap(new HashMap<>()); + Document cachedFirstPage; protected AbstractHTMLRipper(URL url) throws IOException { super(url); + if(Utils.getConfigBoolean("ssl.verify.off",false)){ + Http.SSLVerifyOff(); + }else { + Http.undoSSLVerifyOff(); + } } protected abstract String getDomain(); public abstract String getHost(); - protected abstract Document getFirstPage() throws IOException; - public Document getNextPage(Document doc) throws IOException { + protected Document getFirstPage() throws IOException, URISyntaxException { + return Http.url(url).get(); + } + + protected Document getCachedFirstPage() throws IOException, URISyntaxException { + if (cachedFirstPage == null) { + cachedFirstPage = getFirstPage(); + } + return cachedFirstPage; + } + + public Document getNextPage(Document doc) throws IOException, URISyntaxException { return null; } - protected abstract List getURLsFromPage(Document page); + protected abstract List getURLsFromPage(Document page) throws UnsupportedEncodingException; protected List getDescriptionsFromPage(Document doc) throws IOException { throw new IOException("getDescriptionsFromPage not implemented"); // Do I do this or make an abstract function? 
} @@ -56,7 +79,7 @@ public abstract class AbstractHTMLRipper extends AbstractRipper { } @Override - public URL sanitizeURL(URL url) throws MalformedURLException { + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { return url; } protected boolean hasDescriptionSupport() { @@ -86,12 +109,12 @@ public abstract class AbstractHTMLRipper extends AbstractRipper { } @Override - public void rip() throws IOException { + public void rip() throws IOException, URISyntaxException { int index = 0; int textindex = 0; LOGGER.info("Retrieving " + this.url); sendUpdate(STATUS.LOADING_RESOURCE, this.url.toExternalForm()); - Document doc = getFirstPage(); + var doc = getCachedFirstPage(); if (hasQueueSupport() && pageContainsAlbums(this.url)) { List urls = getAlbumsToQueue(doc); @@ -104,11 +127,28 @@ public abstract class AbstractHTMLRipper extends AbstractRipper { LOGGER.debug("Adding items from " + this.url + " to queue"); } + List doclocation = new ArrayList<>(); + + LOGGER.info("Got doc location " + doc.location()); + while (doc != null) { + + LOGGER.info("Processing a doc..."); + + // catch if we saw a doc location already, save the ones seen in a list + if (doclocation.contains(doc.location())) { + LOGGER.info("Already processed location " + doc.location() + " breaking"); + break; + } + doclocation.add(doc.location()); + if (alreadyDownloadedUrls >= Utils.getConfigInteger("history.end_rip_after_already_seen", 1000000000) && !isThisATest()) { sendUpdate(STATUS.DOWNLOAD_COMPLETE_HISTORY, "Already seen the last " + alreadyDownloadedUrls + " images ending rip"); break; } + + LOGGER.info("retrieving urls from doc"); + List imageURLs = getURLsFromPage(doc); // If hasASAPRipping() returns true then the ripper will handle downloading the files // if not it's done in the following block of code @@ -126,9 +166,9 @@ public abstract class AbstractHTMLRipper extends AbstractRipper { for (String imageURL : imageURLs) { index += 1; - LOGGER.debug("Found image 
url #" + index + ": " + imageURL); + LOGGER.debug("Found image url #" + index + ": '" + imageURL + "'"); downloadURL(new URL(imageURL), index); - if (isStopped()) { + if (isStopped() || isThisATest()) { break; } } @@ -139,7 +179,7 @@ public abstract class AbstractHTMLRipper extends AbstractRipper { if (!textURLs.isEmpty()) { LOGGER.debug("Found description link(s) from " + doc.location()); for (String textURL : textURLs) { - if (isStopped()) { + if (isStopped() || isThisATest()) { break; } textindex += 1; @@ -195,7 +235,7 @@ public abstract class AbstractHTMLRipper extends AbstractRipper { */ private String fileNameFromURL(URL url) { String saveAs = url.toExternalForm(); - if (saveAs.substring(saveAs.length() - 1) == "/") { saveAs = saveAs.substring(0,saveAs.length() - 1) ;} + if (saveAs.substring(saveAs.length() - 1).equals("/")) { saveAs = saveAs.substring(0,saveAs.length() - 1) ;} saveAs = saveAs.substring(saveAs.lastIndexOf('/')+1); if (saveAs.indexOf('?') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('?')); } if (saveAs.indexOf('#') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('#')); } @@ -250,7 +290,7 @@ public abstract class AbstractHTMLRipper extends AbstractRipper { } LOGGER.debug("Downloading " + url + "'s description to " + saveFileAs); if (!saveFileAs.getParentFile().exists()) { - LOGGER.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent())); + LOGGER.info("[+] Creating directory: " + saveFileAs.getParent()); saveFileAs.getParentFile().mkdirs(); } return true; @@ -281,22 +321,22 @@ public abstract class AbstractHTMLRipper extends AbstractRipper { } @Override - /** - * Returns total amount of files attempted. + /* + Returns total amount of files attempted. 
*/ public int getCount() { return itemsCompleted.size() + itemsErrored.size(); } @Override - /** - * Queues multiple URLs of single images to download from a single Album URL + /* + Queues multiple URLs of single images to download from a single Album URL */ - public boolean addURLToDownload(URL url, File saveAs, String referrer, Map cookies, Boolean getFileExtFromMIME) { - // Only download one file if this is a test. - if (super.isThisATest() && - (itemsPending.size() > 0 || itemsCompleted.size() > 0 || itemsErrored.size() > 0)) { + public boolean addURLToDownload(URL url, Path saveAs, String referrer, Map cookies, Boolean getFileExtFromMIME) { + // Only download one file if this is a test. + if (isThisATest() && (itemsCompleted.size() > 0 || itemsErrored.size() > 0)) { stop(); + itemsPending.clear(); return false; } if (!allowDuplicates() @@ -307,20 +347,24 @@ public abstract class AbstractHTMLRipper extends AbstractRipper { LOGGER.info("[!] Skipping " + url + " -- already attempted: " + Utils.removeCWD(saveAs)); return false; } + if (shouldIgnoreURL(url)) { + sendUpdate(STATUS.DOWNLOAD_SKIP, "Skipping " + url.toExternalForm() + " - ignored extension"); + return false; + } if (Utils.getConfigBoolean("urls_only.save", false)) { // Output URL to file - String urlFile = this.workingDir + File.separator + "urls.txt"; - try (FileWriter fw = new FileWriter(urlFile, true)) { - fw.write(url.toExternalForm()); - fw.write(System.lineSeparator()); - itemsCompleted.put(url, new File(urlFile)); + Path urlFile = Paths.get(this.workingDir + "/urls.txt"); + String text = url.toExternalForm() + System.lineSeparator(); + try { + Files.write(urlFile, text.getBytes(StandardCharsets.UTF_8), StandardOpenOption.CREATE, StandardOpenOption.APPEND); + itemsCompleted.put(url, urlFile); } catch (IOException e) { LOGGER.error("Error while writing to " + urlFile, e); } } else { - itemsPending.put(url, saveAs); - DownloadFileThread dft = new DownloadFileThread(url, saveAs, this, 
getFileExtFromMIME); + itemsPending.put(url, saveAs.toFile()); + DownloadFileThread dft = new DownloadFileThread(url, saveAs.toFile(), this, getFileExtFromMIME); if (referrer != null) { dft.setReferrer(referrer); } @@ -334,7 +378,7 @@ public abstract class AbstractHTMLRipper extends AbstractRipper { } @Override - public boolean addURLToDownload(URL url, File saveAs) { + public boolean addURLToDownload(URL url, Path saveAs) { return addURLToDownload(url, saveAs, null, null, false); } @@ -352,10 +396,10 @@ public abstract class AbstractHTMLRipper extends AbstractRipper { } @Override - /** - * Cleans up & tells user about successful download + /* + Cleans up & tells user about successful download */ - public void downloadCompleted(URL url, File saveAs) { + public void downloadCompleted(URL url, Path saveAs) { if (observer == null) { return; } @@ -373,7 +417,7 @@ public abstract class AbstractHTMLRipper extends AbstractRipper { } @Override - /** + /* * Cleans up & tells user about failed download. */ public void downloadErrored(URL url, String reason) { @@ -388,18 +432,18 @@ public abstract class AbstractHTMLRipper extends AbstractRipper { } @Override - /** - * Tells user that a single file in the album they wish to download has - * already been downloaded in the past. + /* + Tells user that a single file in the album they wish to download has + already been downloaded in the past. */ - public void downloadExists(URL url, File file) { + public void downloadExists(URL url, Path file) { if (observer == null) { return; } itemsPending.remove(url); itemsCompleted.put(url, file); - observer.update(this, new RipStatusMessage(STATUS.DOWNLOAD_WARN, url + " already saved as " + file.getAbsolutePath())); + observer.update(this, new RipStatusMessage(STATUS.DOWNLOAD_WARN, url + " already saved as " + file)); checkIfComplete(); } @@ -421,21 +465,16 @@ public abstract class AbstractHTMLRipper extends AbstractRipper { * Sets directory to save all ripped files to. 
* @param url * URL to define how the working directory should be saved. - * @throws - * IOException */ @Override - public void setWorkingDir(URL url) throws IOException { - String path = Utils.getWorkingDirectory().getCanonicalPath(); + public void setWorkingDir(URL url) throws IOException, URISyntaxException { + Path wd = Utils.getWorkingDirectory(); + // TODO - change to nio + String path = wd.toAbsolutePath().toString(); if (!path.endsWith(File.separator)) { path += File.separator; } - String title; - if (Utils.getConfigBoolean("album_titles.save", true)) { - title = getAlbumTitle(this.url); - } else { - title = super.getAlbumTitle(this.url); - } + String title = getAlbumTitle(this.url); LOGGER.debug("Using album title '" + title + "'"); title = Utils.filesystemSafe(title); @@ -444,8 +483,10 @@ public abstract class AbstractHTMLRipper extends AbstractRipper { this.workingDir = new File(path); if (!this.workingDir.exists()) { - LOGGER.info("[+] Creating directory: " + Utils.removeCWD(this.workingDir)); - this.workingDir.mkdirs(); + LOGGER.info("[+] Creating directory: " + Utils.removeCWD(this.workingDir.toPath())); + if (!this.workingDir.mkdirs()) { + throw new IOException("Failed creating dir: \"" + this.workingDir + "\""); + } } LOGGER.debug("Set working directory to: " + this.workingDir); } @@ -466,13 +507,11 @@ public abstract class AbstractHTMLRipper extends AbstractRipper { */ @Override public String getStatusText() { - StringBuilder sb = new StringBuilder(); - sb.append(getCompletionPercentage()) - .append("% ") - .append("- Pending: " ).append(itemsPending.size()) - .append(", Completed: ").append(itemsCompleted.size()) - .append(", Errored: " ).append(itemsErrored.size()); - return sb.toString(); + return getCompletionPercentage() + + "% " + + "- Pending: " + itemsPending.size() + + ", Completed: " + itemsCompleted.size() + + ", Errored: " + itemsErrored.size(); } diff --git a/src/main/java/com/rarchives/ripme/ripper/AbstractJSONRipper.java 
b/src/main/java/com/rarchives/ripme/ripper/AbstractJSONRipper.java index d7e93fcb..1d8e688a 100644 --- a/src/main/java/com/rarchives/ripme/ripper/AbstractJSONRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/AbstractJSONRipper.java @@ -1,18 +1,24 @@ package com.rarchives.ripme.ripper; +import com.rarchives.ripme.ui.RipStatusMessage; +import com.rarchives.ripme.ui.RipStatusMessage.STATUS; +import com.rarchives.ripme.utils.Utils; +import org.json.JSONObject; + import java.io.File; -import java.io.FileWriter; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import org.json.JSONObject; -import com.rarchives.ripme.ui.RipStatusMessage; -import com.rarchives.ripme.ui.RipStatusMessage.STATUS; -import com.rarchives.ripme.utils.Utils; /** * Simplified ripper, designed for ripping from sites by parsing JSON. 
@@ -20,7 +26,7 @@ import com.rarchives.ripme.utils.Utils; public abstract class AbstractJSONRipper extends AbstractRipper { private Map itemsPending = Collections.synchronizedMap(new HashMap()); - private Map itemsCompleted = Collections.synchronizedMap(new HashMap()); + private Map itemsCompleted = Collections.synchronizedMap(new HashMap()); private Map itemsErrored = Collections.synchronizedMap(new HashMap()); protected AbstractJSONRipper(URL url) throws IOException { @@ -31,8 +37,8 @@ public abstract class AbstractJSONRipper extends AbstractRipper { @Override public abstract String getHost(); - protected abstract JSONObject getFirstPage() throws IOException; - protected JSONObject getNextPage(JSONObject doc) throws IOException { + protected abstract JSONObject getFirstPage() throws IOException, URISyntaxException; + protected JSONObject getNextPage(JSONObject doc) throws IOException, URISyntaxException { throw new IOException("getNextPage not implemented"); } protected abstract List getURLsFromJSON(JSONObject json); @@ -51,12 +57,12 @@ public abstract class AbstractJSONRipper extends AbstractRipper { } @Override - public URL sanitizeURL(URL url) throws MalformedURLException { + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { return url; } @Override - public void rip() throws IOException { + public void rip() throws IOException, URISyntaxException { int index = 0; LOGGER.info("Retrieving " + this.url); sendUpdate(STATUS.LOADING_RESOURCE, this.url.toExternalForm()); @@ -98,7 +104,7 @@ public abstract class AbstractJSONRipper extends AbstractRipper { try { sendUpdate(STATUS.LOADING_RESOURCE, "next page"); json = getNextPage(json); - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { LOGGER.info("Can't get next page: " + e.getMessage()); break; } @@ -140,11 +146,11 @@ public abstract class AbstractJSONRipper extends AbstractRipper { /** * Queues multiple URLs of single images to download from a single Album 
URL */ - public boolean addURLToDownload(URL url, File saveAs, String referrer, Map cookies, Boolean getFileExtFromMIME) { - // Only download one file if this is a test. - if (super.isThisATest() && - (itemsPending.size() > 0 || itemsCompleted.size() > 0 || itemsErrored.size() > 0)) { + public boolean addURLToDownload(URL url, Path saveAs, String referrer, Map cookies, Boolean getFileExtFromMIME) { + // Only download one file if this is a test. + if (super.isThisATest() && (itemsCompleted.size() > 0 || itemsErrored.size() > 0)) { stop(); + itemsPending.clear(); return false; } if (!allowDuplicates() @@ -155,20 +161,24 @@ public abstract class AbstractJSONRipper extends AbstractRipper { LOGGER.info("[!] Skipping " + url + " -- already attempted: " + Utils.removeCWD(saveAs)); return false; } + if (shouldIgnoreURL(url)) { + sendUpdate(STATUS.DOWNLOAD_SKIP, "Skipping " + url.toExternalForm() + " - ignored extension"); + return false; + } if (Utils.getConfigBoolean("urls_only.save", false)) { // Output URL to file - String urlFile = this.workingDir + File.separator + "urls.txt"; - try (FileWriter fw = new FileWriter(urlFile, true)) { - fw.write(url.toExternalForm()); - fw.write(System.lineSeparator()); - itemsCompleted.put(url, new File(urlFile)); + Path urlFile = Paths.get(this.workingDir + "/urls.txt"); + String text = url.toExternalForm() + System.lineSeparator(); + try { + Files.write(urlFile, text.getBytes(StandardCharsets.UTF_8), StandardOpenOption.CREATE, StandardOpenOption.APPEND); + itemsCompleted.put(url, urlFile); } catch (IOException e) { LOGGER.error("Error while writing to " + urlFile, e); } } else { - itemsPending.put(url, saveAs); - DownloadFileThread dft = new DownloadFileThread(url, saveAs, this, getFileExtFromMIME); + itemsPending.put(url, saveAs.toFile()); + DownloadFileThread dft = new DownloadFileThread(url, saveAs.toFile(), this, getFileExtFromMIME); if (referrer != null) { dft.setReferrer(referrer); } @@ -182,7 +192,7 @@ public abstract class 
AbstractJSONRipper extends AbstractRipper { } @Override - public boolean addURLToDownload(URL url, File saveAs) { + public boolean addURLToDownload(URL url, Path saveAs) { return addURLToDownload(url, saveAs, null, null, false); } @@ -203,7 +213,7 @@ public abstract class AbstractJSONRipper extends AbstractRipper { /** * Cleans up & tells user about successful download */ - public void downloadCompleted(URL url, File saveAs) { + public void downloadCompleted(URL url, Path saveAs) { if (observer == null) { return; } @@ -240,14 +250,14 @@ public abstract class AbstractJSONRipper extends AbstractRipper { * Tells user that a single file in the album they wish to download has * already been downloaded in the past. */ - public void downloadExists(URL url, File file) { + public void downloadExists(URL url, Path file) { if (observer == null) { return; } itemsPending.remove(url); itemsCompleted.put(url, file); - observer.update(this, new RipStatusMessage(STATUS.DOWNLOAD_WARN, url + " already saved as " + file.getAbsolutePath())); + observer.update(this, new RipStatusMessage(STATUS.DOWNLOAD_WARN, url + " already saved as " + file)); checkIfComplete(); } @@ -273,11 +283,8 @@ public abstract class AbstractJSONRipper extends AbstractRipper { * IOException */ @Override - public void setWorkingDir(URL url) throws IOException { - String path = Utils.getWorkingDirectory().getCanonicalPath(); - if (!path.endsWith(File.separator)) { - path += File.separator; - } + public void setWorkingDir(URL url) throws IOException, URISyntaxException { + Path wd = Utils.getWorkingDirectory(); String title; if (Utils.getConfigBoolean("album_titles.save", true)) { title = getAlbumTitle(this.url); @@ -287,15 +294,13 @@ public abstract class AbstractJSONRipper extends AbstractRipper { LOGGER.debug("Using album title '" + title + "'"); title = Utils.filesystemSafe(title); - path += title; - path = Utils.getOriginalDirectory(path) + File.separator; // check for case sensitive (unix only) - - 
this.workingDir = new File(path); - if (!this.workingDir.exists()) { - LOGGER.info("[+] Creating directory: " + Utils.removeCWD(this.workingDir)); - this.workingDir.mkdirs(); + wd = wd.resolve(title); + if (!Files.exists(wd)) { + LOGGER.info("[+] Creating directory: " + Utils.removeCWD(wd)); + Files.createDirectory(wd); } - LOGGER.debug("Set working directory to: " + this.workingDir); + this.workingDir = wd.toFile(); + LOGGER.info("Set working directory to: {}", this.workingDir); } /** diff --git a/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java b/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java index 3653b9f0..f290e72b 100644 --- a/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java @@ -8,14 +8,23 @@ import java.io.FileWriter; import java.io.IOException; import java.lang.reflect.Constructor; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Observable; import java.util.Scanner; -import org.apache.log4j.FileAppender; -import org.apache.log4j.Logger; +import java.util.concurrent.atomic.AtomicBoolean; + + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + import org.jsoup.HttpStatusException; import com.rarchives.ripme.App; import com.rarchives.ripme.ui.RipStatusComplete; @@ -28,7 +37,7 @@ public abstract class AbstractRipper extends Observable implements RipperInterface, Runnable { - protected static final Logger LOGGER = Logger.getLogger(AbstractRipper.class); + protected static final Logger LOGGER = LogManager.getLogger(AbstractRipper.class); private final String URLHistoryFile = Utils.getURLHistoryFile(); public static final String USER_AGENT = @@ -41,23 +50,24 @@ public abstract class 
AbstractRipper private boolean completed = true; - public abstract void rip() throws IOException; + public abstract void rip() throws IOException, URISyntaxException; public abstract String getHost(); - public abstract String getGID(URL url) throws MalformedURLException; + public abstract String getGID(URL url) throws MalformedURLException, URISyntaxException; public boolean hasASAPRipping() { return false; } // Everytime addUrlToDownload skips a already downloaded url this increases by 1 public int alreadyDownloadedUrls = 0; - private boolean shouldStop = false; + private final AtomicBoolean shouldStop = new AtomicBoolean(false); private static boolean thisIsATest = false; public void stop() { - shouldStop = true; + LOGGER.trace("stop()"); + shouldStop.set(true); } public boolean isStopped() { - return shouldStop; + return shouldStop.get(); } protected void stopCheck() throws IOException { - if (shouldStop) { + if (shouldStop.get()) { throw new IOException("Ripping interrupted"); } } @@ -163,7 +173,11 @@ public abstract class AbstractRipper if (!canRip(url)) { throw new MalformedURLException("Unable to rip url: " + url); } - this.url = sanitizeURL(url); + try { + this.url = sanitizeURL(url); + } catch (URISyntaxException e) { + throw new MalformedURLException(e.getMessage()); + } } /** @@ -175,14 +189,17 @@ public abstract class AbstractRipper * @throws IOException * Always be prepared. */ - public void setup() throws IOException { + public void setup() throws IOException, URISyntaxException { setWorkingDir(this.url); - Logger rootLogger = Logger.getRootLogger(); - FileAppender fa = (FileAppender) rootLogger.getAppender("FILE"); - if (fa != null) { - fa.setFile(this.workingDir + File.separator + "log.txt"); - fa.activateOptions(); - } + // we do not care if the rollingfileappender is active, just change the logfile in case + // TODO this does not work - not even with + // .withFileName("${sys:logFilename}") + // in Utils.java, RollingFileAppender. 
+// System.setProperty("logFilename", this.workingDir + "/log.txt"); +// LOGGER.debug("Changing log file to '{}/log.txt'", this.workingDir); +// LoggerContext ctx = (LoggerContext) LogManager.getContext(false); +// ctx.reconfigure(); +// ctx.updateLoggers(); this.threadPool = new DownloadThreadPool(); } @@ -199,7 +216,7 @@ public abstract class AbstractRipper * Path of the local file to save the content to. * @return True on success, false on failure. */ - public abstract boolean addURLToDownload(URL url, File saveAs); + public abstract boolean addURLToDownload(URL url, Path saveAs); /** * Queues image to be downloaded and saved. @@ -215,7 +232,7 @@ public abstract class AbstractRipper * True if downloaded successfully * False if failed to download */ - protected abstract boolean addURLToDownload(URL url, File saveAs, String referrer, Map cookies, + protected abstract boolean addURLToDownload(URL url, Path saveAs, String referrer, Map cookies, Boolean getFileExtFromMIME); /** @@ -234,9 +251,10 @@ public abstract class AbstractRipper */ protected boolean addURLToDownload(URL url, Map options, Map cookies) { // Bit of a hack but this lets us pass a bool using a map - boolean useMIME = options.getOrDefault("getFileExtFromMIME", "false").toLowerCase().equals("true"); - return addURLToDownload(url, options.getOrDefault("prefix", ""), options.getOrDefault("subdirectory", ""), options.getOrDefault("referrer", null), - cookies, options.getOrDefault("fileName", null), options.getOrDefault("extension", null), useMIME); + boolean useMIME = options.getOrDefault("getFileExtFromMIME", "false").equalsIgnoreCase("true"); + return addURLToDownload(url, options.getOrDefault("subdirectory", ""), options.getOrDefault("referrer", null), cookies, + options.getOrDefault("prefix", ""), options.getOrDefault("fileName", null), options.getOrDefault("extension", null), + useMIME); } @@ -274,7 +292,7 @@ public abstract class AbstractRipper * True if downloaded successfully * False if failed to 
download */ - protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map cookies, String fileName, String extension, Boolean getFileExtFromMIME) { + protected boolean addURLToDownload(URL url, String subdirectory, String referrer, Map cookies, String prefix, String fileName, String extension, Boolean getFileExtFromMIME) { // A common bug is rippers adding urls that are just "http:". This rejects said urls if (url.toExternalForm().equals("http:") || url.toExternalForm().equals("https:")) { LOGGER.info(url.toExternalForm() + " is a invalid url amd will be changed"); @@ -285,8 +303,8 @@ public abstract class AbstractRipper if (url.toExternalForm().contains(" ")) { // If for some reason the url with all spaces encoded as %20 is malformed print an error try { - url = new URL(url.toExternalForm().replaceAll(" ", "%20")); - } catch (MalformedURLException e) { + url = new URI(url.toExternalForm().replaceAll(" ", "%20")).toURL(); + } catch (MalformedURLException | URISyntaxException e) { LOGGER.error("Unable to remove spaces from url\nURL: " + url.toExternalForm()); e.printStackTrace(); } @@ -305,34 +323,19 @@ public abstract class AbstractRipper LOGGER.debug("Ripper has been stopped"); return false; } - LOGGER.debug("url: " + url + ", prefix: " + prefix + ", subdirectory" + subdirectory + ", referrer: " + referrer + ", cookies: " + cookies + ", fileName: " + fileName); - String saveAs = getFileName(url, fileName, extension); - File saveFileAs; + LOGGER.debug("url: " + url + ", subdirectory" + subdirectory + ", referrer: " + referrer + ", cookies: " + cookies + ", prefix: " + prefix + ", fileName: " + fileName); + Path saveAs; try { - if (!subdirectory.equals("")) { - subdirectory = Utils.filesystemSafe(subdirectory); - subdirectory = File.separator + subdirectory; + saveAs = getFilePath(url, subdirectory, prefix, fileName, extension); + LOGGER.debug("Downloading " + url + " to " + saveAs); + if (!Files.exists(saveAs.getParent())) { 
+ LOGGER.info("[+] Creating directory: " + saveAs.getParent()); + Files.createDirectories(saveAs.getParent()); } - prefix = Utils.filesystemSanitized(prefix); - String topFolderName = workingDir.getCanonicalPath(); - if (App.stringToAppendToFoldername != null) { - topFolderName = topFolderName + App.stringToAppendToFoldername; - } - saveFileAs = new File( - topFolderName - + subdirectory - + File.separator - + prefix - + saveAs); } catch (IOException e) { LOGGER.error("[!] Error creating save file path for URL '" + url + "':", e); return false; } - LOGGER.debug("Downloading " + url + " to " + saveFileAs); - if (!saveFileAs.getParentFile().exists()) { - LOGGER.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent())); - saveFileAs.getParentFile().mkdirs(); - } if (Utils.getConfigBoolean("remember.url_history", true) && !isThisATest()) { LOGGER.info("Writing " + url.toExternalForm() + " to file"); try { @@ -341,11 +344,11 @@ public abstract class AbstractRipper LOGGER.debug("Unable to write URL history file"); } } - return addURLToDownload(url, saveFileAs, referrer, cookies, getFileExtFromMIME); + return addURLToDownload(url, saveAs, referrer, cookies, getFileExtFromMIME); } protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map cookies, String fileName, String extension) { - return addURLToDownload(url, prefix, subdirectory, referrer, cookies, fileName, extension, false); + return addURLToDownload(url, subdirectory, referrer, cookies, prefix, fileName, extension, false); } protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map cookies, String fileName) { @@ -384,33 +387,53 @@ public abstract class AbstractRipper return addURLToDownload(url, prefix, ""); } - public static String getFileName(URL url, String fileName, String extension) { - String saveAs; - if (fileName != null) { - saveAs = fileName; - } else { - saveAs = url.toExternalForm(); - saveAs = 
saveAs.substring(saveAs.lastIndexOf('/')+1); + public Path getFilePath(URL url, String subdir, String prefix, String fileName, String extension) throws IOException { + // construct the path: workingdir + subdir + prefix + filename + extension + // save into working dir + Path filepath = Paths.get(workingDir.getCanonicalPath()); + + if (null != App.stringToAppendToFoldername) + filepath = filepath.resolveSibling(filepath.getFileName() + App.stringToAppendToFoldername); + + if (null != subdir && !subdir.trim().isEmpty()) + filepath = filepath.resolve(Utils.filesystemSafe(subdir)); + + filepath = filepath.resolve(getFileName(url, prefix, fileName, extension)); + return filepath; + } + + public static String getFileName(URL url, String prefix, String fileName, String extension) { + // retrieve filename from URL if not passed + if (fileName == null || fileName.trim().isEmpty()) { + fileName = url.toExternalForm(); + fileName = fileName.substring(fileName.lastIndexOf('/')+1); } - if (extension == null) { + if (fileName.indexOf('?') >= 0) { fileName = fileName.substring(0, fileName.indexOf('?')); } + if (fileName.indexOf('#') >= 0) { fileName = fileName.substring(0, fileName.indexOf('#')); } + if (fileName.indexOf('&') >= 0) { fileName = fileName.substring(0, fileName.indexOf('&')); } + if (fileName.indexOf(':') >= 0) { fileName = fileName.substring(0, fileName.indexOf(':')); } + + // add prefix + if (prefix != null && !prefix.trim().isEmpty()) { + fileName = prefix + fileName; + } + + // retrieve extension from URL if not passed, no extension if nothing found + if (extension == null || extension.trim().isEmpty()) { // Get the extension of the file String[] lastBitOfURL = url.toExternalForm().split("/"); String[] lastBit = lastBitOfURL[lastBitOfURL.length - 1].split("."); if (lastBit.length != 0) { extension = lastBit[lastBit.length - 1]; - saveAs = saveAs + "." 
+ extension; } } - - if (saveAs.indexOf('?') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('?')); } - if (saveAs.indexOf('#') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('#')); } - if (saveAs.indexOf('&') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('&')); } - if (saveAs.indexOf(':') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf(':')); } + // if extension is passed or found, add it if (extension != null) { - saveAs = saveAs + "." + extension; + fileName = fileName + "." + extension; } - return saveAs; + // make sure filename is not too long and has no unsupported chars + return Utils.sanitizeSaveAs(fileName); } @@ -443,20 +466,16 @@ public abstract class AbstractRipper * @param saveAs * Where the downloaded file is stored. */ - public abstract void downloadCompleted(URL url, File saveAs); + public abstract void downloadCompleted(URL url, Path saveAs); /** * Notifies observers that a file could not be downloaded (includes a reason). - * @param url - * @param reason */ public abstract void downloadErrored(URL url, String reason); /** * Notify observers that a download could not be completed, * but was not technically an "error". - * @param url - * @param file */ - public abstract void downloadExists(URL url, File file); + public abstract void downloadExists(URL url, Path file); /** * @return Number of files downloaded. 
@@ -478,17 +497,17 @@ public abstract class AbstractRipper completed = true; LOGGER.info(" Rip completed!"); - RipStatusComplete rsc = new RipStatusComplete(workingDir, getCount()); + RipStatusComplete rsc = new RipStatusComplete(workingDir.toPath(), getCount()); RipStatusMessage msg = new RipStatusMessage(STATUS.RIP_COMPLETE, rsc); observer.update(this, msg); - Logger rootLogger = Logger.getRootLogger(); - FileAppender fa = (FileAppender) rootLogger.getAppender("FILE"); - if (fa != null) { - LOGGER.debug("Changing log file back to 'ripme.log'"); - fa.setFile("ripme.log"); - fa.activateOptions(); - } + // we do not care if the rollingfileappender is active, just change the logfile in case + // TODO - does not work. +// System.setProperty("logFilename", "ripme.log"); +// LOGGER.debug("Changing log file back to 'ripme.log'"); +// LoggerContext ctx = (LoggerContext) LogManager.getContext(false); +// ctx.reconfigure(); + if (Utils.getConfigBoolean("urls_only.save", false)) { String urlFile = this.workingDir + File.separator + "urls.txt"; try { @@ -519,7 +538,7 @@ public abstract class AbstractRipper } @Override - public abstract void setWorkingDir(URL url) throws IOException; + public abstract void setWorkingDir(URL url) throws IOException, URISyntaxException; /** * @@ -532,8 +551,12 @@ public abstract class AbstractRipper * @throws MalformedURLException * If any of those damned URLs gets malformed. */ - public String getAlbumTitle(URL url) throws MalformedURLException { - return getHost() + "_" + getGID(url); + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { + try { + return getHost() + "_" + getGID(url); + } catch (URISyntaxException e) { + throw new MalformedURLException(e.getMessage()); + } } /** @@ -572,7 +595,6 @@ public abstract class AbstractRipper * The package name. * @return * List of constructors for all eligible Rippers. 
- * @throws Exception */ public static List> getRipperConstructors(String pkg) throws Exception { List> constructors = new ArrayList<>(); @@ -586,8 +608,7 @@ public abstract class AbstractRipper /** * Sends an update message to the relevant observer(s) on this ripper. - * @param status - * @param message + * @param status */ public void sendUpdate(STATUS status, Object message) { if (observer == null) { @@ -679,4 +700,18 @@ public abstract class AbstractRipper protected boolean useByteProgessBar() { return false;} // If true ripme will try to resume a broken download for this ripper protected boolean tryResumeDownload() { return false;} -} + + protected boolean shouldIgnoreURL(URL url) { + final String[] ignoredExtensions = Utils.getConfigStringArray("download.ignore_extensions"); + if (ignoredExtensions == null || ignoredExtensions.length == 0) return false; // nothing ignored + String[] pathElements = url.getPath().split("\\."); + if (pathElements.length == 0) return false; // no extension, can't filter + String extension = pathElements[pathElements.length - 1]; + for (String ignoredExtension : ignoredExtensions) { + if (ignoredExtension.equalsIgnoreCase(extension)) { + return true; + } + } + return false; + } +} \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java b/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java index f433e77f..0f3a1e7a 100644 --- a/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java @@ -1,27 +1,34 @@ package com.rarchives.ripme.ripper; -import java.io.File; -import java.io.FileWriter; -import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - import com.rarchives.ripme.ui.RipStatusMessage; import com.rarchives.ripme.ui.RipStatusMessage.STATUS; import com.rarchives.ripme.utils.Utils; +import java.io.File; 
+import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URISyntaxException; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + // Should this file even exist? It does the same thing as abstractHTML ripper /**' * For ripping delicious albums off the interwebz. + * @deprecated Use AbstractHTMLRipper instead. */ +@Deprecated public abstract class AlbumRipper extends AbstractRipper { private Map itemsPending = Collections.synchronizedMap(new HashMap()); - private Map itemsCompleted = Collections.synchronizedMap(new HashMap()); + private Map itemsCompleted = Collections.synchronizedMap(new HashMap()); private Map itemsErrored = Collections.synchronizedMap(new HashMap()); protected AlbumRipper(URL url) throws IOException { @@ -29,10 +36,10 @@ public abstract class AlbumRipper extends AbstractRipper { } public abstract boolean canRip(URL url); - public abstract URL sanitizeURL(URL url) throws MalformedURLException; + public abstract URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException; public abstract void rip() throws IOException; public abstract String getHost(); - public abstract String getGID(URL url) throws MalformedURLException; + public abstract String getGID(URL url) throws MalformedURLException, URISyntaxException; protected boolean allowDuplicates() { return false; @@ -50,11 +57,11 @@ public abstract class AlbumRipper extends AbstractRipper { /** * Queues multiple URLs of single images to download from a single Album URL */ - public boolean addURLToDownload(URL url, File saveAs, String referrer, Map cookies, Boolean getFileExtFromMIME) { - // Only download one file if this is a test. 
- if (super.isThisATest() && - (itemsPending.size() > 0 || itemsCompleted.size() > 0 || itemsErrored.size() > 0)) { + public boolean addURLToDownload(URL url, Path saveAs, String referrer, Map cookies, Boolean getFileExtFromMIME) { + // Only download one file if this is a test. + if (super.isThisATest() && (itemsCompleted.size() > 0 || itemsErrored.size() > 0)) { stop(); + itemsPending.clear(); return false; } if (!allowDuplicates() @@ -65,20 +72,24 @@ public abstract class AlbumRipper extends AbstractRipper { LOGGER.info("[!] Skipping " + url + " -- already attempted: " + Utils.removeCWD(saveAs)); return false; } + if (shouldIgnoreURL(url)) { + sendUpdate(STATUS.DOWNLOAD_SKIP, "Skipping " + url.toExternalForm() + " - ignored extension"); + return false; + } if (Utils.getConfigBoolean("urls_only.save", false)) { // Output URL to file - String urlFile = this.workingDir + File.separator + "urls.txt"; - try (FileWriter fw = new FileWriter(urlFile, true)) { - fw.write(url.toExternalForm()); - fw.write(System.lineSeparator()); - itemsCompleted.put(url, new File(urlFile)); + Path urlFile = Paths.get(this.workingDir + "/urls.txt"); + String text = url.toExternalForm() + System.lineSeparator(); + try { + Files.write(urlFile, text.getBytes(StandardCharsets.UTF_8), StandardOpenOption.CREATE, StandardOpenOption.APPEND); + itemsCompleted.put(url, urlFile); } catch (IOException e) { LOGGER.error("Error while writing to " + urlFile, e); } } else { - itemsPending.put(url, saveAs); - DownloadFileThread dft = new DownloadFileThread(url, saveAs, this, getFileExtFromMIME); + itemsPending.put(url, saveAs.toFile()); + DownloadFileThread dft = new DownloadFileThread(url, saveAs.toFile(), this, getFileExtFromMIME); if (referrer != null) { dft.setReferrer(referrer); } @@ -92,7 +103,7 @@ public abstract class AlbumRipper extends AbstractRipper { } @Override - public boolean addURLToDownload(URL url, File saveAs) { + public boolean addURLToDownload(URL url, Path saveAs) { return 
addURLToDownload(url, saveAs, null, null, false); } @@ -113,7 +124,7 @@ public abstract class AlbumRipper extends AbstractRipper { /** * Cleans up & tells user about successful download */ - public void downloadCompleted(URL url, File saveAs) { + public void downloadCompleted(URL url, Path saveAs) { if (observer == null) { return; } @@ -150,14 +161,14 @@ public abstract class AlbumRipper extends AbstractRipper { * Tells user that a single file in the album they wish to download has * already been downloaded in the past. */ - public void downloadExists(URL url, File file) { + public void downloadExists(URL url, Path file) { if (observer == null) { return; } itemsPending.remove(url); itemsCompleted.put(url, file); - observer.update(this, new RipStatusMessage(STATUS.DOWNLOAD_WARN, url + " already saved as " + file.getAbsolutePath())); + observer.update(this, new RipStatusMessage(STATUS.DOWNLOAD_WARN, url + " already saved as " + file)); checkIfComplete(); } @@ -183,8 +194,10 @@ public abstract class AlbumRipper extends AbstractRipper { * IOException */ @Override - public void setWorkingDir(URL url) throws IOException { - String path = Utils.getWorkingDirectory().getCanonicalPath(); + public void setWorkingDir(URL url) throws IOException, URISyntaxException { + Path wd = Utils.getWorkingDirectory(); + // TODO - change to nio + String path = wd.toAbsolutePath().toString(); if (!path.endsWith(File.separator)) { path += File.separator; } @@ -202,7 +215,7 @@ public abstract class AlbumRipper extends AbstractRipper { this.workingDir = new File(path); if (!this.workingDir.exists()) { - LOGGER.info("[+] Creating directory: " + Utils.removeCWD(this.workingDir)); + LOGGER.info("[+] Creating directory: " + Utils.removeCWD(this.workingDir.toPath())); this.workingDir.mkdirs(); } LOGGER.debug("Set working directory to: " + this.workingDir); diff --git a/src/main/java/com/rarchives/ripme/ripper/DownloadFileThread.java 
b/src/main/java/com/rarchives/ripme/ripper/DownloadFileThread.java index 3613273e..e9c6f242 100644 --- a/src/main/java/com/rarchives/ripme/ripper/DownloadFileThread.java +++ b/src/main/java/com/rarchives/ripme/ripper/DownloadFileThread.java @@ -1,19 +1,17 @@ package com.rarchives.ripme.ripper; import java.io.*; -import java.net.HttpURLConnection; -import java.net.SocketTimeoutException; -import java.net.URL; -import java.net.URLConnection; +import java.net.*; +import java.nio.file.Files; +import java.nio.file.Paths; import java.util.Arrays; import java.util.HashMap; import java.util.Map; -import java.util.ResourceBundle; import javax.net.ssl.HttpsURLConnection; -import com.rarchives.ripme.ui.MainWindow; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.jsoup.HttpStatusException; import com.rarchives.ripme.ui.RipStatusMessage.STATUS; @@ -23,29 +21,31 @@ import com.rarchives.ripme.utils.Utils; * Thread for downloading files. Includes retry logic, observer notifications, * and other goodies. 
*/ -class DownloadFileThread extends Thread { - private static final Logger logger = Logger.getLogger(DownloadFileThread.class); +class DownloadFileThread implements Runnable { + private static final Logger logger = LogManager.getLogger(DownloadFileThread.class); private String referrer = ""; private Map cookies = new HashMap<>(); - private URL url; + private final URL url; private File saveAs; - private String prettySaveAs; - private AbstractRipper observer; - private int retries; - private Boolean getFileExtFromMIME; + private final String prettySaveAs; + private final AbstractRipper observer; + private final int retries; + private final Boolean getFileExtFromMIME; private final int TIMEOUT; + private final int retrySleep; public DownloadFileThread(URL url, File saveAs, AbstractRipper observer, Boolean getFileExtFromMIME) { super(); this.url = url; this.saveAs = saveAs; - this.prettySaveAs = Utils.removeCWD(saveAs); + this.prettySaveAs = Utils.removeCWD(saveAs.toPath()); this.observer = observer; this.retries = Utils.getConfigInteger("download.retries", 1); this.TIMEOUT = Utils.getConfigInteger("download.timeout", 60000); + this.retrySleep = Utils.getConfigInteger("download.retry.sleep", 0); this.getFileExtFromMIME = getFileExtFromMIME; } @@ -61,12 +61,13 @@ class DownloadFileThread extends Thread { * Attempts to download the file. Retries as needed. Notifies observers upon * completion/error/warn. 
*/ + @Override public void run() { // First thing we make sure the file name doesn't have any illegal chars in it saveAs = new File( saveAs.getParentFile().getAbsolutePath() + File.separator + Utils.sanitizeSaveAs(saveAs.getName())); long fileSize = 0; - int bytesTotal = 0; + int bytesTotal; int bytesDownloaded = 0; if (saveAs.exists() && observer.tryResumeDownload()) { fileSize = saveAs.length(); @@ -78,15 +79,15 @@ class DownloadFileThread extends Thread { return; } if (saveAs.exists() && !observer.tryResumeDownload() && !getFileExtFromMIME - || Utils.fuzzyExists(new File(saveAs.getParent()), saveAs.getName()) && getFileExtFromMIME + || Utils.fuzzyExists(Paths.get(saveAs.getParent()), saveAs.getName()) && getFileExtFromMIME && !observer.tryResumeDownload()) { if (Utils.getConfigBoolean("file.overwrite", false)) { logger.info("[!] " + Utils.getLocalizedString("deleting.existing.file") + prettySaveAs); - saveAs.delete(); + if (!saveAs.delete()) logger.error("could not delete existing file: " + saveAs.getAbsolutePath()); } else { - logger.info("[!] " + Utils.getLocalizedString("skipping") + url + " -- " + logger.info("[!] " + Utils.getLocalizedString("skipping") + " " + url + " -- " + Utils.getLocalizedString("file.already.exists") + ": " + prettySaveAs); - observer.downloadExists(url, saveAs); + observer.downloadExists(url, saveAs.toPath()); return; } } @@ -95,8 +96,6 @@ class DownloadFileThread extends Thread { int tries = 0; // Number of attempts to download do { tries += 1; - InputStream bis = null; - OutputStream fos = null; try { logger.info(" Downloading file: " + urlToDownload + (tries > 0 ? 
" Retry #" + tries : "")); observer.sendUpdate(STATUS.DOWNLOAD_STARTED, url.toExternalForm()); @@ -119,14 +118,14 @@ class DownloadFileThread extends Thread { huc.setRequestProperty("Referer", referrer); // Sic } huc.setRequestProperty("User-agent", AbstractRipper.USER_AGENT); - String cookie = ""; + StringBuilder cookie = new StringBuilder(); for (String key : cookies.keySet()) { - if (!cookie.equals("")) { - cookie += "; "; + if (!cookie.toString().equals("")) { + cookie.append("; "); } - cookie += key + "=" + cookies.get(key); + cookie.append(key).append("=").append(cookies.get(key)); } - huc.setRequestProperty("Cookie", cookie); + huc.setRequestProperty("Cookie", cookie.toString()); if (observer.tryResumeDownload()) { if (fileSize != 0) { huc.setRequestProperty("Range", "bytes=" + fileSize + "-"); @@ -150,7 +149,7 @@ class DownloadFileThread extends Thread { redirected = true; } String location = huc.getHeaderField("Location"); - urlToDownload = new URL(location); + urlToDownload = new URI(location).toURL(); // Throw exception so download can be retried throw new IOException("Redirect status code " + statusCode + " - redirect to " + location); } @@ -184,6 +183,7 @@ class DownloadFileThread extends Thread { } // Save file + InputStream bis; bis = new BufferedInputStream(huc.getInputStream()); // Check if we should get the file ext from the MIME type @@ -209,6 +209,7 @@ class DownloadFileThread extends Thread { } } // If we're resuming a download we append data to the existing file + OutputStream fos = null; if (statusCode == 206) { fos = new FileOutputStream(saveAs, true); } else { @@ -235,9 +236,11 @@ class DownloadFileThread extends Thread { } else if (saveAs.getAbsolutePath().length() > 259 && Utils.isWindows()) { // This if is for when the file path has gone above 260 chars which windows does // not allow - fos = new FileOutputStream( + fos = Files.newOutputStream( Utils.shortenSaveAsWindows(saveAs.getParentFile().getPath(), saveAs.getName())); + assert fos 
!= null: "After shortenSaveAsWindows: " + saveAs.getAbsolutePath(); } + assert fos != null: e.getStackTrace(); } } byte[] data = new byte[1024 * 256]; @@ -278,24 +281,17 @@ class DownloadFileThread extends Thread { "HTTP status code " + hse.getStatusCode() + " while downloading " + url.toExternalForm()); return; } - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { logger.debug("IOException", e); logger.error("[!] " + Utils.getLocalizedString("exception.while.downloading.file") + ": " + url + " - " + e.getMessage()); - } finally { - // Close any open streams - try { - if (bis != null) { - bis.close(); - } - } catch (IOException e) { - } - try { - if (fos != null) { - fos.close(); - } - } catch (IOException e) { - } + } catch (NullPointerException npe){ + + logger.error("[!] " + Utils.getLocalizedString("failed.to.download") + " for URL " + url); + observer.downloadErrored(url, + Utils.getLocalizedString("failed.to.download") + " " + url.toExternalForm()); + return; + } if (tries > this.retries) { logger.error("[!] 
" + Utils.getLocalizedString("exceeded.maximum.retries") + " (" + this.retries @@ -303,9 +299,13 @@ class DownloadFileThread extends Thread { observer.downloadErrored(url, Utils.getLocalizedString("failed.to.download") + " " + url.toExternalForm()); return; + } else { + if (retrySleep > 0) { + Utils.sleep(retrySleep); + } } } while (true); - observer.downloadCompleted(url, saveAs); + observer.downloadCompleted(url, saveAs.toPath()); logger.info("[+] Saved " + url + " as " + this.prettySaveAs); } diff --git a/src/main/java/com/rarchives/ripme/ripper/DownloadThreadPool.java b/src/main/java/com/rarchives/ripme/ripper/DownloadThreadPool.java index a811c98a..8ae43743 100644 --- a/src/main/java/com/rarchives/ripme/ripper/DownloadThreadPool.java +++ b/src/main/java/com/rarchives/ripme/ripper/DownloadThreadPool.java @@ -4,16 +4,16 @@ import java.util.concurrent.Executors; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import org.apache.log4j.Logger; - import com.rarchives.ripme.utils.Utils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; /** * Simple wrapper around a FixedThreadPool. */ public class DownloadThreadPool { - private static final Logger logger = Logger.getLogger(DownloadThreadPool.class); + private static final Logger logger = LogManager.getLogger(DownloadThreadPool.class); private ThreadPoolExecutor threadPool = null; public DownloadThreadPool() { @@ -35,10 +35,10 @@ public class DownloadThreadPool { } /** * For adding threads to execution pool. - * @param t + * @param t * Thread to be added. 
*/ - public void addThread(Thread t) { + public void addThread(Runnable t) { threadPool.execute(t); } diff --git a/src/main/java/com/rarchives/ripme/ripper/DownloadVideoThread.java b/src/main/java/com/rarchives/ripme/ripper/DownloadVideoThread.java index ef55e54e..9430adce 100644 --- a/src/main/java/com/rarchives/ripme/ripper/DownloadVideoThread.java +++ b/src/main/java/com/rarchives/ripme/ripper/DownloadVideoThread.java @@ -1,36 +1,36 @@ package com.rarchives.ripme.ripper; import java.io.BufferedInputStream; -import java.io.File; -import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.net.HttpURLConnection; import java.net.URL; +import java.nio.file.Files; +import java.nio.file.Path; import javax.net.ssl.HttpsURLConnection; -import org.apache.log4j.Logger; - import com.rarchives.ripme.ui.RipStatusMessage.STATUS; import com.rarchives.ripme.utils.Utils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; /** * Thread for downloading files. * Includes retry logic, observer notifications, and other goodies. */ -class DownloadVideoThread extends Thread { +class DownloadVideoThread implements Runnable { - private static final Logger logger = Logger.getLogger(DownloadVideoThread.class); + private static final Logger logger = LogManager.getLogger(DownloadVideoThread.class); - private URL url; - private File saveAs; - private String prettySaveAs; - private AbstractRipper observer; - private int retries; + private final URL url; + private final Path saveAs; + private final String prettySaveAs; + private final AbstractRipper observer; + private final int retries; - public DownloadVideoThread(URL url, File saveAs, AbstractRipper observer) { + public DownloadVideoThread(URL url, Path saveAs, AbstractRipper observer) { super(); this.url = url; this.saveAs = saveAs; @@ -43,6 +43,7 @@ class DownloadVideoThread extends Thread { * Attempts to download the file. 
Retries as needed. * Notifies observers upon completion/error/warn. */ + @Override public void run() { try { observer.stopCheck(); @@ -50,10 +51,14 @@ class DownloadVideoThread extends Thread { observer.downloadErrored(url, "Download interrupted"); return; } - if (saveAs.exists()) { + if (Files.exists(saveAs)) { if (Utils.getConfigBoolean("file.overwrite", false)) { logger.info("[!] Deleting existing file" + prettySaveAs); - saveAs.delete(); + try { + Files.delete(saveAs); + } catch (IOException e) { + e.printStackTrace(); + } } else { logger.info("[!] Skipping " + url + " -- file already exists: " + prettySaveAs); observer.downloadExists(url, saveAs); @@ -100,7 +105,7 @@ class DownloadVideoThread extends Thread { huc.connect(); // Check status code bis = new BufferedInputStream(huc.getInputStream()); - fos = new FileOutputStream(saveAs); + fos = Files.newOutputStream(saveAs); while ( (bytesRead = bis.read(data)) != -1) { try { observer.stopCheck(); @@ -122,10 +127,10 @@ class DownloadVideoThread extends Thread { // Close any open streams try { if (bis != null) { bis.close(); } - } catch (IOException e) { } + } catch (IOException ignored) { } try { if (fos != null) { fos.close(); } - } catch (IOException e) { } + } catch (IOException ignored) { } } if (tries > this.retries) { logger.error("[!] Exceeded maximum retries (" + this.retries + ") for URL " + url); diff --git a/src/main/java/com/rarchives/ripme/ripper/RipperInterface.java b/src/main/java/com/rarchives/ripme/ripper/RipperInterface.java index 550209c0..824d639e 100644 --- a/src/main/java/com/rarchives/ripme/ripper/RipperInterface.java +++ b/src/main/java/com/rarchives/ripme/ripper/RipperInterface.java @@ -2,6 +2,7 @@ package com.rarchives.ripme.ripper; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; /** @@ -11,10 +12,10 @@ import java.net.URL; * (cheers!) 
*/ interface RipperInterface { - void rip() throws IOException; + void rip() throws IOException, URISyntaxException; boolean canRip(URL url); - URL sanitizeURL(URL url) throws MalformedURLException; - void setWorkingDir(URL url) throws IOException; + URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException; + void setWorkingDir(URL url) throws IOException, URISyntaxException; String getHost(); - String getGID(URL url) throws MalformedURLException; + String getGID(URL url) throws MalformedURLException, URISyntaxException; } \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java b/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java index 4fb0f32a..014998fa 100644 --- a/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java @@ -8,7 +8,9 @@ import java.io.File; import java.io.FileWriter; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; +import java.nio.file.Path; import java.util.Map; @@ -21,7 +23,7 @@ public abstract class VideoRipper extends AbstractRipper { super(url); } - public abstract void rip() throws IOException; + public abstract void rip() throws IOException, URISyntaxException; public abstract String getHost(); @@ -43,10 +45,10 @@ public abstract class VideoRipper extends AbstractRipper { } @Override - public boolean addURLToDownload(URL url, File saveAs) { + public boolean addURLToDownload(URL url, Path saveAs) { if (Utils.getConfigBoolean("urls_only.save", false)) { // Output URL to file - String urlFile = this.workingDir + File.separator + "urls.txt"; + String urlFile = this.workingDir + "/urls.txt"; try (FileWriter fw = new FileWriter(urlFile, true)) { fw.write(url.toExternalForm()); @@ -66,13 +68,17 @@ public abstract class VideoRipper extends AbstractRipper { this.url = url; return true; } + if (shouldIgnoreURL(url)) { + 
sendUpdate(STATUS.DOWNLOAD_SKIP, "Skipping " + url.toExternalForm() + " - ignored extension"); + return false; + } threadPool.addThread(new DownloadVideoThread(url, saveAs, this)); } return true; } @Override - public boolean addURLToDownload(URL url, File saveAs, String referrer, Map cookies, Boolean getFileExtFromMIME) { + public boolean addURLToDownload(URL url, Path saveAs, String referrer, Map cookies, Boolean getFileExtFromMIME) { return addURLToDownload(url, saveAs); } @@ -83,7 +89,9 @@ public abstract class VideoRipper extends AbstractRipper { */ @Override public void setWorkingDir(URL url) throws IOException { - String path = Utils.getWorkingDirectory().getCanonicalPath(); + Path wd = Utils.getWorkingDirectory(); + // TODO - change to nio + String path = wd.toAbsolutePath().toString(); if (!path.endsWith(File.separator)) { path += File.separator; @@ -93,7 +101,7 @@ public abstract class VideoRipper extends AbstractRipper { workingDir = new File(path); if (!workingDir.exists()) { - LOGGER.info("[+] Creating directory: " + Utils.removeCWD(workingDir)); + LOGGER.info("[+] Creating directory: " + Utils.removeCWD(workingDir.toPath())); workingDir.mkdirs(); } @@ -115,7 +123,7 @@ public abstract class VideoRipper extends AbstractRipper { * @param saveAs Path to file, including filename. */ @Override - public void downloadCompleted(URL url, File saveAs) { + public void downloadCompleted(URL url, Path saveAs) { if (observer == null) { return; } @@ -149,12 +157,11 @@ public abstract class VideoRipper extends AbstractRipper { /** * Runs if user tries to redownload an already existing File. 
- * - * @param url Target URL + * @param url Target URL * @param file Existing file */ @Override - public void downloadExists(URL url, File file) { + public void downloadExists(URL url, Path file) { if (observer == null) { return; } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/AerisdiesRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/AerisdiesRipper.java index a11b08a4..8213d510 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/AerisdiesRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/AerisdiesRipper.java @@ -2,6 +2,7 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -14,7 +15,6 @@ import org.jsoup.nodes.Element; import org.jsoup.select.Elements; import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.utils.Http; import java.util.HashMap; public class AerisdiesRipper extends AbstractHTMLRipper { @@ -47,9 +47,9 @@ public class AerisdiesRipper extends AbstractHTMLRipper { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { - Element el = getFirstPage().select(".headtext").first(); + Element el = getCachedFirstPage().select(".headtext").first(); if (el == null) { throw new IOException("Unable to get album title"); } @@ -62,11 +62,6 @@ public class AerisdiesRipper extends AbstractHTMLRipper { return super.getAlbumTitle(url); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document page) { List imageURLs = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/AllporncomicRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/AllporncomicRipper.java index 
66ddba4d..da8c7bd7 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/AllporncomicRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/AllporncomicRipper.java @@ -12,7 +12,6 @@ import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.utils.Http; public class AllporncomicRipper extends AbstractHTMLRipper { @@ -46,17 +45,11 @@ public class AllporncomicRipper extends AbstractHTMLRipper { "allporncomic.com/TITLE/CHAPTER - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); for (Element el : doc.select(".wp-manga-chapter-img")) { - result.add(el.attr("src")); + result.add(el.attr("data-src")); } return result; } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ArtStationRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ArtStationRipper.java index f61f48f8..bc824769 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ArtStationRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ArtStationRipper.java @@ -2,6 +2,8 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -50,7 +52,7 @@ public class ArtStationRipper extends AbstractJSONRipper { try { // groupData = Http.url(albumURL.getLocation()).getJSON(); groupData = getJson(albumURL.getLocation()); - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { throw new MalformedURLException("Couldn't load JSON from " + albumURL.getLocation()); } return groupData.getString("title"); @@ -60,9 +62,9 @@ public class ArtStationRipper extends 
AbstractJSONRipper { // URL points to user portfolio, use user's full name as GID String userInfoURL = "https://www.artstation.com/users/" + albumURL.getID() + "/quick.json"; try { - // groupData = Http.url(userInfoURL).getJSON(); +// groupData = Http.url(userInfoURL).getJSON(); groupData = getJson(userInfoURL); - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { throw new MalformedURLException("Couldn't load JSON from " + userInfoURL); } return groupData.getString("full_name"); @@ -74,7 +76,7 @@ public class ArtStationRipper extends AbstractJSONRipper { } @Override - protected JSONObject getFirstPage() throws IOException { + protected JSONObject getFirstPage() throws IOException, URISyntaxException { if (albumURL.getType() == URL_TYPE.SINGLE_PROJECT) { // URL points to JSON of a single project, just return it // return Http.url(albumURL.getLocation()).getJSON(); @@ -90,7 +92,7 @@ public class ArtStationRipper extends AbstractJSONRipper { if (albumContent.getInt("total_count") > 0) { // Get JSON of the first project and return it JSONObject projectInfo = albumContent.getJSONArray("data").getJSONObject(0); - ParsedURL projectURL = parseURL(new URL(projectInfo.getString("permalink"))); + ParsedURL projectURL = parseURL(new URI(projectInfo.getString("permalink")).toURL()); // return Http.url(projectURL.getLocation()).getJSON(); return getJson(projectURL.getLocation()); } @@ -100,7 +102,7 @@ public class ArtStationRipper extends AbstractJSONRipper { } @Override - protected JSONObject getNextPage(JSONObject doc) throws IOException { + protected JSONObject getNextPage(JSONObject doc) throws IOException, URISyntaxException { if (albumURL.getType() == URL_TYPE.USER_PORTFOLIO) { // Initialize the page number if it hasn't been initialized already if (projectPageNumber == null) { @@ -117,7 +119,7 @@ public class ArtStationRipper extends AbstractJSONRipper { projectIndex = 0; } - Integer currentProject = ((projectPageNumber - 1) * 50) + (projectIndex + 
1); + int currentProject = ((projectPageNumber - 1) * 50) + (projectIndex + 1); // JSONObject albumContent = Http.url(albumURL.getLocation() + "?page=" + // projectPageNumber).getJSON(); JSONObject albumContent = getJson(albumURL.getLocation() + "?page=" + projectPageNumber); @@ -125,7 +127,7 @@ public class ArtStationRipper extends AbstractJSONRipper { if (albumContent.getInt("total_count") > currentProject) { // Get JSON of the next project and return it JSONObject projectInfo = albumContent.getJSONArray("data").getJSONObject(projectIndex); - ParsedURL projectURL = parseURL(new URL(projectInfo.getString("permalink"))); + ParsedURL projectURL = parseURL(new URI(projectInfo.getString("permalink")).toURL()); projectIndex++; // return Http.url(projectURL.getLocation()).getJSON(); return getJson(projectURL.getLocation()); @@ -254,7 +256,7 @@ public class ArtStationRipper extends AbstractJSONRipper { con.userAgent("Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0"); con.header("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"); con.header("Accept-Language", "en-US,en;q=0.5"); - con.header("Accept-Encoding", "gzip, deflate, br"); +// con.header("Accept-Encoding", "gzip, deflate, br"); con.header("Upgrade-Insecure-Requests", "1"); Response res = con.execute(); int status = res.statusCode(); @@ -309,7 +311,7 @@ public class ArtStationRipper extends AbstractJSONRipper { "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.95 Safari/537.11"); con.header("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"); con.header("Accept-Language", "en-US,en;q=0.5"); - con.header("Accept-Encoding", "gzip, deflate, br"); +// con.header("Accept-Encoding", "gzip, deflate, br"); con.header("Upgrade-Insecure-Requests", "1"); Response res = con.execute(); int status = res.statusCode(); @@ -320,8 +322,8 @@ public class ArtStationRipper extends AbstractJSONRipper { throw new 
IOException("Error fetching json. Status code:" + status); } - private JSONObject getJson(String url) throws IOException { - return getJson(new URL(url)); + private JSONObject getJson(String url) throws IOException, URISyntaxException { + return getJson(new URI(url).toURL()); } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ArtstnRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ArtstnRipper.java index 82b6e97c..1caeead4 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ArtstnRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ArtstnRipper.java @@ -1,58 +1,60 @@ -package com.rarchives.ripme.ripper.rippers; - -import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; - -import org.jsoup.Connection.Response; - -import com.rarchives.ripme.utils.Http; - -/* - * Ripper for ArtStation's short URL domain. - * Example URL: https://artstn.co/p/JlE15Z - */ - -public class ArtstnRipper extends ArtStationRipper { - public URL artStationUrl = null; - - public ArtstnRipper(URL url) throws IOException { - super(url); - } - - @Override - public boolean canRip(URL url) { - return url.getHost().endsWith("artstn.co"); - } - - @Override - public String getGID(URL url) throws MalformedURLException { - if (artStationUrl == null) { - // Run only once. 
- try { - artStationUrl = getFinalUrl(url); - if (artStationUrl == null) { - throw new IOException("Null url received."); - } - } catch (IOException e) { - LOGGER.error("Couldnt resolve URL.", e); - } - - } - return super.getGID(artStationUrl); - } - - public URL getFinalUrl(URL url) throws IOException { - if (url.getHost().endsWith("artstation.com")) { - return url; - } - - LOGGER.info("Checking url: " + url); - Response response = Http.url(url).connection().followRedirects(false).execute(); - if (response.statusCode() / 100 == 3 && response.hasHeader("location")) { - return getFinalUrl(new URL(response.header("location"))); - } else { - return null; - } - } -} +package com.rarchives.ripme.ripper.rippers; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; + +import org.jsoup.Connection.Response; + +import com.rarchives.ripme.utils.Http; + +/* + * Ripper for ArtStation's short URL domain. + * Example URL: https://artstn.co/p/JlE15Z + */ + +public class ArtstnRipper extends ArtStationRipper { + public URL artStationUrl = null; + + public ArtstnRipper(URL url) throws IOException { + super(url); + } + + @Override + public boolean canRip(URL url) { + return url.getHost().endsWith("artstn.co"); + } + + @Override + public String getGID(URL url) throws MalformedURLException { + if (artStationUrl == null) { + // Run only once. 
+ try { + artStationUrl = getFinalUrl(url); + if (artStationUrl == null) { + throw new IOException("Null url received."); + } + } catch (IOException | URISyntaxException e) { + LOGGER.error("Couldnt resolve URL.", e); + } + + } + return super.getGID(artStationUrl); + } + + public URL getFinalUrl(URL url) throws IOException, URISyntaxException { + if (url.getHost().endsWith("artstation.com")) { + return url; + } + + LOGGER.info("Checking url: " + url); + Response response = Http.url(url).connection().followRedirects(false).execute(); + if (response.statusCode() / 100 == 3 && response.hasHeader("location")) { + return getFinalUrl(new URI(response.header("location")).toURL()); + } else { + return null; + } + } +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/BatoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/BatoRipper.java index 25491dfe..8502e6b6 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/BatoRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/BatoRipper.java @@ -2,6 +2,7 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -13,7 +14,6 @@ import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.utils.Http; public class BatoRipper extends AbstractHTMLRipper { @@ -70,10 +70,10 @@ public class BatoRipper extends AbstractHTMLRipper { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { // Attempt to use album title as GID - return getHost() + "_" + getGID(url) + "_" + getFirstPage().select("title").first().text().replaceAll(" ", "_"); + return getHost() + "_" + getGID(url) + "_" + 
getCachedFirstPage().select("title").first().text().replaceAll(" ", "_"); } catch (IOException e) { // Fall back to default album naming convention LOGGER.info("Unable to find title at " + url); @@ -94,11 +94,6 @@ public class BatoRipper extends AbstractHTMLRipper { return m.matches(); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } @Override public List getURLsFromPage(Document doc) { diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/BcfakesRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/BcfakesRipper.java index 2a77f02d..2798b1ea 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/BcfakesRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/BcfakesRipper.java @@ -47,11 +47,6 @@ public class BcfakesRipper extends AbstractHTMLRipper { + " Got: " + url); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - @Override public Document getNextPage(Document doc) throws IOException { // Find next page diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/BlackbrickroadofozRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/BlackbrickroadofozRipper.java index cb5d4b14..d99fe61d 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/BlackbrickroadofozRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/BlackbrickroadofozRipper.java @@ -41,12 +41,6 @@ public class BlackbrickroadofozRipper extends AbstractHTMLRipper { "www.blackbrickroadofoz.com/comic/PAGE - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public Document getNextPage(Document doc) throws IOException { sleep(1000); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/BooruRipper.java 
b/src/main/java/com/rarchives/ripme/ripper/rippers/BooruRipper.java index 7d6b17a6..974a0061 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/BooruRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/BooruRipper.java @@ -12,12 +12,14 @@ import java.util.ArrayList; import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.apache.log4j.Logger; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; public class BooruRipper extends AbstractHTMLRipper { - private static final Logger logger = Logger.getLogger(BooruRipper.class); + private static final Logger logger = LogManager.getLogger(BooruRipper.class); private static Pattern gidPattern = null; diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ChanRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ChanRipper.java index 8c7aea6b..f1d41426 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ChanRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ChanRipper.java @@ -2,10 +2,11 @@ package com.rarchives.ripme.ripper.rippers; import com.rarchives.ripme.ripper.AbstractHTMLRipper; import com.rarchives.ripme.ripper.rippers.ripperhelpers.ChanSite; -import com.rarchives.ripme.utils.Http; +import com.rarchives.ripme.utils.Utils; import com.rarchives.ripme.utils.RipUtils; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.Arrays; @@ -13,7 +14,6 @@ import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; -import com.rarchives.ripme.utils.Utils; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; @@ -72,7 +72,7 @@ public class ChanRipper extends AbstractHTMLRipper { ); private ChanSite chanSite; - private Boolean generalChanSite = true; + private boolean 
generalChanSite = true; public ChanRipper(URL url) throws IOException { super(url); @@ -104,7 +104,7 @@ public class ChanRipper extends AbstractHTMLRipper { public String getAlbumTitle(URL url) throws MalformedURLException { try { // Attempt to use album title as GID - Document doc = getFirstPage(); + Document doc = getCachedFirstPage(); try { String subject = doc.select(".post.op > .postinfo > .subject").first().text(); return getHost() + "_" + getGID(url) + "_" + subject; @@ -195,11 +195,9 @@ public class ChanRipper extends AbstractHTMLRipper { return this.url.getHost(); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(this.url).get(); + public Document getFirstPage() throws IOException, URISyntaxException { + return super.getFirstPage(); } - private boolean isURLBlacklisted(String url) { for (String blacklist_item : url_piece_blacklist) { if (url.contains(blacklist_item)) { diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/CheveretoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/CheveretoRipper.java index 005ba5c7..c66465eb 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/CheveretoRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/CheveretoRipper.java @@ -2,6 +2,7 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.Arrays; @@ -50,10 +51,10 @@ public class CheveretoRipper extends AbstractHTMLRipper { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { // Attempt to use album title as GID - Element titleElement = getFirstPage().select("meta[property=og:title]").first(); + Element titleElement = getCachedFirstPage().select("meta[property=og:title]").first(); String title = 
titleElement.attr("content"); title = title.substring(title.lastIndexOf('/') + 1); return getHost() + "_" + title.trim(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ComicextraRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ComicextraRipper.java index 08b27a76..e794e072 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ComicextraRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ComicextraRipper.java @@ -1,173 +1,174 @@ -package com.rarchives.ripme.ripper.rippers; - -import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.ArrayList; -import java.util.List; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; -import org.jsoup.select.Elements; -import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.utils.Http; - -/** - * @author Tushar - * - */ -public class ComicextraRipper extends AbstractHTMLRipper { - - private static final String FILE_NAME = "page"; - - private Pattern p1 = - Pattern.compile("https:\\/\\/www.comicextra.com\\/comic\\/([A-Za-z0-9_-]+)"); - private Pattern p2 = Pattern.compile( - "https:\\/\\/www.comicextra.com\\/([A-Za-z0-9_-]+)\\/([A-Za-z0-9_-]+)(?:\\/full)?"); - private UrlType urlType = UrlType.UNKNOWN; - private List chaptersList = null; - private int chapterIndex = -1; // index for the chaptersList, useful in getting the next page. - private int imageIndex = 0; // image index for each chapter images. 
- - public ComicextraRipper(URL url) throws IOException { - super(url); - } - - @Override - protected String getDomain() { - return "comicextra.com"; - } - - @Override - public String getHost() { - return "comicextra"; - } - - @Override - public String getGID(URL url) throws MalformedURLException { - Matcher m1 = p1.matcher(url.toExternalForm()); - if (m1.matches()) { - // URL is of comic( https://www.comicextra.com/comic/the-punisher-frank-castle-max). - urlType = UrlType.COMIC; - return m1.group(1); - } - - Matcher m2 = p2.matcher(url.toExternalForm()); - if (m2.matches()) { - // URL is of chapter( https://www.comicextra.com/the-punisher-frank-castle-max/chapter-75). - urlType = UrlType.CHAPTER; - return m2.group(1); - } - - throw new MalformedURLException( - "Expected comicextra.com url of type: https://www.comicextra.com/comic/some-comic-name\n" - + " or https://www.comicextra.com/some-comic-name/chapter-001 got " + url - + " instead"); - } - - @Override - protected Document getFirstPage() throws IOException { - Document doc = null; - - switch (urlType) { - case COMIC: - // For COMIC type url we extract the urls of each chapters and store them in chapters. - chaptersList = new ArrayList<>(); - Document comicPage = Http.url(url).get(); - Elements elements = comicPage.select("div.episode-list a"); - for (Element e : elements) { - chaptersList.add(getCompleteChapterUrl(e.attr("abs:href"))); - } - - // Set the first chapter from the chapterList as the doc. - chapterIndex = 0; - doc = Http.url(chaptersList.get(chapterIndex)).get(); - break; - case CHAPTER: - doc = Http.url(url).get(); - break; - case UNKNOWN: - default: - throw new IOException("Unknown url type encountered."); - } - - return doc; - } - - @Override - public Document getNextPage(Document doc) throws IOException { - if (urlType == UrlType.COMIC) { - ++chapterIndex; - imageIndex = 0; // Resetting the imagesIndex so that images prefix within each chapter starts from '001_'. 
- if (chapterIndex < chaptersList.size()) { - return Http.url(chaptersList.get(chapterIndex)).get(); - } - } - - return super.getNextPage(doc); - } - - @Override - protected List getURLsFromPage(Document page) { - List urls = new ArrayList<>(); - - if (urlType == UrlType.COMIC || urlType == UrlType.CHAPTER) { - Elements images = page.select("img.chapter_img"); - for (Element img : images) { - urls.add(img.attr("src")); - } - } - - return urls; - } - - @Override - protected void downloadURL(URL url, int index) { - String subdirectory = getSubDirectoryName(); - String prefix = getPrefix(++imageIndex); - - addURLToDownload(url, prefix, subdirectory, null, null, FILE_NAME, null, Boolean.TRUE); - } - - /* - * This function appends /full at the end of the chapters url to get all the images for the - * chapter in the same Document. - */ - private String getCompleteChapterUrl(String chapterUrl) { - if (!chapterUrl.endsWith("/full")) { - chapterUrl = chapterUrl + "/full"; - } - return chapterUrl; - } - - /* - * This functions returns sub folder name for the current chapter. - */ - private String getSubDirectoryName() { - String subDirectory = ""; - - if (urlType == UrlType.COMIC) { - Matcher m = p2.matcher(chaptersList.get(chapterIndex)); - if (m.matches()) { - subDirectory = m.group(2); - } - } - - if (urlType == UrlType.CHAPTER) { - Matcher m = p2.matcher(url.toExternalForm()); - if (m.matches()) { - subDirectory = m.group(2); - } - } - - return subDirectory; - } - - /* - * Enum to classify different types of urls. 
- */ - private enum UrlType { - COMIC, CHAPTER, UNKNOWN - } -} +package com.rarchives.ripme.ripper.rippers; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.jsoup.nodes.Document; +import org.jsoup.nodes.Element; +import org.jsoup.select.Elements; +import com.rarchives.ripme.ripper.AbstractHTMLRipper; +import com.rarchives.ripme.utils.Http; + +/** + * @author Tushar + * + */ +public class ComicextraRipper extends AbstractHTMLRipper { + + private static final String FILE_NAME = "page"; + + private Pattern p1 = + Pattern.compile("https:\\/\\/www.comicextra.com\\/comic\\/([A-Za-z0-9_-]+)"); + private Pattern p2 = Pattern.compile( + "https:\\/\\/www.comicextra.com\\/([A-Za-z0-9_-]+)\\/([A-Za-z0-9_-]+)(?:\\/full)?"); + private UrlType urlType = UrlType.UNKNOWN; + private List chaptersList = null; + private int chapterIndex = -1; // index for the chaptersList, useful in getting the next page. + private int imageIndex = 0; // image index for each chapter images. + + public ComicextraRipper(URL url) throws IOException { + super(url); + } + + @Override + protected String getDomain() { + return "comicextra.com"; + } + + @Override + public String getHost() { + return "comicextra"; + } + + @Override + public String getGID(URL url) throws MalformedURLException { + Matcher m1 = p1.matcher(url.toExternalForm()); + if (m1.matches()) { + // URL is of comic( https://www.comicextra.com/comic/the-punisher-frank-castle-max). + urlType = UrlType.COMIC; + return m1.group(1); + } + + Matcher m2 = p2.matcher(url.toExternalForm()); + if (m2.matches()) { + // URL is of chapter( https://www.comicextra.com/the-punisher-frank-castle-max/chapter-75). 
+ urlType = UrlType.CHAPTER; + return m2.group(1); + } + + throw new MalformedURLException( + "Expected comicextra.com url of type: https://www.comicextra.com/comic/some-comic-name\n" + + " or https://www.comicextra.com/some-comic-name/chapter-001 got " + url + + " instead"); + } + + @Override + protected Document getFirstPage() throws IOException { + Document doc = null; + + switch (urlType) { + case COMIC: + // For COMIC type url we extract the urls of each chapters and store them in chapters. + chaptersList = new ArrayList<>(); + Document comicPage = Http.url(url).get(); + Elements elements = comicPage.select("div.episode-list a"); + for (Element e : elements) { + chaptersList.add(getCompleteChapterUrl(e.attr("abs:href"))); + } + + // Set the first chapter from the chapterList as the doc. + chapterIndex = 0; + doc = Http.url(chaptersList.get(chapterIndex)).get(); + break; + case CHAPTER: + doc = Http.url(url).get(); + break; + case UNKNOWN: + default: + throw new IOException("Unknown url type encountered."); + } + + return doc; + } + + @Override + public Document getNextPage(Document doc) throws IOException, URISyntaxException { + if (urlType == UrlType.COMIC) { + ++chapterIndex; + imageIndex = 0; // Resetting the imagesIndex so that images prefix within each chapter starts from '001_'. 
+ if (chapterIndex < chaptersList.size()) { + return Http.url(chaptersList.get(chapterIndex)).get(); + } + } + + return super.getNextPage(doc); + } + + @Override + protected List getURLsFromPage(Document page) { + List urls = new ArrayList<>(); + + if (urlType == UrlType.COMIC || urlType == UrlType.CHAPTER) { + Elements images = page.select("img.chapter_img"); + for (Element img : images) { + urls.add(img.attr("src")); + } + } + + return urls; + } + + @Override + protected void downloadURL(URL url, int index) { + String subdirectory = getSubDirectoryName(); + String prefix = getPrefix(++imageIndex); + + addURLToDownload(url, subdirectory, null, null, prefix, FILE_NAME, null, Boolean.TRUE); + } + + /* + * This function appends /full at the end of the chapters url to get all the images for the + * chapter in the same Document. + */ + private String getCompleteChapterUrl(String chapterUrl) { + if (!chapterUrl.endsWith("/full")) { + chapterUrl = chapterUrl + "/full"; + } + return chapterUrl; + } + + /* + * This functions returns sub folder name for the current chapter. + */ + private String getSubDirectoryName() { + String subDirectory = ""; + + if (urlType == UrlType.COMIC) { + Matcher m = p2.matcher(chaptersList.get(chapterIndex)); + if (m.matches()) { + subDirectory = m.group(2); + } + } + + if (urlType == UrlType.CHAPTER) { + Matcher m = p2.matcher(url.toExternalForm()); + if (m.matches()) { + subDirectory = m.group(2); + } + } + + return subDirectory; + } + + /* + * Enum to classify different types of urls. 
+ */ + private enum UrlType { + COMIC, CHAPTER, UNKNOWN + } +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/CoomerPartyRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/CoomerPartyRipper.java new file mode 100644 index 00000000..f990ae66 --- /dev/null +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/CoomerPartyRipper.java @@ -0,0 +1,180 @@ +package com.rarchives.ripme.ripper.rippers; +import com.rarchives.ripme.ripper.AbstractJSONRipper; +import com.rarchives.ripme.utils.Http; +import com.rarchives.ripme.utils.Utils; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.json.JSONArray; +import org.json.JSONException; +import org.json.JSONObject; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +/** + * See this link for the API schema. 
+ */ +public class CoomerPartyRipper extends AbstractJSONRipper { + private static final Logger LOGGER = LogManager.getLogger(CoomerPartyRipper.class); + private static final String IMG_URL_BASE = "https://c3.coomer.su/data"; + private static final String VID_URL_BASE = "https://c1.coomer.su/data"; + private static final Pattern IMG_PATTERN = Pattern.compile("^.*\\.(jpg|jpeg|png|gif|apng|webp|tif|tiff)$", Pattern.CASE_INSENSITIVE); + private static final Pattern VID_PATTERN = Pattern.compile("^.*\\.(webm|mp4|m4v)$", Pattern.CASE_INSENSITIVE); + + // just so we can return a JSONObject from getFirstPage + private static final String KEY_WRAPPER_JSON_ARRAY = "array"; + + private static final String KEY_FILE = "file"; + private static final String KEY_PATH = "path"; + private static final String KEY_ATTACHMENTS = "attachments"; + + // Posts Request Endpoint + private static final String POSTS_ENDPOINT = "https://coomer.su/api/v1/%s/user/%s?o=%d"; + + // Pagination is strictly 50 posts per page, per API schema. 
+ private Integer pageCount = 0; + private static final Integer postCount = 50; + + // "Service" of the page to be ripped: Onlyfans, Fansly, Candfans + private final String service; + + // Username of the page to be ripped + private final String user; + + + + public CoomerPartyRipper(URL url) throws IOException { + super(url); + List pathElements = Arrays.stream(url.getPath().split("/")) + .filter(element -> !element.isBlank()) + .collect(Collectors.toList()); + + service = pathElements.get(0); + user = pathElements.get(2); + + if (service == null || user == null || service.isBlank() || user.isBlank()) { + LOGGER.warn("service=" + service + ", user=" + user); + throw new MalformedURLException("Invalid coomer.party URL: " + url); + } + LOGGER.debug("Parsed service=" + service + " and user=" + user + " from " + url); + } + + @Override + protected String getDomain() { + return "coomer.party"; + } + + @Override + public String getHost() { + return "coomer.party"; + } + + @Override + public boolean canRip(URL url) { + String host = url.getHost(); + return host.endsWith("coomer.party") || host.endsWith("coomer.su"); + } + + @Override + public String getGID(URL url) { + return Utils.filesystemSafe(String.format("%s_%s", service, user)); + } + + private JSONObject getJsonPostsForOffset(Integer offset) throws IOException { + String apiUrl = String.format(POSTS_ENDPOINT, service, user, offset); + + String jsonArrayString = Http.url(apiUrl) + .ignoreContentType() + .response() + .body(); + JSONArray jsonArray = new JSONArray(jsonArrayString); + + // Ideally we'd just return the JSONArray from here, but we have to wrap it in a JSONObject + JSONObject wrapperObject = new JSONObject(); + wrapperObject.put(KEY_WRAPPER_JSON_ARRAY, jsonArray); + return wrapperObject; + } + + @Override + protected JSONObject getFirstPage() throws IOException { + return getJsonPostsForOffset(0); + } + + @Override + protected JSONObject getNextPage(JSONObject doc) throws IOException, 
URISyntaxException { + pageCount++; + Integer offset = postCount * pageCount; + return getJsonPostsForOffset(offset); + } + + + @Override + protected List getURLsFromJSON(JSONObject json) { + // extract the array from our wrapper JSONObject + JSONArray posts = json.getJSONArray(KEY_WRAPPER_JSON_ARRAY); + ArrayList urls = new ArrayList<>(); + for (int i = 0; i < posts.length(); i++) { + JSONObject post = posts.getJSONObject(i); + pullFileUrl(post, urls); + pullAttachmentUrls(post, urls); + } + LOGGER.debug("Pulled " + urls.size() + " URLs from " + posts.length() + " posts"); + return urls; + } + + @Override + protected void downloadURL(URL url, int index) { + addURLToDownload(url, getPrefix(index)); + } + + private void pullFileUrl(JSONObject post, ArrayList results) { + try { + JSONObject file = post.getJSONObject(KEY_FILE); + String path = file.getString(KEY_PATH); + if (isImage(path)) { + String url = IMG_URL_BASE + path; + results.add(url); + } else if (isVideo(path)) { + String url = VID_URL_BASE + path; + results.add(url); + } else { + LOGGER.error("Unknown extension for coomer.su path: " + path); + } + } catch (JSONException e) { + /* No-op */ + LOGGER.error("Unable to Parse FileURL " + e.getMessage()); + } + } + + private void pullAttachmentUrls(JSONObject post, ArrayList results) { + try { + JSONArray attachments = post.getJSONArray(KEY_ATTACHMENTS); + for (int i = 0; i < attachments.length(); i++) { + JSONObject attachment = attachments.getJSONObject(i); + pullFileUrl(attachment, results); + } + } catch (JSONException e) { + /* No-op */ + LOGGER.error("Unable to Parse AttachmentURL " + e.getMessage()); + } + } + + private boolean isImage(String path) { + Matcher matcher = IMG_PATTERN.matcher(path); + return matcher.matches(); + } + + private boolean isVideo(String path) { + Matcher matcher = VID_PATTERN.matcher(path); + return matcher.matches(); + } +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/CyberdropRipper.java 
b/src/main/java/com/rarchives/ripme/ripper/rippers/CyberdropRipper.java new file mode 100644 index 00000000..81a39823 --- /dev/null +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/CyberdropRipper.java @@ -0,0 +1,55 @@ +package com.rarchives.ripme.ripper.rippers; + +import java.io.IOException; +import java.net.*; +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import com.rarchives.ripme.ripper.AbstractHTMLRipper; +import com.rarchives.ripme.utils.Http; +import org.jsoup.nodes.Document; +import org.jsoup.nodes.Element; + +public class CyberdropRipper extends AbstractHTMLRipper { + + public CyberdropRipper(URL url) throws IOException { + super(url); + } + + @Override + public String getHost() { + return "cyberdrop"; + } + + @Override + public String getDomain() { + return "cyberdrop.me"; + } + + @Override + public String getGID(URL url) throws MalformedURLException { + Pattern p = Pattern.compile("^https?://cyberdrop\\.me/a/([a-zA-Z0-9]+).*?$"); + Matcher m = p.matcher(url.toExternalForm()); + if (m.matches()) { + return m.group(1); + } + throw new MalformedURLException("Expected cyberdrop.me URL format: " + + "https://cyberdrop.me/a/xxxxxxxx - got " + url + "instead"); + } + + @Override + public void downloadURL(URL url, int index) { + addURLToDownload(url, getPrefix(index)); + } + + @Override + protected List getURLsFromPage(Document page) { + ArrayList urls = new ArrayList<>(); + for (Element element: page.getElementsByClass("image")) { + urls.add(element.attr("href")); + } + return urls; + } +} \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/DanbooruRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/DanbooruRipper.java new file mode 100644 index 00000000..9496bb57 --- /dev/null +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/DanbooruRipper.java @@ -0,0 +1,148 @@ +package com.rarchives.ripme.ripper.rippers; + +import 
com.rarchives.ripme.ripper.AbstractJSONRipper; +import com.rarchives.ripme.utils.Http; +import com.rarchives.ripme.utils.Utils; +import okhttp3.OkHttpClient; +import okhttp3.Request; +import okhttp3.Response; +import org.jetbrains.annotations.Nullable; +import org.json.JSONArray; +import org.json.JSONObject; +import org.jsoup.Connection; + +import java.io.BufferedReader; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.concurrent.TimeUnit; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.zip.GZIPInputStream; + +public class DanbooruRipper extends AbstractJSONRipper { + private static final String DOMAIN = "danbooru.donmai.us", + HOST = "danbooru"; + private final OkHttpClient client; + + private Pattern gidPattern = null; + + private int currentPageNum = 1; + + public DanbooruRipper(URL url) throws IOException { + super(url); + this.client = new OkHttpClient.Builder() + .readTimeout(60, TimeUnit.SECONDS) + .writeTimeout(60, TimeUnit.SECONDS) + .build(); + } + + @Override + protected String getDomain() { + return DOMAIN; + } + + @Override + public String getHost() { + return HOST; + } + + private String getPage(int num) throws MalformedURLException { + return "https://" + getDomain() + "/posts.json?page=" + num + "&tags=" + getTag(url); + } + + private final String userAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0"; + @Override + protected JSONObject getFirstPage() throws MalformedURLException { + return getCurrentPage(); + } + + @Override + protected JSONObject getNextPage(JSONObject doc) throws IOException { + return getCurrentPage(); + } + + @Nullable + private JSONObject getCurrentPage() throws 
MalformedURLException { + Request request = new Request.Builder() + .url(getPage(currentPageNum)) + .header("User-Agent", "Mozilla/5.0 (iPhone; CPU iPhone OS 15_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.0 Mobile/15E148 Safari/604.1") + .header("Accept", "application/json,text/javascript,*/*;q=0.01") + .header("Accept-Language", "en-US,en;q=0.9") + .header("Sec-Fetch-Dest", "empty") + .header("Sec-Fetch-Mode", "cors") + .header("Sec-Fetch-Site", "same-origin") + .header("Referer", "https://danbooru.donmai.us/") + .header("X-Requested-With", "XMLHttpRequest") + .header("Connection", "keep-alive") + .build(); + Response response = null; + currentPageNum++; + try { + response = client.newCall(request).execute(); + if (!response.isSuccessful()) throw new IOException("Unexpected code " + response); + + String responseData = response.body().string(); + JSONArray jsonArray = new JSONArray(responseData); + if(!jsonArray.isEmpty()){ + String newCompatibleJSON = "{ \"resources\":" + jsonArray + " }"; + return new JSONObject(newCompatibleJSON); + } + } catch (IOException e) { + e.printStackTrace(); + } finally { + if(response !=null) { + response.body().close(); + } + } + return null; + } + + @Override + protected List getURLsFromJSON(JSONObject json) { + List res = new ArrayList<>(100); + JSONArray jsonArray = json.getJSONArray("resources"); + for (int i = 0; i < jsonArray.length(); i++) { + if (jsonArray.getJSONObject(i).has("file_url")) { + res.add(jsonArray.getJSONObject(i).getString("file_url")); + } + } + return res; + } + + @Override + public String getGID(URL url) throws MalformedURLException { + try { + return Utils.filesystemSafe(new URI(getTag(url).replaceAll("([?&])tags=", "")).getPath()); + } catch (URISyntaxException ex) { + LOGGER.error(ex); + } + + throw new MalformedURLException("Expected booru URL format: " + getDomain() + "/posts?tags=searchterm - got " + url + " instead"); + } + + @Override + protected void downloadURL(URL url, 
int index) { + addURLToDownload(url, getPrefix(index)); + } + + private String getTag(URL url) throws MalformedURLException { + gidPattern = Pattern.compile("https?://danbooru.donmai.us/(posts)?.*([?&]tags=([^&]*)(?:&z=([0-9]+))?$)"); + Matcher m = gidPattern.matcher(url.toExternalForm()); + + if (m.matches()) { + return m.group(3); + } + + throw new MalformedURLException("Expected danbooru URL format: " + getDomain() + "/posts?tags=searchterm - got " + url + " instead"); + } + +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/DerpiRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/DerpiRipper.java index 1feaf692..a0538614 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/DerpiRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/DerpiRipper.java @@ -2,6 +2,8 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -37,7 +39,7 @@ public class DerpiRipper extends AbstractJSONRipper { } @Override - public URL sanitizeURL(URL url) throws MalformedURLException { + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { String u = url.toExternalForm(); String[] uu = u.split("\\?", 2); String newU = uu[0]; @@ -54,7 +56,7 @@ public class DerpiRipper extends AbstractJSONRipper { newU += "&key=" + key; } - return new URL(newU); + return new URI(newU).toURL(); } @Override @@ -99,10 +101,10 @@ public class DerpiRipper extends AbstractJSONRipper { } @Override - public JSONObject getNextPage(JSONObject doc) throws IOException { + public JSONObject getNextPage(JSONObject doc) throws IOException, URISyntaxException { currPage++; String u = currUrl.toExternalForm() + "&page=" + Integer.toString(currPage); - JSONObject json = Http.url(new URL(u)).getJSON(); + JSONObject json = Http.url(new URI(u).toURL()).getJSON(); 
JSONArray arr; if (json.has("images")) { arr = json.getJSONArray("images"); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/DeviantartRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/DeviantartRipper.java index 99374ad1..9f26a268 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/DeviantartRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/DeviantartRipper.java @@ -13,12 +13,13 @@ import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.io.Serializable; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.Arrays; import java.util.Base64; import java.util.HashMap; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.regex.Matcher; @@ -28,7 +29,6 @@ import org.jsoup.Connection; import org.jsoup.Connection.Method; import org.jsoup.Connection.Response; import org.jsoup.HttpStatusException; -import org.jsoup.Jsoup; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; import org.jsoup.select.Elements; @@ -383,11 +383,11 @@ public class DeviantartRipper extends AbstractHTMLRipper { try { String url = cleanURL(); if (this.usingCatPath) { - return (new URL(url + "?catpath=/&offset=" + offset)); + return (new URI(url + "?catpath=/&offset=" + offset)).toURL(); } else { - return (new URL(url + "?offset=" + offset)); + return (new URI(url + "?offset=" + offset).toURL()); } - } catch (MalformedURLException e) { + } catch (MalformedURLException | URISyntaxException e) { e.printStackTrace(); } return null; @@ -518,8 +518,8 @@ public class DeviantartRipper extends AbstractHTMLRipper { * @author MrPlaygon * */ - private class DeviantartImageThread extends Thread { - private URL url; + private class DeviantartImageThread implements Runnable { + private final URL url; public DeviantartImageThread(URL url) { this.url = url; @@ -533,8 +533,6 @@ public 
class DeviantartRipper extends AbstractHTMLRipper { /** * Get URL to Artwork and return fullsize URL with file ending. * - * @param page Like - * https://www.deviantart.com/apofiss/art/warmest-of-the-days-455668450 * @return URL like * https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/intermediary/f/07f7a6bb-2d35-4630-93fc-be249af22b3e/d7jak0y-d20e5932-df72-4d13-b002-5e122037b373.jpg * @@ -630,11 +628,11 @@ public class DeviantartRipper extends AbstractHTMLRipper { } String[] tmpParts = downloadString.split("\\."); //split to get file ending - addURLToDownload(new URL(downloadString), "", "", "", new HashMap(), + addURLToDownload(new URI(downloadString).toURL(), "", "", "", new HashMap(), title + "." + tmpParts[tmpParts.length - 1]); return; - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { e.printStackTrace(); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/DrawcrowdRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/DrawcrowdRipper.java deleted file mode 100644 index 521bc7c4..00000000 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/DrawcrowdRipper.java +++ /dev/null @@ -1,91 +0,0 @@ -package com.rarchives.ripme.ripper.rippers; - -import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.ArrayList; -import java.util.List; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; -import org.jsoup.select.Elements; - -import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.utils.Http; - -public class DrawcrowdRipper extends AbstractHTMLRipper { - - public DrawcrowdRipper(URL url) throws IOException { - super(url); - } - - @Override - public String getHost() { - return "drawcrowd"; - } - @Override - public String getDomain() { - return "drawcrowd.com"; - } - - @Override - public String getGID(URL url) throws MalformedURLException { - Pattern p; 
Matcher m; - - p = Pattern.compile("^.*drawcrowd.com/projects/.*$"); - m = p.matcher(url.toExternalForm()); - if (m.matches()) { - throw new MalformedURLException("Cannot rip drawcrowd.com/projects/ pages"); - } - - p = Pattern.compile("^.*drawcrowd.com/([a-zA-Z0-9\\-_]+).*$"); - m = p.matcher(url.toExternalForm()); - if (m.matches()) { - return m.group(1); - } - - throw new MalformedURLException( - "Expected drawcrowd.com gallery format: " - + "drawcrowd.com/username" - + " Got: " + url); - } - - @Override - public Document getFirstPage() throws IOException { - return Http.url(this.url).get(); - } - - @Override - public Document getNextPage(Document doc) throws IOException { - Elements loadMore = doc.select("a#load-more"); - if (loadMore.isEmpty()) { - throw new IOException("No next page found"); - } - if (!sleep(1000)) { - throw new IOException("Interrupted while waiting for next page"); - } - String nextPage = "http://drawcrowd.com" + loadMore.get(0).attr("href"); - return Http.url(nextPage).get(); - } - - @Override - public List getURLsFromPage(Document page) { - List imageURLs = new ArrayList<>(); - for (Element thumb : page.select("div.item.asset img")) { - String image = thumb.attr("src"); - image = image - .replaceAll("/medium/", "/large/") - .replaceAll("/small/", "/large/"); - imageURLs.add(image); - } - return imageURLs; - } - - @Override - public void downloadURL(URL url, int index) { - addURLToDownload(url, getPrefix(index)); - } - -} \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/DribbbleRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/DribbbleRipper.java index dc8cd77e..c463f5a8 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/DribbbleRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/DribbbleRipper.java @@ -41,10 +41,6 @@ public class DribbbleRipper extends AbstractHTMLRipper { "dribbble.com/albumid - got " + url + "instead"); } - @Override - public Document 
getFirstPage() throws IOException { - return Http.url(url).get(); - } @Override public Document getNextPage(Document doc) throws IOException { // Find next page diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/DuckmoviesRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/DuckmoviesRipper.java deleted file mode 100644 index 48c1856c..00000000 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/DuckmoviesRipper.java +++ /dev/null @@ -1,139 +0,0 @@ -package com.rarchives.ripme.ripper.rippers; - -import com.rarchives.ripme.ripper.AbstractRipper; -import com.rarchives.ripme.ripper.AbstractSingleFileRipper; -import com.rarchives.ripme.utils.Http; -import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; -import org.jsoup.select.Elements; - -import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -public class DuckmoviesRipper extends AbstractSingleFileRipper { - public DuckmoviesRipper(URL url) throws IOException { - super(url); - } - - @Override - public boolean hasQueueSupport() { - return true; - } - - @Override - public boolean pageContainsAlbums(URL url) { - Pattern pa = Pattern.compile("https?://[a-zA-Z0-9]+.[a-zA-Z]+/(models|category)/([a-zA-Z0-9_-])+/?"); - Matcher ma = pa.matcher(url.toExternalForm()); - if (ma.matches()) { - return true; - } - pa = Pattern.compile("https?://[a-zA-Z0-9]+.[a-zA-Z]+/(models|category)/([a-zA-Z0-9_-])+/page/\\d+/?"); - ma = pa.matcher(url.toExternalForm()); - if (ma.matches()) { - return true; - } - return false; - } - - @Override - public List getAlbumsToQueue(Document doc) { - List urlsToAddToQueue = new ArrayList<>(); - for (Element elem : doc.select(".post > li > div > div > a")) { - urlsToAddToQueue.add(elem.attr("href")); - } - return urlsToAddToQueue; - } - - - private static List explicit_domains = 
Arrays.asList( - "vidporntube.fun", - "pornbj.fun", - "iwantporn.fun", - "neoporn.fun", - "yayporn.fun", - "freshporn.co", - "palapaja.stream", - "freshporn.co", - "pornvidx.fun", - "palapaja.com" - ); - - @Override - public String getHost() { - return url.toExternalForm().split("/")[2]; - } - - @Override - public String getDomain() { - return url.toExternalForm().split("/")[2]; - } - - @Override - public boolean canRip(URL url) { - String url_name = url.toExternalForm(); - return explicit_domains.contains(url_name.split("/")[2]); - } - - @Override - public Document getFirstPage() throws IOException { - return Http.url(this.url).get(); - } - - @Override - public List getURLsFromPage(Document doc) { - List results = new ArrayList<>(); - String duckMoviesUrl = doc.select("iframe").attr("src"); - try { - Document duckDoc = Http.url(new URL(duckMoviesUrl)).get(); - String videoURL = duckDoc.select("source").attr("src"); - // remove any white spaces so we can download the movie without a 400 error - videoURL = videoURL.replaceAll(" ", "%20"); - results.add(videoURL); - } catch (MalformedURLException e) { - LOGGER.error(duckMoviesUrl + " is not a valid url"); - } catch (IOException e) { - LOGGER.error("Unable to load page " + duckMoviesUrl); - e.printStackTrace(); - } - return results; - } - - - @Override - public String getGID(URL url) throws MalformedURLException { - Pattern p = Pattern.compile("https://[a-zA-Z0-9]+\\.[a-zA-Z]+/([a-zA-Z0-9\\-_]+)/?"); - Matcher m = p.matcher(url.toExternalForm()); - if (m.matches()) { - return m.group(1); - } - p = Pattern.compile("https?://[a-zA-Z0-9]+.[a-zA-Z]+/(category|models)/([a-zA-Z0-9_-])+/?"); - m = p.matcher(url.toExternalForm()); - if (m.matches()) { - return m.group(1); - } - p = Pattern.compile("https?://[a-zA-Z0-9]+.[a-zA-Z]+/(category|models)/([a-zA-Z0-9_-])+/page/\\d+"); - m = p.matcher(url.toExternalForm()); - if (m.matches()) { - return m.group(1); - } - - throw new MalformedURLException( - "Expected duckmovies 
format:" - + "domain.tld/Video-title" - + " Got: " + url); - } - - @Override - public void downloadURL(URL url, int index) { - addURLToDownload(url, "", "", null, null, AbstractRipper.getFileName(url, null, null).replaceAll("%20", "_")); - } - - @Override - public boolean tryResumeDownload() {return true;} -} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/DynastyscansRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/DynastyscansRipper.java index 37d3ad93..f8eaa72d 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/DynastyscansRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/DynastyscansRipper.java @@ -42,12 +42,6 @@ public class DynastyscansRipper extends AbstractHTMLRipper { "dynasty-scans.com/chapters/ID - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public Document getNextPage(Document doc) throws IOException { Element elem = doc.select("a[id=next_link]").first(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/E621Ripper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/E621Ripper.java index 534a1d0d..1d29a736 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/E621Ripper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/E621Ripper.java @@ -3,32 +3,76 @@ package com.rarchives.ripme.ripper.rippers; import com.rarchives.ripme.ripper.AbstractHTMLRipper; import com.rarchives.ripme.ripper.DownloadThreadPool; import com.rarchives.ripme.utils.Http; +import com.rarchives.ripme.utils.RipUtils; import com.rarchives.ripme.utils.Utils; +import com.rarchives.ripme.ui.RipStatusMessage; +import com.rarchives.ripme.ui.RipStatusMessage.STATUS; + import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; +import 
java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.apache.log4j.Logger; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; import org.jsoup.select.Elements; + + public class E621Ripper extends AbstractHTMLRipper { - private static final Logger logger = Logger.getLogger(E621Ripper.class); + private static final Logger logger = LogManager.getLogger(E621Ripper.class); private static Pattern gidPattern = null; private static Pattern gidPattern2 = null; private static Pattern gidPatternPool = null; + private static Pattern gidPatternNew = null; + private static Pattern gidPatternPoolNew = null; + private DownloadThreadPool e621ThreadPool = new DownloadThreadPool("e621"); + private Map cookies = new HashMap(); + private String userAgent = USER_AGENT; + public E621Ripper(URL url) throws IOException { super(url); } + private void loadConfig() { + String cookiesString = Utils.getConfigString("e621.cookies", ""); + if(!cookiesString.equals("")) { + cookies = RipUtils.getCookiesFromString(cookiesString); + if(cookies.containsKey("cf_clearance")) + sendUpdate(STATUS.DOWNLOAD_WARN, "Using CloudFlare captcha cookies, make sure to update them and set your browser's useragent in config!"); + if(cookies.containsKey("remember")) + sendUpdate(STATUS.DOWNLOAD_WARN, "Logging in using auth cookie."); + } + userAgent = Utils.getConfigString("e621.useragent", USER_AGENT); + + } + + private void warnAboutBlacklist(Document page) { + if(!page.select("div.hidden-posts-notice").isEmpty()) + sendUpdate(STATUS.DOWNLOAD_WARN, "Some posts are blacklisted. Consider logging in. 
Search for \"e621\" in this wiki page: https://github.com/RipMeApp/ripme/wiki/Config-options"); + } + + private Document getDocument(String url, int retries) throws IOException { + return Http.url(url).userAgent(userAgent).retries(retries).cookies(cookies).get(); + } + + private Document getDocument(String url) throws IOException { + return getDocument(url, 1); + } + @Override public DownloadThreadPool getThreadPool() { return e621ThreadPool; @@ -46,15 +90,20 @@ public class E621Ripper extends AbstractHTMLRipper { @Override public Document getFirstPage() throws IOException { - if (url.getPath().startsWith("/pool/show/")) - return Http.url("https://e621.net/pool/show/" + getTerm(url)).get(); + loadConfig(); + Document page; + if (url.getPath().startsWith("/pool")) + page = getDocument("https://e621.net/pools/" + getTerm(url)); else - return Http.url("https://e621.net/post/index/1/" + getTerm(url)).get(); + page = getDocument("https://e621.net/posts?tags=" + getTerm(url)); + + warnAboutBlacklist(page); + return page; } @Override public List getURLsFromPage(Document page) { - Elements elements = page.select("div > span.thumb > a"); + Elements elements = page.select("article > a"); List res = new ArrayList<>(); for (Element e : elements) { @@ -68,8 +117,9 @@ public class E621Ripper extends AbstractHTMLRipper { @Override public Document getNextPage(Document page) throws IOException { - if (!page.select("a.next_page").isEmpty()) { - return Http.url(page.select("a.next_page").attr("abs:href")).get(); + warnAboutBlacklist(page); + if (!page.select("a#paginator-next").isEmpty()) { + return getDocument(page.select("a#paginator-next").attr("abs:href")); } else { throw new IOException("No more pages."); } @@ -82,12 +132,19 @@ public class E621Ripper extends AbstractHTMLRipper { } private String getTerm(URL url) throws MalformedURLException { + // old url style => new url style: + // /post/index/1/ => /posts?tags= + // /pool/show/ => /pools/id if (gidPattern == null) gidPattern 
= Pattern.compile( "^https?://(www\\.)?e621\\.net/post/index/[^/]+/([a-zA-Z0-9$_.+!*'():,%\\-]+)(/.*)?(#.*)?$"); if (gidPatternPool == null) gidPatternPool = Pattern.compile( "^https?://(www\\.)?e621\\.net/pool/show/([a-zA-Z0-9$_.+!*'(),%:\\-]+)(\\?.*)?(/.*)?(#.*)?$"); + if (gidPatternNew == null) + gidPatternNew = Pattern.compile("^https?://(www\\.)?e621\\.net/posts\\?([\\S]*?)tags=([a-zA-Z0-9$_.+!*'(),%:\\-]+)(\\&[\\S]+)?"); + if (gidPatternPoolNew == null) + gidPatternPoolNew = Pattern.compile("^https?://(www\\.)?e621\\.net/pools/([\\d]+)(\\?[\\S]*)?"); Matcher m = gidPattern.matcher(url.toExternalForm()); if (m.matches()) { @@ -100,36 +157,48 @@ public class E621Ripper extends AbstractHTMLRipper { return m.group(2); } + m = gidPatternNew.matcher(url.toExternalForm()); + if (m.matches()) { + LOGGER.info(m.group(3)); + return m.group(3); + } + + m = gidPatternPoolNew.matcher(url.toExternalForm()); + if (m.matches()) { + LOGGER.info(m.group(2)); + return m.group(2); + } + throw new MalformedURLException( - "Expected e621.net URL format: e621.net/post/index/1/searchterm - got " + url + " instead"); + "Expected e621.net URL format: e621.net/posts?tags=searchterm - got " + url + " instead"); } @Override public String getGID(URL url) throws MalformedURLException { String prefix = ""; - if (url.getPath().startsWith("/pool/show/")) { + if (url.getPath().startsWith("/pool")) { prefix = "pool_"; } return Utils.filesystemSafe(prefix + getTerm(url)); } @Override - public URL sanitizeURL(URL url) throws MalformedURLException { + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { if (gidPattern2 == null) gidPattern2 = Pattern.compile( "^https?://(www\\.)?e621\\.net/post/search\\?tags=([a-zA-Z0-9$_.+!*'():,%-]+)(/.*)?(#.*)?$"); Matcher m = gidPattern2.matcher(url.toExternalForm()); if (m.matches()) - return new URL("https://e621.net/post/index/1/" + m.group(2).replace("+", "%20")); + return new URI("https://e621.net/post/index/1/" + 
m.group(2).replace("+", "%20")).toURL(); return url; } - public class E621FileThread extends Thread { + public class E621FileThread implements Runnable { - private URL url; - private String index; + private final URL url; + private final String index; public E621FileThread(URL url, String index) { this.url = url; @@ -141,16 +210,16 @@ public class E621Ripper extends AbstractHTMLRipper { try { String fullSizedImage = getFullSizedImage(url); if (fullSizedImage != null && !fullSizedImage.equals("")) { - addURLToDownload(new URL(fullSizedImage), index); + addURLToDownload(new URI(fullSizedImage).toURL(), index); } - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { logger.error("Unable to get full sized image from " + url); } } private String getFullSizedImage(URL imageURL) throws IOException { - Document page = Http.url(imageURL).retries(3).get(); - Elements video = page.select("video > source"); + Document page = getDocument(imageURL.toExternalForm(), 3); + /*Elements video = page.select("video > source"); Elements flash = page.select("embed"); Elements image = page.select("a#highres"); if (video.size() > 0) { @@ -161,8 +230,15 @@ public class E621Ripper extends AbstractHTMLRipper { return image.attr("href"); } else { throw new IOException(); - } + }*/ + if (!page.select("div#image-download-link > a").isEmpty()) { + return page.select("div#image-download-link > a").attr("abs:href"); + } else { + if(!page.select("#blacklist-box").isEmpty()) + sendUpdate(RipStatusMessage.STATUS.RIP_ERRORED, "Cannot download image - blocked by blacklist. Consider logging in. 
Search for \"e621\" in this wiki page: https://github.com/RipMeApp/ripme/wiki/Config-options"); + throw new IOException(); + } } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/EHentaiRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/EHentaiRipper.java index 3cdbae4e..cccfeb09 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/EHentaiRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/EHentaiRipper.java @@ -1,9 +1,23 @@ package com.rarchives.ripme.ripper.rippers; -import java.io.File; +import com.rarchives.ripme.ripper.AbstractHTMLRipper; +import com.rarchives.ripme.ripper.DownloadThreadPool; +import com.rarchives.ripme.ui.RipStatusMessage; +import com.rarchives.ripme.ui.RipStatusMessage.STATUS; +import com.rarchives.ripme.utils.Http; +import com.rarchives.ripme.utils.RipUtils; +import com.rarchives.ripme.utils.Utils; +import org.jsoup.nodes.Document; +import org.jsoup.nodes.Element; +import org.jsoup.select.Elements; + import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -11,46 +25,33 @@ import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; -import com.rarchives.ripme.ui.RipStatusMessage; -import com.rarchives.ripme.utils.RipUtils; -import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; -import org.jsoup.select.Elements; - -import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.ripper.DownloadThreadPool; -import com.rarchives.ripme.ui.RipStatusMessage.STATUS; -import com.rarchives.ripme.utils.Http; -import com.rarchives.ripme.utils.Utils; - public class EHentaiRipper extends AbstractHTMLRipper { // All sleep times are in milliseconds - private static final int PAGE_SLEEP_TIME = 3000; - private static final int 
IMAGE_SLEEP_TIME = 1500; - private static final int IP_BLOCK_SLEEP_TIME = 60 * 1000; + private static final int PAGE_SLEEP_TIME = 3000; + private static final int IMAGE_SLEEP_TIME = 1500; + private static final int IP_BLOCK_SLEEP_TIME = 60 * 1000; + private static final Map cookies = new HashMap<>(); - private String lastURL = null; - - // Thread pool for finding direct image links from "image" pages (html) - private DownloadThreadPool ehentaiThreadPool = new DownloadThreadPool("ehentai"); - @Override - public DownloadThreadPool getThreadPool() { - return ehentaiThreadPool; - } - - // Current HTML document - private Document albumDoc = null; - - private static final Map cookies = new HashMap<>(); static { cookies.put("nw", "1"); cookies.put("tip", "1"); } + private String lastURL = null; + // Thread pool for finding direct image links from "image" pages (html) + private final DownloadThreadPool ehentaiThreadPool = new DownloadThreadPool("ehentai"); + // Current HTML document + private Document albumDoc = null; + public EHentaiRipper(URL url) throws IOException { super(url); } + @Override + public DownloadThreadPool getThreadPool() { + return ehentaiThreadPool; + } + @Override public String getHost() { return "e-hentai"; @@ -61,7 +62,7 @@ public class EHentaiRipper extends AbstractHTMLRipper { return "e-hentai.org"; } - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { // Attempt to use album title as GID if (albumDoc == null) { @@ -93,12 +94,6 @@ public class EHentaiRipper extends AbstractHTMLRipper { + " Got: " + url); } - /** - * Attempts to get page, checks for IP ban, waits. 
- * @param url - * @return Page document - * @throws IOException If page loading errors, or if retries are exhausted - */ private Document getPageWithRetries(URL url) throws IOException { Document doc; int retries = 3; @@ -106,9 +101,9 @@ public class EHentaiRipper extends AbstractHTMLRipper { sendUpdate(STATUS.LOADING_RESOURCE, url.toExternalForm()); LOGGER.info("Retrieving " + url); doc = Http.url(url) - .referrer(this.url) - .cookies(cookies) - .get(); + .referrer(this.url) + .cookies(cookies) + .get(); if (doc.toString().contains("IP address will be automatically banned")) { if (retries == 0) { throw new IOException("Hit rate limit and maximum number of retries, giving up"); @@ -120,8 +115,7 @@ public class EHentaiRipper extends AbstractHTMLRipper { } catch (InterruptedException e) { throw new IOException("Interrupted while waiting for rate limit to subside"); } - } - else { + } else { return doc; } } @@ -155,7 +149,7 @@ public class EHentaiRipper extends AbstractHTMLRipper { } @Override - public Document getNextPage(Document doc) throws IOException { + public Document getNextPage(Document doc) throws IOException, URISyntaxException { // Check if we've stopped if (isStopped()) { throw new IOException("Ripping interrupted"); @@ -175,7 +169,7 @@ public class EHentaiRipper extends AbstractHTMLRipper { // Sleep before loading next page sleep(PAGE_SLEEP_TIME); // Load next page - Document nextPage = getPageWithRetries(new URL(nextURL)); + Document nextPage = getPageWithRetries(new URI(nextURL).toURL()); this.lastURL = nextURL; return nextPage; } @@ -183,7 +177,7 @@ public class EHentaiRipper extends AbstractHTMLRipper { @Override public List getURLsFromPage(Document page) { List imageURLs = new ArrayList<>(); - Elements thumbs = page.select("#gdt > .gdtm a"); + Elements thumbs = page.select("#gdt > a"); // Iterate over images on page for (Element thumb : thumbs) { imageURLs.add(thumb.attr("href")); @@ -193,27 +187,26 @@ public class EHentaiRipper extends 
AbstractHTMLRipper { @Override public void downloadURL(URL url, int index) { - EHentaiImageThread t = new EHentaiImageThread(url, index, this.workingDir); + EHentaiImageThread t = new EHentaiImageThread(url, index, this.workingDir.toPath()); ehentaiThreadPool.addThread(t); try { Thread.sleep(IMAGE_SLEEP_TIME); - } - catch (InterruptedException e) { + } catch (InterruptedException e) { LOGGER.warn("Interrupted while waiting to load next image", e); } } /** * Helper class to find and download images found on "image" pages - * + *

* Handles case when site has IP-banned the user. */ - private class EHentaiImageThread extends Thread { - private URL url; - private int index; - private File workingDir; + private class EHentaiImageThread implements Runnable { + private final URL url; + private final int index; + private final Path workingDir; - EHentaiImageThread(URL url, int index, File workingDir) { + EHentaiImageThread(URL url, int index, Path workingDir) { super(); this.url = url; this.index = index; @@ -246,22 +239,21 @@ public class EHentaiRipper extends AbstractHTMLRipper { Matcher m = p.matcher(imgsrc); if (m.matches()) { // Manually discover filename from URL - String savePath = this.workingDir + File.separator; + String savePath = this.workingDir + "/"; if (Utils.getConfigBoolean("download.save_order", true)) { savePath += String.format("%03d_", index); } savePath += m.group(1); - addURLToDownload(new URL(imgsrc), new File(savePath)); - } - else { + addURLToDownload(new URI(imgsrc).toURL(), Paths.get(savePath)); + } else { // Provide prefix and let the AbstractRipper "guess" the filename String prefix = ""; if (Utils.getConfigBoolean("download.save_order", true)) { prefix = String.format("%03d_", index); } - addURLToDownload(new URL(imgsrc), prefix); + addURLToDownload(new URI(imgsrc).toURL(), prefix); } - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { LOGGER.error("[!] 
Exception while loading/parsing " + this.url, e); } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java index 22968216..7cfd568f 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java @@ -1,8 +1,7 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; +import java.net.*; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -10,8 +9,6 @@ import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; -import com.rarchives.ripme.utils.Utils; -import org.json.JSONObject; import org.jsoup.Connection.Response; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; @@ -23,13 +20,7 @@ import com.rarchives.ripme.utils.Http; public class EightmusesRipper extends AbstractHTMLRipper { - private Document albumDoc = null; - private Map cookies = new HashMap<>(); - // TODO put up a wiki page on using maps to store titles - // the map for storing the title of each album when downloading sub albums - private Map urlTitles = new HashMap<>(); - - private Boolean rippingSubalbums = false; + private Map cookies = new HashMap<>(); public EightmusesRipper(URL url) throws IOException { super(url); @@ -61,10 +52,10 @@ public class EightmusesRipper extends AbstractHTMLRipper { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { // Attempt to use album title as GID - Element titleElement = getFirstPage().select("meta[name=description]").first(); + Element titleElement = getCachedFirstPage().select("meta[name=description]").first(); String title = titleElement.attr("content"); title = title.replace("A huge collection of 
free porn comics for adults. Read", ""); title = title.replace("online for free at 8muses.com", ""); @@ -78,21 +69,18 @@ public class EightmusesRipper extends AbstractHTMLRipper { @Override public Document getFirstPage() throws IOException { - if (albumDoc == null) { - Response resp = Http.url(url).response(); - cookies.putAll(resp.cookies()); - albumDoc = resp.parse(); - } - return albumDoc; + Response resp = Http.url(url).response(); + cookies.putAll(resp.cookies()); + return resp.parse(); } @Override public List getURLsFromPage(Document page) { List imageURLs = new ArrayList<>(); - int x = 1; // This contains the thumbnails of all images on the page Elements pageImages = page.getElementsByClass("c-tile"); - for (Element thumb : pageImages) { + for (int i = 0; i < pageImages.size(); i++) { + Element thumb = pageImages.get(i); // If true this link is a sub album if (thumb.attr("href").contains("/comics/album/")) { String subUrl = "https://www.8muses.com" + thumb.attr("href"); @@ -116,24 +104,14 @@ public class EightmusesRipper extends AbstractHTMLRipper { if (thumb.hasAttr("data-cfsrc")) { image = thumb.attr("data-cfsrc"); } else { - // Deobfustace the json data - String rawJson = deobfuscateJSON(page.select("script#ractive-public").html() - .replaceAll(">", ">").replaceAll("<", "<").replace("&", "&")); - JSONObject json = new JSONObject(rawJson); + Element imageElement = thumb.select("img").first(); + image = "https://comics.8muses.com" + imageElement.attr("data-src").replace("/th/", "/fl/"); try { - for (int i = 0; i != json.getJSONArray("pictures").length(); i++) { - image = "https://www.8muses.com/image/fl/" + json.getJSONArray("pictures").getJSONObject(i).getString("publicUri"); - URL imageUrl = new URL(image); - addURLToDownload(imageUrl, getPrefixShort(x), getSubdir(page.select("title").text()), this.url.toExternalForm(), cookies, "", null, true); - // X is our page index - x++; - if (isThisATest()) { - break; - } - } - return imageURLs; - } catch 
(MalformedURLException e) { + URL imageUrl = new URI(image).toURL(); + addURLToDownload(imageUrl, getSubdir(page.select("title").text()), this.url.toExternalForm(), cookies, getPrefixShort(i), "", null, true); + } catch (MalformedURLException | URISyntaxException e) { LOGGER.error("\"" + image + "\" is malformed"); + LOGGER.error(e.getMessage()); } } if (!image.contains("8muses.com")) { @@ -173,25 +151,4 @@ public class EightmusesRipper extends AbstractHTMLRipper { public String getPrefixShort(int index) { return String.format("%03d", index); } - - private String deobfuscateJSON(String obfuscatedString) { - StringBuilder deobfuscatedString = new StringBuilder(); - // The first char in one of 8muses obfuscated strings is always ! so we replace it - for (char ch : obfuscatedString.replaceFirst("!", "").toCharArray()){ - deobfuscatedString.append(deobfuscateChar(ch)); - } - return deobfuscatedString.toString(); - } - - private String deobfuscateChar(char c) { - if ((int) c == 32) { - return fromCharCode(32); - } - return fromCharCode(33 + (c + 14) % 94); - - } - - private static String fromCharCode(int... 
codePoints) { - return new String(codePoints, 0, codePoints.length); - } } \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java index d64e9600..0f77e03c 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java @@ -7,6 +7,8 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -93,11 +95,11 @@ public class EroShareRipper extends AbstractHTMLRipper { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { if (!is_profile(url)) { try { // Attempt to use album title as GID - Element titleElement = getFirstPage().select("meta[property=og:title]").first(); + Element titleElement = getCachedFirstPage().select("meta[property=og:title]").first(); String title = titleElement.attr("content"); title = title.substring(title.lastIndexOf('/') + 1); return getHost() + "_" + getGID(url) + "_" + title.trim(); @@ -119,7 +121,6 @@ public class EroShareRipper extends AbstractHTMLRipper { for (Element img : imgs) { if (img.hasClass("album-image")) { String imageURL = img.attr("src"); - imageURL = imageURL; URLs.add(imageURL); } } @@ -195,7 +196,7 @@ public class EroShareRipper extends AbstractHTMLRipper { throw new MalformedURLException("eroshare album not found in " + url + ", expected https://eroshare.com/album or eroshae.com/album"); } - public static List getURLs(URL url) throws IOException{ + public static List getURLs(URL url) throws IOException, URISyntaxException { Response resp = Http.url(url) .ignoreContentType() @@ -209,7 +210,7 @@ public class 
EroShareRipper extends AbstractHTMLRipper { for (Element img : imgs) { if (img.hasClass("album-image")) { String imageURL = img.attr("src"); - URLs.add(new URL(imageURL)); + URLs.add(new URI(imageURL).toURL()); } } //Videos @@ -218,7 +219,7 @@ public class EroShareRipper extends AbstractHTMLRipper { if (vid.hasClass("album-video")) { Elements source = vid.getElementsByTag("source"); String videoURL = source.first().attr("src"); - URLs.add(new URL(videoURL)); + URLs.add(new URI(videoURL).toURL()); } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ErofusRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ErofusRipper.java index dc535dea..95528470 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ErofusRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ErofusRipper.java @@ -9,6 +9,8 @@ import org.jsoup.select.Elements; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.HashMap; @@ -48,11 +50,6 @@ public class ErofusRipper extends AbstractHTMLRipper { return m.group(m.groupCount()); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document page) { LOGGER.info(page); @@ -94,8 +91,8 @@ public class ErofusRipper extends AbstractHTMLRipper { Map opts = new HashMap(); opts.put("subdirectory", page.title().replaceAll(" \\| Erofus - Sex and Porn Comics", "").replaceAll(" ", "_")); opts.put("prefix", getPrefix(x)); - addURLToDownload(new URL(image), opts); - } catch (MalformedURLException e) { + addURLToDownload(new URI(image).toURL(), opts); + } catch (MalformedURLException | URISyntaxException e) { LOGGER.info(e.getMessage()); } x++; diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/EromeRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/EromeRipper.java index 
9b586b9a..3035d746 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/EromeRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/EromeRipper.java @@ -2,16 +2,19 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; +import com.rarchives.ripme.utils.Utils; import org.jsoup.Connection.Response; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; -import org.jsoup.select.Elements; import com.rarchives.ripme.ripper.AbstractHTMLRipper; import com.rarchives.ripme.utils.Http; @@ -23,7 +26,7 @@ import com.rarchives.ripme.utils.Http; public class EromeRipper extends AbstractHTMLRipper { boolean rippingProfile; - + private HashMap cookies = new HashMap<>(); public EromeRipper (URL url) throws IOException { super(url); @@ -31,17 +34,17 @@ public class EromeRipper extends AbstractHTMLRipper { @Override public String getDomain() { - return "erome.com"; + return "erome.com"; } @Override public String getHost() { - return "erome"; + return "erome"; } @Override public void downloadURL(URL url, int index) { - addURLToDownload(url, getPrefix(index)); + addURLToDownload(url, getPrefix(index), "", "erome.com", this.cookies); } @Override @@ -66,39 +69,40 @@ public class EromeRipper extends AbstractHTMLRipper { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { - try { - // Attempt to use album title as GID - Element titleElement = getFirstPage().select("meta[property=og:title]").first(); - String title = titleElement.attr("content"); - title = title.substring(title.lastIndexOf('/') + 1); - return getHost() + "_" + getGID(url) + "_" + title.trim(); - } catch (IOException e) { - // Fall back to default album naming convention - 
LOGGER.info("Unable to find title at " + url); - } catch (NullPointerException e) { - return getHost() + "_" + getGID(url); - } - return super.getAlbumTitle(url); + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { + try { + // Attempt to use album title as GID + Element titleElement = getCachedFirstPage().select("meta[property=og:title]").first(); + String title = titleElement.attr("content"); + title = title.substring(title.lastIndexOf('/') + 1); + return getHost() + "_" + getGID(url) + "_" + title.trim(); + } catch (IOException e) { + // Fall back to default album naming convention + LOGGER.info("Unable to find title at " + url); + } catch (NullPointerException e) { + return getHost() + "_" + getGID(url); + } + return super.getAlbumTitle(url); } @Override - public URL sanitizeURL(URL url) throws MalformedURLException { - return new URL(url.toExternalForm().replaceAll("https?://erome.com", "https://www.erome.com")); + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { + return new URI(url.toExternalForm().replaceAll("https?://erome.com", "https://www.erome.com")).toURL(); } @Override public List getURLsFromPage(Document doc) { - List URLs = new ArrayList<>(); return getMediaFromPage(doc); } @Override public Document getFirstPage() throws IOException { + this.setAuthCookie(); Response resp = Http.url(this.url) - .ignoreContentType() - .response(); + .cookies(cookies) + .ignoreContentType() + .response(); return resp.parse(); } @@ -124,18 +128,17 @@ public class EromeRipper extends AbstractHTMLRipper { private List getMediaFromPage(Document doc) { List results = new ArrayList<>(); for (Element el : doc.select("img.img-front")) { - if (el.hasAttr("src")) { - if (el.attr("src").startsWith("https:")) { - results.add(el.attr("src")); - } else { - results.add("https:" + el.attr("src")); - } - } else if (el.hasAttr("data-src")) { - //to add images that are not loaded( as all images are lasyloaded as we 
scroll). - results.add(el.attr("data-src")); - } - - } + if (el.hasAttr("data-src")) { + //to add images that are not loaded( as all images are lasyloaded as we scroll). + results.add(el.attr("data-src")); + } else if (el.hasAttr("src")) { + if (el.attr("src").startsWith("https:")) { + results.add(el.attr("src")); + } else { + results.add("https:" + el.attr("src")); + } + } + } for (Element el : doc.select("source[label=HD]")) { if (el.attr("src").startsWith("https:")) { results.add(el.attr("src")); @@ -152,7 +155,22 @@ public class EromeRipper extends AbstractHTMLRipper { results.add("https:" + el.attr("src")); } } + + if (results.size() == 0) { + if (cookies.isEmpty()) { + LOGGER.warn("You might try setting erome.laravel_session manually " + + "if you think this page definitely contains media."); + } + } + return results; } + private void setAuthCookie() { + String sessionId = Utils.getConfigString("erome.laravel_session", null); + if (sessionId != null) { + cookies.put("laravel_session", sessionId); + } + } + } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ErotivRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ErotivRipper.java index 10e73346..04511085 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ErotivRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ErotivRipper.java @@ -2,6 +2,8 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -11,7 +13,6 @@ import java.util.regex.Pattern; import org.jsoup.Connection.Response; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; -import org.jsoup.select.Elements; import com.rarchives.ripme.ripper.AbstractHTMLRipper; import com.rarchives.ripme.utils.Http; @@ -57,8 +58,8 @@ public class ErotivRipper extends AbstractHTMLRipper { } @Override - public URL 
sanitizeURL(URL url) throws MalformedURLException { - return new URL(url.toExternalForm().replaceAll("https?://www.erotiv.io", "https://erotiv.io")); + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { + return new URI(url.toExternalForm().replaceAll("https?://www.erotiv.io", "https://erotiv.io")).toURL(); } @Override diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FemjoyhunterRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FemjoyhunterRipper.java index 1922002b..2661d055 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/FemjoyhunterRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FemjoyhunterRipper.java @@ -12,7 +12,6 @@ import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.utils.Http; public class FemjoyhunterRipper extends AbstractHTMLRipper { @@ -41,12 +40,6 @@ public class FemjoyhunterRipper extends AbstractHTMLRipper { "femjoyhunter.com/ID - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FitnakedgirlsRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FitnakedgirlsRipper.java index de6fb73d..51d5f15f 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/FitnakedgirlsRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FitnakedgirlsRipper.java @@ -1,72 +1,66 @@ -package com.rarchives.ripme.ripper.rippers; - -import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.ArrayList; -import java.util.List; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import org.jsoup.nodes.Document; -import 
org.jsoup.nodes.Element; -import org.jsoup.select.Elements; - -import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.utils.Http; - -public class FitnakedgirlsRipper extends AbstractHTMLRipper { - - public FitnakedgirlsRipper(URL url) throws IOException { - super(url); - } - - @Override - public String getHost() { - return "fitnakedgirls"; - } - - @Override - public String getDomain() { - return "fitnakedgirls.com"; - } - - @Override - public String getGID(URL url) throws MalformedURLException { - Pattern p; - Matcher m; - - p = Pattern.compile("^.*fitnakedgirls\\.com/gallery/(.+)$"); - m = p.matcher(url.toExternalForm()); - if (m.matches()) { - return m.group(1); - } - - throw new MalformedURLException( - "Expected fitnakedgirls.com gallery format: " + "fitnakedgirls.com/gallery/####" + " Got: " + url); - } - - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - - @Override - public List getURLsFromPage(Document doc) { - List imageURLs = new ArrayList<>(); - - Elements imgs = doc.select("div[class*=wp-tiles-tile-bg] > img"); - for (Element img : imgs) { - String imgSrc = img.attr("src"); - imageURLs.add(imgSrc); - } - - return imageURLs; - } - - @Override - public void downloadURL(URL url, int index) { - // Send referrer when downloading images - addURLToDownload(url, getPrefix(index), "", this.url.toExternalForm(), null); - } +package com.rarchives.ripme.ripper.rippers; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URL; +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.jsoup.nodes.Document; +import org.jsoup.nodes.Element; +import org.jsoup.select.Elements; + +import com.rarchives.ripme.ripper.AbstractHTMLRipper; + +public class FitnakedgirlsRipper extends AbstractHTMLRipper { + + public FitnakedgirlsRipper(URL url) throws IOException { + super(url); + } + + 
@Override + public String getHost() { + return "fitnakedgirls"; + } + + @Override + public String getDomain() { + return "fitnakedgirls.com"; + } + + @Override + public String getGID(URL url) throws MalformedURLException { + Pattern p; + Matcher m; + + p = Pattern.compile("^.*fitnakedgirls\\.com/gallery/(.+)$"); + m = p.matcher(url.toExternalForm()); + if (m.matches()) { + return m.group(1); + } + + throw new MalformedURLException( + "Expected fitnakedgirls.com gallery format: " + "fitnakedgirls.com/gallery/####" + " Got: " + url); + } + + @Override + public List getURLsFromPage(Document doc) { + List imageURLs = new ArrayList<>(); + + Elements imgs = doc.select("div[class*=wp-tiles-tile-bg] > img"); + for (Element img : imgs) { + String imgSrc = img.attr("src"); + imageURLs.add(imgSrc); + } + + return imageURLs; + } + + @Override + public void downloadURL(URL url, int index) { + // Send referrer when downloading images + addURLToDownload(url, getPrefix(index), "", this.url.toExternalForm(), null); + } } \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java index 6591dd01..bba284f1 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java @@ -1,10 +1,9 @@ package com.rarchives.ripme.ripper.rippers; -import java.io.File; import java.io.IOException; -import java.net.HttpURLConnection; -import java.net.MalformedURLException; -import java.net.URL; +import java.net.*; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.List; import java.util.regex.Matcher; @@ -163,8 +162,8 @@ public class FivehundredpxRipper extends AbstractJSONRipper { } @Override - public JSONObject getFirstPage() throws IOException { - URL apiURL = new URL(baseURL + "&consumer_key=" + CONSUMER_KEY); + public 
JSONObject getFirstPage() throws IOException, URISyntaxException { + URL apiURL = new URI(baseURL + "&consumer_key=" + CONSUMER_KEY).toURL(); LOGGER.debug("apiURL: " + apiURL); JSONObject json = Http.url(apiURL).getJSON(); @@ -231,7 +230,7 @@ public class FivehundredpxRipper extends AbstractJSONRipper { } @Override - public JSONObject getNextPage(JSONObject json) throws IOException { + public JSONObject getNextPage(JSONObject json) throws IOException, URISyntaxException { if (isThisATest()) { return null; } @@ -248,9 +247,9 @@ public class FivehundredpxRipper extends AbstractJSONRipper { sleep(500); ++page; - URL apiURL = new URL(baseURL + URL apiURL = new URI(baseURL + "&page=" + page - + "&consumer_key=" + CONSUMER_KEY); + + "&consumer_key=" + CONSUMER_KEY).toURL(); return Http.url(apiURL).getJSON(); } @@ -295,14 +294,9 @@ public class FivehundredpxRipper extends AbstractJSONRipper { } } } - if (imageURL == null) { - LOGGER.error("Failed to find image for photo " + photo.toString()); - } - else { - imageURLs.add(imageURL); - if (isThisATest()) { - break; - } + imageURLs.add(imageURL); + if (isThisATest()) { + break; } } return imageURLs; @@ -310,13 +304,13 @@ public class FivehundredpxRipper extends AbstractJSONRipper { private boolean urlExists(String url) { try { - HttpURLConnection connection = (HttpURLConnection) new URL(url).openConnection(); + HttpURLConnection connection = (HttpURLConnection) new URI(url).toURL().openConnection(); connection.setRequestMethod("HEAD"); if (connection.getResponseCode() != 200) { throw new IOException("Couldn't find full-size image at " + url); } return true; - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { return false; } } @@ -330,8 +324,8 @@ public class FivehundredpxRipper extends AbstractJSONRipper { public void downloadURL(URL url, int index) { String u = url.toExternalForm(); String[] fields = u.split("/"); - String prefix = getPrefix(index) + fields[fields.length - 3]; - File saveAs = new 
File(getWorkingDir() + File.separator + prefix + ".jpg"); + String prefix = "/" + getPrefix(index) + fields[fields.length - 3]; + Path saveAs = Paths.get(getWorkingDir() + prefix + ".jpg"); addURLToDownload(url, saveAs, "", null, false); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FlickrRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FlickrRipper.java index e56cb4a1..c58a7e71 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/FlickrRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FlickrRipper.java @@ -2,6 +2,8 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.*; import java.util.regex.Matcher; @@ -10,6 +12,7 @@ import java.util.regex.Pattern; import com.rarchives.ripme.ui.RipStatusMessage; import org.json.JSONArray; import org.json.JSONObject; +import org.json.JSONException; import org.jsoup.nodes.Document; import com.rarchives.ripme.ripper.AbstractHTMLRipper; @@ -19,8 +22,23 @@ import org.jsoup.nodes.Element; public class FlickrRipper extends AbstractHTMLRipper { - private Document albumDoc = null; private final DownloadThreadPool flickrThreadPool; + + private enum UrlType { + USER, + PHOTOSET + } + + private class Album { + final UrlType type; + final String id; + + Album(UrlType type, String id) { + this.type = type; + this.id = id; + } + } + @Override public DownloadThreadPool getThreadPool() { return flickrThreadPool; @@ -46,7 +64,7 @@ public class FlickrRipper extends AbstractHTMLRipper { } @Override - public URL sanitizeURL(URL url) throws MalformedURLException { + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { String sUrl = url.toExternalForm(); // Strip out https sUrl = sUrl.replace("https://secure.flickr.com", "http://www.flickr.com"); @@ -57,7 +75,7 @@ public class FlickrRipper extends 
AbstractHTMLRipper { } sUrl += "pool"; } - return new URL(sUrl); + return new URI(sUrl).toURL(); } // FLickr is one of those sites what includes a api key in sites javascript // TODO let the user provide their own api key @@ -81,40 +99,44 @@ public class FlickrRipper extends AbstractHTMLRipper { } // The flickr api is a monster of weird settings so we just request everything that the webview does - private String apiURLBuilder(String photoset, String pageNumber, String apiKey) { - LOGGER.info("https://api.flickr.com/services/rest?extras=can_addmeta," + - "can_comment,can_download,can_share,contact,count_comments,count_faves,count_views,date_taken," + - "date_upload,icon_urls_deep,isfavorite,ispro,license,media,needs_interstitial,owner_name," + - "owner_datecreate,path_alias,realname,rotation,safety_level,secret_k,secret_h,url_c,url_f,url_h,url_k," + - "url_l,url_m,url_n,url_o,url_q,url_s,url_sq,url_t,url_z,visibility,visibility_source,o_dims," + - "is_marketplace_printable,is_marketplace_licensable,publiceditability&per_page=100&page="+ pageNumber + "&" + - "get_user_info=1&primary_photo_extras=url_c,%20url_h,%20url_k,%20url_l,%20url_m,%20url_n,%20url_o" + - ",%20url_q,%20url_s,%20url_sq,%20url_t,%20url_z,%20needs_interstitial,%20can_share&jump_to=&" + - "photoset_id=" + photoset + "&viewerNSID=&method=flickr.photosets.getPhotos&csrf=&" + - "api_key=" + apiKey + "&format=json&hermes=1&hermesClient=1&reqId=358ed6a0&nojsoncallback=1"); + private String apiURLBuilder(Album album, String pageNumber, String apiKey) { + String method = null; + String idField = null; + switch (album.type) { + case PHOTOSET: + method = "flickr.photosets.getPhotos"; + idField = "photoset_id=" + album.id; + break; + case USER: + method = "flickr.people.getPhotos"; + idField = "user_id=" + album.id; + break; + } + return "https://api.flickr.com/services/rest?extras=can_addmeta," + - "can_comment,can_download,can_share,contact,count_comments,count_faves,count_views,date_taken," + - 
"date_upload,icon_urls_deep,isfavorite,ispro,license,media,needs_interstitial,owner_name," + - "owner_datecreate,path_alias,realname,rotation,safety_level,secret_k,secret_h,url_c,url_f,url_h,url_k," + - "url_l,url_m,url_n,url_o,url_q,url_s,url_sq,url_t,url_z,visibility,visibility_source,o_dims," + - "is_marketplace_printable,is_marketplace_licensable,publiceditability&per_page=100&page="+ pageNumber + "&" + - "get_user_info=1&primary_photo_extras=url_c,%20url_h,%20url_k,%20url_l,%20url_m,%20url_n,%20url_o" + - ",%20url_q,%20url_s,%20url_sq,%20url_t,%20url_z,%20needs_interstitial,%20can_share&jump_to=&" + - "photoset_id=" + photoset + "&viewerNSID=&method=flickr.photosets.getPhotos&csrf=&" + - "api_key=" + apiKey + "&format=json&hermes=1&hermesClient=1&reqId=358ed6a0&nojsoncallback=1"; + "can_comment,can_download,can_share,contact,count_comments,count_faves,count_views,date_taken," + + "date_upload,icon_urls_deep,isfavorite,ispro,license,media,needs_interstitial,owner_name," + + "owner_datecreate,path_alias,realname,rotation,safety_level,secret_k,secret_h,url_c,url_f,url_h,url_k," + + "url_l,url_m,url_n,url_o,url_q,url_s,url_sq,url_t,url_z,visibility,visibility_source,o_dims," + + "is_marketplace_printable,is_marketplace_licensable,publiceditability&per_page=100&page="+ pageNumber + "&" + + "get_user_info=1&primary_photo_extras=url_c,%20url_h,%20url_k,%20url_l,%20url_m,%20url_n,%20url_o" + + ",%20url_q,%20url_s,%20url_sq,%20url_t,%20url_z,%20needs_interstitial,%20can_share&jump_to=&" + + idField + "&viewerNSID=&method=" + method + "&csrf=&" + + "api_key=" + apiKey + "&format=json&hermes=1&hermesClient=1&reqId=358ed6a0&nojsoncallback=1"; } private JSONObject getJSON(String page, String apiKey) { URL pageURL = null; String apiURL = null; try { - apiURL = apiURLBuilder(getPhotosetID(url.toExternalForm()), page, apiKey); - pageURL = new URL(apiURL); - } catch (MalformedURLException e) { + apiURL = apiURLBuilder(getAlbum(url.toExternalForm()), page, apiKey); + pageURL = 
new URI(apiURL).toURL(); + } catch (MalformedURLException | URISyntaxException e) { LOGGER.error("Unable to get api link " + apiURL + " is malformed"); } try { - LOGGER.info(Http.url(pageURL).ignoreContentType().get().text()); + LOGGER.info("Fetching: " + apiURL); + LOGGER.info("Response: " + Http.url(pageURL).ignoreContentType().get().text()); return new JSONObject(Http.url(pageURL).ignoreContentType().get().text()); } catch (IOException e) { LOGGER.error("Unable to get api link " + apiURL + " is malformed"); @@ -122,31 +144,42 @@ public class FlickrRipper extends AbstractHTMLRipper { } } - private String getPhotosetID(String url) { + private Album getAlbum(String url) throws MalformedURLException { Pattern p; Matcher m; - // Root: https://www.flickr.com/photos/115858035@N04/ + // User photostream: https://www.flickr.com/photos/115858035@N04/ // Album: https://www.flickr.com/photos/115858035@N04/sets/72157644042355643/ final String domainRegex = "https?://[wm.]*flickr.com"; final String userRegex = "[a-zA-Z0-9@_-]+"; // Album - p = Pattern.compile("^" + domainRegex + "/photos/(" + userRegex + ")/(sets|albums)/([0-9]+)/?.*$"); + p = Pattern.compile("^" + domainRegex + "/photos/" + userRegex + "/(sets|albums)/([0-9]+)/?.*$"); m = p.matcher(url); if (m.matches()) { - return m.group(3); + return new Album(UrlType.PHOTOSET, m.group(2)); } - return null; + + // User photostream + p = Pattern.compile("^" + domainRegex + "/photos/(" + userRegex + ")/?$"); + m = p.matcher(url); + if (m.matches()) { + return new Album(UrlType.USER, m.group(1)); + } + + String errorMessage = "Failed to extract photoset ID from url: " + url; + + LOGGER.error(errorMessage); + throw new MalformedURLException(errorMessage); } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { if (!url.toExternalForm().contains("/sets/")) { return super.getAlbumTitle(url); } - try { + try { // 
Attempt to use album title as GID - Document doc = getFirstPage(); + Document doc = getCachedFirstPage(); String user = url.toExternalForm(); user = user.substring(user.indexOf("/photos/") + "/photos/".length()); user = user.substring(0, user.indexOf("/")); @@ -196,13 +229,6 @@ public class FlickrRipper extends AbstractHTMLRipper { + " Got: " + url); } - @Override - public Document getFirstPage() throws IOException { - if (albumDoc == null) { - albumDoc = Http.url(url).get(); - } - return albumDoc; - } @Override public List getURLsFromPage(Document doc) { @@ -214,15 +240,29 @@ public class FlickrRipper extends AbstractHTMLRipper { if (jsonData.has("stat") && jsonData.getString("stat").equals("fail")) { break; } else { - int totalPages = jsonData.getJSONObject("photoset").getInt("pages"); + // Determine root key + JSONObject rootData; + + try { + rootData = jsonData.getJSONObject("photoset"); + } catch (JSONException e) { + try { + rootData = jsonData.getJSONObject("photos"); + } catch (JSONException innerE) { + LOGGER.error("Unable to find photos in response"); + break; + } + } + + int totalPages = rootData.getInt("pages"); LOGGER.info(jsonData); - JSONArray pictures = jsonData.getJSONObject("photoset").getJSONArray("photo"); + JSONArray pictures = rootData.getJSONArray("photo"); for (int i = 0; i < pictures.length(); i++) { LOGGER.info(i); JSONObject data = (JSONObject) pictures.get(i); try { addURLToDownload(getLargestImageURL(data.getString("id"), apiKey)); - } catch (MalformedURLException e) { + } catch (MalformedURLException | URISyntaxException e) { LOGGER.error("Flickr MalformedURLException: " + e.getMessage()); } @@ -245,11 +285,11 @@ public class FlickrRipper extends AbstractHTMLRipper { addURLToDownload(url, getPrefix(index)); } - private URL getLargestImageURL(String imageID, String apiKey) throws MalformedURLException { + private URL getLargestImageURL(String imageID, String apiKey) throws MalformedURLException, URISyntaxException { TreeMap imageURLMap 
= new TreeMap<>(); try { - URL imageAPIURL = new URL("https://www.flickr.com/services/rest/?method=flickr.photos.getSizes&api_key=" + apiKey + "&photo_id=" + imageID + "&format=json&nojsoncallback=1"); + URL imageAPIURL = new URI("https://www.flickr.com/services/rest/?method=flickr.photos.getSizes&api_key=" + apiKey + "&photo_id=" + imageID + "&format=json&nojsoncallback=1").toURL(); JSONArray imageSizes = new JSONObject(Http.url(imageAPIURL).ignoreContentType().get().text()).getJSONObject("sizes").getJSONArray("size"); for (int i = 0; i < imageSizes.length(); i++) { JSONObject imageInfo = imageSizes.getJSONObject(i); @@ -264,6 +304,6 @@ public class FlickrRipper extends AbstractHTMLRipper { LOGGER.error("IOException while looking at image sizes: " + e.getMessage()); } - return new URL(imageURLMap.lastEntry().getValue()); + return new URI(imageURLMap.lastEntry().getValue()).toURL(); } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/MulemaxRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FooktubeRipper.java similarity index 67% rename from src/main/java/com/rarchives/ripme/ripper/rippers/MulemaxRipper.java rename to src/main/java/com/rarchives/ripme/ripper/rippers/FooktubeRipper.java index 01bf4b1c..fed1abe0 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/MulemaxRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FooktubeRipper.java @@ -10,17 +10,10 @@ import java.util.regex.Pattern; import com.rarchives.ripme.ripper.AbstractSingleFileRipper; import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; -import org.jsoup.select.Elements; -import com.rarchives.ripme.ripper.VideoRipper; -import com.rarchives.ripme.utils.Http; +public class FooktubeRipper extends AbstractSingleFileRipper { -public class MulemaxRipper extends AbstractSingleFileRipper { - - private static final String HOST = "mulemax"; - - public MulemaxRipper(URL url) throws IOException { + public FooktubeRipper(URL url) throws IOException { 
super(url); } @@ -34,14 +27,10 @@ public class MulemaxRipper extends AbstractSingleFileRipper { return "mulemax.com"; } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } @Override public boolean canRip(URL url) { - Pattern p = Pattern.compile("^https?://.*mulemax\\.com/video/(.*)/.*$"); + Pattern p = Pattern.compile("^https?://.*fooktube\\.com/video/(.*)/.*$"); Matcher m = p.matcher(url.toExternalForm()); return m.matches(); } @@ -53,15 +42,15 @@ public class MulemaxRipper extends AbstractSingleFileRipper { @Override public String getGID(URL url) throws MalformedURLException { - Pattern p = Pattern.compile("^https?://.*mulemax\\.com/video/(.*)/(.*)$"); + Pattern p = Pattern.compile("^https?://.*fooktube\\.com/video/(.*)/(.*)$"); Matcher m = p.matcher(url.toExternalForm()); if (m.matches()) { return m.group(2); } throw new MalformedURLException( - "Expected mulemax format:" - + "mulemax.com/video/####" + "Expected fooktube format:" + + "fooktube.com/video/####" + " Got: " + url); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FreeComicOnlineRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FreeComicOnlineRipper.java new file mode 100644 index 00000000..a39d3b9b --- /dev/null +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FreeComicOnlineRipper.java @@ -0,0 +1,74 @@ +package com.rarchives.ripme.ripper.rippers; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URL; +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.jsoup.nodes.Document; +import org.jsoup.nodes.Element; +import com.rarchives.ripme.ripper.AbstractHTMLRipper; +import com.rarchives.ripme.utils.Http; + +public class FreeComicOnlineRipper extends AbstractHTMLRipper { + + public FreeComicOnlineRipper(URL url) throws IOException { + super(url); + } + + @Override + public String getHost() { + return 
"freecomiconline"; + } + + @Override + public String getDomain() { + return "freecomiconline.me"; + } + + @Override + public String getGID(URL url) throws MalformedURLException { + Pattern p = Pattern.compile("https://freecomiconline.me/comic/([a-zA-Z0-9_\\-]+)/([a-zA-Z0-9_\\-]+)/?$"); + Matcher m = p.matcher(url.toExternalForm()); + if (m.matches()) { + return m.group(1) + "_" + m.group(2); + } + p = Pattern.compile("^https://freecomiconline.me/comic/([a-zA-Z0-9_\\-]+)/?$"); + m = p.matcher(url.toExternalForm()); + if (m.matches()) { + return m.group(1); + } + throw new MalformedURLException("Expected freecomiconline URL format: " + + "freecomiconline.me/TITLE/CHAPTER - got " + url + " instead"); + } + + @Override + public Document getNextPage(Document doc) throws IOException { + String nextPage = doc.select("div.select-pagination a").get(1).attr("href"); + String nextUrl = ""; + Pattern p = Pattern.compile("https://freecomiconline.me/comic/([a-zA-Z0-9_\\-]+)/([a-zA-Z0-9_\\-]+)/?$"); + Matcher m = p.matcher(nextPage); + if(m.matches()){ + nextUrl = m.group(0); + } + if(nextUrl.equals("")) throw new IOException("No more pages"); + sleep(500); + return Http.url(nextUrl).get(); + } + + @Override + public List getURLsFromPage(Document doc) { + List result = new ArrayList<>(); + for (Element el : doc.select(".wp-manga-chapter-img")) { + result.add(el.attr("src")); + } + return result; + } + + @Override + public void downloadURL(URL url, int index) { + addURLToDownload(url, getPrefix(index)); + } +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java index 683c791b..dbb46fe1 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java @@ -1,10 +1,12 @@ package com.rarchives.ripme.ripper.rippers; -import java.io.File; -import java.io.FileOutputStream; import 
java.io.IOException; +import java.io.OutputStream; import java.net.MalformedURLException; import java.net.URL; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -15,11 +17,10 @@ import java.util.regex.Pattern; import com.rarchives.ripme.ui.RipStatusMessage; import com.rarchives.ripme.utils.Utils; import org.jsoup.Connection.Response; -import org.jsoup.HttpStatusException; import org.jsoup.Jsoup; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; -import org.jsoup.safety.Whitelist; +import org.jsoup.safety.Safelist; import org.jsoup.select.Elements; import com.rarchives.ripme.ripper.AbstractHTMLRipper; @@ -91,14 +92,13 @@ public class FuraffinityRipper extends AbstractHTMLRipper { String nextUrl = urlBase + nextPageUrl.first().attr("href"); sleep(500); - Document nextPage = Http.url(nextUrl).cookies(cookies).get(); - return nextPage; + return Http.url(nextUrl).cookies(cookies).get(); } private String getImageFromPost(String url) { sleep(1000); - Document d = null; + Document d; try { d = Http.url(url).cookies(cookies).get(); Elements links = d.getElementsByTag("a"); @@ -125,6 +125,9 @@ public class FuraffinityRipper extends AbstractHTMLRipper { urls.add(urlToAdd); } } + if (isStopped() || isThisATest()) { + break; + } } return urls; } @@ -164,7 +167,7 @@ public class FuraffinityRipper extends AbstractHTMLRipper { ele.select("br").append("\\n"); ele.select("p").prepend("\\n\\n"); LOGGER.debug("Returning description at " + page); - String tempPage = Jsoup.clean(ele.html().replaceAll("\\\\n", System.getProperty("line.separator")), "", Whitelist.none(), new Document.OutputSettings().prettyPrint(false)); + String tempPage = Jsoup.clean(ele.html().replaceAll("\\\\n", System.getProperty("line.separator")), "", Safelist.none(), new Document.OutputSettings().prettyPrint(false)); return documentz.select("meta[property=og:title]").attr("content") + 
"\n" + tempPage; // Overridden saveText takes first line and makes it the file name. } catch (IOException ioe) { LOGGER.info("Failed to get description " + page + " : '" + ioe.getMessage() + "'"); @@ -181,24 +184,22 @@ public class FuraffinityRipper extends AbstractHTMLRipper { } String newText = ""; String saveAs = ""; - File saveFileAs; + Path saveFileAs; saveAs = text.split("\n")[0]; saveAs = saveAs.replaceAll("^(\\S+)\\s+by\\s+(.*)$", "$2_$1"); for (int i = 1;i < text.split("\n").length; i++) { newText = newText.replace("\\","").replace("/","").replace("~","") + "\n" + text.split("\n")[i]; } try { - if (!subdirectory.equals("")) { - subdirectory = File.separator + subdirectory; - } - saveFileAs = new File( - workingDir.getCanonicalPath() + saveFileAs = Paths.get( + workingDir + + "/" + subdirectory - + File.separator + + "/" + saveAs + ".txt"); // Write the file - FileOutputStream out = (new FileOutputStream(saveFileAs)); + OutputStream out = Files.newOutputStream(saveFileAs); out.write(text.getBytes()); out.close(); } catch (IOException e) { @@ -206,9 +207,13 @@ public class FuraffinityRipper extends AbstractHTMLRipper { return false; } LOGGER.debug("Downloading " + url + "'s description to " + saveFileAs); - if (!saveFileAs.getParentFile().exists()) { + if (!Files.exists(saveFileAs.getParent())) { LOGGER.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent())); - saveFileAs.getParentFile().mkdirs(); + try { + Files.createDirectory(saveFileAs.getParent()); + } catch (IOException e) { + e.printStackTrace(); + } } return true; } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FuskatorRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FuskatorRipper.java index d88b16e8..62a60fcc 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/FuskatorRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FuskatorRipper.java @@ -2,6 +2,8 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; 
import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -40,7 +42,7 @@ public class FuskatorRipper extends AbstractHTMLRipper { } @Override - public URL sanitizeURL(URL url) throws MalformedURLException { + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { String u = url.toExternalForm(); if (u.contains("/thumbs/")) { u = u.replace("/thumbs/", "/full/"); @@ -48,7 +50,7 @@ public class FuskatorRipper extends AbstractHTMLRipper { if (u.contains("/expanded/")) { u = u.replaceAll("/expanded/", "/full/"); } - return new URL(u); + return new URI(u).toURL(); } @Override diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/GfycatRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/GfycatRipper.java deleted file mode 100644 index 16205115..00000000 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/GfycatRipper.java +++ /dev/null @@ -1,159 +0,0 @@ -package com.rarchives.ripme.ripper.rippers; - - -import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.ArrayList; -import java.util.List; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import org.json.JSONArray; -import org.json.JSONObject; -import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; -import org.jsoup.select.Elements; - -import com.rarchives.ripme.utils.Http; - - -public class GfycatRipper extends AbstractHTMLRipper { - - private static final String HOST = "gfycat.com"; - String username = ""; - String cursor = ""; - String count = "30"; - - - - public GfycatRipper(URL url) throws IOException { - super(new URL(url.toExternalForm().split("-")[0].replace("thumbs.", ""))); - } - - @Override - public String getDomain() { - return "gfycat.com"; - } - - @Override - public String getHost() { - return "gfycat"; - } - 
- @Override - public boolean canRip(URL url) { - return url.getHost().endsWith(HOST); - } - - @Override - public URL sanitizeURL(URL url) throws MalformedURLException { - String sUrl = url.toExternalForm(); - sUrl = sUrl.replace("/gifs/detail", ""); - sUrl = sUrl.replace("/amp", ""); - return new URL(sUrl); - } - - public boolean isProfile() { - Pattern p = Pattern.compile("^https?://[wm.]*gfycat\\.com/@([a-zA-Z0-9]+).*$"); - Matcher m = p.matcher(url.toExternalForm()); - return m.matches(); - } - - @Override - public Document getFirstPage() throws IOException { - if (!isProfile()) { - return Http.url(url).get(); - } else { - username = getGID(url); - return Http.url(new URL("https://api.gfycat.com/v1/users/" + username + "/gfycats")).ignoreContentType().get(); - } - } - - @Override - public void downloadURL(URL url, int index) { - addURLToDownload(url, getPrefix(index)); - } - - @Override - public String getGID(URL url) throws MalformedURLException { - Pattern p = Pattern.compile("^https?://(thumbs\\.|[wm\\.]*)gfycat\\.com/@?([a-zA-Z0-9]+).*$"); - Matcher m = p.matcher(url.toExternalForm()); - - if (m.matches()) - return m.group(2); - - throw new MalformedURLException( - "Expected gfycat.com format: " - + "gfycat.com/id or " - + "thumbs.gfycat.com/id.gif" - + " Got: " + url); - } - - private String stripHTMLTags(String t) { - t = t.replaceAll("\n" + - " \n" + - " ", ""); - t = t.replaceAll("\n" + - "", ""); - t = t.replaceAll("\n", ""); - t = t.replaceAll("=\"\"", ""); - return t; - } - - @Override - public Document getNextPage(Document doc) throws IOException { - if (cursor.equals("")) { - throw new IOException("No more pages"); - } - return Http.url(new URL("https://api.gfycat.com/v1/users/" + username + "/gfycats?count=" + count + "&cursor=" + cursor)).ignoreContentType().get(); - } - - @Override - public List getURLsFromPage(Document doc) { - List result = new ArrayList<>(); - if (isProfile()) { - JSONObject page = new JSONObject(stripHTMLTags(doc.html())); - 
JSONArray content = page.getJSONArray("gfycats"); - for (int i = 0; i < content.length(); i++) { - result.add(content.getJSONObject(i).getString("mp4Url")); - } - cursor = page.getString("cursor"); - } else { - Elements videos = doc.select("script"); - for (Element el : videos) { - String json = el.html(); - if (json.startsWith("{")) { - JSONObject page = new JSONObject(json); - result.add(page.getJSONObject("video").getString("contentUrl")); - } - } - } - return result; - } - - /** - * Helper method for retrieving video URLs. - * @param url URL to gfycat page - * @return URL to video - * @throws IOException - */ - public static String getVideoURL(URL url) throws IOException { - LOGGER.info("Retrieving " + url.toExternalForm()); - - //Sanitize the URL first - url = new URL(url.toExternalForm().replace("/gifs/detail", "")); - - Document doc = Http.url(url).get(); - Elements videos = doc.select("script"); - for (Element el : videos) { - String json = el.html(); - if (json.startsWith("{")) { - JSONObject page = new JSONObject(json); - return page.getJSONObject("video").getString("contentUrl"); - } - } - throw new IOException(); - } -} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/GfycatporntubeRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/GfycatporntubeRipper.java index fd8c292a..bdb58ad2 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/GfycatporntubeRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/GfycatporntubeRipper.java @@ -11,8 +11,6 @@ import java.util.regex.Pattern; import com.rarchives.ripme.ripper.AbstractSingleFileRipper; import org.jsoup.nodes.Document; -import com.rarchives.ripme.utils.Http; - public class GfycatporntubeRipper extends AbstractSingleFileRipper { public GfycatporntubeRipper(URL url) throws IOException { @@ -40,12 +38,6 @@ public class GfycatporntubeRipper extends AbstractSingleFileRipper { "gfycatporntube.com/NAME - got " + url + " instead"); } - @Override - public Document 
getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/GirlsOfDesireRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/GirlsOfDesireRipper.java index 2afc79d1..49cbfc60 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/GirlsOfDesireRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/GirlsOfDesireRipper.java @@ -2,6 +2,7 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -13,11 +14,8 @@ import org.jsoup.nodes.Element; import org.jsoup.select.Elements; import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.utils.Http; public class GirlsOfDesireRipper extends AbstractHTMLRipper { - // Current HTML document - private Document albumDoc = null; public GirlsOfDesireRipper(URL url) throws IOException { super(url); @@ -32,10 +30,10 @@ public class GirlsOfDesireRipper extends AbstractHTMLRipper { return "girlsofdesire.org"; } - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { // Attempt to use album title as GID - Document doc = getFirstPage(); + Document doc = getCachedFirstPage(); Elements elems = doc.select(".albumName"); return getHost() + "_" + elems.first().text(); } catch (Exception e) { @@ -62,14 +60,6 @@ public class GirlsOfDesireRipper extends AbstractHTMLRipper { + " Got: " + url); } - @Override - public Document getFirstPage() throws IOException { - if (albumDoc == null) { - albumDoc = Http.url(url).get(); - } - return albumDoc; - } - @Override public List getURLsFromPage(Document doc) { List imageURLs 
= new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HbrowseRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HbrowseRipper.java index fd3b23c2..040ca978 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/HbrowseRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HbrowseRipper.java @@ -2,6 +2,7 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -49,9 +50,9 @@ public class HbrowseRipper extends AbstractHTMLRipper { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { - Document doc = getFirstPage(); + Document doc = getCachedFirstPage(); String title = doc.select("div[id=main] > table.listTable > tbody > tr > td.listLong").first().text(); return getHost() + "_" + title + "_" + getGID(url); } catch (Exception e) { diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/Hentai2readRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/Hentai2readRipper.java index cb521523..2b8ac967 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/Hentai2readRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/Hentai2readRipper.java @@ -2,6 +2,7 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -90,7 +91,7 @@ public class Hentai2readRipper extends AbstractHTMLRipper { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { return getHost() + "_" + getGID(url); } catch (Exception e) { diff 
--git a/src/main/java/com/rarchives/ripme/ripper/rippers/HentaiNexusRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaiNexusRipper.java new file mode 100644 index 00000000..4d28f7a2 --- /dev/null +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaiNexusRipper.java @@ -0,0 +1,184 @@ +package com.rarchives.ripme.ripper.rippers; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; +import java.util.Base64; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import com.rarchives.ripme.utils.Http; +import org.json.JSONArray; +import org.json.JSONException; +import org.json.JSONObject; + +import com.rarchives.ripme.ripper.AbstractJSONRipper; +import org.jsoup.nodes.DataNode; +import org.jsoup.nodes.Document; +import org.jsoup.nodes.Element; + +public class HentaiNexusRipper extends AbstractJSONRipper { + + public HentaiNexusRipper(URL url) throws IOException { + super(url); + } + + @Override + public String getHost() { + return "hentainexus"; + } + @Override + public String getDomain() { + return "hentainexus.com"; + } + + @Override + public String getGID(URL url) throws MalformedURLException { + /* + Valid URLs are /view/id, /read/id and those 2 with #pagenumber + https://hentainexus.com/view/9202 + https://hentainexus.com/read/9202 + https://hentainexus.com/view/9202#001 + https://hentainexus.com/read/9202#001 + */ + + Pattern p = Pattern.compile("^https?://hentainexus\\.com/(?:view|read)/([0-9]+)(?:\\#[0-9]+)*$"); + Matcher m = p.matcher(url.toExternalForm()); + if (m.matches()) { + return m.group(1); + } + throw new MalformedURLException("Expected hentainexus.com URL format: " + + "hentainexus.com/view/id OR hentainexus.com/read/id - got " + url + "instead"); + } + + @Override + public void downloadURL(URL url, int index) { + addURLToDownload(url, getPrefix(index)); + } + + 
+ @Override + protected List getURLsFromJSON(JSONObject json) throws JSONException { + + List urlList = new ArrayList<>(); + + JSONArray imagesList = json.getJSONArray("f"); + String host = json.getString("b"); + String folder = json.getString("r"); + String id = json.getString("i"); + + for (Object singleImage : imagesList) { + String hashTMP = ((JSONObject) singleImage).getString("h"); + String fileNameTMP = ((JSONObject) singleImage).getString("p"); + String imageUrlTMP = String.format("%s%s%s/%s/%s",host,folder,hashTMP,id,fileNameTMP); + urlList.add(imageUrlTMP); + } + + return urlList; + } + + @Override + protected JSONObject getFirstPage() throws IOException, URISyntaxException { + String jsonEncodedString = getJsonEncodedStringFromPage(); + String jsonDecodedString = decodeJsonString(jsonEncodedString); + return new JSONObject(jsonDecodedString); + } + + public String getJsonEncodedStringFromPage() throws MalformedURLException, IOException, URISyntaxException { + // Image data only appears on the /read/ page and not on the /view/ one. + URL readUrl = new URI(String.format("http://hentainexus.com/read/%s",getGID(url))).toURL(); + Document document = Http.url(readUrl).response().parse(); + + for (Element scripts : document.getElementsByTag("script")) { + for (DataNode dataNode : scripts.dataNodes()) { + if (dataNode.getWholeData().contains("initReader")) { + // Extract JSON encoded string from the JavaScript initReader() call. + String data = dataNode.getWholeData().trim().replaceAll("\\r|\\n|\\t",""); + + Pattern p = Pattern.compile(".*?initReader\\(\"(.*?)\",.*?\\).*?"); + Matcher m = p.matcher(data); + if (m.matches()) { + return m.group(1); + } + } + } + } + return ""; + } + + public String decodeJsonString(String jsonEncodedString) + { + /* + The initReader() JavaScript function accepts 2 parameters: a weird string and the window title (we can ignore this). 
+ The weird string is a JSON string with some bytes shifted and swapped around and then encoded in base64. + The following code is a Java adaptation of the initRender() JavaScript function after manual deobfuscation. + */ + + byte[] jsonBytes = Base64.getDecoder().decode(jsonEncodedString); + + ArrayList unknownArray = new ArrayList(); + ArrayList indexesToUse = new ArrayList<>(); + + for (int i = 0x2; unknownArray.size() < 0x10; ++i) { + if (!indexesToUse.contains(i)) { + unknownArray.add(i); + for (int j = i << 0x1; j <= 0x100; j += i) { + if (!indexesToUse.contains(j)) { + indexesToUse.add(j); + } + } + } + } + + byte magicByte = 0x0; + for (int i = 0x0; i < 0x40; i++) { + magicByte = (byte) (signedToUnsigned(magicByte) ^ signedToUnsigned(jsonBytes[i])); + for (int j = 0x0; j < 0x8; j++) { + long unsignedMagicByteTMP = signedToUnsigned(magicByte); + magicByte = (byte) ((unsignedMagicByteTMP & 0x1) == 1 ? unsignedMagicByteTMP >>> 0x1 ^ 0xc : unsignedMagicByteTMP >>> 0x1); + } + } + + magicByte = (byte) (magicByte & 0x7); + ArrayList newArray = new ArrayList<>(); + + for (int i = 0x0; i < 0x100; i++) { + newArray.add(i); + } + + int newIndex = 0, backup = 0; + for (int i = 0x0; i < 0x100; i++) { + newIndex = (newIndex + newArray.get(i) + (int) signedToUnsigned(jsonBytes[i % 0x40])) % 0x100; + backup = newArray.get(i); + newArray.set(i, newArray.get(newIndex)); + newArray.set(newIndex, backup); + } + + int magicByteTranslated = (int) unknownArray.get(magicByte); + int index1 = 0x0, index2 = 0x0, index3 = 0x0, swap1 = 0x0, xorNumber = 0x0; + String decodedJsonString = ""; + + for (int i = 0x0; i + 0x40 < jsonBytes.length; i++) { + index1 = (index1 + magicByteTranslated) % 0x100; + index2 = (index3 + newArray.get((index2 + newArray.get(index1)) % 0x100)) % 0x100; + index3 = (index3 + index1 + newArray.get(index1)) % 0x100; + swap1 = newArray.get(index1); + newArray.set(index1, newArray.get(index2)); + newArray.set(index2,swap1); + xorNumber = newArray.get((index2 + 
newArray.get((index1 + newArray.get((xorNumber + index3) % 0x100)) % 0x100)) % 0x100); + decodedJsonString += Character.toString((char) signedToUnsigned((jsonBytes[i + 0x40] ^ xorNumber))); + } + + return decodedJsonString; + } + + + private static long signedToUnsigned(int signed) { + return (byte) signed & 0xFF; + } + +} \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HentaidudeRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaidudeRipper.java index 7950f0cf..24625859 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/HentaidudeRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaidudeRipper.java @@ -10,6 +10,7 @@ import org.jsoup.nodes.Document; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; import java.net.URL; import java.util.ArrayList; import java.util.HashMap; @@ -50,12 +51,6 @@ public class HentaidudeRipper extends AbstractSingleFileRipper { "Expected hqporner URL format: " + "hentaidude.com/VIDEO - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); @@ -84,7 +79,7 @@ public class HentaidudeRipper extends AbstractSingleFileRipper { return hentaidudeThreadPool; } - private class HentaidudeDownloadThread extends Thread { + private class HentaidudeDownloadThread implements Runnable { private URL url; @@ -97,7 +92,7 @@ public class HentaidudeRipper extends AbstractSingleFileRipper { public void run() { try { Document doc = Http.url(url).get(); - URL videoSourceUrl = new URL(getVideoUrl(doc)); + URL videoSourceUrl = new URI(getVideoUrl(doc)).toURL(); addURLToDownload(videoSourceUrl, "", "", "", null, getVideoName(), "mp4"); } catch (Exception e) { LOGGER.error("Could not get video url for " + getVideoName(), e); diff 
--git a/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoxRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoxRipper.java index a4e5895d..d6dba419 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoxRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoxRipper.java @@ -2,6 +2,7 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -12,7 +13,6 @@ import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.utils.Http; public class HentaifoxRipper extends AbstractHTMLRipper { @@ -41,12 +41,6 @@ public class HentaifoxRipper extends AbstractHTMLRipper { "https://hentaifox.com/gallery/ID - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { LOGGER.info(doc); @@ -59,9 +53,9 @@ public class HentaifoxRipper extends AbstractHTMLRipper { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { - Document doc = getFirstPage(); + Document doc = getCachedFirstPage(); String title = doc.select("div.info > h1").first().text(); return getHost() + "_" + title + "_" + getGID(url); } catch (Exception e) { diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HentaiimageRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaiimageRipper.java index df7bfb96..45628e82 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/HentaiimageRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaiimageRipper.java @@ -52,13 +52,6 @@ 
public class HentaiimageRipper extends AbstractHTMLRipper { "https://hentai-image.com/image/ID - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HitomiRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HitomiRipper.java index 3196c139..d312b75b 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/HitomiRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HitomiRipper.java @@ -2,6 +2,8 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -13,7 +15,6 @@ import org.jsoup.nodes.Document; import com.rarchives.ripme.ripper.AbstractHTMLRipper; import com.rarchives.ripme.utils.Http; -import org.jsoup.nodes.Element; public class HitomiRipper extends AbstractHTMLRipper { @@ -35,20 +36,20 @@ public class HitomiRipper extends AbstractHTMLRipper { @Override public String getGID(URL url) throws MalformedURLException { - Pattern p = Pattern.compile("https://hitomi.la/galleries/([\\d]+).html"); + Pattern p = Pattern.compile("https://hitomi.la/(cg|doujinshi|gamecg|manga)/(.+).html"); Matcher m = p.matcher(url.toExternalForm()); if (m.matches()) { galleryId = m.group(1); return m.group(1); } throw new MalformedURLException("Expected hitomi URL format: " + - "https://hitomi.la/galleries/ID.html - got " + url + " instead"); + "https://hitomi.la/(cg|doujinshi|gamecg|manga)/ID.html - got " + url + " instead"); } @Override - public Document getFirstPage() throws IOException { + public Document getFirstPage() throws IOException, URISyntaxException { // if we go to /GALLERYID.js we get 
a nice json array of all images in the gallery - return Http.url(new URL(url.toExternalForm().replaceAll("hitomi", "ltn.hitomi").replaceAll(".html", ".js"))).ignoreContentType().get(); + return Http.url(new URI(url.toExternalForm().replaceAll("hitomi", "ltn.hitomi").replaceAll(".html", ".js")).toURL()).ignoreContentType().get(); } @@ -65,7 +66,7 @@ public class HitomiRipper extends AbstractHTMLRipper { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { // Attempt to use album title and username as GID Document doc = Http.url(url).get(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HqpornerRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HqpornerRipper.java index 8d13f113..0f69c75b 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/HqpornerRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HqpornerRipper.java @@ -11,6 +11,8 @@ import org.jsoup.select.Elements; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -63,9 +65,8 @@ public class HqpornerRipper extends AbstractHTMLRipper { } @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); + public Document getFirstPage() throws IOException, URISyntaxException { + return super.getFirstPage(); } @Override @@ -130,7 +131,7 @@ public class HqpornerRipper extends AbstractHTMLRipper { return true; } - private class HqpornerDownloadThread extends Thread { + private class HqpornerDownloadThread implements Runnable { private URL hqpornerVideoPageUrl; //private int index; @@ -164,10 +165,10 @@ public class HqpornerRipper extends AbstractHTMLRipper { } if (downloadUrl != null) { - addURLToDownload(new 
URL(downloadUrl), "", subdirectory, "", null, getVideoName(), "mp4"); + addURLToDownload(new URI(downloadUrl).toURL(), "", subdirectory, "", null, getVideoName(), "mp4"); } - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { LOGGER.error("[!] Exception while downloading video.", e); } } @@ -215,7 +216,7 @@ public class HqpornerRipper extends AbstractHTMLRipper { try { logger.info("Trying to download from unknown video host " + videoPageurl); - URL url = new URL(videoPageurl); + URL url = new URI(videoPageurl).toURL(); Response response = Http.url(url).referrer(hqpornerVideoPageUrl).response(); Document doc = response.parse(); @@ -245,7 +246,7 @@ public class HqpornerRipper extends AbstractHTMLRipper { } } - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { logger.error("Unable to get video url using generic methods."); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HypnohubRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HypnohubRipper.java index 5b481258..15420655 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/HypnohubRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HypnohubRipper.java @@ -46,12 +46,6 @@ public class HypnohubRipper extends AbstractHTMLRipper { "hypnohub.net/pool/show/ID - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - private String ripPost(String url) throws IOException { LOGGER.info(url); Document doc = Http.url(url).get(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagearnRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagearnRipper.java deleted file mode 100644 index 062217b2..00000000 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagearnRipper.java +++ /dev/null @@ -1,112 +0,0 @@ -package com.rarchives.ripme.ripper.rippers; - -import java.io.IOException; 
-import java.net.MalformedURLException; -import java.net.URL; -import java.util.ArrayList; -import java.util.List; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; - -import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.utils.Http; - -public class ImagearnRipper extends AbstractHTMLRipper { - - public ImagearnRipper(URL url) throws IOException { - super(url); - } - - @Override - public String getHost() { - return "imagearn"; - } - @Override - public String getDomain() { - return "imagearn.com"; - } - - @Override - public String getGID(URL url) throws MalformedURLException { - Pattern p = Pattern.compile("^.*imagearn.com/+gallery.php\\?id=([0-9]+).*$"); - Matcher m = p.matcher(url.toExternalForm()); - if (m.matches()) { - return m.group(1); - } - throw new MalformedURLException( - "Expected imagearn.com gallery formats: " - + "imagearn.com/gallery.php?id=####..." - + " Got: " + url); - } - - public URL sanitizeURL(URL url) throws MalformedURLException { - Pattern p = Pattern.compile("^.*imagearn.com/+image.php\\?id=[0-9]+.*$"); - Matcher m = p.matcher(url.toExternalForm()); - if (m.matches()) { - // URL points to imagearn *image*, not gallery - try { - url = getGalleryFromImage(url); - } catch (Exception e) { - LOGGER.error("[!] " + e.getMessage(), e); - } - } - return url; - } - - private URL getGalleryFromImage(URL url) throws IOException { - Document doc = Http.url(url).get(); - for (Element link : doc.select("a[href~=^gallery\\.php.*$]")) { - LOGGER.info("LINK: " + link.toString()); - if (link.hasAttr("href") - && link.attr("href").contains("gallery.php")) { - url = new URL("http://imagearn.com/" + link.attr("href")); - LOGGER.info("[!] 
Found gallery from given link: " + url); - return url; - } - } - throw new IOException("Failed to find gallery at URL " + url); - } - - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - - @Override - public String getAlbumTitle(URL url) throws MalformedURLException { - try { - Document doc = getFirstPage(); - String title = doc.select("h3 > strong").first().text(); // profile name - return getHost() + "_" + title + "_" + getGID(url); - } catch (Exception e) { - // Fall back to default album naming convention - LOGGER.warn("Failed to get album title from " + url, e); - } - return super.getAlbumTitle(url); - } - - @Override - public List getURLsFromPage(Document doc) { - List imageURLs = new ArrayList<>(); - for (Element thumb : doc.select("div#gallery > div > a")) { - String imageURL = thumb.attr("href"); - try { - Document imagedoc = new Http("http://imagearn.com/" + imageURL).get(); - String image = imagedoc.select("a.thickbox").first().attr("href"); - imageURLs.add(image); - } catch (IOException e) { - LOGGER.warn("Was unable to download page: " + imageURL); - } - } - return imageURLs; - } - - @Override - public void downloadURL(URL url, int index) { - addURLToDownload(url, getPrefix(index)); - sleep(1000); - } -} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagebamRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagebamRipper.java index 3aca67cf..0699273f 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagebamRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagebamRipper.java @@ -6,20 +6,24 @@ import com.rarchives.ripme.utils.Http; import com.rarchives.ripme.utils.Utils; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.regex.Matcher; 
import java.util.regex.Pattern; + +import org.apache.commons.lang.StringUtils; +import org.jsoup.Jsoup; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; import org.jsoup.select.Elements; public class ImagebamRipper extends AbstractHTMLRipper { - // Current HTML document - private Document albumDoc = null; - // Thread pool for finding direct image links from "image" pages (html) private DownloadThreadPool imagebamThreadPool = new DownloadThreadPool("imagebam"); @Override @@ -45,7 +49,7 @@ public class ImagebamRipper extends AbstractHTMLRipper { Pattern p; Matcher m; - p = Pattern.compile("^https?://[wm.]*imagebam.com/gallery/([a-zA-Z0-9]+).*$"); + p = Pattern.compile("^https?://[wm.]*imagebam.com/(gallery|view)/([a-zA-Z0-9]+).*$"); m = p.matcher(url.toExternalForm()); if (m.matches()) { return m.group(1); @@ -57,14 +61,6 @@ public class ImagebamRipper extends AbstractHTMLRipper { + " Got: " + url); } - @Override - public Document getFirstPage() throws IOException { - if (albumDoc == null) { - albumDoc = Http.url(url).get(); - } - return albumDoc; - } - @Override public Document getNextPage(Document doc) throws IOException { // Find next page @@ -80,7 +76,7 @@ public class ImagebamRipper extends AbstractHTMLRipper { @Override public List getURLsFromPage(Document doc) { List imageURLs = new ArrayList<>(); - for (Element thumb : doc.select("div > a[target=_blank]:not(.footera)")) { + for (Element thumb : doc.select("div > a[class=thumbnail]:not(.footera)")) { imageURLs.add(thumb.attr("href")); } return imageURLs; @@ -94,18 +90,15 @@ public class ImagebamRipper extends AbstractHTMLRipper { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { // Attempt to use album title as GID - Elements elems = getFirstPage().select("legend"); + Elements elems = getCachedFirstPage().select("[id=gallery-name]"); String title = 
elems.first().text(); LOGGER.info("Title text: '" + title + "'"); - Pattern p = Pattern.compile("^(.*)\\s\\d* image.*$"); - Matcher m = p.matcher(title); - if (m.matches()) { - return getHost() + "_" + getGID(url) + " (" + m.group(1).trim() + ")"; + if (StringUtils.isNotBlank(title)) { + return getHost() + "_" + getGID(url) + " (" + title + ")"; } - LOGGER.info("Doesn't match " + p.pattern()); } catch (Exception e) { // Fall back to default album naming convention LOGGER.warn("Failed to get album title from " + url, e); @@ -118,9 +111,9 @@ public class ImagebamRipper extends AbstractHTMLRipper { * * Handles case when site has IP-banned the user. */ - private class ImagebamImageThread extends Thread { - private URL url; //link to "image page" - private int index; //index in album + private class ImagebamImageThread implements Runnable { + private final URL url; //link to "image page" + private final int index; //index in album ImagebamImageThread(URL url, int index) { super(); @@ -138,19 +131,19 @@ public class ImagebamRipper extends AbstractHTMLRipper { */ private void fetchImage() { try { - Document doc = Http.url(url).get(); + Map cookies = new HashMap<>(); + cookies.put("nsfw_inter", "1"); + Document doc = Jsoup.connect(url.toString()) + .cookies(cookies) + .get(); + // Find image Elements metaTags = doc.getElementsByTag("meta"); String imgsrc = "";//initialize, so no NullPointerExceptions should ever happen. - - for (Element metaTag: metaTags) { - //the direct link to the image seems to always be linked in the part of the html. - if (metaTag.attr("property").equals("og:image")) { - imgsrc = metaTag.attr("content"); - LOGGER.info("Found URL " + imgsrc); - break;//only one (useful) image possible for an "image page". - } + Elements elem = doc.select("img[class*=main-image]"); + if ((elem != null) && (elem.size() > 0)) { + imgsrc = elem.first().attr("src"); } //for debug, or something goes wrong. 
@@ -165,8 +158,8 @@ public class ImagebamRipper extends AbstractHTMLRipper { prefix = String.format("%03d_", index); } - addURLToDownload(new URL(imgsrc), prefix); - } catch (IOException e) { + addURLToDownload(new URI(imgsrc).toURL(), prefix); + } catch (IOException | URISyntaxException e) { LOGGER.error("[!] Exception while loading/parsing " + this.url, e); } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagefapRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagefapRipper.java index f097e667..4fcf2201 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagefapRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagefapRipper.java @@ -1,8 +1,13 @@ package com.rarchives.ripme.ripper.rippers; +import java.io.File; +import java.io.FileWriter; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; +import java.text.MessageFormat; import java.util.ArrayList; import java.util.List; import java.util.regex.Matcher; @@ -10,14 +15,26 @@ import java.util.regex.Pattern; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; +import org.jsoup.select.Elements; import com.rarchives.ripme.ripper.AbstractHTMLRipper; +import com.rarchives.ripme.ui.RipStatusMessage.STATUS; import com.rarchives.ripme.utils.Http; public class ImagefapRipper extends AbstractHTMLRipper { - private Document albumDoc = null; - private boolean isNewAlbumType = false; + private int callsMade = 0; + private long startTime = System.nanoTime(); + + private static final int RETRY_LIMIT = 10; + private static final int HTTP_RETRY_LIMIT = 3; + private static final int RATE_LIMIT_HOUR = 1000; + + // All sleep times are in milliseconds + private static final int PAGE_SLEEP_TIME = 60 * 60 * 1000 / RATE_LIMIT_HOUR; + private static final int IMAGE_SLEEP_TIME = 60 * 60 * 1000 / RATE_LIMIT_HOUR; + // Timeout when blocked = 1 hours. 
Retry every retry within the hour mark + 1 time after the hour mark. + private static final int IP_BLOCK_SLEEP_TIME = (int) Math.round((double) 60 / (RETRY_LIMIT - 1) * 60 * 1000); public ImagefapRipper(URL url) throws IOException { super(url); @@ -36,54 +53,40 @@ public class ImagefapRipper extends AbstractHTMLRipper { * Reformat given URL into the desired format (all images on single page) */ @Override - public URL sanitizeURL(URL url) throws MalformedURLException { + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { String gid = getGID(url); - String newURL = "http://www.imagefap.com/gallery.php?"; - if (isNewAlbumType) { - newURL += "p"; - } - newURL += "gid=" + gid + "&view=2"; + String newURL = "https://www.imagefap.com/pictures/" + gid + "/random-string"; LOGGER.debug("Changed URL from " + url + " to " + newURL); - return new URL(newURL); + return new URI(newURL).toURL(); } @Override public String getGID(URL url) throws MalformedURLException { Pattern p; Matcher m; + // Old format (I suspect no longer supported) p = Pattern.compile("^.*imagefap.com/gallery.php\\?pgid=([a-f0-9]+).*$"); m = p.matcher(url.toExternalForm()); if (m.matches()) { - isNewAlbumType = true; return m.group(1); } + p = Pattern.compile("^.*imagefap.com/gallery.php\\?gid=([0-9]+).*$"); m = p.matcher(url.toExternalForm()); if (m.matches()) { return m.group(1); } - p = Pattern.compile("^.*imagefap.com/pictures/([0-9]+).*$"); - m = p.matcher(url.toExternalForm()); - if (m.matches()) { - return m.group(1); - } - p = Pattern.compile("^.*imagefap.com/pictures/([a-f0-9]+).*$"); - m = p.matcher(url.toExternalForm()); - if (m.matches()) { - isNewAlbumType = true; - return m.group(1); - } - - p = Pattern.compile("^.*imagefap.com/gallery/([0-9]+).*$"); - m = p.matcher(url.toExternalForm()); - if (m.matches()) { - return m.group(1); - } p = Pattern.compile("^.*imagefap.com/gallery/([a-f0-9]+).*$"); m = p.matcher(url.toExternalForm()); if (m.matches()) { - 
isNewAlbumType = true; + return m.group(1); + } + + // most recent format + p = Pattern.compile("^.*imagefap.com/pictures/([a-f0-9]+).*$"); + m = p.matcher(url.toExternalForm()); + if (m.matches()) { return m.group(1); } @@ -96,41 +99,72 @@ public class ImagefapRipper extends AbstractHTMLRipper { @Override public Document getFirstPage() throws IOException { - if (albumDoc == null) { - albumDoc = Http.url(url).get(); - } - return albumDoc; + + Document firstPage = getPageWithRetries(url); + + sendUpdate(STATUS.LOADING_RESOURCE, "Loading first page..."); + + return firstPage; } @Override - public Document getNextPage(Document doc) throws IOException { + public Document getNextPage(Document doc) throws IOException, URISyntaxException { String nextURL = null; for (Element a : doc.select("a.link3")) { if (a.text().contains("next")) { - nextURL = "http://imagefap.com/gallery.php" + a.attr("href"); + nextURL = this.sanitizeURL(this.url) + a.attr("href"); break; } } if (nextURL == null) { throw new IOException("No next page found"); } - sleep(1000); - return Http.url(nextURL).get(); + // Sleep before fetching next page. + sleep(PAGE_SLEEP_TIME); + + sendUpdate(STATUS.LOADING_RESOURCE, "Loading next page URL: " + nextURL); + LOGGER.info("Attempting to load next page URL: " + nextURL); + + // Load next page + Document nextPage = getPageWithRetries(new URI(nextURL).toURL()); + + return nextPage; } @Override public List getURLsFromPage(Document doc) { + List imageURLs = new ArrayList<>(); + + LOGGER.debug("Trying to get URLs from document... 
"); + for (Element thumb : doc.select("#gallery img")) { if (!thumb.hasAttr("src") || !thumb.hasAttr("width")) { continue; } String image = getFullSizedImage("https://www.imagefap.com" + thumb.parent().attr("href")); + + if (image == null) { + for (int i = 0; i < HTTP_RETRY_LIMIT; i++) { + image = getFullSizedImage("https://www.imagefap.com" + thumb.parent().attr("href")); + if (image != null) { + break; + } + sleep(PAGE_SLEEP_TIME); + } + if (image == null) + throw new RuntimeException("Unable to extract image URL from single image page! Unable to continue"); + } + + LOGGER.debug("Adding imageURL: '" + image + "'"); + imageURLs.add(image); if (isThisATest()) { break; } } + LOGGER.debug("Adding " + imageURLs.size() + " URLs to download"); + return imageURLs; } @@ -141,10 +175,10 @@ public class ImagefapRipper extends AbstractHTMLRipper { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { // Attempt to use album title as GID - String title = getFirstPage().title(); + String title = getCachedFirstPage().title(); title = title.replace("Porn Pics & Porn GIFs", ""); title = title.replace(" ", "_"); String toReturn = getHost() + "_" + title + "_" + getGID(url); @@ -156,11 +190,128 @@ public class ImagefapRipper extends AbstractHTMLRipper { private String getFullSizedImage(String pageURL) { try { - Document doc = Http.url(pageURL).get(); - return doc.select("img#mainPhoto").attr("src"); - } catch (IOException e) { + // Sleep before fetching image. 
+ sleep(IMAGE_SLEEP_TIME); + + Document doc = getPageWithRetries(new URI(pageURL).toURL()); + + String framedPhotoUrl = doc.select("img#mainPhoto").attr("data-src"); + + // we use a no query param version of the URL to reduce failure rate because of some query params that change between the li elements and the mainPhotoURL + String noQueryPhotoUrl = framedPhotoUrl.split("\\?")[0]; + + LOGGER.debug("noQueryPhotoUrl: " + noQueryPhotoUrl); + + // we look for a li > a element who's framed attribute starts with the noQueryPhotoUrl (only reference in the page to the full URL) + Elements selectedItem = doc.select("ul.thumbs > li > a[framed^='"+noQueryPhotoUrl+"']"); + + // the fullsize URL is in the href attribute + String fullSizedUrl = selectedItem.attr("href"); + + if("".equals(fullSizedUrl)) + throw new IOException("JSoup full URL extraction failed from '" + selectedItem.html() + "'"); + + LOGGER.debug("fullSizedUrl: " + fullSizedUrl); + + return fullSizedUrl; + + } catch (IOException | URISyntaxException e) { + LOGGER.debug("Unable to get full size image URL from page: " + pageURL + " because: " + e.getMessage()); return null; } } + /** + * Attempts to get page, checks for IP ban, waits. + * @param url + * @return Page document + * @throws IOException If page loading errors, or if retries are exhausted + */ + private Document getPageWithRetries(URL url) throws IOException { + Document doc = null; + int retries = RETRY_LIMIT; + while (true) { + + sendUpdate(STATUS.LOADING_RESOURCE, url.toExternalForm()); + + // For debugging rate limit checker. Useful to track wheter the timeout should be altered or not. 
+ callsMade++; + checkRateLimit(); + + LOGGER.info("Retrieving " + url); + + boolean httpCallThrottled = false; + int httpAttempts = 0; + + // we attempt the http call, knowing it can fail for network reasons + while(true) { + httpAttempts++; + try { + doc = Http.url(url).get(); + } catch(IOException e) { + + LOGGER.info("Retrieving " + url + " error: " + e.getMessage()); + + if(e.getMessage().contains("404")) + throw new IOException("Gallery/Page not found!"); + + if(httpAttempts < HTTP_RETRY_LIMIT) { + sendUpdate(STATUS.DOWNLOAD_WARN, "HTTP call failed: " + e.getMessage() + " retrying " + httpAttempts + " / " + HTTP_RETRY_LIMIT); + + // we sleep for a few seconds + sleep(PAGE_SLEEP_TIME); + continue; + } else { + sendUpdate(STATUS.DOWNLOAD_WARN, "HTTP call failed too many times: " + e.getMessage() + " treating this as a throttle"); + httpCallThrottled = true; + } + } + // no errors, we exit + break; + } + + if (httpCallThrottled || (doc != null && doc.toString().contains("Your IP made too many requests to our servers and we need to check that you are a real human being"))) { + if (retries == 0) { + throw new IOException("Hit rate limit and maximum number of retries, giving up"); + } + String message = "Probably hit rate limit while loading " + url + ", sleeping for " + IP_BLOCK_SLEEP_TIME + "ms, " + retries + " retries remaining"; + LOGGER.warn(message); + sendUpdate(STATUS.DOWNLOAD_WARN, message); + retries--; + try { + Thread.sleep(IP_BLOCK_SLEEP_TIME); + } catch (InterruptedException e) { + throw new IOException("Interrupted while waiting for rate limit to subside"); + } + } else { + return doc; + } + } + } + + /** + * Used for debugging the rate limit issue. + * This in order to prevent hitting the rate limit altoghether by remaining under the limit threshold. 
+ * @return Long duration + */ + private long checkRateLimit() { + long endTime = System.nanoTime(); + long duration = (endTime - startTime) / 1000000; + + int rateLimitMinute = 100; + int rateLimitFiveMinutes = 200; + int rateLimitHour = RATE_LIMIT_HOUR; // Request allowed every 3.6 seconds. + + if(duration / 1000 < 60){ + LOGGER.debug("Rate limit: " + (rateLimitMinute - callsMade) + " calls remaining for first minute mark."); + } else if(duration / 1000 < 300){ + LOGGER.debug("Rate limit: " + (rateLimitFiveMinutes - callsMade) + " calls remaining for first 5 minute mark."); + } else if(duration / 1000 < 3600){ + LOGGER.debug("Rate limit: " + (RATE_LIMIT_HOUR - callsMade) + " calls remaining for first hour mark."); + } + + return duration; + } + + } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagevenueRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagevenueRipper.java index f50a84a0..4691c7c6 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagevenueRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagevenueRipper.java @@ -2,6 +2,8 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -56,11 +58,6 @@ public class ImagevenueRipper extends AbstractHTMLRipper { + " Got: " + url); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - public List getURLsFromPage(Document doc) { List imageURLs = new ArrayList<>(); for (Element thumb : doc.select("a[target=_blank]")) { @@ -79,9 +76,9 @@ public class ImagevenueRipper extends AbstractHTMLRipper { * * Handles case when site has IP-banned the user. 
*/ - private class ImagevenueImageThread extends Thread { - private URL url; - private int index; + private class ImagevenueImageThread implements Runnable { + private final URL url; + private final int index; ImagevenueImageThread(URL url, int index) { super(); @@ -113,8 +110,8 @@ public class ImagevenueRipper extends AbstractHTMLRipper { if (Utils.getConfigBoolean("download.save_order", true)) { prefix = String.format("%03d_", index); } - addURLToDownload(new URL(imgsrc), prefix); - } catch (IOException e) { + addURLToDownload(new URI(imgsrc).toURL(), prefix); + } catch (IOException | URISyntaxException e) { LOGGER.error("[!] Exception while loading/parsing " + this.url, e); } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImgboxRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImgboxRipper.java index f3050a13..b32fcad4 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImgboxRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ImgboxRipper.java @@ -40,10 +40,6 @@ public class ImgboxRipper extends AbstractHTMLRipper { "imgbox.com/g/albumid - got " + url + "instead"); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } @Override public List getURLsFromPage(Document doc) { List imageURLs = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImgurRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImgurRipper.java index 93cb809e..4904ac60 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImgurRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ImgurRipper.java @@ -1,10 +1,14 @@ package com.rarchives.ripme.ripper.rippers; -import java.io.File; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; +import java.nio.file.Files; +import java.nio.file.Path; import java.util.ArrayList; +import java.util.Arrays; 
import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -15,15 +19,15 @@ import org.json.JSONObject; import org.jsoup.Jsoup; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; -import org.jsoup.safety.Whitelist; +import org.jsoup.safety.Safelist; import org.jsoup.select.Elements; -import com.rarchives.ripme.ripper.AlbumRipper; +import com.rarchives.ripme.ripper.AbstractHTMLRipper; import com.rarchives.ripme.ui.RipStatusMessage.STATUS; import com.rarchives.ripme.utils.Http; import com.rarchives.ripme.utils.Utils; -public class ImgurRipper extends AlbumRipper { +public class ImgurRipper extends AbstractHTMLRipper { private static final String DOMAIN = "imgur.com", HOST = "imgur"; @@ -38,7 +42,6 @@ public class ImgurRipper extends AlbumRipper { USER_ALBUM, USER_IMAGES, SINGLE_IMAGE, - SERIES_OF_IMAGES, SUBREDDIT } @@ -58,6 +61,7 @@ public class ImgurRipper extends AlbumRipper { return albumType == ALBUM_TYPE.USER; } + @Override public boolean canRip(URL url) { if (!url.getHost().endsWith(DOMAIN)) { return false; @@ -71,7 +75,24 @@ public class ImgurRipper extends AlbumRipper { return true; } - public URL sanitizeURL(URL url) throws MalformedURLException { + @Override + protected String getDomain() { + return DOMAIN; + } + + @Override + protected void downloadURL(URL url, int index) { + // No-op as we override rip() method + } + + @Override + protected List getURLsFromPage(Document page) { + // No-op as we override rip() method + return Arrays.asList(); + } + + @Override + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { String u = url.toExternalForm(); if (u.indexOf('#') >= 0) { u = u.substring(0, u.indexOf('#')); @@ -79,11 +100,17 @@ public class ImgurRipper extends AlbumRipper { u = u.replace("imgur.com/gallery/", "imgur.com/a/"); u = u.replace("https?://m\\.imgur\\.com", "http://imgur.com"); u = u.replace("https?://i\\.imgur\\.com", "http://imgur.com"); - return new URL(u); + return 
new URI(u).toURL(); } + @Override public String getAlbumTitle(URL url) throws MalformedURLException { - String gid = getGID(url); + String gid = null; + try { + gid = getGID(url); + } catch (URISyntaxException e) { + throw new MalformedURLException(e.getMessage()); + } if (this.albumType == ALBUM_TYPE.ALBUM) { try { // Attempt to use album title as GID @@ -91,7 +118,7 @@ public class ImgurRipper extends AlbumRipper { albumDoc = Http.url(url).get(); } - Elements elems = null; + Elements elems; /* // TODO: Add config option for including username in album title. @@ -106,15 +133,13 @@ public class ImgurRipper extends AlbumRipper { } */ - String title = null; + String title; final String defaultTitle1 = "Imgur: The most awesome images on the Internet"; final String defaultTitle2 = "Imgur: The magic of the Internet"; LOGGER.info("Trying to get album title"); elems = albumDoc.select("meta[property=og:title]"); - if (elems != null) { - title = elems.attr("content"); - LOGGER.debug("Title is " + title); - } + title = elems.attr("content"); + LOGGER.debug("Title is " + title); // This is here encase the album is unnamed, to prevent // Imgur: The most awesome images on the Internet from being added onto the album name if (title.contains(defaultTitle1) || title.contains(defaultTitle2)) { @@ -124,27 +149,17 @@ public class ImgurRipper extends AlbumRipper { title = ""; LOGGER.debug("Trying to use title tag to get title"); elems = albumDoc.select("title"); - if (elems != null) { - if (elems.text().contains(defaultTitle1) || elems.text().contains(defaultTitle2)) { - LOGGER.debug("Was unable to get album title or album was untitled"); - } - else { - title = elems.text(); - } + if (elems.text().contains(defaultTitle1) || elems.text().contains(defaultTitle2)) { + LOGGER.debug("Was unable to get album title or album was untitled"); + } + else { + title = elems.text(); } } String albumTitle = "imgur_"; - /* - // TODO: Add config option (see above) - if (user != null) { - albumTitle += 
"user_" + user; - } - */ albumTitle += gid; - if (title != null) { - albumTitle += "_" + title; - } + albumTitle += "_" + title; return albumTitle; } catch (IOException e) { @@ -156,118 +171,83 @@ public class ImgurRipper extends AlbumRipper { @Override public void rip() throws IOException { - switch (albumType) { - case ALBUM: - // Fall-through - case USER_ALBUM: - LOGGER.info("Album type is USER_ALBUM"); - // Don't call getAlbumTitle(this.url) with this - // as it seems to cause the album to be downloaded to a subdir. - ripAlbum(this.url); - break; - case SERIES_OF_IMAGES: - LOGGER.info("Album type is SERIES_OF_IMAGES"); - ripAlbum(this.url); - break; - case SINGLE_IMAGE: - LOGGER.info("Album type is SINGLE_IMAGE"); - ripSingleImage(this.url); - break; - case USER: - LOGGER.info("Album type is USER"); - ripUserAccount(url); - break; - case SUBREDDIT: - LOGGER.info("Album type is SUBREDDIT"); - ripSubreddit(url); - break; - case USER_IMAGES: - LOGGER.info("Album type is USER_IMAGES"); - ripUserImages(url); - break; + try { + switch (albumType) { + case ALBUM: + // Fall-through + case USER_ALBUM: + LOGGER.info("Album type is USER_ALBUM"); + // Don't call getAlbumTitle(this.url) with this + // as it seems to cause the album to be downloaded to a subdir. 
+ ripAlbum(this.url); + break; + case SINGLE_IMAGE: + LOGGER.info("Album type is SINGLE_IMAGE"); + ripSingleImage(this.url); + break; + case USER: + LOGGER.info("Album type is USER"); + ripUserAccount(url); + break; + case SUBREDDIT: + LOGGER.info("Album type is SUBREDDIT"); + ripSubreddit(url); + break; + case USER_IMAGES: + LOGGER.info("Album type is USER_IMAGES"); + ripUserImages(url); + break; + } + } catch (URISyntaxException e) { + throw new IOException("Failed ripping " + this.url, e); } waitForThreads(); } - private void ripSingleImage(URL url) throws IOException { + private void ripSingleImage(URL url) throws IOException, URISyntaxException { String strUrl = url.toExternalForm(); - Document document = getDocument(strUrl); - Matcher m = getEmbeddedJsonMatcher(document); - if (m.matches()) { - JSONObject json = new JSONObject(m.group(1)).getJSONObject("image"); - addURLToDownload(extractImageUrlFromJson(json), ""); + var gid = getGID(url); + var json = getSingleImageData(String.format("https://api.imgur.com/post/v1/media/%s?include=media,adconfig,account", gid)); + var media = json.getJSONArray("media"); + if (media.length()==0) { + throw new IOException(String.format("Failed to fetch image for url %s", strUrl)); + } + if (media.length()>1) { + LOGGER.warn(String.format("Got multiple images for url %s", strUrl)); } + addURLToDownload(extractImageUrlFromJson((JSONObject)media.get(0)), ""); } - private void ripAlbum(URL url) throws IOException { + private void ripAlbum(URL url) throws IOException, URISyntaxException { ripAlbum(url, ""); } - private void ripAlbum(URL url, String subdirectory) throws IOException { - int index = 0; + private void ripAlbum(URL url, String subdirectory) throws IOException, URISyntaxException { + int index; this.sendUpdate(STATUS.LOADING_RESOURCE, url.toExternalForm()); index = 0; ImgurAlbum album = getImgurAlbum(url); for (ImgurImage imgurImage : album.images) { stopCheck(); - String saveAs = workingDir.getCanonicalPath(); - if 
(!saveAs.endsWith(File.separator)) { - saveAs += File.separator; - } + Path saveAs = workingDir.toPath(); if (subdirectory != null && !subdirectory.equals("")) { - saveAs += subdirectory; + saveAs = saveAs.resolve(subdirectory); } - if (!saveAs.endsWith(File.separator)) { - saveAs += File.separator; - } - File subdirFile = new File(saveAs); - if (!subdirFile.exists()) { - subdirFile.mkdirs(); + if (!Files.exists(saveAs)) { + Files.createDirectory(saveAs); } index += 1; + var imgPath = imgurImage.getSaveAs().replaceAll("\\?\\d", ""); if (Utils.getConfigBoolean("download.save_order", true)) { - saveAs += String.format("%03d_", index); + saveAs = saveAs.resolve(String.format("%03d_%s", index, imgPath)); + } else { + saveAs = saveAs.resolve(imgPath); } - saveAs += imgurImage.getSaveAs(); - saveAs = saveAs.replaceAll("\\?\\d", ""); - addURLToDownload(imgurImage.url, new File(saveAs)); + addURLToDownload(imgurImage.url, saveAs); } } - public static ImgurAlbum getImgurSeries(URL url) throws IOException { - Pattern p = Pattern.compile("^.*imgur\\.com/([a-zA-Z0-9,]*).*$"); - Matcher m = p.matcher(url.toExternalForm()); - ImgurAlbum album = new ImgurAlbum(url); - if (m.matches()) { - String[] imageIds = m.group(1).split(","); - for (String imageId : imageIds) { - // TODO: Fetch image with ID imageId - LOGGER.debug("Fetching image info for ID " + imageId); - try { - JSONObject json = Http.url("https://api.imgur.com/2/image/" + imageId + ".json").getJSON(); - if (!json.has("image")) { - continue; - } - JSONObject image = json.getJSONObject("image"); - if (!image.has("links")) { - continue; - } - JSONObject links = image.getJSONObject("links"); - if (!links.has("original")) { - continue; - } - String original = links.getString("original"); - ImgurImage theImage = new ImgurImage(new URL(original)); - album.addImage(theImage); - } catch (Exception e) { - LOGGER.error("Got exception while fetching imgur ID " + imageId, e); - } - } - } - return album; - } - - public static 
ImgurAlbum getImgurAlbum(URL url) throws IOException { + public static ImgurAlbum getImgurAlbum(URL url) throws IOException, URISyntaxException { String strUrl = url.toExternalForm(); if (!strUrl.contains(",")) { strUrl += "/all"; @@ -275,13 +255,11 @@ public class ImgurRipper extends AlbumRipper { LOGGER.info(" Retrieving " + strUrl); Document doc = getAlbumData("https://api.imgur.com/3/album/" + strUrl.split("/a/")[1]); // Try to use embedded JSON to retrieve images - LOGGER.info(Jsoup.clean(doc.body().toString(), Whitelist.none())); - try { - JSONObject json = new JSONObject(Jsoup.clean(doc.body().toString(), Whitelist.none())); + JSONObject json = new JSONObject(Jsoup.clean(doc.body().toString(), Safelist.none())); JSONArray jsonImages = json.getJSONObject("data").getJSONArray("images"); return createImgurAlbumFromJsonArray(url, jsonImages); - } catch (JSONException e) { + } catch (JSONException | URISyntaxException e) { LOGGER.debug("Error while parsing JSON at " + url + ", continuing", e); } @@ -309,54 +287,48 @@ public class ImgurRipper extends AlbumRipper { image = "http:" + thumb.select("img").attr("src"); } else { // Unable to find image in this div - LOGGER.error("[!] Unable to find image in div: " + thumb.toString()); + LOGGER.error("[!] 
Unable to find image in div: " + thumb); continue; } if (image.endsWith(".gif") && Utils.getConfigBoolean("prefer.mp4", false)) { image = image.replace(".gif", ".mp4"); } - ImgurImage imgurImage = new ImgurImage(new URL(image)); + ImgurImage imgurImage = new ImgurImage(new URI(image).toURL()); imgurAlbum.addImage(imgurImage); } return imgurAlbum; } - private static Matcher getEmbeddedJsonMatcher(Document doc) { - Pattern p = Pattern.compile("^.*widgetFactory.mergeConfig\\('gallery', (.*?)\\);.*$", Pattern.DOTALL); - return p.matcher(doc.body().html()); - } - - private static ImgurAlbum createImgurAlbumFromJsonArray(URL url, JSONArray jsonImages) throws MalformedURLException { + private static ImgurAlbum createImgurAlbumFromJsonArray(URL url, JSONArray jsonImages) throws MalformedURLException, URISyntaxException { ImgurAlbum imgurAlbum = new ImgurAlbum(url); int imagesLength = jsonImages.length(); for (int i = 0; i < imagesLength; i++) { JSONObject ob = jsonImages.getJSONObject(i); - imgurAlbum.addImage(new ImgurImage( new URL(ob.getString("link")))); + imgurAlbum.addImage(new ImgurImage( new URI(ob.getString("link")).toURL())); } return imgurAlbum; } - private static ImgurImage createImgurImageFromJson(JSONObject json) throws MalformedURLException { - return new ImgurImage(extractImageUrlFromJson(json)); - } - - private static URL extractImageUrlFromJson(JSONObject json) throws MalformedURLException { + private static URL extractImageUrlFromJson(JSONObject json) throws MalformedURLException, URISyntaxException { String ext = json.getString("ext"); + if (!ext.startsWith(".")) { + ext = "." 
+ ext; + } if (ext.equals(".gif") && Utils.getConfigBoolean("prefer.mp4", false)) { ext = ".mp4"; } - return new URL( - "http://i.imgur.com/" - + json.getString("hash") - + ext); + return new URI( + "https://i.imgur.com/" + + json.getString("id") + + ext).toURL(); } - private static Document getDocument(String strUrl) throws IOException { - return Jsoup.connect(strUrl) + private static JSONObject getSingleImageData(String strUrl) throws IOException { + return Http.url(strUrl) .userAgent(USER_AGENT) .timeout(10 * 1000) - .maxBodySize(0) - .get(); + .header("Authorization", "Client-ID " + Utils.getConfigString("imgur.client_id", "546c25a59c58ad7")) + .getJSON(); } private static Document getAlbumData(String strUrl) throws IOException { @@ -369,35 +341,71 @@ public class ImgurRipper extends AlbumRipper { .get(); } + private static JSONObject getUserData(String userUrl) throws IOException { + return Http.url(userUrl) + .userAgent(USER_AGENT) + .timeout(10 * 1000) + .header("Authorization", "Client-ID " + Utils.getConfigString("imgur.client_id", "546c25a59c58ad7")) + .getJSON(); + } + /** * Rips all albums in an imgur user's account. 
* @param url - * URL to imgur user account (http://username.imgur.com) - * @throws IOException + * URL to imgur user account (http://username.imgur.com | https://imgur.com/user/username) */ - private void ripUserAccount(URL url) throws IOException { + private void ripUserAccount(URL url) throws IOException, URISyntaxException { + int cPage = -1, cImage = 0; + String apiUrl = "https://api.imgur.com/3/account/%s/submissions/%d/newest?album_previews=1"; + // Strip 'user_' from username + var username = getGID(url).replace("user_", ""); LOGGER.info("Retrieving " + url); sendUpdate(STATUS.LOADING_RESOURCE, url.toExternalForm()); - Document doc = Http.url(url).get(); - for (Element album : doc.select("div.cover a")) { - stopCheck(); - if (!album.hasAttr("href") - || !album.attr("href").contains("imgur.com/a/")) { - continue; + + while (true) { + cPage += 1; + var pageUrl = String.format(apiUrl, username, cPage); + var json = getUserData(pageUrl); + var success = json.getBoolean("success"); + var status = json.getInt("status"); + if (!success || status!=200) { + throw new IOException(String.format("Unexpected status code %d for url %s and page %d", status, url, cPage)); } - String albumID = album.attr("href").substring(album.attr("href").lastIndexOf('/') + 1); - URL albumURL = new URL("http:" + album.attr("href") + "/noscript"); - try { - ripAlbum(albumURL, albumID); - Thread.sleep(SLEEP_BETWEEN_ALBUMS * 1000); - } catch (Exception e) { - LOGGER.error("Error while ripping album: " + e.getMessage(), e); + var data = json.getJSONArray("data"); + if (data.isEmpty()) { + // Data array is empty for pages beyond the last page + break; + } + for (int i = 0; i < data.length(); i++) { + cImage += 1; + String prefixOrSubdir = ""; + if (Utils.getConfigBoolean("download.save_order", true)) { + prefixOrSubdir = String.format("%03d_", cImage); + } + var d = (JSONObject)data.get(i); + var l = d.getString("link"); + if (d.getBoolean("is_album")) { + // For album links with multiple 
images create a prefixed folder with album id + prefixOrSubdir += d.getString("id"); + ripAlbum(new URI(l).toURL(), prefixOrSubdir); + try { + Thread.sleep(SLEEP_BETWEEN_ALBUMS * 1000L); + } catch (InterruptedException e) { + LOGGER.error(String.format("Error! Interrupted ripping album %s for user account %s", l, username), e); + } + } else { + // For direct links + if (d.has("mp4") && Utils.getConfigBoolean("prefer.mp4", false)) { + l = d.getString("mp4"); + } + addURLToDownload(new URI(l).toURL(), prefixOrSubdir); + } } } } - private void ripUserImages(URL url) throws IOException { + private void ripUserImages(URL url) { int page = 0; int imagesFound = 0; int imagesTotal = 0; String jsonUrl = url.toExternalForm().replace("/all", "/ajax/images"); if (jsonUrl.contains("#")) { @@ -417,12 +425,12 @@ public class ImgurRipper extends AlbumRipper { for (int i = 0; i < images.length(); i++) { imagesFound++; JSONObject image = images.getJSONObject(i); - String imageUrl = "http://i.imgur.com/" + image.getString("hash") + image.getString("ext"); + String imageUrl = "https://i.imgur.com/" + image.getString("hash") + image.getString("ext"); String prefix = ""; if (Utils.getConfigBoolean("download.save_order", true)) { prefix = String.format("%03d_", imagesFound); } - addURLToDownload(new URL(imageUrl), prefix); + addURLToDownload(new URI(imageUrl).toURL(), prefix); } if (imagesFound >= imagesTotal) { break; @@ -435,7 +443,7 @@ public class ImgurRipper extends AlbumRipper { } } - private void ripSubreddit(URL url) throws IOException { + private void ripSubreddit(URL url) throws IOException, URISyntaxException { int page = 0; while (true) { stopCheck(); @@ -455,7 +463,7 @@ public class ImgurRipper extends AlbumRipper { if (image.contains("b.")) { image = image.replace("b.", "."); } - URL imageURL = new URL(image); + URL imageURL = new URI(image).toURL(); addURLToDownload(imageURL); } if (imgs.isEmpty()) { @@ -477,29 +485,30 @@ public class ImgurRipper extends AlbumRipper { } 
@Override - public String getGID(URL url) throws MalformedURLException { - Pattern p = null; - Matcher m = null; + public String getGID(URL url) throws MalformedURLException, URISyntaxException { + Pattern p; + Matcher m; - p = Pattern.compile("^https?://(www\\.|m\\.)?imgur\\.com/(a|gallery)/([a-zA-Z0-9]{5,}).*$"); + p = Pattern.compile("^https?://(?:www\\.|m\\.)?imgur\\.com/gallery/(?:(?:[a-zA-Z0-9]*/)?.*-)?([a-zA-Z0-9]+)$"); m = p.matcher(url.toExternalForm()); if (m.matches()) { // Imgur album or gallery albumType = ALBUM_TYPE.ALBUM; String gid = m.group(m.groupCount()); - this.url = new URL("http://imgur.com/a/" + gid); + this.url = new URI("https://imgur.com/a/" + gid).toURL(); return gid; } - p = Pattern.compile("^https?://(www\\.|m\\.)?imgur\\.com/(a|gallery|t)/[a-zA-Z0-9]*/([a-zA-Z0-9]{5,}).*$"); + // Match urls with path /a + p = Pattern.compile("^https?://(?:www\\.|m\\.)?imgur\\.com/(?:a|t)/(?:(?:[a-zA-Z0-9]*/)?.*-)?([a-zA-Z0-9]+).*$"); m = p.matcher(url.toExternalForm()); if (m.matches()) { // Imgur album or gallery albumType = ALBUM_TYPE.ALBUM; String gid = m.group(m.groupCount()); - this.url = new URL("http://imgur.com/a/" + gid); + this.url = new URI("https://imgur.com/a/" + gid).toURL(); return gid; } - p = Pattern.compile("^https?://([a-zA-Z0-9\\-]{3,})\\.imgur\\.com/?$"); + p = Pattern.compile("^https?://([a-zA-Z0-9\\-]{4,})\\.imgur\\.com/?$"); m = p.matcher(url.toExternalForm()); if (m.matches()) { // Root imgur account @@ -510,6 +519,14 @@ public class ImgurRipper extends AlbumRipper { albumType = ALBUM_TYPE.USER; return "user_" + gid; } + // Pattern for new imgur user url https://imgur.com/user/username + p = Pattern.compile("^https?://(?:www\\.|m\\.)?imgur\\.com/user/([a-zA-Z0-9]+).*$"); + m = p.matcher(url.toExternalForm()); + if (m.matches()) { + String gid = m.group(1); + albumType = ALBUM_TYPE.USER; + return "user_" + gid; + } p = Pattern.compile("^https?://([a-zA-Z0-9\\-]{3,})\\.imgur\\.com/all.*$"); m = p.matcher(url.toExternalForm()); if 
(m.matches()) { @@ -529,13 +546,13 @@ public class ImgurRipper extends AlbumRipper { if (m.matches()) { // Imgur subreddit aggregator albumType = ALBUM_TYPE.SUBREDDIT; - String album = m.group(2); + StringBuilder album = new StringBuilder(m.group(2)); for (int i = 3; i <= m.groupCount(); i++) { if (m.group(i) != null) { - album += "_" + m.group(i).replace("/", ""); + album.append("_").append(m.group(i).replace("/", "")); } } - return album; + return album.toString(); } p = Pattern.compile("^https?://(i\\.|www\\.|m\\.)?imgur\\.com/r/(\\w+)/([a-zA-Z0-9,]{5,}).*$"); m = p.matcher(url.toExternalForm()); @@ -544,7 +561,7 @@ public class ImgurRipper extends AlbumRipper { albumType = ALBUM_TYPE.ALBUM; String subreddit = m.group(m.groupCount() - 1); String gid = m.group(m.groupCount()); - this.url = new URL("http://imgur.com/r/" + subreddit + "/" + gid); + this.url = new URI("https://imgur.com/r/" + subreddit + "/" + gid).toURL(); return "r_" + subreddit + "_" + gid; } p = Pattern.compile("^https?://(i\\.|www\\.|m\\.)?imgur\\.com/([a-zA-Z0-9]{5,})$"); @@ -554,29 +571,14 @@ public class ImgurRipper extends AlbumRipper { albumType = ALBUM_TYPE.SINGLE_IMAGE; return m.group(m.groupCount()); } - p = Pattern.compile("^https?://(i\\.|www\\.|m\\.)?imgur\\.com/([a-zA-Z0-9,]{5,}).*$"); - m = p.matcher(url.toExternalForm()); - if (m.matches()) { - // Series of imgur images - albumType = ALBUM_TYPE.SERIES_OF_IMAGES; - String gid = m.group(m.groupCount()); - if (!gid.contains(",")) { - throw new MalformedURLException("Imgur image doesn't contain commas"); - } - return gid.replaceAll(",", "-"); - } throw new MalformedURLException("Unsupported imgur URL format: " + url.toExternalForm()); } - public ALBUM_TYPE getAlbumType() { - return albumType; - } - public static class ImgurImage { String title = ""; String description = ""; - String extension = ""; - public URL url = null; + String extension; + public URL url; ImgurImage(URL url) { this.url = url; @@ -586,14 +588,7 @@ public class 
ImgurRipper extends AlbumRipper { this.extension = this.extension.substring(0, this.extension.indexOf("?")); } } - ImgurImage(URL url, String title) { - this(url); - this.title = title; - } - public ImgurImage(URL url, String title, String description) { - this(url, title); - this.description = description; - } + String getSaveAs() { String saveAs = this.title; String u = url.toExternalForm(); @@ -613,7 +608,7 @@ public class ImgurRipper extends AlbumRipper { public static class ImgurAlbum { String title = null; - public URL url = null; + public URL url; public List images = new ArrayList<>(); ImgurAlbum(URL url) { this.url = url; diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/InstagramRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/InstagramRipper.java index f231def4..4a4122ad 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/InstagramRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/InstagramRipper.java @@ -1,18 +1,13 @@ package com.rarchives.ripme.ripper.rippers; +import com.oracle.js.parser.ErrorManager; +import com.oracle.js.parser.Parser; +import com.oracle.js.parser.ScriptEnvironment; +import com.oracle.js.parser.Source; +import com.oracle.js.parser.ir.*; import com.rarchives.ripme.ripper.AbstractJSONRipper; import com.rarchives.ripme.utils.Http; import com.rarchives.ripme.utils.Utils; -import jdk.nashorn.internal.ir.Block; -import jdk.nashorn.internal.ir.CallNode; -import jdk.nashorn.internal.ir.ExpressionStatement; -import jdk.nashorn.internal.ir.FunctionNode; -import jdk.nashorn.internal.ir.Statement; -import jdk.nashorn.internal.parser.Parser; -import jdk.nashorn.internal.runtime.Context; -import jdk.nashorn.internal.runtime.ErrorManager; -import jdk.nashorn.internal.runtime.Source; -import jdk.nashorn.internal.runtime.options.Options; import org.json.JSONArray; import org.json.JSONObject; import org.jsoup.Connection; @@ -26,12 +21,7 @@ import java.time.Instant; import java.time.ZoneOffset; 
import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Spliterators; +import java.util.*; import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; @@ -176,13 +166,17 @@ public class InstagramRipper extends AbstractJSONRipper { if (postRip) { return null; } - Predicate hrefFilter = (storiesRip || pinnedReelRip) ? href -> href.contains("Consumer.js") : - href -> href.contains("ProfilePageContainer.js") || href.contains("TagPageContainer.js"); + + Predicate hrefFilter = href -> href.contains("Consumer.js"); + if (taggedRip) { + hrefFilter = href -> href.contains("ProfilePageContainer.js") || href.contains("TagPageContainer.js"); + } String href = doc.select("link[rel=preload]").stream() - .map(link -> link.attr("href")) - .filter(hrefFilter) - .findFirst().orElse(""); + .map(link -> link.attr("href")) + .filter(hrefFilter) + .findFirst().orElse(""); + String body = Http.url("https://www.instagram.com" + href).cookies(cookies).response().body(); Function hashExtractor = @@ -198,7 +192,8 @@ public class InstagramRipper extends AbstractJSONRipper { } private String getProfileHash(String jsData) { - return getHashValue(jsData, "loadProfilePageExtras", -1); + return getHashValue(jsData, "loadProfilePageExtras", -1, + s -> s.replaceAll(".*queryId\\s?:\\s?\"([0-9a-f]*)\".*", "$1")); } private String getPinnedHash(String jsData) { @@ -386,7 +381,7 @@ public class InstagramRipper extends AbstractJSONRipper { case "GraphSidecar": JSONArray sideCar = getJsonArrayByPath(mediaItem, "edge_sidecar_to_children.edges"); return getStreamOfJsonArray(sideCar).map(object -> object.getJSONObject("node")) - .flatMap(this::parseRootForUrls); + .flatMap(this::parseRootForUrls); default: return Stream.empty(); } @@ -413,26 +408,35 @@ public class InstagramRipper extends 
AbstractJSONRipper { // Javascript parsing /* ------------------------------------------------------------------------------------------------------- */ - private String getHashValue(String javaScriptData, String keyword, int offset) { + private String getHashValue(String javaScriptData, String keyword, int offset, + Function extractHash) { List statements = getJsBodyBlock(javaScriptData).getStatements(); + return statements.stream() - .flatMap(statement -> filterItems(statement, ExpressionStatement.class)) - .map(ExpressionStatement::getExpression) - .flatMap(expression -> filterItems(expression, CallNode.class)) - .map(CallNode::getArgs) - .map(expressions -> expressions.get(0)) - .flatMap(expression -> filterItems(expression, FunctionNode.class)) - .map(FunctionNode::getBody) - .map(Block::getStatements) - .map(statementList -> lookForHash(statementList, keyword, offset)) - .filter(Objects::nonNull) - .findFirst().orElse(null); + .flatMap(statement -> filterItems(statement, ExpressionStatement.class)) + .map(ExpressionStatement::getExpression) + .flatMap(expression -> filterItems(expression, CallNode.class)) + .map(CallNode::getArgs) + .map(expressions -> expressions.get(0)) + .flatMap(expression -> filterItems(expression, FunctionNode.class)) + .map(FunctionNode::getBody) + .map(Block::getStatements) + .map(statementList -> lookForHash(statementList, keyword, offset, extractHash)) + .filter(Objects::nonNull) + .findFirst().orElse(null); } - private String lookForHash(List list, String keyword, int offset) { + private String getHashValue(String javaScriptData, String keyword, int offset) { + return getHashValue(javaScriptData, keyword, offset, null); + } + + private String lookForHash(List list, String keyword, int offset, Function extractHash) { for (int i = 0; i < list.size(); i++) { Statement st = list.get(i); if (st.toString().contains(keyword)) { + if (extractHash != null) { + return extractHash.apply(list.get(i + offset).toString()); + } return list.get(i 
+ offset).toString().replaceAll(".*\"([0-9a-f]*)\".*", "$1"); } } @@ -444,9 +448,10 @@ public class InstagramRipper extends AbstractJSONRipper { } private Block getJsBodyBlock(String javaScriptData) { - ErrorManager errors = new ErrorManager(); - Context context = new Context(new Options("nashorn"), errors, Thread.currentThread().getContextClassLoader()); - return new Parser(context.getEnv(), Source.sourceFor("name", javaScriptData), errors).parse().getBody(); + ScriptEnvironment env = ScriptEnvironment.builder().ecmaScriptVersion(10).constAsVar(true).build(); + ErrorManager errorManager = new ErrorManager.ThrowErrorManager(); + Source src = Source.sourceFor("name", javaScriptData); + return new Parser(env, src, errorManager).parse().getBody(); } // Some JSON helper methods below diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/JabArchivesRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/JabArchivesRipper.java index e7af19bc..84fad505 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/JabArchivesRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/JabArchivesRipper.java @@ -55,12 +55,6 @@ public class JabArchivesRipper extends AbstractHTMLRipper { "jabarchives.com/main/view/albumname - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public Document getNextPage(Document doc) throws IOException { // Find next page diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/JagodibujaRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/JagodibujaRipper.java index d5df1fe5..2f2d5c33 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/JagodibujaRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/JagodibujaRipper.java @@ -2,6 +2,8 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; 
+import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -40,12 +42,6 @@ public class JagodibujaRipper extends AbstractHTMLRipper { throw new MalformedURLException("Expected jagodibuja.com gallery formats hwww.jagodibuja.com/Comic name/ got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); @@ -62,8 +58,8 @@ public class JagodibujaRipper extends AbstractHTMLRipper { Element elem = comicPage.select("span.full-size-link > a").first(); LOGGER.info("Got link " + elem.attr("href")); try { - addURLToDownload(new URL(elem.attr("href")), ""); - } catch (MalformedURLException e) { + addURLToDownload(new URI(elem.attr("href")).toURL(), ""); + } catch (MalformedURLException | URISyntaxException e) { LOGGER.warn("Malformed URL"); e.printStackTrace(); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/Jpg3Ripper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/Jpg3Ripper.java new file mode 100644 index 00000000..c79e02bc --- /dev/null +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/Jpg3Ripper.java @@ -0,0 +1,70 @@ +package com.rarchives.ripme.ripper.rippers; + +import com.rarchives.ripme.ripper.AbstractHTMLRipper; +import com.rarchives.ripme.utils.Http; +import org.jsoup.nodes.Document; +import org.jsoup.nodes.Element; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; +import java.util.List; + +public class Jpg3Ripper extends AbstractHTMLRipper { + + public Jpg3Ripper(URL url) throws IOException { + super(url); + } + + @Override + public String getDomain() { + return "jpg3.su"; + } + + @Override + public String getHost() { + return 
"jpg3"; + } + + @Override + public List getURLsFromPage(Document page) { + List urls = new ArrayList<>(); + + for (Element el : page.select(".image-container > img")) { + urls.add(el.attr("src").replaceAll("\\.md", "")); + } + + return urls; + } + + @Override + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { + String u = url.toExternalForm(); + u = u.replaceAll("https?://jpg3.su/a/([^/]+)/?.*", "https://jpg3.su/a/$1"); + LOGGER.debug("Changed URL from " + url + " to " + u); + return new URI(u).toURL(); + } + + @Override + public Document getNextPage(Document page) throws IOException, URISyntaxException { + String href = page.select("[data-pagination='next']").attr("href"); + if (!href.isEmpty()) { + return Http.url(href).get(); + } else { + return null; + } + } + + @Override + public String getGID(URL url) throws MalformedURLException { + return url.toString().split("/")[url.toString().split("/").length - 1]; + } + + @Override + protected void downloadURL(URL url, int index) { + addURLToDownload(url, getPrefix(index), "", this.url.toExternalForm(), null); + } +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/PorncomixDotOneRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/KingcomixRipper.java similarity index 53% rename from src/main/java/com/rarchives/ripme/ripper/rippers/PorncomixDotOneRipper.java rename to src/main/java/com/rarchives/ripme/ripper/rippers/KingcomixRipper.java index c1e7fac7..bb8194bc 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/PorncomixDotOneRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/KingcomixRipper.java @@ -14,49 +14,38 @@ import org.jsoup.nodes.Element; import com.rarchives.ripme.ripper.AbstractHTMLRipper; import com.rarchives.ripme.utils.Http; -public class PorncomixDotOneRipper extends AbstractHTMLRipper { +public class KingcomixRipper extends AbstractHTMLRipper { - public PorncomixDotOneRipper(URL url) throws IOException { + public 
KingcomixRipper(URL url) throws IOException { super(url); } @Override public String getHost() { - return "porncomix"; + return "kingcomix"; } @Override public String getDomain() { - return "porncomix.one"; + return "kingcomix.com"; } @Override public String getGID(URL url) throws MalformedURLException { - Pattern p = Pattern.compile("https?://www.porncomix.one/gallery/([a-zA-Z0-9_\\-]*)/?$"); + Pattern p = Pattern.compile("https://kingcomix.com/([a-zA-Z1-9_-]*)/?$"); Matcher m = p.matcher(url.toExternalForm()); if (m.matches()) { return m.group(1); } - throw new MalformedURLException("Expected proncomix URL format: " + - "porncomix.one/gallery/comic - got " + url + " instead"); - } - - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); + throw new MalformedURLException("Expected kingcomix URL format: " + + "kingcomix.com/COMIX - got " + url + " instead"); } @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); - // We have 2 loops here to cover all the different album types - for (Element el : doc.select(".dgwt-jg-item > a")) { - result.add(el.attr("href")); - } - for (Element el : doc.select(".unite-gallery > img")) { - result.add(el.attr("data-image")); - + for (Element el : doc.select("div.entry-content > p > img")) { + result.add(el.attr("src")); } return result; } @@ -65,4 +54,4 @@ public class PorncomixDotOneRipper extends AbstractHTMLRipper { public void downloadURL(URL url, int index) { addURLToDownload(url, getPrefix(index)); } -} \ No newline at end of file +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ListalRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ListalRipper.java index 8986fd91..408310a7 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ListalRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ListalRipper.java @@ -1,234 +1,236 @@ -package 
com.rarchives.ripme.ripper.rippers; - -import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; -import org.jsoup.select.Elements; -import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.ripper.DownloadThreadPool; -import com.rarchives.ripme.utils.Http; - - - -/** - * @author Tushar - * - */ -public class ListalRipper extends AbstractHTMLRipper { - - private Pattern p1 = Pattern.compile("https:\\/\\/www.listal.com\\/list\\/([a-zA-Z0-9-]+)"); - private Pattern p2 = - Pattern.compile("https:\\/\\/www.listal.com\\/((?:(?:[a-zA-Z0-9-]+)\\/?)+)"); - private String listId = null; // listId to get more images via POST. - private String postUrl = "https://www.listal.com/item-list/"; //to load more images. - private UrlType urlType = UrlType.UNKNOWN; - - private DownloadThreadPool listalThreadPool = new DownloadThreadPool("listalThreadPool"); - - public ListalRipper(URL url) throws IOException { - super(url); - } - - @Override - public String getDomain() { - return "listal.com"; - } - - @Override - public String getHost() { - return "listal"; - } - - @Override - public Document getFirstPage() throws IOException { - Document doc = Http.url(url).get(); - if (urlType == UrlType.LIST) { - listId = doc.select("#customlistitems").first().attr("data-listid"); // Used for list types. 
- } - return doc; - } - - @Override - public List getURLsFromPage(Document page) { - if (urlType == UrlType.LIST) { - // for url of type LIST, https://www.listal.com/list/my-list - return getURLsForListType(page); - } else if (urlType == UrlType.FOLDER) { - // for url of type FOLDER, https://www.listal.com/jim-carrey/pictures - return getURLsForFolderType(page); - } - return null; - } - - @Override - public void downloadURL(URL url, int index) { - listalThreadPool.addThread(new ListalImageDownloadThread(url, index)); - } - - @Override - public String getGID(URL url) throws MalformedURLException { - Matcher m1 = p1.matcher(url.toExternalForm()); - if (m1.matches()) { - // Return the text contained between () in the regex - urlType = UrlType.LIST; - return m1.group(1); - } - - Matcher m2 = p2.matcher(url.toExternalForm()); - if (m2.matches()) { - // Return only gid from capturing group of type listal.com/tvOrSomething/dexter/pictures - urlType = UrlType.FOLDER; - return getFolderTypeGid(m2.group(1)); - } - - throw new MalformedURLException("Expected listal.com URL format: " - + "listal.com/list/my-list-name - got " + url + " instead."); - } - - @Override - public Document getNextPage(Document page) throws IOException { - Document nextPage = super.getNextPage(page); - switch (urlType) { - case LIST: - if (!page.select(".loadmoreitems").isEmpty()) { - // All items are not loaded. - // Load remaining items using postUrl. 
- - String offSet = page.select(".loadmoreitems").last().attr("data-offset"); - Map postParams = new HashMap<>(); - postParams.put("listid", listId); - postParams.put("offset", offSet); - try { - nextPage = Http.url(postUrl).data(postParams).retries(3).post(); - } catch (IOException e1) { - LOGGER.error("Failed to load more images after " + offSet, e1); - throw e1; - } - } - break; - - case FOLDER: - Elements pageLinks = page.select(".pages a"); - if (!pageLinks.isEmpty() && pageLinks.last().text().startsWith("Next")) { - String nextUrl = pageLinks.last().attr("abs:href"); - nextPage = Http.url(nextUrl).retries(3).get(); - } - break; - - case UNKNOWN: - default: - } - return nextPage; - } - - - @Override - public DownloadThreadPool getThreadPool() { - return listalThreadPool; - } - - /** - * Returns the image urls for UrlType LIST. - */ - private List getURLsForListType(Document page) { - List list = new ArrayList<>(); - for (Element e : page.select(".pure-g a[href*=viewimage]")) { - //list.add("https://www.listal.com" + e.attr("href") + "h"); - list.add(e.attr("abs:href") + "h"); - } - - return list; - } - - /** - * Returns the image urls for UrlType FOLDER. - */ - private List getURLsForFolderType(Document page) { - List list = new ArrayList<>(); - for (Element e : page.select("#browseimagescontainer .imagewrap-outer a")) { - list.add(e.attr("abs:href") + "h"); - } - return list; - } - - /** - * Returns the gid for url type listal.com/tvOrSomething/dexter/pictures - */ - public String getFolderTypeGid(String group) throws MalformedURLException { - String[] folders = group.split("/"); - try { - if (folders.length == 2 && folders[1].equals("pictures")) { - // Url is probably for an actor. - return folders[0]; - } - - if (folders.length == 3 && folders[2].equals("pictures")) { - // Url if for a folder(like movies, tv etc). 
- Document doc = Http.url(url).get(); - return doc.select(".itemheadingmedium").first().text(); - } - - } catch (Exception e) { - LOGGER.error(e); - } - throw new MalformedURLException("Unable to fetch the gid for given url."); - } - - private class ListalImageDownloadThread extends Thread { - - private URL url; - private int index; - - public ListalImageDownloadThread(URL url, int index) { - super(); - this.url = url; - this.index = index; - } - - @Override - public void run() { - getImage(); - } - - public void getImage() { - try { - Document doc = Http.url(url).get(); - - String imageUrl = doc.getElementsByClass("pure-img").attr("src"); - if (imageUrl != "") { - addURLToDownload(new URL(imageUrl), getPrefix(index), "", null, null, - getImageName()); - } else { - LOGGER.error("Couldnt find image from url: " + url); - } - } catch (IOException e) { - LOGGER.error("[!] Exception while downloading image: " + url, e); - } - } - - public String getImageName() { - // Returns the image number of the link if possible. 
- String name = this.url.toExternalForm(); - try { - name = name.substring(name.lastIndexOf("/") + 1); - } catch (Exception e) { - LOGGER.info("Failed to get name for the image."); - name = null; - } - // Listal stores images as .jpg - return name + ".jpg"; - } - } - - private static enum UrlType { - LIST, FOLDER, UNKNOWN - } -} +package com.rarchives.ripme.ripper.rippers; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.jsoup.nodes.Document; +import org.jsoup.nodes.Element; +import org.jsoup.select.Elements; +import com.rarchives.ripme.ripper.AbstractHTMLRipper; +import com.rarchives.ripme.ripper.DownloadThreadPool; +import com.rarchives.ripme.utils.Http; + + + +/** + * @author Tushar + * + */ +public class ListalRipper extends AbstractHTMLRipper { + + private Pattern p1 = Pattern.compile("https:\\/\\/www.listal.com\\/list\\/([a-zA-Z0-9-]+)"); + private Pattern p2 = + Pattern.compile("https:\\/\\/www.listal.com\\/((?:(?:[a-zA-Z0-9-_%]+)\\/?)+)"); + private String listId = null; // listId to get more images via POST. + private String postUrl = "https://www.listal.com/item-list/"; //to load more images. + private UrlType urlType = UrlType.UNKNOWN; + + private DownloadThreadPool listalThreadPool = new DownloadThreadPool("listalThreadPool"); + + public ListalRipper(URL url) throws IOException { + super(url); + } + + @Override + public String getDomain() { + return "listal.com"; + } + + @Override + public String getHost() { + return "listal"; + } + + @Override + public Document getFirstPage() throws IOException { + Document doc = Http.url(url).get(); + if (urlType == UrlType.LIST) { + listId = doc.select("#customlistitems").first().attr("data-listid"); // Used for list types. 
+ } + return doc; + } + + @Override + public List getURLsFromPage(Document page) { + if (urlType == UrlType.LIST) { + // for url of type LIST, https://www.listal.com/list/my-list + return getURLsForListType(page); + } else if (urlType == UrlType.FOLDER) { + // for url of type FOLDER, https://www.listal.com/jim-carrey/pictures + return getURLsForFolderType(page); + } + return null; + } + + @Override + public void downloadURL(URL url, int index) { + listalThreadPool.addThread(new ListalImageDownloadThread(url, index)); + } + + @Override + public String getGID(URL url) throws MalformedURLException { + Matcher m1 = p1.matcher(url.toExternalForm()); + if (m1.matches()) { + // Return the text contained between () in the regex + urlType = UrlType.LIST; + return m1.group(1); + } + + Matcher m2 = p2.matcher(url.toExternalForm()); + if (m2.matches()) { + // Return only gid from capturing group of type listal.com/tvOrSomething/dexter/pictures + urlType = UrlType.FOLDER; + return getFolderTypeGid(m2.group(1)); + } + + throw new MalformedURLException("Expected listal.com URL format: " + + "listal.com/list/my-list-name - got " + url + " instead."); + } + + @Override + public Document getNextPage(Document page) throws IOException, URISyntaxException { + Document nextPage = super.getNextPage(page); + switch (urlType) { + case LIST: + if (!page.select(".loadmoreitems").isEmpty()) { + // All items are not loaded. + // Load remaining items using postUrl. 
+ + String offSet = page.select(".loadmoreitems").last().attr("data-offset"); + Map postParams = new HashMap<>(); + postParams.put("listid", listId); + postParams.put("offset", offSet); + try { + nextPage = Http.url(postUrl).data(postParams).retries(3).post(); + } catch (IOException e1) { + LOGGER.error("Failed to load more images after " + offSet, e1); + throw e1; + } + } + break; + + case FOLDER: + Elements pageLinks = page.select(".pages a"); + if (!pageLinks.isEmpty() && pageLinks.last().text().startsWith("Next")) { + String nextUrl = pageLinks.last().attr("abs:href"); + nextPage = Http.url(nextUrl).retries(3).get(); + } + break; + + case UNKNOWN: + default: + } + return nextPage; + } + + + @Override + public DownloadThreadPool getThreadPool() { + return listalThreadPool; + } + + /** + * Returns the image urls for UrlType LIST. + */ + private List getURLsForListType(Document page) { + List list = new ArrayList<>(); + for (Element e : page.select(".pure-g a[href*=viewimage]")) { + //list.add("https://www.listal.com" + e.attr("href") + "h"); + list.add(e.attr("abs:href") + "h"); + } + + return list; + } + + /** + * Returns the image urls for UrlType FOLDER. + */ + private List getURLsForFolderType(Document page) { + List list = new ArrayList<>(); + for (Element e : page.select("#browseimagescontainer .imagewrap-outer a")) { + list.add(e.attr("abs:href") + "h"); + } + return list; + } + + /** + * Returns the gid for url type listal.com/tvOrSomething/dexter/pictures + */ + public String getFolderTypeGid(String group) throws MalformedURLException { + String[] folders = group.split("/"); + try { + if (folders.length == 2 && folders[1].equals("pictures")) { + // Url is probably for an actor. + return folders[0]; + } + + if (folders.length == 3 && folders[2].equals("pictures")) { + // Url if for a folder(like movies, tv etc). 
+ Document doc = Http.url(url).get(); + return doc.select(".itemheadingmedium").first().text(); + } + + } catch (Exception e) { + LOGGER.error(e); + } + throw new MalformedURLException("Unable to fetch the gid for given url."); + } + + private class ListalImageDownloadThread implements Runnable { + + private final URL url; + private final int index; + + public ListalImageDownloadThread(URL url, int index) { + super(); + this.url = url; + this.index = index; + } + + @Override + public void run() { + getImage(); + } + + public void getImage() { + try { + Document doc = Http.url(url).get(); + + String imageUrl = doc.getElementsByClass("pure-img").attr("src"); + if (imageUrl != "") { + addURLToDownload(new URI(imageUrl).toURL(), getPrefix(index), "", null, null, + getImageName()); + } else { + LOGGER.error("Couldnt find image from url: " + url); + } + } catch (IOException | URISyntaxException e) { + LOGGER.error("[!] Exception while downloading image: " + url, e); + } + } + + public String getImageName() { + // Returns the image number of the link if possible. 
+ String name = this.url.toExternalForm(); + try { + name = name.substring(name.lastIndexOf("/") + 1); + } catch (Exception e) { + LOGGER.info("Failed to get name for the image."); + name = null; + } + // Listal stores images as .jpg + return name + ".jpg"; + } + } + + private static enum UrlType { + LIST, FOLDER, UNKNOWN + } +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java index b10a1dc2..53b0fef5 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java @@ -1,26 +1,26 @@ package com.rarchives.ripme.ripper.rippers; +import com.rarchives.ripme.ripper.AbstractHTMLRipper; +import com.rarchives.ripme.utils.Http; +import org.json.JSONArray; +import org.json.JSONObject; +import org.jsoup.Connection; +import org.jsoup.nodes.Document; + import java.io.IOException; +import java.io.UnsupportedEncodingException; import java.net.MalformedURLException; import java.net.URL; +import java.net.URLEncoder; import java.util.ArrayList; import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; -import org.jsoup.select.Elements; - -import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.ripper.DownloadThreadPool; -import com.rarchives.ripme.utils.Http; - public class LusciousRipper extends AbstractHTMLRipper { - private static final int RETRY_COUNT = 5; // Keeping it high for read timeout exception. 
+ private static String albumid; - private Pattern p = Pattern.compile("^https?://(?:www\\.)?(?:members\\.||legacy\\.||old\\.)?luscious\\.net/albums/([-_.0-9a-zA-Z]+).*$"); - private DownloadThreadPool lusciousThreadPool = new DownloadThreadPool("lusciousThreadPool"); + private static final Pattern p = Pattern.compile("^https?://(?:www\\.)?(?:members\\.||legacy\\.||old\\.)?luscious\\.net/albums/([-_.0-9a-zA-Z]+).*$"); public LusciousRipper(URL url) throws IOException { super(url); @@ -46,40 +46,48 @@ public class LusciousRipper extends AbstractHTMLRipper { } @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - Document page = Http.url(url).get(); - LOGGER.info("First page is " + url); - return page; - } - - @Override - public List getURLsFromPage(Document page) { + public List getURLsFromPage(Document page) { // gets urls for all pages through the api List urls = new ArrayList<>(); - Elements urlElements = page.select("div.item.thumbnail.ic_container > a"); - for (Element e : urlElements) { - urls.add(e.attr("abs:href")); + int totalPages = 1; + + for (int i = 1; i <= totalPages; i++) { + String APIStringWOVariables = 
"https://apicdn.luscious.net/graphql/nobatch/?operationName=PictureListInsideAlbum&query=%2520query%2520PictureListInsideAlbum%28%2524input%253A%2520PictureListInput%21%29%2520%257B%2520picture%2520%257B%2520list%28input%253A%2520%2524input%29%2520%257B%2520info%2520%257B%2520...FacetCollectionInfo%2520%257D%2520items%2520%257B%2520__typename%2520id%2520title%2520description%2520created%2520like_status%2520number_of_comments%2520number_of_favorites%2520moderation_status%2520width%2520height%2520resolution%2520aspect_ratio%2520url_to_original%2520url_to_video%2520is_animated%2520position%2520permissions%2520url%2520tags%2520%257B%2520category%2520text%2520url%2520%257D%2520thumbnails%2520%257B%2520width%2520height%2520size%2520url%2520%257D%2520%257D%2520%257D%2520%257D%2520%257D%2520fragment%2520FacetCollectionInfo%2520on%2520FacetCollectionInfo%2520%257B%2520page%2520has_next_page%2520has_previous_page%2520total_items%2520total_pages%2520items_per_page%2520url_complete%2520%257D%2520&variables="; + Connection con = Http.url(APIStringWOVariables + encodeVariablesPartOfURL(i, albumid)).method(Connection.Method.GET).retries(5).connection(); + con.ignoreHttpErrors(true); + con.ignoreContentType(true); + con.userAgent("Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0"); + Connection.Response res; + try { + res = con.execute(); + } catch (IOException e) { + throw new RuntimeException(e); + } + String body = res.body(); + + JSONObject jsonObject = new JSONObject(body); + + JSONObject data = jsonObject.getJSONObject("data"); + JSONObject picture = data.getJSONObject("picture"); + JSONObject list = picture.getJSONObject("list"); + JSONArray items = list.getJSONArray("items"); + JSONObject info = list.getJSONObject("info"); + totalPages = info.getInt("total_pages"); + + for (int j = 0; j < items.length(); j++) { + JSONObject item = items.getJSONObject(j); + String urlToOriginal = item.getString("url_to_original"); + urls.add(urlToOriginal); + } } 
return urls; } - @Override - public Document getNextPage(Document doc) throws IOException { - // luscious sends xhr requests to nextPageUrl and appends new set of images to the current page while in browser. - // Simply GET the nextPageUrl also works. Therefore, we do this... - Element nextPageElement = doc.select("div#next_page > div > a").first(); - if (nextPageElement == null) { - throw new IOException("No next page found."); - } - - return Http.url(nextPageElement.attr("abs:href")).get(); - } - @Override public String getGID(URL url) throws MalformedURLException { - Matcher m = p.matcher(url.toExternalForm()); + Matcher m = P.matcher(url.toExternalForm()); if (m.matches()) { + albumid = m.group(1).split("_")[m.group(1).split("_").length - 1]; return m.group(1); } throw new MalformedURLException("Expected luscious.net URL format: " @@ -87,45 +95,17 @@ public class LusciousRipper extends AbstractHTMLRipper { } @Override - public void downloadURL(URL url, int index) { - lusciousThreadPool.addThread(new LusciousDownloadThread(url, index)); + protected void downloadURL(URL url, int index) { + addURLToDownload(url, getPrefix(index), "", this.url.toExternalForm(), null); } - @Override - public DownloadThreadPool getThreadPool() { - return lusciousThreadPool; - } + public static String encodeVariablesPartOfURL(int page, String albumId) { + try { + String json = "{\"input\":{\"filters\":[{\"name\":\"album_id\",\"value\":\"" + albumId + "\"}],\"display\":\"rating_all_time\",\"items_per_page\":50,\"page\":" + page + "}}"; - public class LusciousDownloadThread extends Thread { - private URL url; - private int index; - - public LusciousDownloadThread(URL url, int index) { - this.url = url; - this.index = index; + return URLEncoder.encode(json, "UTF-8"); + } catch (UnsupportedEncodingException e) { + throw new IllegalStateException("Could not encode variables"); } - - @Override - public void run() { - try { - Document page = Http.url(url).retries(RETRY_COUNT).get(); - - 
String downloadUrl = page.select(".icon-download").attr("abs:href"); - if (downloadUrl.equals("")) { - // This is here for pages with mp4s instead of images. - downloadUrl = page.select("div > video > source").attr("src"); - if (!downloadUrl.equals("")) { - throw new IOException("Could not find download url for image or video."); - } - } - - //If a valid download url was found. - addURLToDownload(new URL(downloadUrl), getPrefix(index)); - - } catch (IOException e) { - LOGGER.error("Error downloadiong url " + url, e); - } - } - } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/MangadexRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/MangadexRipper.java index 6697a45b..8c6c9227 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/MangadexRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/MangadexRipper.java @@ -1,36 +1,42 @@ package com.rarchives.ripme.ripper.rippers; import com.rarchives.ripme.ripper.AbstractJSONRipper; +import com.rarchives.ripme.ui.RipStatusMessage; import com.rarchives.ripme.utils.Http; -import com.rarchives.ripme.utils.Utils; import org.json.JSONArray; import org.json.JSONObject; -import org.jsoup.Connection; -import org.jsoup.nodes.Document; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; import java.util.List; +import java.util.TreeMap; import java.util.regex.Matcher; import java.util.regex.Pattern; public class MangadexRipper extends AbstractJSONRipper { - private String chapterApiEndPoint = "https://mangadex.org/api/chapter/"; - - private String getImageUrl(String chapterHash, String imageName, String server) { - return server + chapterHash + "/" + imageName; - } + private final String chapterApiEndPoint = "https://mangadex.org/api/chapter/"; + private final String mangaApiEndPoint = 
"https://mangadex.org/api/manga/"; + private boolean isSingleChapter; public MangadexRipper(URL url) throws IOException { super(url); } + private String getImageUrl(String chapterHash, String imageName, String server) { + return server + chapterHash + "/" + imageName; + } + @Override public String getHost() { return "mangadex"; } + @Override public String getDomain() { return "mangadex.org"; @@ -44,14 +50,19 @@ public class MangadexRipper extends AbstractJSONRipper { @Override public String getGID(URL url) throws MalformedURLException { String capID = getChapterID(url.toExternalForm()); + String mangaID = getMangaID(url.toExternalForm()); if (capID != null) { + isSingleChapter = true; return capID; + } else if (mangaID != null) { + isSingleChapter = false; + return mangaID; } throw new MalformedURLException("Unable to get chapter ID from" + url); } private String getChapterID(String url) { - Pattern p = Pattern.compile("https://mangadex.org/chapter/([\\d]+)/?"); + Pattern p = Pattern.compile("https://mangadex.org/chapter/([\\d]+)/([\\d+]?)"); Matcher m = p.matcher(url); if (m.matches()) { return m.group(1); @@ -59,26 +70,79 @@ public class MangadexRipper extends AbstractJSONRipper { return null; } + private String getMangaID(String url) { + Pattern p = Pattern.compile("https://mangadex.org/title/([\\d]+)/(.+)"); + Matcher m = p.matcher(url); + if (m.matches()) { + return m.group(1); + } + return null; + } + + @Override - public JSONObject getFirstPage() throws IOException { + public JSONObject getFirstPage() throws IOException, URISyntaxException { // Get the chapter ID String chapterID = getChapterID(url.toExternalForm()); - return Http.url(new URL(chapterApiEndPoint + chapterID)).getJSON(); + String mangaID = getMangaID(url.toExternalForm()); + if (mangaID != null) { + return Http.url(new URI(mangaApiEndPoint + mangaID).toURL()).getJSON(); + } else + return Http.url(new URI(chapterApiEndPoint + chapterID).toURL()).getJSON(); } @Override protected List 
getURLsFromJSON(JSONObject json) { + if (isSingleChapter) { + List assetURLs = new ArrayList<>(); + JSONArray currentObject; + String chapterHash; + // Server is the cdn hosting the images. + String server; + chapterHash = json.getString("hash"); + server = json.getString("server"); + for (int i = 0; i < json.getJSONArray("page_array").length(); i++) { + currentObject = json.getJSONArray("page_array"); + + assetURLs.add(getImageUrl(chapterHash, currentObject.getString(i), server)); + } + return assetURLs; + } + JSONObject chaptersJSON = (JSONObject) json.get("chapter"); + JSONObject temp; + Iterator keys = chaptersJSON.keys(); + HashMap chapterIDs = new HashMap<>(); + while (keys.hasNext()) { + String keyValue = keys.next(); + temp = (JSONObject) chaptersJSON.get(keyValue); + if (temp.getString("lang_name").equals("English")) { + chapterIDs.put(temp.getDouble("chapter"), keyValue); + } + + } + List assetURLs = new ArrayList<>(); JSONArray currentObject; - - String chapterHash = json.getString("hash"); + String chapterHash; // Server is the cdn hosting the images. 
- String server = json.getString("server"); + String server; + JSONObject chapterJSON = null; + TreeMap treeMap = new TreeMap<>(chapterIDs); + for (Double aDouble : treeMap.keySet()) { + double key = (double) aDouble; + try { + chapterJSON = Http.url(new URI(chapterApiEndPoint + treeMap.get(key)).toURL()).getJSON(); + } catch (IOException | URISyntaxException e) { + e.printStackTrace(); + } + sendUpdate(RipStatusMessage.STATUS.LOADING_RESOURCE, "chapter " + key); + chapterHash = chapterJSON.getString("hash"); + server = chapterJSON.getString("server"); + for (int i = 0; i < chapterJSON.getJSONArray("page_array").length(); i++) { + currentObject = chapterJSON.getJSONArray("page_array"); - for (int i = 0; i < json.getJSONArray("page_array").length(); i++) { - currentObject = json.getJSONArray("page_array"); - - assetURLs.add(getImageUrl(chapterHash, currentObject.getString(i), server)); + assetURLs.add(getImageUrl(chapterHash, currentObject.getString(i), server)); + } } return assetURLs; @@ -91,4 +155,5 @@ public class MangadexRipper extends AbstractJSONRipper { addURLToDownload(url, getPrefix(index)); } -} + +} \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ManganeloRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ManganeloRipper.java index f4325aa1..c5f6b142 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ManganeloRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ManganeloRipper.java @@ -48,12 +48,6 @@ public class ManganeloRipper extends AbstractHTMLRipper { "/manganelo.com/manga/ID - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public Document getNextPage(Document doc) throws IOException { Element elem = doc.select("div.btn-navigation-chap > a.back").first(); diff --git 
a/src/main/java/com/rarchives/ripme/ripper/rippers/MeituriRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/MeituriRipper.java index 8bdd2b2f..2c83ce7e 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/MeituriRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/MeituriRipper.java @@ -21,12 +21,12 @@ public class MeituriRipper extends AbstractHTMLRipper { @Override public String getHost() { - return "meituri"; + return "tujigu"; } @Override public String getDomain() { - return "meituri.com"; + return "tujigu.com"; } // To use in getting URLs @@ -35,23 +35,18 @@ public class MeituriRipper extends AbstractHTMLRipper { @Override public String getGID(URL url) throws MalformedURLException { // without escape - // ^https?://[w.]*meituri\.com/a/([0-9]+)/([0-9]+\.html)*$ - // https://www.meituri.com/a/14449/ - // also matches https://www.meituri.com/a/14449/3.html etc. + // ^https?://[w.]*tujigu\.com/a/([0-9]+)/([0-9]+\.html)*$ + // https://www.tujigu.com/a/14449/ + // also matches https://www.tujigu.com/a/14449/3.html etc. 
// group 1 is 14449 - Pattern p = Pattern.compile("^https?://[w.]*meituri\\.com/a/([0-9]+)/([0-9]+\\.html)*$"); + Pattern p = Pattern.compile("^https?://[w.]*tujigu\\.com/a/([0-9]+)/([0-9]+\\.html)*$"); Matcher m = p.matcher(url.toExternalForm()); if (m.matches()) { albumID = m.group(1); return m.group(1); } throw new MalformedURLException( - "Expected meituri.com URL format: " + "meituri.com/a/albumid/ - got " + url + "instead"); - } - - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); + "Expected tujigu.com URL format: " + "tujigu.com/a/albumid/ - got " + url + "instead"); } @Override @@ -71,7 +66,7 @@ public class MeituriRipper extends AbstractHTMLRipper { } // Base URL: http://ii.hywly.com/a/1/albumid/imgnum.jpg - String baseURL = "http://ii.hywly.com/a/1/" + albumID + "/"; + String baseURL = "https://tjg.hywly.com/a/1/" + albumID + "/"; // Loop through and add images to the URL list for (int i = 1; i <= numOfImages; i++) { diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ModelxRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ModelxRipper.java index 0b513b37..c2d6ed47 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ModelxRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ModelxRipper.java @@ -41,11 +41,6 @@ public class ModelxRipper extends AbstractHTMLRipper { throw new MalformedURLException("Expected URL format: http://www.modelx.org/[category (one or more)]/xxxxx got: " + url); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document page) { List result = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/MotherlessRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/MotherlessRipper.java index 7bb8451a..d2af02a1 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/MotherlessRipper.java +++ 
b/src/main/java/com/rarchives/ripme/ripper/rippers/MotherlessRipper.java @@ -2,6 +2,8 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -13,7 +15,6 @@ import org.jsoup.nodes.Element; import com.rarchives.ripme.ripper.AbstractHTMLRipper; import com.rarchives.ripme.ripper.DownloadThreadPool; -import com.rarchives.ripme.ui.RipStatusMessage.STATUS; import com.rarchives.ripme.utils.Http; import com.rarchives.ripme.utils.Utils; import org.jsoup.select.Elements; @@ -59,20 +60,21 @@ public class MotherlessRipper extends AbstractHTMLRipper { if (!notHome) { StringBuilder newPath = new StringBuilder(path); newPath.insert(2, "M"); - firstURL = new URL(this.url, "https://" + DOMAIN + newPath); + firstURL = URI.create("https://" + DOMAIN + newPath).toURL(); LOGGER.info("Changed URL to " + firstURL); } return Http.url(firstURL).referrer("https://motherless.com").get(); } @Override - public Document getNextPage(Document doc) throws IOException { + public Document getNextPage(Document doc) throws IOException, URISyntaxException { + Elements nextPageLink = doc.head().select("link[rel=next]"); if (nextPageLink.isEmpty()) { throw new IOException("Last page reached"); } else { String referrerLink = doc.head().select("link[rel=canonical]").first().attr("href"); - URL nextURL = new URL(this.url, nextPageLink.first().attr("href")); + URL nextURL = this.url.toURI().resolve(nextPageLink.first().attr("href")).toURL(); return Http.url(nextURL).referrer(referrerLink).get(); } } @@ -81,7 +83,7 @@ public class MotherlessRipper extends AbstractHTMLRipper { protected List getURLsFromPage(Document page) { List pageURLs = new ArrayList<>(); - for (Element thumb : page.select("div.thumb a.img-container")) { + for (Element thumb : page.select("div.thumb-container a.img-container")) { if (isStopped()) { 
break; } @@ -109,7 +111,7 @@ public class MotherlessRipper extends AbstractHTMLRipper { @Override protected void downloadURL(URL url, int index) { // Create thread for finding image at "url" page - MotherlessImageThread mit = new MotherlessImageThread(url, index); + MotherlessImageRunnable mit = new MotherlessImageRunnable(url, index); motherlessThreadPool.addThread(mit); try { Thread.sleep(IMAGE_SLEEP_TIME); @@ -148,15 +150,19 @@ public class MotherlessRipper extends AbstractHTMLRipper { throw new MalformedURLException("Expected URL format: https://motherless.com/GIXXXXXXX, got: " + url); } - + @Override + protected DownloadThreadPool getThreadPool() { + return motherlessThreadPool; + } + /** * Helper class to find and download images found on "image" pages */ - private class MotherlessImageThread extends Thread { - private URL url; - private int index; + private class MotherlessImageRunnable implements Runnable { + private final URL url; + private final int index; - MotherlessImageThread(URL url, int index) { + MotherlessImageRunnable(URL url, int index) { super(); this.url = url; this.index = index; @@ -180,11 +186,11 @@ public class MotherlessRipper extends AbstractHTMLRipper { if (Utils.getConfigBoolean("download.save_order", true)) { prefix = String.format("%03d_", index); } - addURLToDownload(new URL(file), prefix); + addURLToDownload(new URI(file).toURL(), prefix); } else { LOGGER.warn("[!] could not find '__fileurl' at " + url); } - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { LOGGER.error("[!] 
Exception while loading/parsing " + this.url, e); } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/MrCongRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/MrCongRipper.java new file mode 100644 index 00000000..642c6417 --- /dev/null +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/MrCongRipper.java @@ -0,0 +1,223 @@ +package com.rarchives.ripme.ripper.rippers; + +import com.rarchives.ripme.ripper.AbstractHTMLRipper; +import com.rarchives.ripme.utils.Http; +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.jsoup.nodes.Document; +import org.jsoup.nodes.Element; + + +public class MrCongRipper extends AbstractHTMLRipper { + + private Document currDoc; + private int lastPageNum; + private int currPageNum; + private boolean tagPage = false; + + public MrCongRipper(URL url) throws IOException { + super(url); + currPageNum = 1; + } + + @Override + public String getHost() { + return "mrcong"; + } + + @Override + public String getDomain() { + return "mrcong.com"; + } + + @Override + public String getGID(URL url) throws MalformedURLException { + System.out.println(url.toExternalForm()); + Pattern p = Pattern.compile("^https?://mrcong\\.com/(\\S*)[0-9]+-anh(-[0-9]+-videos)?(|/|/[0-9]+)$"); + Pattern p2 = Pattern.compile("^https?://mrcong\\.com/tag/(\\S*)/$"); //Added 6-10-21 + Matcher m = p.matcher(url.toExternalForm()); + Matcher m2 = p2.matcher(url.toExternalForm()); //6-10-21 + if (m.matches()) { + return m.group(1); + } + else if(m2.matches()) { //Added 6-10-21 + tagPage = true; + System.out.println("tagPage = TRUE"); + return m2.group(1); + } + + throw new MalformedURLException("Expected mrcong.com URL format: " + + "mrcong.com/GALLERY_NAME(-anh OR -anh/ OR -anh/PAGE_NUMBER OR -anh/PAGE_NUMBER/) - got " + url + 
" instead"); + } + + @Override + public Document getFirstPage() throws IOException { //returns the root gallery page regardless of actual page number + // "url" is an instance field of the superclass + String rootUrlStr; + URL rootUrl; + + if(!tagPage) { + rootUrlStr = url.toExternalForm().replaceAll("(|/|/[0-9]+/?)$", "/"); + } else { //6-10-21 + rootUrlStr = url.toExternalForm().replaceAll("(page/[0-9]+/)$", "page/1/"); + } + + rootUrl = URI.create(rootUrlStr).toURL(); + url = rootUrl; + currPageNum = 1; + currDoc = Http.url(url).get(); + getMaxPageNumber(currDoc); + return currDoc; + } + + @Override + public Document getNextPage(Document doc) throws IOException { + int pageNum = currPageNum; + String urlStr; + if(!tagPage) { + if (pageNum == 1 && lastPageNum > 1) { + urlStr = url.toExternalForm().concat((pageNum + 1) + ""); + System.out.printf("Old Str: %s New Str: %s\n", url.toExternalForm(), urlStr); + } else if (pageNum < lastPageNum) { + urlStr = url.toExternalForm().replaceAll("(/([0-9]*)/?)$", ("/" + (pageNum + 1) + "/")); + System.out.printf("Old Str: %s New Str: %s\n", url.toString(), urlStr); + } else { + //System.out.printf("Error: Page number provided goes past last valid page number\n"); + throw (new IOException("Error: Page number provided goes past last valid page number\n")); + } + } else { //6-10-21 + //if (pageNum == 1 && lastPageNum >= 1) { + if (pageNum == 1 && lastPageNum > 1) { //6-10-21 + urlStr = url.toExternalForm().concat("page/" + (pageNum + 1) + ""); + System.out.printf("Old Str: %s New Str: %s\n", url.toExternalForm(), urlStr); + } else if (pageNum < lastPageNum) { + urlStr = url.toExternalForm().replaceAll("(page/([0-9]*)/?)$", ("page/" + (pageNum + 1) + "/")); + System.out.printf("Old Str: %s New Str: %s\n", url.toString(), urlStr); + } else { + //System.out.printf("Error: Page number provided goes past last valid page number\n"); + System.out.print("Error: There is no next page!\n"); + return null; + //throw (new 
IOException("Error: Page number provided goes past last valid page number\n")); + } + } + + url = URI.create(urlStr).toURL(); + currDoc = Http.url(url).get(); + currPageNum ++;//hi + return currDoc; + } + + private int getMaxPageNumber(Document doc) { + if(!tagPage) { + try { + lastPageNum = Integer.parseInt(doc.select("div.page-link > a").last().text()); //gets the last possible page for the gallery + } catch(Exception e) { + return 1; + } + } else { + try { + lastPageNum = Integer.parseInt(doc.select("div.pagination > a").last().text()); //gets the last possible page for the gallery + System.out.println("The last page found for " + url + " was " + lastPageNum); + } catch(Exception e) { + return 1; + } + } + + return lastPageNum; + } + + private int getCurrentPageNum(Document doc) { + int currPage; //6-10-21 + + if(!tagPage) { + currPage = Integer.parseInt(doc.select("div.page-link > span").first().text()); + } else { + currPage = Integer.parseInt(doc.select("div.pagination > span").first().text()); + } + + System.out.println("The current page was found to be: " + currPage); + + return currPage; + } + + @Override + public List getURLsFromPage(Document doc) { //gets the urls of the images + List result = new ArrayList<>(); + + if(!tagPage) { + for (Element el : doc.select("p > img")) { + String imageSource = el.attr("src"); + result.add(imageSource); + } + + System.out.println("\n1.)Printing List: " + result + "\n"); + } else { //6-10-21 + //List gallery_set_list = new ArrayList<>(); + + for (Element el : doc.select("h2 > a")) { + String pageSource = el.attr("href"); + if(!pageSource.equals("https://mrcong.com/")) { + result.add(pageSource); + System.out.println("\n" + pageSource + " has been added to the list."); + } + } + + /*for (String el2 : gallery_set_list) { + try { + URL temp_urL = URI.create(el2).toURL(); + MrCongRipper mcr = new MrCongRipper(temp_urL); + System.out.println("URL being ripped: " + mcr.url.toString()); + 
result.addAll(mcr.getURLsFromPage(mcr.getFirstPage())); + + Document nextPg = mcr.getNextPage(mcr.currDoc); + while(nextPg != null) { + result.addAll(mcr.getURLsFromPage(nextPg)); + nextPg = mcr.getNextPage(mcr.currDoc); + } + } catch (IOException e) { + e.printStackTrace(); + } + + }*/ + + System.out.println("\n2.)Printing List: " + result + "\n"); + } + + return result; + } + + @Override + public void downloadURL(URL url, int index) { + //addURLToDownload(url, getPrefix(index)); + + if(!tagPage) { + addURLToDownload(url, getPrefix(index)); + } else { + try { + List ls = this.getURLsFromPage(this.currDoc); + Document np = this.getNextPage(this.currDoc); + + while(np != null) { //Creates a list of all sets to download + ls.addAll(this.getURLsFromPage(np)); + np = this.getNextPage(np); + } + + for(String urlStr : ls) { + MrCongRipper mcr = new MrCongRipper(URI.create(urlStr).toURL()); + mcr.setup(); + mcr.rip(); + } + + } catch (IOException | URISyntaxException e) { + e.printStackTrace(); + } + } + } +} \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/MultpornRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/MultpornRipper.java new file mode 100644 index 00000000..cdc873f2 --- /dev/null +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/MultpornRipper.java @@ -0,0 +1,71 @@ +package com.rarchives.ripme.ripper.rippers; + +import com.rarchives.ripme.ripper.AbstractHTMLRipper; +import com.rarchives.ripme.utils.Http; +import org.jsoup.nodes.Document; +import org.jsoup.nodes.Element; +import org.jsoup.select.Elements; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public class MultpornRipper extends AbstractHTMLRipper { + + public MultpornRipper(URL url) throws IOException { + super(url); + } 
+ + @Override + protected String getDomain() { + return "multporn.net"; + } + + @Override + public String getHost() { + return "multporn"; + } + + @Override + public String getGID(URL url) throws MalformedURLException, URISyntaxException { + Pattern p = Pattern.compile("^https?://multporn\\.net/node/(\\d+)/.*$"); + Matcher m = p.matcher(url.toExternalForm()); + if (m.matches()) { + return m.group(1); + } + + try { + String nodeHref = Http.url(url).get().select(".simple-mode-switcher").attr("href"); + p = Pattern.compile("/node/(\\d+)/.*"); + m = p.matcher(nodeHref); + if (m.matches()) { + this.url = new URI("https://multporn.net" + nodeHref).toURL(); + return m.group(1); + } + }catch (Exception ignored){}; + + throw new MalformedURLException("Expected multporn.net URL format: " + + "multporn.net/comics/comicid / multporn.net/node/id/* - got " + url + " instead"); + } + + @Override + protected List getURLsFromPage(Document page) { + List imageURLs = new ArrayList<>(); + Elements thumbs = page.select(".mfp-gallery-image .mfp-item"); + for (Element el : thumbs) { + imageURLs.add(el.attr("href")); + } + return imageURLs; + } + + @Override + protected void downloadURL(URL url, int index) { + addURLToDownload(url, getPrefix(index), "", this.url.toExternalForm(), null); + } +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaicomicsRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaicomicsRipper.java index 453826a3..deedfb88 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaicomicsRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaicomicsRipper.java @@ -4,6 +4,7 @@ import com.rarchives.ripme.ripper.AbstractHTMLRipper; import com.rarchives.ripme.utils.Http; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -13,8 +14,6 @@ import org.jsoup.nodes.Document; import 
org.jsoup.nodes.Element; public class MyhentaicomicsRipper extends AbstractHTMLRipper { - private static boolean isTag; - public MyhentaicomicsRipper(URL url) throws IOException { super(url); } @@ -69,7 +68,6 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper { Pattern pat = Pattern.compile("^https?://myhentaicomics.com/index.php/tag/([0-9]*)/?([a-zA-Z%0-9+?=:]*)?$"); Matcher mat = pat.matcher(url.toExternalForm()); if (mat.matches()) { - isTag = true; return true; } return false; @@ -85,9 +83,8 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper { } @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); + public Document getFirstPage() throws IOException, URISyntaxException { + return super.getFirstPage(); } @Override diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaigalleryRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaigalleryRipper.java index d8422942..c9f4c0bd 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaigalleryRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaigalleryRipper.java @@ -40,12 +40,6 @@ public class MyhentaigalleryRipper extends AbstractHTMLRipper { + "myhentaigallery.com/gallery/thumbnails/ID - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/MyreadingmangaRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/MyreadingmangaRipper.java index 20a3cf2d..30fab521 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/MyreadingmangaRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/MyreadingmangaRipper.java @@ -41,12 +41,6 @@ public class 
MyreadingmangaRipper extends AbstractHTMLRipper { + "myreadingmanga.info/title - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/NatalieMuRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/NatalieMuRipper.java index 952b434e..8cf24fd8 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/NatalieMuRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/NatalieMuRipper.java @@ -79,11 +79,6 @@ public class NatalieMuRipper extends AbstractHTMLRipper { return this.url.getHost(); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(this.url).get(); - } - @Override public List getURLsFromPage(Document page) { List imageURLs = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/NewgroundsRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/NewgroundsRipper.java index b3ededc4..a7be157a 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/NewgroundsRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/NewgroundsRipper.java @@ -53,7 +53,7 @@ public class NewgroundsRipper extends AbstractHTMLRipper { @Override protected Document getFirstPage() throws IOException { - return Http.url("https://" + this.username + ".newgrounds.com/art").get(); + return Http.url("https://" + this.username + ".newgrounds.com/art").timeout(10*1000).get(); } @Override @@ -71,7 +71,7 @@ public class NewgroundsRipper extends AbstractHTMLRipper { List imageURLs = new ArrayList<>(); String documentHTMLString = page.toString().replaceAll(""", ""); - String findStr = "newgrounds.com\\/art\\/view\\/" + this.username; + String findStr = "newgrounds.com/art/view/" + this.username; int lastIndex = 0; // Index 
where findStr is found; each occasion contains the link to an image @@ -95,7 +95,7 @@ public class NewgroundsRipper extends AbstractHTMLRipper { if(i == indices.size() - 1){ s = documentHTMLString.substring(indices.get(i) + 2); } else{ - s = documentHTMLString.substring(indices.get(i) + 2, indices.get(i + 1)); + s = documentHTMLString.substring(indices.get(i) + 1, indices.get(i + 1)); } s = s.replaceAll("\n", "").replaceAll("\t", "") @@ -106,13 +106,14 @@ public class NewgroundsRipper extends AbstractHTMLRipper { if (m.lookingAt()) { String testURL = m.group(3) + "_" + this.username + "_" + m.group(1); + testURL = testURL.replace("_full", ""); // Open new document to get full sized image try { Document imagePage = Http.url(inLink + m.group(1)).get(); for(String extensions: this.ALLOWED_EXTENSIONS){ if(imagePage.toString().contains(testURL + "." + extensions)){ - imageUrl += m.group(2) + "/" + m.group(3) + "_" + this.username + "_" + m.group(1) + "." + extensions; + imageUrl += m.group(2) + "/" + m.group(3).replace("_full","") + "_" + this.username + "_" + m.group(1) + "." 
+ extensions; imageURLs.add(imageUrl); break; } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/NewsfilterRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/NewsfilterRipper.java deleted file mode 100644 index bafa3690..00000000 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/NewsfilterRipper.java +++ /dev/null @@ -1,80 +0,0 @@ -package com.rarchives.ripme.ripper.rippers; - -import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.ArrayList; -import java.util.List; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.utils.Http; -import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; -import org.jsoup.select.Elements; - -public class NewsfilterRipper extends AbstractHTMLRipper { - - private static final String HOST = "newsfilter"; - private static final String DOMAIN = "newsfilter.org"; - - public NewsfilterRipper(URL url) throws IOException { - super(url); - } - - @Override - public URL sanitizeURL(URL url) throws MalformedURLException { - String u = url.toExternalForm(); - if (u.indexOf('#') >= 0) { - u = u.substring(0, u.indexOf('#')); - } - u = u.replace("https?://m\\.newsfilter\\.org", "http://newsfilter.org"); - return new URL(u); - } - - @Override - public String getGID(URL url) throws MalformedURLException { - Pattern p = Pattern.compile("^https?://([wm]+\\.)?newsfilter\\.org/gallery/([^/]+)$"); - Matcher m = p.matcher(url.toExternalForm()); - if (m.matches()) { - return m.group(2); - } - throw new MalformedURLException( - "Expected newsfilter gallery format: http://newsfilter.org/gallery/galleryid" + - " Got: " + url); - } - - @Override - public String getHost() { - return HOST; - } - - @Override - protected String getDomain() { - return DOMAIN; - } - - @Override - protected Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - - @Override 
- protected List getURLsFromPage(Document page) { - List imgURLs = new ArrayList<>(); - Elements thumbnails = page.select("#galleryImages .inner-block img"); - for (Element thumb : thumbnails) { - String thumbUrl = thumb.attr("src"); - String picUrl = thumbUrl.replace("thumbs/", ""); - // use HTTP instead of HTTPS (less headaches) - imgURLs.add(picUrl.replaceFirst("https://", "http://")); - } - return imgURLs; - } - - @Override - protected void downloadURL(URL url, int index) { - addURLToDownload(url, getPrefix(index)); - } -} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/NfsfwRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/NfsfwRipper.java index 86079edc..35a1f8ad 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/NfsfwRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/NfsfwRipper.java @@ -2,6 +2,8 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -29,8 +31,6 @@ public class NfsfwRipper extends AbstractHTMLRipper { "https?://[wm.]*nfsfw.com/gallery/v/[^/]+/(.+)$" ); - // cached first page - private Document fstPage; // threads pool for downloading images from image pages private DownloadThreadPool nfsfwThreadPool; @@ -49,13 +49,6 @@ public class NfsfwRipper extends AbstractHTMLRipper { return HOST; } - @Override - protected Document getFirstPage() throws IOException { - // cache the first page - this.fstPage = Http.url(url).get(); - return fstPage; - } - @Override public Document getNextPage(Document page) throws IOException { String nextURL = null; @@ -113,13 +106,13 @@ public class NfsfwRipper extends AbstractHTMLRipper { } @Override - public URL sanitizeURL(URL url) throws MalformedURLException { + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { // always start on the first 
page of an album // (strip the options after the '?') String u = url.toExternalForm(); if (u.contains("?")) { u = u.substring(0, u.indexOf("?")); - return new URL(u); + return new URI(u).toURL(); } else { return url; } @@ -157,9 +150,15 @@ public class NfsfwRipper extends AbstractHTMLRipper { @Override public boolean pageContainsAlbums(URL url) { - List imageURLs = getImagePageURLs(fstPage); - List subalbumURLs = getSubalbumURLs(fstPage); - return imageURLs.isEmpty() && !subalbumURLs.isEmpty(); + try { + final var fstPage = getCachedFirstPage(); + List imageURLs = getImagePageURLs(fstPage); + List subalbumURLs = getSubalbumURLs(fstPage); + return imageURLs.isEmpty() && !subalbumURLs.isEmpty(); + } catch (IOException | URISyntaxException e) { + LOGGER.error("Unable to load " + url, e); + return false; + } } @Override @@ -196,10 +195,10 @@ public class NfsfwRipper extends AbstractHTMLRipper { /** * Helper class to find and download images found on "image" pages */ - private class NfsfwImageThread extends Thread { - private URL url; - private String subdir; - private int index; + private class NfsfwImageThread implements Runnable { + private final URL url; + private final String subdir; + private final int index; NfsfwImageThread(URL url, String subdir, int index) { super(); @@ -223,8 +222,8 @@ public class NfsfwRipper extends AbstractHTMLRipper { if (file.startsWith("/")) { file = "http://nfsfw.com" + file; } - addURLToDownload(new URL(file), getPrefix(index), this.subdir); - } catch (IOException e) { + addURLToDownload(new URI(file).toURL(), getPrefix(index), this.subdir); + } catch (IOException | URISyntaxException e) { LOGGER.error("[!] 
Exception while loading/parsing " + this.url, e); } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/NhentaiRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/NhentaiRipper.java index 49fc1d8a..fe50f1f1 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/NhentaiRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/NhentaiRipper.java @@ -126,7 +126,7 @@ public class NhentaiRipper extends AbstractHTMLRipper { List imageURLs = new ArrayList<>(); Elements thumbs = page.select("a.gallerythumb > img"); for (Element el : thumbs) { - imageURLs.add(el.attr("data-src").replaceAll("t\\.n", "i.n").replaceAll("t\\.", ".")); + imageURLs.add(el.attr("data-src").replaceAll("://t", "://i").replaceAll("t\\.", ".")); } return imageURLs; } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/NsfwXxxRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/NsfwXxxRipper.java new file mode 100644 index 00000000..7e26faa2 --- /dev/null +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/NsfwXxxRipper.java @@ -0,0 +1,135 @@ +package com.rarchives.ripme.ripper.rippers; + +import com.rarchives.ripme.ripper.AbstractJSONRipper; +import com.rarchives.ripme.utils.Http; +import org.apache.commons.lang.StringEscapeUtils; +import org.json.JSONArray; +import org.json.JSONObject; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +public class NsfwXxxRipper extends AbstractJSONRipper { + + public NsfwXxxRipper(URL url) throws IOException { + super(url); + } + + @Override + protected String getDomain() { + return "nsfw.xxx"; + } + + @Override + public String getHost() { + return "nsfw_xxx"; + } + + + @Override + public URL sanitizeURL(URL url) 
throws MalformedURLException, URISyntaxException { + String u = url.toExternalForm(); + // https://nsfw.xxx/user/kelly-kat/foo -> https://nsfw.xxx/user/kelly-kat + // https://nsfw.xxx/user/kelly-kat -> https://nsfw.xxx/user/kelly-kat + // keep up to and including the username + u = u.replaceAll("https?://nsfw.xxx/user/([^/]+)/?.*", "https://nsfw.xxx/user/$1"); + if (!u.contains("nsfw.xxx/user")) { + throw new MalformedURLException("Invalid URL: " + url); + } + + return new URI(u).toURL(); + } + + String getUser() throws MalformedURLException { + return getGID(url); + } + + URL getPage(int page) throws MalformedURLException, URISyntaxException { + return new URI("https://nsfw.xxx/slide-page/" + page + "?nsfw%5B%5D=0&types%5B%5D=image&types%5B%5D=video&types%5B%5D=gallery&slider=1&jsload=1&user=" + getUser()).toURL(); + } + + + @Override + public String getGID(URL url) throws MalformedURLException { + Pattern p = Pattern.compile("https://nsfw.xxx/user/([^/]+)/?$"); + Matcher m = p.matcher(url.toExternalForm()); + if (m.matches()) { + return m.group(1); + } + throw new MalformedURLException("Expected URL format: " + + "nsfw.xxx/user/USER - got " + url + " instead"); + } + + + int currentPage = 1; + + @Override + protected JSONObject getFirstPage() throws IOException, URISyntaxException { + return Http.url(getPage(1)).getJSON(); + } + + List descriptions = new ArrayList<>(); + + @Override + protected JSONObject getNextPage(JSONObject doc) throws IOException, URISyntaxException { + currentPage++; + JSONObject nextPage = Http.url(getPage(doc.getInt("page") + 1)).getJSON(); + JSONArray items = nextPage.getJSONArray("items"); + if (items.isEmpty()) { + throw new IOException("No more pages"); + } + return nextPage; + } + + class ApiEntry { + String srcUrl; + String author; + String title; + + public ApiEntry(String srcUrl, String author, String title) { + this.srcUrl = srcUrl; + this.author = author; + this.title = title; + } + } + + @Override + protected List 
getURLsFromJSON(JSONObject json) { + JSONArray items = json.getJSONArray("items"); + List data = IntStream + .range(0, items.length()) + .mapToObj(items::getJSONObject) + .map(o -> { + String srcUrl; + if(o.has("src")) { + srcUrl = o.getString("src"); + } else { + // video source + Pattern videoHtmlSrcPattern = Pattern.compile("src=\"([^\"]+)\""); + Matcher matches = videoHtmlSrcPattern.matcher(o.getString("html")); + matches.find(); + srcUrl = StringEscapeUtils.unescapeHtml(matches.group(1)); + } + + return new ApiEntry(srcUrl, o.getString("author"), o.getString("title")); + }) + .toList(); + + data.forEach(e -> descriptions.add(e.title)); + return data.stream().map(e -> e.srcUrl).collect(Collectors.toList()); + } + + @Override + protected void downloadURL(URL url, int index) { + addURLToDownload(url, getPrefix(index) + descriptions.get(index - 1) + "_" , "", "", null); + } +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/NudeGalsRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/NudeGalsRipper.java index 3300da50..ea145aad 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/NudeGalsRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/NudeGalsRipper.java @@ -16,8 +16,6 @@ import com.rarchives.ripme.ripper.AbstractHTMLRipper; import com.rarchives.ripme.utils.Http; public class NudeGalsRipper extends AbstractHTMLRipper { - // Current HTML document - private Document albumDoc = null; public NudeGalsRipper(URL url) throws IOException { super(url); @@ -50,14 +48,6 @@ public class NudeGalsRipper extends AbstractHTMLRipper { + " Got: " + url); } - @Override - public Document getFirstPage() throws IOException { - if (albumDoc == null) { - albumDoc = Http.url(url).get(); - } - return albumDoc; - } - @Override public List getURLsFromPage(Document doc) { List imageURLs = new ArrayList<>(); @@ -77,4 +67,4 @@ public class NudeGalsRipper extends AbstractHTMLRipper { // Send referrer when downloading images 
addURLToDownload(url, getPrefix(index), "", this.url.toExternalForm(), null); } -} \ No newline at end of file +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/OglafRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/OglafRipper.java index a5183397..e03d3bdc 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/OglafRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/OglafRipper.java @@ -46,12 +46,6 @@ public class OglafRipper extends AbstractHTMLRipper { return getDomain(); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public Document getNextPage(Document doc) throws IOException { if (doc.select("div#nav > a > div#nx").first() == null) { diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/PahealRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/PahealRipper.java index d2421f37..39d56b83 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/PahealRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/PahealRipper.java @@ -3,25 +3,28 @@ package com.rarchives.ripme.ripper.rippers; import com.rarchives.ripme.ripper.AbstractHTMLRipper; import com.rarchives.ripme.utils.Http; import com.rarchives.ripme.utils.Utils; -import java.io.File; import java.io.IOException; import java.net.MalformedURLException; import java.net.URI; import java.net.URISyntaxException; import java.net.URL; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.apache.log4j.Logger; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; import org.jsoup.select.Elements; public class PahealRipper extends AbstractHTMLRipper { 
- private static final Logger logger = Logger.getLogger(PahealRipper.class); + private static final Logger logger = LogManager.getLogger(PahealRipper.class); private static Map cookies = null; private static Pattern gidPattern = null; @@ -56,7 +59,7 @@ public class PahealRipper extends AbstractHTMLRipper { @Override public Document getNextPage(Document page) throws IOException { for (Element e : page.select("#paginator a")) { - if (e.text().toLowerCase().equals("next")) { + if (e.text().equalsIgnoreCase("next")) { return Http.url(e.absUrl("href")).cookies(getCookies()).get(); } } @@ -88,12 +91,12 @@ public class PahealRipper extends AbstractHTMLRipper { name = name.substring(0, name.length() - ext.length()); } - File outFile = new File(workingDir.getCanonicalPath() - + File.separator + Path outFile = Paths.get(workingDir + + "/" + Utils.filesystemSafe(new URI(name).getPath()) + ext); addURLToDownload(url, outFile); - } catch (IOException | URISyntaxException ex) { + } catch (URISyntaxException ex) { logger.error("Error while downloading URL " + url, ex); } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/PawooRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/PawooRipper.java index 8f5c8c37..100068ed 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/PawooRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/PawooRipper.java @@ -3,6 +3,11 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.URL; +import com.rarchives.ripme.utils.Http; + +import org.jsoup.nodes.Document; +import org.jsoup.select.Elements; + public class PawooRipper extends MastodonRipper { public PawooRipper(URL url) throws IOException { super(url); @@ -17,4 +22,5 @@ public class PawooRipper extends MastodonRipper { public String getDomain() { return "pawoo.net"; } + } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/PhotobucketRipper.java 
b/src/main/java/com/rarchives/ripme/ripper/rippers/PhotobucketRipper.java index 680d2c09..097fe2c0 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/PhotobucketRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/PhotobucketRipper.java @@ -2,6 +2,8 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -82,7 +84,7 @@ public class PhotobucketRipper extends AbstractHTMLRipper { } @Override - public URL sanitizeURL(URL url) throws MalformedURLException { + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { LOGGER.info(url); String u = url.toExternalForm(); if (u.contains("?")) { @@ -93,11 +95,11 @@ public class PhotobucketRipper extends AbstractHTMLRipper { // append trailing slash u = u + "/"; } - return new URL(u); + return new URI(u).toURL(); } @Override - public String getGID(URL url) throws MalformedURLException { + public String getGID(URL url) throws MalformedURLException, URISyntaxException { Matcher m; URL sanitized = sanitizeURL(url); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/PichunterRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/PichunterRipper.java index e6c5d110..bdb5f528 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/PichunterRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/PichunterRipper.java @@ -63,12 +63,6 @@ public class PichunterRipper extends AbstractHTMLRipper { return m.matches(); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public Document getNextPage(Document doc) throws IOException { // We use comic-nav-next to the find the next page diff --git 
a/src/main/java/com/rarchives/ripme/ripper/rippers/PicstatioRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/PicstatioRipper.java index 1bd103b5..65d43d39 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/PicstatioRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/PicstatioRipper.java @@ -51,12 +51,6 @@ public class PicstatioRipper extends AbstractHTMLRipper { "www.picstatio.com//ID - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public Document getNextPage(Document doc) throws IOException { if (doc.select("a.next_page") != null) { diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/PorncomixRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/PorncomixRipper.java index b4579684..f021269f 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/PorncomixRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/PorncomixRipper.java @@ -41,12 +41,6 @@ public class PorncomixRipper extends AbstractHTMLRipper { "porncomix.info/comic - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/PorncomixinfoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/PorncomixinfoRipper.java new file mode 100644 index 00000000..8aef59a6 --- /dev/null +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/PorncomixinfoRipper.java @@ -0,0 +1,79 @@ +package com.rarchives.ripme.ripper.rippers; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URL; +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Matcher; +import 
java.util.regex.Pattern; + +import org.jsoup.nodes.Document; +import org.jsoup.nodes.Element; + +import com.rarchives.ripme.ripper.AbstractHTMLRipper; +import com.rarchives.ripme.utils.Http; + +public class PorncomixinfoRipper extends AbstractHTMLRipper { + + public PorncomixinfoRipper(URL url) throws IOException { + super(url); + } + + @Override + public String getHost() { + return "porncomixinfo"; + } + + @Override + public String getDomain() { + return "porncomixinfo.net"; + } + + @Override + public String getGID(URL url) throws MalformedURLException { + Pattern p = Pattern.compile("https://porncomixinfo.net/chapter/([a-zA-Z1-9_-]*)/([a-zA-Z1-9_-]*)/?$"); + Matcher m = p.matcher(url.toExternalForm()); + if (m.matches()) { + return m.group(1); + } + throw new MalformedURLException("Expected porncomixinfo URL format: " + + "porncomixinfo.net/chapter/CHAP/ID - got " + url + " instead"); + } + + @Override + public Document getNextPage(Document doc) throws IOException { + // Find next page + String nextUrl = ""; + // We use comic-nav-next to the find the next page + Element elem = doc.select("a.next_page").first(); + if (elem == null) { + throw new IOException("No more pages"); + } + String nextPage = elem.attr("href"); + // Some times this returns a empty string + // This for stops that + if (nextPage.equals("")) { + return null; + } + else { + return Http.url(nextPage).get(); + } + } + + @Override + public List getURLsFromPage(Document doc) { + List result = new ArrayList<>(); + for (Element el : doc.select("img.wp-manga-chapter-img")) { { + String imageSource = el.attr("src"); + result.add(imageSource); + } + } + return result; + } + + @Override + public void downloadURL(URL url, int index) { + addURLToDownload(url, getPrefix(index)); + } +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/PornhubRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/PornhubRipper.java index 197bdcbd..a2ce4a19 100644 --- 
a/src/main/java/com/rarchives/ripme/ripper/rippers/PornhubRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/PornhubRipper.java @@ -1,9 +1,11 @@ package com.rarchives.ripme.ripper.rippers; -import java.io.File; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; +import java.nio.file.Path; import java.util.ArrayList; import java.util.List; import java.util.regex.Matcher; @@ -47,12 +49,12 @@ public class PornhubRipper extends AbstractHTMLRipper { } @Override - public Document getNextPage(Document page) throws IOException { + public Document getNextPage(Document page) throws IOException, URISyntaxException { Elements nextPageLink = page.select("li.page_next > a"); if (nextPageLink.isEmpty()){ throw new IOException("No more pages"); } else { - URL nextURL = new URL(this.url, nextPageLink.first().attr("href")); + URL nextURL = this.url.toURI().resolve(nextPageLink.first().attr("href")).toURL(); return Http.url(nextURL).get(); } } @@ -74,7 +76,7 @@ public class PornhubRipper extends AbstractHTMLRipper { @Override protected void downloadURL(URL url, int index) { - PornhubImageThread t = new PornhubImageThread(url, index, this.workingDir); + PornhubImageThread t = new PornhubImageThread(url, index, this.workingDir.toPath()); pornhubThreadPool.addThread(t); try { Thread.sleep(IMAGE_SLEEP_TIME); @@ -83,13 +85,13 @@ public class PornhubRipper extends AbstractHTMLRipper { } } - public URL sanitizeURL(URL url) throws MalformedURLException { + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { // always start on the first page of an album // (strip the options after the '?') String u = url.toExternalForm(); if (u.contains("?")) { u = u.substring(0, u.indexOf("?")); - return new URL(u); + return new URI(u).toURL(); } else { return url; } @@ -126,11 +128,11 @@ public class PornhubRipper extends AbstractHTMLRipper { * * Handles case when 
site has IP-banned the user. */ - private class PornhubImageThread extends Thread { - private URL url; - private int index; + private class PornhubImageThread implements Runnable { + private final URL url; + private final int index; - PornhubImageThread(URL url, int index, File workingDir) { + PornhubImageThread(URL url, int index, Path workingDir) { super(); this.url = url; this.index = index; @@ -159,10 +161,10 @@ public class PornhubRipper extends AbstractHTMLRipper { prefix = String.format("%03d_", index); } - URL imgurl = new URL(url, imgsrc); + URL imgurl = url.toURI().resolve(imgsrc).toURL(); addURLToDownload(imgurl, prefix); - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { LOGGER.error("[!] Exception while loading/parsing " + this.url, e); } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/PornpicsRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/PornpicsRipper.java index b779c480..799f7294 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/PornpicsRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/PornpicsRipper.java @@ -41,12 +41,6 @@ public class PornpicsRipper extends AbstractHTMLRipper { "www.pornpics.com/galleries/ID - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ReadcomicRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ReadcomicRipper.java new file mode 100644 index 00000000..55b3559a --- /dev/null +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ReadcomicRipper.java @@ -0,0 +1,55 @@ +package com.rarchives.ripme.ripper.rippers; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URL; +import java.util.ArrayList; +import 
java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.jsoup.nodes.Document; +import org.jsoup.nodes.Element; + +import com.rarchives.ripme.ripper.AbstractHTMLRipper; +import com.rarchives.ripme.utils.Http; + +public class ReadcomicRipper extends ViewcomicRipper { + + public ReadcomicRipper(URL url) throws IOException { + super(url); + } + + @Override + public String getHost() { + return "read-comic"; + } + + @Override + public String getDomain() { + return "read-comic.com"; + } + + + + @Override + public String getGID(URL url) throws MalformedURLException { + Pattern p = Pattern.compile("https?://read-comic.com/([a-zA-Z1-9_-]*)/?$"); + Matcher m = p.matcher(url.toExternalForm()); + if (m.matches()) { + return m.group(1); + } + throw new MalformedURLException("Expected view-comic URL format: " + + "read-comic.com/COMIC_NAME - got " + url + " instead"); + } + + @Override + public List getURLsFromPage(Document doc) { + List result = new ArrayList(); + for (Element el : doc.select("div.pinbin-copy > a > img")) { + result.add(el.attr("src")); + } + return result; + } + +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/RedditRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/RedditRipper.java index e68e477d..dcfa14e7 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/RedditRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/RedditRipper.java @@ -1,15 +1,24 @@ package com.rarchives.ripme.ripper.rippers; -import java.io.File; import java.io.IOException; +import java.io.OutputStream; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Date; import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; import com.rarchives.ripme.ui.RipStatusMessage; +import j2html.TagCreator; +import j2html.tags.ContainerTag; +import 
j2html.tags.specialized.DivTag; import org.json.JSONArray; +import org.json.JSONException; import org.json.JSONObject; import org.json.JSONTokener; @@ -18,6 +27,9 @@ import com.rarchives.ripme.ui.UpdateUtils; import com.rarchives.ripme.utils.Http; import com.rarchives.ripme.utils.RipUtils; import com.rarchives.ripme.utils.Utils; +import org.jsoup.Jsoup; + +import static j2html.TagCreator.*; public class RedditRipper extends AlbumRipper { @@ -46,41 +58,52 @@ public class RedditRipper extends AlbumRipper { } @Override - public URL sanitizeURL(URL url) throws MalformedURLException { + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { String u = url.toExternalForm(); // Strip '/u/' from URL u = u.replaceAll("reddit\\.com/u/", "reddit.com/user/"); - return new URL(u); + return new URI(u).toURL(); } - private URL getJsonURL(URL url) throws MalformedURLException { + private URL getJsonURL(URL url) throws MalformedURLException, URISyntaxException { + // Convert gallery to post link and append ".json" + Pattern p = Pattern.compile("^https?://[a-zA-Z0-9.]{0,4}reddit\\.com/gallery/([a-zA-Z0-9]+).*$"); + Matcher m = p.matcher(url.toExternalForm()); + if (m.matches()) { + return new URI("https://reddit.com/" +m.group(m.groupCount())+ ".json").toURL(); + } + // Append ".json" to URL in appropriate location. String result = url.getProtocol() + "://" + url.getHost() + url.getPath() + ".json"; if (url.getQuery() != null) { result += "?" 
+ url.getQuery(); } - return new URL(result); + return new URI(result).toURL(); } @Override public void rip() throws IOException { - URL jsonURL = getJsonURL(this.url); - while (true) { - if (shouldAddURL()) { - sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_COMPLETE_HISTORY, "Already seen the last " + alreadyDownloadedUrls + " images ending rip"); - break; - } - jsonURL = getAndParseAndReturnNext(jsonURL); - if (jsonURL == null || isThisATest() || isStopped()) { - break; + try { + URL jsonURL = getJsonURL(this.url); + while (true) { + if (shouldAddURL()) { + sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_COMPLETE_HISTORY, "Already seen the last " + alreadyDownloadedUrls + " images ending rip"); + break; + } + jsonURL = getAndParseAndReturnNext(jsonURL); + if (jsonURL == null || isThisATest() || isStopped()) { + break; + } } + } catch (URISyntaxException e) { + new IOException(e.getMessage()); } waitForThreads(); } - private URL getAndParseAndReturnNext(URL url) throws IOException { + private URL getAndParseAndReturnNext(URL url) throws IOException, URISyntaxException { JSONArray jsonArray = getJsonArrayFromURL(url), children; JSONObject json, data; URL nextURL = null; @@ -95,7 +118,19 @@ public class RedditRipper extends AlbumRipper { } children = data.getJSONArray("children"); for (int j = 0; j < children.length(); j++) { - parseJsonChild(children.getJSONObject(j)); + try { + parseJsonChild(children.getJSONObject(j)); + + if (children.getJSONObject(j).getString("kind").equals("t3") && + children.getJSONObject(j).getJSONObject("data").getBoolean("is_self") + ) { + URL selfPostURL = new URI(children.getJSONObject(j).getJSONObject("data").getString("url")).toURL(); + System.out.println(selfPostURL.toExternalForm()); + saveText(getJsonArrayFromURL(getJsonURL(selfPostURL))); + } + } catch (Exception e) { + LOGGER.debug("at index " + i + ", for this data: " + data.toString() + e); + } } if (data.has("after") && !data.isNull("after")) { String nextURLString = 
Utils.stripURLParameter(url.toExternalForm(), "after"); @@ -105,7 +140,7 @@ public class RedditRipper extends AlbumRipper { else { nextURLString = nextURLString.concat("?after=" + data.getString("after")); } - nextURL = new URL(nextURLString); + nextURL = new URI(nextURLString).toURL(); } } @@ -188,6 +223,8 @@ public class RedditRipper extends AlbumRipper { if (data.getBoolean("is_self")) { // TODO Parse self text handleBody(data.getString("selftext"), data.getString("id"), data.getString("title")); + } else if (!data.isNull("gallery_data") && !data.isNull("media_metadata")) { + handleGallery(data.getJSONObject("gallery_data").getJSONArray("items"), data.getJSONObject("media_metadata"), data.getString("id"), data.getString("title")); } else { // Get link handleURL(data.getString("url"), data.getString("id"), data.getString("title")); @@ -215,8 +252,123 @@ public class RedditRipper extends AlbumRipper { } } + private void saveText(JSONArray jsonArray) throws JSONException { + Path saveFileAs; + + JSONObject selfPost = jsonArray.getJSONObject(0).getJSONObject("data") + .getJSONArray("children").getJSONObject(0).getJSONObject("data"); + JSONArray comments = jsonArray.getJSONObject(1).getJSONObject("data") + .getJSONArray("children"); + + if (selfPost.getString("selftext").equals("")) { return; } + + final String title = selfPost.getString("title"); + final String id = selfPost.getString("id"); + final String author = selfPost.getString("author"); + final String creationDate = new Date((long) selfPost.getInt("created") * 1000).toString(); + final String subreddit = selfPost.getString("subreddit"); + final String selfText = selfPost.getString("selftext_html"); + final String permalink = selfPost.getString("url"); + + String html = TagCreator.html( + head( + title(title), + style(rawHtml(HTML_STYLING)) + ), + body( + div( + h1(title), + a(subreddit).withHref("https://www.reddit.com/r/" + subreddit), + a("Original").withHref(permalink), + br() + ).withClass("thing"), + 
div( + div( + span( + a(author).withHref("https://www.reddit.com/u/" + author) + ).withClass("author op") + ).withClass("thing oppost") + .withText(creationDate) + .with(rawHtml(Jsoup.parse(selfText).text())) + ).withClass("flex") + ).with(getComments(comments, author)), + script(rawHtml(HTML_SCRIPT)) + ).renderFormatted(); + + try { + saveFileAs = Utils.getPath(workingDir + + "/" + + id + "_" + Utils.filesystemSafe(title) + + ".html"); + OutputStream out = Files.newOutputStream(saveFileAs); + out.write(html.getBytes()); + out.close(); + } catch (IOException e) { + LOGGER.error("[!] Error creating save file path for description '" + url + "':", e); + return; + } + + LOGGER.debug("Downloading " + url + "'s self post to " + saveFileAs); + super.retrievingSource(permalink); + if (!Files.exists(saveFileAs.getParent())) { + LOGGER.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent())); + try { + Files.createDirectory(saveFileAs.getParent()); + } catch (IOException e) { + e.printStackTrace(); + } + } + } + + private ContainerTag getComments(JSONArray comments, String author) { + ContainerTag commentsDiv = div().withId("comments"); + + for (int i = 0; i < comments.length(); i++) { + JSONObject data = comments.getJSONObject(i).getJSONObject("data"); + + try { + ContainerTag commentDiv = + div( + span(data.getString("author")).withClasses("author", iff(data.getString("author").equals(author), "op")), + a(new Date((long) data.getInt("created") * 1000).toString()).withHref("#" + data.getString("name")) + ).withClass("thing comment").withId(data.getString("name")) + .with(rawHtml(Jsoup.parse(data.getString("body_html")).text())); + getNestedComments(data, commentDiv, author); + commentsDiv.with(commentDiv); + } catch (Exception e) { + LOGGER.debug("at index " + i + ", for this data: " + data.toString() + e); + } + } + return commentsDiv; + } + + private ContainerTag getNestedComments(JSONObject data, ContainerTag parentDiv, String author) { + if 
(data.has("replies") && data.get("replies") instanceof JSONObject) { + JSONArray commentChildren = data.getJSONObject("replies").getJSONObject("data").getJSONArray("children"); + for (int i = 0; i < commentChildren.length(); i++) { + JSONObject nestedComment = commentChildren + .getJSONObject(i).getJSONObject("data"); + + String nestedCommentAuthor = nestedComment.optString("author"); + if (!nestedCommentAuthor.isBlank()) { + ContainerTag childDiv = + div( + div( + span(nestedCommentAuthor).withClasses("author", iff(nestedCommentAuthor.equals(author), "op")), + a(new Date((long) nestedComment.getInt("created") * 1000).toString()).withHref("#" + nestedComment.getString("name")) + ).withClass("comment").withId(nestedComment.getString("name")) + .with(rawHtml(Jsoup.parse(nestedComment.getString("body_html")).text())) + ).withClass("child"); + + parentDiv.with(getNestedComments(nestedComment, childDiv, author)); + } + } + } + return parentDiv; + } + private URL parseRedditVideoMPD(String vidURL) { - org.jsoup.nodes.Document doc = null; + org.jsoup.nodes.Document doc; try { doc = Http.url(vidURL + "/DASHPlaylist.mpd").ignoreContentType().get(); int largestHeight = 0; @@ -232,8 +384,8 @@ public class RedditRipper extends AlbumRipper { baseURL = doc.select("MPD > Period > AdaptationSet > Representation[height=" + height + "]").select("BaseURL").text(); } } - return new URL(vidURL + "/" + baseURL); - } catch (IOException e) { + return new URI(vidURL + "/" + baseURL).toURL(); + } catch (IOException | URISyntaxException e) { e.printStackTrace(); } return null; @@ -243,8 +395,8 @@ public class RedditRipper extends AlbumRipper { private void handleURL(String theUrl, String id, String title) { URL originalURL; try { - originalURL = new URL(theUrl); - } catch (MalformedURLException e) { + originalURL = new URI(theUrl).toURL(); + } catch (MalformedURLException | URISyntaxException e) { return; } String subdirectory = ""; @@ -264,21 +416,21 @@ public class RedditRipper extends 
AlbumRipper { Matcher m = p.matcher(url); if (m.matches()) { // It's from reddituploads. Assume .jpg extension. - String savePath = this.workingDir + File.separator; - savePath += id + "-" + m.group(1) + title + ".jpg"; - addURLToDownload(urls.get(0), new File(savePath)); + String savePath = this.workingDir + "/"; + savePath += id + "-" + m.group(1) + Utils.filesystemSafe(title) + ".jpg"; + addURLToDownload(urls.get(0), Utils.getPath(savePath)); } if (url.contains("v.redd.it")) { - String savePath = this.workingDir + File.separator; - savePath += id + "-" + url.split("/")[3] + title + ".mp4"; + String savePath = this.workingDir + "/"; + savePath += id + "-" + url.split("/")[3] + Utils.filesystemSafe(title) + ".mp4"; URL urlToDownload = parseRedditVideoMPD(urls.get(0).toExternalForm()); if (urlToDownload != null) { LOGGER.info("url: " + urlToDownload + " file: " + savePath); - addURLToDownload(urlToDownload, new File(savePath)); + addURLToDownload(urlToDownload, Utils.getPath(savePath)); } } else { - addURLToDownload(urls.get(0), id + title, "", theUrl, null); + addURLToDownload(urls.get(0), Utils.filesystemSafe(id + title), "", theUrl, null); } } else if (urls.size() > 1) { for (int i = 0; i < urls.size(); i++) { @@ -291,6 +443,35 @@ public class RedditRipper extends AlbumRipper { } } + private void handleGallery(JSONArray data, JSONObject metadata, String id, String title){ + //TODO handle captions and caption urls + String subdirectory = ""; + if (Utils.getConfigBoolean("reddit.use_sub_dirs", true)) { + if (Utils.getConfigBoolean("album_titles.save", true)) { + subdirectory = title; + } + } + for (int i = 0; i < data.length(); i++) { + JSONObject media = metadata.getJSONObject(data.getJSONObject(i).getString("media_id")); + String prefix = id + "-"; + if (Utils.getConfigBoolean("download.save_order", true)) { + //announcement says up to 20 (https://www.reddit.com/r/announcements/comments/hrrh23/now_you_can_make_posts_with_multiple_images/) + prefix += 
String.format("%02d-", i + 1); + } + try { + URL mediaURL; + if (!media.getJSONObject("s").isNull("gif")) { + mediaURL = new URI(media.getJSONObject("s").getString("gif").replaceAll("&amp;", "&")).toURL(); + } else { + mediaURL = new URI(media.getJSONObject("s").getString("u").replaceAll("&amp;", "&")).toURL(); + } + addURLToDownload(mediaURL, prefix, subdirectory); + } catch (MalformedURLException | JSONException | URISyntaxException e) { + LOGGER.error("[!] Unable to parse gallery JSON:\ngallery_data:\n" + data +"\nmedia_metadata:\n" + metadata); + } + } + } + + @Override + public String getHost() { return HOST; @@ -312,6 +493,13 @@ public class RedditRipper extends AlbumRipper { return "post_" + m.group(m.groupCount()); } + // Gallery + p = Pattern.compile("^https?://[a-zA-Z0-9.]{0,4}reddit\\.com/gallery/([a-zA-Z0-9]+).*$"); + m = p.matcher(url.toExternalForm()); + if (m.matches()) { + return "post_" + m.group(m.groupCount()); + } + // Subreddit p = Pattern.compile("^https?://[a-zA-Z0-9.]{0,4}reddit\\.com/r/([a-zA-Z0-9_]+).*$"); m = p.matcher(url.toExternalForm()); @@ -319,7 +507,10 @@ public class RedditRipper extends AlbumRipper { return "sub_" + m.group(m.groupCount()); } - throw new MalformedURLException("Only accepts user pages, subreddits, or post, can't understand " + url); + throw new MalformedURLException("Only accepts user pages, subreddits, post, or gallery can't understand " + url); } + private static final String HTML_STYLING = " .author { font-weight: bold; } .op { color: blue; } .comment { border: 0px; margin: 0 0 25px; padding-left: 5px; } .child { margin: 2px 0 0 20px; border-left: 2px dashed #AAF; } .collapsed { background: darkgrey; margin-bottom: 0; } .collapsed > div { display: none; } .md { max-width: 840px; padding-right: 1em; } h1 { margin: 0; } body { position: relative; background-color: #eeeeec; color: #00000a; font-weight: 400; font-style: normal; font-variant: normal; font-family: Helvetica,Arial,sans-serif; line-height: 1.4 } blockquote {
margin: 5px 5px 5px 15px; padding: 1px 1px 1px 15px; max-width: 60em; border: 1px solid #ccc; border-width: 0 0 0 1px; } pre { white-space: pre-wrap; } img, video { max-width: 60vw; max-height: 90vh; object-fit: contain; } .thing { overflow: hidden; margin: 0 5px 3px 40px; border: 1px solid #e0e0e0; background-color: #fcfcfb; } :target > .md { border: 5px solid blue; } .post { margin-bottom: 20px; margin-top: 20px; } .gold { background: goldenrod; } .silver { background: silver; } .platinum { background: aqua; } .deleted { background: #faa; } .md.deleted { background: inherit; border: 5px solid #faa; } .oppost { background-color: #EEF; } blockquote > p { margin: 0; } #related { max-height: 20em; overflow-y: scroll; background-color: #F4FFF4; } #related h3 { position: sticky; top: 0; background-color: white; } .flex { display: flex; flex-flow: wrap; flex-direction: row-reverse; justify-content: flex-end; } "; + private static final String HTML_SCRIPT = "document.addEventListener('mousedown', function(e) { var t = e.target; if (t.className == 'author') { t = t.parentElement; } if (t.classList.contains('comment')) { t.classList.toggle('collapsed'); e.preventDefault(); e.stopPropagation(); return false; } });"; + } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/RedgifsRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/RedgifsRipper.java new file mode 100644 index 00000000..e82db4b2 --- /dev/null +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/RedgifsRipper.java @@ -0,0 +1,370 @@ +package com.rarchives.ripme.ripper.rippers; + +import com.rarchives.ripme.utils.Http; + +import org.json.JSONObject; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.net.URLDecoder; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import 
java.util.List; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.apache.http.client.utils.URIBuilder; + +import com.rarchives.ripme.ripper.AbstractJSONRipper; + +public class RedgifsRipper extends AbstractJSONRipper { + + private static final String HOST = "redgifs.com"; + private static final String HOST_2 = "gifdeliverynetwork.com"; + private static final String GIFS_DETAIL_ENDPOINT = "https://api.redgifs.com/v2/gifs/%s"; + private static final String USERS_SEARCH_ENDPOINT = "https://api.redgifs.com/v2/users/%s/search"; + private static final String GALLERY_ENDPOINT = "https://api.redgifs.com/v2/gallery/%s"; + private static final String SEARCH_ENDPOINT = "https://api.redgifs.com/v2/search/%s"; + private static final String TAGS_ENDPOINT = "https://api.redgifs.com/v2/gifs/search"; + private static final String TEMPORARY_AUTH_ENDPOINT = "https://api.redgifs.com/v2/auth/temporary"; + private static final Pattern PROFILE_PATTERN = Pattern.compile("^https?://[a-zA-Z0-9.]*redgifs\\.com/users/([a-zA-Z0-9_.-]+).*$"); + private static final Pattern SEARCH_PATTERN = Pattern.compile("^https?:\\/\\/[a-zA-Z0-9.]*redgifs\\.com\\/search(?:\\/[a-zA-Z]+)?\\?.*?query=([a-zA-Z0-9-_+%]+).*$"); + private static final Pattern TAGS_PATTERN = Pattern.compile("^https?:\\/\\/[a-zA-Z0-9.]*redgifs\\.com\\/gifs\\/([a-zA-Z0-9_.,-]+).*$"); + private static final Pattern SINGLETON_PATTERN = Pattern.compile("^https?://[a-zA-Z0-9.]*redgifs\\.com/watch/([a-zA-Z0-9_-]+).*$"); + + /** + * Keep a single auth token for the complete lifecycle of the app. + * This should prevent fetching of multiple tokens. 
+ */ + private static String authToken = ""; + + String username = ""; + int count = 40; + int currentPage = 1; + int maxPages = 1; + + public RedgifsRipper(URL url) throws IOException, URISyntaxException { + super(new URI(url.toExternalForm().replace("thumbs.", "")).toURL()); + } + + @Override + public String getDomain() { return "redgifs.com"; } + + @Override + public String getHost() { + return "redgifs"; + } + + @Override + public boolean canRip(URL url) { + return url.getHost().endsWith(HOST) || url.getHost().endsWith(HOST_2); + } + + @Override + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { + String sUrl = url.toExternalForm(); + sUrl = sUrl.replace("/gifs/detail", ""); + sUrl = sUrl.replace("/amp", ""); + sUrl = sUrl.replace("gifdeliverynetwork.com", "redgifs.com/watch"); + return new URI(sUrl).toURL(); + } + + public Matcher isProfile() { + return PROFILE_PATTERN.matcher(url.toExternalForm()); + } + + public Matcher isSearch() { + return SEARCH_PATTERN.matcher(url.toExternalForm()); + } + + public Matcher isTags() { + return TAGS_PATTERN.matcher(url.toExternalForm()); + } + + public Matcher isSingleton() { + return SINGLETON_PATTERN.matcher(url.toExternalForm()); + } + + @Override + public JSONObject getFirstPage() throws IOException { + try { + if (authToken == null || authToken.isBlank()) { + fetchAuthToken(); + } + + if (isSingleton().matches()) { + maxPages = 1; + String gifDetailsURL = String.format(GIFS_DETAIL_ENDPOINT, getGID(url)); + return Http.url(gifDetailsURL).header("Authorization", "Bearer " + authToken).getJSON(); + } else if (isSearch().matches() || isTags().matches()) { + var json = Http.url(getSearchOrTagsURL()).header("Authorization", "Bearer " + authToken).getJSON(); + maxPages = json.getInt("pages"); + return json; + } else { + username = getGID(url); + var uri = new URIBuilder(String.format(USERS_SEARCH_ENDPOINT, username)); + uri.addParameter("order", "new"); + uri.addParameter("count", 
Integer.toString(count)); + uri.addParameter("page", Integer.toString(currentPage)); + var json = Http.url(uri.build().toURL()).header("Authorization", "Bearer " + authToken).getJSON(); + maxPages = json.getInt("pages"); + return json; + } + } catch (URISyntaxException e) { + throw new IOException("Failed to build first page url", e); + } + } + + @Override + public void downloadURL(URL url, int index) { + addURLToDownload(url, getPrefix(index)); + } + + @Override + public String getGID(URL url) throws MalformedURLException { + Matcher m = isProfile(); + if (m.matches()) { + return m.group(1); + } + m = isSearch(); + if (m.matches()) { + var sText = m.group(1); + if (sText == null || sText.isBlank()) { + throw new MalformedURLException(String.format("Expected redgifs.com/search?query=searchtext\n Got %s", url)); + } + sText = URLDecoder.decode(sText, StandardCharsets.UTF_8); + sText = sText.replaceAll("[^A-Za-z0-9_-]", "-"); + return sText; + } + m = isTags(); + if (m.matches()) { + var sText = m.group(1); + if (sText == null || sText.isBlank()) { + throw new MalformedURLException(String.format("Expected redgifs.com/gifs/searchtags\n Got %s", url)); + } + sText = URLDecoder.decode(sText, StandardCharsets.UTF_8); + var list = Arrays.asList(sText.split(",")); + if (list.size() > 1) { + LOGGER.warn("Url with multiple tags found. 
\nThey will be sorted alphabetically for folder name."); + } + Collections.sort(list); + var gid = list.stream().reduce("", (acc, val) -> acc.concat("_" + val)); + gid = gid.replaceAll("[^A-Za-z0-9_-]", "-"); + return gid; + } + m = isSingleton(); + if (m.matches()) { + return m.group(1).split("-")[0]; + } + throw new MalformedURLException( + "Expected redgifs.com format: " + + "redgifs.com/watch/id or " + + "redgifs.com/users/id or " + + "redgifs.com/gifs/id or " + + "redgifs.com/search?query=text" + + " Got: " + url); + } + + @Override + public JSONObject getNextPage(JSONObject doc) throws IOException, URISyntaxException { + if (currentPage == maxPages || isSingleton().matches()) { + return null; + } + currentPage++; + if (isSearch().matches() || isTags().matches()) { + var json = Http.url(getSearchOrTagsURL()).header("Authorization", "Bearer " + authToken).getJSON(); + // Handle rare maxPages change during a rip + maxPages = json.getInt("pages"); + return json; + } else if (isProfile().matches()) { + var uri = new URIBuilder(String.format(USERS_SEARCH_ENDPOINT, getGID(url))); + uri.addParameter("order", "new"); + uri.addParameter("count", Integer.toString(count)); + uri.addParameter("page", Integer.toString(currentPage)); + var json = Http.url(uri.build().toURL()).header("Authorization", "Bearer " + authToken).getJSON(); + // Handle rare maxPages change during a rip + maxPages = json.getInt("pages"); + return json; + } else { + return null; + } + } + + @Override + public List getURLsFromJSON(JSONObject json) { + List result = new ArrayList<>(); + if (isProfile().matches() || isSearch().matches() || isTags().matches()) { + var gifs = json.getJSONArray("gifs"); + for (var gif : gifs) { + if (((JSONObject)gif).isNull("gallery")) { + var hdURL = ((JSONObject)gif).getJSONObject("urls").getString("hd"); + result.add(hdURL); + } else { + var galleryID = ((JSONObject)gif).getString("gallery"); + var gifID = ((JSONObject)gif).getString("id"); + 
result.addAll(getURLsForGallery(galleryID, gifID)); + } + } + } else { + var gif = json.getJSONObject("gif"); + if (gif.isNull("gallery")) { + String hdURL = gif.getJSONObject("urls").getString("hd"); + result.add(hdURL); + } else { + var galleryID = gif.getString("gallery"); + var gifID = gif.getString("id"); + result.addAll(getURLsForGallery(galleryID, gifID)); + } + } + return result; + } + + + /** + * Get all images for a gif url with multiple images + * @param galleryID gallery id + * @param gifID gif id with multiple images for logging + * @return List + */ + private static List getURLsForGallery(String galleryID, String gifID) { + List list = new ArrayList<>(); + if (galleryID == null || galleryID.isBlank()) { + return list; + } + try { + var json = Http.url(String.format(GALLERY_ENDPOINT, galleryID)).header("Authorization", "Bearer " + authToken).getJSON(); + for (var gif : json.getJSONArray("gifs")) { + var hdURL = ((JSONObject)gif).getJSONObject("urls").getString("hd"); + list.add(hdURL); + } + } catch (IOException e) { + LOGGER.error(String.format("Error fetching gallery %s for gif %s", galleryID, gifID), e); + } + return list; + } + /** + * Static helper method for retrieving video URLs for usage in RipUtils. 
+ * Most of the code is lifted from getFirstPage and getURLsFromJSON + * @param url URL to redgif page + * @return URL to video + * @throws IOException + */ + public static String getVideoURL(URL url) throws IOException, URISyntaxException { + LOGGER.info("Retrieving " + url.toExternalForm()); + var m = SINGLETON_PATTERN.matcher(url.toExternalForm()); + if (!m.matches()){ + throw new IOException(String.format("Cannot fetch redgif url %s", url.toExternalForm())); + } + if (authToken == null || authToken.isBlank()){ + fetchAuthToken(); + } + var gid = m.group(1).split("-")[0]; + var gifDetailsURL = String.format(GIFS_DETAIL_ENDPOINT, gid); + var json = Http.url(gifDetailsURL).header("Authorization", "Bearer " + authToken).getJSON(); + var gif = json.getJSONObject("gif"); + if (!gif.isNull("gallery")){ + // TODO check how to handle a image gallery + throw new IOException(String.format("Multiple images found for url %s", url)); + } + return gif.getJSONObject("urls").getString("hd"); + } + + + /** + * Fetch a temorary auth token for the rip + * @throws IOException + */ + private static void fetchAuthToken() throws IOException{ + var json = Http.url(TEMPORARY_AUTH_ENDPOINT).getJSON(); + var token = json.getString("token"); + authToken = token; + LOGGER.info("Incase of redgif 401 errors, please restart the app to refresh the auth token"); + } + + /** + * Map browser url query params to search or tags endpoint query params and return the complete url. + * + * Search text for search url comes from the query params, whereas search text for tags url comes from the path. + * + * Tab type for search url comes from the path whereas, tab type for tags url comes from query params. 
+ * @return Search or tags endpoint url + */ + private URL getSearchOrTagsURL() throws IOException, URISyntaxException { + URIBuilder uri; + Map endpointQueryParams = new HashMap<>(); + var browserURLQueryParams = new URIBuilder(url.toString()).getQueryParams(); + for (var qp : browserURLQueryParams) { + var name = qp.getName(); + var value = qp.getValue(); + switch (name) { + case "query": + endpointQueryParams.put("query", URLDecoder.decode(value, StandardCharsets.UTF_8)); + break; + case "tab": + switch (value) { + case "gifs" -> endpointQueryParams.put("type", "g"); + case "images" -> endpointQueryParams.put("type", "i"); + default -> LOGGER.warn(String.format("Unsupported tab for tags url %s", value)); + } + break; + case "verified": + if (value != null && value.equals("1")) { + if (isTags().matches()) { + endpointQueryParams.put("verified", "y"); + } else { + endpointQueryParams.put("verified", "yes"); + } + } + break; + case "order": + endpointQueryParams.put("order", value); + break; + case "viewMode": + break; + default: + LOGGER.warn(String.format("Unexpected query param %s for search url. 
Skipping.", name)); + } + } + + // Build the search or tags url and add missing query params if any + if (isTags().matches()) { + var subpaths = url.getPath().split("/"); + if (subpaths.length != 0) { + endpointQueryParams.put("search_text", subpaths[subpaths.length-1]); + } else { + throw new IOException("Failed to get search tags for url"); + } + // Check if it is the main tags page with all gifs, images, creator etc + if (!endpointQueryParams.containsKey("type")) { + LOGGER.warn("No tab selected, defaulting to gifs"); + endpointQueryParams.put("type", "g"); + } + uri = new URIBuilder(TAGS_ENDPOINT); + } else { + var tabType = "gifs"; + var subpaths = url.getPath().split("/"); + if (subpaths.length != 0) { + switch (subpaths[subpaths.length-1]) { + case "gifs" -> tabType = "gifs"; + case "images" -> tabType = "images"; + case "search" -> LOGGER.warn("No tab selected, defaulting to gifs"); + default -> LOGGER.warn(String.format("Unsupported search tab %s, defaulting to gifs", subpaths[subpaths.length-1])); + } + } + uri = new URIBuilder(String.format(SEARCH_ENDPOINT, tabType)); + } + + endpointQueryParams.put("page", Integer.toString(currentPage)); + endpointQueryParams.put("count", Integer.toString(count)); + endpointQueryParams.forEach((k, v) -> uri.addParameter(k, v)); + + return uri.build().toURL(); + } +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/Rule34Ripper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/Rule34Ripper.java index 681738fa..c7245739 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/Rule34Ripper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/Rule34Ripper.java @@ -2,6 +2,8 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -51,13 +53,13 @@ public class Rule34Ripper extends AbstractHTMLRipper { 
"rule34.xxx/index.php?page=post&s=list&tags=TAG - got " + url + " instead"); } - public URL getAPIUrl() throws MalformedURLException { - URL urlToReturn = new URL("https://rule34.xxx/index.php?page=dapi&s=post&q=index&limit=100&tags=" + getGID(url)); + public URL getAPIUrl() throws MalformedURLException, URISyntaxException { + URL urlToReturn = new URI("https://rule34.xxx/index.php?page=dapi&s=post&q=index&limit=100&tags=" + getGID(url)).toURL(); return urlToReturn; } @Override - public Document getFirstPage() throws IOException { + public Document getFirstPage() throws IOException, URISyntaxException { apiUrl = getAPIUrl().toExternalForm(); // "url" is an instance field of the superclass return Http.url(getAPIUrl()).get(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/RulePornRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/RulePornRipper.java index c9c487a7..be33c945 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/RulePornRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/RulePornRipper.java @@ -40,11 +40,6 @@ public class RulePornRipper extends AbstractSingleFileRipper { "Expected ruleporn.com URL format: " + "ruleporn.com/NAME - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ScrolllerRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ScrolllerRipper.java new file mode 100644 index 00000000..2df6ab2c --- /dev/null +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ScrolllerRipper.java @@ -0,0 +1,293 @@ +package com.rarchives.ripme.ripper.rippers; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.OutputStreamWriter; +import java.net.*; +import java.nio.charset.Charset; +import java.util.ArrayList; 
+import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.java_websocket.client.WebSocketClient; + +import org.apache.http.NameValuePair; +import org.apache.http.client.utils.URLEncodedUtils; +import org.java_websocket.handshake.ServerHandshake; +import org.json.JSONArray; +import org.json.JSONException; +import org.json.JSONObject; + +import com.rarchives.ripme.ripper.AbstractJSONRipper; + +public class ScrolllerRipper extends AbstractJSONRipper { + + public ScrolllerRipper(URL url) throws IOException { + super(url); + } + + @Override + public String getHost() { + return "scrolller"; + } + @Override + public String getDomain() { + return "scrolller.com"; + } + + @Override + public String getGID(URL url) throws MalformedURLException { + // Typical URL is: https://scrolller.com/r/subreddit + // Parameters like "filter" and "sort" can be passed (ex: https://scrolller.com/r/subreddit?filter=xxx&sort=yyyy) + Pattern p = Pattern.compile("^https?://scrolller\\.com/r/([a-zA-Z0-9]+).*?$"); + Matcher m = p.matcher(url.toExternalForm()); + if (m.matches()) { + return m.group(1); + } + throw new MalformedURLException("Expected scrolller.com URL format: " + + "scrolller.com/r/subreddit OR scroller.com/r/subreddit?filter= - got " + url + "instead"); + } + + @Override + public void downloadURL(URL url, int index) { + addURLToDownload(url, getPrefix(index)); + } + + + private JSONObject prepareQuery(String iterator, String gid, String sortByString) throws IOException, URISyntaxException { + + String QUERY_NOSORT = "query SubredditQuery( $url: String! $filter: SubredditPostFilter $iterator: String ) { getSubreddit(url: $url) { children( limit: 50 iterator: $iterator filter: $filter ) { iterator items { __typename url title subredditTitle subredditUrl redditPath isNsfw albumUrl isFavorite mediaSources { url width height isOptimized } } } } }"; + String QUERY_SORT = "subscription SubredditSubscription( $url: String! 
$sortBy: SubredditSortBy $timespan: SubredditTimespan $iterator: String $limit: Int $filter: SubredditPostFilter ) { fetchSubreddit( url: $url sortBy: $sortBy timespan: $timespan iterator: $iterator limit: $limit filter: $filter ) { __typename ... on Subreddit { __typename url title secondaryTitle description createdAt isNsfw subscribers isComplete itemCount videoCount pictureCount albumCount isFollowing } ... on SubredditPost { __typename url title subredditTitle subredditUrl redditPath isNsfw albumUrl isFavorite mediaSources { url width height isOptimized } } ... on Iterator { iterator } ... on Error { message } } }"; + + String filterString = convertFilterString(getParameter(this.url,"filter")); + + JSONObject variablesObject = new JSONObject().put("url", String.format("/r/%s", gid)).put("sortBy", sortByString.toUpperCase()); + JSONObject finalQueryObject = new JSONObject().put("variables", variablesObject).put("query", sortByString.equals("") ? QUERY_NOSORT : QUERY_SORT); + + if (iterator != null) { + // Iterator is not present on the first page + variablesObject.put("iterator", iterator); + } + if (!filterString.equals("NOFILTER")) { + variablesObject.put("filter", filterString); + } + + return sortByString.equals("") ? getPosts(finalQueryObject) : getPostsSorted(finalQueryObject); + + } + + + public String convertFilterString(String filterParameter) { + // Converts the ?filter= parameter of the URL to one that can be used in the GraphQL query + // I could basically remove the last "s" and call toUpperCase instead of this switch statement but this looks easier to read. 
+ switch (filterParameter.toLowerCase()) { + case "pictures": + return "PICTURE"; + case "videos": + return "VIDEO"; + case "albums": + return "ALBUM"; + case "": + return "NOFILTER"; + default: + LOGGER.error(String.format("Invalid filter %s using no filter",filterParameter)); + return ""; + } + } + + public String getParameter(URL url, String parameter) throws MalformedURLException { + // Gets passed parameters from the URL + String toReplace = String.format("https://scrolller.com/r/%s?",getGID(url)); + List args= URLEncodedUtils.parse(url.toExternalForm(), Charset.defaultCharset()); + for (NameValuePair arg:args) { + // First parameter contains part of the url so we have to remove it + // Ex: for the url https://scrolller.com/r/CatsStandingUp?filter=xxxx&sort=yyyy + // 1) arg.getName() => https://scrolller.com/r/CatsStandingUp?filter + // 2) arg.getName() => sort + + if (arg.getName().replace(toReplace,"").toLowerCase().equals((parameter))) { + return arg.getValue(); + } + } + return ""; + } + + private JSONObject getPosts(JSONObject data) { + // The actual GraphQL query call + + try { + String url = "https://api.scrolller.com/api/v2/graphql"; + + URL obj = new URI(url).toURL(); + HttpURLConnection conn = (HttpURLConnection) obj.openConnection(); + conn.setReadTimeout(5000); + conn.addRequestProperty("Accept-Language", "en-US,en;q=0.8"); + conn.addRequestProperty("User-Agent", "Mozilla"); + conn.addRequestProperty("Referer", "scrolller.com"); + + conn.setDoOutput(true); + + OutputStreamWriter w = new OutputStreamWriter(conn.getOutputStream(), "UTF-8"); + + w.write(data.toString()); + w.close(); + + BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream())); + String inputLine; + StringBuffer jsonString = new StringBuffer(); + + while ((inputLine = in.readLine()) != null) { + jsonString.append(inputLine); + } + + in.close(); + conn.disconnect(); + + return new JSONObject(jsonString.toString()); + + } catch (Exception e) { + 
e.printStackTrace(); + } + + return new JSONObject("{}"); + } + + private JSONObject getPostsSorted(JSONObject data) throws MalformedURLException { + + // The actual GraphQL query call (if sort parameter is present) + try { + + ArrayList postsJsonStrings = new ArrayList<>(); + + WebSocketClient wsc = new WebSocketClient(new URI("wss://api.scrolller.com/api/v2/graphql")) { + @Override + public void onOpen(ServerHandshake serverHandshake) { + // As soon as the WebSocket connects send our query + this.send(data.toString()); + } + + @Override + public void onMessage(String s) { + postsJsonStrings.add(s); + if (new JSONObject(s).getJSONObject("data").getJSONObject("fetchSubreddit").has("iterator")) { + this.close(); + } + } + + @Override + public void onClose(int i, String s, boolean b) { + } + + @Override + public void onError(Exception e) { + LOGGER.error(String.format("WebSocket error, server reported %s", e.getMessage())); + } + }; + wsc.connect(); + + while (!wsc.isClosed()) { + // Posts list is not over until the connection closes. + } + + JSONObject finalObject = new JSONObject(); + JSONArray posts = new JSONArray(); + + // Iterator is the last object in the post list, let's duplicate it in his own object for clarity. + finalObject.put("iterator", new JSONObject(postsJsonStrings.get(postsJsonStrings.size()-1))); + + for (String postString : postsJsonStrings) { + posts.put(new JSONObject(postString)); + } + finalObject.put("posts", posts); + + if (finalObject.getJSONArray("posts").length() == 1 && !finalObject.getJSONArray("posts").getJSONObject(0).getJSONObject("data").getJSONObject("fetchSubreddit").has("mediaSources")) { + // Only iterator, no posts. + return null; + } + + return finalObject; + + + } catch (URISyntaxException ue) { + // Nothing to catch, it's an hardcoded URI. 
+ } + + return null; + } + + + @Override + protected List getURLsFromJSON(JSONObject json) throws JSONException { + + boolean sortRequested = json.has("posts"); + + int bestArea = 0; + String bestUrl = ""; + List list = new ArrayList<>(); + + JSONArray itemsList = sortRequested ? json.getJSONArray("posts") : json.getJSONObject("data").getJSONObject("getSubreddit").getJSONObject("children").getJSONArray("items"); + + for (Object item : itemsList) { + + if (sortRequested && !((JSONObject) item).getJSONObject("data").getJSONObject("fetchSubreddit").has("mediaSources")) { + continue; + } + + JSONArray sourcesTMP = sortRequested ? ((JSONObject) item).getJSONObject("data").getJSONObject("fetchSubreddit").getJSONArray("mediaSources") : ((JSONObject) item).getJSONArray("mediaSources"); + for (Object sourceTMP : sourcesTMP) + { + int widthTMP = ((JSONObject) sourceTMP).getInt("width"); + int heightTMP = ((JSONObject) sourceTMP).getInt("height"); + int areaTMP = widthTMP * heightTMP; + + if (areaTMP > bestArea) { + bestArea = widthTMP; + bestUrl = ((JSONObject) sourceTMP).getString("url"); + } + } + list.add(bestUrl); + bestUrl = ""; + bestArea = 0; + } + + return list; + } + + @Override + protected JSONObject getFirstPage() throws IOException { + try { + return prepareQuery(null, this.getGID(url), getParameter(url,"sort")); + } catch (URISyntaxException e) { + LOGGER.error(String.format("Error obtaining first page: %s", e.getMessage())); + return null; + } + } + + @Override + public JSONObject getNextPage(JSONObject source) throws IOException { + // Every call the the API contains an "iterator" string that we need to pass to the API to get the next page + // Checking if iterator is null is not working for some reason, hence why the weird "iterator.toString().equals("null")" + + Object iterator = null; + if (source.has("iterator")) { + // Sort requested, custom JSON. 
+ iterator = source.getJSONObject("iterator").getJSONObject("data").getJSONObject("fetchSubreddit").get("iterator"); + } else { + iterator = source.getJSONObject("data").getJSONObject("getSubreddit").getJSONObject("children").get("iterator"); + } + + if (!iterator.toString().equals("null")) { + // Need to change page. + try { + return prepareQuery(iterator.toString(), this.getGID(url), getParameter(url,"sort")); + } catch (URISyntaxException e) { + LOGGER.error(String.format("Error changing page: %s", e.getMessage())); + return null; + } + } else { + return null; + } + } +} \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ShesFreakyRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ShesFreakyRipper.java index 73dad1b1..b96e2f6b 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ShesFreakyRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ShesFreakyRipper.java @@ -12,7 +12,6 @@ import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.utils.Http; public class ShesFreakyRipper extends AbstractHTMLRipper { @@ -41,11 +40,6 @@ public class ShesFreakyRipper extends AbstractHTMLRipper { + "shesfreaky.com/gallery/... 
- got " + url + "instead"); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List imageURLs = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/SinfestRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/SinfestRipper.java index d6a0f9cb..f3a216f4 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/SinfestRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/SinfestRipper.java @@ -41,12 +41,6 @@ public class SinfestRipper extends AbstractHTMLRipper { "sinfest.net/view.php?date=XXXX-XX-XX/ - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public Document getNextPage(Document doc) throws IOException { Element elem = doc.select("td.style5 > a > img").last(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/SinnercomicsRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/SinnercomicsRipper.java deleted file mode 100644 index 4411adfe..00000000 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/SinnercomicsRipper.java +++ /dev/null @@ -1,168 +0,0 @@ -package com.rarchives.ripme.ripper.rippers; - -import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.utils.Http; - -import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.ArrayList; -import java.util.List; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; - -public class SinnercomicsRipper extends AbstractHTMLRipper { - - private static final String HOST = "sinnercomics", - DOMAIN = "sinnercomics.com"; - - private static final int SLEEP_TIME = 500; - - enum RIP_TYPE { - HOMEPAGE, - PINUP, - COMIC - } - - private RIP_TYPE 
ripType; - private Integer pageNum; - - public SinnercomicsRipper(URL url) throws IOException { - super(url); - } - - @Override - public String getHost() { - return HOST; - } - - @Override - public String getDomain() { - return DOMAIN; - } - - @Override - public String normalizeUrl(String url) { - // Remove the comments hashtag - return url.replaceAll("/#(comments|disqus_thread)", "/"); - } - - @Override - public String getGID(URL url) throws MalformedURLException { - String cleanUrl = normalizeUrl(url.toExternalForm()); - Pattern p; - Matcher m; - - p = Pattern.compile("^https?://sinnercomics\\.com/comic/([a-zA-Z0-9-]*)/?$"); - m = p.matcher(cleanUrl); - if (m.matches()) { - // Comic - this.ripType = RIP_TYPE.COMIC; - return m.group(1).replaceAll("-page-\\d+", ""); - } - - p = Pattern.compile("^https?://sinnercomics\\.com(?:/page/([0-9]+))?/?$"); - m = p.matcher(cleanUrl); - if (m.matches()) { - // Homepage - this.ripType = RIP_TYPE.HOMEPAGE; - if (m.group(1) != null) { - this.pageNum = Integer.valueOf(m.group(1)); - } else { - this.pageNum = 1; - } - return "homepage"; - } - - p = Pattern.compile("^https?://sinnercomics\\.com/([a-zA-Z0-9-]+)(?:/#comments)?/?$"); - m = p.matcher(cleanUrl); - if (m.matches()) { - // Pinup image - this.ripType = RIP_TYPE.PINUP; - return m.group(1); - } - - throw new MalformedURLException("Expected sinnercomics.com URL format: " + - "/pinupName or /comic/albumName or /page/number - got " + cleanUrl + " instead"); - } - - @Override - public boolean canRip(URL url) { - if (!url.getHost().endsWith(DOMAIN)) { - return false; - } - try { - getGID(url); - } catch (MalformedURLException e) { - // Can't get GID, can't rip it. 
- return false; - } - return true; - } - - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - - @Override - public Document getNextPage(Document doc) throws IOException { - String nextUrl = null; - - switch (this.ripType) { - case PINUP: - throw new IOException("No next page on a pinup"); - - case COMIC: - // We use comic-nav-next to the find the next page - Element elem = doc.select("a.comic-nav-next").first(); - if (elem == null) { - throw new IOException("No more pages"); - } - nextUrl = elem.attr("href"); - break; - - default: // case HOMEPAGE: - this.pageNum++; - nextUrl = "https://sinnercomics.com/page/" + String.valueOf(this.pageNum); - break; - } - - // Wait to avoid IP bans - sleep(SLEEP_TIME); - return Http.url(nextUrl).get(); - } - - @Override - public List getURLsFromPage(Document doc) { - List result = new ArrayList<>(); - - switch (this.ripType) { - case COMIC: - // comic pages only contain one image, determined by a meta tag - for (Element el : doc.select("meta[property=og:image]")) { - String imageSource = el.attr("content"); - imageSource = imageSource.replace(" alt=", ""); - result.add(imageSource); - } - break; - default: - for (Element el : doc.select(".entry p img")) { - // These filters match the full size images but might match ads too... 
- result.add(el.attr("src")); - } - break; - } - - return result; - } - - @Override - public void downloadURL(URL url, int index) { - addURLToDownload(url, getPrefix(index)); - } - -} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/SmuttyRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/SmuttyRipper.java index b61f2fef..ad00e5c8 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/SmuttyRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/SmuttyRipper.java @@ -89,11 +89,6 @@ public class SmuttyRipper extends AbstractHTMLRipper { } @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - public void downloadURL(URL url, int index) { addURLToDownload(url, getPrefix(index)); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/SoundgasmRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/SoundgasmRipper.java new file mode 100644 index 00000000..ab9ebfa9 --- /dev/null +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/SoundgasmRipper.java @@ -0,0 +1,69 @@ +package com.rarchives.ripme.ripper.rippers; + +import com.rarchives.ripme.ripper.AbstractHTMLRipper; +import org.jsoup.nodes.Document; +import org.jsoup.nodes.Element; +import org.jsoup.select.Elements; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public class SoundgasmRipper extends AbstractHTMLRipper { + + private static final String HOST = "soundgasm.net"; + + public SoundgasmRipper(URL url) throws IOException, URISyntaxException { + super(new URI(url.toExternalForm()).toURL()); + } + + @Override + protected String getDomain() { return "soundgasm.net"; } + + @Override + public String getHost() { return "soundgasm"; } + + 
@Override + public String getGID(URL url) throws MalformedURLException { + Pattern p = Pattern.compile("^/u/([a-zA-Z0-9_-]+)/([a-zA-Z0-9_-]+).*$"); + Matcher m = p.matcher(url.getFile()); + if (m.find()) { + return m.group(m.groupCount()); + } + throw new MalformedURLException( + "Expected soundgasm.net format: " + + "soundgasm.net/u/username/id or " + + " Got: " + url); + } + + @Override + public Document getFirstPage() throws IOException, URISyntaxException { + return super.getFirstPage(); + } + + @Override + public List getURLsFromPage(Document page) { + List res = new ArrayList<>(); + + Elements script = page.select("script"); + Pattern p = Pattern.compile("m4a\\:\\s\"(https?:.*)\\\""); + + for (Element e: script) { + Matcher m = p.matcher(e.data()); + if (m.find()) { res.add(m.group(1)); } + } + return res; + } + + @Override + public void downloadURL(URL url, int index) { + addURLToDownload(url, getPrefix(index)); + } + +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/SpankbangRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/SpankbangRipper.java index bca5ef66..9ea1a130 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/SpankbangRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/SpankbangRipper.java @@ -28,11 +28,6 @@ public class SpankbangRipper extends AbstractSingleFileRipper { return "spankbang.com"; } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/StaRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/StaRipper.java index b331bbce..ac7414dd 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/StaRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/StaRipper.java @@ -2,6 +2,8 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import 
java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.HashMap; @@ -47,12 +49,6 @@ public class StaRipper extends AbstractHTMLRipper { "sta.sh/ALBUMID - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); @@ -61,10 +57,10 @@ public class StaRipper extends AbstractHTMLRipper { Document thumbPage = null; if (checkURL(thumbPageURL)) { try { - Connection.Response resp = Http.url(new URL(thumbPageURL)).response(); + Connection.Response resp = Http.url(new URI(thumbPageURL).toURL()).response(); cookies.putAll(resp.cookies()); thumbPage = resp.parse(); - } catch (MalformedURLException e) { + } catch (MalformedURLException | URISyntaxException e) { LOGGER.info(thumbPageURL + " is a malformed URL"); } catch (IOException e) { LOGGER.info(e.getMessage()); @@ -81,9 +77,9 @@ public class StaRipper extends AbstractHTMLRipper { private boolean checkURL(String url) { try { - new URL(url); + new URI(url).toURL(); return true; - } catch (MalformedURLException e) { + } catch (MalformedURLException | URISyntaxException e) { return false; } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TapasticRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TapasticRipper.java index 369ce741..d514c1e6 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/TapasticRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TapasticRipper.java @@ -2,6 +2,8 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -46,11 +48,6 @@ public class TapasticRipper 
extends AbstractHTMLRipper { return "tapas"; } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document page) { List urls = new ArrayList<>(); @@ -87,12 +84,12 @@ public class TapasticRipper extends AbstractHTMLRipper { prefix.append(String.format("-%0" + imgLog + "dof%0" + imgLog + "d-", i + 1, images.size())); prefix.append(episode.filename.replace(" ", "-")); prefix.append("-"); - addURLToDownload(new URL(link), prefix.toString()); + addURLToDownload(new URI(link).toURL(), prefix.toString()); if (isThisATest()) { break; } } - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { LOGGER.error("[!] Exception while downloading " + url, e); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TeenplanetRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TeenplanetRipper.java index 9791ab90..25edb5f7 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/TeenplanetRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TeenplanetRipper.java @@ -34,11 +34,6 @@ public class TeenplanetRipper extends AbstractHTMLRipper { return HOST; } - @Override - protected Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - @Override protected List getURLsFromPage(Document page) { List imageURLs = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ThechiveRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ThechiveRipper.java index 3c9d751d..8105fe73 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ThechiveRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ThechiveRipper.java @@ -70,12 +70,6 @@ public class ThechiveRipper extends AbstractHTMLRipper { + "thechive.com/YEAR/MONTH/DAY/POSTTITLE/ OR i.thechive.com/username, got " + url + " instead."); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an 
instance field of the superclass - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result; diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TheyiffgalleryRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TheyiffgalleryRipper.java index ac3e363c..3f616faa 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/TheyiffgalleryRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TheyiffgalleryRipper.java @@ -41,12 +41,6 @@ public class TheyiffgalleryRipper extends AbstractHTMLRipper { "theyiffgallery.com/index?/category/#### - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public Document getNextPage(Document doc) throws IOException { String nextPage = doc.select("span.navPrevNext > a").attr("href"); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TsuminoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TsuminoRipper.java index 846c4795..49baa384 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/TsuminoRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TsuminoRipper.java @@ -1,9 +1,11 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; +import java.io.UnsupportedEncodingException; import java.net.MalformedURLException; import java.net.URL; import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -110,11 +112,11 @@ public class TsuminoRipper extends AbstractHTMLRipper { } @Override - public List getURLsFromPage(Document doc) { + public List getURLsFromPage(Document doc) throws UnsupportedEncodingException { JSONArray imageIds = getPageUrls(); List result = new ArrayList<>(); for (int i = 0; i < imageIds.length(); i++) { - 
result.add("http://www.tsumino.com/Image/Object?name=" + URLEncoder.encode(imageIds.getString(i))); + result.add("http://www.tsumino.com/Image/Object?name=" + URLEncoder.encode(imageIds.getString(i), StandardCharsets.UTF_8.name())); } return result; @@ -127,6 +129,6 @@ public class TsuminoRipper extends AbstractHTMLRipper { There is no way to tell if an image returned from tsumino.com is a png to jpg. The content-type header is always "image/jpeg" even when the image is a png. The file ext is not included in the url. */ - addURLToDownload(url, getPrefix(index), "", null, null, null, null, true); + addURLToDownload(url, "", null, null, getPrefix(index), null, null, true); } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/Tubex6Ripper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/Tubex6Ripper.java deleted file mode 100644 index 35ca8281..00000000 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/Tubex6Ripper.java +++ /dev/null @@ -1,60 +0,0 @@ -package com.rarchives.ripme.ripper.rippers; - -import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.ArrayList; -import java.util.List; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import com.rarchives.ripme.ripper.AbstractSingleFileRipper; -import org.jsoup.nodes.Document; - -import com.rarchives.ripme.utils.Http; - -public class Tubex6Ripper extends AbstractSingleFileRipper { - - public Tubex6Ripper(URL url) throws IOException { - super(url); - } - - @Override - public String getHost() { - return "tubex6"; - } - - @Override - public String getDomain() { - return "tubex6.com"; - } - - @Override - public String getGID(URL url) throws MalformedURLException { - Pattern p = Pattern.compile("^http://.*tubex6\\.com/(.*)/$"); - Matcher m = p.matcher(url.toExternalForm()); - if (m.matches()) { - return m.group(1); - } - throw new MalformedURLException("Expected tubex6.com URL format: " + - "tubex6.com/NAME - got " + url + " 
instead"); - } - - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - - @Override - public List getURLsFromPage(Document doc) { - List result = new ArrayList<>(); - result.add(doc.select("source[type=video/mp4]").attr("src")); - return result; - } - - @Override - public void downloadURL(URL url, int index) { - // We have to send a referrer or the site returns a 403 error - addURLToDownload(url, getPrefix(index), "", this.url.toExternalForm(), null); - } -} \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TumblrRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TumblrRipper.java index 0c561d77..6d91361b 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/TumblrRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TumblrRipper.java @@ -1,9 +1,7 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; -import java.net.HttpURLConnection; -import java.net.MalformedURLException; -import java.net.URL; +import java.net.*; import java.util.Arrays; import java.util.List; import java.util.Random; @@ -100,11 +98,11 @@ public class TumblrRipper extends AlbumRipper { * @throws MalformedURLException */ @Override - public URL sanitizeURL(URL url) throws MalformedURLException { + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { String u = url.toExternalForm(); // Convert .tumblr.com/path to /path if needed if (StringUtils.countMatches(u, ".") > 2) { - url = new URL(u.replace(".tumblr.com", "")); + url = new URI(u.replace(".tumblr.com", "")).toURL(); if (isTumblrURL(url)) { LOGGER.info("Detected tumblr site: " + url); } @@ -263,7 +261,7 @@ public class TumblrRipper extends AlbumRipper { fileLocation = photo.getJSONObject("original_size").getString("url").replaceAll("http:", "https:"); qualM = qualP.matcher(fileLocation); fileLocation = qualM.replaceFirst("_1280.$1"); - fileURL = new URL(fileLocation); + 
fileURL = new URI(fileLocation).toURL(); m = p.matcher(fileURL.toString()); if (m.matches()) { @@ -278,7 +276,7 @@ public class TumblrRipper extends AlbumRipper { } } else if (post.has("video_url")) { try { - fileURL = new URL(post.getString("video_url").replaceAll("http:", "https:")); + fileURL = new URI(post.getString("video_url").replaceAll("http:", "https:")).toURL(); downloadURL(fileURL, date); } catch (Exception e) { LOGGER.error("[!] Error while parsing video in " + post, e); @@ -293,8 +291,8 @@ public class TumblrRipper extends AlbumRipper { // If the image is any smaller, it will still get the largest available size qualM = qualP.matcher(imgSrc); imgSrc = qualM.replaceFirst("_1280.$1"); - downloadURL(new URL(imgSrc), date); - } catch (MalformedURLException e) { + downloadURL(new URI(imgSrc).toURL(), date); + } catch (MalformedURLException | URISyntaxException e) { LOGGER.error("[!] Error while getting embedded image at " + post, e); return true; } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TwitterRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TwitterRipper.java index 1a1bf1ab..2ce65834 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/TwitterRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TwitterRipper.java @@ -121,7 +121,7 @@ public class TwitterRipper extends AbstractJSONRipper { case ACCOUNT: req.append("https://api.twitter.com/1.1/statuses/user_timeline.json") .append("?screen_name=" + this.accountName).append("&include_entities=true") - .append("&exclude_replies=true").append("&trim_user=true").append("&count=" + MAX_ITEMS_REQUEST) + .append("&exclude_replies=false").append("&trim_user=true").append("&count=" + MAX_ITEMS_REQUEST) .append("&tweet_mode=extended"); break; case SEARCH:// Only get tweets from last week diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/VidbleRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/VidbleRipper.java index 9a962f3a..baf5e212 
100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/VidbleRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/VidbleRipper.java @@ -2,6 +2,8 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -45,11 +47,6 @@ public class VidbleRipper extends AbstractHTMLRipper { + " Got: " + url); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { return getURLsFromPageStatic(doc); @@ -75,11 +72,11 @@ public class VidbleRipper extends AbstractHTMLRipper { addURLToDownload(url, getPrefix(index)); } - public static List getURLsFromPage(URL url) throws IOException { + public static List getURLsFromPage(URL url) throws IOException, URISyntaxException { List urls = new ArrayList<>(); Document doc = Http.url(url).get(); for (String stringURL : getURLsFromPageStatic(doc)) { - urls.add(new URL(stringURL)); + urls.add(new URI(stringURL).toURL()); } return urls; } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ViewcomicRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ViewcomicRipper.java index 27015a06..72f65249 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ViewcomicRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ViewcomicRipper.java @@ -2,6 +2,7 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -31,10 +32,10 @@ public class ViewcomicRipper extends AbstractHTMLRipper { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, 
URISyntaxException { try { // Attempt to use album title as GID - String titleText = getFirstPage().select("title").first().text(); + String titleText = getCachedFirstPage().select("title").first().text(); String title = titleText.replace("Viewcomic reading comics online for free", ""); title = title.replace("_", ""); title = title.replace("|", ""); @@ -60,12 +61,6 @@ public class ViewcomicRipper extends AbstractHTMLRipper { "view-comic.com/COMIC_NAME - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/VkRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/VkRipper.java index 99310dc4..c6394bb8 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/VkRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/VkRipper.java @@ -2,14 +2,18 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.*; import java.util.regex.Matcher; import java.util.regex.Pattern; - +import org.apache.commons.lang.StringEscapeUtils; import com.rarchives.ripme.ripper.AbstractJSONRipper; import org.json.JSONArray; import org.json.JSONObject; +import org.jsoup.Connection.Method; +import org.jsoup.Connection.Response; import org.jsoup.Jsoup; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; @@ -26,6 +30,7 @@ public class VkRipper extends AbstractJSONRipper { private RipType RIP_TYPE; private String oid; + private int offset = 0; public VkRipper(URL url) throws IOException { super(url); @@ -59,68 +64,18 @@ public class VkRipper extends AbstractJSONRipper { String[] jsonStrings = doc.toString().split(""); return new 
JSONObject(jsonStrings[jsonStrings.length - 1]); } else { - Map photoIDsToURLs = new HashMap<>(); - int offset = 0; - while (true) { - LOGGER.info(" Retrieving " + this.url); - Map postData = new HashMap<>(); - postData.put("al", "1"); - postData.put("offset", Integer.toString(offset)); - postData.put("part", "1"); - Document doc = Http.url(this.url) - .referrer(this.url) - .ignoreContentType() - .data(postData) - .post(); - - String body = doc.toString(); - if (!body.contains(" elements = doc.select("a"); - Set photoIDsToGet = new HashSet<>(); - for (Element a : elements) { - if (!a.attr("onclick").contains("showPhoto('")) { - LOGGER.error("a: " + a); - continue; - } - String photoID = a.attr("onclick"); - photoID = photoID.substring(photoID.indexOf("showPhoto('") + "showPhoto('".length()); - photoID = photoID.substring(0, photoID.indexOf("'")); - if (!photoIDsToGet.contains(photoID)) { - photoIDsToGet.add(photoID); - } - } - for (String photoID : photoIDsToGet) { - if (!photoIDsToURLs.containsKey(photoID)) { - try { - photoIDsToURLs.putAll(getPhotoIDsToURLs(photoID)); - } catch (IOException e) { - LOGGER.error("Exception while retrieving photo id " + photoID, e); - continue; - } - } - if (!photoIDsToURLs.containsKey(photoID)) { - LOGGER.error("Could not find URL for photo ID: " + photoID); - continue; - } - if (isStopped() || isThisATest()) { - break; - } - } - - if (elements.size() < 40 || isStopped() || isThisATest()) { - break; - } - offset += elements.size(); - } - // Slight hack to make this into effectively a JSON ripper - return new JSONObject(photoIDsToURLs); + return getPage(); } } + @Override + protected JSONObject getNextPage(JSONObject doc) throws IOException { + if (isStopped() || isThisATest()) { + return null; + } + return getPage(); + } + @Override protected List getURLsFromJSON(JSONObject page) { List pageURLs = new ArrayList<>(); @@ -142,9 +97,9 @@ public class VkRipper extends AbstractJSONRipper { pageURLs.add(videoURL); } } else { - Iterator 
keys = page.keys(); + Iterator keys = page.keys(); while (keys.hasNext()) { - pageURLs.add(page.getString((String) keys.next())); + pageURLs.add(page.getString(keys.next())); } } return pageURLs; @@ -184,19 +139,20 @@ public class VkRipper extends AbstractJSONRipper { } @Override - public void rip() throws IOException { + public void rip() throws IOException, URISyntaxException { if (this.url.toExternalForm().contains("/videos")) { RIP_TYPE = RipType.VIDEO; JSONObject json = getFirstPage(); List URLs = getURLsFromJSON(json); for (int index = 0; index < URLs.size(); index ++) { - downloadURL(new URL(URLs.get(index)), index); + downloadURL(new URI(URLs.get(index)).toURL(), index); } waitForThreads(); } else { RIP_TYPE = RipType.IMAGE; } + super.rip(); } private Map getPhotoIDsToURLs(String photoID) throws IOException { @@ -208,40 +164,182 @@ public class VkRipper extends AbstractJSONRipper { postData.put("al", "1"); postData.put("module", "photos"); postData.put("photo", photoID); - Document doc = Jsoup - .connect("https://vk.com/al_photos.php") + Response res = Jsoup.connect("https://vk.com/al_photos.php") .header("Referer", this.url.toExternalForm()) + .header("Accept", "*/*") + .header("Accept-Language", "en-US,en;q=0.5") + .header("Content-Type", "application/x-www-form-urlencoded") + .header("X-Requested-With", "XMLHttpRequest") .ignoreContentType(true) .userAgent(USER_AGENT) .timeout(5000) .data(postData) - .post(); - String jsonString = doc.toString(); - jsonString = jsonString.substring(jsonString.indexOf("") + "".length()); - jsonString = jsonString.substring(0, jsonString.indexOf("")); - JSONArray json = new JSONArray(jsonString); - for (int i = 0; i < json.length(); i++) { - JSONObject jsonImage = json.getJSONObject(i); - for (String key : new String[] {"z_src", "y_src", "x_src"}) { - if (!jsonImage.has(key)) { - continue; - } - photoIDsToURLs.put(jsonImage.getString("id"), jsonImage.getString(key)); - break; - } + .method(Method.POST) + .execute(); + 
String jsonString = res.body(); + JSONObject json = new JSONObject(jsonString); + JSONObject photoObject = findJSONObjectContainingPhotoId(photoID, json); + String bestSourceUrl = getBestSourceUrl(photoObject); + + if (bestSourceUrl != null) { + photoIDsToURLs.put(photoID, bestSourceUrl); + } else { + LOGGER.error("Could not find image source for " + photoID); } + return photoIDsToURLs; } @Override public String getGID(URL url) throws MalformedURLException { - Pattern p = Pattern.compile("^https?://(www\\.)?vk\\.com/(photos|album|videos)-?([a-zA-Z0-9_]+).*$"); + Pattern p = Pattern.compile("^https?:\\/\\/(?:www\\.)?vk\\.com\\/((?:photos|album|videos)-?(?:[a-zA-Z0-9_]+).*$)"); Matcher m = p.matcher(url.toExternalForm()); if (!m.matches()) { throw new MalformedURLException("Expected format: http://vk.com/album#### or vk.com/photos####"); } - int count = m.groupCount(); - return m.group(count - 1) + m.group(count); + return m.group(1); } + + /** + * Finds the nested JSON object with entry "id": "photoID" recursively. + * @param photoID The photoId string to be found with "id" as the key. + * @param json Object of type JSONObject or JSONArray. + * @return JSONObject with id as the photoID or null. + */ + public JSONObject findJSONObjectContainingPhotoId(String photoID, Object json) { + // Termination condition + if (json instanceof JSONObject && ((JSONObject) json).has("id") + && ((JSONObject) json).optString("id").equals(photoID)) { + return ((JSONObject) json); + } + + if (json instanceof JSONObject) { + // Iterate through every key:value pair in the json. 
+ Iterator iterator = ((JSONObject) json).keys(); + while (iterator.hasNext()) { + Object o = ((JSONObject) json).get(iterator.next()); + JSONObject responseJson = findJSONObjectContainingPhotoId(photoID, o); + if (responseJson != null) { + return responseJson; + } + } + + } + + if (json instanceof JSONArray) { + // Iterate through every array value in the json + for (Object o : (JSONArray) json) { + if (o instanceof JSONObject || o instanceof JSONArray) { + JSONObject responseJson = findJSONObjectContainingPhotoId(photoID, o); + if (responseJson != null) { + return responseJson; + } + } + } + } + + return null; + } + + /** + * Find the best source url( with highest resolution). + * @param json JSONObject containing src urls. + * @return Url string for the image src or null. + */ + public String getBestSourceUrl(JSONObject json) { + String bestSourceKey = null; + int bestSourceResolution = 0; + Iterator iterator = json.keys(); + + while (iterator.hasNext()) { + String key = iterator.next(); + Object o = json.get(key); + // JSON contains source urls in the below format. Check VkRipperTest.java for sample json. + // {..., + // "x_src":"src-url", + // "x_": ["incomplete-url", width, height], + // ...} + if (o instanceof JSONArray && ((JSONArray) o).length() == 3 + && !((JSONArray) o).optString(0).equals("") && ((JSONArray) o).optInt(1) != 0 + && ((JSONArray) o).optInt(2) != 0 && json.has(key + "src")) { + if (((JSONArray) o).optInt(1) * ((JSONArray) o).optInt(2) >= bestSourceResolution) { + bestSourceResolution = ((JSONArray) o).optInt(1) * ((JSONArray) o).optInt(2); + bestSourceKey = key; + } + } + } + + // In case no suitable source has been found, we fall back to the older way. 
+ if(bestSourceKey == null) { + for (String key : new String[] {"z_src", "y_src", "x_src", "w_src"}) { + if(!json.has(key)) { + continue; + } + return json.getString(key); + } + }else { + return json.getString(bestSourceKey + "src"); + } + + return null; + } + + /** + * Common function to get the next page( containing next batch of images). + * @return JSONObject containing entries of "imgId": "src" + * @throws IOException + */ + private JSONObject getPage() throws IOException { + Map photoIDsToURLs = new HashMap<>(); + Map postData = new HashMap<>(); + + LOGGER.info("Retrieving " + this.url + " from offset " + offset); + postData.put("al", "1"); + postData.put("offset", Integer.toString(offset)); + postData.put("part", "1"); + Document doc = + Http.url(this.url).referrer(this.url).ignoreContentType().data(postData).post(); + String body = doc.toString(); + if (!body.contains(" elements = doc.select("a"); + Set photoIDsToGet = new HashSet<>(); + for (Element a : elements) { + if (!a.attr("onclick").contains("showPhoto('")) { + continue; + } + String photoID = a.attr("onclick"); + photoID = photoID.substring(photoID.indexOf("showPhoto('") + "showPhoto('".length()); + photoID = photoID.substring(0, photoID.indexOf("'")); + if (!photoIDsToGet.contains(photoID)) { + photoIDsToGet.add(photoID); + } + } + for (String photoID : photoIDsToGet) { + if (!photoIDsToURLs.containsKey(photoID)) { + try { + photoIDsToURLs.putAll(getPhotoIDsToURLs(photoID)); + } catch (IOException e) { + LOGGER.error("Exception while retrieving photo id " + photoID, e); + continue; + } + } + if (!photoIDsToURLs.containsKey(photoID)) { + LOGGER.error("Could not find URL for photo ID: " + photoID); + continue; + } + if (isStopped() || isThisATest()) { + break; + } + } + + offset += elements.size(); + // Slight hack to make this into effectively a JSON ripper + return new JSONObject(photoIDsToURLs); + } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/VscoRipper.java 
b/src/main/java/com/rarchives/ripme/ripper/rippers/VscoRipper.java index 5b23b8bd..a4fc08cc 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/VscoRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/VscoRipper.java @@ -1,230 +1,223 @@ -package com.rarchives.ripme.ripper.rippers; - -import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.utils.Http; -import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.*; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import org.json.JSONObject; -import org.jsoup.Jsoup; -import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; -import org.jsoup.select.Elements; - -/** - * For ripping VSCO pictures. - */ -public class VscoRipper extends AbstractHTMLRipper { - - int pageNumber = 1; - JSONObject profileJSON; - - - private static final String DOMAIN = "vsco.co", - HOST = "vsco"; - - public VscoRipper(URL url) throws IOException{ - super(url); - } - - /** - * Checks to see if VscoRipper can Rip specified url. - * @param url - * @return True if can rip. - * False if cannot rip. - */ - @Override - public boolean canRip(URL url) { - if (!url.getHost().endsWith(DOMAIN)) { - return false; - } - // Ignores personalized things (e.g. login, feed) and store page - // Allows links to user profiles and links to images. - //@TODO: Add support for journals and collections. - String u = url.toExternalForm(); - return !u.contains("/store/") || - !u.contains("/feed/") || - !u.contains("/login/") || - !u.contains("/journal/") || - !u.contains("/collection/")|| - !u.contains("/images/") || - u.contains("/media/"); - - } - - @Override - public URL sanitizeURL(URL url) throws MalformedURLException { - //no sanitization needed. - return url; - } - - /** - *

Gets the direct URL of full-sized image through the tag.

- * When expanding future functionality (e.g. support from journals), put everything into this method. - * @param page - * @return - */ - @Override - public List getURLsFromPage(Document page){ - List toRip = new ArrayList<>(); - //If user wanted to rip single image - if (url.toString().contains("/media/")){ - try { - toRip.add(vscoImageToURL(url.toExternalForm())); - } catch (IOException ex) { - LOGGER.debug("Failed to convert " + url.toString() + " to external form."); - } - - } else { - String username = getUserName(); - String userTkn = getUserTkn(username); - String siteID = getSiteID(userTkn, username); - while (true) { - profileJSON = getProfileJSON(userTkn, username, Integer.toString(pageNumber), siteID); - for (int i = 0; i < profileJSON.getJSONArray("media").length(); i++) { - toRip.add("https://" + profileJSON.getJSONArray("media").getJSONObject(i).getString("responsive_url")); - } - if (pageNumber * 1000 > profileJSON.getInt("total")) { - return toRip; - } - pageNumber++; - } - - - } - - return toRip; - } - - private String getUserTkn(String username) { - String userinfoPage = "https://vsco.co/content/Static/userinfo"; - String referer = "https://vsco.co/" + username + "/images/1"; - Map cookies = new HashMap<>(); - cookies.put("vs_anonymous_id", UUID.randomUUID().toString()); - try { - Element doc = Http.url(userinfoPage).cookies(cookies).referrer(referer).ignoreContentType().get().body(); - String json = doc.text().replaceAll("define\\(", ""); - json = json.replaceAll("\\)", ""); - return new JSONObject(json).getString("tkn"); - } catch (IOException e) { - LOGGER.error("Could not get user tkn"); - return null; - } - } - - private String getUserName() { - Pattern p = Pattern.compile("^https?://vsco.co/([a-zA-Z0-9-]+)/images/[0-9]+"); - Matcher m = p.matcher(url.toExternalForm()); - - if (m.matches()) { - String user = m.group(1); - return user; - } - return null; - } - - private JSONObject getProfileJSON(String tkn, String username, String page, String 
siteId) { - String size = "1000"; - String purl = "https://vsco.co/ajxp/" + tkn + "/2.0/medias?site_id=" + siteId + "&page=" + page + "&size=" + size; - Map cookies = new HashMap<>(); - cookies.put("vs", tkn); - try { - JSONObject j = Http.url(purl).cookies(cookies).getJSON(); - return j; - } catch (IOException e) { - LOGGER.error("Could not profile images"); - return null; - } - } - - private String getSiteID(String tkn, String username) { - Map cookies = new HashMap<>(); - cookies.put("vs", tkn); - try { - JSONObject j = Http.url("https://vsco.co/ajxp/" + tkn + "/2.0/sites?subdomain=" + username).cookies(cookies).getJSON(); - return Integer.toString(j.getJSONArray("sites").getJSONObject(0).getInt("id")); - } catch (IOException e) { - LOGGER.error("Could not get site id"); - return null; - } - } - - private String vscoImageToURL(String url) throws IOException{ - Document page = Jsoup.connect(url).userAgent(USER_AGENT) - .get(); - //create Elements filled only with Elements with the "meta" tag. - Elements metaTags = page.getElementsByTag("meta"); - String result = ""; - - for(Element metaTag : metaTags){ - //find URL inside meta-tag with property of "og:image" - if (metaTag.attr("property").equals("og:image")){ - String givenURL = metaTag.attr("content"); - givenURL = givenURL.replaceAll("\\?h=[0-9]+", "");//replace the "?h=xxx" tag at the end of the URL (where each x is a number) - - result = givenURL; - LOGGER.debug("Found image URL: " + givenURL); - break;//immediately stop after getting URL (there should only be 1 image to be downloaded) - } - } - - //Means website changed, things need to be fixed. 
- if (result.isEmpty()){ - LOGGER.error("Could not find image URL at: " + url); - } - - return result; - - } - - @Override - public String getHost() { - return HOST; - } - - @Override - public String getGID(URL url) throws MalformedURLException { - - //Single Image - Pattern p = Pattern.compile("^https?://vsco\\.co/([a-zA-Z0-9-]+)/media/([a-zA-Z0-9]+)"); - Matcher m = p.matcher(url.toExternalForm()); - - if (m.matches()){ - // Return the text contained between () in the regex - String user = m.group(1); - String imageNum = m.group(2).substring(0, 5);//first 5 characters should be enough to make each rip unique - return user + "/" + imageNum; - } - - //Member profile (Usernames should all be different, so this should work. - p = Pattern.compile("^https?://vsco.co/([a-zA-Z0-9-]+)/images/[0-9]+"); - m = p.matcher(url.toExternalForm()); - - if (m.matches()){ - String user = m.group(1); - return user; - } - - throw new MalformedURLException("Expected a URL to a single image or to a member profile, got " + url + " instead"); - - } - - @Override - public String getDomain() { - return DOMAIN; - } - - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - - @Override - public void downloadURL(URL url, int index) { - addURLToDownload(url, getPrefix(index)); - } - -} +package com.rarchives.ripme.ripper.rippers; + +import com.rarchives.ripme.ripper.AbstractHTMLRipper; +import com.rarchives.ripme.utils.Http; +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URL; +import java.util.*; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.json.JSONObject; +import org.jsoup.Jsoup; +import org.jsoup.nodes.Document; +import org.jsoup.Connection.Response; +import org.jsoup.nodes.Element; +import org.jsoup.select.Elements; + +/** + * For ripping VSCO pictures. 
+ */ +public class VscoRipper extends AbstractHTMLRipper { + + int pageNumber = 1; + JSONObject profileJSON; + + + private static final String DOMAIN = "vsco.co", + HOST = "vsco"; + + public VscoRipper(URL url) throws IOException{ + super(url); + } + + /** + * Checks to see if VscoRipper can Rip specified url. + * @param url + * @return True if can rip. + * False if cannot rip. + */ + @Override + public boolean canRip(URL url) { + if (!url.getHost().endsWith(DOMAIN)) { + return false; + } + // Ignores personalized things (e.g. login, feed) and store page + // Allows links to user profiles and links to images. + //@TODO: Add support for journals and collections. + String u = url.toExternalForm(); + return !u.contains("/store/") || + !u.contains("/feed/") || + !u.contains("/login/") || + !u.contains("/journal/") || + !u.contains("/collection/")|| + !u.contains("/images/") || + u.contains("/media/"); + + } + + @Override + public URL sanitizeURL(URL url) throws MalformedURLException { + //no sanitization needed. + return url; + } + + /** + *

Gets the direct URL of full-sized image through the tag.

+ * When expanding future functionality (e.g. support from journals), put everything into this method. + * @param page + * @return + */ + @Override + public List getURLsFromPage(Document page){ + List toRip = new ArrayList<>(); + //If user wanted to rip single image + if (url.toString().contains("/media/")){ + try { + toRip.add(vscoImageToURL(url.toExternalForm())); + } catch (IOException ex) { + LOGGER.debug("Failed to convert " + url.toString() + " to external form."); + } + + } else { + String username = getUserName(); + String userTkn = getUserTkn(username); + String siteID = getSiteID(userTkn, username); + while (true) { + profileJSON = getProfileJSON(userTkn, username, Integer.toString(pageNumber), siteID); + for (int i = 0; i < profileJSON.getJSONArray("media").length(); i++) { + toRip.add("https://" + profileJSON.getJSONArray("media").getJSONObject(i).getString("responsive_url")); + } + if (pageNumber * 1000 > profileJSON.getInt("total")) { + return toRip; + } + pageNumber++; + } + + + } + + return toRip; + } + + private String getUserTkn(String username) { + String userTokenPage = "https://vsco.co/content/Static"; + Map responseCookies = new HashMap<>(); + try { + Response resp = Http.url(userTokenPage).ignoreContentType().response(); + responseCookies = resp.cookies(); + return responseCookies.get("vs"); + } catch (IOException e) { + LOGGER.error("Could not get user tkn"); + return null; + } + } + + private String getUserName() { + Pattern p = Pattern.compile("^https?://vsco.co/([a-zA-Z0-9-]+)(/gallery)?(/)?"); + Matcher m = p.matcher(url.toExternalForm()); + + if (m.matches()) { + String user = m.group(1); + return user; + } + return null; + } + + private JSONObject getProfileJSON(String tkn, String username, String page, String siteId) { + String size = "1000"; + String purl = "https://vsco.co/ajxp/" + tkn + "/2.0/medias?site_id=" + siteId + "&page=" + page + "&size=" + size; + Map cookies = new HashMap<>(); + cookies.put("vs", tkn); + try { + 
JSONObject j = Http.url(purl).cookies(cookies).getJSON(); + return j; + } catch (IOException e) { + LOGGER.error("Could not profile images"); + return null; + } + } + + private String getSiteID(String tkn, String username) { + Map cookies = new HashMap<>(); + cookies.put("vs", tkn); + try { + JSONObject j = Http.url("https://vsco.co/ajxp/" + tkn + "/2.0/sites?subdomain=" + username).cookies(cookies).getJSON(); + return Integer.toString(j.getJSONArray("sites").getJSONObject(0).getInt("id")); + } catch (IOException e) { + LOGGER.error("Could not get site id"); + return null; + } + } + + private String vscoImageToURL(String url) throws IOException{ + Document page = Jsoup.connect(url).userAgent(USER_AGENT) + .get(); + //create Elements filled only with Elements with the "meta" tag. + Elements metaTags = page.getElementsByTag("meta"); + String result = ""; + + for(Element metaTag : metaTags){ + //find URL inside meta-tag with property of "og:image" + if (metaTag.attr("property").equals("og:image")){ + String givenURL = metaTag.attr("content"); + givenURL = givenURL.replaceAll("\\?h=[0-9]+", "");//replace the "?h=xxx" tag at the end of the URL (where each x is a number) + + result = givenURL; + LOGGER.debug("Found image URL: " + givenURL); + break;//immediately stop after getting URL (there should only be 1 image to be downloaded) + } + } + + //Means website changed, things need to be fixed. 
+ if (result.isEmpty()){ + LOGGER.error("Could not find image URL at: " + url); + } + + return result; + + } + + @Override + public String getHost() { + return HOST; + } + + @Override + public String getGID(URL url) throws MalformedURLException { + + //Single Image + Pattern p = Pattern.compile("^https?://vsco\\.co/([a-zA-Z0-9-]+)/media/([a-zA-Z0-9]+)"); + Matcher m = p.matcher(url.toExternalForm()); + + if (m.matches()){ + // Return the text contained between () in the regex + String user = m.group(1); + String imageNum = m.group(2).substring(0, 5);//first 5 characters should be enough to make each rip unique + return user + "/" + imageNum; + } + + //Member profile (Usernames should all be different, so this should work. + p = Pattern.compile("^https?://vsco.co/([a-zA-Z0-9-]+)(/gallery)?(/)?"); + m = p.matcher(url.toExternalForm()); + + if (m.matches()){ + String user = m.group(1); + return user; + } + + throw new MalformedURLException("Expected a URL to a single image or to a member profile, got " + url + " instead"); + + } + + @Override + public String getDomain() { + return DOMAIN; + } + + @Override + public void downloadURL(URL url, int index) { + addURLToDownload(url, getPrefix(index)); + } + +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/WebtoonsRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/WebtoonsRipper.java index ded3ce2c..0da345b7 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/WebtoonsRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/WebtoonsRipper.java @@ -2,6 +2,7 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -43,7 +44,7 @@ public class WebtoonsRipper extends AbstractHTMLRipper { @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws 
MalformedURLException, URISyntaxException { Pattern pat = Pattern.compile("https?://www.webtoons.com/[a-zA-Z-_]+/[a-zA-Z_-]+/([a-zA-Z0-9_-]*)/[a-zA-Z0-9_-]+/\\S*"); Matcher mat = pat.matcher(url.toExternalForm()); if (mat.matches()) { @@ -84,7 +85,10 @@ public class WebtoonsRipper extends AbstractHTMLRipper { public Document getFirstPage() throws IOException { Response resp = Http.url(url).response(); cookies = resp.cookies(); - return Http.url(url).get(); + cookies.put("needCOPPA", "false"); + cookies.put("needCCPA", "false"); + cookies.put("needGDPR", "false"); + return Http.url(url).cookies(cookies).get(); } @Override diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/WordpressComicRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/WordpressComicRipper.java index 143c396a..6c696272 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/WordpressComicRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/WordpressComicRipper.java @@ -2,6 +2,7 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.Arrays; @@ -220,7 +221,7 @@ public class WordpressComicRipper extends AbstractHTMLRipper { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { Pattern totempole666Pat = Pattern.compile("(?:https?://)?(?:www\\.)?totempole666.com/comic/([a-zA-Z0-9_-]*)/?$"); Matcher totempole666Mat = totempole666Pat.matcher(url.toExternalForm()); if (totempole666Mat.matches()) { @@ -376,7 +377,7 @@ public class WordpressComicRipper extends AbstractHTMLRipper { // freeadultcomix gets it own if because it needs to add http://freeadultcomix.com to the start of each link // TODO review the above comment which no longer applies -- see if there's a refactoring we should do 
here. if (url.toExternalForm().contains("freeadultcomix.com")) { - for (Element elem : doc.select("div.single-post > p > img.aligncenter")) { + for (Element elem : doc.select("div.post-texto > p > noscript > img[class*=aligncenter]")) { result.add(elem.attr("src")); } } else if (url.toExternalForm().contains("comics-xxx.com")) { @@ -384,7 +385,7 @@ public class WordpressComicRipper extends AbstractHTMLRipper { result.add(elem.attr("src")); } } else if (url.toExternalForm().contains("shipinbottle.pepsaga.com")) { - for (Element elem : doc.select("div#comic > div.comicpane > a > img")) { + for (Element elem : doc.select("div#comic > a > img")) { result.add(elem.attr("src")); } } else if (url.toExternalForm().contains("8muses.download")) { @@ -421,10 +422,4 @@ public class WordpressComicRipper extends AbstractHTMLRipper { } - - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/XcartxRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/XcartxRipper.java index 5ed01e6f..0b616726 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/XcartxRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/XcartxRipper.java @@ -44,11 +44,6 @@ public class XcartxRipper extends AbstractHTMLRipper { } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document page) { List imageURLs = new ArrayList<>(); @@ -56,7 +51,7 @@ public class XcartxRipper extends AbstractHTMLRipper { for (Element image : imageElements) { String imageUrl = image.attr("data-src"); - imageURLs.add(getDomain() + imageUrl); + imageURLs.add("https://" + getDomain() + imageUrl); } return imageURLs; } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/XhamsterRipper.java 
b/src/main/java/com/rarchives/ripme/ripper/rippers/XhamsterRipper.java index 7ae570f3..2cea95a7 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/XhamsterRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/XhamsterRipper.java @@ -2,6 +2,8 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -43,37 +45,36 @@ public class XhamsterRipper extends AbstractHTMLRipper { } @Override - public URL sanitizeURL(URL url) throws MalformedURLException { + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { if (isVideoUrl(url)) { return url; } String URLToReturn = url.toExternalForm(); - URLToReturn = URLToReturn.replaceAll("https?://\\w?\\w?\\.?xhamster\\.", "https://m.xhamster."); - URLToReturn = URLToReturn.replaceAll("https?://xhamster2\\.", "https://m.xhamster2."); - URL san_url = new URL(URLToReturn); + URLToReturn = URLToReturn.replaceAll("https?://\\w?\\w?\\.?xhamster([^<]*)\\.", "https://m.xhamster$1."); + URL san_url = new URI(URLToReturn).toURL(); LOGGER.info("sanitized URL is " + san_url.toExternalForm()); return san_url; } @Override public String getGID(URL url) throws MalformedURLException { - Pattern p = Pattern.compile("^https?://[\\w\\w.]*xhamster2?\\.com/photos/gallery/.*?(\\d+)$"); + Pattern p = Pattern.compile("^https?://([\\w\\w]*\\.)?xhamster([^<]*)\\.(com|desi)/photos/gallery/.*?(\\d+)$"); Matcher m = p.matcher(url.toExternalForm()); if (m.matches()) { - return m.group(1); + return m.group(4); } - p = Pattern.compile("^https?://[\\w\\w.]*xhamster2?\\.com/users/([a-zA-Z0-9_-]+)/(photos|videos)(/\\d+)?"); + p = Pattern.compile("^https?://[\\w\\w.]*xhamster([^<]*)\\.(com|desi)/users/([a-zA-Z0-9_-]+)/(photos|videos)(/\\d+)?"); m = p.matcher(url.toExternalForm()); if (m.matches()) { return "user_" + m.group(1); } - 
p = Pattern.compile("^https?://.*xhamster2?\\.com/(movies|videos)/(.*)$"); + p = Pattern.compile("^https?://.*xhamster([^<]*)\\.(com|desi)/(movies|videos)/(.*$)"); m = p.matcher(url.toExternalForm()); if (m.matches()) { - return m.group(2); + return m.group(4); } - throw new MalformedURLException( + throw new MalformedURLException( "Expected xhamster.com gallery formats: " + "xhamster.com/photos/gallery/xxxxx-#####" + " Got: " + url); @@ -85,6 +86,9 @@ public class XhamsterRipper extends AbstractHTMLRipper { LOGGER.info("getting albums"); for (Element elem : doc.select("div.item-container > a.item")) { urlsToAddToQueue.add(elem.attr("href")); + if (isStopped() || isThisATest()) { + break; + } } LOGGER.info(doc.html()); return urlsToAddToQueue; @@ -97,33 +101,26 @@ public class XhamsterRipper extends AbstractHTMLRipper { @Override public boolean pageContainsAlbums(URL url) { - Pattern p = Pattern.compile("^https?://[\\w\\w.]*xhamster\\.com/users/([a-zA-Z0-9_-]+)/(photos|videos)(/\\d+)?"); + Pattern p = Pattern.compile("^https?://[\\w\\w.]*xhamster([^<]*)\\.(com|desi)/users/([a-zA-Z0-9_-]+)/(photos|videos)(/\\d+)?"); Matcher m = p.matcher(url.toExternalForm()); LOGGER.info("Checking if page has albums"); LOGGER.info(m.matches()); return m.matches(); } - - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public boolean canRip(URL url) { - Pattern p = Pattern.compile("^https?://([\\w\\w]*\\.)?xhamster2?\\.(com|one|desi)/photos/gallery/.*?(\\d+)$"); + Pattern p = Pattern.compile("^https?://([\\w\\w]*\\.)?xhamster([^<]*)\\.(com|desi)/photos/gallery/.*?(\\d+)$"); Matcher m = p.matcher(url.toExternalForm()); if (m.matches()) { return true; } - p = Pattern.compile("^https?://[\\w\\w.]*xhamster2?\\.(com|one|desi)/users/([a-zA-Z0-9_-]+)/(photos|videos)(/\\d+)?"); + p = 
Pattern.compile("^https?://[\\w\\w.]*xhamster([^<]*)\\.(com|desi)/users/([a-zA-Z0-9_-]+)/(photos|videos)(/\\d+)?"); m = p.matcher(url.toExternalForm()); if (m.matches()) { return true; } - p = Pattern.compile("^https?://.*xhamster2?\\.(com|one|desi)/(movies|videos)/.*$"); + p = Pattern.compile("^https?://.*xhamster([^<]*)\\.(com|desi)/(movies|videos)/(.*$)"); m = p.matcher(url.toExternalForm()); if (m.matches()) { return true; @@ -132,18 +129,17 @@ public class XhamsterRipper extends AbstractHTMLRipper { } private boolean isVideoUrl(URL url) { - Pattern p = Pattern.compile("^https?://.*xhamster2?\\.(com|one|desi)/(movies|videos)/.*$"); + Pattern p = Pattern.compile("^https?://.*xhamster([^<]*)\\.(com|desi)/(movies|videos)/(.*$)"); Matcher m = p.matcher(url.toExternalForm()); return m.matches(); } @Override public Document getNextPage(Document doc) throws IOException { - if (doc.select("a[data-page=next]").first() != null) { - String nextPageUrl = doc.select("a[data-page=next]").first().attr("href"); + if (doc.select("a.prev-next-list-link").first() != null) { + String nextPageUrl = doc.select("a.prev-next-list-link--next").first().attr("href"); if (nextPageUrl.startsWith("http")) { - nextPageUrl = nextPageUrl.replaceAll("https?://\\w?\\w?\\.?xhamster\\.", "https://m.xhamster."); - nextPageUrl = nextPageUrl.replaceAll("https?://xhamster2\\.", "https://m.xhamster2."); + nextPageUrl = nextPageUrl.replaceAll("https?://\\w?\\w?\\.?xhamster([^<]*)\\.", "https://m.xhamster$1."); return Http.url(nextPageUrl).get(); } } @@ -151,30 +147,56 @@ public class XhamsterRipper extends AbstractHTMLRipper { } + @Override + public Document getFirstPage() throws IOException, URISyntaxException { + return super.getFirstPage(); + } + @Override public List getURLsFromPage(Document doc) { LOGGER.debug("Checking for urls"); List result = new ArrayList<>(); if (!isVideoUrl(url)) { - for (Element page : doc.select("div.picture_view > div.pictures_block > div.items > div.item-container > 
a.item")) { - // Make sure we don't waste time running the loop if the ripper has been stopped - if (isStopped()) { + if (!doc.select("div.picture_view > div.pictures_block > div.items > div.item-container > a.item").isEmpty()) { + // Old HTML structure is still present at some places + for (Element page : doc.select(".clearfix > div > a.slided")) { + // Make sure we don't waste time running the loop if the ripper has been stopped + if (isStopped()) { + break; + } + String pageWithImageUrl = page.attr("href"); + try { + // This works around some redirect fuckery xhamster likes to do where visiting m.xhamster.com sends to + // the page chamster.com but displays the mobile site from m.xhamster.com + pageWithImageUrl = pageWithImageUrl.replaceAll("://xhamster([^<]*)\\.", "://m.xhamster$1."); + String image = Http.url(new URI(pageWithImageUrl).toURL()).get().select("a > img#photoCurr").attr("src"); + result.add(image); + downloadFile(image); + } catch (IOException | URISyntaxException e) { + LOGGER.error("Was unable to load page " + pageWithImageUrl); + } + if (isStopped() || isThisATest()) { break; - } - String pageWithImageUrl = page.attr("href"); - try { - // This works around some redirect fuckery xhamster likes to do where visiting m.xhamster.com sends to - // the page chamster.com but displays the mobile site from m.xhamster.com - pageWithImageUrl = pageWithImageUrl.replaceAll("://xhamster\\.", "://m.xhamster."); - pageWithImageUrl = pageWithImageUrl.replaceAll("://xhamster2\\.", "://m.xhamster."); - String image = Http.url(new URL(pageWithImageUrl)).get().select("a > img#photoCurr").attr("src"); - downloadFile(image); - } catch (IOException e) { - LOGGER.error("Was unable to load page " + pageWithImageUrl); - } - } + } + } + } else { + // New HTML structure + for (Element page : doc.select("div#photo-slider > div#photo_slider > a")) { + // Make sure we don't waste time running the loop if the ripper has been stopped + if (isStopped()) { + break; + } + String 
image = page.attr("href"); + // This works around some redirect fuckery xhamster likes to do where visiting m.xhamster.com sends to + // the page chamster.com but displays the mobile site from m.xhamster.com + image = image.replaceAll("://xhamster([^<]*)\\.", "://m.xhamster$1."); + result.add(image); + downloadFile(image); + } + } } else { String imgUrl = doc.select("div.player-container > a").attr("href"); + result.add(imgUrl); downloadFile(imgUrl); } return result; @@ -187,18 +209,18 @@ public class XhamsterRipper extends AbstractHTMLRipper { private void downloadFile(String url) { try { - addURLToDownload(new URL(url), getPrefix(index)); + addURLToDownload(new URI(url).toURL(), getPrefix(index)); index = index + 1; - } catch (MalformedURLException e) { + } catch (MalformedURLException | URISyntaxException e) { LOGGER.error("The url \"" + url + "\" is malformed"); } } - + @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { // Attempt to use album title and username as GID - Document doc = getFirstPage(); + Document doc = getCachedFirstPage(); Element user = doc.select("a.author").first(); String username = user.text(); String path = url.getPath(); @@ -212,4 +234,4 @@ public class XhamsterRipper extends AbstractHTMLRipper { } return super.getAlbumTitle(url); } -} +} \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/XlecxRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/XlecxRipper.java index 15aee9c9..2e95c04a 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/XlecxRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/XlecxRipper.java @@ -1,36 +1,36 @@ -package com.rarchives.ripme.ripper.rippers; - -import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -public 
class XlecxRipper extends XcartxRipper { - - private Pattern p = Pattern.compile("^https?://xlecx.com/([a-zA-Z0-9_\\-]+).html"); - - public XlecxRipper(URL url) throws IOException { - super(url); - } - - @Override - public String getHost() { - return "xlecx"; - } - - @Override - public String getDomain() { - return "xlecx.com"; - } - - @Override - public String getGID(URL url) throws MalformedURLException { - Matcher m = p.matcher(url.toExternalForm()); - if (m.matches()) { - return m.group(1); - } - throw new MalformedURLException("Expected URL format: http://xlecx.com/comic, got: " + url); - - } -} +package com.rarchives.ripme.ripper.rippers; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URL; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public class XlecxRipper extends XcartxRipper { + + private Pattern p = Pattern.compile("^https?://xlecx.org/([a-zA-Z0-9_\\-]+).html"); + + public XlecxRipper(URL url) throws IOException { + super(url); + } + + @Override + public String getHost() { + return "xlecx"; + } + + @Override + public String getDomain() { + return "xlecx.org"; + } + + @Override + public String getGID(URL url) throws MalformedURLException { + Matcher m = p.matcher(url.toExternalForm()); + if (m.matches()) { + return m.group(1); + } + throw new MalformedURLException("Expected URL format: http://xlecx.org/comic, got: " + url); + + } +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/XvideosRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/XvideosRipper.java index 0fdef868..ea19d484 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/XvideosRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/XvideosRipper.java @@ -2,6 +2,7 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ 
-24,11 +25,6 @@ public class XvideosRipper extends AbstractSingleFileRipper { super(url); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(this.url).get(); - } - @Override public String getHost() { return HOST; @@ -109,7 +105,7 @@ public class XvideosRipper extends AbstractSingleFileRipper { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { Pattern p = Pattern.compile("^https?://[wm.]*xvideos\\.com/profiles/([a-zA-Z0-9_-]+)/photos/(\\d+)/([a-zA-Z0-9_-]+)$"); Matcher m = p.matcher(url.toExternalForm()); if (m.matches()) { diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/YoupornRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/YoupornRipper.java index e99ffef5..b3e5f4f0 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/YoupornRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/YoupornRipper.java @@ -40,11 +40,6 @@ public class YoupornRipper extends AbstractSingleFileRipper { return m.matches(); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(this.url).get(); - } - @Override public List getURLsFromPage(Document doc) { List results = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/YuvutuRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/YuvutuRipper.java index 97365aa8..1fe6513f 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/YuvutuRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/YuvutuRipper.java @@ -50,10 +50,6 @@ public class YuvutuRipper extends AbstractHTMLRipper { "yuvutu.com/modules.php?name=YuGallery&action=view&set_id=albumid - got " + url + "instead"); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } @Override public List getURLsFromPage(Document doc) { List imageURLs = new 
ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ZizkiRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ZizkiRipper.java index 35733325..043d1835 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ZizkiRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ZizkiRipper.java @@ -2,6 +2,7 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.HashMap; @@ -19,7 +20,6 @@ import com.rarchives.ripme.utils.Http; public class ZizkiRipper extends AbstractHTMLRipper { - private Document albumDoc = null; private Map cookies = new HashMap<>(); public ZizkiRipper(URL url) throws IOException { @@ -46,13 +46,13 @@ public class ZizkiRipper extends AbstractHTMLRipper { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { // Attempt to use album title as GID - Element titleElement = getFirstPage().select("h1.title").first(); + Element titleElement = getCachedFirstPage().select("h1.title").first(); String title = titleElement.text(); - Element authorSpan = getFirstPage().select("span[class=creator]").first(); + Element authorSpan = getCachedFirstPage().select("span[class=creator]").first(); String author = authorSpan.select("a").first().text(); LOGGER.debug("Author: " + author); return getHost() + "_" + author + "_" + title.trim(); @@ -65,12 +65,9 @@ public class ZizkiRipper extends AbstractHTMLRipper { @Override public Document getFirstPage() throws IOException { - if (albumDoc == null) { - Response resp = Http.url(url).response(); - cookies.putAll(resp.cookies()); - albumDoc = resp.parse(); - } - return albumDoc; + Response resp = Http.url(url).response(); + cookies.putAll(resp.cookies()); + return resp.parse(); } @Override @@ -87,14 
+84,12 @@ public class ZizkiRipper extends AbstractHTMLRipper { if (thumb.hasAttr("typeof")) { img_type = thumb.attr("typeof"); if (img_type.equals("foaf:Image")) { - LOGGER.debug("Found image with " + img_type); if (thumb.parent() != null && - thumb.parent().parent() != null && - thumb.parent().parent().attr("class") != null && - thumb.parent().parent().attr("class").equals("aimage-center") + thumb.parent().attr("class") != null && + thumb.parent().attr("class").contains("colorbox") ) { - src = thumb.attr("src"); + src = thumb.parent().attr("href"); LOGGER.debug("Found url with " + src); if (!src.contains("zizki.com")) { } else { diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/tamindirmp3.java b/src/main/java/com/rarchives/ripme/ripper/rippers/tamindirmp3.java index 2c82d849..d10e1205 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/tamindirmp3.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/tamindirmp3.java @@ -40,12 +40,6 @@ public class tamindirmp3 extends AbstractHTMLRipper { "tamindir.com/files/albumid - got " + url + "instead"); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - - } - @Override public List getURLsFromPage(Document doc) { List music = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/CliphunterRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/CliphunterRipper.java index 16526945..a9c39a9c 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/CliphunterRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/CliphunterRipper.java @@ -2,6 +2,8 @@ package com.rarchives.ripme.ripper.rippers.video; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -54,7 +56,7 @@ public class CliphunterRipper extends VideoRipper { } 
@Override - public void rip() throws IOException { + public void rip() throws IOException, URISyntaxException { LOGGER.info("Retrieving " + this.url); String html = Http.url(url).get().html(); String jsonString = html.substring(html.indexOf("var flashVars = {d: '") + 21); @@ -71,7 +73,7 @@ public class CliphunterRipper extends VideoRipper { vidURL += c; } } - addURLToDownload(new URL(vidURL), HOST + "_" + getGID(this.url)); + addURLToDownload(new URI(vidURL).toURL(), HOST + "_" + getGID(this.url)); waitForThreads(); } } \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/PornhubRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/PornhubRipper.java index 678435af..5a3dcebb 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/PornhubRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/PornhubRipper.java @@ -2,6 +2,8 @@ package com.rarchives.ripme.ripper.rippers.video; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.HashMap; @@ -56,7 +58,7 @@ public class PornhubRipper extends VideoRipper { } @Override - public void rip() throws IOException { + public void rip() throws IOException, URISyntaxException { String vidUrl = ""; LOGGER.info(" Retrieving " + this.url.toExternalForm()); Document doc = Http.url(this.url).get(); @@ -146,7 +148,7 @@ public class PornhubRipper extends VideoRipper { if (vidUrl.equals("")) { throw new IOException("Unable to find encrypted video URL at " + this.url); } - addURLToDownload(new URL(vidUrl), HOST + "_" + bestQuality + "p_" + getGID(this.url)); + addURLToDownload(new URI(vidUrl).toURL(), HOST + "_" + bestQuality + "p_" + getGID(this.url)); waitForThreads(); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/StickyXXXRipper.java 
b/src/main/java/com/rarchives/ripme/ripper/rippers/video/StickyXXXRipper.java index 7c951b23..8708e552 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/StickyXXXRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/StickyXXXRipper.java @@ -2,6 +2,8 @@ package com.rarchives.ripme.ripper.rippers.video; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -32,11 +34,6 @@ public class StickyXXXRipper extends VideoRipper { return m.matches(); } - @Override - public URL sanitizeURL(URL url) throws MalformedURLException { - return url; - } - @Override public String getGID(URL url) throws MalformedURLException { Pattern p = Pattern.compile("^https?://.*stickyxxx\\.com(/)(.*)/$"); @@ -52,7 +49,7 @@ public class StickyXXXRipper extends VideoRipper { } @Override - public void rip() throws IOException { + public void rip() throws IOException, URISyntaxException { LOGGER.info("Retrieving " + this.url); Document doc = Http.url(url).get(); Elements videos = doc.select(".wp-video > video > source"); @@ -60,7 +57,7 @@ public class StickyXXXRipper extends VideoRipper { throw new IOException("Could not find Embed code at " + url); } String vidUrl = videos.attr("src"); - addURLToDownload(new URL(vidUrl), HOST + "_" + getGID(this.url)); + addURLToDownload(new URI(vidUrl).toURL(), HOST + "_" + getGID(this.url)); waitForThreads(); } } \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/TwitchVideoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/TwitchVideoRipper.java index d977708a..bd4ee556 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/TwitchVideoRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/TwitchVideoRipper.java @@ -1,80 +1,77 @@ -package 
com.rarchives.ripme.ripper.rippers.video; - -import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; -import org.jsoup.select.Elements; - - -import com.rarchives.ripme.ripper.VideoRipper; -import com.rarchives.ripme.utils.Http; - -public class TwitchVideoRipper extends VideoRipper { - - private static final String HOST = "twitch"; - - public TwitchVideoRipper(URL url) throws IOException { - super(url); - } - - @Override - public String getHost() { - return HOST; - } - - @Override - public boolean canRip(URL url) { - Pattern p = Pattern.compile("^https://clips\\.twitch\\.tv/.*$"); - Matcher m = p.matcher(url.toExternalForm()); - return m.matches(); - } - - @Override - public URL sanitizeURL(URL url) throws MalformedURLException { - return url; - } - - @Override - public String getGID(URL url) throws MalformedURLException { - Pattern p = Pattern.compile("^https://clips\\.twitch\\.tv/(.*)$"); - Matcher m = p.matcher(url.toExternalForm()); - if (m.matches()) { - return m.group(m.groupCount()); - } - - throw new MalformedURLException( - "Expected Twitch.tv format:" - + "https://clips.twitch.tv/####" - + " Got: " + url); - } - - @Override - public void rip() throws IOException { - LOGGER.info("Retrieving " + this.url); - Document doc = Http.url(url).get(); - - //Get user friendly filename from page title - String title = doc.title(); - - Elements script = doc.select("script"); - if (script.isEmpty()) { - throw new IOException("Could not find script code at " + url); - } - //Regex assumes highest quality source is listed first - Pattern p = Pattern.compile("\"source\":\"(.*?)\""); - - for (Element element : script) { - Matcher m = p.matcher(element.data()); - if (m.find()){ - String vidUrl = m.group(1); - addURLToDownload(new URL(vidUrl), HOST + "_" + title); - } - } - waitForThreads(); - } +package 
com.rarchives.ripme.ripper.rippers.video; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.jsoup.nodes.Document; +import org.jsoup.nodes.Element; +import org.jsoup.select.Elements; + + +import com.rarchives.ripme.ripper.VideoRipper; +import com.rarchives.ripme.utils.Http; + +public class TwitchVideoRipper extends VideoRipper { + + private static final String HOST = "twitch"; + + public TwitchVideoRipper(URL url) throws IOException { + super(url); + } + + @Override + public String getHost() { + return HOST; + } + + @Override + public boolean canRip(URL url) { + Pattern p = Pattern.compile("^https://clips\\.twitch\\.tv/.*$"); + Matcher m = p.matcher(url.toExternalForm()); + return m.matches(); + } + + @Override + public String getGID(URL url) throws MalformedURLException { + Pattern p = Pattern.compile("^https://clips\\.twitch\\.tv/(.*)$"); + Matcher m = p.matcher(url.toExternalForm()); + if (m.matches()) { + return m.group(m.groupCount()); + } + + throw new MalformedURLException( + "Expected Twitch.tv format:" + + "https://clips.twitch.tv/####" + + " Got: " + url); + } + + @Override + public void rip() throws IOException, URISyntaxException { + LOGGER.info("Retrieving " + this.url); + Document doc = Http.url(url).get(); + + //Get user friendly filename from page title + String title = doc.title(); + + Elements script = doc.select("script"); + if (script.isEmpty()) { + throw new IOException("Could not find script code at " + url); + } + //Regex assumes highest quality source is listed first + Pattern p = Pattern.compile("\"source\":\"(.*?)\""); + + for (Element element : script) { + Matcher m = p.matcher(element.data()); + if (m.find()){ + String vidUrl = m.group(1); + addURLToDownload(new URI(vidUrl).toURL(), HOST + "_" + title); + } + } + waitForThreads(); + } } \ No newline at end of 
file diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/ViddmeRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/ViddmeRipper.java index 078b32a5..279e1d3a 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/ViddmeRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/ViddmeRipper.java @@ -2,6 +2,8 @@ package com.rarchives.ripme.ripper.rippers.video; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -52,7 +54,7 @@ public class ViddmeRipper extends VideoRipper { } @Override - public void rip() throws IOException { + public void rip() throws IOException, URISyntaxException { LOGGER.info(" Retrieving " + this.url.toExternalForm()); Document doc = Http.url(this.url).get(); Elements videos = doc.select("meta[name=twitter:player:stream]"); @@ -61,7 +63,7 @@ public class ViddmeRipper extends VideoRipper { } String vidUrl = videos.first().attr("content"); vidUrl = vidUrl.replaceAll("&", "&"); - addURLToDownload(new URL(vidUrl), HOST + "_" + getGID(this.url)); + addURLToDownload(new URI(vidUrl).toURL(), HOST + "_" + getGID(this.url)); waitForThreads(); } } \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/VidearnRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/VidearnRipper.java index 052b2cbe..707fa03f 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/VidearnRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/VidearnRipper.java @@ -2,6 +2,8 @@ package com.rarchives.ripme.ripper.rippers.video; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.List; import java.util.regex.Matcher; @@ -33,11 +35,6 @@ public class VidearnRipper 
extends VideoRipper { return m.matches(); } - @Override - public URL sanitizeURL(URL url) throws MalformedURLException { - return url; - } - @Override public String getGID(URL url) throws MalformedURLException { Pattern p = Pattern.compile("^https?://[wm.]*videarn\\.com/[a-zA-Z0-9\\-]+/([0-9]+).*$"); @@ -53,7 +50,7 @@ public class VidearnRipper extends VideoRipper { } @Override - public void rip() throws IOException { + public void rip() throws IOException, URISyntaxException { LOGGER.info("Retrieving " + this.url); Document doc = Http.url(url).get(); List mp4s = Utils.between(doc.html(), "file:\"", "\""); @@ -61,7 +58,7 @@ public class VidearnRipper extends VideoRipper { throw new IOException("Could not find files at " + url); } String vidUrl = mp4s.get(0); - addURLToDownload(new URL(vidUrl), HOST + "_" + getGID(this.url)); + addURLToDownload(new URI(vidUrl).toURL(), HOST + "_" + getGID(this.url)); waitForThreads(); } -} \ No newline at end of file +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/VkRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/VkRipper.java index 70528727..84206abb 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/VkRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/VkRipper.java @@ -2,6 +2,8 @@ package com.rarchives.ripme.ripper.rippers.video; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -51,10 +53,10 @@ public class VkRipper extends VideoRipper { } @Override - public void rip() throws IOException { + public void rip() throws IOException, URISyntaxException { LOGGER.info(" Retrieving " + this.url); String videoURL = getVideoURLAtPage(this.url.toExternalForm()); - addURLToDownload(new URL(videoURL), HOST + "_" + getGID(this.url)); + addURLToDownload(new URI(videoURL).toURL(), HOST + "_" + 
getGID(this.url)); waitForThreads(); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/YuvutuRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/YuvutuRipper.java index 2891efb5..3fb55b6f 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/YuvutuRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/YuvutuRipper.java @@ -2,6 +2,8 @@ package com.rarchives.ripme.ripper.rippers.video; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -34,11 +36,6 @@ public class YuvutuRipper extends VideoRipper { return m.matches(); } - @Override - public URL sanitizeURL(URL url) throws MalformedURLException { - return url; - } - @Override public String getGID(URL url) throws MalformedURLException { Pattern p = Pattern.compile("^http://www\\.yuvutu\\.com/video/[0-9]+/(.*)$"); @@ -54,7 +51,7 @@ public class YuvutuRipper extends VideoRipper { } @Override - public void rip() throws IOException { + public void rip() throws IOException, URISyntaxException { LOGGER.info("Retrieving " + this.url); Document doc = Http.url(url).get(); Element iframe = doc.select("iframe").first(); @@ -74,7 +71,7 @@ public class YuvutuRipper extends VideoRipper { Matcher m = p.matcher(element.data()); if (m.find()){ String vidUrl = m.group(1); - addURLToDownload(new URL(vidUrl), HOST + "_" + getGID(this.url)); + addURLToDownload(new URI(vidUrl).toURL(), HOST + "_" + getGID(this.url)); } } waitForThreads(); diff --git a/src/main/java/com/rarchives/ripme/ui/ClipboardUtils.java b/src/main/java/com/rarchives/ripme/ui/ClipboardUtils.java index 24c46cd4..55b68d65 100644 --- a/src/main/java/com/rarchives/ripme/ui/ClipboardUtils.java +++ b/src/main/java/com/rarchives/ripme/ui/ClipboardUtils.java @@ -1,8 +1,8 @@ package com.rarchives.ripme.ui; -import java.awt.HeadlessException; import 
java.awt.Toolkit; import java.awt.datatransfer.DataFlavor; +import java.awt.datatransfer.Transferable; import java.awt.datatransfer.UnsupportedFlavorException; import java.io.IOException; import java.util.HashSet; @@ -30,16 +30,13 @@ class ClipboardUtils { } public static String getClipboardString() { - try { - return (String) Toolkit - .getDefaultToolkit() - .getSystemClipboard() - .getData(DataFlavor.stringFlavor); - } catch (IllegalStateException e) { - e.printStackTrace(); - logger.error("Caught and recovered from IllegalStateException: " + e.getMessage()); - } catch (HeadlessException | IOException | UnsupportedFlavorException e) { - e.printStackTrace(); + Transferable contents = Toolkit.getDefaultToolkit().getSystemClipboard().getContents(null); + if (contents.isDataFlavorSupported(DataFlavor.stringFlavor)) { + try { + return (String) contents.getTransferData(DataFlavor.stringFlavor); + } catch (UnsupportedFlavorException | IOException e) { + logger.debug("ignore this one" + e.getMessage()); + } } return null; } @@ -47,7 +44,7 @@ class ClipboardUtils { class AutoripThread extends Thread { volatile boolean isRunning = false; - private Set rippedURLs = new HashSet<>(); + private final Set rippedURLs = new HashSet<>(); public void run() { isRunning = true; diff --git a/src/main/java/com/rarchives/ripme/ui/ContextMenuMouseListener.java b/src/main/java/com/rarchives/ripme/ui/ContextMenuMouseListener.java index 94348411..dac3d0a4 100644 --- a/src/main/java/com/rarchives/ripme/ui/ContextMenuMouseListener.java +++ b/src/main/java/com/rarchives/ripme/ui/ContextMenuMouseListener.java @@ -1,15 +1,16 @@ package com.rarchives.ripme.ui; -import java.awt.Toolkit; -import java.awt.datatransfer.DataFlavor; -import java.awt.event.ActionEvent; -import java.awt.event.InputEvent; -import java.awt.event.MouseAdapter; -import java.awt.event.MouseEvent; +import com.rarchives.ripme.uiUtils.ContextActionProtections; -import javax.swing.AbstractAction; -import javax.swing.Action; 
-import javax.swing.JPopupMenu; +import java.awt.Toolkit; +import java.awt.datatransfer.Clipboard; +import java.awt.datatransfer.DataFlavor; +import java.awt.datatransfer.Transferable; +import java.awt.datatransfer.UnsupportedFlavorException; +import java.awt.event.*; +import java.io.IOException; + +import javax.swing.*; import javax.swing.text.JTextComponent; /** @@ -20,27 +21,72 @@ import javax.swing.text.JTextComponent; public class ContextMenuMouseListener extends MouseAdapter { private JPopupMenu popup = new JPopupMenu(); + public String getDebugSavedString() { + return debugSavedString; + } + + private String debugSavedString; + + public Action getCutAction() { + return cutAction; + } + private Action cutAction; private Action copyAction; private Action pasteAction; + + public Action getCopyAction() { + return copyAction; + } + + public Action getPasteAction() { + return pasteAction; + } + + public Action getUndoAction() { + return undoAction; + } + + public Action getSelectAllAction() { + return selectAllAction; + } + private Action undoAction; private Action selectAllAction; + public JTextComponent getTextComponent() { + return textComponent; + } + private JTextComponent textComponent; + + public String getSavedString() { + return savedString; + } + private String savedString = ""; private Actions lastActionSelected; private enum Actions { UNDO, CUT, COPY, PASTE, SELECT_ALL } + @SuppressWarnings("serial") - public ContextMenuMouseListener() { + public ContextMenuMouseListener(JTextField ripTextfield) { + this.textComponent = ripTextfield; + + //Add protection for cntl+v + + generate_popup(); + } + + private void generate_popup() { undoAction = new AbstractAction("Undo") { @Override public void actionPerformed(ActionEvent ae) { textComponent.setText(""); textComponent.replaceSelection(savedString); - + debugSavedString = textComponent.getText(); lastActionSelected = Actions.UNDO; } }; @@ -54,6 +100,7 @@ public class ContextMenuMouseListener extends 
MouseAdapter { public void actionPerformed(ActionEvent ae) { lastActionSelected = Actions.CUT; savedString = textComponent.getText(); + debugSavedString = savedString; textComponent.cut(); } }; @@ -65,6 +112,7 @@ public class ContextMenuMouseListener extends MouseAdapter { @Override public void actionPerformed(ActionEvent ae) { lastActionSelected = Actions.COPY; + debugSavedString = textComponent.getText(); textComponent.copy(); } }; @@ -77,7 +125,8 @@ public class ContextMenuMouseListener extends MouseAdapter { public void actionPerformed(ActionEvent ae) { lastActionSelected = Actions.PASTE; savedString = textComponent.getText(); - textComponent.paste(); + debugSavedString = savedString; + ContextActionProtections.pasteFromClipboard(textComponent); } }; @@ -89,6 +138,7 @@ public class ContextMenuMouseListener extends MouseAdapter { @Override public void actionPerformed(ActionEvent ae) { lastActionSelected = Actions.SELECT_ALL; + debugSavedString = textComponent.getText(); textComponent.selectAll(); } }; @@ -96,9 +146,30 @@ public class ContextMenuMouseListener extends MouseAdapter { popup.add(selectAllAction); } + + @Override + public void mousePressed(MouseEvent e) { + showPopup(e); + } + + @Override + public void mouseReleased(MouseEvent e) { + showPopup(e); + } + + private void showPopup(MouseEvent e) { + if (e.isPopupTrigger()) { + if(this.popup == null) { + popup = new JPopupMenu(); + generate_popup(); + } + popup.show(e.getComponent(), e.getX(), e.getY()); + } + } + @Override public void mouseClicked(MouseEvent e) { - if (e.getModifiers() == InputEvent.BUTTON3_MASK) { + if (e.getModifiersEx() == InputEvent.BUTTON3_DOWN_MASK) { if (!(e.getSource() instanceof JTextComponent)) { return; } diff --git a/src/main/java/com/rarchives/ripme/ui/History.java b/src/main/java/com/rarchives/ripme/ui/History.java index f3f9451f..190eeeb8 100644 --- a/src/main/java/com/rarchives/ripme/ui/History.java +++ b/src/main/java/com/rarchives/ripme/ui/History.java @@ -100,7 +100,7 
@@ public class History { public void fromFile(String filename) throws IOException { try (InputStream is = new FileInputStream(filename)) { - String jsonString = IOUtils.toString(is); + String jsonString = IOUtils.toString(is, "UTF-8"); JSONArray jsonArray = new JSONArray(jsonString); fromJSON(jsonArray); } catch (JSONException e) { @@ -134,7 +134,7 @@ public class History { public void toFile(String filename) throws IOException { try (OutputStream os = new FileOutputStream(filename)) { - IOUtils.write(toJSON().toString(2), os); + IOUtils.write(toJSON().toString(2), os, "UTF-8"); } } } diff --git a/src/main/java/com/rarchives/ripme/ui/HistoryMenuMouseListener.java b/src/main/java/com/rarchives/ripme/ui/HistoryMenuMouseListener.java index 9044531f..8a69477c 100644 --- a/src/main/java/com/rarchives/ripme/ui/HistoryMenuMouseListener.java +++ b/src/main/java/com/rarchives/ripme/ui/HistoryMenuMouseListener.java @@ -62,8 +62,17 @@ class HistoryMenuMouseListener extends MouseAdapter { } @Override - public void mouseClicked(MouseEvent e) { - if (e.getModifiers() == InputEvent.BUTTON3_MASK) { + public void mousePressed(MouseEvent e) { + checkPopupTrigger(e); + } + + @Override + public void mouseReleased(MouseEvent e) { + checkPopupTrigger(e); + } + + private void checkPopupTrigger(MouseEvent e) { + if (e.getModifiersEx() == InputEvent.BUTTON3_DOWN_MASK) { if (!(e.getSource() instanceof JTable)) { return; } diff --git a/src/main/java/com/rarchives/ripme/ui/MainWindow.java b/src/main/java/com/rarchives/ripme/ui/MainWindow.java index bf19a7be..13e05fed 100644 --- a/src/main/java/com/rarchives/ripme/ui/MainWindow.java +++ b/src/main/java/com/rarchives/ripme/ui/MainWindow.java @@ -1,75 +1,56 @@ package com.rarchives.ripme.ui; -import java.awt.*; -import java.awt.TrayIcon.MessageType; -import java.awt.event.ActionEvent; -import java.awt.event.ActionListener; -import java.awt.event.MouseAdapter; -import java.awt.event.MouseEvent; -import java.awt.event.WindowAdapter; -import 
java.awt.event.WindowEvent; -import java.io.*; -import java.net.MalformedURLException; -import java.net.URI; -import java.net.URL; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.*; -import java.util.List; +import com.rarchives.ripme.ripper.AbstractRipper; +import com.rarchives.ripme.uiUtils.ContextActionProtections; +import com.rarchives.ripme.utils.RipUtils; +import com.rarchives.ripme.utils.Utils; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LoggerContext; +import org.apache.logging.log4j.core.config.Configuration; +import org.apache.logging.log4j.core.config.LoggerConfig; import javax.imageio.ImageIO; -import javax.swing.DefaultListModel; -import javax.swing.ImageIcon; -import javax.swing.JButton; -import javax.swing.JCheckBox; -import javax.swing.JComboBox; -import javax.swing.JFileChooser; -import javax.swing.JFrame; -import javax.swing.JLabel; -import javax.swing.JList; -import javax.swing.JOptionPane; -import javax.swing.JPanel; -import javax.swing.JProgressBar; -import javax.swing.JScrollPane; -import javax.swing.JTable; -import javax.swing.JTextField; -import javax.swing.JTextPane; -import javax.swing.ListSelectionModel; -import javax.swing.SwingUtilities; -import javax.swing.UIManager; +import javax.swing.*; import javax.swing.border.EmptyBorder; import javax.swing.event.DocumentEvent; import javax.swing.event.DocumentListener; import javax.swing.event.ListDataEvent; import javax.swing.event.ListDataListener; import javax.swing.table.AbstractTableModel; -import javax.swing.text.BadLocationException; -import javax.swing.text.SimpleAttributeSet; -import javax.swing.text.StyleConstants; -import javax.swing.text.StyledDocument; - -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.FileAppender; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; - -import 
com.rarchives.ripme.ripper.AbstractRipper; -import com.rarchives.ripme.utils.RipUtils; -import com.rarchives.ripme.utils.Utils; - -import javax.swing.UnsupportedLookAndFeelException; +import javax.swing.text.*; +import java.awt.*; +import java.awt.TrayIcon.MessageType; +import java.awt.event.*; +import java.io.BufferedReader; +import java.io.File; +import java.io.FileReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import java.util.stream.Stream; /** * Everything UI-related starts and ends here. */ public final class MainWindow implements Runnable, RipStatusHandler { - private static final Logger LOGGER = Logger.getLogger(MainWindow.class); + private static final Logger LOGGER = LogManager.getLogger(MainWindow.class); private boolean isRipping = false; // Flag to indicate if we're ripping something private static JFrame mainFrame; + private static JTextField ripTextfield; private static JButton ripButton, stopButton; @@ -98,7 +79,6 @@ public final class MainWindow implements Runnable, RipStatusHandler { public static JButton optionQueue; private static JPanel queuePanel; private static DefaultListModel queueListModel; - private static QueueMenuMouseListener queueMenuMouseListener; // Configuration private static JButton optionConfiguration; @@ -111,9 +91,11 @@ public final class MainWindow implements Runnable, RipStatusHandler { private static JLabel configSaveDirLabel; private static JButton configSaveDirButton; private static JTextField configRetriesText; + private JTextField configRetrySleepText; private static JCheckBox configAutoupdateCheckbox; private static JComboBox configLogLevelCombobox; private static JCheckBox configURLHistoryCheckbox; + private 
static JCheckBox configSSLVerifyOff; private static JCheckBox configPlaySound; private static JCheckBox configSaveOrderCheckbox; private static JCheckBox configShowPopup; @@ -128,6 +110,7 @@ public final class MainWindow implements Runnable, RipStatusHandler { private static JLabel configThreadsLabel; private static JLabel configTimeoutLabel; private static JLabel configRetriesLabel; + private static JLabel configRetrySleepLabel; // This doesn't really belong here but I have no idea where else to put it private static JButton configUrlFileChooserButton; @@ -144,7 +127,7 @@ public final class MainWindow implements Runnable, RipStatusHandler { model = queueListModel; if (model.size() > 0) { - Utils.setConfigList("queue", (Enumeration) model.elements()); + Utils.setConfigList("queue", model.elements()); Utils.saveConfig(); } @@ -175,7 +158,7 @@ public final class MainWindow implements Runnable, RipStatusHandler { queueListModel.addElement(url); } - public MainWindow() { + public MainWindow() throws IOException { mainFrame = new JFrame("RipMe v" + UpdateUtils.getThisJarVersion()); mainFrame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); mainFrame.setLayout(new GridBagLayout()); @@ -230,6 +213,7 @@ public final class MainWindow implements Runnable, RipStatusHandler { Utils.setConfigBoolean("descriptions.save", configSaveDescriptions.isSelected()); Utils.setConfigBoolean("prefer.mp4", configPreferMp4.isSelected()); Utils.setConfigBoolean("remember.url_history", configURLHistoryCheckbox.isSelected()); + Utils.setConfigBoolean("ssl.verify.off", configSSLVerifyOff.isSelected()); Utils.setConfigString("lang", configSelectLangComboBox.getSelectedItem().toString()); saveWindowPosition(mainFrame); saveHistory(); @@ -271,6 +255,7 @@ public final class MainWindow implements Runnable, RipStatusHandler { try { setupTrayIcon(); } catch (Exception e) { + LOGGER.warn(e.getMessage()); } EmptyBorder emptyBorder = new EmptyBorder(5, 5, 5, 5); @@ -292,7 +277,47 @@ public final class 
MainWindow implements Runnable, RipStatusHandler { } ripTextfield = new JTextField("", 20); - ripTextfield.addMouseListener(new ContextMenuMouseListener()); + ripTextfield.addMouseListener(new ContextMenuMouseListener(ripTextfield)); + + //Add keyboard protection of cntl + v for pasting. + ripTextfield.addKeyListener(new KeyAdapter() { + @Override + public void keyTyped(KeyEvent e) { + if (e.getKeyChar() == 22) { // ASCII code for Ctrl+V + ContextActionProtections.pasteFromClipboard(ripTextfield); + } + } + }); + + /* + Alternatively, just set this, and use + ((AbstractDocument) ripTextfield.getDocument()).setDocumentFilter(new LengthLimitDocumentFilter(256)); + private static class LengthLimitDocumentFilter extends DocumentFilter { + private final int maxLength; + + public LengthLimitDocumentFilter(int maxLength) { + this.maxLength = maxLength; + } + + @Override + public void insertString(FilterBypass fb, int offset, String string, AttributeSet attr) throws BadLocationException { + // if ((fb.getDocument().getLength() + string.length()) <= maxLength) { + super.insertString(fb, offset, string.substring(0, maxLength), attr); + // } + } + + @Override + public void replace(FilterBypass fb, int offset, int length, String text, AttributeSet attrs) throws BadLocationException { + int currentLength = fb.getDocument().getLength(); + int newLength = currentLength - length + text.length(); + + // if (newLength <= maxLength) { + super.replace(fb, offset, length, text.substring(0, maxLength), attrs); + // } + } + } + */ + ImageIcon ripIcon = new ImageIcon(mainIcon); ripButton = new JButton("Rip", ripIcon); stopButton = new JButton("Stop"); @@ -360,6 +385,7 @@ public final class MainWindow implements Runnable, RipStatusHandler { icon = ImageIO.read(getClass().getClassLoader().getResource("gear.png")); optionConfiguration.setIcon(new ImageIcon(icon)); } catch (Exception e) { + LOGGER.warn(e.getMessage()); } gbc.gridx = 0; optionsPanel.add(optionLog, gbc); @@ -480,9 +506,10 @@ 
public final class MainWindow implements Runnable, RipStatusHandler { queuePanel.setBorder(emptyBorder); queuePanel.setVisible(false); queuePanel.setPreferredSize(new Dimension(300, 250)); - queueListModel = new DefaultListModel(); + queueListModel = new DefaultListModel<>(); JList queueList = new JList(queueListModel); queueList.setSelectionMode(ListSelectionModel.MULTIPLE_INTERVAL_SELECTION); + QueueMenuMouseListener queueMenuMouseListener; queueList.addMouseListener( queueMenuMouseListener = new QueueMenuMouseListener(d -> updateQueue(queueListModel))); JScrollPane queueListScroll = new JScrollPane(queueList, JScrollPane.VERTICAL_SCROLLBAR_AS_NEEDED, @@ -510,12 +537,15 @@ public final class MainWindow implements Runnable, RipStatusHandler { configUpdateButton = new JButton(Utils.getLocalizedString("check.for.updates")); configUpdateLabel = new JLabel( Utils.getLocalizedString("current.version") + ": " + UpdateUtils.getThisJarVersion(), JLabel.RIGHT); - configThreadsLabel = new JLabel(Utils.getLocalizedString("max.download.threads") + ":", JLabel.RIGHT); + configThreadsLabel = new JLabel(Utils.getLocalizedString("max.download.threads"), JLabel.RIGHT); configTimeoutLabel = new JLabel(Utils.getLocalizedString("timeout.mill"), JLabel.RIGHT); configRetriesLabel = new JLabel(Utils.getLocalizedString("retry.download.count"), JLabel.RIGHT); - configThreadsText = new JTextField(Integer.toString(Utils.getConfigInteger("threads.size", 3))); - configTimeoutText = new JTextField(Integer.toString(Utils.getConfigInteger("download.timeout", 60000))); - configRetriesText = new JTextField(Integer.toString(Utils.getConfigInteger("download.retries", 3))); + configRetrySleepLabel = new JLabel(Utils.getLocalizedString("retry.sleep.mill"), JLabel.RIGHT); + configThreadsText = configField("threads.size", 3); + configTimeoutText = configField("download.timeout", 60000); + configRetriesText = configField("download.retries", 3); + configRetrySleepText = configField("download.retry.sleep", 
5000); + configOverwriteCheckbox = addNewCheckbox(Utils.getLocalizedString("overwrite.existing.files"), "file.overwrite", false); configAutoupdateCheckbox = addNewCheckbox(Utils.getLocalizedString("auto.update"), "auto.update", true); @@ -537,12 +567,14 @@ public final class MainWindow implements Runnable, RipStatusHandler { true); configURLHistoryCheckbox = addNewCheckbox(Utils.getLocalizedString("remember.url.history"), "remember.url_history", true); + configSSLVerifyOff = addNewCheckbox(Utils.getLocalizedString("ssl.verify.off"), + "ssl.verify.off", false); configUrlFileChooserButton = new JButton(Utils.getLocalizedString("download.url.list")); configLogLevelCombobox = new JComboBox<>( new String[] { "Log level: Error", "Log level: Warn", "Log level: Info", "Log level: Debug" }); configSelectLangComboBox = new JComboBox<>(Utils.getSupportedLanguages()); - configSelectLangComboBox.setSelectedItem(Utils.getSelectedLanguage()); + configSelectLangComboBox.setSelectedItem(Utils.getConfigString("lang", Utils.getSelectedLanguage())); configLogLevelCombobox.setSelectedItem(Utils.getConfigString("log.level", "Log level: Debug")); setLogLevel(configLogLevelCombobox.getSelectedItem().toString()); configSaveDirLabel = new JLabel(); @@ -552,24 +584,28 @@ public final class MainWindow implements Runnable, RipStatusHandler { configSaveDirLabel.setForeground(Color.BLUE); configSaveDirLabel.setCursor(new Cursor(Cursor.HAND_CURSOR)); } catch (Exception e) { + LOGGER.error(e); } configSaveDirLabel.setToolTipText(configSaveDirLabel.getText()); configSaveDirLabel.setHorizontalAlignment(JLabel.RIGHT); configSaveDirButton = new JButton(Utils.getLocalizedString("select.save.dir") + "..."); - addItemToConfigGridBagConstraints(gbc, 0, configUpdateLabel, configUpdateButton); - addItemToConfigGridBagConstraints(gbc, 1, configAutoupdateCheckbox, configLogLevelCombobox); - addItemToConfigGridBagConstraints(gbc, 2, configThreadsLabel, configThreadsText); - 
addItemToConfigGridBagConstraints(gbc, 3, configTimeoutLabel, configTimeoutText); - addItemToConfigGridBagConstraints(gbc, 4, configRetriesLabel, configRetriesText); - addItemToConfigGridBagConstraints(gbc, 5, configOverwriteCheckbox, configSaveOrderCheckbox); - addItemToConfigGridBagConstraints(gbc, 6, configPlaySound, configSaveLogs); - addItemToConfigGridBagConstraints(gbc, 7, configShowPopup, configSaveURLsOnly); - addItemToConfigGridBagConstraints(gbc, 8, configClipboardAutorip, configSaveAlbumTitles); - addItemToConfigGridBagConstraints(gbc, 9, configSaveDescriptions, configPreferMp4); - addItemToConfigGridBagConstraints(gbc, 10, configWindowPosition, configURLHistoryCheckbox); - addItemToConfigGridBagConstraints(gbc, 11, configSelectLangComboBox, configUrlFileChooserButton); - addItemToConfigGridBagConstraints(gbc, 12, configSaveDirLabel, configSaveDirButton); + var idx = 0; + addItemToConfigGridBagConstraints(gbc, idx++, configUpdateLabel, configUpdateButton); + addItemToConfigGridBagConstraints(gbc, idx++, configAutoupdateCheckbox, configLogLevelCombobox); + addItemToConfigGridBagConstraints(gbc, idx++, configThreadsLabel, configThreadsText); + addItemToConfigGridBagConstraints(gbc, idx++, configTimeoutLabel, configTimeoutText); + addItemToConfigGridBagConstraints(gbc, idx++, configRetriesLabel, configRetriesText); + addItemToConfigGridBagConstraints(gbc, idx++, configRetrySleepLabel, configRetrySleepText); + addItemToConfigGridBagConstraints(gbc, idx++, configOverwriteCheckbox, configSaveOrderCheckbox); + addItemToConfigGridBagConstraints(gbc, idx++, configPlaySound, configSaveLogs); + addItemToConfigGridBagConstraints(gbc, idx++, configShowPopup, configSaveURLsOnly); + addItemToConfigGridBagConstraints(gbc, idx++, configClipboardAutorip, configSaveAlbumTitles); + addItemToConfigGridBagConstraints(gbc, idx++, configSaveDescriptions, configPreferMp4); + addItemToConfigGridBagConstraints(gbc, idx++, configWindowPosition, configURLHistoryCheckbox); + 
addItemToConfigGridBagConstraints(gbc, idx++, configSSLVerifyOff, configSSLVerifyOff); + addItemToConfigGridBagConstraints(gbc, idx++, configSelectLangComboBox, configUrlFileChooserButton); + addItemToConfigGridBagConstraints(gbc, idx++, configSaveDirLabel, configSaveDirButton); emptyPanel = new JPanel(); emptyPanel.setPreferredSize(new Dimension(0, 0)); @@ -600,6 +636,40 @@ public final class MainWindow implements Runnable, RipStatusHandler { gbc.fill = GridBagConstraints.HORIZONTAL; } + private JTextField configField(String key, int defaultValue) { + final var field = new JTextField(Integer.toString(Utils.getConfigInteger(key, defaultValue))); + field.getDocument().addDocumentListener(new DocumentListener() { + + @Override + public void insertUpdate(DocumentEvent e) { + checkAndUpdate(); + } + + @Override + public void removeUpdate(DocumentEvent e) { + checkAndUpdate(); + } + + @Override + public void changedUpdate(DocumentEvent e) { + checkAndUpdate(); + } + + private void checkAndUpdate() { + final var txt = field.getText(); + try { + final var newValue = Integer.parseInt(txt); + if (newValue>0) { + Utils.setConfigInteger(key, newValue); + } + } catch (final Exception e) { + LOGGER.warn(e.getMessage()); + } + } + }); + return field; + } + private void addItemToConfigGridBagConstraints(GridBagConstraints gbc, int gbcYValue, JLabel thing1ToAdd, JButton thing2ToAdd) { gbc.gridy = gbcYValue; @@ -673,6 +743,7 @@ public final class MainWindow implements Runnable, RipStatusHandler { configPreferMp4.setText(Utils.getLocalizedString("prefer.mp4.over.gif")); configWindowPosition.setText(Utils.getLocalizedString("restore.window.position")); configURLHistoryCheckbox.setText(Utils.getLocalizedString("remember.url.history")); + configSSLVerifyOff.setText(Utils.getLocalizedString("ssl.verify.off")); optionLog.setText(Utils.getLocalizedString("Log")); optionHistory.setText(Utils.getLocalizedString("History")); optionQueue.setText(Utils.getLocalizedString("queue")); @@ -701,13 
+772,13 @@ public final class MainWindow implements Runnable, RipStatusHandler { private void update() { try { String urlText = ripTextfield.getText().trim(); - if (urlText.equals("")) { + if (urlText.isEmpty()) { return; } if (!urlText.startsWith("http")) { urlText = "http://" + urlText; } - URL url = new URL(urlText); + URL url = new URI(urlText).toURL(); AbstractRipper ripper = AbstractRipper.getRipper(url); statusWithColor(ripper.getHost() + " album detected", Color.GREEN); } catch (Exception e) { @@ -724,8 +795,8 @@ public final class MainWindow implements Runnable, RipStatusHandler { statusProgress.setVisible(false); pack(); statusProgress.setValue(0); - status(Utils.getLocalizedString("ripping.interrupted")); - appendLog("Ripper interrupted", Color.RED); + status(Utils.getLocalizedString("download.interrupted")); + appendLog("Download interrupted", Color.RED); } }); optionLog.addActionListener(event -> { @@ -801,6 +872,7 @@ public final class MainWindow implements Runnable, RipStatusHandler { try { historyTableModel.fireTableDataChanged(); } catch (Exception e) { + LOGGER.warn(e.getMessage()); } saveHistory(); }); @@ -817,14 +889,12 @@ public final class MainWindow implements Runnable, RipStatusHandler { checkChoise.add(noButton); JFrame.setDefaultLookAndFeelDecorated(true); JFrame frame = new JFrame("Are you sure?"); - frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); + frame.setDefaultCloseOperation(JFrame.DISPOSE_ON_CLOSE); frame.add(checkChoise); frame.setSize(405, 70); frame.setVisible(true); frame.setLocationRelativeTo(null); - noButton.addActionListener(e -> { - frame.setVisible(false); - }); + noButton.addActionListener(e -> frame.setVisible(false)); yesButton.addActionListener(ed -> { frame.setVisible(false); Utils.clearURLHistory(); @@ -832,6 +902,7 @@ public final class MainWindow implements Runnable, RipStatusHandler { try { historyTableModel.fireTableDataChanged(); } catch (Exception e) { + LOGGER.warn(e.getMessage()); } saveHistory(); }); 
@@ -841,6 +912,7 @@ public final class MainWindow implements Runnable, RipStatusHandler { try { historyTableModel.fireTableDataChanged(); } catch (Exception e) { + LOGGER.warn(e.getMessage()); } saveHistory(); } @@ -871,54 +943,56 @@ public final class MainWindow implements Runnable, RipStatusHandler { t.start(); }); configLogLevelCombobox.addActionListener(arg0 -> { - String level = ((JComboBox) arg0.getSource()).getSelectedItem().toString(); + String level = ((JComboBox) arg0.getSource()).getSelectedItem().toString(); setLogLevel(level); }); configSelectLangComboBox.addActionListener(arg0 -> { - String level = ((JComboBox) arg0.getSource()).getSelectedItem().toString(); + String level = ((JComboBox) arg0.getSource()).getSelectedItem().toString(); Utils.setLanguage(level); changeLocale(); }); configSaveDirLabel.addMouseListener(new MouseAdapter() { @Override public void mouseClicked(MouseEvent e) { - File file = new File(Utils.getWorkingDirectory().toString()); - Desktop desktop = Desktop.getDesktop(); + Path file; try { - desktop.open(file); - } catch (Exception e1) { + file = Utils.getWorkingDirectory(); + Desktop desktop = Desktop.getDesktop(); + desktop.open(file.toFile()); + } catch (IOException ex) { + LOGGER.warn(ex.getMessage()); } } }); configSaveDirButton.addActionListener(arg0 -> { UIManager.put("FileChooser.useSystemExtensionHiding", false); - JFileChooser jfc = new JFileChooser(Utils.getWorkingDirectory()); + JFileChooser jfc = new JFileChooser(Utils.getWorkingDirectory().toString()); + LOGGER.debug("select save directory, current is:" + Utils.getWorkingDirectory()); jfc.setFileSelectionMode(JFileChooser.DIRECTORIES_ONLY); int returnVal = jfc.showDialog(null, "select directory"); if (returnVal != JFileChooser.APPROVE_OPTION) { return; } - File chosenFile = jfc.getSelectedFile(); - String chosenPath = null; + Path chosenPath; try { - chosenPath = chosenFile.getCanonicalPath(); + chosenPath = jfc.getSelectedFile().toPath(); } catch (Exception e) { 
LOGGER.error("Error while getting selected path: ", e); return; } configSaveDirLabel.setText(Utils.shortenPath(chosenPath)); - Utils.setConfigString("rips.directory", chosenPath); + Utils.setConfigString("rips.directory", chosenPath.toString()); }); configUrlFileChooserButton.addActionListener(arg0 -> { UIManager.put("FileChooser.useSystemExtensionHiding", false); - JFileChooser jfc = new JFileChooser(Utils.getWorkingDirectory()); + JFileChooser jfc = new JFileChooser(Utils.getWorkingDirectory().toAbsolutePath().toString()); jfc.setFileSelectionMode(JFileChooser.FILES_ONLY); int returnVal = jfc.showDialog(null, "Open"); if (returnVal != JFileChooser.APPROVE_OPTION) { return; } File chosenFile = jfc.getSelectedFile(); - String chosenPath = null; + String chosenPath; try { chosenPath = chosenFile.getCanonicalPath(); } catch (Exception e) { @@ -944,6 +1018,7 @@ public final class MainWindow implements Runnable, RipStatusHandler { addCheckboxListener(configSaveLogs, "log.save"); addCheckboxListener(configSaveURLsOnly, "urls_only.save"); addCheckboxListener(configURLHistoryCheckbox, "remember.url_history"); + addCheckboxListener(configSSLVerifyOff, "ssl.verify.off"); addCheckboxListener(configSaveAlbumTitles, "album_titles.save"); addCheckboxListener(configSaveDescriptions, "descriptions.save"); addCheckboxListener(configPreferMp4, "prefer.mp4"); @@ -977,6 +1052,7 @@ public final class MainWindow implements Runnable, RipStatusHandler { } private void setLogLevel(String level) { + // default level is error, set in case something else is given. 
Level newLevel = Level.ERROR; level = level.substring(level.lastIndexOf(' ') + 1); switch (level) { @@ -988,21 +1064,12 @@ public final class MainWindow implements Runnable, RipStatusHandler { break; case "Warn": newLevel = Level.WARN; - break; - case "Error": - newLevel = Level.ERROR; - break; - } - Logger.getRootLogger().setLevel(newLevel); - LOGGER.setLevel(newLevel); - ConsoleAppender ca = (ConsoleAppender) Logger.getRootLogger().getAppender("stdout"); - if (ca != null) { - ca.setThreshold(newLevel); - } - FileAppender fa = (FileAppender) Logger.getRootLogger().getAppender("FILE"); - if (fa != null) { - fa.setThreshold(newLevel); } + LoggerContext ctx = (LoggerContext) LogManager.getContext(false); + Configuration config = ctx.getConfiguration(); + LoggerConfig loggerConfig = config.getLoggerConfig(LogManager.ROOT_LOGGER_NAME); + loggerConfig.setLevel(newLevel); + ctx.updateLoggers(); // This causes all Loggers to refetch information from their LoggerConfig. } private void setupTrayIcon() { @@ -1050,6 +1117,7 @@ public final class MainWindow implements Runnable, RipStatusHandler { } about.append(""); } catch (Exception e) { + LOGGER.warn(e.getMessage()); } about.append("
And download videos from video sites:"); try { @@ -1066,6 +1134,7 @@ public final class MainWindow implements Runnable, RipStatusHandler { } about.append(""); } catch (Exception e) { + LOGGER.warn(e.getMessage()); } about.append("Do you want to visit the project homepage on Github?"); @@ -1114,7 +1183,7 @@ public final class MainWindow implements Runnable, RipStatusHandler { } catch (IOException | AWTException e) { // TODO implement proper stack trace handling this is really just intented as a // placeholder until you implement proper error handling - e.printStackTrace(); + LOGGER.warn(e.getMessage()); } } @@ -1145,6 +1214,7 @@ public final class MainWindow implements Runnable, RipStatusHandler { sd.insertString(sd.getLength(), text + "\n", sas); } } catch (BadLocationException e) { + LOGGER.warn(e.getMessage()); } logText.setCaretPosition(sd.getLength()); @@ -1161,8 +1231,8 @@ public final class MainWindow implements Runnable, RipStatusHandler { LOGGER.error(line); } - private void loadHistory() { - File historyFile = new File(Utils.getConfigDir() + File.separator + "history.json"); + private void loadHistory() throws IOException { + File historyFile = new File(Utils.getConfigDir() + "/history.json"); HISTORY.clear(); if (historyFile.exists()) { try { @@ -1181,23 +1251,24 @@ public final class MainWindow implements Runnable, RipStatusHandler { if (HISTORY.toList().isEmpty()) { // Loaded from config, still no entries. 
// Guess rip history based on rip folder - String[] dirs = Utils.getWorkingDirectory() - .list((dir, file) -> new File(dir.getAbsolutePath() + File.separator + file).isDirectory()); - for (String dir : dirs) { - String url = RipUtils.urlFromDirectoryName(dir); + Stream stream = Files.list(Utils.getWorkingDirectory()) + .filter(Files::isDirectory); + + stream.forEach(dir -> { + String url = RipUtils.urlFromDirectoryName(dir.toString()); if (url != null) { // We found one, add it to history HistoryEntry entry = new HistoryEntry(); entry.url = url; HISTORY.add(entry); } - } + }); } } } private void saveHistory() { - Path historyFile = Paths.get(Utils.getConfigDir() + File.separator + "history.json"); + Path historyFile = Paths.get(Utils.getConfigDir() + "/history.json"); try { if (!Files.exists(historyFile)) { Files.createDirectories(historyFile.getParent()); @@ -1214,7 +1285,7 @@ public final class MainWindow implements Runnable, RipStatusHandler { private void ripNextAlbum() { isRipping = true; // Save current state of queue to configuration. - Utils.setConfigList("queue", (Enumeration) queueListModel.elements()); + Utils.setConfigList("queue", queueListModel.elements()); if (queueListModel.isEmpty()) { // End of queue @@ -1250,10 +1321,10 @@ public final class MainWindow implements Runnable, RipStatusHandler { if (!urlString.startsWith("http")) { urlString = "http://" + urlString; } - URL url = null; + URL url; try { - url = new URL(urlString); - } catch (MalformedURLException e) { + url = new URI(urlString).toURL(); + } catch (MalformedURLException | URISyntaxException e) { LOGGER.error("[!] 
Could not generate URL for '" + urlString + "'", e); error("Given URL is not valid, expecting http://website.com/page/..."); return null; @@ -1279,11 +1350,15 @@ public final class MainWindow implements Runnable, RipStatusHandler { ripper.setObserver(this); Thread t = new Thread(ripper); if (configShowPopup.isSelected() && (!mainFrame.isVisible() || !mainFrame.isActive())) { - mainFrame.toFront(); - mainFrame.setAlwaysOnTop(true); - trayIcon.displayMessage(mainFrame.getTitle(), "Started ripping " + ripper.getURL().toExternalForm(), - MessageType.INFO); - mainFrame.setAlwaysOnTop(false); + try { + mainFrame.toFront(); + mainFrame.setAlwaysOnTop(true); + trayIcon.displayMessage(mainFrame.getTitle(), "Started ripping " + ripper.getURL().toExternalForm(), + MessageType.INFO); + mainFrame.setAlwaysOnTop(false); + } catch (NullPointerException e) { + LOGGER.error("Could not send popup, are tray icons supported?"); + } } return t; } catch (Exception e) { @@ -1306,7 +1381,7 @@ public final class MainWindow implements Runnable, RipStatusHandler { if (!urlText.startsWith("http")) { urlText = "http://" + urlText; } - URL url = new URL(urlText); + URL url = new URI(urlText).toURL(); // Ripper is needed here to throw/not throw an Exception AbstractRipper ripper = AbstractRipper.getRipper(url); return true; @@ -1376,34 +1451,39 @@ public final class MainWindow implements Runnable, RipStatusHandler { switch (msg.getStatus()) { case LOADING_RESOURCE: case DOWNLOAD_STARTED: - if (LOGGER.isEnabledFor(Level.INFO)) { + if (LOGGER.isEnabled(Level.INFO)) { appendLog("Downloading " + msg.getObject(), Color.BLACK); } break; case DOWNLOAD_COMPLETE: - if (LOGGER.isEnabledFor(Level.INFO)) { + if (LOGGER.isEnabled(Level.INFO)) { appendLog("Downloaded " + msg.getObject(), Color.GREEN); } break; case DOWNLOAD_COMPLETE_HISTORY: - if (LOGGER.isEnabledFor(Level.INFO)) { + if (LOGGER.isEnabled(Level.INFO)) { appendLog("" + msg.getObject(), Color.GREEN); } break; case DOWNLOAD_ERRORED: - if 
(LOGGER.isEnabledFor(Level.ERROR)) { + if (LOGGER.isEnabled(Level.ERROR)) { appendLog((String) msg.getObject(), Color.RED); } break; case DOWNLOAD_WARN: - if (LOGGER.isEnabledFor(Level.WARN)) { + if (LOGGER.isEnabled(Level.WARN)) { appendLog((String) msg.getObject(), Color.ORANGE); } break; + case DOWNLOAD_SKIP: + if (LOGGER.isEnabled(Level.INFO)) { + appendLog((String) msg.getObject(), Color.YELLOW); + } + break; case RIP_ERRORED: - if (LOGGER.isEnabledFor(Level.ERROR)) { + if (LOGGER.isEnabled(Level.ERROR)) { appendLog((String) msg.getObject(), Color.RED); } stopButton.setEnabled(false); @@ -1429,7 +1509,8 @@ public final class MainWindow implements Runnable, RipStatusHandler { entry.count = rsc.count; try { entry.title = ripper.getAlbumTitle(ripper.getURL()); - } catch (MalformedURLException e) { + } catch (MalformedURLException | URISyntaxException e) { + LOGGER.warn(e.getMessage()); } HISTORY.add(entry); historyTableModel.fireTableDataChanged(); @@ -1442,7 +1523,7 @@ public final class MainWindow implements Runnable, RipStatusHandler { statusProgress.setValue(0); statusProgress.setVisible(false); openButton.setVisible(true); - File f = rsc.dir; + Path f = rsc.dir; String prettyFile = Utils.shortenPath(f); openButton.setText(Utils.getLocalizedString("open") + prettyFile); mainFrame.setTitle("RipMe v" + UpdateUtils.getThisJarVersion()); @@ -1450,20 +1531,22 @@ public final class MainWindow implements Runnable, RipStatusHandler { Image folderIcon = ImageIO.read(getClass().getClassLoader().getResource("folder.png")); openButton.setIcon(new ImageIcon(folderIcon)); } catch (Exception e) { + LOGGER.warn(e.getMessage()); } /* * content key %path% the path to the album folder %url% is the album url - * - * + * + * */ if (Utils.getConfigBoolean("enable.finish.command", false)) { try { - String commandToRun = Utils.getConfigString("finish.command", "ls"); - commandToRun = commandToRun.replaceAll("%url%", url); - commandToRun = commandToRun.replaceAll("%path%", 
f.getAbsolutePath()); + String cmdStr = Utils.getConfigString("finish.command", "ls"); + cmdStr = cmdStr.replaceAll("%url%", url); + cmdStr = cmdStr.replaceAll("%path%", f.toAbsolutePath().toString()); + // java dropped the exec string executor, as the string is only split very trivial. + // do the same at the moment, and split, to get rid of java-21 deprecation warning. + String[] commandToRun = cmdStr.split(" "); LOGGER.info("RUnning command " + commandToRun); - // code from: - // https://stackoverflow.com/questions/5711084/java-runtime-getruntime-getting-output-from-executing-a-command-line-program Process proc = Runtime.getRuntime().exec(commandToRun); BufferedReader stdInput = new BufferedReader(new InputStreamReader(proc.getInputStream())); @@ -1486,7 +1569,7 @@ public final class MainWindow implements Runnable, RipStatusHandler { LOGGER.error(e.getStackTrace()); } } - appendLog("Rip complete, saved to " + f.getAbsolutePath(), Color.GREEN); + appendLog("Rip complete, saved to " + f, Color.GREEN); openButton.setActionCommand(f.toString()); openButton.addActionListener(event -> { try { @@ -1505,7 +1588,7 @@ public final class MainWindow implements Runnable, RipStatusHandler { // Update total bytes break; case NO_ALBUM_OR_USER: - if (LOGGER.isEnabledFor(Level.ERROR)) { + if (LOGGER.isEnabled(Level.ERROR)) { appendLog((String) msg.getObject(), Color.RED); } stopButton.setEnabled(false); @@ -1528,14 +1611,6 @@ public final class MainWindow implements Runnable, RipStatusHandler { ripButton.doClick(); } - public static void enableWindowPositioning() { - Utils.setConfigBoolean("window.position", true); - } - - public static void disableWindowPositioning() { - Utils.setConfigBoolean("window.position", false); - } - private static boolean hasWindowPositionBug() { String osName = System.getProperty("os.name"); // Java on Windows has a bug where if we try to manually set the position of the diff --git a/src/main/java/com/rarchives/ripme/ui/QueueMenuMouseListener.java 
b/src/main/java/com/rarchives/ripme/ui/QueueMenuMouseListener.java index 19911ee2..0be4b46f 100644 --- a/src/main/java/com/rarchives/ripme/ui/QueueMenuMouseListener.java +++ b/src/main/java/com/rarchives/ripme/ui/QueueMenuMouseListener.java @@ -58,10 +58,19 @@ class QueueMenuMouseListener extends MouseAdapter { updateQueue.accept(queueListModel); } - @SuppressWarnings("unchecked") @Override - public void mouseClicked(MouseEvent e) { - if (e.getModifiers() == InputEvent.BUTTON3_MASK) { + public void mousePressed(MouseEvent e) { + checkPopupTrigger(e); + } + + @Override + public void mouseReleased(MouseEvent e) { + checkPopupTrigger(e); + } + + @SuppressWarnings("unchecked") + private void checkPopupTrigger(MouseEvent e) { + if (e.getModifiersEx() == InputEvent.BUTTON3_DOWN_MASK) { if (!(e.getSource() instanceof JList)) { return; } diff --git a/src/main/java/com/rarchives/ripme/ui/RipStatusComplete.java b/src/main/java/com/rarchives/ripme/ui/RipStatusComplete.java index 720aa9a7..2b8058bb 100644 --- a/src/main/java/com/rarchives/ripme/ui/RipStatusComplete.java +++ b/src/main/java/com/rarchives/ripme/ui/RipStatusComplete.java @@ -1,29 +1,22 @@ package com.rarchives.ripme.ui; -import java.io.File; -import java.io.IOException; +import java.nio.file.Path; public class RipStatusComplete { - File dir = null; + Path dir = null; int count = 0; - public RipStatusComplete(File dir) { + public RipStatusComplete(Path dir) { this.dir = dir; this.count = 1; } - public RipStatusComplete(File dir, int count) { + public RipStatusComplete(Path dir, int count) { this.dir = dir; this.count = count; } public String getDir() { - String result; - try { - result = this.dir.getCanonicalPath(); - } catch (IOException e) { - result = this.dir.toString(); - } - return result; + return this.dir.toString(); } } diff --git a/src/main/java/com/rarchives/ripme/ui/RipStatusMessage.java b/src/main/java/com/rarchives/ripme/ui/RipStatusMessage.java index 207968d9..f589e9db 100644 --- 
a/src/main/java/com/rarchives/ripme/ui/RipStatusMessage.java +++ b/src/main/java/com/rarchives/ripme/ui/RipStatusMessage.java @@ -13,6 +13,7 @@ public class RipStatusMessage { DOWNLOAD_COMPLETE_HISTORY("Download Complete History"), RIP_COMPLETE("Rip Complete"), DOWNLOAD_WARN("Download problem"), + DOWNLOAD_SKIP("Download Skipped"), TOTAL_BYTES("Total bytes"), COMPLETED_BYTES("Completed bytes"), RIP_ERRORED("Rip Errored"), diff --git a/src/main/java/com/rarchives/ripme/ui/UpdateUtils.java b/src/main/java/com/rarchives/ripme/ui/UpdateUtils.java index 42f2ad8c..d5f82a36 100644 --- a/src/main/java/com/rarchives/ripme/ui/UpdateUtils.java +++ b/src/main/java/com/rarchives/ripme/ui/UpdateUtils.java @@ -1,48 +1,60 @@ package com.rarchives.ripme.ui; -import java.awt.Dimension; -import java.io.*; -import java.net.URISyntaxException; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; - -import javax.swing.JEditorPane; -import javax.swing.JLabel; -import javax.swing.JOptionPane; -import javax.swing.JScrollPane; - -import org.apache.log4j.Logger; +import com.rarchives.ripme.utils.Utils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.json.JSONArray; import org.json.JSONObject; import org.jsoup.Connection.Response; import org.jsoup.Jsoup; import org.jsoup.nodes.Document; -import com.rarchives.ripme.utils.Utils; +import javax.swing.*; +import java.awt.*; +import java.io.BufferedWriter; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.URISyntaxException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; + +import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; public class UpdateUtils { - private static final Logger logger = Logger.getLogger(UpdateUtils.class); - private static 
final String DEFAULT_VERSION = "1.7.90"; + private static final Logger logger = LogManager.getLogger(UpdateUtils.class); + // do not update the default version without adjusting the unit test. the real version comes from METAINF.MF + private static final String DEFAULT_VERSION = "1.7.94-10-b6345398"; private static final String REPO_NAME = "ripmeapp/ripme"; - private static final String updateJsonURL = "https://raw.githubusercontent.com/" + REPO_NAME + "/master/ripme.json"; - private static String mainFileName; + private static final String updateJsonURL = "https://raw.githubusercontent.com/" + REPO_NAME + "/main/ripme.json"; + private static final Path newFile = Paths.get("ripme.jar.new"); + private static Path mainFile; + private static JSONObject ripmeJson; static { try { - mainFileName = new File(UpdateUtils.class.getProtectionDomain().getCodeSource().getLocation().toURI()).getAbsolutePath(); + mainFile = Paths.get(UpdateUtils.class.getProtectionDomain().getCodeSource().getLocation().toURI()); } catch (URISyntaxException | IllegalArgumentException e) { - mainFileName = "ripme.jar"; + mainFile = Paths.get("ripme.jar"); logger.error("Unable to get path of jar"); e.printStackTrace(); } } - private static final String updateFileName = "ripme.jar.update"; - private static JSONObject ripmeJson; - private static String getUpdateJarURL(String latestVersion) { - return "https://github.com/" + REPO_NAME + "/releases/download/" + latestVersion + "/ripme.jar"; + // this works with a tag created in github, and thus download URLs like: + // https://github.com/ripmeapp2/ripme/releases/download/2.0.4/ripme-2.0.4-12-487e38cc.jar + return "https://github.com/" + + REPO_NAME + + "/releases/download/" + + latestVersion + + "/ripme-" + + latestVersion + ".jar"; } public static String getThisJarVersion() { @@ -70,7 +82,7 @@ public class UpdateUtils { public static void updateProgramCLI() { logger.info("Checking for update..."); - Document doc = null; + Document doc; try { 
logger.debug("Retrieving " + UpdateUtils.updateJsonURL); doc = Jsoup.connect(UpdateUtils.updateJsonURL).timeout(10 * 1000).ignoreContentType(true).get(); @@ -93,25 +105,22 @@ public class UpdateUtils { String latestVersion = ripmeJson.getString("latestVersion"); if (UpdateUtils.isNewerVersion(latestVersion)) { logger.info("Found newer version: " + latestVersion); - logger.info("Downloading new version..."); - logger.info("New version found, downloading..."); + logger.info("Downloading" +getUpdateJarURL(latestVersion) + " ..."); try { UpdateUtils.downloadJarAndLaunch(getUpdateJarURL(latestVersion), false); } catch (IOException e) { logger.error("Error while updating: ", e); } } else { - logger.debug("This version (" + UpdateUtils.getThisJarVersion() - + ") is the same or newer than the website's version (" + latestVersion + ")"); - logger.info("v" + UpdateUtils.getThisJarVersion() + " is the latest version"); - logger.debug("Running latest version: " + UpdateUtils.getThisJarVersion()); + logger.info("Running version (" + UpdateUtils.getThisJarVersion() + + ") is not older than release (" + latestVersion + ")"); } } public static void updateProgramGUI(JLabel configUpdateLabel) { configUpdateLabel.setText("Checking for update..."); - Document doc = null; + Document doc; try { logger.debug("Retrieving " + UpdateUtils.updateJsonURL); doc = Jsoup.connect(UpdateUtils.updateJsonURL).timeout(10 * 1000).ignoreContentType(true).get(); @@ -147,7 +156,7 @@ public class UpdateUtils { return; } configUpdateLabel.setText("Downloading new version..."); - logger.info("New version found, downloading..."); + logger.info("New version found, downloading " + getUpdateJarURL(latestVersion)); try { UpdateUtils.downloadJarAndLaunch(getUpdateJarURL(latestVersion), true); } catch (IOException e) { @@ -157,15 +166,14 @@ public class UpdateUtils { logger.error("Error while updating: ", e); } } else { - logger.debug("This version (" + UpdateUtils.getThisJarVersion() - + ") is the same or newer 
than the website's version (" + latestVersion + ")"); + logger.info("Running version (" + UpdateUtils.getThisJarVersion() + + ") is not older than release (" + latestVersion + ")"); configUpdateLabel.setText("v" + UpdateUtils.getThisJarVersion() + " is the latest version"); - logger.debug("Running latest version: " + UpdateUtils.getThisJarVersion()); } } - private static boolean isNewerVersion(String latestVersion) { + static boolean isNewerVersion(String latestVersion) { // If we're testing the update utils we want the program to always try to update if (Utils.getConfigBoolean("testing.always_try_to_update", false)) { logger.info("isNewerVersion is returning true because the key \"testing.always_try_to_update\" is true"); @@ -180,7 +188,7 @@ public class UpdateUtils { for (int i = 0; i < oldVersions.length; i++) { if (newVersions[i] > oldVersions[i]) { - logger.debug("oldVersion " + getThisJarVersion() + " < latestVersion" + latestVersion); + logger.debug("oldVersion " + getThisJarVersion() + " < latestVersion " + latestVersion); return true; } else if (newVersions[i] < oldVersions[i]) { logger.debug("oldVersion " + getThisJarVersion() + " > latestVersion " + latestVersion); @@ -194,26 +202,34 @@ public class UpdateUtils { } private static int[] versionStringToInt(String version) { - String strippedVersion = version.split("-")[0]; - String[] strVersions = strippedVersion.split("\\."); - int[] intVersions = new int[strVersions.length]; - for (int i = 0; i < strVersions.length; i++) { - intVersions[i] = Integer.parseInt(strVersions[i]); + // a version string looks like 1.7.94, 1.7.94-10-something + // 10 is the number of commits since the 1.7.94 tag, so newer + // the int array returned then contains e.g. 
1.7.94.0 or 1.7.94.10 + String[] strVersions = version.split("[.-]"); + // not consider more than 4 components of version, loop only the real number + // of components or maximum 4 components of the version string + int[] intVersions = new int[4]; + for (int i = 0; i < Math.min(4, strVersions.length); i++) { + // if it is an integer, set it, otherwise leave default 0 + if (strVersions[i].matches("\\d+")) { + intVersions[i] = Integer.parseInt(strVersions[i]); + } } return intVersions; } // Code take from https://stackoverflow.com/a/30925550 - public static String createSha256(File file) { + public static String createSha256(Path file) { try { MessageDigest digest = MessageDigest.getInstance("SHA-256"); - InputStream fis = new FileInputStream(file); - int n = 0; - byte[] buffer = new byte[8192]; - while (n != -1) { - n = fis.read(buffer); - if (n > 0) { - digest.update(buffer, 0, n); + try (InputStream fis = Files.newInputStream(file)) { + int n = 0; + byte[] buffer = new byte[8192]; + while (n != -1) { + n = fis.read(buffer); + if (n > 0) { + digest.update(buffer, 0, n); + } } } byte[] hash = digest.digest(); @@ -225,11 +241,9 @@ public class UpdateUtils { // As patch.py writes the hash in lowercase this must return the has in // lowercase return sb.toString().toLowerCase(); - } catch (NoSuchAlgorithmException e) { - logger.error("Got error getting file hash " + e.getMessage()); } catch (FileNotFoundException e) { - logger.error("Could not find file: " + file.getName()); - } catch (IOException e) { + logger.error("Could not find file: " + file); + } catch (NoSuchAlgorithmException | IOException e) { logger.error("Got error getting file hash " + e.getMessage()); } return null; @@ -241,13 +255,13 @@ public class UpdateUtils { .timeout(Utils.getConfigInteger("download.timeout", 60 * 1000)).maxBodySize(1024 * 1024 * 100) .execute(); - try (FileOutputStream out = new FileOutputStream(updateFileName)) { + try (OutputStream out = Files.newOutputStream(newFile)) { 
out.write(response.bodyAsBytes()); } // Only check the hash if the user hasn't disabled hash checking if (Utils.getConfigBoolean("security.check_update_hash", true)) { - String updateHash = createSha256(new File(updateFileName)); - logger.info("Download of new version complete; saved to " + updateFileName); + String updateHash = createSha256(newFile); + logger.info("Download of new version complete; saved to " + newFile); logger.info("Checking hash of update"); if (!ripmeJson.getString("currentHash").equals(updateHash)) { @@ -262,19 +276,17 @@ public class UpdateUtils { if (System.getProperty("os.name").toLowerCase().contains("win")) { // Windows - final String batchFile = "update_ripme.bat"; - final String batchPath = new File(batchFile).getAbsolutePath(); - String script = "@echo off\r\n" + "timeout 1\r\n" - + "copy \"" + updateFileName + "\" \"" + mainFileName + "\"\r\n" - + "del \"" + updateFileName + "\"\r\n"; - - if (shouldLaunch) - script += "\"" + mainFileName + "\"\r\n"; - script += "del \"" + batchPath + "\"\r\n"; - - final String[] batchExec = new String[] { batchPath }; + final Path batchFile = Paths.get("update_ripme.bat"); + String script = "@echo off\r\n" + "timeout 1\r\n" + + "copy \"" + newFile + "\" \"" + mainFile + "\"\r\n" + + "del \"" + newFile + "\"\r\n"; + + if (shouldLaunch) + script += "\"" + mainFile + "\"\r\n"; + script += "del \"" + batchFile + "\"\r\n"; + // Create updater script - try (BufferedWriter bw = new BufferedWriter(new FileWriter(batchFile))) { + try (BufferedWriter bw = Files.newBufferedWriter(batchFile)) { bw.write(script); bw.flush(); } @@ -284,10 +296,9 @@ public class UpdateUtils { Runtime.getRuntime().addShutdownHook(new Thread(() -> { try { logger.info("Executing: " + batchFile); - Runtime.getRuntime().exec(batchExec); + ProcessBuilder processBuilder = new ProcessBuilder(String.valueOf(batchFile)); + processBuilder.start(); } catch (IOException e) { - // TODO implement proper stack trace handling this is really just 
intented as a - // placeholder until you implement proper error handling e.printStackTrace(); } })); @@ -298,16 +309,14 @@ public class UpdateUtils { // Modifying file and launching it: *nix distributions don't have any issues // with modifying/deleting files // while they are being run - File mainFile = new File(mainFileName); - String mainFilePath = mainFile.getAbsolutePath(); - mainFile.delete(); - new File(updateFileName).renameTo(new File(mainFilePath)); + Files.move(newFile, mainFile, REPLACE_EXISTING); if (shouldLaunch) { // No need to do it during shutdown: the file used will indeed be the new one - Runtime.getRuntime().exec("java -jar " + mainFileName); + logger.info("Executing: " + mainFile); + Runtime.getRuntime().exec(new String[]{"java", "-jar", mainFile.toString()}); } logger.info("Update installed, newer version should be executed upon relaunch"); System.exit(0); } } -} +} \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/uiUtils/ContextActionProtections.java b/src/main/java/com/rarchives/ripme/uiUtils/ContextActionProtections.java new file mode 100644 index 00000000..9237fea9 --- /dev/null +++ b/src/main/java/com/rarchives/ripme/uiUtils/ContextActionProtections.java @@ -0,0 +1,31 @@ +package com.rarchives.ripme.uiUtils; + +import javax.swing.*; +import javax.swing.text.JTextComponent; +import java.awt.*; +import java.awt.datatransfer.Clipboard; +import java.awt.datatransfer.DataFlavor; +import java.awt.datatransfer.Transferable; +import java.awt.datatransfer.UnsupportedFlavorException; +import java.io.IOException; + +public class ContextActionProtections { + public static void pasteFromClipboard(JTextComponent textComponent) { + Clipboard clipboard = Toolkit.getDefaultToolkit().getSystemClipboard(); + Transferable transferable = clipboard.getContents(new Object()); + + try { + String clipboardContent = (String) transferable.getTransferData(DataFlavor.stringFlavor); + + // TODO check if commenting this causes regression + // 
Limit the pasted content to 96 characters + // if (clipboardContent.length() > 96) { + // clipboardContent = clipboardContent.substring(0, 96); + // } + // Set the text in the JTextField + textComponent.setText(clipboardContent); + } catch (UnsupportedFlavorException | IOException unable_to_modify_text_on_paste) { + unable_to_modify_text_on_paste.printStackTrace(); + } + } +} diff --git a/src/main/java/com/rarchives/ripme/utils/Http.java b/src/main/java/com/rarchives/ripme/utils/Http.java index 885a194d..a1705f5a 100644 --- a/src/main/java/com/rarchives/ripme/utils/Http.java +++ b/src/main/java/com/rarchives/ripme/utils/Http.java @@ -1,32 +1,42 @@ package com.rarchives.ripme.utils; -import java.io.IOException; -import java.net.URL; -import java.util.HashMap; -import java.util.Map; - -import org.apache.log4j.Logger; +import com.rarchives.ripme.ripper.AbstractRipper; +import org.apache.commons.lang.ArrayUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.json.JSONArray; import org.json.JSONObject; import org.jsoup.Connection; import org.jsoup.Connection.Method; import org.jsoup.Connection.Response; +import org.jsoup.HttpStatusException; import org.jsoup.Jsoup; import org.jsoup.nodes.Document; -import com.rarchives.ripme.ripper.AbstractRipper; +import javax.net.ssl.*; +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.security.SecureRandom; +import java.security.cert.X509Certificate; +import java.util.HashMap; +import java.util.Map; /** * Wrapper around the Jsoup connection methods. - * + *

* Benefit is retry logic. */ public class Http { - private static final int TIMEOUT = Utils.getConfigInteger("page.timeout", 5 * 1000); - private static final Logger logger = Logger.getLogger(Http.class); + private static final int TIMEOUT = Utils.getConfigInteger("page.timeout", 5 * 1000); + private static final Logger logger = LogManager.getLogger(Http.class); private int retries; - private String url; + private int retrySleep = 0; + private final String url; private Connection connection; // Constructors @@ -34,6 +44,7 @@ public class Http { this.url = url; defaultSettings(); } + private Http(URL url) { this.url = url.toExternalForm(); defaultSettings(); @@ -42,17 +53,64 @@ public class Http { public static Http url(String url) { return new Http(url); } + public static Http url(URL url) { return new Http(url); } private void defaultSettings() { - this.retries = Utils.getConfigInteger("download.retries", 1); + this.retries = Utils.getConfigInteger("download.retries", 3); + this.retrySleep = Utils.getConfigInteger("download.retry.sleep", 5000); connection = Jsoup.connect(this.url); connection.userAgent(AbstractRipper.USER_AGENT); connection.method(Method.GET); connection.timeout(TIMEOUT); connection.maxBodySize(0); + + // Extract cookies from config entry: + // Example config entry: + // cookies.reddit.com = reddit_session=; other_cookie= + connection.cookies(cookiesForURL(this.url)); + } + + private Map cookiesForURL(String u) { + Map cookiesParsed = new HashMap<>(); + + String cookieDomain = ""; + try { + URL parsed = new URI(u).toURL(); + String cookieStr = ""; + + String[] parts = parsed.getHost().split("\\."); + + // if url is www.reddit.com, we should also use cookies from reddit.com; + // this rule is applied for all subdomains (for all rippers); e.g. 
also + // old.reddit.com, new.reddit.com + while (parts.length > 1) { + String domain = String.join(".", parts); + // Try to get cookies for this host from config + logger.info("Trying to load cookies from config for " + domain); + cookieStr = Utils.getConfigString("cookies." + domain, ""); + if (!cookieStr.equals("")) { + cookieDomain = domain; + // we found something, start parsing + break; + } + parts = (String[]) ArrayUtils.remove(parts, 0); + } + + if (!cookieStr.equals("")) { + cookiesParsed = RipUtils.getCookiesFromString(cookieStr.trim()); + } + } catch (MalformedURLException | URISyntaxException e) { + logger.warn("Parsing url " + u + " while getting cookies", e); + } + + if (cookiesParsed.size() > 0) { + logger.info("Cookies for " + cookieDomain + " have been added to this request"); + } + + return cookiesParsed; } // Setters @@ -60,42 +118,52 @@ public class Http { connection.timeout(timeout); return this; } + public Http ignoreContentType() { connection.ignoreContentType(true); return this; } - public Http referrer(String ref) { + + public Http referrer(String ref) { connection.referrer(ref); return this; } + public Http referrer(URL ref) { return referrer(ref.toExternalForm()); } - public Http userAgent(String ua) { + + public Http userAgent(String ua) { connection.userAgent(ua); return this; } + public Http retries(int tries) { this.retries = tries; return this; } + public Http header(String name, String value) { - connection.header(name, value); + connection.header(name, value); return this; } - public Http cookies(Map cookies) { + + public Http cookies(Map cookies) { connection.cookies(cookies); return this; } - public Http data(Map data) { + + public Http data(Map data) { connection.data(data); return this; } + public Http data(String name, String value) { - Map data = new HashMap<>(); + Map data = new HashMap<>(); data.put(name, value); return data(data); } + public Http method(Method method) { connection.method(method); return this; @@ -105,6 
+173,7 @@ public class Http { public Connection connection() { return connection; } + public Document get() throws IOException { connection.method(Method.GET); return response().parse(); @@ -121,8 +190,14 @@ public class Http { return new JSONObject(jsonString); } + public JSONArray getJSONArray() throws IOException { + ignoreContentType(); + String jsonArray = response().body(); + return new JSONArray(jsonArray); + } + public Response response() throws IOException { - Response response = null; + Response response; IOException lastException = null; int retries = this.retries; while (--retries >= 0) { @@ -130,10 +205,71 @@ public class Http { response = connection.execute(); return response; } catch (IOException e) { - logger.warn("Error while loading " + url, e); + // Warn users about possibly fixable permission error + if (e instanceof org.jsoup.HttpStatusException) { + HttpStatusException ex = (HttpStatusException) e; + + // These status codes might indicate missing cookies + // 401 Unauthorized + // 403 Forbidden + + int status = ex.getStatusCode(); + if (status == 401 || status == 403) { + throw new IOException("Failed to load " + url + ": Status Code " + status + ". You might be able to circumvent this error by setting cookies for this domain", e); + } + if (status == 404) { + throw new IOException("File not found " + url + ": Status Code " + status + ". 
", e); + } + } + + if (retrySleep > 0 && retries >= 0) { + logger.warn("Error while loading " + url + " waiting "+ retrySleep + " ms before retrying.", e); + Utils.sleep(retrySleep); + } else { + logger.warn("Error while loading " + url, e); + } lastException = e; } } throw new IOException("Failed to load " + url + " after " + this.retries + " attempts", lastException); } + + public static void SSLVerifyOff() { + try { + TrustManager[] trustAllCerts = new TrustManager[]{ + new X509TrustManager() { + public X509Certificate[] getAcceptedIssuers() { + return null; + } + + public void checkClientTrusted(X509Certificate[] certs, String authType) { + } + + public void checkServerTrusted(X509Certificate[] certs, String authType) { + } + } + }; + SSLContext sslContext = SSLContext.getInstance("SSL"); + sslContext.init(null, trustAllCerts, new SecureRandom()); + HttpsURLConnection.setDefaultSSLSocketFactory(sslContext.getSocketFactory()); + HostnameVerifier allHostsValid = (hostname, session) -> true; + HttpsURLConnection.setDefaultHostnameVerifier(allHostsValid); + } catch (Exception e) { + logger.error("ignoreSSLVerification() failed."); + logger.error(e.getMessage()); + } + } + + public static void undoSSLVerifyOff() { + try { + // Reset to the default SSL socket factory and hostname verifier + SSLContext sslContext = SSLContext.getInstance("SSL"); + sslContext.init(null, null, new SecureRandom()); + HttpsURLConnection.setDefaultSSLSocketFactory(sslContext.getSocketFactory()); + HttpsURLConnection.setDefaultHostnameVerifier(HttpsURLConnection.getDefaultHostnameVerifier()); + } catch (Exception e) { + logger.error("undoSSLVerificationIgnore() failed."); + logger.error(e.getMessage()); + } + } } diff --git a/src/main/java/com/rarchives/ripme/utils/Proxy.java b/src/main/java/com/rarchives/ripme/utils/Proxy.java index be3c3b7e..0275bd5c 100644 --- a/src/main/java/com/rarchives/ripme/utils/Proxy.java +++ b/src/main/java/com/rarchives/ripme/utils/Proxy.java @@ -1,99 +1,99 @@ 
-package com.rarchives.ripme.utils; - -import java.net.Authenticator; -import java.net.PasswordAuthentication; -import java.util.Map; -import java.util.HashMap; - -/** - * Proxy/Socks setter - */ -public class Proxy { - private Proxy() { - } - - /** - * Parse the proxy server settings from string, using the format - * [user:password]@host[:port]. - * - * @param fullproxy the string to parse - * @return HashMap containing proxy server, port, user and password - */ - private static Map parseServer(String fullproxy) { - Map proxy = new HashMap(); - - if (fullproxy.lastIndexOf("@") != -1) { - int sservli = fullproxy.lastIndexOf("@"); - String userpw = fullproxy.substring(0, sservli); - String[] usersplit = userpw.split(":"); - proxy.put("user", usersplit[0]); - proxy.put("password", usersplit[1]); - fullproxy = fullproxy.substring(sservli + 1); - } - String[] servsplit = fullproxy.split(":"); - if (servsplit.length == 2) { - proxy.put("port", servsplit[1]); - } - proxy.put("server", servsplit[0]); - return proxy; - } - - /** - * Set a HTTP Proxy. 
- * WARNING: Authenticated HTTP Proxy won't work from jdk1.8.111 unless - * passing the flag -Djdk.http.auth.tunneling.disabledSchemes="" to java - * see https://stackoverflow.com/q/41505219 - * - * @param fullproxy the proxy, using format [user:password]@host[:port] - */ - public static void setHTTPProxy(String fullproxy) { - Map proxyServer = parseServer(fullproxy); - - if (proxyServer.get("user") != null && proxyServer.get("password") != null) { - Authenticator.setDefault(new Authenticator(){ - protected PasswordAuthentication getPasswordAuthentication(){ - PasswordAuthentication p = new PasswordAuthentication(proxyServer.get("user"), proxyServer.get("password").toCharArray()); - return p; - } - }); - System.setProperty("http.proxyUser", proxyServer.get("user")); - System.setProperty("http.proxyPassword", proxyServer.get("password")); - System.setProperty("https.proxyUser", proxyServer.get("user")); - System.setProperty("https.proxyPassword", proxyServer.get("password")); - } - - if (proxyServer.get("port") != null) { - System.setProperty("http.proxyPort", proxyServer.get("port")); - System.setProperty("https.proxyPort", proxyServer.get("port")); - } - - System.setProperty("http.proxyHost", proxyServer.get("server")); - System.setProperty("https.proxyHost", proxyServer.get("server")); - } - - /** - * Set a Socks Proxy Server (globally). 
- * - * @param fullsocks the socks server, using format [user:password]@host[:port] - */ - public static void setSocks(String fullsocks) { - - Map socksServer = parseServer(fullsocks); - if (socksServer.get("user") != null && socksServer.get("password") != null) { - Authenticator.setDefault(new Authenticator(){ - protected PasswordAuthentication getPasswordAuthentication(){ - PasswordAuthentication p = new PasswordAuthentication(socksServer.get("user"), socksServer.get("password").toCharArray()); - return p; - } - }); - System.setProperty("java.net.socks.username", socksServer.get("user")); - System.setProperty("java.net.socks.password", socksServer.get("password")); - } - if (socksServer.get("port") != null) { - System.setProperty("socksProxyPort", socksServer.get("port")); - } - - System.setProperty("socksProxyHost", socksServer.get("server")); - } - -} +package com.rarchives.ripme.utils; + +import java.net.Authenticator; +import java.net.PasswordAuthentication; +import java.util.Map; +import java.util.HashMap; + +/** + * Proxy/Socks setter + */ +public class Proxy { + private Proxy() { + } + + /** + * Parse the proxy server settings from string, using the format + * [user:password]@host[:port]. + * + * @param fullproxy the string to parse + * @return HashMap containing proxy server, port, user and password + */ + private static Map parseServer(String fullproxy) { + Map proxy = new HashMap(); + + if (fullproxy.lastIndexOf("@") != -1) { + int sservli = fullproxy.lastIndexOf("@"); + String userpw = fullproxy.substring(0, sservli); + String[] usersplit = userpw.split(":"); + proxy.put("user", usersplit[0]); + proxy.put("password", usersplit[1]); + fullproxy = fullproxy.substring(sservli + 1); + } + String[] servsplit = fullproxy.split(":"); + if (servsplit.length == 2) { + proxy.put("port", servsplit[1]); + } + proxy.put("server", servsplit[0]); + return proxy; + } + + /** + * Set a HTTP Proxy. 
+ * WARNING: Authenticated HTTP Proxy won't work from jdk1.8.111 unless + * passing the flag -Djdk.http.auth.tunneling.disabledSchemes="" to java + * see https://stackoverflow.com/q/41505219 + * + * @param fullproxy the proxy, using format [user:password]@host[:port] + */ + public static void setHTTPProxy(String fullproxy) { + Map proxyServer = parseServer(fullproxy); + + if (proxyServer.get("user") != null && proxyServer.get("password") != null) { + Authenticator.setDefault(new Authenticator(){ + protected PasswordAuthentication getPasswordAuthentication(){ + PasswordAuthentication p = new PasswordAuthentication(proxyServer.get("user"), proxyServer.get("password").toCharArray()); + return p; + } + }); + System.setProperty("http.proxyUser", proxyServer.get("user")); + System.setProperty("http.proxyPassword", proxyServer.get("password")); + System.setProperty("https.proxyUser", proxyServer.get("user")); + System.setProperty("https.proxyPassword", proxyServer.get("password")); + } + + if (proxyServer.get("port") != null) { + System.setProperty("http.proxyPort", proxyServer.get("port")); + System.setProperty("https.proxyPort", proxyServer.get("port")); + } + + System.setProperty("http.proxyHost", proxyServer.get("server")); + System.setProperty("https.proxyHost", proxyServer.get("server")); + } + + /** + * Set a Socks Proxy Server (globally). 
+ * + * @param fullsocks the socks server, using format [user:password]@host[:port] + */ + public static void setSocks(String fullsocks) { + + Map socksServer = parseServer(fullsocks); + if (socksServer.get("user") != null && socksServer.get("password") != null) { + Authenticator.setDefault(new Authenticator(){ + protected PasswordAuthentication getPasswordAuthentication(){ + PasswordAuthentication p = new PasswordAuthentication(socksServer.get("user"), socksServer.get("password").toCharArray()); + return p; + } + }); + System.setProperty("java.net.socks.username", socksServer.get("user")); + System.setProperty("java.net.socks.password", socksServer.get("password")); + } + if (socksServer.get("port") != null) { + System.setProperty("socksProxyPort", socksServer.get("port")); + } + + System.setProperty("socksProxyHost", socksServer.get("server")); + } + +} diff --git a/src/main/java/com/rarchives/ripme/utils/RipUtils.java b/src/main/java/com/rarchives/ripme/utils/RipUtils.java index 5dea166b..15e4128f 100644 --- a/src/main/java/com/rarchives/ripme/utils/RipUtils.java +++ b/src/main/java/com/rarchives/ripme/utils/RipUtils.java @@ -2,6 +2,8 @@ package com.rarchives.ripme.utils; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.*; import java.util.regex.Matcher; @@ -11,17 +13,19 @@ import com.rarchives.ripme.ripper.AbstractRipper; import com.rarchives.ripme.ripper.rippers.EroShareRipper; import com.rarchives.ripme.ripper.rippers.EromeRipper; import com.rarchives.ripme.ripper.rippers.ImgurRipper; +import com.rarchives.ripme.ripper.rippers.RedgifsRipper; import com.rarchives.ripme.ripper.rippers.VidbleRipper; -import com.rarchives.ripme.ripper.rippers.GfycatRipper; +import com.rarchives.ripme.ripper.rippers.SoundgasmRipper; import org.apache.commons.lang.math.NumberUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import 
org.apache.logging.log4j.Logger; import org.jsoup.Jsoup; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; public class RipUtils { - private static final Logger logger = Logger.getLogger(RipUtils.class); + private static final Logger logger = LogManager.getLogger(RipUtils.class); public static List getFilesFromURL(URL url) { List result = new ArrayList<>(); @@ -37,42 +41,30 @@ public class RipUtils { logger.debug("Got imgur image: " + imgurImage.url); result.add(imgurImage.url); } - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { logger.error("[!] Exception while loading album " + url, e); } return result; - } - else if (url.getHost().endsWith("imgur.com") && url.toExternalForm().contains(",")) { - // Imgur image series. - try { - logger.debug("Fetching imgur series at " + url); - ImgurRipper.ImgurAlbum imgurAlbum = ImgurRipper.getImgurSeries(url); - for (ImgurRipper.ImgurImage imgurImage : imgurAlbum.images) { - logger.debug("Got imgur image: " + imgurImage.url); - result.add(imgurImage.url); - } - } catch (IOException e) { - logger.error("[!] 
Exception while loading album " + url, e); - } - } else if (url.getHost().endsWith("i.imgur.com") && url.toExternalForm().contains("gifv")) { + } + else if (url.getHost().endsWith("i.imgur.com") && url.toExternalForm().contains("gifv")) { // links to imgur gifvs try { - result.add(new URL(url.toExternalForm().replaceAll(".gifv", ".mp4"))); - } catch (IOException e) { + result.add(new URI(url.toExternalForm().replaceAll(".gifv", ".mp4")).toURL()); + } catch (IOException | URISyntaxException e) { logger.info("Couldn't get gifv from " + url); } return result; } - else if (url.getHost().endsWith("gfycat.com")) { + else if (url.getHost().endsWith("redgifs.com") || url.getHost().endsWith("gifdeliverynetwork.com")) { try { - logger.debug("Fetching gfycat page " + url); - String videoURL = GfycatRipper.getVideoURL(url); - logger.debug("Got gfycat URL: " + videoURL); - result.add(new URL(videoURL)); - } catch (IOException e) { + logger.debug("Fetching redgifs page " + url); + String videoURL = RedgifsRipper.getVideoURL(url); + logger.debug("Got redgifs URL: " + videoURL); + result.add(new URI(videoURL).toURL()); + } catch (IOException | URISyntaxException e) { // Do nothing - logger.warn("Exception while retrieving gfycat page:", e); + logger.warn("Exception while retrieving redgifs page:", e); } return result; } @@ -80,7 +72,7 @@ public class RipUtils { try { logger.info("Getting vidble album " + url); result.addAll(VidbleRipper.getURLsFromPage(url)); - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { // Do nothing logger.warn("Exception while retrieving vidble page:", e); } @@ -90,7 +82,7 @@ public class RipUtils { try { logger.info("Getting eroshare album " + url); result.addAll(EroShareRipper.getURLs(url)); - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { // Do nothing logger.warn("Exception while retrieving eroshare page:", e); } @@ -106,14 +98,28 @@ public class RipUtils { EromeRipper r = new EromeRipper(url); 
Document tempDoc = r.getFirstPage(); for (String u : r.getURLsFromPage(tempDoc)) { - result.add(new URL(u)); + result.add(new URI(u).toURL()); } - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { // Do nothing logger.warn("Exception while retrieving eroshare page:", e); } return result; } + else if (url.toExternalForm().contains("soundgasm.net")) { + try { + logger.info("Getting soundgasm page " + url); + SoundgasmRipper r = new SoundgasmRipper(url); + Document tempDoc = r.getFirstPage(); + for (String u : r.getURLsFromPage(tempDoc)) { + result.add(new URI(u).toURL()); + } + } catch (IOException | URISyntaxException e) { + // Do nothing + logger.warn("Exception while retrieving soundgasm page:", e); + } + return result; + } Pattern p = Pattern.compile("https?://i.reddituploads.com/([a-zA-Z0-9]+)\\?.*"); Matcher m = p.matcher(url.toExternalForm()); @@ -121,8 +127,8 @@ public class RipUtils { logger.info("URL: " + url.toExternalForm()); String u = url.toExternalForm().replaceAll("&", "&"); try { - result.add(new URL(u)); - } catch (MalformedURLException e) { + result.add(new URI(u).toURL()); + } catch (MalformedURLException | URISyntaxException e) { } return result; } @@ -132,11 +138,11 @@ public class RipUtils { m = p.matcher(url.toExternalForm()); if (m.matches()) { try { - URL singleURL = new URL(m.group(1)); + URL singleURL = new URI(m.group(1)).toURL(); logger.debug("Found single URL: " + singleURL); result.add(singleURL); return result; - } catch (MalformedURLException e) { + } catch (MalformedURLException | URISyntaxException e) { logger.error("[!] 
Not a valid URL: '" + url + "'", e); } } @@ -150,19 +156,19 @@ public class RipUtils { .get(); for (Element el : doc.select("meta")) { if (el.attr("property").equals("og:video")) { - result.add(new URL(el.attr("content"))); + result.add(new URI(el.attr("content")).toURL()); return result; } else if (el.attr("name").equals("twitter:image:src")) { - result.add(new URL(el.attr("content"))); + result.add(new URI(el.attr("content")).toURL()); return result; } else if (el.attr("name").equals("twitter:image")) { - result.add(new URL(el.attr("content"))); + result.add(new URI(el.attr("content")).toURL()); return result; } } - } catch (IOException ex) { + } catch (IOException | URISyntaxException ex) { logger.error("[!] Error", ex); } @@ -301,7 +307,7 @@ public class RipUtils { Map cookies = new HashMap<>(); for (String pair : line.split(";")) { String[] kv = pair.split("="); - cookies.put(kv[0], kv[1]); + cookies.put(kv[0].trim(), kv[1]); } return cookies; } diff --git a/src/main/java/com/rarchives/ripme/utils/Utils.java b/src/main/java/com/rarchives/ripme/utils/Utils.java index a009c7a1..88eb0c5e 100644 --- a/src/main/java/com/rarchives/ripme/utils/Utils.java +++ b/src/main/java/com/rarchives/ripme/utils/Utils.java @@ -1,17 +1,32 @@ package com.rarchives.ripme.utils; +import com.rarchives.ripme.ripper.AbstractRipper; +import org.apache.commons.configuration.ConfigurationException; +import org.apache.commons.configuration.PropertiesConfiguration; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LoggerContext; +import org.apache.logging.log4j.core.appender.RollingFileAppender; +import org.apache.logging.log4j.core.appender.rolling.DefaultRolloverStrategy; +import org.apache.logging.log4j.core.appender.rolling.SizeBasedTriggeringPolicy; +import org.apache.logging.log4j.core.appender.rolling.TriggeringPolicy; +import org.apache.logging.log4j.core.config.Configuration; +import 
org.apache.logging.log4j.core.config.LoggerConfig; + +import javax.sound.sampled.AudioSystem; +import javax.sound.sampled.Clip; +import javax.sound.sampled.Line; +import javax.sound.sampled.LineEvent; import java.io.File; import java.io.FileNotFoundException; -import java.io.FilenameFilter; import java.io.IOException; -import java.io.InputStream; -import java.io.UnsupportedEncodingException; import java.lang.reflect.Constructor; import java.net.URI; import java.net.URISyntaxException; import java.net.URL; import java.net.URLDecoder; import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; import java.nio.file.FileSystem; import java.nio.file.FileSystems; import java.nio.file.Files; @@ -22,58 +37,42 @@ import java.util.Arrays; import java.util.Collections; import java.util.Enumeration; import java.util.HashMap; -import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.MissingResourceException; +import java.util.Objects; import java.util.ResourceBundle; import java.util.jar.JarEntry; import java.util.jar.JarFile; import java.util.regex.Matcher; import java.util.regex.Pattern; -import java.util.stream.Stream; - -import javax.sound.sampled.AudioSystem; -import javax.sound.sampled.Clip; -import javax.sound.sampled.Line; -import javax.sound.sampled.LineEvent; - -import com.rarchives.ripme.ripper.AbstractRipper; - -import org.apache.commons.configuration.ConfigurationException; -import org.apache.commons.configuration.PropertiesConfiguration; -import org.apache.log4j.LogManager; -import org.apache.log4j.Logger; -import org.apache.log4j.PropertyConfigurator; /** * Common utility functions used in various places throughout the project. 
*/ public class Utils { - private static final Pattern pattern = Pattern.compile("LabelsBundle_(?[A-Za-z_]+).properties"); - private static final String DEFAULT_LANG = "en_US"; private static final String RIP_DIRECTORY = "rips"; private static final String CONFIG_FILE = "rip.properties"; private static final String OS = System.getProperty("os.name").toLowerCase(); - private static final Logger LOGGER = Logger.getLogger(Utils.class); + private static final Logger LOGGER = LogManager.getLogger(Utils.class); private static final int SHORTENED_PATH_LENGTH = 12; private static PropertiesConfiguration config; - private static HashMap> cookieCache; - private static HashMap magicHash = new HashMap<>(); + private static final HashMap> cookieCache; + private static final HashMap magicHash = new HashMap<>(); - private static ResourceBundle resourceBundle = null; + private static ResourceBundle resourceBundle; static { cookieCache = new HashMap<>(); try { String configPath = getConfigFilePath(); - File file = new File(configPath); + Path file = Paths.get(configPath); - if (!file.exists()) { + if (!Files.exists(file)) { // Use default bundled with .jar configPath = CONFIG_FILE; } @@ -81,7 +80,7 @@ public class Utils { config = new PropertiesConfiguration(configPath); LOGGER.info("Loaded " + config.getPath()); - if (file.exists()) { + if (Files.exists(file)) { // Config was loaded from file if (!config.containsKey("twitter.auth") || !config.containsKey("twitter.max_requests") || !config.containsKey("tumblr.auth") || !config.containsKey("error.skip404") @@ -91,7 +90,7 @@ public class Utils { // Need to reload the default config // See https://github.com/4pr0n/ripme/issues/158 LOGGER.warn("Config does not contain key fields, deleting old config"); - file.delete(); + Files.delete(file); config = new PropertiesConfiguration(CONFIG_FILE); LOGGER.info("Loaded " + config.getPath()); } @@ -108,21 +107,21 @@ public class Utils { * * @return Root directory to save rips to. 
*/ - public static File getWorkingDirectory() { - String currentDir = ""; - try { - currentDir = getJarDirectory().getCanonicalPath() + File.separator + RIP_DIRECTORY + File.separator; - } catch (IOException e) { - LOGGER.error("Error while finding working dir: ", e); - } + public static Path getWorkingDirectory() { + String currentDir = getJarDirectory() + File.separator + RIP_DIRECTORY + File.separator; if (config != null) { currentDir = getConfigString("rips.directory", currentDir); } - File workingDir = new File(currentDir); - if (!workingDir.exists()) { - workingDir.mkdirs(); + Path workingDir = Paths.get(currentDir); + if (!Files.exists(workingDir)) { + try { + Files.createDirectory(workingDir); + } catch (IOException e) { + LOGGER.error("WorkingDir " + workingDir + " not exists, and could not be created. Set to user.home, continue."); + workingDir = Paths.get(System.getProperty("user.home")); + } } return workingDir; } @@ -239,13 +238,13 @@ public class Utils { + File.separator + "ripme"; } - private static File getJarDirectory() { - File jarDirectory = Utils.class.getResource("/rip.properties").toString().contains("jar:") - ? new File(System.getProperty("java.class.path")).getParentFile() - : new File(System.getProperty("user.dir")); + private static Path getJarDirectory() { + Path jarDirectory = Objects.requireNonNull(Utils.class.getResource("/rip.properties")).toString().contains("jar:") + ? Paths.get(System.getProperty("java.class.path")).getParent() + : Paths.get(System.getProperty("user.dir")); if (jarDirectory == null) - jarDirectory = new File("."); + jarDirectory = Paths.get("."); return jarDirectory; } @@ -254,16 +253,8 @@ public class Utils { * Determines if the app is running in a portable mode. i.e. 
on a USB stick */ private static boolean portableMode() { - try { - File file = new File(getJarDirectory().getCanonicalPath() + File.separator + CONFIG_FILE); - if (file.exists() && !file.isDirectory()) { - return true; - } - } catch (IOException e) { - return false; - } - - return false; + Path file = getJarDirectory().resolve(CONFIG_FILE); + return Files.exists(file) && !Files.isDirectory(file); } /** @@ -272,7 +263,7 @@ public class Utils { public static String getConfigDir() { if (portableMode()) { try { - return getJarDirectory().getCanonicalPath(); + return getJarDirectory().toAbsolutePath().toString(); } catch (Exception e) { return "."; } @@ -286,7 +277,7 @@ public class Utils { return getUnixConfigDir(); try { - return getJarDirectory().getCanonicalPath(); + return getJarDirectory().toAbsolutePath().toString(); } catch (Exception e) { return "."; } @@ -296,8 +287,12 @@ public class Utils { * Delete the url history file */ public static void clearURLHistory() { - File file = new File(getURLHistoryFile()); - file.delete(); + Path file = Paths.get(getURLHistoryFile()); + try { + Files.delete(file); + } catch (IOException e) { + e.printStackTrace(); + } } /** @@ -324,16 +319,13 @@ public class Utils { * @param saveAs The File path * @return saveAs in relation to the CWD */ - public static String removeCWD(File saveAs) { - String prettySaveAs = saveAs.toString(); + public static String removeCWD(Path saveAs) { try { - prettySaveAs = saveAs.getCanonicalPath(); - String cwd = new File(".").getCanonicalPath() + File.separator; - prettySaveAs = prettySaveAs.replace(cwd, "." 
+ File.separator); - } catch (Exception e) { - LOGGER.error("Exception: ", e); + return Paths.get(".").toAbsolutePath().relativize(saveAs).toString(); + } + catch (IllegalArgumentException e) { + return saveAs.toString(); } - return prettySaveAs; } /** @@ -359,7 +351,7 @@ public class Utils { if (wasFirstParam) { c = "?"; } - url = url.substring(0, paramIndex) + c + url.substring(nextParam + 1, url.length()); + url = url.substring(0, paramIndex) + c + url.substring(nextParam + 1); } else { url = url.substring(0, paramIndex); } @@ -368,16 +360,6 @@ public class Utils { return url; } - /** - * Removes the current working directory from a given filename - * - * @param file Path to the file - * @return 'file' without the leading current working directory - */ - public static String removeCWD(String file) { - return removeCWD(new File(file)); - } - /** * Get a list of all Classes within a package. Works with file system projects * and jar files! Borrowed from StackOverflow, but I don't have a link :[ @@ -410,6 +392,7 @@ public class Utils { if (directory != null && directory.exists()) { // Get the list of the files contained in the package String[] files = directory.list(); + assert files != null; for (String file : files) { if (file.endsWith(".class") && !file.contains("$")) { String className = pkgname + '.' + file.substring(0, file.length() - 6); @@ -424,7 +407,7 @@ public class Utils { // Load from JAR try { String jarPath = fullPath.replaceFirst("[.]jar[!].*", ".jar").replaceFirst("file:", ""); - jarPath = URLDecoder.decode(jarPath, "UTF-8"); + jarPath = URLDecoder.decode(jarPath, StandardCharsets.UTF_8); JarFile jarFile = new JarFile(jarPath); Enumeration entries = jarFile.entries(); while (entries.hasMoreElements()) { @@ -458,21 +441,23 @@ public class Utils { * @return The simplified path to the file. 
*/ public static String shortenPath(String path) { - return shortenPath(new File(path)); + return shortenPath(path); } /** * Shortens the path to a file * - * @param file File object that you want the shortened path of. + * @param path File object that you want the shortened path of. * @return The simplified path to the file. */ - public static String shortenPath(File file) { - String path = removeCWD(file); - if (path.length() < SHORTENED_PATH_LENGTH * 2) { - return path; + public static String shortenPath(Path path) { + Path prettyPath = path.normalize(); + if (prettyPath.toString().length() < SHORTENED_PATH_LENGTH * 2) { + return prettyPath.toString(); } - return path.substring(0, SHORTENED_PATH_LENGTH) + "..." + path.substring(path.length() - SHORTENED_PATH_LENGTH); + return prettyPath.toString().substring(0, SHORTENED_PATH_LENGTH) + + "..." + + prettyPath.toString().substring(prettyPath.toString().length() - SHORTENED_PATH_LENGTH); } /** @@ -486,8 +471,15 @@ public class Utils { return text; } + /** + * Removes any potentially unsafe characters from a string and truncates it on a maximum length of 100 characters. + * Characters considered safe are alpha numerical characters as well as minus, dot, comma, underscore and whitespace. 
+ * + * @param text The potentially unsafe text + * @return a filesystem safe string + */ public static String filesystemSafe(String text) { - text = text.replaceAll("[^a-zA-Z0-9.-]", "_").replaceAll("__", "_").replaceAll("_+$", ""); + text = text.replaceAll("[^a-zA-Z0-9-.,_ ]", "").trim(); if (text.length() > 100) { text = text.substring(0, 99); } @@ -500,7 +492,7 @@ public class Utils { * @param path - original path entered to be ripped * @return path of existing folder or the original path if not present */ - public static String getOriginalDirectory(String path) { + public static String getOriginalDirectory(String path) throws IOException { int index; if (isUnix() || isMacOS()) { @@ -510,13 +502,15 @@ public class Utils { return path; } - String original = path; // needs to be checked if lowercase exists - String lastPart = original.substring(index + 1).toLowerCase(); // setting lowercase to check if it exists + String lastPart = path.substring(index + 1).toLowerCase(); // setting lowercase to check if it exists // Get a List of all Directories and check its lowercase // if file exists return it File file = new File(path.substring(0, index)); - ArrayList names = new ArrayList<>(Arrays.asList(file.list())); + if (!(file.isDirectory() && file.canWrite() && file.canExecute())) { + throw new IOException("Original directory \"" + file + "\" is no directory or not writeable."); + } + ArrayList names = new ArrayList<>(Arrays.asList(Objects.requireNonNull(file.list()))); for (String name : names) { if (name.toLowerCase().equals(lastPart)) { @@ -525,7 +519,8 @@ public class Utils { } } - return original; + // otherwise return original path + return path; } /** @@ -536,7 +531,7 @@ public class Utils { */ public static String bytesToHumanReadable(int bytes) { float fbytes = (float) bytes; - String[] mags = new String[] { "", "K", "M", "G", "T" }; + String[] mags = new String[]{"", "K", "M", "G", "T"}; int magIndex = 0; while (fbytes >= 1024) { fbytes /= 1024; @@ -598,20 
+593,32 @@ public class Utils { * Configures root logger, either for FILE output or just console. */ public static void configureLogger() { - LogManager.shutdown(); - String logFile = getConfigBoolean("log.save", false) ? "log4j.file.properties" : "log4j.properties"; - try (InputStream stream = Utils.class.getClassLoader().getResourceAsStream(logFile)) { - if (stream == null) { - PropertyConfigurator.configure("src/main/resources/" + logFile); - } else { - PropertyConfigurator.configure(stream); + LoggerContext ctx = (LoggerContext) LogManager.getContext(false); + Configuration config = ctx.getConfiguration(); + LoggerConfig loggerConfig = config.getLoggerConfig(LogManager.ROOT_LOGGER_NAME); + + // write to ripme.log file if checked in GUI + boolean logSave = getConfigBoolean("log.save", false); + if (logSave) { + LOGGER.debug("add rolling appender ripmelog"); + TriggeringPolicy tp = SizeBasedTriggeringPolicy.createPolicy("20M"); + DefaultRolloverStrategy rs = DefaultRolloverStrategy.newBuilder().withMax("2").build(); + RollingFileAppender rolling = RollingFileAppender.newBuilder() + .setName("ripmelog") + .withFileName("ripme.log") + .withFilePattern("%d{yyyy-MM-dd HH:mm:ss} %p %m%n") + .withPolicy(tp) + .withStrategy(rs) + .build(); + loggerConfig.addAppender(rolling, null, null); + } else { + LOGGER.debug("remove rolling appender ripmelog"); + if (config.getAppender("ripmelog") != null) { + config.getAppender("ripmelog").stop(); } - - LOGGER.info("Loaded " + logFile); - } catch (IOException e) { - LOGGER.error(e.getMessage(), e); + loggerConfig.removeAppender("ripmelog"); } - + ctx.updateLoggers(); // This causes all Loggers to refetch information from their LoggerConfig. 
} /** @@ -655,18 +662,13 @@ public class Utils { String[] parts = query.split("&"); int pos; - try { - for (String part : parts) { - if ((pos = part.indexOf('=')) >= 0) { - res.put(URLDecoder.decode(part.substring(0, pos), "UTF-8"), - URLDecoder.decode(part.substring(pos + 1), "UTF-8")); - } else { - res.put(URLDecoder.decode(part, "UTF-8"), ""); - } + for (String part : parts) { + if ((pos = part.indexOf('=')) >= 0) { + res.put(URLDecoder.decode(part.substring(0, pos), StandardCharsets.UTF_8), + URLDecoder.decode(part.substring(pos + 1), StandardCharsets.UTF_8)); + } else { + res.put(URLDecoder.decode(part, StandardCharsets.UTF_8), ""); } - } catch (UnsupportedEncodingException e) { - // Shouldn't happen since UTF-8 is required to be supported - throw new RuntimeException(e); } return res; @@ -687,20 +689,15 @@ public class Utils { String[] parts = query.split("&"); int pos; - try { - for (String part : parts) { - if ((pos = part.indexOf('=')) >= 0) { - if (URLDecoder.decode(part.substring(0, pos), "UTF-8").equals(key)) { - return URLDecoder.decode(part.substring(pos + 1), "UTF-8"); - } - - } else if (URLDecoder.decode(part, "UTF-8").equals(key)) { - return ""; + for (String part : parts) { + if ((pos = part.indexOf('=')) >= 0) { + if (URLDecoder.decode(part.substring(0, pos), StandardCharsets.UTF_8).equals(key)) { + return URLDecoder.decode(part.substring(pos + 1), StandardCharsets.UTF_8); } + + } else if (URLDecoder.decode(part, StandardCharsets.UTF_8).equals(key)) { + return ""; } - } catch (UnsupportedEncodingException e) { - // Shouldn't happen since UTF-8 is required to be supported - throw new RuntimeException(e); } return null; @@ -731,20 +728,19 @@ public class Utils { * of the UI. * * @return Returns the default resource bundle using the language specified in - * the config file. + * the config file. 
*/ public static ResourceBundle getResourceBundle(String langSelect) { if (langSelect == null) { if (!getConfigString("lang", "").equals("")) { - String[] langCode = getConfigString("lang", "").split("_"); LOGGER.info("Setting locale to " + getConfigString("lang", "")); - return ResourceBundle.getBundle("LabelsBundle", new Locale(langCode[0], langCode[1]), + return ResourceBundle.getBundle("LabelsBundle", Locale.forLanguageTag(getConfigString("lang", "")), new UTF8Control()); } } else { - String[] langCode = langSelect.split("_"); - LOGGER.info("Setting locale to " + langSelect); - return ResourceBundle.getBundle("LabelsBundle", new Locale(langCode[0], langCode[1]), new UTF8Control()); + String[] langCode = langSelect.split("-"); + LOGGER.info("set locale, langcoe: {}, selected langauge: {}, locale: {}", langCode, langSelect, Locale.forLanguageTag(langSelect)); + return ResourceBundle.getBundle("LabelsBundle", Locale.forLanguageTag(langSelect), new UTF8Control()); } try { LOGGER.info("Setting locale to default"); @@ -757,6 +753,7 @@ public class Utils { public static void setLanguage(String langSelect) { resourceBundle = getResourceBundle(langSelect); + LOGGER.info("Selected resource bundle locale: {}, from {}", resourceBundle.getLocale().toString(), langSelect); } public static String getSelectedLanguage() { @@ -765,13 +762,15 @@ public class Utils { // All the langs ripme has been translated into public static String[] getSupportedLanguages() { + final Pattern pattern = Pattern.compile("LabelsBundle_(?[A-Za-z_]+).properties"); + final String DEFAULT_LANG = "en-US"; ArrayList filesList = new ArrayList<>(); try { - URI uri = Utils.class.getResource("/rip.properties").toURI(); + URI uri = Objects.requireNonNull(Utils.class.getResource("/rip.properties")).toURI(); Path myPath; if (uri.getScheme().equals("jar")) { - FileSystem fileSystem = FileSystems.newFileSystem(uri, Collections.emptyMap()); + FileSystem fileSystem = FileSystems.newFileSystem(uri, 
Collections.emptyMap()); myPath = fileSystem.getPath("/"); } else { myPath = Paths.get(uri).getParent(); @@ -784,19 +783,19 @@ public class Utils { for (int i = 0; i < filesList.size(); i++) { Matcher matcher = pattern.matcher(filesList.get(i).toString()); if (matcher.find()) - langs[i] = matcher.group("lang"); + langs[i] = matcher.group("lang").replace("_", "-"); } return langs; } catch (Exception e) { e.printStackTrace(); // On error return default language - return new String[] { DEFAULT_LANG }; + return new String[]{DEFAULT_LANG}; } } public static String getLocalizedString(String key) { - LOGGER.debug(String.format("Getting key %s in %s value %s", key, getSelectedLanguage(), + LOGGER.debug(String.format("Key %s in %s is: %s", key, getSelectedLanguage(), resourceBundle.getString(key))); return resourceBundle.getString(key); } @@ -809,11 +808,10 @@ public class Utils { * @param bytesCompleted How many bytes have been downloaded * @param bytesTotal The total size of the file that is being * downloaded - * @return Returns the formatted status text for rippers using the byte progress - * bar + * @return Returns the formatted status text for rippers using the byte progresbar */ public static String getByteStatusText(int completionPercentage, int bytesCompleted, int bytesTotal) { - return String.valueOf(completionPercentage) + "% - " + Utils.bytesToHumanReadable(bytesCompleted) + " / " + return completionPercentage + "% - " + Utils.bytesToHumanReadable(bytesCompleted) + " / " + Utils.bytesToHumanReadable(bytesTotal); } @@ -830,46 +828,29 @@ public class Utils { } private static void initialiseMagicHashMap() { - magicHash.put(ByteBuffer.wrap(new byte[] { -1, -40, -1, -37, 0, 0, 0, 0 }), "jpeg"); - magicHash.put(ByteBuffer.wrap(new byte[] { -119, 80, 78, 71, 13, 0, 0, 0 }), "png"); + magicHash.put(ByteBuffer.wrap(new byte[]{-1, -40, -1, -37, 0, 0, 0, 0}), "jpeg"); + magicHash.put(ByteBuffer.wrap(new byte[]{-119, 80, 78, 71, 13, 0, 0, 0}), "png"); } // Checks if a file 
exists ignoring it's extension. - // Code from: https://stackoverflow.com/a/17698068 - public static boolean fuzzyExists(File folder, String fileName) { - if (!folder.exists()) { - return false; - } - File[] listOfFiles = folder.listFiles(); - if (listOfFiles == null) { - return false; - } + public static boolean fuzzyExists(Path folder, String filename) { + return Files.exists(folder.resolve(filename)); + } - for (File file : listOfFiles) { - if (file.isFile()) { - String[] filename = file.getName().split("\\.(?=[^\\.]+$)"); // split filename from it's extension - if (filename[0].equalsIgnoreCase(fileName)) { - return true; - } - } - } - return false; + public static Path getPath(String pathToSanitize) { + return Paths.get(sanitizeSaveAs(pathToSanitize)); } public static String sanitizeSaveAs(String fileNameToSan) { - return fileNameToSan.replaceAll("[\\\\/:*?\"<>|]", "_"); + return fileNameToSan.replaceAll("[\\\\:*?\"<>|]", "_"); } - public static File shortenSaveAsWindows(String ripsDirPath, String fileName) throws FileNotFoundException { - // int ripDirLength = ripsDirPath.length(); - // int maxFileNameLength = 260 - ripDirLength; - // LOGGER.info(maxFileNameLength); + public static Path shortenSaveAsWindows(String ripsDirPath, String fileName) throws FileNotFoundException { LOGGER.error("The filename " + fileName + " is to long to be saved on this file system."); LOGGER.info("Shortening filename"); String fullPath = ripsDirPath + File.separator + fileName; // How long the path without the file name is int pathLength = ripsDirPath.length(); - int fileNameLength = fileName.length(); if (pathLength == 260) { // We've reached the max length, there's nothing more we can do throw new FileNotFoundException("File path is too long for this OS"); @@ -879,11 +860,17 @@ public class Utils { // file extension String fileExt = saveAsSplit[saveAsSplit.length - 1]; // The max limit for paths on Windows is 260 chars - LOGGER.info(fullPath.substring(0, 259 - pathLength - 
fileExt.length() + 1) + "." + fileExt); fullPath = fullPath.substring(0, 259 - pathLength - fileExt.length() + 1) + "." + fileExt; LOGGER.info(fullPath); LOGGER.info(fullPath.length()); - return new File(fullPath); + return Paths.get(fullPath); } + public static void sleep(long time) { + try { + Thread.sleep(time); + } catch (final InterruptedException e1) { + e1.printStackTrace(); + } + } } diff --git a/src/main/resources/LabelsBundle.properties b/src/main/resources/LabelsBundle.properties index 575f4f8e..6a48b245 100644 --- a/src/main/resources/LabelsBundle.properties +++ b/src/main/resources/LabelsBundle.properties @@ -12,7 +12,8 @@ check.for.updates = Check for updates auto.update = Auto-update? max.download.threads = Maximum download threads: timeout.mill = Timeout (in milliseconds): -retry.download.count = Retry download count +retry.download.count = Retry download count: +retry.sleep.mill = Wait between retries (in milliseconds): overwrite.existing.files = Overwrite existing files? sound.when.rip.completes = Sound when rip completes preserve.order = Preserve order @@ -25,6 +26,7 @@ save.descriptions = Save descriptions prefer.mp4.over.gif = Prefer MP4 over GIF restore.window.position = Restore window position remember.url.history = Remember URL history +ssl.verify.off = SSL verify off loading.history.from = Loading history from # Queue keys diff --git a/src/main/resources/LabelsBundle_de_DE.properties b/src/main/resources/LabelsBundle_de_DE.properties index 61461aba..b13a0bb9 100644 --- a/src/main/resources/LabelsBundle_de_DE.properties +++ b/src/main/resources/LabelsBundle_de_DE.properties @@ -10,9 +10,9 @@ Configuration = Konfiguration current.version = Aktuelle Version check.for.updates = Suche nach Aktualisierungen auto.update = Automatisch Aktualisieren? 
-max.download.threads = Maximum download threads -timeout.mill = Timeout (in milliseconds): -retry.download.count = Anzahl der Downloadversuche +max.download.threads = Maximale Download-Threads: +timeout.mill = Timeout (in Milliseconds): +retry.download.count = Anzahl der Downloadversuche: overwrite.existing.files = Überschreibe bereits existierende Dateien? sound.when.rip.completes = Ton abspielen bei fertigem Download preserve.order = Reihenfolge beibehalten @@ -24,7 +24,7 @@ autorip.from.clipboard = Automatisch Downloaden von der Zwischenablage save.descriptions = Speichere Beschreibungen prefer.mp4.over.gif = Bevorzuge MP4 über GIF restore.window.position = Wieder herstellen der Fensterposition -remember.url.history = Erinnere URL Verlauf +remember.url.history = Speichere URL Verlauf loading.history.from = Lade Verlauf von # Misc UI keys @@ -32,11 +32,11 @@ loading.history.from = Lade Verlauf von loading.history.from.configuration = Lade Verlauf aus Konfiguration interrupted.while.waiting.to.rip.next.album = Unterbrochen während Download des nächsten Albums inactive = Inaktiv -re-rip.checked = Re-rip Überprüft +re-rip.checked = Re-rip Ausgewählte remove = Entfernen clear = Leeren -download.url.list = Download url list -select.save.dir = Select Save Directory +download.url.list = Download URL Liste +select.save.dir = Wähle Zielverzeichnis # Keys for the logs generated by DownloadFileThread @@ -54,4 +54,4 @@ http.status.exception = HTTP status exception exception.while.downloading.file = Exception while downloading file failed.to.download = Failed to download skipping = Skipping -file.already.exists = file already exists \ No newline at end of file +file.already.exists = file already exists diff --git a/src/main/resources/LabelsBundle_el_GR.properties b/src/main/resources/LabelsBundle_el_GR.properties new file mode 100644 index 00000000..14656e87 --- /dev/null +++ b/src/main/resources/LabelsBundle_el_GR.properties @@ -0,0 +1,75 @@ +Log = Log +History = Ιστορικό 
+created = δημιουργήθηκε +modified = τροποποιήθηκε +queue = Ουρά +Configuration = Ρυθμίσεις +open = Άνοιγμα + +# Keys for the Configuration menu +current.version = Τρέχουσα έκδοση +check.for.updates = Έλεγχος για ενημερώσεις +auto.update = Αυτόματη ενημέρωση? +max.download.threads = Μέγιστος αριθμός παράλληλων συνδέσεων: +timeout.mill = Λήξη (σε χιλιοστά του δευτερολέπτου): +retry.download.count = Αριθμός επανάληψεων μεταφόρτωσης: +overwrite.existing.files = Να αντικατασταθούν τα υπάρχοντα αρχεία? +sound.when.rip.completes = Ήχος όταν το rip ολοκληρωθεί +preserve.order = Διατήρηση σειράς +save.logs = Αποθήκευση logs +notification.when.rip.starts = Ειδοποίηση όταν αρχισει το rip +save.urls.only = Αποθήκευση μόνο των URL +save.album.titles = Αποθήκευση τίτλων των αλμπουμ +autorip.from.clipboard = Αυτόματο rip-άρισμα απο το πρόχειρο (clipboard) +save.descriptions = Αποθήκευση περιγραφής +prefer.mp4.over.gif = Προτίμηση MP4 απο GIF +restore.window.position = Επαναφορά θέσης παραθύρου +remember.url.history = Να θυμάμαι ιστορικο απο URL +loading.history.from = Φόρτωση ιστορικού από + +# Queue keys +queue.remove.all = Διαγραφή όλων +queue.validation = Είσαι σίγουρος οτι θέλεις να διαγράφουν όλα τα στοιχεια της ουράς? +queue.remove.selected = Διαγραφή επιλεγμένου + +# History +re-rip.checked = Re-rip Τσεκαρισμένο +remove = Διαγραφή +clear = Καθάρισμα +history.check.all = Επιλογή όλων +history.check.none = Επιλογή κανενός +history.check.selected = Επιλογή επιλεγμένου +history.uncheck.selected = απο-επιλογή επιλεγμένου +history.load.failed.warning = Το RipMe απέτυχε να φορτώσει το αρχείο ιστορικού απο historyFile.getAbsolutePath() \n\nΛάθος %s\n\n Κλέισιμο του RipMe, θα επιφέρει αυτοματη αντικατάσταση των περιεχόμενων αυτού του αρχείου,\nάρα ίσως θα ήταν καλή ιδέα να πάρεις ένα αντίγραφο ασφαλείας πρίν κλέισεις το RipMe! +history.load.none = Δεν υπάρχουν ιστορικές εγγραφές για να ξαναripάρω. 
Rip-αρε καποια αλμπουμς πρώτα +history.load.none.checked = Δεν έχει τσεκαριστεί καμια ιστορική εγγραφή. Τσέκαρε μια εγγραφή κλικάροντας το checkbox στα αριστερλα του URL, ή πάτα δεξί κλίκ σε ένα URL για να επιλέξεις/αποεπιλέξεις ολα τα στοιχεία + +# TrayIcon +tray.show = Εμφάνιση +tray.hide = Απόκρυψη +tray.autorip = Αυτόματο rip του προχειρου(clipboard) +tray.exit = Έξοδος + +# Misc UI keys +loading.history.from.configuration = Φόρτωση ιστορικού απο τίς ρυθμίσεις +interrupted.while.waiting.to.rip.next.album = Διεκόπη ενω ήμουν σε αναμονή για να ripάρω το επόμενο άλμπουμ +inactive = Αδρανής +download.url.list = Μεταφόρτωση λίστας URL +select.save.dir = Επιλογή τοποθεσίας απθήκευσης + +# Keys for the logs generated by DownloadFileThread +nonretriable.status.code = Non-retriable status code +retriable.status.code = Retriable status code +server.doesnt.support.resuming.downloads = Ο διακομιστής δεν υποστηρίζει συνέχιση της μεταφόρτωσης +# A "magic number" can also be called a file signature +was.unable.to.get.content.type.using.magic.number = Δεν ευρέθη ο τύπος του αρχείου χρησιμοποιώντας τον μαγικό αριθμό +magic.number.was = Ο μαγικός αριθμός ήταν +deleting.existing.file = Διαγραφή υπάρχοντος αρχείου +request.properties = Ιδιότητες της αίτησης +download.interrupted = Η μεταφόρτωση διεκόπη +exceeded.maximum.retries = Υπέρβαση του μέγιστου αριθμου προσπαθειών +http.status.exception = HTTP status λάθος +exception.while.downloading.file = Λάθος ενω μεταφορτώνοταν ενα αρχειο +failed.to.download = Αποτυχία μεταφόρτωσης +skipping = Παράκαμψη +file.already.exists = το αρχείο υπάρχει ήδη \ No newline at end of file diff --git a/src/main/resources/LabelsBundle_es_ES.properties b/src/main/resources/LabelsBundle_es_ES.properties index c178ec79..fea84e5d 100644 --- a/src/main/resources/LabelsBundle_es_ES.properties +++ b/src/main/resources/LabelsBundle_es_ES.properties @@ -4,59 +4,73 @@ created = creado modified = modificado queue = Cola Configuration = Configuracion +open = 
Abrir # Keys for the Configuration menu - current.version = Version Actual check.for.updates = Buscar actualizaciones -auto.update = Auto-actualizar? -max.download.threads = Maximos procesos de descarga -timeout.mill = Timeout (in milliseconds): -retry.download.count = Numero de reintentos de descarga +auto.update = Actualizar automáticamente? +max.download.threads = Número de descargas simultáneas: +timeout.mill = Tiempo máximo de espera (milisegundos): +retry.download.count = Número de reintentos de descarga: +retry.sleep.mill = Espera entre reintentos (milisegundos): overwrite.existing.files = Sobreescribir archivos existentes? -sound.when.rip.completes = Sonar cuando el Rip termina -preserve.order = Mantener orden +sound.when.rip.completes = Notificar cuando el rip termina +preserve.order = Mantener el orden save.logs = Guardar logs -notification.when.rip.starts = Notificar cuando el Rip comienza +notification.when.rip.starts = Notificar cuando el rip comienza save.urls.only = Guardar solamente URLs -save.album.titles = Guardar titulos de albunes -autorip.from.clipboard = Autorip desde Portapapeles +save.album.titles = Guardar títulos de álbumes +autorip.from.clipboard = Autorip desde el portapapeles save.descriptions = Guardar descripciones prefer.mp4.over.gif = Preferir MP4 sobre GIF -restore.window.position = Restaurar posicion de ventana +restore.window.position = Restaurar posicion de la ventana remember.url.history = Recordar historia URL loading.history.from = Cargando historia desde # Queue keys queue.remove.all = Eliminar todos los elementos -queue.validation = ¿Esta seguro que desea eliminar todos los elementos de la lista? +queue.validation = ¿Está seguro que desea eliminar todos los elementos de la lista? 
queue.remove.selected = Eliminar elementos seleccionados -# Misc UI keys - -loading.history.from.configuration = Cargando historia desde la configuracion -interrupted.while.waiting.to.rip.next.album = Interrumpido esperando el Rip del proximo album -inactive = Inactivo -re-rip.checked = Re-rip marcado -remove = Quitar +# History +re-rip.checked = Re-rip Marcados +remove = Remover clear = Limpiar -download.url.list = Download url list -select.save.dir = Select Save Directory +history.check.all = Marcar Todos +history.check.none = Desmarcar Todos +history.check.selected = Marcar Seleccionados +history.uncheck.selected = Desmarcar Seleccionados +history.load.failed.warning = RipMe falló al cargar la historia de historyFile.getAbsolutePath() \n\nError: %s\n\nSi cierras RipMe los contenidos de este archivo se sobreescribirán,\nhaz un backup antes de cerrar RipMe! +history.load.none = El historial está vacío. Ripea algunos álbumes primero +history.load.none.checked = Ninguna entrada del historial fue 'marcada'. Selecciona una entrada clickeando la casilla a la derecha de la URL o haz click derecho sobre una URL para marcar/desmarcar todas las entradas. 
+ +# TrayIcon +tray.show = Mostrar +tray.hide = Esconder +tray.autorip = Autorip desde el portapapeles +tray.exit = Salida + +# Misc UI keys +loading.history.from.configuration = Cargando historia desde la configuración +interrupted.while.waiting.to.rip.next.album = Interrumpido esperando el rip del próximo álbum +inactive = Inactivo +download.url.list = Lista de URLs a descargar +select.save.dir = Seleccione el directorio de guardado # Keys for the logs generated by DownloadFileThread - -nonretriable.status.code = Non-retriable status code -retriable.status.code = Retriable status code -server.doesnt.support.resuming.downloads = Server doesn't support resuming downloads +nonretriable.status.code = Código de estado no recuperable +retriable.status.code = Código de estado recuperable +server.doesnt.support.resuming.downloads = El servidor no soporta resumir las descargas # A "magic number" can also be called a file signature -was.unable.to.get.content.type.using.magic.number = Was unable to get content type using magic number -magic.number.was = Magic number was -deleting.existing.file = Deleting existing file -request.properties = Request properties -download.interrupted = Download interrupted -exceeded.maximum.retries = Exceeded maximum retries -http.status.exception = HTTP status exception -exception.while.downloading.file = Exception while downloading file -failed.to.download = Failed to download -skipping = Skipping -file.already.exists = file already exists \ No newline at end of file +was.unable.to.get.content.type.using.magic.number = Imposible obtener el tipo de contenido utilizando el número mágico +magic.number.was = El número mágico era +deleting.existing.file = Eliminando el archivo existente +request.properties = Propiedades del pedido +download.interrupted = Descarga interrumpida +exceeded.maximum.retries = Máximo número de reintentos excedido +http.status.exception = Error de estado HTTP +exception.while.downloading.file = Error al descargar archivo 
+failed.to.download = Descarga fallida +skipping = Saltando +file.already.exists = el fichero ya existe \ No newline at end of file diff --git a/src/main/resources/LabelsBundle_fi_FI.properties b/src/main/resources/LabelsBundle_fi_FI.properties index 6edd4e45..c823cf05 100644 --- a/src/main/resources/LabelsBundle_fi_FI.properties +++ b/src/main/resources/LabelsBundle_fi_FI.properties @@ -10,9 +10,9 @@ Configuration = Asetukset current.version = Nykyinen versio check.for.updates = Tarkista päivitykset auto.update = Automaattipäivitys? -max.download.threads = Yhtäaikaiset lataukset +max.download.threads = Yhtäaikaiset lataukset: timeout.mill = Aikakatkaisu (millisekunneissa): -retry.download.count = Latauksen uudelleenyritykset +retry.download.count = Latauksen uudelleenyritykset: overwrite.existing.files = Korvaa nykyiset tiedostot? sound.when.rip.completes = Valmistumisääni preserve.order = Pidä järjestys diff --git a/src/main/resources/LabelsBundle_porrisavvo_FI.properties b/src/main/resources/LabelsBundle_fi_FI_porrisavo.properties similarity index 94% rename from src/main/resources/LabelsBundle_porrisavvo_FI.properties rename to src/main/resources/LabelsBundle_fi_FI_porrisavo.properties index a2ba056e..653709ab 100644 --- a/src/main/resources/LabelsBundle_porrisavvo_FI.properties +++ b/src/main/resources/LabelsBundle_fi_FI_porrisavo.properties @@ -10,9 +10,9 @@ Configuration = Assetuksse current.version = Nykyne versijjo check.for.updates = Tarkist update auto.update = Automaatpäivvitys? -max.download.threads = Yht'aikasse ripi +max.download.threads = Yht'aikasse ripi: timeout.mill = Timeout (millisekois): -retry.download.count = Ripi retry count +retry.download.count = Ripi retry count: overwrite.existing.files = Korvvaa nykysse filu? 
sound.when.rip.completes = Valmistummis'ään preserve.order = Pir järestys diff --git a/src/main/resources/LabelsBundle_fr_CH.properties b/src/main/resources/LabelsBundle_fr_CH.properties index b489e3e3..1b035dac 100644 --- a/src/main/resources/LabelsBundle_fr_CH.properties +++ b/src/main/resources/LabelsBundle_fr_CH.properties @@ -10,9 +10,9 @@ Configuration = Configuration current.version = Version actuelle check.for.updates = Vérifier mises à jour auto.update = Mises à jour automatiques? -max.download.threads = Nombre de téléchargements parallèles maximum +max.download.threads = Nombre de téléchargements parallèles maximum: timeout.mill = Délai d'expiration (en millisecondes): -retry.download.count = Nombre d'essais téléchargement +retry.download.count = Nombre d'essais téléchargement: overwrite.existing.files = Remplacer fichiers existants ? sound.when.rip.completes = Son lorsque le rip est terminé preserve.order = Conserver l'ordre diff --git a/src/main/resources/LabelsBundle_in_ID.properties b/src/main/resources/LabelsBundle_in_ID.properties index b5e773d5..778e72ab 100644 --- a/src/main/resources/LabelsBundle_in_ID.properties +++ b/src/main/resources/LabelsBundle_in_ID.properties @@ -10,9 +10,9 @@ Configuration = Pengaturan current.version = Versi saat ini check.for.updates = Periksa update auto.update = Update otomatis? -max.download.threads = Thread unduh maksimal +max.download.threads = Thread unduh maksimal: timeout.mill = Batas waktu (dalam milidetik): -retry.download.count = Jumlah percobaan unduh +retry.download.count = Jumlah percobaan unduh: overwrite.existing.files = Timpa file yang ada? 
sound.when.rip.completes = Hidupkan suara saat rip selesai preserve.order = Pertahankan urutan diff --git a/src/main/resources/LabelsBundle_it_IT.properties b/src/main/resources/LabelsBundle_it_IT.properties index de00612b..192d777a 100644 --- a/src/main/resources/LabelsBundle_it_IT.properties +++ b/src/main/resources/LabelsBundle_it_IT.properties @@ -12,7 +12,7 @@ check.for.updates = Controlla aggiornamenti auto.update = Aggiornamento automatico? max.download.threads = Thread di download massimi: timeout.mill = Timeout (in millisecondi): -retry.download.count = Tentativi di download +retry.download.count = Tentativi di download: overwrite.existing.files = Sovrascrivi file esistenti? sound.when.rip.completes = Suono al completamento del rip preserve.order = Preserva ordine diff --git a/src/main/resources/LabelsBundle_kr_KR.properties b/src/main/resources/LabelsBundle_kr_KR.properties index 34a35a7a..984da15b 100644 --- a/src/main/resources/LabelsBundle_kr_KR.properties +++ b/src/main/resources/LabelsBundle_kr_KR.properties @@ -10,9 +10,9 @@ Configuration = \uAD6C\uC131 current.version = \uD604\uC7AC \uBC84\uC804 check.for.updates = \uC5C5\uB370\uC774\uD2B8 \uD655\uC778 auto.update = \uC790\uB3D9 \uC5C5\uB370\uC774\uD2B8 -max.download.threads = \uCD5C\uB300 \uB2E4\uC6B4\uB85C\uB4DC \uC4F0\uB808\uB4DC \uC218 +max.download.threads = \uCD5C\uB300 \uB2E4\uC6B4\uB85C\uB4DC \uC4F0\uB808\uB4DC \uC218: timeout.mill = \uC2DC\uAC04 \uC81C\uD55C (\uBC00\uB9AC\uCD08): -retry.download.count = \uB2E4\uC6B4\uB85C\uB4DC \uC7AC\uC2DC\uB3C4 \uD68C\uC218 +retry.download.count = \uB2E4\uC6B4\uB85C\uB4DC \uC7AC\uC2DC\uB3C4 \uD68C\uC218: overwrite.existing.files = \uC911\uBCF5\uD30C\uC77C \uB36E\uC5B4\uC4F0\uAE30 sound.when.rip.completes = \uC644\uB8CC\uC2DC \uC54C\uB9BC preserve.order = \uBA85\uB839 \uAE30\uC5B5\uD558\uAE30 @@ -35,23 +35,23 @@ inactive = \uBE44\uD65C\uC131\uD654 re-rip.checked = \uB2E4\uC2DC \uBCF5\uC0AC\uD558\uAE30 \uCCB4\uD06C\uB428 remove = \uC120\uD0DD\uD55C 
\uAE30\uB85D \uC0AD\uC81C clear = \uD788\uC2A4\uD1A0\uB9AC \uBAA8\uB450 \uC0AD\uC81C -download.url.list = Download url list -select.save.dir = Select Save Directory +download.url.list = URL \ubAA9\uB85D\uC744 \uB2E4\uC6B4\uB85C\uB4DC\uD558\uAE30 +select.save.dir = \uC800\uC7A5\uC744 \uB514\uB809\uD1A0\uB9AC \uC120\uD0DD\uD558\uAE30 # Keys for the logs generated by DownloadFileThread -nonretriable.status.code = Non-retriable status code -retriable.status.code = Retriable status code -server.doesnt.support.resuming.downloads = Server doesn't support resuming downloads +nonretriable.status.code = \uBCF5\uAD6C\uD560 \uC218 \uC5C6\uB294 \uC0C1\uD0DC \uCF54\uB4DC +retriable.status.code = \uBCF5\uAD6C\uD560 \uC218 \uC788\uB294 \uC0C1\uD0DC \uCF54\uB4DC +server.doesnt.support.resuming.downloads = \uC11C\uBC84 \uB2E4\uC6B4\uB85C\uB4DC \uC7AC\uAC1C\uB97C \uC9C0\uC6D0\uD558\uC9C0 \uC54A\uB2E4 # A "magic number" can also be called a file signature -was.unable.to.get.content.type.using.magic.number = Was unable to get content type using magic number -magic.number.was = Magic number was -deleting.existing.file = Deleting existing file -request.properties = Request properties -download.interrupted = Download interrupted -exceeded.maximum.retries = Exceeded maximum retries -http.status.exception = HTTP status exception -exception.while.downloading.file = Exception while downloading file -failed.to.download = Failed to download -skipping = Skipping -file.already.exists = file already exists \ No newline at end of file +was.unable.to.get.content.type.using.magic.number = \uC2DC\uADF8\uB108\uCC98 \uD30C\uC77C \uC0AC\uC6A9\uD558\uC5EC \uB0B4\uC6A9 \uC720\uD615\uC744 \uAC00\uC838\uC62C \uC218 \uC5C6\uAE30 +magic.number.was = \uC2DC\uADF8\uB108\uCC98 \uD30C\uC77C \uC774\uAC70\uC600\uC2B5\uB2C8\uB2E4 +deleting.existing.file = \uAE30\uC874 \uD30C\uC77C\uC744 \uC0AD\uC81C\uD558\uB294 \uC911\uC785\uB2C8\uB2E4 +request.properties = \uC18D\uC131\uC744 \uC694\uCCAD\uD558\uAE30 
+download.interrupted = \uB2E4\uC6B4\uB85C\uB4DC\uAC00 \uC911\uB2E8\uB418\uC5C8\uC2B5\uB2C8\uB2E4 +exceeded.maximum.retries = \uCD5C\uB300 \uC7AC\uC2DC\uB3C4 \uD69F\uC218\uB97C \uCD08\uACFC\uD588\uC2B5\uB2C8\uB2E4 +http.status.exception = HTTP \uC0C1\uD0DC \uC608\uC678\uB97C +exception.while.downloading.file = \uD30C\uC77C\uC744 \uB2E4\uC6B4\uB85C\uB4DC\uD558\uB294 \uB3D9\uC548 \uC608\uC678\uAC00 \uBC1C\uC0DD\uD558\uAE30 +failed.to.download = \uB2E4\uC6B4\uB85C\uB4DC\uD558\uC9C0 \uBABB\uD558\uAE30 +skipping = \uAC74\uB108\uB6F0\uACE0 \uC788\uC2B5\uB2C8\uB2E4 +file.already.exists = \uD30C\uC77C\uC774 \uC774\uBBF8 \uC788\uC2B5\uB2C8\uB2E4 \ No newline at end of file diff --git a/src/main/resources/LabelsBundle_nl_NL.properties b/src/main/resources/LabelsBundle_nl_NL.properties index e1d9d61c..6cec1f73 100644 --- a/src/main/resources/LabelsBundle_nl_NL.properties +++ b/src/main/resources/LabelsBundle_nl_NL.properties @@ -10,9 +10,9 @@ Configuration = Configuratie current.version = Huidige versie check.for.updates = Controleer op updates auto.update = Auto-update? -max.download.threads = Maximale downloadthreads +max.download.threads = Maximale downloadthreads: timeout.mill = Timeout (in milliseconden): -retry.download.count = Aantal keren opnieuw proberen te downloaden +retry.download.count = Aantal keren opnieuw proberen te downloaden: overwrite.existing.files = Bestaande bestanden overschrijven? 
sound.when.rip.completes = Geluid wanneer rip klaar is preserve.order = Volgorde behouden diff --git a/src/main/resources/LabelsBundle_pl_PL.properties b/src/main/resources/LabelsBundle_pl_PL.properties index 4ba4590e..a3bbbb38 100644 --- a/src/main/resources/LabelsBundle_pl_PL.properties +++ b/src/main/resources/LabelsBundle_pl_PL.properties @@ -1,59 +1,59 @@ -Log = Logi -History = Historia -created = Stworzono -modified = Zmodyfikowano -queue = Kolejka -Configuration = Konfiguracja - -# Keys for the Configuration menu - -current.version = Obecna Wersja -check.for.updates = Sprawdź dostępność aktualizacji -auto.update = Auto Aktualizacja? -max.download.threads = Maksymalna Ilośc Pobieranych Plików: -timeout.mill = Opóźnienie (w milisekundach): -retry.download.count = Liczba ponownych pobrań -overwrite.existing.files = Nadpisać istniejące pliki? -sound.when.rip.completes = Dźwięk po zakończeniu -preserve.order = Zachować porządek -save.logs = Zapisz Logi -notification.when.rip.starts = Powiadomienie przy uruchomieniu pobierania -save.urls.only = Zapisz tylko linki -save.album.titles = Zapisz nazwy albumów -autorip.from.clipboard = Auto pobieranie ze schowka -save.descriptions = Zapisz opis -prefer.mp4.over.gif = Preferuj MP4 od GIF -restore.window.position = Przywróć pozycję okna -remember.url.history = Zapamiętaj historię linków -loading.history.from = Załaduj historię z... 
- -# Misc UI keys - -loading.history.from.configuration = Załaduj historię z ustawień -interrupted.while.waiting.to.rip.next.album = Przerwany podczas oczekiwania na zgrywanie następnego albumu -inactive = Nieaktywny -re-rip.checked = Sprawdź pobrane ripy -remove = Usuń -clear = Wyczyść -download.url.list = Pobierz listę linków -select.save.dir = Wybierz ścieżkę zapisu - -# Keys for the logs generated by DownloadFileThread - -nonretriable.status.code = Nieodwracalny kod statusu -retriable.status.code = Odzyskiwanie kodu statusu -server.doesnt.support.resuming.downloads = Serwer nie obsługuje wznowienia pobierania - -# A "magic number" can also be called a file signature - -was.unable.to.get.content.type.using.magic.number = Nie udało się uzyskać typu zawartości za pomocą magicznej liczby -magic.number.was = Magiczną liczbą była -deleting.existing.file = Usuwanie istniejących plików -request.properties = Poproś o uprawnienia -download.interrupted = Pobieranie przerwane -exceeded.maximum.retries = Spodziewana ilośc powtórzeń -http.status.exception = Wyjątek statusu http -exception.while.downloading.file = Wystąpił problem podczas pobierania pliku -failed.to.download = Nie można pobrać pliku -skipping = Pomijanie +Log = Logi +History = Historia +created = Stworzono +modified = Zmodyfikowano +queue = Kolejka +Configuration = Konfiguracja + +# Keys for the Configuration menu + +current.version = Obecna Wersja +check.for.updates = Sprawdź dostępność aktualizacji +auto.update = Auto Aktualizacja? +max.download.threads = Maksymalna Ilośc Pobieranych Plików: +timeout.mill = Opóźnienie (w milisekundach): +retry.download.count = Liczba ponownych pobrań: +overwrite.existing.files = Nadpisać istniejące pliki? 
+sound.when.rip.completes = Dźwięk po zakończeniu +preserve.order = Zachować porządek +save.logs = Zapisz Logi +notification.when.rip.starts = Powiadomienie przy uruchomieniu pobierania +save.urls.only = Zapisz tylko linki +save.album.titles = Zapisz nazwy albumów +autorip.from.clipboard = Auto pobieranie ze schowka +save.descriptions = Zapisz opis +prefer.mp4.over.gif = Preferuj MP4 od GIF +restore.window.position = Przywróć pozycję okna +remember.url.history = Zapamiętaj historię linków +loading.history.from = Załaduj historię z... + +# Misc UI keys + +loading.history.from.configuration = Załaduj historię z ustawień +interrupted.while.waiting.to.rip.next.album = Przerwany podczas oczekiwania na zgrywanie następnego albumu +inactive = Nieaktywny +re-rip.checked = Sprawdź pobrane ripy +remove = Usuń +clear = Wyczyść +download.url.list = Pobierz listę linków +select.save.dir = Wybierz ścieżkę zapisu + +# Keys for the logs generated by DownloadFileThread + +nonretriable.status.code = Nieodwracalny kod statusu +retriable.status.code = Odzyskiwanie kodu statusu +server.doesnt.support.resuming.downloads = Serwer nie obsługuje wznowienia pobierania + +# A "magic number" can also be called a file signature + +was.unable.to.get.content.type.using.magic.number = Nie udało się uzyskać typu zawartości za pomocą magicznej liczby +magic.number.was = Magiczną liczbą była +deleting.existing.file = Usuwanie istniejących plików +request.properties = Poproś o uprawnienia +download.interrupted = Pobieranie przerwane +exceeded.maximum.retries = Spodziewana ilośc powtórzeń +http.status.exception = Wyjątek statusu http +exception.while.downloading.file = Wystąpił problem podczas pobierania pliku +failed.to.download = Nie można pobrać pliku +skipping = Pomijanie file.already.exists = Plik już istnieje \ No newline at end of file diff --git a/src/main/resources/LabelsBundle_pt_BR.properties b/src/main/resources/LabelsBundle_pt_BR.properties index 88209235..91c262b7 100644 --- 
a/src/main/resources/LabelsBundle_pt_BR.properties +++ b/src/main/resources/LabelsBundle_pt_BR.properties @@ -12,7 +12,7 @@ check.for.updates = Verificar atualizações auto.update = Atualização automática? max.download.threads = Número máximo de conexões: timeout.mill = Tempo limite (em milissegundos): -retry.download.count = Número de tentativas +retry.download.count = Número de tentativas: overwrite.existing.files = Sobrescrever arquivos existentes? sound.when.rip.completes = Som quando terminar o rip preserve.order = Preservar ordem diff --git a/src/main/resources/LabelsBundle_pt_PT.properties b/src/main/resources/LabelsBundle_pt_PT.properties index 3dcbafe1..500049ce 100644 --- a/src/main/resources/LabelsBundle_pt_PT.properties +++ b/src/main/resources/LabelsBundle_pt_PT.properties @@ -4,15 +4,15 @@ created = criado modified = modificado queue = Fila Configuration = Configuração +open = Abrir # Keys for the Configuration menu - current.version = Versão atual check.for.updates = Verificar atualizações auto.update = Atualização automática? -max.download.threads = Número máximo de processos de transferência +max.download.threads = Número máximo de processos de transferência: timeout.mill = Tempo de espera (em milissegundos): -retry.download.count = Número de novas tentativas de transferência +retry.download.count = Número de novas tentativas de transferência: overwrite.existing.files = Sobrescrever ficheiros existentes? sound.when.rip.completes = Notificar quando o rip é concluído preserve.order = Manter a ordem @@ -27,31 +27,49 @@ restore.window.position = Restaurar posição da janela remember.url.history = Lembrar histórico de URL loading.history.from = Carregar histórico de -# Misc UI keys +# Queue keys +queue.remove.all = Remover todos +queue.validation = Tem a certeza de que quer remover todos os elementos da fila? 
+queue.remove.selected = Remover seleccionados +# History +re-rip.checked = Re-rip seleccionados +remove = Remover +clear = Limpar +history.check.all = Seleccionar todos +history.check.none = Não seleccionar nenhuma +history.check.selected = Marca seleccionada +history.uncheck.selected = Desmaracar seleccionada +history.load.failed.warning = RipMe não carregou o ficheiro de histórico no historyFile.getAbsolutePath() \n\nErro: %s\n\nFechar o RipMe sobregravará automaticamente o conteúdo deste ficheiro.\nPode querer fazer o backup do ficheiro antes de fechar o RipMe! +history.load.none = Não há entradas no histórico para re-rip. Rip alguns álbuns primeiro. +history.load.none.checked = Não foram seleccionadas entradas no histórico. Selecione uma entrada clicando na caixa à direita do URL ou clique com o botão direito em um URL para selecione/desselecione todos os itens. + +# TrayIcon +tray.show = Mostrar +tray.hide = Esconder +tray.autorip = AutoRip clipboard +tray.exit = Saída + +# Misc UI keys loading.history.from.configuration = A carregar o histórico da configuração interrupted.while.waiting.to.rip.next.album = Interrompido durante a espera do rip do próximo álbum inactive = Inativo -re-rip.checked = Re-rip verificado -remove = Remover -clear = Limpar -download.url.list = Download url list -select.save.dir = Select Save Directory +download.url.list = A lista de URLs para download +select.save.dir = Selecione salvar diretório # Keys for the logs generated by DownloadFileThread - -nonretriable.status.code = Non-retriable status code -retriable.status.code = Retriable status code -server.doesnt.support.resuming.downloads = Server doesn't support resuming downloads +nonretriable.status.code = Código de estado não recuperável +retriable.status.code = Código de estado recuperável +server.doesnt.support.resuming.downloads = Servidor não suporta o reinício de downloads # A "magic number" can also be called a file signature 
-was.unable.to.get.content.type.using.magic.number = Was unable to get content type using magic number -magic.number.was = Magic number was -deleting.existing.file = Deleting existing file -request.properties = Request properties -download.interrupted = Download interrupted -exceeded.maximum.retries = Exceeded maximum retries -http.status.exception = HTTP status exception -exception.while.downloading.file = Exception while downloading file -failed.to.download = Failed to download -skipping = Skipping -file.already.exists = file already exists +was.unable.to.get.content.type.using.magic.number = Impossibilidade de obter o tipo de conteúdo utilizando o número mágico +magic.number.was = Número mágico foi +deleting.existing.file = Eliminação do ficheiro existente +request.properties = Propriedades do pedido +download.interrupted = Download interrompido +exceeded.maximum.retries = Excedeu tentativas máximas +http.status.exception = Exceção de status HTTP +exception.while.downloading.file = Exceção enquanto o ficheiro era baixado +failed.to.download = Falha no download +skipping = Pulando +file.already.exists = Ficheiro já existe \ No newline at end of file diff --git a/src/main/resources/LabelsBundle_ru_RU.properties b/src/main/resources/LabelsBundle_ru_RU.properties index a3100df8..f354d15a 100644 --- a/src/main/resources/LabelsBundle_ru_RU.properties +++ b/src/main/resources/LabelsBundle_ru_RU.properties @@ -12,7 +12,7 @@ check.for.updates = Проверить обновления auto.update = Автообновление max.download.threads = Максимальное число потоков: timeout.mill = Задержка (в миллисекундах): -retry.download.count = Число повторов +retry.download.count = Число повторов: overwrite.existing.files = Перезаписать существующие файлы? 
sound.when.rip.completes = Звук при завершении preserve.order = Сохранять порядок diff --git a/src/main/resources/LabelsBundle_zh_CN.properties b/src/main/resources/LabelsBundle_zh_CN.properties new file mode 100644 index 00000000..7cf6d781 --- /dev/null +++ b/src/main/resources/LabelsBundle_zh_CN.properties @@ -0,0 +1,75 @@ +Log = 日志 +History = 历史 +created = 创建时间 +modified = 修改时间 +queue = 队列 +Configuration = 配置 +open = 打开 + +# Keys for the Configuration menu +current.version = 当前版本 +check.for.updates = 检查更新 +auto.update = 自动更新? +max.download.threads = 最大下载线程数: +timeout.mill = 超时(毫秒): +retry.download.count = 重试下载次数: +overwrite.existing.files = 覆盖现有文件? +sound.when.rip.completes = 抓取完成时播放声音 +preserve.order = 保持顺序 +save.logs = 保存日志 +notification.when.rip.starts = 通知抓取开始 +save.urls.only = 仅保存 URL +save.album.titles = 保存专辑标题 +autorip.from.clipboard = 监视剪贴板上的 URL +save.descriptions = 保存描述 +prefer.mp4.over.gif = 首选 MP4 而非 GIF +restore.window.position = 恢复窗口位置 +remember.url.history = 记住 URL 历史 +loading.history.from = 加载历史从 + +# Queue keys +queue.remove.all = 移除全部 +queue.validation = 您确定要移除队列内的全部项目? +queue.remove.selected = 移除所选项目 + +# History +re-rip.checked = 重新抓取选中的项目 +remove = 移除 +clear = 清除 +history.check.all = 选中全部 +history.check.none = 取消选中全部 +history.check.selected = 选中所选项目 +history.uncheck.selected = 取消选中所选项目 +history.load.failed.warning = RipMe 加载位于 historyFile.getAbsolutePath() 的历史文件失败\n\n错误:%s\n\n关闭 RipMe 会自动覆盖此文件的内容,\n请在关闭 RipMe 前备份它! 
+history.load.none = 无可重新抓取的历史条目。请先抓取一些专辑 +history.load.none.checked = 未 '选中' 任何历史条目,请通过选中所需 URL 前面的复选框或URL 的右键菜单以选中所需条目 + +# TrayIcon +tray.show = 显示 +tray.hide = 隐藏 +tray.autorip = 监视剪贴板上的 URL +tray.exit = 退出 + +# Misc UI keys +loading.history.from.configuration = 从配置加载历史 +interrupted.while.waiting.to.rip.next.album = 等候抓取下一专辑期间发生中断 +inactive = 非活动 +download.url.list = 下载 URL 列表 +select.save.dir = 选择保存目录 + +# Keys for the logs generated by DownloadFileThread +nonretriable.status.code = 非可重试状态代码 +retriable.status.code = 可重试状态代码 +server.doesnt.support.resuming.downloads = 服务器不支持继续下载(续传) +# A "magic number" can also be called a file signature +was.unable.to.get.content.type.using.magic.number = 不能使用幻数获取内容类型 +magic.number.was = 幻数为 +deleting.existing.file = 删除现有文件 +request.properties = 请求属性 +download.interrupted = 下载中断 +exceeded.maximum.retries = 超过最大重试次数 +http.status.exception = HTTP 状态意外 +exception.while.downloading.file = 下载文件时发生意外 +failed.to.download = 下载失败 +skipping = 跳过 +file.already.exists = 文件已存在 \ No newline at end of file diff --git a/src/main/resources/log4j.properties b/src/main/resources/log4j.properties deleted file mode 100644 index 409dd303..00000000 --- a/src/main/resources/log4j.properties +++ /dev/null @@ -1,10 +0,0 @@ - -# define the console appender -log4j.appender.stdout = org.apache.log4j.ConsoleAppender -log4j.appender.stdout.Target = System.out -log4j.appender.stdout.Threshold = info -log4j.appender.stdout.layout = org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern = %m%n - -# now map our console appender as a root logger, means all log messages will go to this appender -log4j.rootLogger = debug, stdout \ No newline at end of file diff --git a/src/main/resources/log4j2-example.xml b/src/main/resources/log4j2-example.xml new file mode 100644 index 00000000..dbc0888c --- /dev/null +++ b/src/main/resources/log4j2-example.xml @@ -0,0 +1,30 @@ + + + + # Console appender + + # Pattern of log message for console appender 
+ + + + # Rolling appender + + + %d{yyyy-MM-dd HH:mm:ss} %p %m%n + + + + + + + + + + + + + + + diff --git a/src/main/resources/rip.properties b/src/main/resources/rip.properties index cac0c1f1..484cacac 100644 --- a/src/main/resources/rip.properties +++ b/src/main/resources/rip.properties @@ -6,7 +6,7 @@ threads.size = 5 file.overwrite = false # Number of retries on failed downloads -download.retries = 1 +download.retries = 3 # File download timeout (in milliseconds) download.timeout = 60000 @@ -17,6 +17,9 @@ page.timeout = 5000 # Maximum size of downloaded files in bytes (required) download.max_size = 104857600 +# Any URLs ending with one of these comma-separated values will be skipped +#download.ignore_extensions = mp4,gif,m4v,webm,html + # Don't retry on 404 errors error.skip404 = true diff --git a/src/test/java/com/rarchives/ripme/tst/AbstractRipperTest.java b/src/test/java/com/rarchives/ripme/tst/AbstractRipperTest.java index f1d8eff5..7eb3df43 100644 --- a/src/test/java/com/rarchives/ripme/tst/AbstractRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/AbstractRipperTest.java @@ -1,29 +1,32 @@ package com.rarchives.ripme.tst; import com.rarchives.ripme.ripper.AbstractRipper; -import junit.framework.TestCase; +import org.junit.jupiter.api.Test; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; + +import static org.junit.jupiter.api.Assertions.assertEquals; +public class AbstractRipperTest { -public class AbstractRipperTest extends TestCase { - - public void testGetFileName() throws IOException { - String fileName = AbstractRipper.getFileName(new URL("http://www.tsumino.com/Image/Object?name=U1EieteEGwm6N1dGszqCpA%3D%3D"), "test", "test"); + @Test + public void testGetFileName() throws IOException, URISyntaxException { + String fileName = AbstractRipper.getFileName(new URI("http://www.tsumino.com/Image/Object?name=U1EieteEGwm6N1dGszqCpA%3D%3D").toURL(),null, "test", "test"); 
assertEquals("test.test", fileName); - fileName = AbstractRipper.getFileName(new URL("http://www.tsumino.com/Image/Object?name=U1EieteEGwm6N1dGszqCpA%3D%3D"), "test", null); + fileName = AbstractRipper.getFileName(new URI("http://www.tsumino.com/Image/Object?name=U1EieteEGwm6N1dGszqCpA%3D%3D").toURL(), null,"test", null); assertEquals("test", fileName); - fileName = AbstractRipper.getFileName(new URL("http://www.tsumino.com/Image/Object?name=U1EieteEGwm6N1dGszqCpA%3D%3D"), null, null); + fileName = AbstractRipper.getFileName(new URI("http://www.tsumino.com/Image/Object?name=U1EieteEGwm6N1dGszqCpA%3D%3D").toURL(), null,null, null); assertEquals("Object", fileName); - fileName = AbstractRipper.getFileName(new URL("http://www.test.com/file.png"), null, null); + fileName = AbstractRipper.getFileName(new URI("http://www.test.com/file.png").toURL(), null,null, null); assertEquals("file.png", fileName); - fileName = AbstractRipper.getFileName(new URL("http://www.test.com/file."), null, null); + fileName = AbstractRipper.getFileName(new URI("http://www.test.com/file.").toURL(), null,null, null); assertEquals("file.", fileName); } diff --git a/src/test/java/com/rarchives/ripme/tst/AppTest.java b/src/test/java/com/rarchives/ripme/tst/AppTest.java deleted file mode 100644 index fb9bc680..00000000 --- a/src/test/java/com/rarchives/ripme/tst/AppTest.java +++ /dev/null @@ -1,30 +0,0 @@ -package com.rarchives.ripme.tst; - -import junit.framework.Test; -import junit.framework.TestCase; -import junit.framework.TestSuite; - -public class AppTest extends TestCase { - /** - * Create the test case - * - * @param testName name of the test case - */ - public AppTest(String testName) { - super(testName); - } - - /** - * @return the suite of tests being tested - */ - public static Test suite() { - return new TestSuite(AppTest.class); - } - - /** - * Rigourous Test :-) - */ - public void testApp() { - assertTrue(true); - } -} diff --git 
a/src/test/java/com/rarchives/ripme/tst/Base64Test.java b/src/test/java/com/rarchives/ripme/tst/Base64Test.java index ffe2f3f8..f5c82078 100644 --- a/src/test/java/com/rarchives/ripme/tst/Base64Test.java +++ b/src/test/java/com/rarchives/ripme/tst/Base64Test.java @@ -1,10 +1,13 @@ package com.rarchives.ripme.tst; -import junit.framework.TestCase; import com.rarchives.ripme.utils.Base64; +import org.junit.jupiter.api.Test; -public class Base64Test extends TestCase { +import static org.junit.jupiter.api.Assertions.assertEquals; +public class Base64Test { + + @Test public void testDecode() { assertEquals("test", new String(Base64.decode("dGVzdA=="))); } diff --git a/src/test/java/com/rarchives/ripme/tst/UtilsTest.java b/src/test/java/com/rarchives/ripme/tst/UtilsTest.java index d87eca55..c43fa76a 100644 --- a/src/test/java/com/rarchives/ripme/tst/UtilsTest.java +++ b/src/test/java/com/rarchives/ripme/tst/UtilsTest.java @@ -1,16 +1,27 @@ package com.rarchives.ripme.tst; -import java.io.File; import java.io.FileNotFoundException; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.Arrays; import com.rarchives.ripme.utils.Utils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; public class UtilsTest { + private final Logger LOGGER = LogManager.getLogger(UtilsTest.class); + + @Test + public void testConfigureLogger() { + Utils.configureLogger(); + LOGGER.warn("this is a warning messaage."); + } + public void testGetEXTFromMagic() { Assertions.assertEquals("jpeg", Utils.getEXTFromMagic(new byte[] { -1, -40, -1, -37, 0, 0, 0, 0 })); @@ -50,8 +61,8 @@ public class UtilsTest { public void testShortenFileNameWindows() throws FileNotFoundException { String filename = 
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff.png"; // Test filename shortening for windows - File f = Utils.shortenSaveAsWindows("D:/rips/test/reddit/deep", filename); - Assertions.assertEquals(new File( + Path f = Utils.shortenSaveAsWindows("D:/rips/test/reddit/deep", filename); + Assertions.assertEquals(Paths.get( "D:/rips/test/reddit/deep/ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff.png"), f); } diff --git a/src/test/java/com/rarchives/ripme/tst/proxyTest.java b/src/test/java/com/rarchives/ripme/tst/proxyTest.java index 36ea2f55..0576b8e2 100644 --- a/src/test/java/com/rarchives/ripme/tst/proxyTest.java +++ b/src/test/java/com/rarchives/ripme/tst/proxyTest.java @@ -1,52 +1,57 @@ -package com.rarchives.ripme.tst; - -import java.io.IOException; -import java.net.URL; -import com.rarchives.ripme.utils.Proxy; -import com.rarchives.ripme.utils.Utils; -import junit.framework.TestCase; -import com.rarchives.ripme.utils.Http; - - -public class proxyTest extends TestCase { - - - // This test will only run on machines where the user has added a entry for proxy.socks - public void testSocksProxy() throws IOException { - // Unset proxy before testing - System.setProperty("http.proxyHost", ""); - System.setProperty("https.proxyHost", ""); - System.setProperty("socksProxyHost", ""); - URL url = new URL("https://icanhazip.com"); - String proxyConfig = Utils.getConfigString("proxy.socks", ""); - if (!proxyConfig.equals("")) { - String ip1 = Http.url(url).ignoreContentType().get().text(); - Proxy.setSocks(Utils.getConfigString("proxy.socks", "")); - String ip2 = Http.url(url).ignoreContentType().get().text(); - 
assertFalse(ip1.equals(ip2)); - } else { - System.out.println("Skipping testSocksProxy"); - assert(true); - } - } - - // This test will only run on machines where the user has added a entry for proxy.http - public void testHTTPProxy() throws IOException { - // Unset proxy before testing - System.setProperty("http.proxyHost", ""); - System.setProperty("https.proxyHost", ""); - System.setProperty("socksProxyHost", ""); - URL url = new URL("https://icanhazip.com"); - String proxyConfig = Utils.getConfigString("proxy.http", ""); - if (!proxyConfig.equals("")) { - String ip1 = Http.url(url).ignoreContentType().get().text(); - Proxy.setHTTPProxy(Utils.getConfigString("proxy.http", "")); - String ip2 = Http.url(url).ignoreContentType().get().text(); - assertFalse(ip1.equals(ip2)); - } else { - System.out.println("Skipping testHTTPProxy"); - assert(true); - } - } - -} +package com.rarchives.ripme.tst; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import com.rarchives.ripme.utils.Proxy; +import com.rarchives.ripme.utils.Utils; +import com.rarchives.ripme.utils.Http; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertFalse; + +public class proxyTest { + + + // This test will only run on machines where the user has added a entry for proxy.socks + @Test + public void testSocksProxy() throws IOException, URISyntaxException { + // Unset proxy before testing + System.setProperty("http.proxyHost", ""); + System.setProperty("https.proxyHost", ""); + System.setProperty("socksProxyHost", ""); + URL url = new URI("https://icanhazip.com").toURL(); + String proxyConfig = Utils.getConfigString("proxy.socks", ""); + if (!proxyConfig.equals("")) { + String ip1 = Http.url(url).ignoreContentType().get().text(); + Proxy.setSocks(Utils.getConfigString("proxy.socks", "")); + String ip2 = Http.url(url).ignoreContentType().get().text(); + assertFalse(ip1.equals(ip2)); + } else { + 
System.out.println("Skipping testSocksProxy"); + assert(true); + } + } + + // This test will only run on machines where the user has added a entry for proxy.http + @Test + public void testHTTPProxy() throws IOException, URISyntaxException { + // Unset proxy before testing + System.setProperty("http.proxyHost", ""); + System.setProperty("https.proxyHost", ""); + System.setProperty("socksProxyHost", ""); + URL url = new URI("https://icanhazip.com").toURL(); + String proxyConfig = Utils.getConfigString("proxy.http", ""); + if (!proxyConfig.equals("")) { + String ip1 = Http.url(url).ignoreContentType().get().text(); + Proxy.setHTTPProxy(Utils.getConfigString("proxy.http", "")); + String ip2 = Http.url(url).ignoreContentType().get().text(); + assertFalse(ip1.equals(ip2)); + } else { + System.out.println("Skipping testHTTPProxy"); + assert(true); + } + } + +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/AerisdiesRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/AerisdiesRipperTest.java index 5b99b8c4..4c0bd833 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/AerisdiesRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/AerisdiesRipperTest.java @@ -1,36 +1,44 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.AerisdiesRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class AerisdiesRipperTest extends RippersTest { @Test - public void testAlbum() throws IOException { - AerisdiesRipper ripper = new AerisdiesRipper(new URL("http://www.aerisdies.com/html/lb/alb_1097_1.html")); + @Tag("flaky") + public void testAlbum() throws IOException, URISyntaxException { + AerisdiesRipper ripper = new AerisdiesRipper(new URI("http://www.aerisdies.com/html/lb/alb_1097_1.html").toURL()); 
testRipper(ripper); } @Test - public void testSubAlbum() throws IOException { - AerisdiesRipper ripper = new AerisdiesRipper(new URL("http://www.aerisdies.com/html/lb/alb_3692_1.html")); + @Tag("flaky") + public void testSubAlbum() throws IOException, URISyntaxException { + AerisdiesRipper ripper = new AerisdiesRipper(new URI("http://www.aerisdies.com/html/lb/alb_3692_1.html").toURL()); testRipper(ripper); } @Test - public void testDjAlbum() throws IOException { - AerisdiesRipper ripper = new AerisdiesRipper(new URL("http://www.aerisdies.com/html/lb/douj_5230_1.html")); + @Tag("flaky") + public void testDjAlbum() throws IOException, URISyntaxException { + AerisdiesRipper ripper = new AerisdiesRipper(new URI("http://www.aerisdies.com/html/lb/douj_5230_1.html").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("http://www.aerisdies.com/html/lb/douj_5230_1.html"); + @Tag("flaky") + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("http://www.aerisdies.com/html/lb/douj_5230_1.html").toURL(); AerisdiesRipper ripper = new AerisdiesRipper(url); - assertEquals("5230", ripper.getGID(url)); + Assertions.assertEquals("5230", ripper.getGID(url)); } // TODO: Add a test for an album with a title. 
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/AllporncomicRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/AllporncomicRipperTest.java index f8466072..20b79c0b 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/AllporncomicRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/AllporncomicRipperTest.java @@ -1,15 +1,18 @@ package com.rarchives.ripme.tst.ripper.rippers; import com.rarchives.ripme.ripper.rippers.AllporncomicRipper; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; public class AllporncomicRipperTest extends RippersTest { @Test - public void testAlbum() throws IOException { - AllporncomicRipper ripper = new AllporncomicRipper(new URL("https://allporncomic.com/porncomic/dnd-pvp-dungeons-dragons-fred-perry/1-dnd-pvp")); + @Tag("flaky") + public void testAlbum() throws IOException, URISyntaxException { + AllporncomicRipper ripper = new AllporncomicRipper(new URI("https://allporncomic.com/porncomic/dnd-pvp-dungeons-dragons-fred-perry/1-dnd-pvp").toURL()); testRipper(ripper); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ArtAlleyRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ArtAlleyRipperTest.java index 693ce619..63b9d69b 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ArtAlleyRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ArtAlleyRipperTest.java @@ -1,15 +1,18 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.ArtAlleyRipper; +import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; public class ArtAlleyRipperTest extends RippersTest { @Test - public void testRip() throws IOException { - 
ArtAlleyRipper ripper = new ArtAlleyRipper(new URL("https://artalley.social/@curator/media")); + @Disabled("website switched off") + public void testRip() throws IOException, URISyntaxException { + ArtAlleyRipper ripper = new ArtAlleyRipper(new URI("https://artalley.social/@curator/media").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ArtStationRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ArtStationRipperTest.java index d7cf6cdf..6450cad1 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ArtStationRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ArtStationRipperTest.java @@ -1,23 +1,26 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; import com.rarchives.ripme.ripper.rippers.ArtStationRipper; -import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class ArtStationRipperTest extends RippersTest { @Test - public void testArtStationProjects() throws IOException { + @Tag("flaky") + public void testArtStationProjects() throws IOException, URISyntaxException { List contentURLs = new ArrayList<>(); - contentURLs.add(new URL("https://www.artstation.com/artwork/the-dwarf-mortar")); - contentURLs.add(new URL("https://www.artstation.com/artwork/K36GR")); - contentURLs.add(new URL("http://artstation.com/artwork/5JJQw")); + contentURLs.add(new URI("https://www.artstation.com/artwork/the-dwarf-mortar").toURL()); + contentURLs.add(new URI("https://www.artstation.com/artwork/K36GR").toURL()); + contentURLs.add(new URI("http://artstation.com/artwork/5JJQw").toURL()); for (URL url : contentURLs) { ArtStationRipper ripper = new ArtStationRipper(url); testRipper(ripper); @@ -25,12 +28,12 @@ public class ArtStationRipperTest extends RippersTest { } 
@Test - @Disabled("Failed with cloudflare protection") - public void testArtStationUserProfiles() throws IOException { + @Tag("flaky") + public void testArtStationUserProfiles() throws IOException, URISyntaxException { List contentURLs = new ArrayList<>(); - contentURLs.add(new URL("https://www.artstation.com/heitoramatsu")); - contentURLs.add(new URL("https://artstation.com/kuvshinov_ilya")); - contentURLs.add(new URL("http://artstation.com/givemeapiggy")); + contentURLs.add(new URI("https://www.artstation.com/heitoramatsu").toURL()); + contentURLs.add(new URI("https://artstation.com/kuvshinov_ilya").toURL()); + contentURLs.add(new URI("http://artstation.com/givemeapiggy").toURL()); for (URL url : contentURLs) { ArtStationRipper ripper = new ArtStationRipper(url); testRipper(ripper); diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ArtstnRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ArtstnRipperTest.java index 7ce919da..ee8621c2 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ArtstnRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ArtstnRipperTest.java @@ -1,24 +1,28 @@ -package com.rarchives.ripme.tst.ripper.rippers; - -import java.io.IOException; -import java.net.URL; - -import com.rarchives.ripme.ripper.rippers.ArtstnRipper; - -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; - -public class ArtstnRipperTest extends RippersTest { - @Test - public void testSingleProject() throws IOException { - URL url = new URL("https://artstn.co/p/JlE15Z"); - testRipper(new ArtstnRipper(url)); - } - - @Test - @Disabled("Failed with cloudflare protection") - public void testUserPortfolio() throws IOException { - URL url = new URL("https://artstn.co/m/rv37"); - testRipper(new ArtstnRipper(url)); - } -} +package com.rarchives.ripme.tst.ripper.rippers; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; + +import 
com.rarchives.ripme.ripper.rippers.ArtstnRipper; + +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +public class ArtstnRipperTest extends RippersTest { + @Test + @Tag("flaky") + public void testSingleProject() throws IOException, URISyntaxException { + URL url = new URI("https://artstn.co/p/JlE15Z").toURL(); + testRipper(new ArtstnRipper(url)); + } + + @Test + @Disabled("Failed with cloudflare protection") + public void testUserPortfolio() throws IOException, URISyntaxException { + URL url = new URI("https://artstn.co/m/rv37").toURL(); + testRipper(new ArtstnRipper(url)); + } +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BaraagRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BaraagRipperTest.java index 57105a9a..7b987b74 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BaraagRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BaraagRipperTest.java @@ -1,15 +1,18 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.BaraagRipper; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class BaraagRipperTest extends RippersTest { @Test - public void testRip() throws IOException { - BaraagRipper ripper = new BaraagRipper(new URL("https://baraag.net/@darkshadow777/media")); + @Tag("flaky") + public void testRip() throws IOException, URISyntaxException { + BaraagRipper ripper = new BaraagRipper(new URI("https://baraag.net/@darkshadow777/media").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BatoRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BatoRipperTest.java index 635c7ecd..6849f0e1 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BatoRipperTest.java +++ 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BatoRipperTest.java @@ -1,31 +1,37 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.BatoRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; public class BatoRipperTest extends RippersTest { @Test - public void testRip() throws IOException { - BatoRipper ripper = new BatoRipper(new URL("https://bato.to/chapter/1207152")); + @Disabled("cloudlare? gets unavailable in test but works in browser") + public void testRip() throws IOException, URISyntaxException { + BatoRipper ripper = new BatoRipper(new URI("https://bato.to/chapter/1207152").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("https://bato.to/chapter/1207152"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://bato.to/chapter/1207152").toURL(); BatoRipper ripper = new BatoRipper(url); - assertEquals("1207152", ripper.getGID(url)); + Assertions.assertEquals("1207152", ripper.getGID(url)); } @Test - public void testGetAlbumTitle() throws IOException { - URL url = new URL("https://bato.to/chapter/1207152"); + @Disabled("cloudlare? 
gets unavailable in test but works in browser") + public void testGetAlbumTitle() throws IOException, URISyntaxException { + URL url = new URI("https://bato.to/chapter/1207152").toURL(); BatoRipper ripper = new BatoRipper(url); - assertEquals("bato_1207152_I_Messed_Up_by_Teaching_at_a_Black_Gyaru_School!_Ch.2", ripper.getAlbumTitle(url)); + Assertions.assertEquals("bato_1207152_I_Messed_Up_by_Teaching_at_a_Black_Gyaru_School!_Ch.2", ripper.getAlbumTitle(url)); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BcfakesRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BcfakesRipperTest.java index 8c31ffd4..3140c056 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BcfakesRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BcfakesRipperTest.java @@ -1,7 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.BcfakesRipper; @@ -11,8 +12,8 @@ import org.junit.jupiter.api.Test; public class BcfakesRipperTest extends RippersTest { @Test @Disabled("21/06/2018 This test was disbaled as the site has experienced notable downtime") - public void testRip() throws IOException { - BcfakesRipper ripper = new BcfakesRipper(new URL("http://www.bcfakes.com/celebritylist/olivia-wilde/")); + public void testRip() throws IOException, URISyntaxException { + BcfakesRipper ripper = new BcfakesRipper(new URI("http://www.bcfakes.com/celebritylist/olivia-wilde/").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BlackbrickroadofozRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BlackbrickroadofozRipperTest.java index 0bf11d58..c28cc52d 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BlackbrickroadofozRipperTest.java +++ 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BlackbrickroadofozRipperTest.java @@ -6,14 +6,15 @@ import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; public class BlackbrickroadofozRipperTest extends RippersTest { @Test @Disabled("Commented out on 02/04/2019 because the serve has been down for a while") - public void testRip() throws IOException { + public void testRip() throws IOException, URISyntaxException { BlackbrickroadofozRipper ripper = new BlackbrickroadofozRipper( - new URL("http://www.blackbrickroadofoz.com/comic/beginning")); + new URI("http://www.blackbrickroadofoz.com/comic/beginning").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BooruRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BooruRipperTest.java index 31041175..89efef4e 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BooruRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BooruRipperTest.java @@ -1,19 +1,64 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; +import java.util.ArrayList; +import java.util.List; import com.rarchives.ripme.ripper.rippers.BooruRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; public class BooruRipperTest extends RippersTest { - public void testRip() throws IOException { - BooruRipper ripper = new BooruRipper(new URL("http://xbooru.com/index.php?page=post&s=list&tags=furry")); - testRipper(ripper); + @Test + @Tag("flaky") + public void testRip() throws IOException, URISyntaxException { + List passURLs = new ArrayList<>(); + passURLs.add(new URI("https://xbooru.com/index.php?page=post&s=list&tags=furry").toURL()); + passURLs.add(new 
URI("https://gelbooru.com/index.php?page=post&s=list&tags=animal_ears").toURL()); + + for (URL url : passURLs) { + BooruRipper ripper = new BooruRipper(url); + testRipper(ripper); + } } - public void testGetGID() throws IOException { - URL url = new URL("http://xbooru.com/index.php?page=post&s=list&tags=furry"); - BooruRipper ripper = new BooruRipper(url); - assertEquals("furry", ripper.getGID(url)); + @Test + public void testGetGID() throws IOException, URISyntaxException { + URL xbooruUrl = new URI("https://xbooru.com/index.php?page=post&s=list&tags=furry").toURL(); + URL gelbooruUrl = new URI("https://gelbooru.com/index.php?page=post&s=list&tags=animal_ears").toURL(); + + BooruRipper xbooruRipper = new BooruRipper(xbooruUrl); + BooruRipper gelbooruRipper = new BooruRipper(gelbooruUrl); + + Assertions.assertEquals("furry", xbooruRipper.getGID(xbooruUrl)); + Assertions.assertEquals("animal_ears", gelbooruRipper.getGID(gelbooruUrl)); + } + + @Test + public void testGetDomain() throws IOException, URISyntaxException { + URL xbooruUrl = new URI("https://xbooru.com/index.php?page=post&s=list&tags=furry").toURL(); + URL gelbooruUrl = new URI("https://gelbooru.com/index.php?page=post&s=list&tags=animal_ears").toURL(); + + BooruRipper xbooruRipper = new BooruRipper(xbooruUrl); + BooruRipper gelbooruRipper = new BooruRipper(gelbooruUrl); + + Assertions.assertEquals("xbooru.com", xbooruRipper.getDomain()); + Assertions.assertEquals("gelbooru.com", gelbooruRipper.getDomain()); + } + + @Test + public void testGetHost() throws IOException, URISyntaxException { + URL xbooruUrl = new URI("https://xbooru.com/index.php?page=post&s=list&tags=furry").toURL(); + URL gelbooruUrl = new URI("https://gelbooru.com/index.php?page=post&s=list&tags=animal_ears").toURL(); + + BooruRipper xbooruRipper = new BooruRipper(xbooruUrl); + BooruRipper gelbooruRipper = new BooruRipper(gelbooruUrl); + + Assertions.assertEquals("xbooru", xbooruRipper.getHost()); + Assertions.assertEquals("gelbooru", 
gelbooruRipper.getHost()); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CfakeRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CfakeRipperTest.java index 3e6dad94..95f7ec2e 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CfakeRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CfakeRipperTest.java @@ -1,13 +1,14 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.CfakeRipper; public class CfakeRipperTest extends RippersTest { - public void testRip() throws IOException { - CfakeRipper ripper = new CfakeRipper(new URL("http://cfake.com/picture/Zooey_Deschanel/1264")); + public void testRip() throws IOException, URISyntaxException { + CfakeRipper ripper = new CfakeRipper(new URI("http://cfake.com/picture/Zooey_Deschanel/1264").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ChanRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ChanRipperTest.java index 22995002..ed023d47 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ChanRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ChanRipperTest.java @@ -1,83 +1,89 @@ package com.rarchives.ripme.tst.ripper.rippers; -import java.io.File; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import com.rarchives.ripme.ripper.rippers.ChanRipper; import com.rarchives.ripme.ripper.rippers.ripperhelpers.ChanSite; import com.rarchives.ripme.utils.Http; 
import org.jsoup.nodes.Document; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class ChanRipperTest extends RippersTest { @Test - public void testChanURLFailures() throws IOException { - List failURLs = new ArrayList<>(); - // URLs that should not work - for (URL url : failURLs) { - try { - new ChanRipper(url); - fail("Instantiated ripper for URL that should not work: " + url); - } catch (Exception e) { - // Expected - } - } - } - @Test - public void testChanURLPasses() throws IOException { + @Tag("flaky") + public void testChanURLPasses() throws IOException, URISyntaxException { List passURLs = new ArrayList<>(); // URLs that should work - passURLs.add(new URL("http://desuchan.net/v/res/7034.html")); - passURLs.add(new URL("https://boards.4chan.org/hr/thread/3015701")); - passURLs.add(new URL("https://boards.420chan.org/420/res/232066.php")); - passURLs.add(new URL("http://7chan.org/gif/res/25873.html")); - passURLs.add(new URL("https://rbt.asia/g/thread/70643087/")); //must work with TLDs with len of 4 + passURLs.add(new URI("http://desuchan.net/v/res/7034.html").toURL()); + passURLs.add(new URI("https://boards.4chan.org/hr/thread/3015701").toURL()); + passURLs.add(new URI("https://boards.420chan.org/420/res/232066.php").toURL()); + passURLs.add(new URI("http://7chan.org/gif/res/25873.html").toURL()); + passURLs.add(new URI("https://rbt.asia/g/thread/70643087/").toURL()); //must work with TLDs with len of 4 for (URL url : passURLs) { ChanRipper ripper = new ChanRipper(url); - ripper.setup(); + // Use CompletableFuture to run setup() asynchronously + CompletableFuture setupFuture = CompletableFuture.runAsync(() -> { + try { + ripper.setup(); + } catch (IOException | URISyntaxException e) { + throw new RuntimeException(e); + } + }); + + try { + // Wait for up to 5 seconds for setup() to complete + setupFuture.get(5, TimeUnit.SECONDS); + } catch (InterruptedException | ExecutionException | + 
TimeoutException e) { + e.printStackTrace(); // Handle exceptions as needed + } assert (ripper.canRip(url)); - assertNotNull("Ripper for " + url + " did not have a valid working directory.", ripper.getWorkingDir()); + Assertions.assertNotNull(ripper.getWorkingDir(), "Ripper for " + url + " did not have a valid working directory."); deleteDir(ripper.getWorkingDir()); } } @Test - public void testChanStringParsing() throws IOException { + public void testChanStringParsing() throws IOException, URISyntaxException { List site1 = Arrays.asList("site1.com"); List site1Cdns = Arrays.asList("cnd1.site1.com", "cdn2.site2.biz"); List site2 = Arrays.asList("site2.co.uk"); List site2Cdns = Arrays.asList("cdn.site2.co.uk"); - ChanRipper ripper = new ChanRipper(new URL("http://desuchan.net/v/res/7034.html")); + ChanRipper ripper = new ChanRipper(new URI("http://desuchan.net/v/res/7034.html").toURL()); List chansFromConfig = ripper .getChansFromConfig("site1.com[cnd1.site1.com|cdn2.site2.biz],site2.co.uk[cdn.site2.co.uk]"); - assertEquals(chansFromConfig.get(0).getDomains(), site1); - assertEquals(chansFromConfig.get(0).getCdns(), site1Cdns); + Assertions.assertEquals(chansFromConfig.get(0).getDomains(), site1); + Assertions.assertEquals(chansFromConfig.get(0).getCdns(), site1Cdns); - assertEquals(chansFromConfig.get(1).getDomains(), site2); - assertEquals(chansFromConfig.get(1).getCdns(), site2Cdns); + Assertions.assertEquals(chansFromConfig.get(1).getDomains(), site2); + Assertions.assertEquals(chansFromConfig.get(1).getCdns(), site2Cdns); } @Test - public void testChanRipper() throws IOException { + public void testChanRipper() throws IOException, URISyntaxException { List contentURLs = new ArrayList<>(); - contentURLs.add(new URL(getRandomThreadDesuarchive())); + contentURLs.add(getRandomThreadDesuarchive()); for (URL url : contentURLs) { ChanRipper ripper = new ChanRipper(url); testChanRipper(ripper); } } - /** - * - * @return String returns a url to a active desuarchive.org 
tread as a string - */ - public String getRandomThreadDesuarchive() { + public URL getRandomThreadDesuarchive() throws URISyntaxException { try { - Document doc = Http.url(new URL("https://desuarchive.org/wsg/")).get(); - return doc.select("div.post_data > a").first().attr("href"); + Document doc = Http.url(new URI("https://desuarchive.org/wsg/").toURL()).get(); + return new URI(doc.select("div.post_data > a").first().attr("href")).toURL(); } catch (IOException e) { e.printStackTrace(); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CheveretoRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CheveretoRipperTest.java index 385464da..6d893527 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CheveretoRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CheveretoRipperTest.java @@ -1,20 +1,24 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.CheveretoRipper; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class CheveretoRipperTest extends RippersTest { @Test - public void testTagFox() throws IOException { - CheveretoRipper ripper = new CheveretoRipper(new URL("http://tag-fox.com/album/Thjb")); + @Tag("flaky") + public void testTagFox() throws IOException, URISyntaxException { + CheveretoRipper ripper = new CheveretoRipper(new URI("http://tag-fox.com/album/Thjb").toURL()); testRipper(ripper); } @Test - public void testSubdirAlbum() throws IOException { - CheveretoRipper ripper = new CheveretoRipper(new URL("https://kenzato.uk/booru/album/TnEc")); + @Tag("flaky") + public void testSubdirAlbum() throws IOException, URISyntaxException { + CheveretoRipper ripper = new CheveretoRipper(new URI("https://kenzato.uk/booru/album/TnEc").toURL()); testRipper(ripper); } } diff --git 
a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ComicextraRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ComicextraRipperTest.java index ffe0f12b..e01ae6e0 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ComicextraRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ComicextraRipperTest.java @@ -1,22 +1,28 @@ -package com.rarchives.ripme.tst.ripper.rippers; - -import java.io.IOException; -import java.net.URL; -import com.rarchives.ripme.ripper.rippers.ComicextraRipper; -import org.junit.jupiter.api.Test; - -public class ComicextraRipperTest extends RippersTest { - @Test - public void testComicUrl() throws IOException { - URL url = new URL("https://www.comicextra.com/comic/karma-police"); - ComicextraRipper ripper = new ComicextraRipper(url); - testRipper(ripper); - } - @Test - public void testChapterUrl() throws IOException { - URL url = new URL("https://www.comicextra.com/v-for-vendetta/chapter-1"); - ComicextraRipper ripper = new ComicextraRipper(url); - testRipper(ripper); - } - -} +package com.rarchives.ripme.tst.ripper.rippers; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import com.rarchives.ripme.ripper.rippers.ComicextraRipper; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +public class ComicextraRipperTest extends RippersTest { + @Test + @Tag("flaky") + public void testComicUrl() throws IOException, URISyntaxException { + URL url = new URI("https://www.comicextra.com/comic/karma-police").toURL(); + ComicextraRipper ripper = new ComicextraRipper(url); + testRipper(ripper); + } + @Test + @Disabled("no images found error, broken ripper?") + public void testChapterUrl() throws IOException, URISyntaxException { + URL url = new URI("https://www.comicextra.com/v-for-vendetta/chapter-1").toURL(); + ComicextraRipper ripper = new ComicextraRipper(url); + 
testRipper(ripper); + } + +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CoomerPartyRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CoomerPartyRipperTest.java new file mode 100644 index 00000000..c35822a8 --- /dev/null +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CoomerPartyRipperTest.java @@ -0,0 +1,40 @@ +package com.rarchives.ripme.tst.ripper.rippers; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import com.rarchives.ripme.ripper.rippers.CoomerPartyRipper; + +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; + +public class CoomerPartyRipperTest extends RippersTest { + @Test + public void testRip() throws IOException, URISyntaxException { + URL url = new URI("https://coomer.su/onlyfans/user/soogsx").toURL(); + CoomerPartyRipper ripper = new CoomerPartyRipper(url); + testRipper(ripper); + } + + @Test + public void testUrlParsing() throws IOException, URISyntaxException { + String expectedGid = "onlyfans_soogsx"; + String[] urls = new String[]{ + "https://coomer.su/onlyfans/user/soogsx", // normal url + "http://coomer.su/onlyfans/user/soogsx", // http, not https + "https://coomer.su/onlyfans/user/soogsx/", // with slash at the end + "https://coomer.su/onlyfans/user/soogsx?whatever=abc", // with url params + "https://coomer.party/onlyfans/user/soogsx", // alternate domain + }; + for (String stringUrl : urls) { + URL url = new URI(stringUrl).toURL(); + CoomerPartyRipper ripper = new CoomerPartyRipper(url); + assertTrue(ripper.canRip(url)); + assertEquals(expectedGid, ripper.getGID(url)); + } + } +} \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CyberdropRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CyberdropRipperTest.java new file mode 100644 index 
00000000..14fcef07 --- /dev/null +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CyberdropRipperTest.java @@ -0,0 +1,55 @@ +package com.rarchives.ripme.tst.ripper.rippers; + +import com.rarchives.ripme.ripper.rippers.CyberdropRipper; +import com.rarchives.ripme.utils.Http; +import org.jsoup.nodes.Document; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class CyberdropRipperTest extends RippersTest { + @Test + public void testScrolllerGID() throws IOException, URISyntaxException { + Map testURLs = new HashMap<>(); + + testURLs.put(new URI("https://cyberdrop.me/a/n4umdBjw").toURL(), "n4umdBjw"); + testURLs.put(new URI("https://cyberdrop.me/a/iLtp4BjW").toURL(), "iLtp4BjW"); + for (URL url : testURLs.keySet()) { + CyberdropRipper ripper = new CyberdropRipper(url); + ripper.setup(); + Assertions.assertEquals(testURLs.get(url), ripper.getGID(ripper.getURL())); + deleteDir(ripper.getWorkingDir()); + } + } + + @Test + @Tag("flaky") + public void testCyberdropNumberOfFiles() throws IOException, URISyntaxException { + List testURLs = new ArrayList(); + + testURLs.add(new URI("https://cyberdrop.me/a/n4umdBjw").toURL()); + testURLs.add(new URI("https://cyberdrop.me/a/iLtp4BjW").toURL()); + for (URL url : testURLs) { + Assertions.assertTrue(willDownloadAllFiles(url)); + } + } + + public boolean willDownloadAllFiles(URL url) throws IOException { + Document doc = Http.url(url).get(); + long numberOfLinks = doc.getElementsByClass("image").stream().count(); + int numberOfFiles = Integer.parseInt(doc.getElementById("totalFilesAmount").text()); + return numberOfLinks == numberOfFiles; + } + + + +} \ No newline at end of file diff --git 
a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DanbooruRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DanbooruRipperTest.java new file mode 100644 index 00000000..dd6e7163 --- /dev/null +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DanbooruRipperTest.java @@ -0,0 +1,49 @@ +package com.rarchives.ripme.tst.ripper.rippers; + +import com.rarchives.ripme.ripper.rippers.DanbooruRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; +import java.util.List; + +public class DanbooruRipperTest extends RippersTest { + @Test + @Tag("flaky") + public void testRip() throws IOException, URISyntaxException { + List passURLs = new ArrayList<>(); + passURLs.add(new URI("https://danbooru.donmai.us/posts?tags=brown_necktie").toURL()); + passURLs.add(new URI("https://danbooru.donmai.us/posts?page=1&tags=pink_sweater_vest").toURL()); + + for (URL url : passURLs) { + DanbooruRipper danbooruRipper = new DanbooruRipper(url); + testRipper(danbooruRipper); + } + } + + @Test + public void testGetGID() throws IOException, URISyntaxException { + URL danBooruUrl = new URI("https://danbooru.donmai.us/posts?tags=brown_necktie").toURL(); + URL danBooruUrl2 = new URI("https://danbooru.donmai.us/posts?page=1&tags=pink_sweater_vest").toURL(); + + DanbooruRipper danbooruRipper = new DanbooruRipper(danBooruUrl); + DanbooruRipper danbooruRipper2 = new DanbooruRipper(danBooruUrl2); + + Assertions.assertEquals("brown_necktie", danbooruRipper.getGID(danBooruUrl)); + Assertions.assertEquals("pink_sweater_vest", danbooruRipper2.getGID(danBooruUrl2)); + } + + @Test + public void testGetHost() throws IOException, URISyntaxException { + URL danBooruUrl = new URI("https://danbooru.donmai.us/posts?tags=brown_necktie").toURL(); + + DanbooruRipper danbooruRipper = new 
DanbooruRipper(danBooruUrl); + + Assertions.assertEquals("danbooru", danbooruRipper.getHost()); + } +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DeviantartRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DeviantartRipperTest.java index adf15442..23dbe679 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DeviantartRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DeviantartRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -8,36 +10,38 @@ import java.util.List; import com.rarchives.ripme.ripper.rippers.DeviantartRipper; import com.rarchives.ripme.utils.Http; import org.jsoup.nodes.Document; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; public class DeviantartRipperTest extends RippersTest { @Test @Disabled("Broken ripper") - public void testDeviantartAlbum() throws IOException { - DeviantartRipper ripper = new DeviantartRipper(new URL("https://www.deviantart.com/airgee/gallery/")); + public void testDeviantartAlbum() throws IOException, URISyntaxException { + DeviantartRipper ripper = new DeviantartRipper(new URI("https://www.deviantart.com/airgee/gallery/").toURL()); testRipper(ripper); } @Test @Disabled("Broken ripper") - public void testDeviantartNSFWAlbum() throws IOException { + public void testDeviantartNSFWAlbum() throws IOException, URISyntaxException { // NSFW gallery - DeviantartRipper ripper = new DeviantartRipper(new URL("https://www.deviantart.com/faterkcx/gallery/")); + DeviantartRipper ripper = new DeviantartRipper(new URI("https://www.deviantart.com/faterkcx/gallery/").toURL()); testRipper(ripper); } @Test @Disabled("Broken ripper") - public void testGetGID() throws IOException { - URL url = new 
URL("https://www.deviantart.com/airgee/gallery/"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://www.deviantart.com/airgee/gallery/").toURL(); DeviantartRipper ripper = new DeviantartRipper(url); - assertEquals("airgee", ripper.getGID(url)); + Assertions.assertEquals("airgee", ripper.getGID(url)); } @Test - public void testGetGalleryIDAndUsername() throws IOException { - URL url = new URL("https://www.deviantart.com/airgee/gallery/"); + @Disabled("Broken ripper") + public void testGetGalleryIDAndUsername() throws IOException, URISyntaxException { + URL url = new URI("https://www.deviantart.com/airgee/gallery/").toURL(); DeviantartRipper ripper = new DeviantartRipper(url); Document doc = Http.url(url).get(); // Had to comment because of refactoring/style change @@ -47,15 +51,15 @@ public class DeviantartRipperTest extends RippersTest { @Test @Disabled("Broken ripper") - public void testSanitizeURL() throws IOException { + public void testSanitizeURL() throws IOException, URISyntaxException { List urls = new ArrayList(); - urls.add(new URL("https://www.deviantart.com/airgee/")); - urls.add(new URL("https://www.deviantart.com/airgee")); - urls.add(new URL("https://www.deviantart.com/airgee/gallery/")); + urls.add(new URI("https://www.deviantart.com/airgee/").toURL()); + urls.add(new URI("https://www.deviantart.com/airgee").toURL()); + urls.add(new URI("https://www.deviantart.com/airgee/gallery/").toURL()); for (URL url : urls) { DeviantartRipper ripper = new DeviantartRipper(url); - assertEquals("https://www.deviantart.com/airgee/gallery/", ripper.sanitizeURL(url).toExternalForm()); + Assertions.assertEquals("https://www.deviantart.com/airgee/gallery/", ripper.sanitizeURL(url).toExternalForm()); } } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DrawcrowdRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DrawcrowdRipperTest.java deleted file mode 100644 index b326d365..00000000 
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DrawcrowdRipperTest.java +++ /dev/null @@ -1,19 +0,0 @@ -package com.rarchives.ripme.tst.ripper.rippers; - -import java.io.IOException; -import java.net.URL; - -import com.rarchives.ripme.ripper.rippers.DrawcrowdRipper; - -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; - -public class DrawcrowdRipperTest extends RippersTest { - @Test - @Disabled("https://github.com/RipMeApp/ripme/issues/304 -- Drawcrowd broken (site changed)") - public void testRip() throws IOException { - DrawcrowdRipper ripper = new DrawcrowdRipper(new URL("https://drawcrowd.com/rabbiteyes")); - testRipper(ripper); - } - -} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DribbbleRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DribbbleRipperTest.java index 36b56d6a..4a6bf37b 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DribbbleRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DribbbleRipperTest.java @@ -1,15 +1,18 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.DribbbleRipper; +import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; public class DribbbleRipperTest extends RippersTest { @Test - public void testDribbbleRip() throws IOException { - DribbbleRipper ripper = new DribbbleRipper(new URL("https://dribbble.com/typogriff")); + @Disabled("test or ripper broken") + public void testDribbbleRip() throws IOException, URISyntaxException { + DribbbleRipper ripper = new DribbbleRipper(new URI("https://dribbble.com/typogriff").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DuckmoviesRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DuckmoviesRipperTest.java deleted file mode 100644 index 
e4b17cb1..00000000 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DuckmoviesRipperTest.java +++ /dev/null @@ -1,20 +0,0 @@ -package com.rarchives.ripme.tst.ripper.rippers; - -import com.rarchives.ripme.ripper.rippers.DuckmoviesRipper; - -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; - -import java.io.IOException; -import java.net.URL; - -public class DuckmoviesRipperTest extends RippersTest { - @Test - @Disabled("Broken ripper") - public void testRip() throws IOException { - DuckmoviesRipper ripper = new DuckmoviesRipper( - new URL("https://palapaja.com/spyfam-stepbro-gives-in-to-stepsis-asian-persuasion/")); - testRipper(ripper); - } - -} \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DynastyscansRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DynastyscansRipperTest.java index 8eb8d88f..a2855e98 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DynastyscansRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DynastyscansRipperTest.java @@ -1,18 +1,25 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.DynastyscansRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; public class DynastyscansRipperTest extends RippersTest { - public void testRip() throws IOException { - DynastyscansRipper ripper = new DynastyscansRipper(new URL("https://dynasty-scans.com/chapters/under_one_roof_ch01")); + @Test + @Tag("flaky") + public void testRip() throws IOException, URISyntaxException { + DynastyscansRipper ripper = new DynastyscansRipper(new URI("https://dynasty-scans.com/chapters/under_one_roof_ch01").toURL()); testRipper(ripper); } - public void testGetGID() throws IOException { - DynastyscansRipper ripper = new 
DynastyscansRipper(new URL("https://dynasty-scans.com/chapters/under_one_roof_ch01")); - assertEquals("under_one_roof_ch01", ripper.getGID(new URL("https://dynasty-scans.com/chapters/under_one_roof_ch01"))); + @Test + public void testGetGID() throws IOException, URISyntaxException { + DynastyscansRipper ripper = new DynastyscansRipper(new URI("https://dynasty-scans.com/chapters/under_one_roof_ch01").toURL()); + Assertions.assertEquals("under_one_roof_ch01", ripper.getGID(new URI("https://dynasty-scans.com/chapters/under_one_roof_ch01").toURL())); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/E621RipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/E621RipperTest.java index 5f7165b2..4859ade0 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/E621RipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/E621RipperTest.java @@ -1,24 +1,29 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.E621Ripper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class E621RipperTest extends RippersTest { - public void testRip() throws IOException { - E621Ripper ripper = new E621Ripper(new URL("https://e621.net/post/index/1/beach")); + public void testRip() throws IOException, URISyntaxException { + E621Ripper ripper = new E621Ripper(new URI("https://e621.net/posts?tags=beach").toURL()); testRipper(ripper); } @Test - public void testFlashOrWebm() throws IOException { - E621Ripper ripper = new E621Ripper(new URL("https://e621.net/post/index/1/gif")); + @Tag("flaky") + public void testFlashOrWebm() throws IOException, URISyntaxException { + E621Ripper ripper = new E621Ripper(new URI("https://e621.net/posts?page=4&tags=gif+rating%3As+3d").toURL()); testRipper(ripper); } @Test - public void 
testGetNextPage() throws IOException { - E621Ripper nextPageRipper = new E621Ripper(new URL("https://e621.net/post/index/1/cosmicminerals")); + @Tag("flaky") + public void testGetNextPage() throws IOException, URISyntaxException { + E621Ripper nextPageRipper = new E621Ripper(new URI("https://e621.net/posts?tags=cosmicminerals").toURL()); try { nextPageRipper.getNextPage(nextPageRipper.getFirstPage()); assert (true); @@ -26,11 +31,41 @@ public class E621RipperTest extends RippersTest { throw e; } - E621Ripper noNextPageRipper = new E621Ripper(new URL("https://e621.net/post/index/1/cosmicminerals")); + E621Ripper noNextPageRipper = new E621Ripper(new URI("https://e621.net/post/index/1/cosmicminerals").toURL()); try { noNextPageRipper.getNextPage(noNextPageRipper.getFirstPage()); } catch (IOException e) { - assertEquals(e.getMessage(), "No more pages."); + Assertions.assertEquals(e.getMessage(), "No more pages."); + } + } + @Test + @Tag("flaky") + public void testOldRip() throws IOException, URISyntaxException { + E621Ripper ripper = new E621Ripper(new URI("https://e621.net/post/index/1/beach").toURL()); + testRipper(ripper); + } + @Test + @Tag("flaky") + public void testOldFlashOrWebm() throws IOException, URISyntaxException { + E621Ripper ripper = new E621Ripper(new URI("https://e621.net/post/index/1/gif").toURL()); + testRipper(ripper); + } + @Test + @Tag("flaky") + public void testOldGetNextPage() throws IOException, URISyntaxException { + E621Ripper nextPageRipper = new E621Ripper(new URI("https://e621.net/post/index/1/cosmicminerals").toURL()); + try { + nextPageRipper.getNextPage(nextPageRipper.getFirstPage()); + assert (true); + } catch (IOException e) { + throw e; + } + + E621Ripper noNextPageRipper = new E621Ripper(new URI("https://e621.net/post/index/1/cosmicminerals").toURL()); + try { + noNextPageRipper.getNextPage(noNextPageRipper.getFirstPage()); + } catch (IOException e) { + Assertions.assertEquals(e.getMessage(), "No more pages."); } } } diff --git 
a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EhentaiRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EhentaiRipperTest.java index 78234951..a8393daf 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EhentaiRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EhentaiRipperTest.java @@ -1,34 +1,36 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.List; import com.rarchives.ripme.ripper.rippers.EHentaiRipper; import com.rarchives.ripme.utils.RipUtils; -import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.*; public class EhentaiRipperTest extends RippersTest { @Test - public void testEHentaiAlbum() throws IOException { - EHentaiRipper ripper = new EHentaiRipper(new URL("https://e-hentai.org/g/1144492/e823bdf9a5/")); + public void testEHentaiAlbum() throws IOException, URISyntaxException { + EHentaiRipper ripper = new EHentaiRipper(new URI("https://e-hentai.org/g/1144492/e823bdf9a5/").toURL()); testRipper(ripper); } // Test the tag black listing @Test - public void testTagBlackList() throws IOException { - URL url = new URL("https://e-hentai.org/g/1228503/1a2f455f96/"); + public void testTagBlackList() throws IOException, URISyntaxException { + URL url = new URI("https://e-hentai.org/g/1228503/1a2f455f96/").toURL(); EHentaiRipper ripper = new EHentaiRipper(url); List tagsOnPage = ripper.getTags(ripper.getFirstPage()); // Test multiple blacklisted tags String[] tags = {"test", "one", "yuri"}; String blacklistedTag = RipUtils.checkTags(tags, tagsOnPage); - assertEquals("yuri", blacklistedTag); + Assertions.assertEquals("yuri", blacklistedTag); // test tags with spaces in them String[] tags2 = {"test", "one", "midnight on mars"}; blacklistedTag = RipUtils.checkTags(tags2, tagsOnPage); - assertEquals("midnight on mars", blacklistedTag); + 
Assertions.assertEquals("midnight on mars", blacklistedTag); } -} \ No newline at end of file +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EightmusesRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EightmusesRipperTest.java index 2cdbcb2e..2a016b66 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EightmusesRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EightmusesRipperTest.java @@ -1,32 +1,36 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.EightmusesRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class EightmusesRipperTest extends RippersTest { @Test - public void testEightmusesAlbum() throws IOException { + @Tag("flaky") + public void testEightmusesAlbum() throws IOException, URISyntaxException { // A simple image album - EightmusesRipper ripper = new EightmusesRipper(new URL("https://www.8muses.com/comix/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore")); + EightmusesRipper ripper = new EightmusesRipper(new URI("https://www.8muses.com/comix/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore").toURL()); testRipper(ripper); // Test the new url format - ripper = new EightmusesRipper(new URL("https://www.8muses.com/comics/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore")); + ripper = new EightmusesRipper(new URI("https://www.8muses.com/comics/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore").toURL()); testRipper(ripper); // Test pages with subalbums - ripper = new EightmusesRipper(new URL("https://www.8muses.com/comix/album/Blacknwhitecomics_com-Comix/BlacknWhiteComics/The-Mayor")); + ripper = new EightmusesRipper(new 
URI("https://www.8muses.com/comix/album/Blacknwhitecomics_com-Comix/BlacknWhiteComics/The-Mayor").toURL()); testRipper(ripper); } @Test - public void testGID() throws IOException { - EightmusesRipper ripper = new EightmusesRipper(new URL("https://www.8muses.com/comix/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore")); - assertEquals("Affect3D-Comics", ripper.getGID(new URL("https://www.8muses.com/comics/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore"))); + public void testGID() throws IOException, URISyntaxException { + EightmusesRipper ripper = new EightmusesRipper(new URI("https://www.8muses.com/comix/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore").toURL()); + Assertions.assertEquals("Affect3D-Comics", ripper.getGID(new URI("https://www.8muses.com/comics/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore").toURL())); } @Test - public void testGetSubdir() throws IOException { - EightmusesRipper ripper = new EightmusesRipper(new URL("https://www.8muses.com/comix/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore")); - assertEquals("After-Party-Issue-1", ripper.getSubdir("After Party - Issue 1")); + public void testGetSubdir() throws IOException, URISyntaxException { + EightmusesRipper ripper = new EightmusesRipper(new URI("https://www.8muses.com/comix/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore").toURL()); + Assertions.assertEquals("After-Party-Issue-1", ripper.getSubdir("After Party - Issue 1")); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EroShareRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EroShareRipperTest.java index 780460ce..98d6be8f 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EroShareRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EroShareRipperTest.java @@ -1,7 +1,8 @@ package 
com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.EroShareRipper; import com.rarchives.ripme.ripper.rippers.RedditRipper; @@ -14,46 +15,46 @@ public class EroShareRipperTest extends RippersTest { // single image posts @Test @Disabled("https://github.com/RipMeApp/ripme/issues/306 : EroShareRipper broken (even for eroshae links)") - public void testImageEroshareFromRedditRip() throws IOException { - RedditRipper ripper = new RedditRipper(new URL( - "https://www.reddit.com/r/BestOfEroshare/comments/5z7foo/good_morning_who_likes_abstract_asian_artwork_f/")); + public void testImageEroshareFromRedditRip() throws IOException, URISyntaxException { + RedditRipper ripper = new RedditRipper(new URI( + "https://www.reddit.com/r/BestOfEroshare/comments/5z7foo/good_morning_who_likes_abstract_asian_artwork_f/").toURL()); testRipper(ripper); } @Test @Disabled("https://github.com/RipMeApp/ripme/issues/306 : EroShareRipper broken (even for eroshae links)") - public void testImageEroshareRip() throws IOException { - EroShareRipper ripper = new EroShareRipper(new URL("https://eroshare.com/i/5j2qln3f")); + public void testImageEroshareRip() throws IOException, URISyntaxException { + EroShareRipper ripper = new EroShareRipper(new URI("https://eroshare.com/i/5j2qln3f").toURL()); testRipper(ripper); } @Test @Disabled("https://github.com/RipMeApp/ripme/issues/306 : EroShareRipper broken (even for eroshae links)") - public void testImageEroshaeRip() throws IOException { - EroShareRipper ripper = new EroShareRipper(new URL("https://eroshae.com/i/5j2qln3f")); + public void testImageEroshaeRip() throws IOException, URISyntaxException { + EroShareRipper ripper = new EroShareRipper(new URI("https://eroshae.com/i/5j2qln3f").toURL()); testRipper(ripper); } // video album post @Test @Disabled("https://github.com/RipMeApp/ripme/issues/306 : EroShareRipper 
broken (even for eroshae links)") - public void testVideoAlbumFromRedditRip() throws IOException { - EroShareRipper ripper = new EroShareRipper(new URL( - "https://www.reddit.com/r/BestOfEroshare/comments/5vyfnw/asian_mf_heard_i_should_post_here_date_night_her/")); + public void testVideoAlbumFromRedditRip() throws IOException, URISyntaxException { + EroShareRipper ripper = new EroShareRipper(new URI( + "https://www.reddit.com/r/BestOfEroshare/comments/5vyfnw/asian_mf_heard_i_should_post_here_date_night_her/").toURL()); testRipper(ripper); } @Test @Disabled("https://github.com/RipMeApp/ripme/issues/306 : EroShareRipper broken (even for eroshae links)") - public void testVideoAlbumEroshareRip() throws IOException { - EroShareRipper ripper = new EroShareRipper(new URL("https://eroshare.com/wqnl6f00")); + public void testVideoAlbumEroshareRip() throws IOException, URISyntaxException { + EroShareRipper ripper = new EroShareRipper(new URI("https://eroshare.com/wqnl6f00").toURL()); testRipper(ripper); } @Test @Disabled("https://github.com/RipMeApp/ripme/issues/306 : EroShareRipper broken (even for eroshae links)") - public void testVideoAlbumEroshaeRip() throws IOException { - EroShareRipper ripper = new EroShareRipper(new URL("https://eroshae.com/wqnl6f00")); + public void testVideoAlbumEroshaeRip() throws IOException, URISyntaxException { + EroShareRipper ripper = new EroShareRipper(new URI("https://eroshae.com/wqnl6f00").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ErofusRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ErofusRipperTest.java index ab497433..a06f0e70 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ErofusRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ErofusRipperTest.java @@ -1,18 +1,26 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import 
java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.ErofusRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; public class ErofusRipperTest extends RippersTest { - public void testRip() throws IOException { - ErofusRipper ripper = new ErofusRipper(new URL("https://www.erofus.com/comics/be-story-club-comics/a-kiss/issue-1")); + @Test + @Tag("flaky") // if url does not exist, erofusripper test ends in out of memory + public void testRip() throws IOException, URISyntaxException { + ErofusRipper ripper = new ErofusRipper(new URI("https://www.erofus.com/comics/be-story-club-comics/a-kiss/issue-1").toURL()); testRipper(ripper); } - public void testGetGID() throws IOException { - ErofusRipper ripper = new ErofusRipper(new URL("https://www.erofus.com/comics/be-story-club-comics/a-kiss/issue-1")); - assertEquals("be-story-club-comics", ripper.getGID(new URL("https://www.erofus.com/comics/be-story-club-comics/a-kiss/issue-1"))); + @Test + @Tag("flaky") + public void testGetGID() throws IOException, URISyntaxException { + ErofusRipper ripper = new ErofusRipper(new URI("https://www.erofus.com/comics/be-story-club-comics/a-kiss/issue-1").toURL()); + Assertions.assertEquals("be-story-club-comics", ripper.getGID(new URI("https://www.erofus.com/comics/be-story-club-comics/a-kiss/issue-1").toURL())); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EromeRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EromeRipperTest.java index 4e7241af..18ddf4bb 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EromeRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EromeRipperTest.java @@ -1,47 +1,49 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import 
com.rarchives.ripme.ripper.rippers.EromeRipper; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; public class EromeRipperTest extends RippersTest { @Test - public void testGetGIDProfilePage() throws IOException { - URL url = new URL("https://www.erome.com/Jay-Jenna"); + public void testGetGIDProfilePage() throws IOException, URISyntaxException { + URL url = new URI("https://www.erome.com/Jay-Jenna").toURL(); EromeRipper ripper = new EromeRipper(url); - assertEquals("Jay-Jenna", ripper.getGID(url)); + Assertions.assertEquals("Jay-Jenna", ripper.getGID(url)); } @Test - public void testGetGIDAlbum() throws IOException { - URL url = new URL("https://www.erome.com/a/KbDAM1XT"); + public void testGetGIDAlbum() throws IOException, URISyntaxException { + URL url = new URI("https://www.erome.com/a/KbDAM1XT").toURL(); EromeRipper ripper = new EromeRipper(url); - assertEquals("KbDAM1XT", ripper.getGID(url)); + Assertions.assertEquals("KbDAM1XT", ripper.getGID(url)); } @Test - public void testGetAlbumsToQueue() throws IOException { - URL url = new URL("https://www.erome.com/Jay-Jenna"); + public void testGetAlbumsToQueue() throws IOException, URISyntaxException { + URL url = new URI("https://www.erome.com/Jay-Jenna").toURL(); EromeRipper ripper = new EromeRipper(url); assert (2 >= ripper.getAlbumsToQueue(ripper.getFirstPage()).size()); } @Test - public void testPageContainsAlbums() throws IOException { - URL url = new URL("https://www.erome.com/Jay-Jenna"); + public void testPageContainsAlbums() throws IOException, URISyntaxException { + URL url = new URI("https://www.erome.com/Jay-Jenna").toURL(); EromeRipper ripper = new EromeRipper(url); assert (ripper.pageContainsAlbums(url)); - assert (!ripper.pageContainsAlbums(new URL("https://www.erome.com/a/KbDAM1XT"))); + assert (!ripper.pageContainsAlbums(new URI("https://www.erome.com/a/KbDAM1XT").toURL())); } - public void testRip() throws IOException { - URL url = new 
URL("https://www.erome.com/a/vlefBdsg"); + public void testRip() throws IOException, URISyntaxException { + URL url = new URI("https://www.erome.com/a/vlefBdsg").toURL(); EromeRipper ripper = new EromeRipper(url); testRipper(ripper); } @Test - public void testGetURLsFromPage() throws IOException { - URL url = new URL("https://www.erome.com/a/Tak8F2h6"); + public void testGetURLsFromPage() throws IOException, URISyntaxException { + URL url = new URI("https://www.erome.com/a/Tak8F2h6").toURL(); EromeRipper ripper = new EromeRipper(url); assert (35 == ripper.getURLsFromPage(ripper.getFirstPage()).size()); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ErotivRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ErotivRipperTest.java index 432b7fb9..11f2b59f 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ErotivRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ErotivRipperTest.java @@ -1,28 +1,32 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.ErotivRipper; +import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; public class ErotivRipperTest extends RippersTest { @Test - public void testGetGID() throws IOException { - URL url = new URL("https://erotiv.io/e/1568314255"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://erotiv.io/e/1568314255").toURL(); ErotivRipper ripper = new ErotivRipper(url); assert("1568314255".equals(ripper.getGID(url))); } - public void testRip() throws IOException { - URL url = new URL("https://erotiv.io/e/1568314255"); + public void testRip() throws IOException, URISyntaxException { + URL url = new URI("https://erotiv.io/e/1568314255").toURL(); ErotivRipper ripper = new ErotivRipper(url); testRipper(ripper); } @Test - public void 
testGetURLsFromPage() throws IOException { - URL url = new URL("https://erotiv.io/e/1568314255"); + @Disabled("test or ripper broken") + public void testGetURLsFromPage() throws IOException, URISyntaxException { + URL url = new URI("https://erotiv.io/e/1568314255").toURL(); ErotivRipper ripper = new ErotivRipper(url); assert(1 == ripper.getURLsFromPage(ripper.getFirstPage()).size()); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FemjoyhunterRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FemjoyhunterRipperTest.java index 5520441a..3f295a2c 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FemjoyhunterRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FemjoyhunterRipperTest.java @@ -1,17 +1,20 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.FemjoyhunterRipper; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class FemjoyhunterRipperTest extends RippersTest { @Test - public void testRip() throws IOException { - FemjoyhunterRipper ripper = new FemjoyhunterRipper(new URL( - "https://www.femjoyhunter.com/alisa-i-got-nice-big-breasts-and-fine-ass-so-she-seems-to-be-a-hottest-brunette-5936/")); + @Tag("flaky") + public void testRip() throws IOException, URISyntaxException { + FemjoyhunterRipper ripper = new FemjoyhunterRipper(new URI( + "https://www.femjoyhunter.com/alisa-i-got-nice-big-breasts-and-fine-ass-so-she-seems-to-be-a-hottest-brunette-5936/").toURL()); testRipper(ripper); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FivehundredpxRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FivehundredpxRipperTest.java index 0392b36e..c4be94d9 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FivehundredpxRipperTest.java +++ 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FivehundredpxRipperTest.java @@ -1,7 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.FivehundredpxRipper; import org.junit.jupiter.api.Disabled; @@ -9,8 +10,8 @@ import org.junit.jupiter.api.Test; public class FivehundredpxRipperTest extends RippersTest { @Test @Disabled("Ripper is broken. See https://github.com/RipMeApp/ripme/issues/438") - public void test500pxAlbum() throws IOException { - FivehundredpxRipper ripper = new FivehundredpxRipper(new URL("https://marketplace.500px.com/alexander_hurman")); + public void test500pxAlbum() throws IOException, URISyntaxException { + FivehundredpxRipper ripper = new FivehundredpxRipper(new URI("https://marketplace.500px.com/alexander_hurman").toURL()); testRipper(ripper); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FlickrRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FlickrRipperTest.java index 02268d64..22a507be 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FlickrRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FlickrRipperTest.java @@ -1,19 +1,21 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.FlickrRipper; import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class FlickrRipperTest extends RippersTest { @Test - @Disabled("https://github.com/RipMeApp/ripme/issues/243") - public void testFlickrAlbum() throws IOException { + @Tag("slow") + public void testFlickrAlbum() throws IOException, URISyntaxException { FlickrRipper ripper = new FlickrRipper( - new 
URL("https://www.flickr.com/photos/leavingallbehind/sets/72157621895942720/")); + new URI("https://www.flickr.com/photos/leavingallbehind/sets/72157621895942720/").toURL()); testRipper(ripper); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FolioRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FolioRipperTest.java index 66a9c1ed..d4a51a68 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FolioRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FolioRipperTest.java @@ -1,26 +1,30 @@ -package com.rarchives.ripme.tst.ripper.rippers; - -import java.io.IOException; -import java.net.URL; -import com.rarchives.ripme.ripper.rippers.FolioRipper; - -import org.junit.jupiter.api.Test; - -public class FolioRipperTest extends RippersTest { - /** - * Test for folio.ink ripper - * @throws IOException - */ - @Test - public void testFolioRip() throws IOException { - FolioRipper ripper = new FolioRipper(new URL("https://folio.ink/DmBe6i")); - testRipper(ripper); - } - - @Test - public void testGetGID() throws IOException { - URL url = new URL("https://folio.ink/DmBe6i"); - FolioRipper ripper = new FolioRipper(url); - assertEquals("DmBe6i", ripper.getGID(url)); - } -} +package com.rarchives.ripme.tst.ripper.rippers; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import com.rarchives.ripme.ripper.rippers.FolioRipper; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +public class FolioRipperTest extends RippersTest { + /** + * Test for folio.ink ripper + */ + @Test + @Disabled("test or ripper broken") + public void testFolioRip() throws IOException, URISyntaxException { + FolioRipper ripper = new FolioRipper(new URI("https://folio.ink/DmBe6i").toURL()); + testRipper(ripper); + } + + @Test + public void testGetGID() throws IOException, URISyntaxException { + URL url = 
new URI("https://folio.ink/DmBe6i").toURL(); + FolioRipper ripper = new FolioRipper(url); + Assertions.assertEquals("DmBe6i", ripper.getGID(url)); + } +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FooktubeRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FooktubeRipperTest.java new file mode 100644 index 00000000..3e873ed6 --- /dev/null +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FooktubeRipperTest.java @@ -0,0 +1,19 @@ +package com.rarchives.ripme.tst.ripper.rippers; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +import com.rarchives.ripme.ripper.rippers.FooktubeRipper; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +public class FooktubeRipperTest extends RippersTest { + @Test + @Disabled("test or ripper broken") + public void testFooktubeVideo() throws IOException, URISyntaxException { + FooktubeRipper ripper = new FooktubeRipper(new URI("https://fooktube.com/video/641/in-the-cinema").toURL()); //pick any video from the front page + testRipper(ripper); + } + +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FuraffinityRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FuraffinityRipperTest.java index 1f397bcb..fd21aff8 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FuraffinityRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FuraffinityRipperTest.java @@ -1,34 +1,42 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.FuraffinityRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class FuraffinityRipperTest extends RippersTest { @Test - public void testFuraffinityAlbum() throws IOException { - FuraffinityRipper ripper = new 
FuraffinityRipper(new URL("https://www.furaffinity.net/gallery/spencerdragon/")); + @Tag("slow") + public void testFuraffinityAlbum() throws IOException, URISyntaxException { + FuraffinityRipper ripper = new FuraffinityRipper(new URI("https://www.furaffinity.net/gallery/spencerdragon/").toURL()); testRipper(ripper); } @Test - public void testFuraffinityScrap() throws IOException { - FuraffinityRipper ripper = new FuraffinityRipper(new URL("http://www.furaffinity.net/scraps/sssonic2/")); + @Tag("slow") + public void testFuraffinityScrap() throws IOException, URISyntaxException { + FuraffinityRipper ripper = new FuraffinityRipper(new URI("http://www.furaffinity.net/scraps/sssonic2/").toURL()); testRipper(ripper); } - public void testGetGID() throws IOException { - URL url = new URL("https://www.furaffinity.net/gallery/mustardgas/"); + @Test + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://www.furaffinity.net/gallery/mustardgas/").toURL(); FuraffinityRipper ripper = new FuraffinityRipper(url); - assertEquals("mustardgas", ripper.getGID(url)); + Assertions.assertEquals("mustardgas", ripper.getGID(url)); } @Test - public void testLogin() throws IOException { - URL url = new URL("https://www.furaffinity.net/gallery/mustardgas/"); + @Tag("flaky") + public void testLogin() throws IOException, URISyntaxException { + URL url = new URI("https://www.furaffinity.net/gallery/mustardgas/").toURL(); FuraffinityRipper ripper = new FuraffinityRipper(url); // Check if the first page contain the username of ripmes shared account - Boolean containsUsername = ripper.getFirstPage().html().contains("ripmethrowaway"); + boolean containsUsername = ripper.getFirstPage().html().contains("ripmethrowaway"); assert containsUsername; } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FuskatorRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FuskatorRipperTest.java index 334cd6ff..25656d79 100644 --- 
a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FuskatorRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FuskatorRipperTest.java @@ -1,20 +1,24 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.FuskatorRipper; +import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; public class FuskatorRipperTest extends RippersTest { @Test - public void testFuskatorAlbum() throws IOException { - FuskatorRipper ripper = new FuskatorRipper(new URL("https://fuskator.com/thumbs/hqt6pPXAf9z/Shaved-Blonde-Babe-Katerina-Ambre.html")); + @Disabled("test or ripper broken") + public void testFuskatorAlbum() throws IOException, URISyntaxException { + FuskatorRipper ripper = new FuskatorRipper(new URI("https://fuskator.com/thumbs/hqt6pPXAf9z/Shaved-Blonde-Babe-Katerina-Ambre.html").toURL()); testRipper(ripper); } @Test - public void testUrlsWithTiled() throws IOException { - FuskatorRipper ripper = new FuskatorRipper(new URL("https://fuskator.com/thumbs/hsrzk~UIFmJ/Blonde-Babe-Destiny-Dixon-Playing-With-Black-Dildo.html")); + @Disabled("test or ripper broken") + public void testUrlsWithTiled() throws IOException, URISyntaxException { + FuskatorRipper ripper = new FuskatorRipper(new URI("https://fuskator.com/thumbs/hsrzk~UIFmJ/Blonde-Babe-Destiny-Dixon-Playing-With-Black-Dildo.html").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GfycatRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GfycatRipperTest.java deleted file mode 100644 index 019350ad..00000000 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GfycatRipperTest.java +++ /dev/null @@ -1,47 +0,0 @@ -package com.rarchives.ripme.tst.ripper.rippers; - -import com.rarchives.ripme.ripper.rippers.GfycatRipper; -import org.junit.jupiter.api.Test; - -import 
java.io.IOException; -import java.net.URL; - - -public class GfycatRipperTest extends RippersTest { - - /** - * Rips correctly formatted URL directly from Gfycat - * @throws IOException - */ - @Test - public void testGfycatGoodURL() throws IOException{ - GfycatRipper ripper = new GfycatRipper(new URL("https://gfycat.com/TemptingExcellentIchthyosaurs")); - testRipper(ripper); - } - /** - * Rips badly formatted URL directly from Gfycat - * @throws IOException - */ - public void testGfycatBadURL() throws IOException { - GfycatRipper ripper = new GfycatRipper(new URL("https://gfycat.com/gifs/detail/limitedtestyamericancrow")); - testRipper(ripper); - } - - /** - * Rips a Gfycat profile - * @throws IOException - */ - public void testGfycatProfile() throws IOException { - GfycatRipper ripper = new GfycatRipper(new URL("https://gfycat.com/@golbanstorage")); - testRipper(ripper); - } - - /** - * Rips a Gfycat amp link - * @throws IOException - */ - public void testGfycatAmp() throws IOException { - GfycatRipper ripper = new GfycatRipper(new URL("https://gfycat.com/amp/TemptingExcellentIchthyosaurs")); - testRipper(ripper); - } -} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GfycatporntubeRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GfycatporntubeRipperTest.java index e53c78e6..ed000e1d 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GfycatporntubeRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GfycatporntubeRipperTest.java @@ -1,19 +1,27 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.GfycatporntubeRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; public class GfycatporntubeRipperTest extends RippersTest { - public void testRip() throws IOException { - 
GfycatporntubeRipper ripper = new GfycatporntubeRipper(new URL("https://gfycatporntube.com/blowjob-bunny-puts-on-a-show/")); + @Test + @Tag("flaky") + public void testRip() throws IOException, URISyntaxException { + GfycatporntubeRipper ripper = new GfycatporntubeRipper(new URI("https://gfycatporntube.com/blowjob-bunny-puts-on-a-show/").toURL()); testRipper(ripper); } - public void testGetGID() throws IOException { - URL url = new URL("https://gfycatporntube.com/blowjob-bunny-puts-on-a-show/"); + @Test + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://gfycatporntube.com/blowjob-bunny-puts-on-a-show/").toURL(); GfycatporntubeRipper ripper = new GfycatporntubeRipper(url); - assertEquals("blowjob-bunny-puts-on-a-show", ripper.getGID(url)); + Assertions.assertEquals("blowjob-bunny-puts-on-a-show", ripper.getGID(url)); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GirlsOfDesireRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GirlsOfDesireRipperTest.java index 07fb8616..59ba2184 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GirlsOfDesireRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GirlsOfDesireRipperTest.java @@ -1,15 +1,18 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.GirlsOfDesireRipper; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class GirlsOfDesireRipperTest extends RippersTest { @Test - public void testGirlsofdesireAlbum() throws IOException { - GirlsOfDesireRipper ripper = new GirlsOfDesireRipper(new URL("http://www.girlsofdesire.org/galleries/krillia/")); + @Tag("flaky") + public void testGirlsofdesireAlbum() throws IOException, URISyntaxException { + GirlsOfDesireRipper ripper = new GirlsOfDesireRipper(new 
URI("http://www.girlsofdesire.org/galleries/krillia/").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HbrowseRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HbrowseRipperTest.java index 35b8ffa6..291ac782 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HbrowseRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HbrowseRipperTest.java @@ -1,15 +1,18 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.HbrowseRipper; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class HbrowseRipperTest extends RippersTest { @Test - public void testPahealRipper() throws IOException { - HbrowseRipper ripper = new HbrowseRipper(new URL("https://www.hbrowse.com/21013/c00001")); + @Tag("flaky") + public void testPahealRipper() throws IOException, URISyntaxException { + HbrowseRipper ripper = new HbrowseRipper(new URI("https://www.hbrowse.com/21013/c00001").toURL()); testRipper(ripper); } -} \ No newline at end of file +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Hentai2readRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Hentai2readRipperTest.java index f448f0de..d6cbb9d0 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Hentai2readRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Hentai2readRipperTest.java @@ -1,15 +1,18 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.Hentai2readRipper; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class Hentai2readRipperTest extends RippersTest { @Test - public void testHentai2readAlbum() throws 
IOException { - Hentai2readRipper ripper = new Hentai2readRipper(new URL("https://hentai2read.com/sm_school_memorial/1/")); + @Tag("flaky") + public void testHentai2readAlbum() throws IOException, URISyntaxException { + Hentai2readRipper ripper = new Hentai2readRipper(new URI("https://hentai2read.com/sm_school_memorial/1/").toURL()); testRipper(ripper); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaicafeRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaicafeRipperTest.java index 9c922260..e8c39d07 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaicafeRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaicafeRipperTest.java @@ -1,21 +1,28 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.HentaiCafeRipper; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class HentaicafeRipperTest extends RippersTest { @Test - public void testHentaiCafeAlbum() throws IOException { - HentaiCafeRipper ripper = new HentaiCafeRipper(new URL("https://hentai.cafe/kikuta-the-oni-in-the-room/")); + @Tag("flaky") + @Disabled("20/05/2021 This test was disabled as the site has experienced notable downtime") + public void testHentaiCafeAlbum() throws IOException, URISyntaxException { + HentaiCafeRipper ripper = new HentaiCafeRipper(new URI("https://hentai.cafe/kikuta-the-oni-in-the-room/").toURL()); testRipper(ripper); } // This album has a line break (
) in the url. Test it to make sure ripme can handle these invalid urls @Test - public void testAlbumWithInvalidChars() throws IOException { - HentaiCafeRipper ripper = new HentaiCafeRipper(new URL("https://hentai.cafe/chobipero-club/")); + @Tag("flaky") + @Disabled("20/05/2021 This test was disabled as the site has experienced notable downtime") + public void testAlbumWithInvalidChars() throws IOException, URISyntaxException { + HentaiCafeRipper ripper = new HentaiCafeRipper(new URI("https://hentai.cafe/chobipero-club/").toURL()); testRipper(ripper); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaidudeRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaidudeRipperTest.java index b8924078..e2cbd754 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaidudeRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaidudeRipperTest.java @@ -1,15 +1,19 @@ package com.rarchives.ripme.tst.ripper.rippers; import com.rarchives.ripme.ripper.rippers.HentaidudeRipper; -import com.rarchives.ripme.utils.Utils; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; public class HentaidudeRipperTest extends RippersTest{ - public void testRip() throws IOException { - HentaidudeRipper ripper = new HentaidudeRipper(new URL("https://hentaidude.com/girlfriends-4ever-dlc-2/")); + @Test + @Tag("flaky") + public void testRip() throws IOException, URISyntaxException { + HentaidudeRipper ripper = new HentaidudeRipper(new URI("https://hentaidude.com/girlfriends-4ever-dlc-2/").toURL()); testRipper(ripper); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaifoundryRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaifoundryRipperTest.java index d86e2904..2e360a9c 100644 --- 
a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaifoundryRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaifoundryRipperTest.java @@ -1,26 +1,32 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.HentaifoundryRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class HentaifoundryRipperTest extends RippersTest { @Test - public void testHentaifoundryRip() throws IOException { - HentaifoundryRipper ripper = new HentaifoundryRipper(new URL("https://www.hentai-foundry.com/pictures/user/personalami")); + @Tag("flaky") + public void testHentaifoundryRip() throws IOException, URISyntaxException { + HentaifoundryRipper ripper = new HentaifoundryRipper(new URI("https://www.hentai-foundry.com/pictures/user/personalami").toURL()); testRipper(ripper); } @Test - public void testHentaifoundryGetGID() throws IOException { - HentaifoundryRipper ripper = new HentaifoundryRipper(new URL("https://www.hentai-foundry.com/stories/user/Rakked")); + @Tag("flaky") + public void testHentaifoundryGetGID() throws IOException, URISyntaxException { + HentaifoundryRipper ripper = new HentaifoundryRipper(new URI("https://www.hentai-foundry.com/stories/user/Rakked").toURL()); testRipper(ripper); - assertEquals("Rakked", ripper.getGID(new URL("https://www.hentai-foundry.com/stories/user/Rakked"))); + Assertions.assertEquals("Rakked", ripper.getGID(new URI("https://www.hentai-foundry.com/stories/user/Rakked").toURL())); } @Test - public void testHentaifoundryPdfRip() throws IOException { - HentaifoundryRipper ripper = new HentaifoundryRipper(new URL("https://www.hentai-foundry.com/stories/user/Rakked")); + @Tag("flaky") + public void testHentaifoundryPdfRip() throws IOException, URISyntaxException { + HentaifoundryRipper ripper = new 
HentaifoundryRipper(new URI("https://www.hentai-foundry.com/stories/user/Rakked").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaifoxRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaifoxRipperTest.java index 02515956..edda8162 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaifoxRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaifoxRipperTest.java @@ -1,13 +1,14 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.HentaifoxRipper; public class HentaifoxRipperTest extends RippersTest { - public void testRip() throws IOException { - HentaifoxRipper ripper = new HentaifoxRipper(new URL("https://hentaifox.com/gallery/38544/")); + public void testRip() throws IOException, URISyntaxException { + HentaifoxRipper ripper = new HentaifoxRipper(new URI("https://hentaifox.com/gallery/38544/").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaiimageRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaiimageRipperTest.java index 3fab101d..7ccc0029 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaiimageRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaiimageRipperTest.java @@ -1,7 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.HentaiimageRipper; import com.rarchives.ripme.utils.Utils; @@ -9,9 +10,9 @@ import org.junit.jupiter.api.Test; public class HentaiimageRipperTest extends RippersTest { @Test - public void testHentaifoundryRip() throws IOException { + public void testHentaifoundryRip() throws IOException, 
URISyntaxException { if (Utils.getConfigBoolean("test.run_flaky_tests", false)) { - HentaiimageRipper ripper = new HentaiimageRipper(new URL("https://hentai-image.com/image/afrobull-gerudo-ongoing-12/")); + HentaiimageRipper ripper = new HentaiimageRipper(new URI("https://hentai-image.com/image/afrobull-gerudo-ongoing-12/").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentainexusRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentainexusRipperTest.java new file mode 100644 index 00000000..3389408f --- /dev/null +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentainexusRipperTest.java @@ -0,0 +1,51 @@ +package com.rarchives.ripme.tst.ripper.rippers; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; +import java.util.List; + +import com.rarchives.ripme.ripper.rippers.HentaiNexusRipper; +import org.json.JSONObject; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class HentainexusRipperTest extends RippersTest { + @Test + @Tag("flaky") + @Disabled("20/05/2021 This test was disabled as the site has experienced notable downtime") + public void testHentaiNexusJson() throws IOException, URISyntaxException { + List testURLs = new ArrayList<>(); + testURLs.add(new URI("https://hentainexus.com/view/9202").toURL()); + testURLs.add(new URI("https://hentainexus.com/read/9202").toURL()); + testURLs.add(new URI("https://hentainexus.com/view/9202#001").toURL()); + testURLs.add(new URI("https://hentainexus.com/read/9202#001").toURL()); + + for (URL url : testURLs) { + + HentaiNexusRipper ripper = new HentaiNexusRipper(url); + + boolean testOK = false; + try { + + String jsonEncodedString = ripper.getJsonEncodedStringFromPage(); + String jsonDecodedString = 
ripper.decodeJsonString(jsonEncodedString); + JSONObject json = new JSONObject(jsonDecodedString); + // Fail test if JSON empty + testOK = !json.isEmpty(); + + } catch (Exception e) { + // Fail test if JSON invalid, not present or other errors + testOK = false; + } + + assertEquals(true, testOK); + } + + } +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HitomiRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HitomiRipperTest.java index 00c937b3..5587f773 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HitomiRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HitomiRipperTest.java @@ -1,19 +1,21 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.HitomiRipper; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; public class HitomiRipperTest extends RippersTest { @Test @Disabled("Broken ripper") - public void testRip() throws IOException { - HitomiRipper ripper = new HitomiRipper(new URL("https://hitomi.la/galleries/975973.html")); + public void testRip() throws IOException, URISyntaxException { + HitomiRipper ripper = new HitomiRipper(new URI("https://hitomi.la/galleries/975973.html").toURL()); testRipper(ripper); - assertTrue(ripper.getGID(new URL("https://hitomi.la/galleries/975973.html")).equals("975973")); + Assertions.assertTrue(ripper.getGID(new URI("https://hitomi.la/galleries/975973.html").toURL()).equals("975973")); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HqpornerRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HqpornerRipperTest.java index 6ef21c8d..c978f71c 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HqpornerRipperTest.java +++ 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HqpornerRipperTest.java @@ -2,67 +2,70 @@ package com.rarchives.ripme.tst.ripper.rippers; import com.rarchives.ripme.ripper.rippers.HqpornerRipper; import com.rarchives.ripme.utils.Utils; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; public class HqpornerRipperTest extends RippersTest { - public void testRip() throws IOException { + public void testRip() throws IOException, URISyntaxException { if (Utils.getConfigBoolean("test.run_flaky_tests", false)) { HqpornerRipper ripper = new HqpornerRipper( - new URL("https://hqporner.com/hdporn/84636-pool_lesson_with_a_cheating_husband.html")); + new URI("https://hqporner.com/hdporn/84636-pool_lesson_with_a_cheating_husband.html").toURL()); testRipper(ripper); } } - public void testGetGID() throws IOException { - URL poolURL = new URL("https://hqporner.com/hdporn/84636-pool_lesson_with_a_cheating_husband.html"); + public void testGetGID() throws IOException, URISyntaxException { + URL poolURL = new URI("https://hqporner.com/hdporn/84636-pool_lesson_with_a_cheating_husband.html").toURL(); HqpornerRipper ripper = new HqpornerRipper(poolURL); - assertEquals("84636-pool_lesson_with_a_cheating_husband", ripper.getGID(poolURL)); + Assertions.assertEquals("84636-pool_lesson_with_a_cheating_husband", ripper.getGID(poolURL)); } @Test - public void testGetURLsFromPage() throws IOException { - URL actressUrl = new URL("https://hqporner.com/actress/kali-roses"); + public void testGetURLsFromPage() throws IOException, URISyntaxException { + URL actressUrl = new URI("https://hqporner.com/actress/kali-roses").toURL(); HqpornerRipper ripper = new HqpornerRipper(actressUrl); assert (ripper.getURLsFromPage(ripper.getFirstPage()).size() >= 2); } @Test - public void testGetNextPage() throws IOException { - URL multiPageUrl = new 
URL("https://hqporner.com/category/tattooed"); + public void testGetNextPage() throws IOException, URISyntaxException { + URL multiPageUrl = new URI("https://hqporner.com/category/tattooed").toURL(); HqpornerRipper multiPageRipper = new HqpornerRipper(multiPageUrl); assert (multiPageRipper.getNextPage(multiPageRipper.getFirstPage()) != null); - URL singlePageUrl = new URL("https://hqporner.com/actress/amy-reid"); + URL singlePageUrl = new URI("https://hqporner.com/actress/amy-reid").toURL(); HqpornerRipper ripper = new HqpornerRipper(singlePageUrl); try { ripper.getNextPage(ripper.getFirstPage()); } catch (IOException e) { - assertEquals(e.getMessage(), "No next page found."); + Assertions.assertEquals(e.getMessage(), "No next page found."); } } @Test - public void testMyDaddyVideoHost() throws IOException { + public void testMyDaddyVideoHost() throws IOException, URISyntaxException { if (Utils.getConfigBoolean("test.run_flaky_tests", false)) { - URL myDaddyUrl = new URL("https://hqporner.com/hdporn/84636-pool_lesson_with_a_cheating_husband.html"); + URL myDaddyUrl = new URI("https://hqporner.com/hdporn/84636-pool_lesson_with_a_cheating_husband.html").toURL(); HqpornerRipper myDaddyRipper = new HqpornerRipper(myDaddyUrl); testRipper(myDaddyRipper); } } @Test - public void testFlyFlvVideoHost() throws IOException { + public void testFlyFlvVideoHost() throws IOException, URISyntaxException { if (Utils.getConfigBoolean("test.run_flaky_tests", false)) { - URL flyFlvUrl = new URL( - "https://hqporner.com/hdporn/69862-bangbros_-_amy_reid_taking_off_a_tight_sexy_swimsuit.html"); + URL flyFlvUrl = new URI( + "https://hqporner.com/hdporn/69862-bangbros_-_amy_reid_taking_off_a_tight_sexy_swimsuit.html").toURL(); HqpornerRipper flyFlvRipper = new HqpornerRipper(flyFlvUrl); testRipper(flyFlvRipper); } } @Test - public void testUnknownVideoHost() throws IOException { + public void testUnknownVideoHost() throws IOException, URISyntaxException { if 
(Utils.getConfigBoolean("test.run_flaky_tests", false)) { - URL unknownHostUrl = new URL("https://hqporner.com/hdporn/79528-Kayden_Kross_-_Serious_Masturbation.html"); // howq.cc + URL unknownHostUrl = new URI("https://hqporner.com/hdporn/79528-Kayden_Kross_-_Serious_Masturbation.html").toURL(); // howq.cc HqpornerRipper unknownHostRipper = new HqpornerRipper(unknownHostUrl); testRipper(unknownHostRipper); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HypnohubRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HypnohubRipperTest.java index 1d9ef4ad..46ba5828 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HypnohubRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HypnohubRipperTest.java @@ -1,25 +1,33 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.HypnohubRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; public class HypnohubRipperTest extends RippersTest { - public void testRip() throws IOException { - URL poolURL = new URL("http://hypnohub.net/pool/show/2303"); - URL postURL = new URL("http://hypnohub.net/post/show/63464/black_hair-bracelet-collar-corruption-female_only-"); + @Test + @Disabled("wants a hunman") + public void testRip() throws IOException, URISyntaxException { + URL poolURL = new URI("http://hypnohub.net/pool/show/2303").toURL(); + URL postURL = new URI("http://hypnohub.net/post/show/63464/black_hair-bracelet-collar-corruption-female_only-").toURL(); HypnohubRipper ripper = new HypnohubRipper(poolURL); testRipper(ripper); ripper = new HypnohubRipper(postURL); testRipper(ripper); } - public void testGetGID() throws IOException { - URL poolURL = new URL("http://hypnohub.net/pool/show/2303"); + @Test + public void testGetGID() throws 
IOException, URISyntaxException { + URL poolURL = new URI("http://hypnohub.net/pool/show/2303").toURL(); HypnohubRipper ripper = new HypnohubRipper(poolURL); - assertEquals("2303", ripper.getGID(poolURL)); + Assertions.assertEquals("2303", ripper.getGID(poolURL)); - URL postURL = new URL("http://hypnohub.net/post/show/63464/black_hair-bracelet-collar-corruption-female_only-"); - assertEquals("63464_black_hair-bracelet-collar-corruption-female_only-", ripper.getGID(postURL)); + URL postURL = new URI("http://hypnohub.net/post/show/63464/black_hair-bracelet-collar-corruption-female_only-").toURL(); + Assertions.assertEquals("63464_black_hair-bracelet-collar-corruption-female_only-", ripper.getGID(postURL)); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagearnRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagearnRipperTest.java deleted file mode 100644 index 69b6d899..00000000 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagearnRipperTest.java +++ /dev/null @@ -1,15 +0,0 @@ -package com.rarchives.ripme.tst.ripper.rippers; - -import java.io.IOException; -import java.net.URL; - -import com.rarchives.ripme.ripper.rippers.ImagearnRipper; -import org.junit.jupiter.api.Test; - -public class ImagearnRipperTest extends RippersTest { - @Test - public void testImagearnRip() throws IOException { - ImagearnRipper ripper = new ImagearnRipper(new URL("http://imagearn.com//gallery.php?id=578682")); - testRipper(ripper); - } -} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagebamRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagebamRipperTest.java index efe57b96..5f1e9786 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagebamRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagebamRipperTest.java @@ -1,15 +1,18 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import 
java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.ImagebamRipper; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class ImagebamRipperTest extends RippersTest { @Test - public void testImagebamRip() throws IOException { - ImagebamRipper ripper = new ImagebamRipper(new URL("http://www.imagebam.com/gallery/488cc796sllyf7o5srds8kpaz1t4m78i")); + @Tag("flaky") + public void testImagebamRip() throws IOException, URISyntaxException { + ImagebamRipper ripper = new ImagebamRipper(new URI("http://www.imagebam.com/gallery/488cc796sllyf7o5srds8kpaz1t4m78i").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagefapRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagefapRipperTest.java index 533fff79..2af7d499 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagefapRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagefapRipperTest.java @@ -1,25 +1,26 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.HashMap; import java.util.Map; import com.rarchives.ripme.ripper.rippers.ImagefapRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class ImagefapRipperTest extends RippersTest { @Test - public void testImagefapAlbums() throws IOException { + @Tag("flaky") + public void testImagefapAlbums() throws IOException, URISyntaxException { Map testURLs = new HashMap<>(); // Album with specific title - testURLs.put(new URL("http://www.imagefap.com/pictures/4649440/Frozen-%28Elsa-and-Anna%29?view=2"), - "Frozen (Elsa and Anna)"); - - // New URL format - testURLs.put(new URL("http://www.imagefap.com/gallery.php?pgid=fffd68f659befa5535cf78f014e348f1"), - "imagefap_fffd68f659befa5535cf78f014e348f1"); + 
testURLs.put(new URI("https://www.imagefap.com/pictures/11365460/Cartoons").toURL(), + "Cartoons"); for (URL url : testURLs.keySet()) { ImagefapRipper ripper = new ImagefapRipper(url); @@ -27,9 +28,10 @@ public class ImagefapRipperTest extends RippersTest { } } @Test - public void testImagefapGetAlbumTitle() throws IOException { - URL url = new URL("https://www.imagefap.com/gallery.php?gid=7789753"); + @Tag("flaky") + public void testImagefapGetAlbumTitle() throws IOException, URISyntaxException { + URL url = new URI("https://www.imagefap.com/pictures/11365460/Cartoons").toURL(); ImagefapRipper ripper = new ImagefapRipper(url); - assertEquals("imagefap_Red.Heels.Lover.In.Love_7789753", ripper.getAlbumTitle(url)); + Assertions.assertEquals("imagefap_Cartoons_11365460", ripper.getAlbumTitle(url)); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagevenueRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagevenueRipperTest.java index 8093ede7..f604d1f7 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagevenueRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagevenueRipperTest.java @@ -1,26 +1,29 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.ImagevenueRipper; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; public class ImagevenueRipperTest extends RippersTest { @Test @Disabled("See https://github.com/RipMeApp/ripme/issues/1202") - public void testImagevenueRip() throws IOException { + public void testImagevenueRip() throws IOException, URISyntaxException { ImagevenueRipper ripper = new ImagevenueRipper( - new URL("http://img120.imagevenue.com/galshow.php?gal=gallery_1373818527696_191lo")); + new 
URI("http://img120.imagevenue.com/galshow.php?gal=gallery_1373818527696_191lo").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("http://img120.imagevenue.com/galshow.php?gal=gallery_1373818527696_191lo"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("http://img120.imagevenue.com/galshow.php?gal=gallery_1373818527696_191lo").toURL(); ImagevenueRipper ripper = new ImagevenueRipper(url); - assertEquals("gallery_1373818527696_191lo", ripper.getGID(url)); + Assertions.assertEquals("gallery_1373818527696_191lo", ripper.getGID(url)); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImgboxRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImgboxRipperTest.java index bb877450..98e3dfc5 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImgboxRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImgboxRipperTest.java @@ -1,21 +1,27 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.ImgboxRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class ImgboxRipperTest extends RippersTest { @Test - public void testImgboxRip() throws IOException { - ImgboxRipper ripper = new ImgboxRipper(new URL("https://imgbox.com/g/FJPF7t26FD")); + @Tag("flaky") + public void testImgboxRip() throws IOException, URISyntaxException { + ImgboxRipper ripper = new ImgboxRipper(new URI("https://imgbox.com/g/FJPF7t26FD").toURL()); testRipper(ripper); } - public void testGetGID() throws IOException { - URL url = new URL("https://imgbox.com/g/FJPF7t26FD"); + @Test + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://imgbox.com/g/FJPF7t26FD").toURL(); ImgboxRipper 
ripper = new ImgboxRipper(url); - assertEquals("FJPF7t26FD", ripper.getGID(url)); + Assertions.assertEquals("FJPF7t26FD", ripper.getGID(url)); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImgurRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImgurRipperTest.java index 5c4fb1a1..3dacc1bb 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImgurRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImgurRipperTest.java @@ -4,28 +4,34 @@ import com.rarchives.ripme.ripper.rippers.ImgurRipper; import com.rarchives.ripme.ripper.rippers.ImgurRipper.ImgurAlbum; import com.rarchives.ripme.utils.RipUtils; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; public class ImgurRipperTest extends RippersTest { @Test - public void testImgurURLFailures() throws IOException { + public void testImgurURLFailures() throws IOException, URISyntaxException { List failURLs = new ArrayList<>(); // Imgur urls that should not work - failURLs.add(new URL("http://imgur.com")); - failURLs.add(new URL("http://imgur.com/")); - failURLs.add(new URL("http://i.imgur.com")); - failURLs.add(new URL("http://i.imgur.com/")); - failURLs.add(new URL("http://imgur.com/image.jpg")); - failURLs.add(new URL("http://i.imgur.com/image.jpg")); + failURLs.add(new URI("http://imgur.com").toURL()); + failURLs.add(new URI("http://imgur.com/").toURL()); + failURLs.add(new URI("http://i.imgur.com").toURL()); + failURLs.add(new URI("http://i.imgur.com/").toURL()); + failURLs.add(new URI("http://imgur.com/image.jpg").toURL()); + failURLs.add(new URI("http://i.imgur.com/image.jpg").toURL()); + // Imgur seems not to support URLs with lists of images anymore. 
+ failURLs.add(new URI("http://imgur.com/758qD43,C6iVJex,bP7flAu,J3l85Ri,1U7fhu5,MbuAUCM,JF4vOXQ").toURL()); for (URL url : failURLs) { try { new ImgurRipper(url); - fail("Instantiated ripper for URL that should not work: " + url); + Assertions.fail("Instantiated ripper for URL that should not work: " + url); } catch (Exception e) { // Expected } @@ -33,22 +39,23 @@ public class ImgurRipperTest extends RippersTest { } @Test - public void testImgurAlbums() throws IOException { + public void testImgurAlbums() throws IOException, URISyntaxException { List contentURLs = new ArrayList<>(); // URLs that should return more than 1 image - //contentURLs.add(new URL("http://imgur.com/a/dS9OQ#0")); // Horizontal layout - //contentURLs.add(new URL("http://imgur.com/a/YpsW9#0")); // Grid layout - contentURLs.add(new URL("http://imgur.com/a/WxG6f/layout/vertical#0")); - contentURLs.add(new URL("http://imgur.com/a/WxG6f/layout/horizontal#0")); - contentURLs.add(new URL("http://imgur.com/a/WxG6f/layout/grid#0")); - contentURLs.add(new URL("http://imgur.com/gallery/FmP2o")); // Gallery URL - // Imgur seems not to support URLs with lists of images anymore. 
- // contentURLs.add(new - // URL("http://imgur.com/758qD43,C6iVJex,bP7flAu,J3l85Ri,1U7fhu5,MbuAUCM,JF4vOXQ")); + contentURLs.add(new URI("http://imgur.com/gallery/FmP2o").toURL()); + // URLs with /gallery path + contentURLs.add(new URI("http://imgur.com/gallery/nAl13J6").toURL()); + contentURLs.add(new URI("https://imgur.com/gallery/another-brendan-fraser-reaction-from-bedazzled-intergalactic-quality-nAl13J6").toURL()); + // URLs with /a path + contentURLs.add(new URI("http://imgur.com/a/G058j5F").toURL()); + contentURLs.add(new URI("https://imgur.com/a/thanks-batman-G058j5F").toURL()); + contentURLs.add(new URI("https://imgur.com/a/thanks-batman-G058j5F/layout/grid#0").toURL()); + contentURLs.add(new URI("https://imgur.com/a/G058j5F/layout/grid#0").toURL()); + contentURLs.add(new URI("https://imgur.com/a/G058j5F/layout/horizontal#0").toURL()); // Sometimes hangs up - // contentURLs.add(new URL("http://imgur.com/r/nsfw_oc/top/all")); - // contentURLs.add(new URL("http://imgur.com/a/bXQpH")); // Album with - // titles/descriptions + // contentURLs.add(new URI("http://imgur.com/r/nsfw_oc/top/all").toURL()); + // Album with titles/descriptions + contentURLs.add(new URI("http://imgur.com/a/bXQpH").toURL()); for (URL url : contentURLs) { ImgurRipper ripper = new ImgurRipper(url); testRipper(ripper); @@ -56,10 +63,25 @@ public class ImgurRipperTest extends RippersTest { } @Test - public void testImgurSingleImage() throws IOException { + public void testImgurUserAccount() throws IOException, URISyntaxException { + List contentURLs = new ArrayList<>(); + // URL with albums + contentURLs.add("https://RockStarBrew.imgur.com"); + // New URL format + contentURLs.add("https://imgur.com/user/RockStarBrew/"); + // And URL with images + contentURLs.add("https://imgur.com/user/counter2strike"); + for (var url : contentURLs) { + ImgurRipper ripper = new ImgurRipper(new URI(url).toURL()); + testRipper(ripper); + } + } + + @Test + public void testImgurSingleImage() throws 
IOException, URISyntaxException { List contentURLs = new ArrayList<>(); - contentURLs.add(new URL("http://imgur.com/qbfcLyG")); // Single image URL - contentURLs.add(new URL("https://imgur.com/KexUO")); // Single image URL + contentURLs.add(new URI("http://imgur.com/qbfcLyG").toURL()); // Single image URL + contentURLs.add(new URI("https://imgur.com/KexUO").toURL()); // Single image URL for (URL url : contentURLs) { ImgurRipper ripper = new ImgurRipper(url); testRipper(ripper); @@ -67,23 +89,24 @@ public class ImgurRipperTest extends RippersTest { } @Test - public void testImgurAlbumWithMoreThan20Pictures() throws IOException { - ImgurAlbum album = ImgurRipper.getImgurAlbum(new URL("http://imgur.com/a/HUMsq")); - assertTrue("Failed to find 20 files from " + album.url.toExternalForm() + ", only got " + album.images.size(), - album.images.size() >= 20); + public void testImgurAlbumWithMoreThan20Pictures() throws IOException, URISyntaxException { + ImgurAlbum album = ImgurRipper.getImgurAlbum(new URI("http://imgur.com/a/HUMsq").toURL()); + Assertions.assertTrue(album.images.size() >= 20, + "Failed to find 20 files from " + album.url.toExternalForm() + ", only got " + album.images.size()); } @Test - public void testImgurAlbumWithMoreThan100Pictures() throws IOException { - ImgurAlbum album = ImgurRipper.getImgurAlbum(new URL("https://imgur.com/a/HX3JSrD")); - assertTrue("Failed to find 100 files from " + album.url.toExternalForm() + ", only got " + album.images.size(), - album.images.size() >= 100); + @Tag("flaky") + public void testImgurAlbumWithMoreThan100Pictures() throws IOException, URISyntaxException { + ImgurAlbum album = ImgurRipper.getImgurAlbum(new URI("https://imgur.com/a/HX3JSrD").toURL()); + Assertions.assertTrue(album.images.size() >= 100, + "Failed to find 100 files from " + album.url.toExternalForm() + ", only got " + album.images.size()); } @Test public void testImgurVideoFromGetFilesFromURL() throws Exception { - List urls = 
RipUtils.getFilesFromURL(new URL("https://i.imgur.com/4TtwxRN.gifv")); - assertEquals("https://i.imgur.com/4TtwxRN.mp4", urls.get(0).toExternalForm()); + List urls = RipUtils.getFilesFromURL(new URI("https://i.imgur.com/7qoW0Mo.gifv").toURL()); + Assertions.assertEquals("https://i.imgur.com/7qoW0Mo.mp4", urls.get(0).toExternalForm()); } /* @@ -92,7 +115,7 @@ public class ImgurRipperTest extends RippersTest { * "over capacity" warning on the page. // I wonder if our testing automation is * what is putting this album over capacity? // See issue #376. public void * testImgurAlbumWithMoreThan1000Pictures() throws IOException { ImgurAlbum - * album = ImgurRipper.getImgurAlbum(new URL("http://imgur.com/a/vsuh5")); + * album = ImgurRipper.getImgurAlbum(new URI("http://imgur.com/a/vsuh5").toURL()); * assertTrue("Failed to find 1000 files from " + album.url.toExternalForm() + * ", only got " + album.images.size(), album.images.size() >= 1000); } */ diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/InstagramRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/InstagramRipperTest.java index 693dc72f..5b929faf 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/InstagramRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/InstagramRipperTest.java @@ -1,9 +1,14 @@ package com.rarchives.ripme.tst.ripper.rippers; import com.rarchives.ripme.ripper.rippers.InstagramRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.HashMap; @@ -12,37 +17,46 @@ import java.util.Map; public class InstagramRipperTest extends RippersTest { @Test - public void testInstagramGID() throws IOException { + public void testInstagramGID() throws IOException, URISyntaxException { Map 
testURLs = new HashMap<>(); - testURLs.put(new URL("http://instagram.com/Test_User"), "Test_User"); - testURLs.put(new URL("http://instagram.com/_test_user_"), "_test_user_"); - testURLs.put(new URL("http://instagram.com/_test_user_/?pinned"), "_test_user__pinned"); - testURLs.put(new URL("http://instagram.com/stories/_test_user_/"), "_test_user__stories"); - testURLs.put(new URL("http://instagram.com/_test_user_/tagged"), "_test_user__tagged"); - testURLs.put(new URL("http://instagram.com/_test_user_/channel"), "_test_user__igtv"); - testURLs.put(new URL("http://instagram.com/explore/tags/test_your_tag"), "tag_test_your_tag"); - testURLs.put(new URL("https://www.instagram.com/p/BZ4egP7njW5/?hl=en"), "post_BZ4egP7njW5"); - testURLs.put(new URL("https://www.instagram.com/p/BZ4egP7njW5"), "post_BZ4egP7njW5"); - testURLs.put(new URL("https://www.instagram.com/p/BaNPpaHn2zU/?taken-by=hilaryduff"), "post_BaNPpaHn2zU"); - testURLs.put(new URL("https://www.instagram.com/p/BaNPpaHn2zU/"), "post_BaNPpaHn2zU"); + testURLs.put(new URI("http://instagram.com/Test_User").toURL(), "Test_User"); + testURLs.put(new URI("http://instagram.com/_test_user_").toURL(), "_test_user_"); + testURLs.put(new URI("http://instagram.com/_test_user_/?pinned").toURL(), "_test_user__pinned"); + testURLs.put(new URI("http://instagram.com/stories/_test_user_/").toURL(), "_test_user__stories"); + testURLs.put(new URI("http://instagram.com/_test_user_/tagged").toURL(), "_test_user__tagged"); + testURLs.put(new URI("http://instagram.com/_test_user_/channel").toURL(), "_test_user__igtv"); + testURLs.put(new URI("http://instagram.com/explore/tags/test_your_tag").toURL(), "tag_test_your_tag"); + testURLs.put(new URI("https://www.instagram.com/p/BZ4egP7njW5/?hl=en").toURL(), "post_BZ4egP7njW5"); + testURLs.put(new URI("https://www.instagram.com/p/BZ4egP7njW5").toURL(), "post_BZ4egP7njW5"); + testURLs.put(new URI("https://www.instagram.com/p/BaNPpaHn2zU/?taken-by=hilaryduff").toURL(), "post_BaNPpaHn2zU"); + 
testURLs.put(new URI("https://www.instagram.com/p/BaNPpaHn2zU/").toURL(), "post_BaNPpaHn2zU"); for (URL url : testURLs.keySet()) { InstagramRipper ripper = new InstagramRipper(url); ripper.setup(); - assertEquals(testURLs.get(url), ripper.getGID(ripper.getURL())); + Assertions.assertEquals(testURLs.get(url), ripper.getGID(ripper.getURL())); deleteDir(ripper.getWorkingDir()); } } @Test - public void testInstagramAlbums() throws IOException { + @Disabled("Ripper broken for single items") + public void testInstagramSingle() throws IOException, URISyntaxException { List contentURLs = new ArrayList<>(); - // This unit test is a bit flaky - //contentURLs.add(new URL("https://www.instagram.com/Test_User/")); - contentURLs.add(new URL("https://www.instagram.com/p/BaNPpaHn2zU/?hl=en")); - contentURLs.add(new URL("https://www.instagram.com/p/BaNPpaHn2zU/")); + contentURLs.add(new URI("https://www.instagram.com/p/BaNPpaHn2zU/?hl=en").toURL()); + contentURLs.add(new URI("https://www.instagram.com/p/BaNPpaHn2zU/").toURL()); for (URL url : contentURLs) { InstagramRipper ripper = new InstagramRipper(url); testRipper(ripper); } } + + @Test + @Tag("flaky") + public void testInstagramAlbums() throws IOException, URISyntaxException { + // do not test, in case of rate limit 200/hr since 2021. 
see + // https://github.com/ripmeapp2/ripme/issues/32 + URL url = new URI("https://www.instagram.com/Test_User/").toURL(); + InstagramRipper ripper = new InstagramRipper(url); + testRipper(ripper); + } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/JagodibujaRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/JagodibujaRipperTest.java index d41c0352..3f5c199e 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/JagodibujaRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/JagodibujaRipperTest.java @@ -1,16 +1,19 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.JagodibujaRipper; +import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; public class JagodibujaRipperTest extends RippersTest { @Test - public void testJagodibujaRipper() throws IOException { + @Disabled("fails on github ubuntu automated PR check 2020-07-29") + public void testJagodibujaRipper() throws IOException, URISyntaxException { // a photo set - JagodibujaRipper ripper = new JagodibujaRipper(new URL("http://www.jagodibuja.com/comic-in-me/")); + JagodibujaRipper ripper = new JagodibujaRipper(new URI("http://www.jagodibuja.com/comic-in-me/").toURL()); testRipper(ripper); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/KingcomixRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/KingcomixRipperTest.java new file mode 100644 index 00000000..ae543c36 --- /dev/null +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/KingcomixRipperTest.java @@ -0,0 +1,30 @@ +package com.rarchives.ripme.tst.ripper.rippers; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; + +import com.rarchives.ripme.ripper.rippers.KingcomixRipper; + +import 
org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +public class KingcomixRipperTest extends RippersTest { + + @Test + @Disabled("test or ripper broken") + public void testRip() throws IOException, URISyntaxException { + KingcomixRipper ripper = new KingcomixRipper(new URI("https://kingcomix.com/aunt-cumming-tracy-scops/").toURL()); + testRipper(ripper); + } + + @Test + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://kingcomix.com/aunt-cumming-tracy-scops/").toURL(); + KingcomixRipper ripper = new KingcomixRipper(url); + Assertions.assertEquals("aunt-cumming-tracy-scops", ripper.getGID(url)); + } + +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ListalRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ListalRipperTest.java index aba41af3..259114ba 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ListalRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ListalRipperTest.java @@ -1,29 +1,44 @@ -package com.rarchives.ripme.tst.ripper.rippers; - -import java.io.IOException; -import java.net.URL; -import com.rarchives.ripme.ripper.rippers.ListalRipper; - -public class ListalRipperTest extends RippersTest { - - /** - * Test for list type url. - * @throws IOException - */ - public void testRipListType() throws IOException { - ListalRipper ripper = - new ListalRipper(new URL("https://www.listal.com/list/evolution-emma-stone")); - testRipper(ripper); - } - - /** - * Test for folder type url. 
- * @throws IOException - */ - public void testRipFolderType() throws IOException { - ListalRipper ripper = - new ListalRipper(new URL("https://www.listal.com/chet-atkins/pictures")); - testRipper(ripper); - } - -} +package com.rarchives.ripme.tst.ripper.rippers; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +import com.rarchives.ripme.ripper.rippers.ListalRipper; +import org.junit.jupiter.api.*; + +public class ListalRipperTest extends RippersTest { + + /** + * Test for list type url. + */ + @Test + @Tag("flaky") + public void testPictures() throws IOException, URISyntaxException { + ListalRipper ripper = + new ListalRipper(new URI("https://www.listal.com/emma-stone_iii/pictures").toURL()); + testRipper(ripper); + } + + /** + * Test for list type url. + */ + @Test + @Tag("flaky") + public void testRipListType() throws IOException, URISyntaxException { + ListalRipper ripper = + new ListalRipper(new URI("https://www.listal.com/list/evolution-emma-stone").toURL()); + testRipper(ripper); + } + + /** + * Test for folder type url. 
+ */ + @Test + public void testRipFolderType() throws IOException, URISyntaxException { + ListalRipper ripper = + new ListalRipper(new URI("https://www.listal.com/chet-atkins/pictures").toURL()); + testRipper(ripper); + } + +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/LusciousRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/LusciousRipperTest.java index 6362dcc6..bc8594d6 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/LusciousRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/LusciousRipperTest.java @@ -1,38 +1,29 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.LusciousRipper; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; public class LusciousRipperTest extends RippersTest { - @Test @Disabled("Flaky in the CI") - public void testPahealRipper() throws IOException { + @Test + @Disabled("test or ripper broken") + public void testLusciousRipper() throws IOException, URISyntaxException { // a photo set LusciousRipper ripper = new LusciousRipper( - new URL("https://luscious.net/albums/h-na-alice-wa-suki-desu-ka-do-you-like-alice-when_321609/")); + new URI("https://luscious.net/albums/h-na-alice-wa-suki-desu-ka-do-you-like-alice-when_321609/").toURL()); testRipper(ripper); } - public void testGetGID() throws IOException { - URL url = new URL("https://luscious.net/albums/h-na-alice-wa-suki-desu-ka-do-you-like-alice-when_321609/"); + @Test + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://luscious.net/albums/h-na-alice-wa-suki-desu-ka-do-you-like-alice-when_321609/").toURL(); LusciousRipper ripper = new LusciousRipper(url); - assertEquals("h-na-alice-wa-suki-desu-ka-do-you-like-alice-when_321609", ripper.getGID(url)); - } 
- @Test @Disabled("Flaky in the CI") - public void testGetNextPage() throws IOException { - URL multiPageAlbumUrl = new URL("https://luscious.net/albums/women-of-color_58/"); - LusciousRipper multiPageRipper = new LusciousRipper(multiPageAlbumUrl); - assert (multiPageRipper.getNextPage(multiPageRipper.getFirstPage()) != null); - - URL singlePageAlbumUrl = new URL("https://members.luscious.net/albums/bakaneko-navidarks_332097/"); - LusciousRipper singlePageRipper = new LusciousRipper(singlePageAlbumUrl); - try { - singlePageRipper.getNextPage(singlePageRipper.getFirstPage()); - } catch (IOException e) { - assertEquals("No next page found.", e.getMessage()); - } + Assertions.assertEquals("h-na-alice-wa-suki-desu-ka-do-you-like-alice-when_321609", ripper.getGID(url)); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MangadexRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MangadexRipperTest.java index 70aa5aaf..3bcec8c8 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MangadexRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MangadexRipperTest.java @@ -2,13 +2,25 @@ package com.rarchives.ripme.tst.ripper.rippers; import com.rarchives.ripme.ripper.rippers.MangadexRipper; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; -public class MangadexRipperTest extends RippersTest{ - public void testRip() throws IOException { - MangadexRipper ripper = new MangadexRipper(new URL("https://mangadex.org/chapter/467904/")); +public class MangadexRipperTest extends RippersTest { + @Test + @Tag("flaky") + public void testRip() throws IOException, URISyntaxException { + MangadexRipper ripper = new MangadexRipper(new URI("https://mangadex.org/chapter/467904/").toURL()); + testRipper(ripper); + } + + @Test + @Tag("flaky") + public void test2() 
throws IOException, URISyntaxException { + MangadexRipper ripper = new MangadexRipper(new URI("https://mangadex.org/title/44625/this-croc-will-die-in-100-days").toURL()); testRipper(ripper); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ManganeloRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ManganeloRipperTest.java index 7eb1b8e9..5095553c 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ManganeloRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ManganeloRipperTest.java @@ -1,19 +1,27 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.ManganeloRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; public class ManganeloRipperTest extends RippersTest { - public void testRip() throws IOException { - ManganeloRipper ripper = new ManganeloRipper(new URL("https://manganelo.com/manga/demonic_housekeeper")); + @Test + @Disabled("no images found, test or ripper broken") + public void testRip() throws IOException, URISyntaxException { + ManganeloRipper ripper = new ManganeloRipper(new URI("https://manganelo.com/manga/demonic_housekeeper").toURL()); testRipper(ripper); } - public void testGetGID() throws IOException { - URL url = new URL("https://manganelo.com/manga/demonic_housekeeper"); + @Test + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://manganelo.com/manga/demonic_housekeeper").toURL(); ManganeloRipper ripper = new ManganeloRipper(url); - assertEquals("demonic_housekeeper", ripper.getGID(url)); + Assertions.assertEquals("demonic_housekeeper", ripper.getGID(url)); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MastodonRipperTest.java 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MastodonRipperTest.java index 4b564300..9e50e9a5 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MastodonRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MastodonRipperTest.java @@ -1,15 +1,18 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.MastodonRipper; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class MastodonRipperTest extends RippersTest { @Test - public void testRip() throws IOException { - MastodonRipper ripper = new MastodonRipper(new URL("https://mastodon.social/@pythonhub/media")); + @Tag("flaky") + public void testRip() throws IOException, URISyntaxException { + MastodonRipper ripper = new MastodonRipper(new URI("https://mastodon.social/@pythonhub/media").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MastodonXyzRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MastodonXyzRipperTest.java index 2f500d6a..0ad1b3f1 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MastodonXyzRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MastodonXyzRipperTest.java @@ -1,15 +1,18 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.MastodonXyzRipper; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class MastodonXyzRipperTest extends RippersTest { @Test - public void testRip() throws IOException { - MastodonXyzRipper ripper = new MastodonXyzRipper(new URL("https://mastodon.xyz/@artwo/media")); + @Tag("flaky") + public void testRip() throws IOException, URISyntaxException { + MastodonXyzRipper ripper = 
new MastodonXyzRipper(new URI("https://mastodon.xyz/@artwo/media").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MeituriRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MeituriRipperTest.java index fbd9ea57..441fd5c6 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MeituriRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MeituriRipperTest.java @@ -1,25 +1,28 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.MeituriRipper; -import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class MeituriRipperTest extends RippersTest { @Test - @Disabled("Broken ripper") - public void testMeituriRip() throws IOException { - MeituriRipper ripper = new MeituriRipper(new URL("https://www.meituri.com/a/14449/")); + @Tag("flaky") + public void testMeituriRip() throws IOException, URISyntaxException { + MeituriRipper ripper = new MeituriRipper(new URI("https://www.tujigu.com/a/14449/").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("https://www.meituri.com/a/14449/"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://www.tujigu.com/a/14449/").toURL(); MeituriRipper ripper = new MeituriRipper(url); - assertEquals("14449", ripper.getGID(url)); + Assertions.assertEquals("14449", ripper.getGID(url)); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ModelmayhemRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ModelmayhemRipperTest.java index a9c859c7..c8c10ce6 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ModelmayhemRipperTest.java +++ 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ModelmayhemRipperTest.java @@ -1,10 +1,12 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.ModelmayhemRipper; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; @@ -12,16 +14,16 @@ public class ModelmayhemRipperTest extends RippersTest { @Test @Disabled("Broken ripper") - public void testModelmayhemRip() throws IOException { + public void testModelmayhemRip() throws IOException, URISyntaxException { ModelmayhemRipper ripper = new ModelmayhemRipper( - new URL("https://www.modelmayhem.com/portfolio/520206/viewall")); + new URI("https://www.modelmayhem.com/portfolio/520206/viewall").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { + public void testGetGID() throws IOException, URISyntaxException { ModelmayhemRipper ripper = new ModelmayhemRipper( - new URL("https://www.modelmayhem.com/portfolio/520206/viewall")); - assertEquals("520206", ripper.getGID(new URL("https://www.modelmayhem.com/portfolio/520206/viewall"))); + new URI("https://www.modelmayhem.com/portfolio/520206/viewall").toURL()); + Assertions.assertEquals("520206", ripper.getGID(new URI("https://www.modelmayhem.com/portfolio/520206/viewall").toURL())); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ModelxRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ModelxRipperTest.java index 50fec37b..38f572a8 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ModelxRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ModelxRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import 
com.rarchives.ripme.ripper.rippers.ModelxRipper; @@ -11,11 +13,10 @@ import org.junit.jupiter.api.Test; public class ModelxRipperTest extends RippersTest { @Test @Disabled("ModelxRipper domain has been changes. Commenting to avoid build failure.") - public void testModelxAlbum() throws IOException { - ModelxRipper ripper = new ModelxRipper(new URL( - "http://www.modelx.org/graphis-collection-2002-2016/ai-yuzuki-%e6%9f%9a%e6%9c%88%e3%81%82%e3%81%84-yuzuiro/")); - System.out.println(ripper.getGID(new URL( - "http://www.modelx.org/graphis-collection-2002-2016/ai-yuzuki-%e6%9f%9a%e6%9c%88%e3%81%82%e3%81%84-yuzuiro/"))); + public void testModelxAlbum() throws IOException, URISyntaxException { + URL url = new URI("http://www.modelx.org/graphis-collection-2002-2016/ai-yuzuki-%e6%9f%9a%e6%9c%88%e3%81%82%e3%81%84-yuzuiro/").toURL(); + ModelxRipper ripper = new ModelxRipper(url); + System.out.println(ripper.getGID(url)); testRipper(ripper); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MotherlessRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MotherlessRipperTest.java index 2739f9da..98c65f07 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MotherlessRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MotherlessRipperTest.java @@ -1,16 +1,19 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.MotherlessRipper; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class MotherlessRipperTest extends RippersTest { @Test - public void testMotherlessAlbumRip() throws IOException { - MotherlessRipper ripper = new MotherlessRipper(new URL("https://motherless.com/G1168D90")); + @Tag("flaky") + public void testMotherlessAlbumRip() throws IOException, URISyntaxException { + MotherlessRipper ripper = 
new MotherlessRipper(new URI("https://motherless.com/G1168D90").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MulemaxRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MulemaxRipperTest.java deleted file mode 100644 index 8c12a94d..00000000 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MulemaxRipperTest.java +++ /dev/null @@ -1,16 +0,0 @@ -package com.rarchives.ripme.tst.ripper.rippers; - -import java.io.IOException; -import java.net.URL; - -import com.rarchives.ripme.ripper.rippers.MulemaxRipper; -import org.junit.jupiter.api.Test; - -public class MulemaxRipperTest extends RippersTest { - @Test - public void testMulemaxVideo() throws IOException { - MulemaxRipper ripper = new MulemaxRipper(new URL("https://mulemax.com/video/1720/emma-and-her-older-sissy-are-home-for-a-holiday-break")); //pick any video from the front page - testRipper(ripper); - } - -} \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyhentaicomicsRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyhentaicomicsRipperTest.java index 72524b06..52f8b0d8 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyhentaicomicsRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyhentaicomicsRipperTest.java @@ -1,40 +1,46 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.MyhentaicomicsRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class MyhentaicomicsRipperTest extends RippersTest { @Test - public void testMyhentaicomicsAlbum() throws IOException { - MyhentaicomicsRipper ripper = new MyhentaicomicsRipper(new URL("http://myhentaicomics.com/index.php/Nienna-Lost-Tales")); + @Tag("flaky") + public 
void testMyhentaicomicsAlbum() throws IOException, URISyntaxException { + MyhentaicomicsRipper ripper = new MyhentaicomicsRipper(new URI("http://myhentaicomics.com/index.php/Nienna-Lost-Tales").toURL()); testRipper(ripper); } - public void testGetGID() throws IOException { - URL url = new URL("http://myhentaicomics.com/index.php/Nienna-Lost-Tales"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("http://myhentaicomics.com/index.php/Nienna-Lost-Tales").toURL(); MyhentaicomicsRipper ripper = new MyhentaicomicsRipper(url); // Test a comic - assertEquals("Nienna-Lost-Tales", ripper.getGID(url)); + Assertions.assertEquals("Nienna-Lost-Tales", ripper.getGID(url)); // Test a search - assertEquals("test", ripper.getGID(new URL("http://myhentaicomics.com/index.php/search?q=test"))); + Assertions.assertEquals("test", ripper.getGID(new URI("http://myhentaicomics.com/index.php/search?q=test").toURL())); // Test a tag - assertEquals("2409", ripper.getGID(new URL("http://myhentaicomics.com/index.php/tag/2409/"))); + Assertions.assertEquals("2409", ripper.getGID(new URI("http://myhentaicomics.com/index.php/tag/2409/").toURL())); } @Test - public void testGetAlbumsToQueue() throws IOException { - URL url = new URL("https://myhentaicomics.com/index.php/tag/3167/"); + @Tag("flaky") + public void testGetAlbumsToQueue() throws IOException, URISyntaxException { + URL url = new URI("https://myhentaicomics.com/index.php/tag/3167/").toURL(); MyhentaicomicsRipper ripper = new MyhentaicomicsRipper(url); - assertEquals(15, ripper.getAlbumsToQueue(ripper.getFirstPage()).size()); + Assertions.assertEquals(15, ripper.getAlbumsToQueue(ripper.getFirstPage()).size()); } @Test - public void testPageContainsAlbums() throws IOException { - URL url = new URL("https://myhentaicomics.com/index.php/tag/3167/"); - URL url2 = new URL("https://myhentaicomics.com/index.php/search?q=test"); + public void testPageContainsAlbums() throws IOException, URISyntaxException { 
+ URL url = new URI("https://myhentaicomics.com/index.php/tag/3167/").toURL(); + URL url2 = new URI("https://myhentaicomics.com/index.php/search?q=test").toURL(); MyhentaicomicsRipper ripper = new MyhentaicomicsRipper(url); - assertTrue(ripper.pageContainsAlbums(url)); - assertTrue(ripper.pageContainsAlbums(url2)); + Assertions.assertTrue(ripper.pageContainsAlbums(url)); + Assertions.assertTrue(ripper.pageContainsAlbums(url2)); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyhentaigalleryRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyhentaigalleryRipperTest.java index 3d126b82..f7e4273a 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyhentaigalleryRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyhentaigalleryRipperTest.java @@ -1,22 +1,28 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.MyhentaigalleryRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class MyhentaigalleryRipperTest extends RippersTest { @Test - public void testMyhentaigalleryAlbum() throws IOException { + @Tag("flaky") + public void testMyhentaigalleryAlbum() throws IOException, URISyntaxException { MyhentaigalleryRipper ripper = new MyhentaigalleryRipper( - new URL("https://myhentaigallery.com/gallery/thumbnails/9201")); + new URI("https://myhentaigallery.com/gallery/thumbnails/9201").toURL()); testRipper(ripper); } - public void testGetGID() throws IOException { - URL url = new URL("https://myhentaigallery.com/gallery/thumbnails/9201"); + @Test + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://myhentaigallery.com/gallery/thumbnails/9201").toURL(); MyhentaigalleryRipper ripper = new 
MyhentaigalleryRipper(url); - assertEquals("9201", ripper.getGID(url)); + Assertions.assertEquals("9201", ripper.getGID(url)); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyreadingmangaRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyreadingmangaRipperTest.java index 16c5de5e..a6a5a13a 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyreadingmangaRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyreadingmangaRipperTest.java @@ -1,13 +1,15 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; + import com.rarchives.ripme.ripper.rippers.MyreadingmangaRipper; public class MyreadingmangaRipperTest extends RippersTest { - public void testRip() throws IOException { - MyreadingmangaRipper ripper = new MyreadingmangaRipper(new URL("https://myreadingmanga.info/zelo-lee-brave-lover-dj-slave-market-jp/")); + public void testRip() throws IOException, URISyntaxException { + MyreadingmangaRipper ripper = new MyreadingmangaRipper(new URI("https://myreadingmanga.info/zelo-lee-brave-lover-dj-slave-market-jp/").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NatalieMuRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NatalieMuRipperTest.java index 15a9d91a..e3522e71 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NatalieMuRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NatalieMuRipperTest.java @@ -25,10 +25,10 @@ // public void testNatalieMuURLPasses() throws IOException { // List passURLs = new ArrayList<>(); // // URLs that should work -// passURLs.add(new URL("http://natalie.mu/music/news/140367")); -// passURLs.add(new URL("http://cdn2.natalie.mu/music/news/140411")); -// passURLs.add(new 
URL("http://cdn2.natalie.mu/music/gallery/show/news_id/140411/image_id/369655")); -// passURLs.add(new URL("http://natalie.mu/music/gallery/show/news_id/139146/image_id/365218")); +// passURLs.add(new URI("http://natalie.mu/music/news/140367").toURL()); +// passURLs.add(new URI("http://cdn2.natalie.mu/music/news/140411").toURL()); +// passURLs.add(new URI("http://cdn2.natalie.mu/music/gallery/show/news_id/140411/image_id/369655").toURL()); +// passURLs.add(new URI("http://natalie.mu/music/gallery/show/news_id/139146/image_id/365218").toURL()); // for (URL url : passURLs) { // NatalieMuRipper ripper = new NatalieMuRipper(url); // ripper.setup(); @@ -42,19 +42,19 @@ // public void testNatalieMuRipper() throws IOException { // List contentURLs = new ArrayList<>(); // // URLs that should return more than 1 image -// contentURLs.add(new URL("http://natalie.mu/music/news/140367")); -// contentURLs.add(new URL("http://cdn2.natalie.mu/music/news/140411")); -// contentURLs.add(new URL("http://cdn2.natalie.mu/music/gallery/show/news_id/140411/image_id/369655")); -// contentURLs.add(new URL("http://natalie.mu/music/gallery/show/news_id/139146/image_id/365218")); +// contentURLs.add(new URI("http://natalie.mu/music/news/140367").toURL()); +// contentURLs.add(new URI("http://cdn2.natalie.mu/music/news/140411").toURL()); +// contentURLs.add(new URI("http://cdn2.natalie.mu/music/gallery/show/news_id/140411/image_id/369655").toURL()); +// contentURLs.add(new URI("http://natalie.mu/music/gallery/show/news_id/139146/image_id/365218").toURL()); // // // Most *chans have volatile threads & can't be trusted for integration testing. 
// -// //contentURLs.add(new URL("http://boards.4chan.org/r/res/12225949")); -// //contentURLs.add(new URL("http://7chan.org/gif/res/23795.html")); -// //contentURLs.add(new URL("http://unichan2.org/b/res/518004.html")); +// //contentURLs.add(new URI("http://boards.4chan.org/r/res/12225949").toURL()); +// //contentURLs.add(new URI("http://7chan.org/gif/res/23795.html").toURL()); +// //contentURLs.add(new URI("http://unichan2.org/b/res/518004.html").toURL()); // // // xchan has an HTTPS certificaiton error... -// //contentURLs.add(new URL("http://xchan.pw/porn/res/437.html")); +// //contentURLs.add(new URI("http://xchan.pw/porn/res/437.html").toURL()); // for (URL url : contentURLs) { // NatalieMuRipper ripper = new NatalieMuRipper(url); // testRipper(ripper); diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NewgroundsRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NewgroundsRipperTest.java index 5815aa8f..fcfac96d 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NewgroundsRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NewgroundsRipperTest.java @@ -1,22 +1,28 @@ package com.rarchives.ripme.tst.ripper.rippers; import com.rarchives.ripme.ripper.rippers.NewgroundsRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; public class NewgroundsRipperTest extends RippersTest { @Test - public void testNewgroundsRip() throws IOException { - NewgroundsRipper ripper = new NewgroundsRipper(new URL("https://zone-sama.newgrounds.com/art")); + @Tag("flaky") + public void testNewgroundsRip() throws IOException, URISyntaxException { + NewgroundsRipper ripper = new NewgroundsRipper(new URI("https://zone-sama.newgrounds.com/art").toURL()); testRipper(ripper); } - public void testGetGID() throws IOException { - URL url = new 
URL("https://zone-sama.newgrounds.com/art"); + @Test + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://zone-sama.newgrounds.com/art").toURL(); NewgroundsRipper ripper = new NewgroundsRipper(url); - assertEquals("zone-sama", ripper.getGID(url)); + Assertions.assertEquals("zone-sama", ripper.getGID(url)); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NewsfilterRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NewsfilterRipperTest.java deleted file mode 100644 index 8567b0ff..00000000 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NewsfilterRipperTest.java +++ /dev/null @@ -1,15 +0,0 @@ -package com.rarchives.ripme.tst.ripper.rippers; - -import java.io.IOException; -import java.net.URL; - -import com.rarchives.ripme.ripper.rippers.NewsfilterRipper; -import org.junit.jupiter.api.Test; - -public class NewsfilterRipperTest extends RippersTest { - @Test - public void testNewsfilterRip() throws IOException { - NewsfilterRipper ripper = new NewsfilterRipper(new URL("http://newsfilter.org/gallery/he-doubted-she-would-fuck-on-cam-happy-to-be-proven-wrong-216799")); - testRipper(ripper); - } -} \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NfsfwRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NfsfwRipperTest.java index f5bed040..0477a13d 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NfsfwRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NfsfwRipperTest.java @@ -1,10 +1,13 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.NfsfwRipper; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; @@ -12,21 +15,21 @@ public class NfsfwRipperTest extends RippersTest 
{ @Test @Disabled("https://github.com/RipMeApp/ripme/issues/291 -- nfsfw 'account suspended' error; disabled flaky test in CI") - public void testNfsfwRip() throws IOException { - NfsfwRipper ripper = new NfsfwRipper(new URL("http://nfsfw.com/gallery/v/Kitten/")); + public void testNfsfwRip() throws IOException, URISyntaxException { + NfsfwRipper ripper = new NfsfwRipper(new URI("http://nfsfw.com/gallery/v/Kitten/").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("http://nfsfw.com/gallery/v/Kitten/"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("http://nfsfw.com/gallery/v/Kitten/").toURL(); NfsfwRipper ripper = new NfsfwRipper(url); - assertEquals("Kitten", ripper.getGID(url)); - url = new URL("http://nfsfw.com/gallery/v/Kitten"); - assertEquals("Kitten", ripper.getGID(url)); - url = new URL("http://nfsfw.com/gallery/v/Kitten/gif_001/"); - assertEquals("Kitten__gif_001", ripper.getGID(url)); - url = new URL("http://nfsfw.com/gallery/v/Kitten/gif_001/"); - assertEquals("Kitten__gif_001", ripper.getGID(url)); + Assertions.assertEquals("Kitten", ripper.getGID(url)); + url = new URI("http://nfsfw.com/gallery/v/Kitten").toURL(); + Assertions.assertEquals("Kitten", ripper.getGID(url)); + url = new URI("http://nfsfw.com/gallery/v/Kitten/gif_001/").toURL(); + Assertions.assertEquals("Kitten__gif_001", ripper.getGID(url)); + url = new URI("http://nfsfw.com/gallery/v/Kitten/gif_001/").toURL(); + Assertions.assertEquals("Kitten__gif_001", ripper.getGID(url)); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NhentaiRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NhentaiRipperTest.java index 7e82adaf..1857e865 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NhentaiRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NhentaiRipperTest.java @@ -1,38 +1,43 @@ package 
com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.List; import com.rarchives.ripme.ripper.rippers.NhentaiRipper; import com.rarchives.ripme.utils.RipUtils; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class NhentaiRipperTest extends RippersTest { - public void testRip() throws IOException { - NhentaiRipper ripper = new NhentaiRipper(new URL("https://nhentai.net/g/233295/")); + public void testRip() throws IOException, URISyntaxException { + NhentaiRipper ripper = new NhentaiRipper(new URI("https://nhentai.net/g/233295/").toURL()); testRipper(ripper); } - public void testGetGID() throws IOException { - NhentaiRipper ripper = new NhentaiRipper(new URL("https://nhentai.net/g/233295/")); - assertEquals("233295", ripper.getGID(new URL("https://nhentai.net/g/233295/"))); + public void testGetGID() throws IOException, URISyntaxException { + NhentaiRipper ripper = new NhentaiRipper(new URI("https://nhentai.net/g/233295/").toURL()); + Assertions.assertEquals("233295", ripper.getGID(new URI("https://nhentai.net/g/233295/").toURL())); } // Test the tag black listing @Test - public void testTagBlackList() throws IOException { - URL url = new URL("https://nhentai.net/g/233295/"); + @Tag("flaky") + public void testTagBlackList() throws IOException, URISyntaxException { + URL url = new URI("https://nhentai.net/g/233295/").toURL(); NhentaiRipper ripper = new NhentaiRipper(url); List tagsOnPage = ripper.getTags(ripper.getFirstPage()); // Test multiple blacklisted tags String[] tags = {"test", "one", "blowjob"}; String blacklistedTag = RipUtils.checkTags(tags, tagsOnPage); - assertEquals("blowjob", blacklistedTag); + Assertions.assertEquals("blowjob", blacklistedTag); // test tags with spaces in them String[] tags2 = {"test", "one", "sole-female"}; blacklistedTag = RipUtils.checkTags(tags2, 
tagsOnPage); - assertEquals("sole-female", blacklistedTag); + Assertions.assertEquals("sole-female", blacklistedTag); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NsfwXxxRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NsfwXxxRipperTest.java new file mode 100644 index 00000000..06e6d5c6 --- /dev/null +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NsfwXxxRipperTest.java @@ -0,0 +1,16 @@ +package com.rarchives.ripme.tst.ripper.rippers; + +import com.rarchives.ripme.ripper.rippers.NsfwXxxRipper; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +public class NsfwXxxRipperTest extends RippersTest { + @Test + public void testNsfwXxxUser() throws IOException, URISyntaxException { + NsfwXxxRipper ripper = new NsfwXxxRipper(new URI("https://nsfw.xxx/user/smay3991").toURL()); + testRipper(ripper); + } +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NudeGalsRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NudeGalsRipperTest.java index 3353eeb5..fb348d94 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NudeGalsRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NudeGalsRipperTest.java @@ -1,18 +1,23 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.NudeGalsRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; public class NudeGalsRipperTest extends RippersTest { - public void testRip() throws IOException { - NudeGalsRipper ripper = new NudeGalsRipper(new URL("https://nude-gals.com/photoshoot.php?photoshoot_id=5541")); + @Test + public void testRip() throws IOException, URISyntaxException { + NudeGalsRipper ripper = new NudeGalsRipper(new 
URI("https://nude-gals.com/photoshoot.php?photoshoot_id=5541").toURL()); testRipper(ripper); } - public void testGetGID() throws IOException { - NudeGalsRipper ripper = new NudeGalsRipper(new URL("https://nude-gals.com/photoshoot.php?photoshoot_id=5541")); - assertEquals("5541", ripper.getGID( new URL("https://nude-gals.com/photoshoot.php?photoshoot_id=5541"))); + @Test + public void testGetGID() throws IOException, URISyntaxException { + NudeGalsRipper ripper = new NudeGalsRipper(new URI("https://nude-gals.com/photoshoot.php?photoshoot_id=5541").toURL()); + Assertions.assertEquals("5541", ripper.getGID( new URI("https://nude-gals.com/photoshoot.php?photoshoot_id=5541").toURL())); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/OglafRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/OglafRipperTest.java index 3e716f45..df5eb3dd 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/OglafRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/OglafRipperTest.java @@ -1,7 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.OglafRipper; @@ -9,8 +10,8 @@ import org.junit.jupiter.api.Test; public class OglafRipperTest extends RippersTest { @Test - public void testRip() throws IOException { - OglafRipper ripper = new OglafRipper(new URL("http://oglaf.com/plumes/")); + public void testRip() throws IOException, URISyntaxException { + OglafRipper ripper = new OglafRipper(new URI("http://oglaf.com/plumes/").toURL()); testRipper(ripper); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PahealRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PahealRipperTest.java index 84403005..d78ad5ef 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PahealRipperTest.java +++ 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PahealRipperTest.java @@ -1,16 +1,17 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.PahealRipper; import org.junit.jupiter.api.Test; public class PahealRipperTest extends RippersTest { @Test - public void testPahealRipper() throws IOException { + public void testPahealRipper() throws IOException, URISyntaxException { // a photo set - PahealRipper ripper = new PahealRipper(new URL("http://rule34.paheal.net/post/list/bimbo/1")); + PahealRipper ripper = new PahealRipper(new URI("http://rule34.paheal.net/post/list/bimbo/1").toURL()); testRipper(ripper); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PawooRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PawooRipperTest.java index 730a965c..664f3fec 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PawooRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PawooRipperTest.java @@ -1,15 +1,18 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.PawooRipper; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class PawooRipperTest extends RippersTest { @Test - public void testRip() throws IOException { - PawooRipper ripper = new PawooRipper(new URL("https://pawoo.net/@halki/media")); + @Tag("flaky") + public void testRip() throws IOException, URISyntaxException { + PawooRipper ripper = new PawooRipper(new URI("https://pawoo.net/@halki/media").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PhotobucketRipperTest.java 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PhotobucketRipperTest.java index b2d409d3..8581d038 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PhotobucketRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PhotobucketRipperTest.java @@ -1,10 +1,13 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.PhotobucketRipper; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; @@ -12,9 +15,9 @@ public class PhotobucketRipperTest extends RippersTest { @Test @Disabled("https://github.com/RipMeApp/ripme/issues/229 : Disabled test (temporary) : BasicRippersTest#testPhotobucketRip (timing out)") - public void testPhotobucketRip() throws IOException { + public void testPhotobucketRip() throws IOException, URISyntaxException { PhotobucketRipper ripper = new PhotobucketRipper( - new URL("http://s844.photobucket.com/user/SpazzySpizzy/library/Album%20Covers?sort=3&page=1")); + new URI("http://s844.photobucket.com/user/SpazzySpizzy/library/Album%20Covers?sort=3&page=1").toURL()); testRipper(ripper); deleteSubdirs(ripper.getWorkingDir()); deleteDir(ripper.getWorkingDir()); @@ -22,12 +25,12 @@ public class PhotobucketRipperTest extends RippersTest { @Test @Disabled("new test, still disabled out because of the issue above, since this test also involves network IO.") - public void testGetNextPage() throws IOException { + public void testGetNextPage() throws IOException, URISyntaxException { // this album should have more than enough sub-albums and pages // to serve as a pretty good iteration test (barring server or // network errors) String baseURL = "http://s1255.photobucket.com/user/mimajki/library/Movie%20gifs?sort=6&page=1"; - URL url = new URL(baseURL); + URL url = new URI(baseURL).toURL(); PhotobucketRipper ripper = new 
PhotobucketRipper(url); org.jsoup.nodes.Document page = ripper.getFirstPage(); // NOTE: number of pages remaining includes the subalbums @@ -39,24 +42,24 @@ public class PhotobucketRipperTest extends RippersTest { } try { page = ripper.getNextPage(page); - fail("Get next page did not throw an exception on the last page"); + Assertions.fail("Get next page did not throw an exception on the last page"); } catch (IOException e) { - assertEquals(e.getMessage(), "No more pages"); + Assertions.assertEquals(e.getMessage(), "No more pages"); } } @Test - public void testGetGID() throws IOException { - URL url = new URL( - "http://s732.photobucket.com/user/doublesix66/library/Army%20Painter%20examples?sort=3&page=1"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI( + "http://s732.photobucket.com/user/doublesix66/library/Army%20Painter%20examples?sort=3&page=1").toURL(); PhotobucketRipper ripper = new PhotobucketRipper(url); - assertEquals("doublesix66", ripper.getGID(url)); - url = new URL( - "http://s732.photobucket.com/user/doublesix66/library/Army%20Painter%20examples/Painting%20examples?page=1&sort=3"); - assertEquals("doublesix66", ripper.getGID(url)); - url = new URL("http://s844.photobucket.com/user/SpazzySpizzy/library/Album%20Covers"); - assertEquals("SpazzySpizzy", ripper.getGID(url)); - url = new URL("http://s844.photobucket.com/user/SpazzySpizzy/library"); - assertEquals("SpazzySpizzy", ripper.getGID(url)); + Assertions.assertEquals("doublesix66", ripper.getGID(url)); + url = new URI( + "http://s732.photobucket.com/user/doublesix66/library/Army%20Painter%20examples/Painting%20examples?page=1&sort=3").toURL(); + Assertions.assertEquals("doublesix66", ripper.getGID(url)); + url = new URI("http://s844.photobucket.com/user/SpazzySpizzy/library/Album%20Covers").toURL(); + Assertions.assertEquals("SpazzySpizzy", ripper.getGID(url)); + url = new URI("http://s844.photobucket.com/user/SpazzySpizzy/library").toURL(); + 
Assertions.assertEquals("SpazzySpizzy", ripper.getGID(url)); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PichunterRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PichunterRipperTest.java index a45f45aa..fc79cb97 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PichunterRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PichunterRipperTest.java @@ -1,27 +1,29 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.PichunterRipper; -import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class PichunterRipperTest extends RippersTest { @Test - @Disabled("This test was commented out at 6/08/2018 because it was randomly failing due to issues with the site see https://github.com/RipMeApp/ripme/issues/867") - public void testPichunterModelPageRip() throws IOException { + @Tag("flaky") + public void testPichunterModelPageRip() throws IOException, URISyntaxException { // A non-photoset - PichunterRipper ripper = new PichunterRipper(new URL("https://www.pichunter.com/models/Madison_Ivy")); + PichunterRipper ripper = new PichunterRipper(new URI("https://www.pichunter.com/models/Madison_Ivy").toURL()); testRipper(ripper); } @Test - public void testPichunterGalleryRip() throws IOException { + @Tag("flaky") + public void testPichunterGalleryRip() throws IOException, URISyntaxException { // a photo set PichunterRipper ripper = new PichunterRipper( - new URL("http://www.pichunter.com/gallery/3270642/Its_not_only_those_who")); + new URI("http://www.pichunter.com/gallery/3270642/Its_not_only_those_who").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PicstatioRipperTest.java 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PicstatioRipperTest.java index 1c5cf273..04da17a8 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PicstatioRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PicstatioRipperTest.java @@ -1,20 +1,24 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.PicstatioRipper; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; public class PicstatioRipperTest extends RippersTest { - public void testRip() throws IOException { - PicstatioRipper ripper = new PicstatioRipper(new URL("https://www.picstatio.com/aerial-view-wallpapers")); + public void testRip() throws IOException, URISyntaxException { + PicstatioRipper ripper = new PicstatioRipper(new URI("https://www.picstatio.com/aerial-view-wallpapers").toURL()); testRipper(ripper); } @Test - public void testGID() throws IOException { - PicstatioRipper ripper = new PicstatioRipper(new URL("https://www.picstatio.com/aerial-view-wallpapers")); - assertEquals("aerial-view-wallpapers", ripper.getGID(new URL("https://www.picstatio.com/aerial-view-wallpapers"))); + public void testGID() throws IOException, URISyntaxException { + URL url = new URI("https://www.picstatio.com/aerial-view-wallpapers").toURL(); + PicstatioRipper ripper = new PicstatioRipper(url); + Assertions.assertEquals("aerial-view-wallpapers", ripper.getGID(url)); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PorncomixDotOneRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PorncomixDotOneRipperTest.java deleted file mode 100644 index 9d4df122..00000000 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PorncomixDotOneRipperTest.java +++ /dev/null @@ -1,15 +0,0 @@ -package com.rarchives.ripme.tst.ripper.rippers; - -import 
java.io.IOException; -import java.net.URL; - -import com.rarchives.ripme.ripper.rippers.PorncomixDotOneRipper; -import org.junit.jupiter.api.Test; - -public class PorncomixDotOneRipperTest extends RippersTest { - @Test - public void testPorncomixAlbum() throws IOException { - PorncomixDotOneRipper ripper = new PorncomixDotOneRipper(new URL("https://www.porncomix.one/gallery/blacknwhite-make-america-great-again")); - testRipper(ripper); - } -} \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PorncomixRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PorncomixRipperTest.java index 7abe6e1e..ad9d9b83 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PorncomixRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PorncomixRipperTest.java @@ -1,13 +1,14 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.PorncomixRipper; public class PorncomixRipperTest extends RippersTest { - public void testPorncomixAlbum() throws IOException { - PorncomixRipper ripper = new PorncomixRipper(new URL("http://www.porncomix.info/lust-unleashed-desire-to-submit/")); + public void testPorncomixAlbum() throws IOException, URISyntaxException { + PorncomixRipper ripper = new PorncomixRipper(new URI("http://www.porncomix.info/lust-unleashed-desire-to-submit/").toURL()); testRipper(ripper); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PorncomixinfoRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PorncomixinfoRipperTest.java new file mode 100644 index 00000000..e8628955 --- /dev/null +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PorncomixinfoRipperTest.java @@ -0,0 +1,18 @@ +package com.rarchives.ripme.tst.ripper.rippers; + +import java.io.IOException; +import 
java.net.URI; +import java.net.URISyntaxException; + +import com.rarchives.ripme.ripper.rippers.PorncomixinfoRipper; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +public class PorncomixinfoRipperTest extends RippersTest { + @Test + @Tag("flaky") + public void testRip() throws IOException, URISyntaxException { + PorncomixinfoRipper ripper = new PorncomixinfoRipper(new URI("https://porncomixinfo.net/chapter/alx-come-to-naught-down-in-flames-up-in-smoke-tracy-scops/alx-come-to-naught-down-in-flames-up-in-smoke-tracy-scops/").toURL()); + testRipper(ripper); + } +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PornhubRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PornhubRipperTest.java index 84094515..1bc6520f 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PornhubRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PornhubRipperTest.java @@ -1,47 +1,51 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.PornhubRipper; import com.rarchives.ripme.utils.Http; import com.rarchives.ripme.utils.Utils; import org.jsoup.nodes.Document; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class PornhubRipperTest extends RippersTest { @Test - public void testPornhubRip() throws IOException { + public void testPornhubRip() throws IOException, URISyntaxException { if (Utils.getConfigBoolean("test.run_flaky_tests", false)) { - PornhubRipper ripper = new PornhubRipper(new URL("https://www.pornhub.com/album/15680522")); + PornhubRipper ripper = new PornhubRipper(new URI("https://www.pornhub.com/album/15680522").toURL()); testRipper(ripper); } } - public void testGetGID() throws IOException { - URL url = new URL("https://www.pornhub.com/album/15680522?page=2"); + 
public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://www.pornhub.com/album/15680522?page=2").toURL(); PornhubRipper ripper = new PornhubRipper(url); - assertEquals("15680522", ripper.getGID(url)); - url = new URL("https://www.pornhub.com/album/15680522"); - assertEquals("15680522", ripper.getGID(url)); + Assertions.assertEquals("15680522", ripper.getGID(url)); + url = new URI("https://www.pornhub.com/album/15680522").toURL(); + Assertions.assertEquals("15680522", ripper.getGID(url)); } - // alternate album, with only 2 pages: https://www.pornhub.com/album/4771891 @Test - public void testGetNextPage() throws IOException { - String baseURL = "https://www.pornhub.com/album/15680522"; - PornhubRipper ripper = new PornhubRipper(new URL(baseURL)); + @Tag("flaky") + public void testGetNextPage() throws IOException, URISyntaxException { + String baseURL = "https://www.pornhub.com/album/30687901"; + PornhubRipper ripper = new PornhubRipper(new URI(baseURL).toURL()); Document page = Http.url(baseURL).get(); - int numPagesRemaining = 4; + int numPagesRemaining = 1; for (int idx = 0; idx < numPagesRemaining; idx++){ page = ripper.getNextPage(page); - assertEquals(baseURL + "?page=" + (idx + 2), page.location()); + Assertions.assertEquals(baseURL + "?page=" + (idx + 2), page.location()); } try { page = ripper.getNextPage(page); - fail("Get next page did not throw an exception on the last page"); + Assertions.fail("Get next page did not throw an exception on the last page"); } catch(IOException e){ - assertEquals(e.getMessage(), "No more pages"); + Assertions.assertEquals(e.getMessage(), "No more pages"); } } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PornpicsRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PornpicsRipperTest.java index 1f79b254..4fda9aee 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PornpicsRipperTest.java +++ 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PornpicsRipperTest.java @@ -1,13 +1,14 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.PornpicsRipper; public class PornpicsRipperTest extends RippersTest { - public void testRip() throws IOException { - PornpicsRipper ripper = new PornpicsRipper(new URL("https://www.pornpics.com/galleries/pornstar-dahlia-sky-takes-a-fat-cock-in-her-butthole-wearing-fishnet-stockings/")); + public void testRip() throws IOException, URISyntaxException { + PornpicsRipper ripper = new PornpicsRipper(new URI("https://www.pornpics.com/galleries/pornstar-dahlia-sky-takes-a-fat-cock-in-her-butthole-wearing-fishnet-stockings/").toURL()); testRipper(ripper); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RedditRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RedditRipperTest.java index d5d9600d..db0fa530 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RedditRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RedditRipperTest.java @@ -2,46 +2,65 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.RedditRipper; import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class RedditRipperTest extends RippersTest { @Test - @Disabled("Rip is flaky") // https://github.com/RipMeApp/ripme/issues/253 - public void testRedditSubredditRip() throws IOException { - RedditRipper ripper = new RedditRipper(new URL("http://www.reddit.com/r/nsfw_oc")); + @Tag("flaky") // https://github.com/RipMeApp/ripme/issues/253 + public void testRedditSubredditRip() throws IOException, 
URISyntaxException { + RedditRipper ripper = new RedditRipper(new URI("http://www.reddit.com/r/nsfw_oc").toURL()); testRipper(ripper); } @Test - @Disabled("Rip is flaky") // https://github.com/RipMeApp/ripme/issues/253 - public void testRedditSubredditTopRip() throws IOException { - RedditRipper ripper = new RedditRipper(new URL("http://www.reddit.com/r/nsfw_oc/top?t=all")); + @Tag("flaky") // https://github.com/RipMeApp/ripme/issues/253 + public void testRedditSubredditTopRip() throws IOException, URISyntaxException { + RedditRipper ripper = new RedditRipper(new URI("http://www.reddit.com/r/nsfw_oc/top?t=all").toURL()); testRipper(ripper); } @Test @Disabled - public void testRedditPostRip() throws IOException { + public void testRedditPostRip() throws IOException, URISyntaxException { RedditRipper ripper = new RedditRipper( - new URL("http://www.reddit.com/r/UnrealGirls/comments/1ziuhl/in_class_veronique_popa/")); + new URI("http://www.reddit.com/r/UnrealGirls/comments/1ziuhl/in_class_veronique_popa/").toURL()); testRipper(ripper); } - /** + /**testRedditSubredditRip:19 * GFYCAT TEST Tests a good GfycatURL (no "/gifs/detail") * * @throws IOException */ @Test - public void testRedditGfyGoodURL() throws IOException { + @Tag("flaky") + public void testRedditGfyGoodURL() throws IOException, URISyntaxException { RedditRipper ripper = new RedditRipper( - new URL("https://www.reddit.com/r/bottesting/comments/7msozf/good_link/")); + new URI("https://www.reddit.com/r/bottesting/comments/7msozf/good_link/").toURL()); + testRipper(ripper); + } + + @Test + @Tag("flaky") + public void testSelfPostRip() throws IOException, URISyntaxException { + RedditRipper ripper = new RedditRipper( + new URI("https://www.reddit.com/r/gonewildstories/comments/oz7d97/f_18_finally_having_a_normal_sex_life/").toURL() + ); + testRipper(ripper); + } + + @Test + @Tag("flaky") + public void testSelfPostAuthorRip() throws IOException, URISyntaxException { + RedditRipper ripper = new RedditRipper(new 
URI("https://www.reddit.com/user/ickybabie_").toURL()); testRipper(ripper); } @@ -51,9 +70,29 @@ public class RedditRipperTest extends RippersTest { * @throws IOException */ @Test - public void testRedditGfyBadURL() throws IOException { + @Tag("flaky") + public void testRedditGfyBadURL() throws IOException, URISyntaxException { RedditRipper ripper = new RedditRipper( - new URL("https://www.reddit.com/r/bottesting/comments/7msmhi/bad_link/")); + new URI("https://www.reddit.com/r/bottesting/comments/7msmhi/bad_link/").toURL()); + testRipper(ripper); + } + + /** + * GFYCAT TEST Tests a gfycat URL with the gifdeliverynetwork/redgifs hosted video + * + * @throws IOException + */ + @Test + public void testRedditGfycatRedirectURL() throws IOException, URISyntaxException { + RedditRipper ripper = new RedditRipper( + new URI("https://www.reddit.com/r/NSFW_GIF/comments/ennwsa/gorgeous_tits/").toURL()); + } + + @Test + @Tag("flaky") + public void testRedditGallery() throws IOException, URISyntaxException { + RedditRipper ripper = new RedditRipper( + new URI("https://www.reddit.com/gallery/hrrh23").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RedgifsRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RedgifsRipperTest.java new file mode 100644 index 00000000..3ef0759c --- /dev/null +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RedgifsRipperTest.java @@ -0,0 +1,66 @@ +package com.rarchives.ripme.tst.ripper.rippers; + +import com.rarchives.ripme.ripper.rippers.RedditRipper; +import com.rarchives.ripme.ripper.rippers.RedgifsRipper; +import org.junit.jupiter.api.*; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +public class RedgifsRipperTest extends RippersTest { + + /** + * Rips correctly formatted URL directly from Redgifs + */ + @Test + public void testRedgifsGoodURL() throws IOException, URISyntaxException { + RedgifsRipper ripper = new 
RedgifsRipper(new URI("https://www.redgifs.com/watch/ashamedselfishcoypu").toURL()); + testRipper(ripper); + } + + /** + * Rips gifdeliverynetwork URL's by redirecting them to proper redgifs url + */ + @Test + public void testRedgifsBadRL() throws IOException, URISyntaxException { + RedgifsRipper ripper = new RedgifsRipper(new URI("https://www.gifdeliverynetwork.com/consideratetrustworthypigeon").toURL()); + testRipper(ripper); + } + + /** + * Rips a Redgifs profile + */ + @Test + public void testRedgifsProfile() throws IOException, URISyntaxException { + RedgifsRipper ripper = new RedgifsRipper(new URI("https://www.redgifs.com/users/ra-kunv2").toURL()); + testRipper(ripper); + } + + /** + * Rips a Redgif search + * @throws IOException + */ + @Test + public void testRedgifsSearch() throws IOException, URISyntaxException { + RedgifsRipper ripper = new RedgifsRipper(new URI("https://www.redgifs.com/search?query=take+a+shot+every+time").toURL()); + testRipper(ripper); + } + + /** + * Rips Redgif tags + * @throws IOException + */ + @Test + public void testRedgifsTags() throws IOException, URISyntaxException { + RedgifsRipper ripper = new RedgifsRipper(new URI("https://www.redgifs.com/gifs/animation,sfw,funny?order=best&tab=gifs").toURL()); + testRipper(ripper); + } + + @Test + @Tag("flaky") + public void testRedditRedgifs() throws IOException, URISyntaxException { + RedditRipper ripper = new RedditRipper(new URI("https://www.reddit.com/r/nsfwhardcore/comments/ouz5bw/me_cumming_on_his_face/").toURL()); + testRipper(ripper); + } +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RippersTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RippersTest.java index 87530881..24fa8ea7 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RippersTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RippersTest.java @@ -6,9 +6,12 @@ import java.util.List; import com.rarchives.ripme.ripper.rippers.ChanRipper; -import 
org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LoggerContext; +import org.apache.logging.log4j.core.config.Configuration; +import org.apache.logging.log4j.core.config.LoggerConfig; import org.junit.jupiter.api.Assertions; import com.rarchives.ripme.ripper.AbstractRipper; @@ -19,16 +22,16 @@ import com.rarchives.ripme.utils.Utils; */ public class RippersTest { - private final Logger logger = Logger.getLogger(RippersTest.class); - - public void testStub() { - assertTrue("RippersTest must contain at lease one test.", true); - } + private final Logger logger = LogManager.getLogger(RippersTest.class); void testRipper(AbstractRipper ripper) { try { // Turn on Debug logging - ((ConsoleAppender) Logger.getRootLogger().getAppender("stdout")).setThreshold(Level.DEBUG); + LoggerContext ctx = (LoggerContext) LogManager.getContext(false); + Configuration config = ctx.getConfiguration(); + LoggerConfig loggerConfig = config.getLoggerConfig(LogManager.ROOT_LOGGER_NAME); + loggerConfig.setLevel(Level.DEBUG); + ctx.updateLoggers(); // This causes all Loggers to refetch information from their LoggerConfig. 
// Decrease timeout Utils.setConfigInteger("page.timeout", 20 * 1000); @@ -36,18 +39,25 @@ public class RippersTest { ripper.setup(); ripper.markAsTest(); ripper.rip(); - assertTrue("Failed to download a single file from " + ripper.getURL(), - ripper.getWorkingDir().listFiles().length >= 1); + if (logger.isTraceEnabled()) { + logger.trace("working dir: " + ripper.getWorkingDir()); + logger.trace("list files: " + ripper.getWorkingDir().listFiles().length); + for (int i = 0; i < ripper.getWorkingDir().listFiles().length; i++) { + logger.trace(" " + ripper.getWorkingDir().listFiles()[i]); + } + } + Assertions.assertTrue(ripper.getWorkingDir().listFiles().length >= 1, + "Failed to download a single file from " + ripper.getURL()); } catch (IOException e) { if (e.getMessage().contains("Ripping interrupted")) { // We expect some rips to get interrupted } else { e.printStackTrace(); - fail("Failed to rip " + ripper.getURL() + " : " + e.getMessage()); + Assertions.fail("Failed to rip " + ripper.getURL() + " : " + e.getMessage()); } } catch (Exception e) { e.printStackTrace(); - fail("Failed to rip " + ripper.getURL() + " : " + e.getMessage()); + Assertions.fail("Failed to rip " + ripper.getURL() + " : " + e.getMessage()); } finally { deleteDir(ripper.getWorkingDir()); } @@ -60,26 +70,23 @@ public class RippersTest { // that we found links to it void testChanRipper(ChanRipper ripper) { try { - // Turn on Debug logging - ((ConsoleAppender) Logger.getRootLogger().getAppender("stdout")).setThreshold(Level.DEBUG); - // Decrease timeout Utils.setConfigInteger("page.timeout", 20 * 1000); ripper.setup(); ripper.markAsTest(); List foundUrls = ripper.getURLsFromPage(ripper.getFirstPage()); - assertTrue("Failed to find single url on page " + ripper.getURL(), foundUrls.size() >= 1); + Assertions.assertTrue(foundUrls.size() >= 1, "Failed to find single url on page " + ripper.getURL()); } catch (IOException e) { if (e.getMessage().contains("Ripping interrupted")) { // We expect some rips 
to get interrupted } else { e.printStackTrace(); - fail("Failed to rip " + ripper.getURL() + " : " + e.getMessage()); + Assertions.fail("Failed to rip " + ripper.getURL() + " : " + e.getMessage()); } } catch (Exception e) { e.printStackTrace(); - fail("Failed to rip " + ripper.getURL() + " : " + e.getMessage()); + Assertions.fail("Failed to rip " + ripper.getURL() + " : " + e.getMessage()); } finally { deleteDir(ripper.getWorkingDir()); } @@ -126,49 +133,4 @@ public class RippersTest { } } - @Deprecated - void assertEquals(String expected, String actual) { - Assertions.assertEquals(expected, actual); - } - - @Deprecated - void assertEquals(String message, String expected, String actual) { - Assertions.assertEquals(expected, actual, message); - } - - @Deprecated - void assertEquals(Object expected, Object actual) { - Assertions.assertEquals(expected, actual); - } - - @Deprecated - void fail(String message) { - Assertions.fail(message); - } - - @Deprecated - void assertTrue(boolean condition) { - Assertions.assertTrue(condition); - } - - @Deprecated - void assertTrue(String failMessage, boolean condition) { - Assertions.assertTrue(condition, failMessage); - } - - @Deprecated - void assertFalse(String message, boolean condition) { - Assertions.assertFalse(condition, message); - } - - @Deprecated - void assertNull(Object actual) { - Assertions.assertNull(actual); - } - - @Deprecated - void assertNotNull(String message, Object actual) { - Assertions.assertNotNull(actual, message); - } - } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Rule34RipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Rule34RipperTest.java index 129fedd5..662a7eb7 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Rule34RipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Rule34RipperTest.java @@ -1,22 +1,26 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import 
java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.Rule34Ripper; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; public class Rule34RipperTest extends RippersTest { @Test - public void testShesFreakyRip() throws IOException { - Rule34Ripper ripper = new Rule34Ripper(new URL("https://rule34.xxx/index.php?page=post&s=list&tags=bimbo")); + public void testShesFreakyRip() throws IOException, URISyntaxException { + Rule34Ripper ripper = new Rule34Ripper(new URI("https://rule34.xxx/index.php?page=post&s=list&tags=bimbo").toURL()); testRipper(ripper); } - public void testGetGID() throws IOException { - URL url = new URL("https://rule34.xxx/index.php?page=post&s=list&tags=bimbo"); + @Test + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://rule34.xxx/index.php?page=post&s=list&tags=bimbo").toURL(); Rule34Ripper ripper = new Rule34Ripper(url); - assertEquals("bimbo", ripper.getGID(url)); + Assertions.assertEquals("bimbo", ripper.getGID(url)); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RulePornRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RulePornRipperTest.java index fb23b19a..73f79a56 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RulePornRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RulePornRipperTest.java @@ -1,19 +1,25 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.RulePornRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; public class RulePornRipperTest extends RippersTest { - public void testRip() throws IOException { - RulePornRipper ripper = new RulePornRipper(new URL("https://ruleporn.com/are-you-going-to-fill-my-lil-pussy-up/")); + @Test + public void testRip() throws 
IOException, URISyntaxException { + RulePornRipper ripper = new RulePornRipper(new URI("https://ruleporn.com/tosh/").toURL()); testRipper(ripper); } - public void testGetGID() throws IOException { - URL url = new URL("https://ruleporn.com/are-you-going-to-fill-my-lil-pussy-up/"); + @Test + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://ruleporn.com/tosh/").toURL(); RulePornRipper ripper = new RulePornRipper(url); - assertEquals("are-you-going-to-fill-my-lil-pussy-up", ripper.getGID(url)); + Assertions.assertEquals("tosh", ripper.getGID(url)); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SankakuComplexRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SankakuComplexRipperTest.java index 035d7767..4efe9ba2 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SankakuComplexRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SankakuComplexRipperTest.java @@ -1,41 +1,44 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.SankakuComplexRipper; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; public class SankakuComplexRipperTest extends RippersTest { @Test @Disabled("https://github.com/RipMeApp/ripme/issues/257") - public void testSankakuChanRip() throws IOException { + public void testSankakuChanRip() throws IOException, URISyntaxException { SankakuComplexRipper ripper = new SankakuComplexRipper( - new URL("https://chan.sankakucomplex.com/?tags=cleavage")); + new URI("https://chan.sankakucomplex.com/?tags=cleavage").toURL()); testRipper(ripper); } @Test @Disabled("https://github.com/RipMeApp/ripme/issues/257") - public void testSankakuIdolRip() throws IOException { + public void testSankakuIdolRip() 
throws IOException, URISyntaxException { SankakuComplexRipper ripper = new SankakuComplexRipper( - new URL("https://idol.sankakucomplex.com/?tags=meme_%28me%21me%21me%21%29_%28cosplay%29")); + new URI("https://idol.sankakucomplex.com/?tags=meme_%28me%21me%21me%21%29_%28cosplay%29").toURL()); testRipper(ripper); } @Test - public void testgetGID() throws IOException { - URL url = new URL("https://idol.sankakucomplex.com/?tags=meme_%28me%21me%21me%21%29_%28cosplay%29"); + public void testgetGID() throws IOException, URISyntaxException { + URL url = new URI("https://idol.sankakucomplex.com/?tags=meme_%28me%21me%21me%21%29_%28cosplay%29").toURL(); SankakuComplexRipper ripper = new SankakuComplexRipper(url); - assertEquals("idol._meme_(me!me!me!)_(cosplay)", ripper.getGID(url)); + Assertions.assertEquals("idol._meme_(me!me!me!)_(cosplay)", ripper.getGID(url)); } @Test - public void testgetSubDomain() throws IOException { - URL url = new URL("https://idol.sankakucomplex.com/?tags=meme_%28me%21me%21me%21%29_%28cosplay%29"); + public void testgetSubDomain() throws IOException, URISyntaxException { + URL url = new URI("https://idol.sankakucomplex.com/?tags=meme_%28me%21me%21me%21%29_%28cosplay%29").toURL(); SankakuComplexRipper ripper = new SankakuComplexRipper(url); - assertEquals("idol.", ripper.getSubDomain(url)); + Assertions.assertEquals("idol.", ripper.getSubDomain(url)); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ScrolllerRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ScrolllerRipperTest.java new file mode 100644 index 00000000..44bf06cf --- /dev/null +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ScrolllerRipperTest.java @@ -0,0 +1,55 @@ +package com.rarchives.ripme.tst.ripper.rippers; + +import com.rarchives.ripme.ripper.rippers.ScrolllerRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.net.URI; +import 
java.net.URISyntaxException; +import java.net.URL; +import java.util.HashMap; +import java.util.Map; + +public class ScrolllerRipperTest extends RippersTest { + @Test + public void testScrolllerGID() throws IOException, URISyntaxException { + Map testURLs = new HashMap<>(); + + testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp").toURL(), "CatsStandingUp"); + testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?filter=pictures").toURL(), "CatsStandingUp"); + testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?sort=top&filter=pictures").toURL(), "CatsStandingUp"); + testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?filter=pictures&sort=top").toURL(), "CatsStandingUp"); + for (URL url : testURLs.keySet()) { + ScrolllerRipper ripper = new ScrolllerRipper(url); + ripper.setup(); + Assertions.assertEquals(testURLs.get(url), ripper.getGID(ripper.getURL())); + deleteDir(ripper.getWorkingDir()); + } + } + + @Test + public void testScrolllerFilterRegex() throws IOException, URISyntaxException { + Map testURLs = new HashMap<>(); + + testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp").toURL(), "NOFILTER"); + testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?filter=pictures").toURL(), "PICTURE"); + testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?filter=videos").toURL(), "VIDEO"); + testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?filter=albums").toURL(), "ALBUM"); + testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?sort=top&filter=pictures").toURL(), "PICTURE"); + testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?sort=top&filter=videos").toURL(), "VIDEO"); + testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?sort=top&filter=albums").toURL(), "ALBUM"); + testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?filter=pictures&sort=top").toURL(), "PICTURE"); + testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?filter=videos&sort=top").toURL(), 
"VIDEO"); + testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?filter=albums&sort=top").toURL(), "ALBUM"); + for (URL url : testURLs.keySet()) { + ScrolllerRipper ripper = new ScrolllerRipper(url); + ripper.setup(); + Assertions.assertEquals(testURLs.get(url), ripper.convertFilterString(ripper.getParameter(ripper.getURL(),"filter"))); + deleteDir(ripper.getWorkingDir()); + } + } + + + +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ShesFreakyRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ShesFreakyRipperTest.java index 7a7ea7f0..f389974b 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ShesFreakyRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ShesFreakyRipperTest.java @@ -1,26 +1,29 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.ShesFreakyRipper; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; public class ShesFreakyRipperTest extends RippersTest { @Test @Disabled("https://github.com/RipMeApp/ripme/issues/254") - public void testShesFreakyRip() throws IOException { + public void testShesFreakyRip() throws IOException, URISyntaxException { ShesFreakyRipper ripper = new ShesFreakyRipper( - new URL("http://www.shesfreaky.com/gallery/nicee-snow-bunny-579NbPjUcYa.html")); + new URI("http://www.shesfreaky.com/gallery/nicee-snow-bunny-579NbPjUcYa.html").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("http://www.shesfreaky.com/gallery/nicee-snow-bunny-579NbPjUcYa.html"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("http://www.shesfreaky.com/gallery/nicee-snow-bunny-579NbPjUcYa.html").toURL(); ShesFreakyRipper ripper = new ShesFreakyRipper(url); - 
assertEquals("nicee-snow-bunny-579NbPjUcYa", ripper.getGID(url)); + Assertions.assertEquals("nicee-snow-bunny-579NbPjUcYa", ripper.getGID(url)); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SinfestRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SinfestRipperTest.java index c4f56432..b7a8da53 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SinfestRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SinfestRipperTest.java @@ -1,19 +1,27 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.SinfestRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; public class SinfestRipperTest extends RippersTest { - public void testRip() throws IOException { - SinfestRipper ripper = new SinfestRipper(new URL("http://sinfest.net/view.php?date=2000-01-17")); + @Test + @Tag("flaky") + public void testRip() throws IOException, URISyntaxException { + SinfestRipper ripper = new SinfestRipper(new URI("http://sinfest.net/view.php?date=2000-01-17").toURL()); testRipper(ripper); } - public void testGetGID() throws IOException { - URL url = new URL("http://sinfest.net/view.php?date=2000-01-17"); + @Test + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("http://sinfest.net/view.php?date=2000-01-17").toURL(); SinfestRipper ripper = new SinfestRipper(url); - assertEquals("2000-01-17", ripper.getGID(url)); + Assertions.assertEquals("2000-01-17", ripper.getGID(url)); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SinnercomicsRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SinnercomicsRipperTest.java deleted file mode 100644 index 29ad8cf3..00000000 --- 
a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SinnercomicsRipperTest.java +++ /dev/null @@ -1,33 +0,0 @@ -package com.rarchives.ripme.tst.ripper.rippers; - -import java.io.IOException; -import java.net.URL; - -import com.rarchives.ripme.ripper.rippers.SinnercomicsRipper; -import org.junit.jupiter.api.Test; - -public class SinnercomicsRipperTest extends RippersTest { - @Test - public void testSinnercomicsAlbum() throws IOException { - SinnercomicsRipper ripper; - - ripper = new SinnercomicsRipper(new URL("https://sinnercomics.com/comic/gw-addendum-page-01/")); - testRipper(ripper); - - } - - public void testGetGID() throws IOException { - URL url; - SinnercomicsRipper ripper; - - // Comic test - url = new URL("https://sinnercomics.com/comic/beyond-the-hotel-page-01/"); - ripper = new SinnercomicsRipper(url); - assertEquals("beyond-the-hotel", ripper.getGID(url)); - - // Comic test - url = new URL("https://sinnercomics.com/elza-frozen-2/#comments"); - ripper = new SinnercomicsRipper(url); - assertEquals("elza-frozen-2", ripper.getGID(url)); - } -} \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SmuttyRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SmuttyRipperTest.java index c7aa694e..99c3f1aa 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SmuttyRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SmuttyRipperTest.java @@ -1,19 +1,27 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.SmuttyRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; public class SmuttyRipperTest extends RippersTest { - public void testRip() throws IOException { - SmuttyRipper ripper = new SmuttyRipper(new URL("https://smutty.com/user/QUIGON/")); + @Test + 
@Tag("flaky") + public void testRip() throws IOException, URISyntaxException { + SmuttyRipper ripper = new SmuttyRipper(new URI("https://smutty.com/user/QUIGON/").toURL()); testRipper(ripper); } - public void testGetGID() throws IOException { - URL url = new URL("https://smutty.com/user/QUIGON/"); + @Test + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://smutty.com/user/QUIGON/").toURL(); SmuttyRipper ripper = new SmuttyRipper(url); - assertEquals("QUIGON", ripper.getGID(url)); + Assertions.assertEquals("QUIGON", ripper.getGID(url)); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SoundgasmRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SoundgasmRipperTest.java new file mode 100644 index 00000000..847540a3 --- /dev/null +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SoundgasmRipperTest.java @@ -0,0 +1,28 @@ +package com.rarchives.ripme.tst.ripper.rippers; + +import com.rarchives.ripme.ripper.rippers.RedditRipper; +import com.rarchives.ripme.ripper.rippers.SoundgasmRipper; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; + +public class SoundgasmRipperTest extends RippersTest { + + @Test + @Tag("flaky") + public void testSoundgasmURLs() throws IOException, URISyntaxException { + SoundgasmRipper ripper = new SoundgasmRipper(new URI("https://soundgasm.net/u/HTMLExamples/Making-Text-into-a-Soundgasm-Audio-Link").toURL()); + testRipper(ripper); + } + + @Test + @Tag("flaky") + public void testRedditSoundgasmURL() throws IOException, URISyntaxException { + RedditRipper ripper = new RedditRipper(new URI("https://www.reddit.com/user/Mistress_Minerva/").toURL()); + testRipper(ripper); + } +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SpankBangRipperTest.java 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SpankBangRipperTest.java index c73a244e..684d4689 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SpankBangRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SpankBangRipperTest.java @@ -1,15 +1,18 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.SpankbangRipper; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class SpankBangRipperTest extends RippersTest { @Test - public void testSpankBangVideo() throws IOException { - SpankbangRipper ripper = new SpankbangRipper(new URL("https://spankbang.com/2a7fh/video/mdb901")); //most popular video of all time on site; should stay up + @Tag("flaky") + public void testSpankBangVideo() throws IOException, URISyntaxException { + SpankbangRipper ripper = new SpankbangRipper(new URI("https://spankbang.com/2a7fh/video/mdb901").toURL()); //most popular video of all time on site; should stay up testRipper(ripper); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/StaRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/StaRipperTest.java index 4884b205..83da1175 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/StaRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/StaRipperTest.java @@ -1,26 +1,29 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.StaRipper; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; public class StaRipperTest extends RippersTest { @Test - @Disabled("404 Link") - public void testRip() throws IOException { - StaRipper ripper = new StaRipper(new 
URL("https://sta.sh/2hn9rtavr1g")); + @Disabled("Ripper broken, Nullpointer exception") + public void testRip() throws IOException, URISyntaxException { + StaRipper ripper = new StaRipper(new URI("https://sta.sh/01umpyuxi4js").toURL()); testRipper(ripper); } @Test - @Disabled("404 Link") - public void testGetGID() throws IOException { - URL url = new URL("https://sta.sh/2hn9rtavr1g"); + @Disabled + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://sta.sh/01umpyuxi4js").toURL(); StaRipper ripper = new StaRipper(url); - assertEquals("2hn9rtavr1g", ripper.getGID(url)); + Assertions.assertEquals("01umpyuxi4js", ripper.getGID(url)); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/StickyXXXRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/StickyXXXRipperTest.java index 5c530a01..71038c94 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/StickyXXXRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/StickyXXXRipperTest.java @@ -1,18 +1,21 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.video.StickyXXXRipper; // import com.rarchives.ripme.tst.ripper.rippers.RippersTest; import com.rarchives.ripme.utils.Utils; +import org.junit.jupiter.api.Test; public class StickyXXXRipperTest extends RippersTest { - public void testStickyXXXVideo() throws IOException { + @Test + public void testStickyXXXVideo() throws IOException, URISyntaxException { // This test fails on the CI - possibly due to checking for a file before it's written - so we're skipping it if (Utils.getConfigBoolean("test.run_flaky_tests", false)) { - StickyXXXRipper ripper = new StickyXXXRipper(new URL("http://www.stickyxxx.com/a-very-intense-farewell/")); + StickyXXXRipper ripper = new StickyXXXRipper(new 
URI("http://www.stickyxxx.com/a-very-intense-farewell/").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TapasticRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TapasticRipperTest.java index bd424860..4528482f 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TapasticRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TapasticRipperTest.java @@ -1,21 +1,27 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.TapasticRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; public class TapasticRipperTest extends RippersTest { @Test - public void testTapasticRip() throws IOException { - TapasticRipper ripper = new TapasticRipper(new URL("https://tapas.io/series/tsiwbakd-comic")); + @Disabled("ripper broken") + public void testTapasticRip() throws IOException, URISyntaxException { + TapasticRipper ripper = new TapasticRipper(new URI("https://tapas.io/series/TPIAG").toURL()); testRipper(ripper); } - public void testGetGID() throws IOException { - URL url = new URL("https://tapas.io/series/tsiwbakd-comic"); + @Test + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://tapas.io/series/TPIAG").toURL(); TapasticRipper ripper = new TapasticRipper(url); - assertEquals("series_ tsiwbakd-comic", ripper.getGID(url)); + Assertions.assertEquals("series_ TPIAG", ripper.getGID(url)); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TeenplanetRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TeenplanetRipperTest.java index 4d088742..2a69bae7 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TeenplanetRipperTest.java +++ 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TeenplanetRipperTest.java @@ -1,21 +1,27 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.TeenplanetRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class TeenplanetRipperTest extends RippersTest { @Test - public void testTeenplanetRip() throws IOException { - TeenplanetRipper ripper = new TeenplanetRipper(new URL("http://teenplanet.org/galleries/the-perfect-side-of-me-6588.html")); + @Tag("flaky") + public void testTeenplanetRip() throws IOException, URISyntaxException { + TeenplanetRipper ripper = new TeenplanetRipper(new URI("http://teenplanet.org/galleries/the-perfect-side-of-me-6588.html").toURL()); testRipper(ripper); } - public void testGetGID() throws IOException { - URL url = new URL("http://teenplanet.org/galleries/the-perfect-side-of-me-6588.html"); + @Test + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("http://teenplanet.org/galleries/the-perfect-side-of-me-6588.html").toURL(); TeenplanetRipper ripper = new TeenplanetRipper(url); - assertEquals("the-perfect-side-of-me-6588", ripper.getGID(url)); + Assertions.assertEquals("the-perfect-side-of-me-6588", ripper.getGID(url)); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ThechiveRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ThechiveRipperTest.java index 1067f1eb..e7d85d34 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ThechiveRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ThechiveRipperTest.java @@ -26,10 +26,12 @@ package com.rarchives.ripme.tst.ripper.rippers; import com.rarchives.ripme.ripper.rippers.ThechiveRipper; import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Tag; 
import org.junit.jupiter.api.Test; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; /** * @@ -43,16 +45,18 @@ public class ThechiveRipperTest extends RippersTest { * @throws IOException */ @Test - public void testTheChiveRip() throws IOException { - ThechiveRipper ripper = new ThechiveRipper(new URL( - "https://thechive.com/2019/03/16/beautiful-badasses-lookin-good-in-and-out-of-uniform-35-photos/")); + @Tag("flaky") + public void testTheChiveRip() throws IOException, URISyntaxException { + ThechiveRipper ripper = new ThechiveRipper(new URI( + "https://thechive.com/2019/03/16/beautiful-badasses-lookin-good-in-and-out-of-uniform-35-photos/").toURL()); testRipper(ripper); } @Test - public void testTheChiveGif() throws IOException { + @Tag("flaky") + public void testTheChiveGif() throws IOException, URISyntaxException { ThechiveRipper ripper = new ThechiveRipper( - new URL("https://thechive.com/2019/03/14/dont-tease-me-just-squeeze-me-20-gifs/")); + new URI("https://thechive.com/2019/03/14/dont-tease-me-just-squeeze-me-20-gifs/").toURL()); testRipper(ripper); } @@ -60,8 +64,9 @@ public class ThechiveRipperTest extends RippersTest { * "i.thechive.com" test. 
*/ @Test - public void testIDotThechive() throws IOException { - ThechiveRipper ripper = new ThechiveRipper(new URL("https://i.thechive.com/witcheva")); + @Tag("flaky") + public void testIDotThechive() throws IOException, URISyntaxException { + ThechiveRipper ripper = new ThechiveRipper(new URI("https://i.thechive.com/witcheva").toURL()); testRipper(ripper); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TheyiffgalleryRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TheyiffgalleryRipperTest.java index 65ebbb14..17ed7398 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TheyiffgalleryRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TheyiffgalleryRipperTest.java @@ -1,21 +1,27 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.TheyiffgalleryRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class TheyiffgalleryRipperTest extends RippersTest { @Test - public void testTheyiffgallery() throws IOException { - TheyiffgalleryRipper ripper = new TheyiffgalleryRipper(new URL("https://theyiffgallery.com/index?/category/4303")); + @Tag("flaky") + public void testTheyiffgallery() throws IOException, URISyntaxException { + TheyiffgalleryRipper ripper = new TheyiffgalleryRipper(new URI("https://theyiffgallery.com/index?/category/4303").toURL()); testRipper(ripper); } - public void testGetGID() throws IOException { - URL url = new URL("https://theyiffgallery.com/index?/category/4303"); + @Test + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://theyiffgallery.com/index?/category/4303").toURL(); TheyiffgalleryRipper ripper = new TheyiffgalleryRipper(url); - assertEquals("4303", ripper.getGID(url)); + Assertions.assertEquals("4303", 
ripper.getGID(url)); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TsuminoRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TsuminoRipperTest.java index b1e58adc..21818ae3 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TsuminoRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TsuminoRipperTest.java @@ -1,39 +1,44 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import java.util.List; import com.rarchives.ripme.ripper.rippers.TsuminoRipper; import com.rarchives.ripme.utils.RipUtils; import org.jsoup.nodes.Document; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; public class TsuminoRipperTest extends RippersTest { @Test - public void testTsuminoRipper() throws IOException { - TsuminoRipper ripper = new TsuminoRipper(new URL("http://www.tsumino.com/Book/Info/43528/sore-wa-kurokute-suketeita-what-s-tight-and-black-and-sheer-all-over-")); + @Disabled("Broken ripper") + public void testTsuminoRipper() throws IOException, URISyntaxException { + TsuminoRipper ripper = new TsuminoRipper(new URI("http://www.tsumino.com/Book/Info/43528/sore-wa-kurokute-suketeita-what-s-tight-and-black-and-sheer-all-over-").toURL()); testRipper(ripper); } @Test - public void testTagBlackList() throws IOException { - TsuminoRipper ripper = new TsuminoRipper(new URL("http://www.tsumino.com/Book/Info/43528/sore-wa-kurokute-suketeita-what-s-tight-and-black-and-sheer-all-over-")); + @Disabled("Broken ripper") + public void testTagBlackList() throws IOException, URISyntaxException { + TsuminoRipper ripper = new TsuminoRipper(new URI("http://www.tsumino.com/Book/Info/43528/sore-wa-kurokute-suketeita-what-s-tight-and-black-and-sheer-all-over-").toURL()); Document doc = ripper.getFirstPage(); List tagsOnPage = ripper.getTags(doc); 
String[] tags1 = {"test", "one", "Smell"}; String blacklistedTag = RipUtils.checkTags(tags1, tagsOnPage); - assertEquals("smell", blacklistedTag); + Assertions.assertEquals("smell", blacklistedTag); // Test a tag with spaces String[] tags2 = {"test", "one", "Face sitting"}; blacklistedTag = RipUtils.checkTags(tags2, tagsOnPage); - assertEquals("face sitting", blacklistedTag); + Assertions.assertEquals("face sitting", blacklistedTag); // Test a album with no blacklisted tags String[] tags3 = {"nothing", "one", "null"}; blacklistedTag = RipUtils.checkTags(tags3, tagsOnPage); - assertNull(blacklistedTag); + Assertions.assertNull(blacklistedTag); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Tubex6RipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Tubex6RipperTest.java deleted file mode 100644 index 83ff88a1..00000000 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Tubex6RipperTest.java +++ /dev/null @@ -1,19 +0,0 @@ -package com.rarchives.ripme.tst.ripper.rippers; - -import java.io.IOException; -import java.net.URL; - -import com.rarchives.ripme.ripper.rippers.Tubex6Ripper; - -public class Tubex6RipperTest extends RippersTest { - public void testRip() throws IOException { - Tubex6Ripper ripper = new Tubex6Ripper(new URL("http://www.tubex6.com/my-sister-sleeps-naked-1/")); - testRipper(ripper); - } - - public void testGetGID() throws IOException { - URL url = new URL("http://www.tubex6.com/my-sister-sleeps-naked-1/"); - Tubex6Ripper ripper = new Tubex6Ripper(url); - assertEquals("my-sister-sleeps-naked-1", ripper.getGID(url)); - } -} \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TumblrRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TumblrRipperTest.java index 07aeb28d..e771e209 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TumblrRipperTest.java +++ 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TumblrRipperTest.java @@ -2,7 +2,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.TumblrRipper; @@ -12,30 +13,30 @@ import org.junit.jupiter.api.Test; public class TumblrRipperTest extends RippersTest { @Test @Disabled - public void testTumblrFullRip() throws IOException { - TumblrRipper ripper = new TumblrRipper(new URL("http://wrouinr.tumblr.com")); + public void testTumblrFullRip() throws IOException, URISyntaxException { + TumblrRipper ripper = new TumblrRipper(new URI("http://wrouinr.tumblr.com").toURL()); testRipper(ripper); } @Test @Disabled - public void testTumblrTagRip() throws IOException { - TumblrRipper ripper = new TumblrRipper(new URL("https://these-are-my-b-sides.tumblr.com/tagged/boobs")); + public void testTumblrTagRip() throws IOException, URISyntaxException { + TumblrRipper ripper = new TumblrRipper(new URI("https://these-are-my-b-sides.tumblr.com/tagged/boobs").toURL()); testRipper(ripper); } @Test @Disabled - public void testTumblrPostRip() throws IOException { - TumblrRipper ripper = new TumblrRipper(new URL("http://sadbaffoon.tumblr.com/post/132045920789/what-a-hoe")); + public void testTumblrPostRip() throws IOException, URISyntaxException { + TumblrRipper ripper = new TumblrRipper(new URI("http://sadbaffoon.tumblr.com/post/132045920789/what-a-hoe").toURL()); testRipper(ripper); } @Test @Disabled("Commented out because the test link is 404ing") - public void testEmbeddedImage() throws IOException { + public void testEmbeddedImage() throws IOException, URISyntaxException { TumblrRipper ripper = new TumblrRipper( - new URL("https://these-are-my-b-sides.tumblr.com/post/178225921524/this-was-fun")); + new URI("https://these-are-my-b-sides.tumblr.com/post/178225921524/this-was-fun").toURL()); testRipper(ripper); } } diff --git 
a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TwitterRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TwitterRipperTest.java index 788808c8..de164767 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TwitterRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TwitterRipperTest.java @@ -1,23 +1,27 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.TwitterRipper; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class TwitterRipperTest extends RippersTest { @Test - public void testTwitterUserRip() throws IOException { - TwitterRipper ripper = new TwitterRipper(new URL("https://twitter.com/danngamber01/media")); + @Tag("flaky") + public void testTwitterUserRip() throws IOException, URISyntaxException { + TwitterRipper ripper = new TwitterRipper(new URI("https://twitter.com/danngamber01/media").toURL()); testRipper(ripper); } @Test - public void testTwitterSearchRip() throws IOException { + @Tag("flaky") + public void testTwitterSearchRip() throws IOException, URISyntaxException { TwitterRipper ripper = new TwitterRipper( - new URL("https://twitter.com/search?f=tweets&q=from%3Aalinalixxx%20filter%3Aimages&src=typd")); + new URI("https://twitter.com/search?f=tweets&q=from%3Aalinalixxx%20filter%3Aimages&src=typd").toURL()); testRipper(ripper); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TwodgalleriesRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TwodgalleriesRipperTest.java index 3671d506..1df43cff 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TwodgalleriesRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TwodgalleriesRipperTest.java @@ -1,7 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import 
java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.TwodgalleriesRipper; @@ -11,9 +12,9 @@ import org.junit.jupiter.api.Test; public class TwodgalleriesRipperTest extends RippersTest { @Test @Disabled("https://github.com/RipMeApp/ripme/issues/182") - public void testTwodgalleriesRip() throws IOException { + public void testTwodgalleriesRip() throws IOException, URISyntaxException { TwodgalleriesRipper ripper = new TwodgalleriesRipper( - new URL("http://www.2dgalleries.com/artist/regis-loisel-6477")); + new URI("http://www.2dgalleries.com/artist/regis-loisel-6477").toURL()); testRipper(ripper); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VidbleRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VidbleRipperTest.java index f76e2b25..0ce64540 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VidbleRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VidbleRipperTest.java @@ -1,22 +1,26 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.VidbleRipper; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; public class VidbleRipperTest extends RippersTest { @Test - public void testVidbleRip() throws IOException { - VidbleRipper ripper = new VidbleRipper(new URL("http://www.vidble.com/album/y1oyh3zd")); + public void testVidbleRip() throws IOException, URISyntaxException { + VidbleRipper ripper = new VidbleRipper(new URI("https://vidble.com/album/cGEFr8zi").toURL()); testRipper(ripper); } - public void testGetGID() throws IOException { - URL url = new URL("http://www.vidble.com/album/y1oyh3zd"); + @Test + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://vidble.com/album/cGEFr8zi").toURL(); VidbleRipper ripper = new 
VidbleRipper(url); - assertEquals("y1oyh3zd", ripper.getGID(url)); + Assertions.assertEquals("cGEFr8zi", ripper.getGID(url)); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VideoRippersTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VideoRippersTest.java index e8b5ffe3..6ac08ca4 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VideoRippersTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VideoRippersTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -9,6 +11,7 @@ import com.rarchives.ripme.ripper.VideoRipper; import com.rarchives.ripme.ripper.rippers.video.PornhubRipper; import com.rarchives.ripme.ripper.rippers.video.YuvutuRipper; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; @@ -28,9 +31,9 @@ public class VideoRippersTest extends RippersTest { // Video ripper testing is... weird. // If the ripper finds the URL to download the video, and it's a test, // then the ripper sets the download URL as the ripper's URL. - assertFalse("Failed to find download url for " + oldURL, oldURL.equals(ripper.getURL())); + Assertions.assertFalse(oldURL.equals(ripper.getURL()), "Failed to find download url for " + oldURL); } catch (Exception e) { - fail("Error while ripping " + ripper.getURL() + " : " + e); + Assertions.fail("Error while ripping " + ripper.getURL() + " : " + e); e.printStackTrace(); } finally { deleteDir(ripper.getWorkingDir()); @@ -39,9 +42,9 @@ public class VideoRippersTest extends RippersTest { @Test @Disabled("Test disbaled. 
See https://github.com/RipMeApp/ripme/issues/574") - public void testTwitchVideoRipper() throws IOException { + public void testTwitchVideoRipper() throws IOException, URISyntaxException { List contentURLs = new ArrayList<>(); - contentURLs.add(new URL("https://clips.twitch.tv/FaithfulIncredulousPotTBCheesePull")); + contentURLs.add(new URI("https://clips.twitch.tv/FaithfulIncredulousPotTBCheesePull").toURL()); for (URL url : contentURLs) { // TwitchVideoRipper ripper = new TwitchVideoRipper(url); // videoTestHelper(ripper); @@ -50,18 +53,18 @@ public class VideoRippersTest extends RippersTest { @Test @Disabled("Test disabled see https://github.com/RipMeApp/ripme/issues/1095") - public void testPornhubRipper() throws IOException { + public void testPornhubRipper() throws IOException, URISyntaxException { List contentURLs = new ArrayList<>(); - contentURLs.add(new URL("https://www.pornhub.com/view_video.php?viewkey=ph5a329fa707269")); + contentURLs.add(new URI("https://www.pornhub.com/view_video.php?viewkey=ph5a329fa707269").toURL()); for (URL url : contentURLs) { PornhubRipper ripper = new PornhubRipper(url); videoTestHelper(ripper); } } - public void testYuvutuRipper() throws IOException { + public void testYuvutuRipper() throws IOException, URISyntaxException { List contentURLs = new ArrayList<>(); - contentURLs.add(new URL("http://www.yuvutu.com/video/828499/female-reader-armpit-job/")); + contentURLs.add(new URI("http://www.yuvutu.com/video/828499/female-reader-armpit-job/").toURL()); for (URL url : contentURLs) { YuvutuRipper ripper = new YuvutuRipper(url); videoTestHelper(ripper); diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ViewcomicRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ViewcomicRipperTest.java index 063cc036..a315648d 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ViewcomicRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ViewcomicRipperTest.java @@ -1,7 +1,8 
@@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.ViewcomicRipper; import org.junit.jupiter.api.Disabled; @@ -9,8 +10,8 @@ import org.junit.jupiter.api.Test; public class ViewcomicRipperTest extends RippersTest { @Test @Disabled("Ripper broken") - public void testViewcomicRipper() throws IOException { - ViewcomicRipper ripper = new ViewcomicRipper(new URL("https://view-comic.com/batman-no-mans-land-vol-1/")); + public void testViewcomicRipper() throws IOException, URISyntaxException { + ViewcomicRipper ripper = new ViewcomicRipper(new URI("https://view-comic.com/batman-no-mans-land-vol-1/").toURL()); testRipper(ripper); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VkRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VkRipperTest.java index 22ccb641..7bf7badf 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VkRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VkRipperTest.java @@ -1,9 +1,13 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.VkRipper; +import org.json.JSONObject; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class VkRipperTest extends RippersTest { @@ -16,18 +20,38 @@ public class VkRipperTest extends RippersTest { // EXAMPLE: https://vk.com/album45506334_00?rev=1 (a single album - wall pictures) // EXAMPLE: https://vk.com/album45506334_101886701 (a single album - custom) @Test - public void testVkAlbumHttpRip() throws IOException { - VkRipper ripper = new VkRipper(new URL("http://vk.com/album45506334_0")); + @Tag("flaky") + public void testVkAlbumHttpRip() throws 
IOException, URISyntaxException { + VkRipper ripper = new VkRipper(new URI("https://vk.com/album45506334_0").toURL()); testRipper(ripper); } @Test - public void testVkAlbumHttpsRip() throws IOException { - VkRipper ripper = new VkRipper(new URL("https://vk.com/album45506334_0")); + @Tag("flaky") + public void testVkPhotosRip() throws IOException, URISyntaxException { + VkRipper ripper = new VkRipper(new URI("https://vk.com/photos45506334").toURL()); testRipper(ripper); } + @Test - public void testVkPhotosRip() throws IOException { - VkRipper ripper = new VkRipper(new URL("https://vk.com/photos45506334")); - testRipper(ripper); + @Tag("flaky") + public void testFindJSONObjectContainingPhotoID() throws IOException, URISyntaxException { + VkRipper ripper = new VkRipper(new URI("http://vk.com/album45506334_0").toURL()); + String json = + "{\"payload\":[0,[\"album-45984105_268691406\",18,14,[{\"id\":\"-45984105_457345201\",\"base\":\"https://sun9-37.userapi.com/\",\"tagged\":[],\"likes\":0,\"shares\":0,\"o_src\":\"https://sun9-65.userapi.com/c857520/v857520962/10e24c/DPxygc3XW5E.jpg\",\"o_\":[\"https://sun9-65.userapi.com/c857520/v857520962/10e24c/DPxygc3XW5E\",130,98],\"z_src\":\"https://sun9-41.userapi.com/c857520/v857520962/10e24a/EsDDQA36qKI.jpg\",\"z_\":[\"https://sun9-41.userapi.com/c857520/v857520962/10e24a/EsDDQA36qKI\",1280,960],\"w_src\":\"https://sun9-60.userapi.com/c857520/v857520962/10e24b/6ETsA15rAdU.jpg\",\"w_\":[\"https://sun9-60.userapi.com/c857520/v857520962/10e24b/6ETsA15rAdU\",1405,1054]}]]],\"langVersion\":\"4298\"}"; + String responseJson = + 
"{\"id\":\"-45984105_457345201\",\"base\":\"https://sun9-37.userapi.com/\",\"tagged\":[],\"likes\":0,\"shares\":0,\"o_src\":\"https://sun9-65.userapi.com/c857520/v857520962/10e24c/DPxygc3XW5E.jpg\",\"o_\":[\"https://sun9-65.userapi.com/c857520/v857520962/10e24c/DPxygc3XW5E\",130,98],\"z_src\":\"https://sun9-41.userapi.com/c857520/v857520962/10e24a/EsDDQA36qKI.jpg\",\"z_\":[\"https://sun9-41.userapi.com/c857520/v857520962/10e24a/EsDDQA36qKI\",1280,960],\"w_src\":\"https://sun9-60.userapi.com/c857520/v857520962/10e24b/6ETsA15rAdU.jpg\",\"w_\":[\"https://sun9-60.userapi.com/c857520/v857520962/10e24b/6ETsA15rAdU\",1405,1054]}"; + + Assertions.assertTrue( + ripper.findJSONObjectContainingPhotoId("-45984105_457345201", new JSONObject(json)) + .similar(new JSONObject(responseJson))); + } + + @Test + public void testGetBestSourceUrl() throws IOException, URISyntaxException { + VkRipper ripper = new VkRipper(new URI("http://vk.com/album45506334_0").toURL()); + String json = + "{\"id\":\"-45984105_457345201\",\"base\":\"https://sun9-37.userapi.com/\",\"commcount\":0,\"date\":\"3 Dec at 1:14 am\",\"tagged\":[],\"attached_tags\":{\"max_tags_per_object\":5},\"o_src\":\"https://sun9-65.userapi.com/c857520/v857520962/10e24c/DPxygc3XW5E.jpg\",\"o_\":[\"https://sun9-65.userapi.com/c857520/v857520962/10e24c/DPxygc3XW5E\",130,98],\"y_src\":\"https://sun9-9.userapi.com/c857520/v857520962/10e249/dUDeuY10s0A.jpg\",\"y_\":[\"https://sun9-9.userapi.com/c857520/v857520962/10e249/dUDeuY10s0A\",807,605],\"z_src\":\"https://sun9-41.userapi.com/c857520/v857520962/10e24a/EsDDQA36qKI.jpg\",\"z_\":[\"https://sun9-41.userapi.com/c857520/v857520962/10e24a/EsDDQA36qKI\",1280,960]}"; + Assertions.assertEquals("https://sun9-41.userapi.com/c857520/v857520962/10e24a/EsDDQA36qKI.jpg", + ripper.getBestSourceUrl(new JSONObject(json))); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VscoRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VscoRipperTest.java index 
3b3bdaa3..20e14442 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VscoRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VscoRipperTest.java @@ -1,60 +1,52 @@ -package com.rarchives.ripme.tst.ripper.rippers; - -import com.rarchives.ripme.ripper.rippers.VscoRipper; - -import org.junit.jupiter.api.Test; - -import java.io.IOException; -import java.net.URL; - -public class VscoRipperTest extends RippersTest { - - /** - * Testing single image. - * - * @throws IOException - */ - @Test - public void testSingleImageRip() throws IOException { - VscoRipper ripper = new VscoRipper(new URL("https://vsco.co/jonathangodoy/media/5d1aec76bb669a128035e98a")); - testRipper(ripper); - } - - /** - * Tests profile rip. - * - * @throws IOException - */ - @Test - public void testProfileRip() throws IOException { - VscoRipper ripper = new VscoRipper(new URL("https://vsco.co/jonathangodoy/images/1")); - testRipper(ripper); - } - - /** - * Prevents Bug #679 from happening again. - * https://github.com/RipMeApp/ripme/issues/679 - * - * @throws IOException - */ - @Test - public void testHyphenatedRip() throws IOException { - VscoRipper ripper = new VscoRipper(new URL("https://vsco.co/jolly-roger/images/1")); - testRipper(ripper); - } - - /** - * Make sure it names the folder something sensible. 
- * - * @throws IOException - */ - @Test - public void testGetGID() throws IOException { - URL url = new URL("https://vsco.co/minijello/media/571cd612542220261a123441"); - - VscoRipper ripper = new VscoRipper(url); - - assertEquals("Failed to get GID", "minijello/571cd", ripper.getGID(url)); - } - -} +package com.rarchives.ripme.tst.ripper.rippers; + +import com.rarchives.ripme.ripper.rippers.VscoRipper; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; + +public class VscoRipperTest extends RippersTest { + + /** + * Testing single image. + * + * @throws IOException + */ + @Test + public void testSingleImageRip() throws IOException, URISyntaxException { + VscoRipper ripper = new VscoRipper(new URI("https://vsco.co/jolly-roger/media/597ce449846079297b3f7cf3").toURL()); + testRipper(ripper); + } + + /** + * Tests profile rip., Prevents Bug #679 from happening again. + * https://github.com/RipMeApp/ripme/issues/679 + * + * @throws IOException + */ + @Test + public void testHyphenatedRip() throws IOException, URISyntaxException { + VscoRipper ripper = new VscoRipper(new URI("https://vsco.co/jolly-roger/gallery").toURL()); + testRipper(ripper); + } + + /** + * Make sure it names the folder something sensible. 
+ * + * @throws IOException + */ + @Test + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://vsco.co/jolly-roger/media/590359c4ade3041f2658f407").toURL(); + + VscoRipper ripper = new VscoRipper(url); + + Assertions.assertEquals("jolly-roger/59035", ripper.getGID(url), "Failed to get GID"); + } + +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WebtoonsRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WebtoonsRipperTest.java index ad634d5f..d05f307c 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WebtoonsRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WebtoonsRipperTest.java @@ -1,26 +1,34 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.WebtoonsRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; -public class WebtoonsRipperTest extends RippersTest { +public class WebtoonsRipperTest extends RippersTest { @Test - public void testWebtoonsAlbum() throws IOException { - WebtoonsRipper ripper = new WebtoonsRipper(new URL("http://www.webtoons.com/en/drama/my-boo/ep-33/viewer?title_no=1185&episode_no=33")); + @Tag("flaky") + public void testWebtoonsAlbum() throws IOException, URISyntaxException { + WebtoonsRipper ripper = new WebtoonsRipper(new URI("https://www.webtoons.com/en/super-hero/unordinary/episode-103/viewer?title_no=679&episode_no=109").toURL()); testRipper(ripper); } @Test - public void testWebtoonsType() throws IOException { - WebtoonsRipper ripper = new WebtoonsRipper(new URL("http://www.webtoons.com/en/drama/lookism/ep-145/viewer?title_no=1049&episode_no=145")); + @Tag("flaky") + public void testWedramabtoonsType() throws IOException, URISyntaxException { + WebtoonsRipper 
ripper = new WebtoonsRipper(new URI("http://www.webtoons.com/en/drama/lookism/ep-145/viewer?title_no=1049&episode_no=145").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("http://www.webtoons.com/en/drama/my-boo/ep-33/viewer?title_no=1185&episode_no=33"); + @Disabled("URL format different") + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://www.webtoons.com/en/super-hero/unordinary/episode-103/viewer?title_no=679&episode_no=109").toURL(); WebtoonsRipper ripper = new WebtoonsRipper(url); - assertEquals("my-boo", ripper.getGID(url)); + Assertions.assertEquals("super-hero", ripper.getGID(url)); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WordpressComicRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WordpressComicRipperTest.java index f55e5fdc..d0649aa9 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WordpressComicRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WordpressComicRipperTest.java @@ -1,11 +1,14 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.WordpressComicRipper; -import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class WordpressComicRipperTest extends RippersTest { @@ -22,96 +25,100 @@ public class WordpressComicRipperTest extends RippersTest { // http://shipinbottle.pepsaga.com/?p=281 @Test - @Disabled("https://github.com/RipMeApp/ripme/issues/269 - Disabled test - WordpressRipperTest: various domains flaky in CI") - public void test_totempole666() throws IOException { + @Tag("flaky") // https://github.com/RipMeApp/ripme/issues/269 - Disabled test - WordpressRipperTest: various domains flaky in CI + public 
void test_totempole666() throws IOException, URISyntaxException { WordpressComicRipper ripper = new WordpressComicRipper( - new URL("http://www.totempole666.com/comic/first-time-for-everything-00-cover/")); + new URI("http://www.totempole666.com/comic/first-time-for-everything-00-cover/").toURL()); testRipper(ripper); } @Test - @Disabled("https://github.com/RipMeApp/ripme/issues/269 - Disabled test - WordpressRipperTest: various domains flaky in CI") - public void test_buttsmithy() throws IOException { - WordpressComicRipper ripper = new WordpressComicRipper(new URL("http://buttsmithy.com/archives/comic/p1")); + @Tag("flaky") // https://github.com/RipMeApp/ripme/issues/269 - Disabled test - WordpressRipperTest: various domains flaky in CI + public void test_buttsmithy() throws IOException, URISyntaxException { + WordpressComicRipper ripper = new WordpressComicRipper(new URI("http://buttsmithy.com/archives/comic/p1").toURL()); testRipper(ripper); } @Test - @Disabled("https://github.com/RipMeApp/ripme/issues/269 - Disabled test - WordpressRipperTest: various domains flaky in CI") - public void test_themonsterunderthebed() throws IOException { + @Tag("flaky") // https://github.com/RipMeApp/ripme/issues/269 - Disabled test - WordpressRipperTest: various domains flaky in CI + public void test_themonsterunderthebed() throws IOException, URISyntaxException { WordpressComicRipper ripper = new WordpressComicRipper( - new URL("http://themonsterunderthebed.net/?comic=test-post")); + new URI("http://themonsterunderthebed.net/?comic=test-post").toURL()); testRipper(ripper); } @Test - public void test_prismblush() throws IOException { + public void test_prismblush() throws IOException, URISyntaxException { WordpressComicRipper ripper = new WordpressComicRipper( - new URL("http://prismblush.com/comic/hella-trap-pg-01/")); + new URI("http://prismblush.com/comic/hella-trap-pg-01/").toURL()); testRipper(ripper); } @Test - public void test_konradokonski_1() throws IOException { + 
@Tag("flaky") + public void test_konradokonski_1() throws IOException, URISyntaxException { WordpressComicRipper ripper = new WordpressComicRipper( - new URL("http://www.konradokonski.com/sawdust/comic/get-up/")); + new URI("http://www.konradokonski.com/sawdust/comic/get-up/").toURL()); testRipper(ripper); } @Test - public void test_konradokonski_2() throws IOException { + @Tag("flaky") + public void test_konradokonski_2() throws IOException, URISyntaxException { WordpressComicRipper ripper = new WordpressComicRipper( - new URL("http://www.konradokonski.com/wiory/comic/08182008/")); + new URI("http://www.konradokonski.com/wiory/comic/08182008/").toURL()); testRipper(ripper); } @Test - public void test_konradokonski_getAlbumTitle() throws IOException { - URL url = new URL("http://www.konradokonski.com/sawdust/comic/get-up/"); + public void test_konradokonski_getAlbumTitle() throws IOException, URISyntaxException { + URL url = new URI("http://www.konradokonski.com/sawdust/comic/get-up/").toURL(); WordpressComicRipper ripper = new WordpressComicRipper(url); - assertEquals("konradokonski.com_sawdust", ripper.getAlbumTitle(url)); + Assertions.assertEquals("konradokonski.com_sawdust", ripper.getAlbumTitle(url)); } @Test - @Disabled("https://github.com/RipMeApp/ripme/issues/269 - Disabled test - WordpressRipperTest: various domains flaky in CI") - public void test_freeadultcomix() throws IOException { + @Tag("flaky") // https://github.com/RipMeApp/ripme/issues/269 - Disabled test - WordpressRipperTest: various domains flaky in CI + public void test_freeadultcomix() throws IOException, URISyntaxException { WordpressComicRipper ripper = new WordpressComicRipper( - new URL("http://freeadultcomix.com/finders-feepaid-in-full-sparrow/")); + new URI("http://freeadultcomix.com/finders-feepaid-in-full-sparrow/").toURL()); testRipper(ripper); } @Test - public void test_delvecomic() throws IOException { + @Tag("flaky") + public void test_delvecomic() throws IOException, 
URISyntaxException { WordpressComicRipper ripper = new WordpressComicRipper( - new URL("http://thisis.delvecomic.com/NewWP/comic/in-too-deep/")); + new URI("http://thisis.delvecomic.com/NewWP/comic/in-too-deep/").toURL()); testRipper(ripper); } @Test - public void test_Eightmuses_download() throws IOException { + public void test_Eightmuses_download() throws IOException, URISyntaxException { WordpressComicRipper ripper = new WordpressComicRipper( - new URL("https://8muses.download/lustomic-playkittens-josh-samuel-porn-comics-8-muses/")); + new URI("https://8muses.download/lustomic-playkittens-josh-samuel-porn-comics-8-muses/").toURL()); testRipper(ripper); } @Test - public void test_Eightmuses_getAlbumTitle() throws IOException { - URL url = new URL("https://8muses.download/lustomic-playkittens-josh-samuel-porn-comics-8-muses/"); + public void test_Eightmuses_getAlbumTitle() throws IOException, URISyntaxException { + URL url = new URI("https://8muses.download/lustomic-playkittens-josh-samuel-porn-comics-8-muses/").toURL(); WordpressComicRipper ripper = new WordpressComicRipper(url); - assertEquals("8muses.download_lustomic-playkittens-josh-samuel-porn-comics-8-muses", ripper.getAlbumTitle(url)); + Assertions.assertEquals("8muses.download_lustomic-playkittens-josh-samuel-porn-comics-8-muses", ripper.getAlbumTitle(url)); } @Test - public void test_spyingwithlana_download() throws IOException { + @Tag("flaky") + public void test_spyingwithlana_download() throws IOException, URISyntaxException { WordpressComicRipper ripper = new WordpressComicRipper( - new URL("http://spyingwithlana.com/comic/the-big-hookup/")); + new URI("http://spyingwithlana.com/comic/the-big-hookup/").toURL()); testRipper(ripper); } @Test - public void test_spyingwithlana_getAlbumTitle() throws IOException { - URL url = new URL("http://spyingwithlana.com/comic/the-big-hookup/"); + public void test_spyingwithlana_getAlbumTitle() throws IOException, URISyntaxException { + URL url = new 
URI("http://spyingwithlana.com/comic/the-big-hookup/").toURL(); WordpressComicRipper ripper = new WordpressComicRipper(url); - assertEquals("spyingwithlana_the-big-hookup", ripper.getAlbumTitle(url)); + Assertions.assertEquals("spyingwithlana_the-big-hookup", ripper.getAlbumTitle(url)); } @Test - @Disabled("https://github.com/RipMeApp/ripme/issues/269 - Disabled test - WordpressRipperTest: various domains flaky in CI") - public void test_pepsaga() throws IOException { - WordpressComicRipper ripper = new WordpressComicRipper(new URL("http://shipinbottle.pepsaga.com/?p=281")); + @Tag("flaky") // https://github.com/RipMeApp/ripme/issues/269 - Disabled test - WordpressRipperTest: various domains flaky in CI + public void test_pepsaga() throws IOException, URISyntaxException { + WordpressComicRipper ripper = new WordpressComicRipper(new URI("http://shipinbottle.pepsaga.com/?p=281").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XcartxRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XcartxRipperTest.java index 7b5ab870..b4130cbb 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XcartxRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XcartxRipperTest.java @@ -6,13 +6,14 @@ import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; public class XcartxRipperTest extends RippersTest { @Test @Disabled("Broken ripper") - public void testAlbum() throws IOException { - XcartxRipper ripper = new XcartxRipper(new URL("http://xcartx.com/4937-tokimeki-nioi.html")); + public void testAlbum() throws IOException, URISyntaxException { + XcartxRipper ripper = new XcartxRipper(new URI("http://xcartx.com/4937-tokimeki-nioi.html").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XhamsterRipperTest.java 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XhamsterRipperTest.java index 96a9295e..24555e89 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XhamsterRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XhamsterRipperTest.java @@ -1,58 +1,64 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.XhamsterRipper; import org.jsoup.nodes.Document; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class XhamsterRipperTest extends RippersTest { @Test - public void testXhamsterAlbum1() throws IOException { - XhamsterRipper ripper = new XhamsterRipper(new URL("https://xhamster.com/photos/gallery/sexy-preggo-girls-9026608")); + @Tag("flaky") + public void testXhamsterAlbum1() throws IOException, URISyntaxException { + XhamsterRipper ripper = new XhamsterRipper(new URI("https://xhamster.com/photos/gallery/sexy-preggo-girls-9026608").toURL()); testRipper(ripper); } @Test - public void testXhamster2Album() throws IOException { - XhamsterRipper ripper = new XhamsterRipper(new URL("https://xhamster2.com/photos/gallery/sexy-preggo-girls-9026608")); + @Tag("flaky") + public void testXhamster2Album() throws IOException, URISyntaxException { + XhamsterRipper ripper = new XhamsterRipper(new URI("https://xhamster2.com/photos/gallery/sexy-preggo-girls-9026608").toURL()); testRipper(ripper); } @Test - public void testXhamsterAlbum2() throws IOException { - XhamsterRipper ripper = new XhamsterRipper(new URL("https://xhamster.com/photos/gallery/japanese-dolls-4-asahi-mizuno-7254664")); + @Tag("flaky") + public void testXhamsterAlbum2() throws IOException, URISyntaxException { + XhamsterRipper ripper = new XhamsterRipper(new URI("https://xhamster.com/photos/gallery/japanese-dolls-4-asahi-mizuno-7254664").toURL()); 
testRipper(ripper); } @Test - public void testXhamsterAlbumOneDomain() throws IOException { - XhamsterRipper ripper = new XhamsterRipper(new URL("https://xhamster.one/photos/gallery/japanese-dolls-4-asahi-mizuno-7254664")); + @Tag("flaky") + public void testXhamsterAlbumDesiDomain() throws IOException, URISyntaxException { + XhamsterRipper ripper = new XhamsterRipper(new URI("https://xhamster5.desi/photos/gallery/japanese-dolls-4-asahi-mizuno-7254664").toURL()); testRipper(ripper); } @Test - public void testXhamsterAlbumDesiDomain() throws IOException { - XhamsterRipper ripper = new XhamsterRipper(new URL("https://xhamster.desi/photos/gallery/japanese-dolls-4-asahi-mizuno-7254664")); + @Tag("flaky") + public void testXhamsterVideo() throws IOException, URISyntaxException { + XhamsterRipper ripper = new XhamsterRipper(new URI("https://xhamster.com/videos/brazzers-busty-big-booty-milf-lisa-ann-fucks-her-masseur-1492828").toURL()); testRipper(ripper); } @Test - public void testXhamsterVideo() throws IOException { - XhamsterRipper ripper = new XhamsterRipper(new URL("https://xhamster.com/videos/brazzers-busty-big-booty-milf-lisa-ann-fucks-her-masseur-1492828")); + @Tag("flaky") + public void testBrazilianXhamster() throws IOException, URISyntaxException { + XhamsterRipper ripper = new XhamsterRipper(new URI("https://pt.xhamster.com/photos/gallery/cartoon-babe-15786301").toURL()); testRipper(ripper); } @Test - public void testBrazilianXhamster() throws IOException { - XhamsterRipper ripper = new XhamsterRipper(new URL("https://pt.xhamster.com/photos/gallery/silvana-7105696")); - testRipper(ripper); - } - - public void testGetGID() throws IOException { - URL url = new URL("https://xhamster.com/photos/gallery/japanese-dolls-4-asahi-mizuno-7254664"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://xhamster5.desi/photos/gallery/japanese-dolls-4-asahi-mizuno-7254664").toURL(); XhamsterRipper ripper = new XhamsterRipper(url); - 
assertEquals("7254664", ripper.getGID(url)); + Assertions.assertEquals("7254664", ripper.getGID(url)); } @Test - public void testGetNextPage() throws IOException { - XhamsterRipper ripper = new XhamsterRipper(new URL("https://pt.xhamster.com/photos/gallery/mega-compil-6-10728626")); + @Tag("flaky") + public void testGetNextPage() throws IOException, URISyntaxException { + XhamsterRipper ripper = new XhamsterRipper(new URI("https://pt.xhamster.com/photos/gallery/mega-compil-6-10728626").toURL()); Document doc = ripper.getFirstPage(); try { ripper.getNextPage(doc); diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XlecxRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XlecxRipperTest.java index 807231e8..78eb5a3a 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XlecxRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XlecxRipperTest.java @@ -1,18 +1,19 @@ -package com.rarchives.ripme.tst.ripper.rippers; - -import java.io.IOException; -import java.net.URL; - -import com.rarchives.ripme.ripper.rippers.XlecxRipper; - -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; - -public class XlecxRipperTest extends RippersTest { - @Test - @Disabled("Broken ripper") - public void testAlbum() throws IOException { - XlecxRipper ripper = new XlecxRipper(new URL("http://xlecx.com/4274-black-canary-ravished-prey.html")); - testRipper(ripper); - } -} +package com.rarchives.ripme.tst.ripper.rippers; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +import com.rarchives.ripme.ripper.rippers.XlecxRipper; + +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +public class XlecxRipperTest extends RippersTest { + @Test + @Disabled("Broken ripper") + public void testAlbum() throws IOException, URISyntaxException { + XlecxRipper ripper = new XlecxRipper(new 
URI("http://xlecx.com/4274-black-canary-ravished-prey.html").toURL()); + testRipper(ripper); + } +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XvideosRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XvideosRipperTest.java index 9446b640..cde9d111 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XvideosRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XvideosRipperTest.java @@ -1,16 +1,16 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.XvideosRipper; -import com.rarchives.ripme.tst.ripper.rippers.RippersTest; import org.junit.jupiter.api.Test; public class XvideosRipperTest extends RippersTest { @Test - public void testXhamsterAlbum1() throws IOException { - XvideosRipper ripper = new XvideosRipper(new URL("https://www.xvideos.com/video23515878/dee_s_pool_toys")); + public void testXhamsterAlbum1() throws IOException, URISyntaxException { + XvideosRipper ripper = new XvideosRipper(new URI("https://www.xvideos.com/video23515878/dee_s_pool_toys").toURL()); testRipper(ripper); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/YoupornRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/YoupornRipperTest.java index c8640cad..9520ee08 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/YoupornRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/YoupornRipperTest.java @@ -1,18 +1,24 @@ package com.rarchives.ripme.tst.ripper.rippers; import com.rarchives.ripme.ripper.rippers.YoupornRipper; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; public class YoupornRipperTest extends RippersTest { 
@Test - public void testYoupornRipper() throws IOException { + @Tag("flaky") + public void testYoupornRipper() throws IOException, URISyntaxException { List contentURLs = new ArrayList<>(); - contentURLs.add(new URL("http://www.youporn.com/watch/7669155/mrs-li-amateur-69-orgasm/?from=categ")); + // Video cannot be loaded: "Video has been flagged for verification" + //contentURLs.add(new URI("http://www.youporn.com/watch/7669155/mrs-li-amateur-69-orgasm/?from=categ").toURL()); + contentURLs.add(new URI("https://www.youporn.com/watch/13158849/smashing-star-slut-part-2/").toURL()); for (URL url : contentURLs) { YoupornRipper ripper = new YoupornRipper(url); testRipper(ripper); diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/YuvutuRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/YuvutuRipperTest.java index 7cc3def9..cc84c8d5 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/YuvutuRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/YuvutuRipperTest.java @@ -1,26 +1,31 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.YuvutuRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class YuvutuRipperTest extends RippersTest { @Test - public void testYuvutuAlbum1() throws IOException { - YuvutuRipper ripper = new YuvutuRipper(new URL("http://www.yuvutu.com/modules.php?name=YuGallery&action=view&set_id=127013")); + @Tag("flaky") + public void testYuvutuAlbum1() throws IOException, URISyntaxException { + YuvutuRipper ripper = new YuvutuRipper(new URI("http://www.yuvutu.com/modules.php?name=YuGallery&action=view&set_id=127013").toURL()); testRipper(ripper); } @Test - public void testYuvutuAlbum2() throws IOException { - YuvutuRipper ripper = new YuvutuRipper(new 
URL("http://www.yuvutu.com/modules.php?name=YuGallery&action=view&set_id=420333")); + public void testYuvutuAlbum2() throws IOException, URISyntaxException { + YuvutuRipper ripper = new YuvutuRipper(new URI("http://www.yuvutu.com/modules.php?name=YuGallery&action=view&set_id=420333").toURL()); testRipper(ripper); } - public void testGetGID() throws IOException { - URL url = new URL("http://www.yuvutu.com/modules.php?name=YuGallery&action=view&set_id=420333"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("http://www.yuvutu.com/modules.php?name=YuGallery&action=view&set_id=420333").toURL(); YuvutuRipper ripper = new YuvutuRipper(url); - assertEquals("420333", ripper.getGID(url)); + Assertions.assertEquals("420333", ripper.getGID(url)); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ZizkiRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ZizkiRipperTest.java index e12ccb5c..adbd4c77 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ZizkiRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ZizkiRipperTest.java @@ -1,26 +1,36 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.ZizkiRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class ZizkiRipperTest extends RippersTest { - public void testRip() throws IOException { - ZizkiRipper ripper = new ZizkiRipper(new URL("http://zizki.com/dee-chorde/we-got-spirit")); + + @Test + @Tag("flaky") + public void testRip() throws IOException, URISyntaxException { + ZizkiRipper ripper = new ZizkiRipper(new URI("http://zizki.com/dee-chorde/we-got-spirit").toURL()); testRipper(ripper); } - public void testGetGID() throws IOException { - URL url = new 
URL("http://zizki.com/dee-chorde/we-got-spirit"); - ZizkiRipper ripper = new ZizkiRipper(url); - assertEquals("dee-chorde", ripper.getGID(url)); - } @Test - public void testAlbumTitle() throws IOException { - URL url = new URL("http://zizki.com/dee-chorde/we-got-spirit"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("http://zizki.com/dee-chorde/we-got-spirit").toURL(); ZizkiRipper ripper = new ZizkiRipper(url); - assertEquals("zizki_Dee Chorde_We Got Spirit", ripper.getAlbumTitle(url)); + Assertions.assertEquals("dee-chorde", ripper.getGID(url)); + } + + @Test + @Tag("flaky") + public void testAlbumTitle() throws IOException, URISyntaxException { + URL url = new URI("http://zizki.com/dee-chorde/we-got-spirit").toURL(); + ZizkiRipper ripper = new ZizkiRipper(url); + Assertions.assertEquals("zizki_Dee Chorde_We Got Spirit", ripper.getAlbumTitle(url)); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ui/LabelsBundlesTest.java b/src/test/java/com/rarchives/ripme/tst/ui/LabelsBundlesTest.java index 6189d86a..d35ff49e 100644 --- a/src/test/java/com/rarchives/ripme/tst/ui/LabelsBundlesTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ui/LabelsBundlesTest.java @@ -10,12 +10,12 @@ import java.util.Set; import com.rarchives.ripme.utils.Utils; -import org.apache.log4j.Logger; -import org.junit.jupiter.api.Assertions; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.Test; public class LabelsBundlesTest { - private Logger logger = Logger.getLogger(Utils.class); + private Logger logger = LogManager.getLogger(Utils.class); private static final String DEFAULT_LANG = "en_US"; @Test diff --git a/src/test/java/com/rarchives/ripme/tst/ui/RipStatusMessageTest.java b/src/test/java/com/rarchives/ripme/tst/ui/RipStatusMessageTest.java index e5fe8b43..fbd1c604 100644 --- a/src/test/java/com/rarchives/ripme/tst/ui/RipStatusMessageTest.java +++ 
b/src/test/java/com/rarchives/ripme/tst/ui/RipStatusMessageTest.java @@ -3,9 +3,11 @@ package com.rarchives.ripme.tst.ui; import com.rarchives.ripme.ui.RipStatusMessage; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; public class RipStatusMessageTest { + @Test public void testConstructor() { RipStatusMessage.STATUS loadingResource = RipStatusMessage.STATUS.LOADING_RESOURCE; String path = "path/to/file"; diff --git a/src/test/java/com/rarchives/ripme/ui/UIContextMenuTests.java b/src/test/java/com/rarchives/ripme/ui/UIContextMenuTests.java new file mode 100644 index 00000000..32dcdd9b --- /dev/null +++ b/src/test/java/com/rarchives/ripme/ui/UIContextMenuTests.java @@ -0,0 +1,190 @@ +package com.rarchives.ripme.ui; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import javax.swing.*; +import java.awt.*; +import java.awt.datatransfer.Clipboard; +import java.awt.datatransfer.StringSelection; +import java.awt.event.ActionEvent; +import java.lang.reflect.InvocationTargetException; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.junit.jupiter.api.Assertions.fail; + +// these tests do not run on a server, as it is headless +@Tag("flaky") +public class UIContextMenuTests { + + private JFrame frame; + private JTextField textField; + private ContextMenuMouseListener contextMenuMouseListener; + + @BeforeEach + void setUp() throws InterruptedException, InvocationTargetException { + AtomicBoolean notDone = new AtomicBoolean(true); + + SwingUtilities.invokeAndWait(() -> { + frame = new JFrame("ContextMenuMouseListener Example"); + textField = new JTextField("Hello, world!"); + + // Create an instance of ContextMenuMouseListener + contextMenuMouseListener = new ContextMenuMouseListener(textField); + + // Add ContextMenuMouseListener to JTextField + textField.addMouseListener(contextMenuMouseListener); + + 
frame.getContentPane().add(textField, BorderLayout.CENTER); + frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); + frame.setSize(300, 200); + frame.setVisible(true); + + notDone.set(false); + }); + + // Wait for the GUI to be fully initialized + while (notDone.get()) { + Thread.yield(); + } + } + + @AfterEach + void tearDown() { + frame.dispose(); + } + + @Test + void testCut() { + // Simulate a cut event + simulateCutEvent(); + // Add assertions if needed + } + + @Test + void testCopy() { + // Simulate a copy event + simulateCopyEvent(); + // Add assertions if needed + } + + @Test + void testPaste() { + // Simulate a paste event + simulatePasteEvent(); + // Add assertions if needed + } + + @Test + void testSelectAll() { + // Simulate a select all event + simulateSelectAllEvent(); + // Add assertions if needed + } + + @Test + void testUndo() { + // Simulate an undo event + simulateUndoEvent(); + // Add assertions if needed + } + + private void simulatePasteEvent() { + // Save the initial text content + String initialText = contextMenuMouseListener.getTextComponent().getText(); + + // Assume there is some text to paste + String textToPaste = "Text to paste"; + + // Set the text to the clipboard + Clipboard clipboard = Toolkit.getDefaultToolkit().getSystemClipboard(); + StringSelection stringSelection = new StringSelection(textToPaste); + clipboard.setContents(stringSelection, stringSelection); + + // Simulate a paste event + contextMenuMouseListener.getTextComponent().paste(); + + // Verify that the paste operation worked + String actualText = contextMenuMouseListener.getTextComponent().getText(); + + // Check if the text was appended after the initial text + if (actualText.equals(initialText + textToPaste)) { + System.out.println("Paste operation successful. Text content matches."); + } else { + fail("Paste operation failed. 
Text content does not match."); + } + } + + + + + private void simulateSelectAllEvent() { + // Simulate a select all event by invoking the selectAllAction + contextMenuMouseListener.getSelectAllAction().actionPerformed(new ActionEvent(contextMenuMouseListener.getTextComponent(), ActionEvent.ACTION_PERFORMED, "")); + + // Verify that all text is selected + int expectedSelectionStart = 0; + int expectedSelectionEnd = contextMenuMouseListener.getTextComponent().getText().length(); + int actualSelectionStart = contextMenuMouseListener.getTextComponent().getSelectionStart(); + int actualSelectionEnd = contextMenuMouseListener.getTextComponent().getSelectionEnd(); + + if (expectedSelectionStart == actualSelectionStart && expectedSelectionEnd == actualSelectionEnd) { + System.out.println("Select All operation successful. Text is selected."); + } else { + fail("Select All operation failed. Text is not selected as expected."); + } + } + + private void simulateUndoEvent() { + + // Simulate an undo event by invoking the undoAction + contextMenuMouseListener.getUndoAction().actionPerformed(new ActionEvent(contextMenuMouseListener.getTextComponent(), ActionEvent.ACTION_PERFORMED, "")); + + // Verify that the undo operation worked + String expectedText = contextMenuMouseListener.getSavedString(); // Assuming the undo reverts to the saved state + String actualText = contextMenuMouseListener.getTextComponent().getText(); + + if (expectedText.equals(actualText)) { + System.out.println("Undo operation successful. Text content matches."); + } else { + fail("Undo operation failed. 
Text content does not match."); + } + } + + + private void simulateCopyEvent() { + // Save the initial text content + String initialText = contextMenuMouseListener.getTextComponent().getText(); + + // Simulate a copy event by invoking the copyAction + contextMenuMouseListener.getCopyAction().actionPerformed(new ActionEvent(contextMenuMouseListener.getTextComponent(), ActionEvent.ACTION_PERFORMED, "")); + + // Verify that the copy operation worked + String actualText = contextMenuMouseListener.getDebugSavedString(); + + if (initialText.equals(actualText)) { + System.out.println("Copy operation successful. Text content matches."); + } else { + fail("Copy operation failed. Text content does not match."); + } + } + + private void simulateCutEvent() { + // Save the initial text content + String initialText = contextMenuMouseListener.getTextComponent().getText(); + + // Simulate a cut event by invoking the cutAction + contextMenuMouseListener.getCutAction().actionPerformed(new ActionEvent(contextMenuMouseListener.getTextComponent(), ActionEvent.ACTION_PERFORMED, "")); + + // Verify that the cut operation worked + String actualText = contextMenuMouseListener.getDebugSavedString(); + + if (initialText.equals(actualText)) { + System.out.println("Cut operation successful. Text content matches."); + } else { + fail("Cut operation failed. 
Text content does not match."); + } + } +} diff --git a/src/test/java/com/rarchives/ripme/ui/UpdateUtilsTest.java b/src/test/java/com/rarchives/ripme/ui/UpdateUtilsTest.java new file mode 100644 index 00000000..d28e6b07 --- /dev/null +++ b/src/test/java/com/rarchives/ripme/ui/UpdateUtilsTest.java @@ -0,0 +1,16 @@ +package com.rarchives.ripme.ui; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +public class UpdateUtilsTest { + + @Test + public void testIsNewerVersion() { + Assertions.assertFalse(UpdateUtils.isNewerVersion("1.7.94")); + Assertions.assertFalse(UpdateUtils.isNewerVersion("1.7.94-9-asdf")); + Assertions.assertTrue(UpdateUtils.isNewerVersion("1.7.94-11-asdf")); + Assertions.assertTrue(UpdateUtils.isNewerVersion("1.7.95")); + } + +} \ No newline at end of file diff --git a/utils/style.sh b/utils/style.sh deleted file mode 100644 index 45bb40e9..00000000 --- a/utils/style.sh +++ /dev/null @@ -1,27 +0,0 @@ -echo "" -echo "=====================================================" -echo "Tabs are not allowed" -echo "-----------------------------------------------------" -git grep -n -P "\t" -- :/*.java | sed -e "s/\t/\x1b[7m--->\x1b[m/g" -echo "=====================================================" - -echo "" -echo "=====================================================" -echo "Trailing whitespace is not allowed" -echo "-----------------------------------------------------" -git grep -n -P "[ \t]+$" -- :/*.java | sed -e "s/\t/\x1b[7m--->\x1b[m/g" | sed -e "s/ /\x1b[7m.\x1b[m/g" | sed -e "s/$/\x1b[7m$\x1b[m/g" -echo "=====================================================" - -echo "" -echo "=====================================================" -echo "'){' is not allowed. Place a space between ')' and '{', i.e. 
'if (a) {'" -echo "-----------------------------------------------------" -git grep -n -P "\)\{" -- :/*.java -echo "=====================================================" - -echo "" -echo "=====================================================" -echo "A space is required after keywords (if|else|for|while|do|try|catch|finally)" -echo "-----------------------------------------------------" -git grep -n -P "(\b(if|for|while|catch)\b[(])|(\b(else|do|try|finally)\b[{])" -- :/*.java | sed -r -e "s/(\b(if|for|while|catch)\b[(])|(\b(else|do|try|finally)\b[{])/\x1b[7m\0\x1b[m/g" -echo "=====================================================" diff --git a/utils/stylefix.sh b/utils/stylefix.sh deleted file mode 100644 index dbfad1e1..00000000 --- a/utils/stylefix.sh +++ /dev/null @@ -1,17 +0,0 @@ -echo "" -echo "=====================================================" -echo "Tabs are not allowed (please manually fix tabs)" -echo "-----------------------------------------------------" -git grep -n -P "\t" -- :/*.java | sed -e "s/\t/\x1b[7m--->\x1b[m/g" -echo "=====================================================" - -echo "Removing trailing whitespace..." -git grep -l -P "[ \t]+$" -- :/*.java | xargs -I % sed -i -r -e "s/[ \t]+$//g" % - -echo "Replacing '){' with ') {'..." -git grep -l -P "\)\{" -- :/*.java | xargs -I % sed -i -r -e "s/\)\{/) {/g" % - -echo "Adding space between keywords and punctuation..." 
-git grep -l -P "(\b(if|for|while|catch)\b[(])" -- :/*.java | xargs -I % sed -i -r -e "s/(\b(if|for|while|catch)\b[(])/\2 (/g" % -git grep -l -P "(\b(else|do|try|finally)\b[{])" -- :/*.java | xargs -I % sed -i -r -e "s/(\b(else|do|try|finally)\b[{])/\2 {/g" % - diff --git a/workspace.code-workspace b/workspace.code-workspace deleted file mode 100644 index 95b80106..00000000 --- a/workspace.code-workspace +++ /dev/null @@ -1,16 +0,0 @@ -{ - "folders": [ - { - "path": "E:\\Downloads\\_Isaaku\\dev" - } - ], - "settings": { - "files.exclude": { - "**/.classpath": false, - "**/.project": true, - "**/.settings": true, - "**/.factorypath": true - }, - "java.configuration.updateBuildConfiguration": "automatic" - } -} \ No newline at end of file