diff --git a/src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java b/src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java
index 6a9e62a3..b28b0ba4 100644
--- a/src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java
@@ -81,7 +81,7 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
     public void rip() throws IOException {
         int index = 0;
         int textindex = 0;
-        logger.info("Retrieving " + this.url);
+        LOGGER.info("Retrieving " + this.url);
         sendUpdate(STATUS.LOADING_RESOURCE, this.url.toExternalForm());
         Document doc = getFirstPage();
 
@@ -117,7 +117,7 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
 
                 for (String imageURL : imageURLs) {
                     index += 1;
-                    logger.debug("Found image url #" + index + ": " + imageURL);
+                    LOGGER.debug("Found image url #" + index + ": " + imageURL);
                     downloadURL(new URL(imageURL), index);
                     if (isStopped()) {
                         break;
@@ -125,16 +125,16 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
                 }
             }
             if (hasDescriptionSupport() && Utils.getConfigBoolean("descriptions.save", false)) {
-                logger.debug("Fetching description(s) from " + doc.location());
+                LOGGER.debug("Fetching description(s) from " + doc.location());
                 List<String> textURLs = getDescriptionsFromPage(doc);
                 if (!textURLs.isEmpty()) {
-                    logger.debug("Found description link(s) from " + doc.location());
+                    LOGGER.debug("Found description link(s) from " + doc.location());
                     for (String textURL : textURLs) {
                         if (isStopped()) {
                             break;
                         }
                         textindex += 1;
-                        logger.debug("Getting description from " + textURL);
+                        LOGGER.debug("Getting description from " + textURL);
                         String[] tempDesc = getDescription(textURL,doc);
                         if (tempDesc != null) {
                             if (Utils.getConfigBoolean("file.overwrite", false) || !(new File(
@@ -144,11 +144,11 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
                                             + getPrefix(index)
                                             + (tempDesc.length > 1 ? tempDesc[1] : fileNameFromURL(new URL(textURL)))
                                             + ".txt").exists())) {
-                                logger.debug("Got description from " + textURL);
+                                LOGGER.debug("Got description from " + textURL);
                                 saveText(new URL(textURL), "", tempDesc[0], textindex, (tempDesc.length > 1 ? tempDesc[1] : fileNameFromURL(new URL(textURL))));
                                 sleep(descSleepTime());
                             } else {
-                                logger.debug("Description from " + textURL + " already exists.");
+                                LOGGER.debug("Description from " + textURL + " already exists.");
                             }
                         }
 
@@ -164,14 +164,14 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
                 sendUpdate(STATUS.LOADING_RESOURCE, "next page");
                 doc = getNextPage(doc);
             } catch (IOException e) {
-                logger.info("Can't get next page: " + e.getMessage());
+                LOGGER.info("Can't get next page: " + e.getMessage());
                 break;
             }
         }
 
         // If they're using a thread pool, wait for it.
         if (getThreadPool() != null) {
-            logger.debug("Waiting for threadpool " + getThreadPool().getClass().getName());
+            LOGGER.debug("Waiting for threadpool " + getThreadPool().getClass().getName());
             getThreadPool().waitForThreads();
         }
         waitForThreads();
@@ -237,12 +237,12 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
             out.write(text.getBytes());
             out.close();
         } catch (IOException e) {
-            logger.error("[!] Error creating save file path for description '" + url + "':", e);
+            LOGGER.error("[!] Error creating save file path for description '" + url + "':", e);
             return false;
         }
-        logger.debug("Downloading " + url + "'s description to " + saveFileAs);
+        LOGGER.debug("Downloading " + url + "'s description to " + saveFileAs);
         if (!saveFileAs.getParentFile().exists()) {
-            logger.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent()));
+            LOGGER.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent()));
             saveFileAs.getParentFile().mkdirs();
         }
         return true;
diff --git a/src/main/java/com/rarchives/ripme/ripper/AbstractJSONRipper.java b/src/main/java/com/rarchives/ripme/ripper/AbstractJSONRipper.java
index 93146d4b..291dd7df 100644
--- a/src/main/java/com/rarchives/ripme/ripper/AbstractJSONRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/AbstractJSONRipper.java
@@ -50,7 +50,7 @@ public abstract class AbstractJSONRipper extends AlbumRipper {
     @Override
     public void rip() throws IOException {
         int index = 0;
-        logger.info("Retrieving " + this.url);
+        LOGGER.info("Retrieving " + this.url);
         sendUpdate(STATUS.LOADING_RESOURCE, this.url.toExternalForm());
         JSONObject json = getFirstPage();
 
@@ -79,7 +79,7 @@ public abstract class AbstractJSONRipper extends AlbumRipper {
                 }
                 
                 index += 1;
-                logger.debug("Found image url #" + index+ ": " + imageURL);
+                LOGGER.debug("Found image url #" + index + ": " + imageURL);
                 downloadURL(new URL(imageURL), index);
             }
 
@@ -91,14 +91,14 @@ public abstract class AbstractJSONRipper extends AlbumRipper {
                 sendUpdate(STATUS.LOADING_RESOURCE, "next page");
                 json = getNextPage(json);
             } catch (IOException e) {
-                logger.info("Can't get next page: " + e.getMessage());
+                LOGGER.info("Can't get next page: " + e.getMessage());
                 break;
             }
         }
 
         // If they're using a thread pool, wait for it.
         if (getThreadPool() != null) {
-            logger.debug("Waiting for threadpool " + getThreadPool().getClass().getName());
+            LOGGER.debug("Waiting for threadpool " + getThreadPool().getClass().getName());
             getThreadPool().waitForThreads();
         }
         waitForThreads();
diff --git a/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java b/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java
index dc04e801..87d8bd46 100644
--- a/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java
@@ -27,7 +27,7 @@ public abstract class AbstractRipper
                 extends Observable
                 implements RipperInterface, Runnable {
 
-    protected static final Logger logger = Logger.getLogger(AbstractRipper.class);
+    protected static final Logger LOGGER = Logger.getLogger(AbstractRipper.class);
     private final String URLHistoryFile = Utils.getURLHistoryFile();
 
     public static final String USER_AGENT =
@@ -77,11 +77,11 @@ public abstract class AbstractRipper
         try {
             File file = new File(URLHistoryFile);
             if (!new File(Utils.getConfigDir()).exists()) {
-                logger.error("Config dir doesn't exist");
-                logger.info("Making config dir");
+                LOGGER.error("Config dir doesn't exist");
+                LOGGER.info("Making config dir");
                 boolean couldMakeDir = new File(Utils.getConfigDir()).mkdirs();
                 if (!couldMakeDir) {
-                    logger.error("Couldn't make config dir");
+                    LOGGER.error("Couldn't make config dir");
                     return;
                 }
             }
@@ -89,12 +89,12 @@ public abstract class AbstractRipper
             if (!file.exists()) {
                 boolean couldMakeDir = file.createNewFile();
                 if (!couldMakeDir) {
-                    logger.error("Couldn't url history file");
+                    LOGGER.error("Couldn't create url history file");
                     return;
                 }
             }
             if (!file.canWrite()) {
-                logger.error("Can't write to url history file: " + URLHistoryFile);
+                LOGGER.error("Can't write to url history file: " + URLHistoryFile);
                 return;
             }
             fw = new FileWriter(file.getAbsoluteFile(), true);
@@ -247,10 +247,10 @@ public abstract class AbstractRipper
         try {
             stopCheck();
         } catch (IOException e) {
-            logger.debug("Ripper has been stopped");
+            LOGGER.debug("Ripper has been stopped");
             return false;
         }
-        logger.debug("url: " + url + ", prefix: " + prefix + ", subdirectory" + subdirectory + ", referrer: " + referrer + ", cookies: " + cookies + ", fileName: " + fileName);
+        LOGGER.debug("url: " + url + ", prefix: " + prefix + ", subdirectory: " + subdirectory + ", referrer: " + referrer + ", cookies: " + cookies + ", fileName: " + fileName);
         String saveAs = getFileName(url, fileName, extension);
         File saveFileAs;
         try {
@@ -265,19 +265,19 @@ public abstract class AbstractRipper
                     + prefix
                     + saveAs);
         } catch (IOException e) {
-            logger.error("[!] Error creating save file path for URL '" + url + "':", e);
+            LOGGER.error("[!] Error creating save file path for URL '" + url + "':", e);
             return false;
         }
-        logger.debug("Downloading " + url + " to " + saveFileAs);
+        LOGGER.debug("Downloading " + url + " to " + saveFileAs);
         if (!saveFileAs.getParentFile().exists()) {
-            logger.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent()));
+            LOGGER.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent()));
             saveFileAs.getParentFile().mkdirs();
         }
         if (Utils.getConfigBoolean("remember.url_history", true) && !isThisATest()) {
             try {
                 writeDownloadedURL(url.toExternalForm() + "\n");
             } catch (IOException e) {
-                logger.debug("Unable to write URL history file");
+                LOGGER.debug("Unable to write URL history file");
             }
         }
         return addURLToDownload(url, saveFileAs, referrer, cookies, getFileExtFromMIME);
@@ -357,7 +357,7 @@ public abstract class AbstractRipper
      * Waits for downloading threads to complete.
      */
     protected void waitForThreads() {
-        logger.debug("Waiting for threads to finish");
+        LOGGER.debug("Waiting for threads to finish");
         completed = false;
         threadPool.waitForThreads();
         checkIfComplete();
@@ -409,13 +409,13 @@ public abstract class AbstractRipper
      */
     void checkIfComplete() {
         if (observer == null) {
-            logger.debug("observer is null");
+            LOGGER.debug("observer is null");
             return;
         }
 
         if (!completed) {
             completed = true;
-            logger.info("   Rip completed!");
+            LOGGER.info("   Rip completed!");
 
             RipStatusComplete rsc = new RipStatusComplete(workingDir, getCount());
             RipStatusMessage msg = new RipStatusMessage(STATUS.RIP_COMPLETE, rsc);
@@ -424,7 +424,7 @@ public abstract class AbstractRipper
             Logger rootLogger = Logger.getRootLogger();
             FileAppender fa = (FileAppender) rootLogger.getAppender("FILE");
             if (fa != null) {
-                logger.debug("Changing log file back to 'ripme.log'");
+                LOGGER.debug("Changing log file back to 'ripme.log'");
                 fa.setFile("ripme.log");
                 fa.activateOptions();
             }
@@ -433,7 +433,7 @@ public abstract class AbstractRipper
                 try {
                     Desktop.getDesktop().open(new File(urlFile));
                 } catch (IOException e) {
-                    logger.warn("Error while opening " + urlFile, e);
+                    LOGGER.warn("Error while opening " + urlFile, e);
                 }
             }
         }
@@ -488,7 +488,7 @@ public abstract class AbstractRipper
         for (Constructor<?> constructor : getRipperConstructors("com.rarchives.ripme.ripper.rippers")) {
             try {
                 AlbumRipper ripper = (AlbumRipper) constructor.newInstance(url); // by design: can throw ClassCastException
-                logger.debug("Found album ripper: " + ripper.getClass().getName());
+                LOGGER.debug("Found album ripper: " + ripper.getClass().getName());
                 return ripper;
             } catch (Exception e) {
                 // Incompatible rippers *will* throw exceptions during instantiation.
@@ -497,7 +497,7 @@ public abstract class AbstractRipper
         for (Constructor<?> constructor : getRipperConstructors("com.rarchives.ripme.ripper.rippers.video")) {
             try {
                 VideoRipper ripper = (VideoRipper) constructor.newInstance(url); // by design: can throw ClassCastException
-                logger.debug("Found video ripper: " + ripper.getClass().getName());
+                LOGGER.debug("Found video ripper: " + ripper.getClass().getName());
                 return ripper;
             } catch (Exception e) {
                 // Incompatible rippers *will* throw exceptions during instantiation.
@@ -554,11 +554,11 @@ public abstract class AbstractRipper
         try {
             rip();
         } catch (HttpStatusException e) {
-            logger.error("Got exception while running ripper:", e);
+            LOGGER.error("Got exception while running ripper:", e);
             waitForThreads();
             sendUpdate(STATUS.RIP_ERRORED, "HTTP status code " + e.getStatusCode() + " for URL " + e.getUrl());
         } catch (Exception e) {
-            logger.error("Got exception while running ripper:", e);
+            LOGGER.error("Got exception while running ripper:", e);
             waitForThreads();
             sendUpdate(STATUS.RIP_ERRORED, e.getMessage());
         } finally {
@@ -571,10 +571,10 @@ public abstract class AbstractRipper
     private void cleanup() {
         if (this.workingDir.list().length == 0) {
             // No files, delete the dir
-            logger.info("Deleting empty directory " + this.workingDir);
+            LOGGER.info("Deleting empty directory " + this.workingDir);
             boolean deleteResult = this.workingDir.delete();
             if (!deleteResult) {
-                logger.error("Unable to delete empty directory " +  this.workingDir);
+                LOGGER.error("Unable to delete empty directory " + this.workingDir);
             }
         }
     }
@@ -589,11 +589,11 @@ public abstract class AbstractRipper
      */
     protected boolean sleep(int milliseconds) {
         try {
-            logger.debug("Sleeping " + milliseconds + "ms");
+            LOGGER.debug("Sleeping " + milliseconds + "ms");
             Thread.sleep(milliseconds);
             return true;
         } catch (InterruptedException e) {
-            logger.error("Interrupted while waiting to load next page", e);
+            LOGGER.error("Interrupted while waiting to load next page", e);
             return false;
         }
     }
@@ -607,7 +607,7 @@ public abstract class AbstractRipper
 
     /** Methods for detecting when we're running a test. */
     public void markAsTest() {
-        logger.debug("THIS IS A TEST RIP");
+        LOGGER.debug("THIS IS A TEST RIP");
         thisIsATest = true;
     }
     protected boolean isThisATest() {
diff --git a/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java b/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java
index b037052e..977de15d 100644
--- a/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java
@@ -62,7 +62,7 @@ public abstract class AlbumRipper extends AbstractRipper {
                   || itemsCompleted.containsKey(url)
                   || itemsErrored.containsKey(url) )) {
             // Item is already downloaded/downloading, skip it.
-            logger.info("[!] Skipping " + url + " -- already attempted: " + Utils.removeCWD(saveAs));
+            LOGGER.info("[!] Skipping " + url + " -- already attempted: " + Utils.removeCWD(saveAs));
             return false;
         }
         if (Utils.getConfigBoolean("urls_only.save", false)) {
@@ -76,7 +76,7 @@ public abstract class AlbumRipper extends AbstractRipper {
                 itemsCompleted.put(url, new File(urlFile));
                 observer.update(this, msg);
             } catch (IOException e) {
-                logger.error("Error while writing to " + urlFile, e);
+                LOGGER.error("Error while writing to " + urlFile, e);
             }
         }
         else {
@@ -128,7 +128,7 @@ public abstract class AlbumRipper extends AbstractRipper {
 
             checkIfComplete();
         } catch (Exception e) {
-            logger.error("Exception while updating observer: ", e);
+            LOGGER.error("Exception while updating observer: ", e);
         }
     }
 
@@ -196,7 +196,7 @@ public abstract class AlbumRipper extends AbstractRipper {
         } else {
             title = super.getAlbumTitle(this.url);
         }
-        logger.debug("Using album title '" + title + "'");
+        LOGGER.debug("Using album title '" + title + "'");
 
         title = Utils.filesystemSafe(title);
         path += title;
@@ -204,10 +204,10 @@ public abstract class AlbumRipper extends AbstractRipper {
 
         this.workingDir = new File(path);
         if (!this.workingDir.exists()) {
-            logger.info("[+] Creating directory: " + Utils.removeCWD(this.workingDir));
+            LOGGER.info("[+] Creating directory: " + Utils.removeCWD(this.workingDir));
             this.workingDir.mkdirs();
         }
-        logger.debug("Set working directory to: " + this.workingDir);
+        LOGGER.debug("Set working directory to: " + this.workingDir);
     }
 
     /**
diff --git a/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java b/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java
index 6b1032e5..a59c884b 100644
--- a/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java
@@ -10,7 +10,6 @@ import java.util.Map;
 import com.rarchives.ripme.ui.RipStatusMessage;
 import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
 import com.rarchives.ripme.utils.Utils;
-import com.sun.org.apache.xpath.internal.operations.Bool;
 
 public abstract class VideoRipper extends AbstractRipper {
 
@@ -53,7 +52,7 @@ public abstract class VideoRipper extends AbstractRipper {
                 RipStatusMessage msg = new RipStatusMessage(STATUS.DOWNLOAD_COMPLETE, urlFile);
                 observer.update(this, msg);
             } catch (IOException e) {
-                logger.error("Error while writing to " + urlFile, e);
+                LOGGER.error("Error while writing to " + urlFile, e);
                 return false;
             }
         }
@@ -61,7 +60,7 @@ public abstract class VideoRipper extends AbstractRipper {
             if (isThisATest()) {
                 // Tests shouldn't download the whole video
                 // Just change this.url to the download URL so the test knows we found it.
-                logger.debug("Test rip, found URL: " + url);
+                LOGGER.debug("Test rip, found URL: " + url);
                 this.url = url;
                 return true;
             }
@@ -90,10 +89,10 @@ public abstract class VideoRipper extends AbstractRipper {
         path += "videos" + File.separator;
         this.workingDir = new File(path);
         if (!this.workingDir.exists()) {
-            logger.info("[+] Creating directory: " + Utils.removeCWD(this.workingDir));
+            LOGGER.info("[+] Creating directory: " + Utils.removeCWD(this.workingDir));
             this.workingDir.mkdirs();
         }
-        logger.debug("Set working directory to: " + this.workingDir);
+        LOGGER.debug("Set working directory to: " + this.workingDir);
     }
     
     /**
@@ -124,7 +123,7 @@ public abstract class VideoRipper extends AbstractRipper {
 
             checkIfComplete();
         } catch (Exception e) {
-            logger.error("Exception while updating observer: ", e);
+            LOGGER.error("Exception while updating observer: ", e);
         }
     }
     
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/AerisdiesRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/AerisdiesRipper.java
index 74504b12..a11b08a4 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/AerisdiesRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/AerisdiesRipper.java
@@ -9,7 +9,6 @@ import java.util.Map;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.jsoup.Connection.Response;
 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;
 import org.jsoup.select.Elements;
@@ -58,7 +57,7 @@ public class AerisdiesRipper extends AbstractHTMLRipper {
             return getHost() + "_" + getGID(url) + "_" + title.trim();
         } catch (IOException e) {
             // Fall back to default album naming convention
-            logger.info("Unable to find title at " + url);
+            LOGGER.info("Unable to find title at " + url);
         }
         return super.getAlbumTitle(url);
     }
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/BatoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/BatoRipper.java
index 5978bff5..af4c0c1f 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/BatoRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/BatoRipper.java
@@ -76,7 +76,7 @@ public class BatoRipper extends AbstractHTMLRipper {
             return getHost() + "_" + getGID(url) + "_" + getFirstPage().select("title").first().text().replaceAll(" ", "_");
         } catch (IOException e) {
             // Fall back to default album naming convention
-            logger.info("Unable to find title at " + url);
+            LOGGER.info("Unable to find title at " + url);
         }
         return super.getAlbumTitle(url);
     }
@@ -113,7 +113,7 @@ public class BatoRipper extends AbstractHTMLRipper {
                 s = s.replaceAll("var prevCha = null;", "");
                 s = s.replaceAll("var nextCha = \\.*;", "");
                 String json = s.replaceAll("var images = ", "").replaceAll(";", "");
-                logger.info(s);
+                LOGGER.info(s);
                 JSONObject images = new JSONObject(json);
                 for (int i = 1; i < images.length() +1; i++) {
                     result.add(images.getString(Integer.toString(i)));
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ChanRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ChanRipper.java
index ace305c1..9076ef1f 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/ChanRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ChanRipper.java
@@ -68,11 +68,11 @@ public class ChanRipper extends AbstractHTMLRipper {
                 String subject = doc.select(".post.op > .postinfo > .subject").first().text();
                 return getHost() + "_" + getGID(url) + "_" + subject;
             } catch (NullPointerException e) {
-                logger.warn("Failed to get thread title from " + url);
+                LOGGER.warn("Failed to get thread title from " + url);
             }
         } catch (Exception e) {
             // Fall back to default album naming convention
-            logger.warn("Failed to get album title from " + url, e);
+            LOGGER.warn("Failed to get album title from " + url, e);
         }
         // Fall back on the GID
         return getHost() + "_" + getGID(url);
@@ -144,7 +144,7 @@ public class ChanRipper extends AbstractHTMLRipper {
     private boolean isURLBlacklisted(String url) {
         for (String blacklist_item : url_piece_blacklist) {
             if (url.contains(blacklist_item)) {
-                logger.debug("Skipping link that contains '"+blacklist_item+"': " + url);
+                LOGGER.debug("Skipping link that contains '"+blacklist_item+"': " + url);
                 return true;
             }
         }
@@ -185,7 +185,7 @@ public class ChanRipper extends AbstractHTMLRipper {
                     }
                     // Don't download the same URL twice
                     if (imageURLs.contains(href)) {
-                        logger.debug("Already attempted: " + href);
+                        LOGGER.debug("Already attempted: " + href);
                         continue;
                     }
                     imageURLs.add(href);
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/CheveretoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/CheveretoRipper.java
index 160febfc..cb0b765f 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/CheveretoRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/CheveretoRipper.java
@@ -63,7 +63,7 @@ public class CheveretoRipper extends AbstractHTMLRipper {
             return getHost() + "_" + title.trim();
         } catch (IOException e) {
             // Fall back to default album naming convention
-            logger.info("Unable to find title at " + url);
+            LOGGER.info("Unable to find title at " + url);
         }
         return super.getAlbumTitle(url);
     }
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/DeviantartRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/DeviantartRipper.java
index aac968b4..99942adb 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/DeviantartRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/DeviantartRipper.java
@@ -122,14 +122,14 @@ public class DeviantartRipper extends AbstractHTMLRipper {
         String password = Utils.getConfigString("deviantart.password", new String(Base64.decode("ZmFrZXJz")));
         
         if (username == null || password == null) {
-            logger.debug("No DeviantArt login provided.");
+            LOGGER.debug("No DeviantArt login provided.");
             cookies.put("agegate_state","1"); // Bypasses the age gate
         } else {
             // Attempt Login
             try {
                 cookies = loginToDeviantart();
             } catch (IOException e) {
-                logger.warn("Failed to login: ", e);
+                LOGGER.warn("Failed to login: ", e);
                 cookies.put("agegate_state","1"); // Bypasses the age gate
             }
         }
@@ -161,7 +161,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
                     script = script.substring(script.indexOf("},\"src\":\"") + 9, script.indexOf("\",\"type\""));
                     return script.replace("\\/", "/");
                 } catch (StringIndexOutOfBoundsException e) {
-                    logger.debug("Unable to get json link from " + page.location());
+                    LOGGER.debug("Unable to get json link from " + page.location());
                 }
             }
         }
@@ -204,7 +204,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
                 }
             }
             if (triedURLs.contains(fullSize)) {
-                logger.warn("Already tried to download " + fullSize);
+                LOGGER.warn("Already tried to download " + fullSize);
                 continue;
             }
             triedURLs.add(fullSize);
@@ -222,7 +222,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
         List<String> textURLs = new ArrayList<>();
         // Iterate over all thumbnails
         for (Element thumb : page.select("div.zones-container span.thumb")) {
-            logger.info(thumb.attr("href"));
+            LOGGER.info(thumb.attr("href"));
             if (isStopped()) {
                 break;
             }
@@ -256,7 +256,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
         if (!sleep(PAGE_SLEEP_TIME)) {
             throw new IOException("Interrupted while waiting to load next page: " + nextPage);
         }
-        logger.info("Found next page: " + nextPage);
+        LOGGER.info("Found next page: " + nextPage);
         return Http.url(nextPage)
                    .cookies(cookies)
                    .get();
@@ -351,7 +351,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
             return new String[] {Jsoup.clean(ele.html().replaceAll("\\\\n", System.getProperty("line.separator")), "", Whitelist.none(), new Document.OutputSettings().prettyPrint(false)),fullSize};
             // TODO Make this not make a newline if someone just types \n into the description.
         } catch (IOException ioe) {
-                logger.info("Failed to get description at " + url + ": '" + ioe.getMessage() + "'");
+                LOGGER.info("Failed to get description at " + url + ": '" + ioe.getMessage() + "'");
                 return null;
         }
     }
@@ -379,7 +379,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
             if (!els.isEmpty()) {
                 // Large image
                 fsimage = els.get(0).attr("src");
-                logger.info("Found large-scale: " + fsimage);
+                LOGGER.info("Found large-scale: " + fsimage);
                 if (fsimage.contains("//orig")) {
                     return fsimage;
                 }
@@ -389,7 +389,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
             if (!els.isEmpty()) {
                 // Full-size image
                 String downloadLink = els.get(0).attr("href");
-                logger.info("Found download button link: " + downloadLink);
+                LOGGER.info("Found download button link: " + downloadLink);
                 HttpURLConnection con = (HttpURLConnection) new URL(downloadLink).openConnection();
                 con.setRequestProperty("Referer",this.url.toString());
                 String cookieString = "";
@@ -406,7 +406,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
                 con.disconnect();
                 if (location.contains("//orig")) {
                     fsimage = location;
-                    logger.info("Found image download: " + location);
+                    LOGGER.info("Found image download: " + location);
                 }
             }
             if (fsimage != null) {
@@ -415,9 +415,9 @@ public class DeviantartRipper extends AbstractHTMLRipper {
             throw new IOException("No download page found");
         } catch (IOException ioe) {
             try {
-                logger.info("Failed to get full size download image at " + page + " : '" + ioe.getMessage() + "'");
+                LOGGER.info("Failed to get full size download image at " + page + " : '" + ioe.getMessage() + "'");
                 String lessThanFull = thumbToFull(thumb, false);
-                logger.info("Falling back to less-than-full-size image " + lessThanFull);
+                LOGGER.info("Falling back to less-than-full-size image " + lessThanFull);
                 return lessThanFull;
             } catch (Exception e) {
                 return null;
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/EHentaiRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/EHentaiRipper.java
index 43728fb4..69d778cf 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/EHentaiRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/EHentaiRipper.java
@@ -70,7 +70,7 @@ public class EHentaiRipper extends AbstractHTMLRipper {
             return getHost() + "_" + elems.first().text();
         } catch (Exception e) {
             // Fall back to default album naming convention
-            logger.warn("Failed to get album title from " + url, e);
+            LOGGER.warn("Failed to get album title from " + url, e);
         }
         return super.getAlbumTitle(url);
     }
@@ -103,7 +103,7 @@ public class EHentaiRipper extends AbstractHTMLRipper {
         int retries = 3;
         while (true) {
             sendUpdate(STATUS.LOADING_RESOURCE, url.toExternalForm());
-            logger.info("Retrieving " + url);
+            LOGGER.info("Retrieving " + url);
             doc = Http.url(url)
                       .referrer(this.url)
                       .cookies(cookies)
@@ -112,7 +112,7 @@ public class EHentaiRipper extends AbstractHTMLRipper {
                 if (retries == 0) {
                     throw new IOException("Hit rate limit and maximum number of retries, giving up");
                 }
-                logger.warn("Hit rate limit while loading " + url + ", sleeping for " + IP_BLOCK_SLEEP_TIME + "ms, " + retries + " retries remaining");
+                LOGGER.warn("Hit rate limit while loading " + url + ", sleeping for " + IP_BLOCK_SLEEP_TIME + "ms, " + retries + " retries remaining");
                 retries--;
                 try {
                     Thread.sleep(IP_BLOCK_SLEEP_TIME);
@@ -137,7 +137,7 @@ public class EHentaiRipper extends AbstractHTMLRipper {
         if (blackListedTags == null) {
             return null;
         }
-        logger.info("Blacklisted tags " + blackListedTags[0]);
+        LOGGER.info("Blacklisted tags " + blackListedTags[0]);
         List<String> tagsOnPage = getTags(doc);
         for (String tag : blackListedTags) {
             for (String pageTag : tagsOnPage) {
@@ -153,9 +153,9 @@ public class EHentaiRipper extends AbstractHTMLRipper {
 
     private List<String> getTags(Document doc) {
         List<String> tags = new ArrayList<>();
-        logger.info("Getting tags");
+        LOGGER.info("Getting tags");
         for (Element tag : doc.select("td > div > a")) {
-            logger.info("Found tag " + tag.text());
+            LOGGER.info("Found tag " + tag.text());
             tags.add(tag.text());
         }
         return tags;
@@ -168,7 +168,7 @@ public class EHentaiRipper extends AbstractHTMLRipper {
             albumDoc = getPageWithRetries(this.url);
         }
         this.lastURL = this.url.toExternalForm();
-        logger.info("Checking blacklist");
+        LOGGER.info("Checking blacklist");
         String blacklistedTag = checkTags(albumDoc, Utils.getConfigStringArray("ehentai.blacklist.tags"));
         if (blacklistedTag != null) {
             sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_WARN, "Skipping " + url.toExternalForm() + " as it " +
@@ -187,13 +187,13 @@ public class EHentaiRipper extends AbstractHTMLRipper {
         // Find next page
         Elements hrefs = doc.select(".ptt a");
         if (hrefs.isEmpty()) {
-            logger.info("doc: " + doc.html());
+            LOGGER.info("doc: " + doc.html());
             throw new IOException("No navigation links found");
         }
         // Ensure next page is different from the current page
         String nextURL = hrefs.last().attr("href");
         if (nextURL.equals(this.lastURL)) {
-            logger.info("lastURL = nextURL : " + nextURL);
+            LOGGER.info("lastURL = nextURL : " + nextURL);
             throw new IOException("Reached last page of results");
         }
         // Sleep before loading next page
@@ -223,7 +223,7 @@ public class EHentaiRipper extends AbstractHTMLRipper {
             Thread.sleep(IMAGE_SLEEP_TIME);
         }
         catch (InterruptedException e) {
-            logger.warn("Interrupted while waiting to load next image", e);
+            LOGGER.warn("Interrupted while waiting to load next image", e);
         }
     }
 
@@ -259,13 +259,13 @@ public class EHentaiRipper extends AbstractHTMLRipper {
                     // Attempt to find image elsewise (Issue #41)
                     images = doc.select("img#img");
                     if (images.isEmpty()) {
-                        logger.warn("Image not found at " + this.url);
+                        LOGGER.warn("Image not found at " + this.url);
                         return;
                     }
                 }
                 Element image = images.first();
                 String imgsrc = image.attr("src");
-                logger.info("Found URL " + imgsrc + " via " + images.get(0));
+                LOGGER.info("Found URL " + imgsrc + " via " + images.get(0));
                 Pattern p = Pattern.compile("^http://.*/ehg/image.php.*&n=([^&]+).*$");
                 Matcher m = p.matcher(imgsrc);
                 if (m.matches()) {
@@ -286,7 +286,7 @@ public class EHentaiRipper extends AbstractHTMLRipper {
                     addURLToDownload(new URL(imgsrc), prefix);
                 }
             } catch (IOException e) {
-                logger.error("[!] Exception while loading/parsing " + this.url, e);
+                LOGGER.error("[!] Exception while loading/parsing " + this.url, e);
             }
         }
     }
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java
index 3e06695b..13a22213 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java
@@ -70,7 +70,7 @@ public class EightmusesRipper extends AbstractHTMLRipper {
             return getHost() + "_" + title.trim();
         } catch (IOException e) {
             // Fall back to default album naming convention
-            logger.info("Unable to find title at " + url);
+            LOGGER.info("Unable to find title at " + url);
         }
         return super.getAlbumTitle(url);
     }
@@ -96,19 +96,19 @@ public class EightmusesRipper extends AbstractHTMLRipper {
             if (thumb.attr("href").contains("/comics/album/")) {
                 String subUrl = "https://www.8muses.com" + thumb.attr("href");
                 try {
-                    logger.info("Retrieving " + subUrl);
+                    LOGGER.info("Retrieving " + subUrl);
                     sendUpdate(STATUS.LOADING_RESOURCE, subUrl);
                     Document subPage = Http.url(subUrl).get();
                     // If the page below this one has images this line will download them
                     List<String> subalbumImages = getURLsFromPage(subPage);
-                    logger.info("Found " + subalbumImages.size() + " images in subalbum");
+                    LOGGER.info("Found " + subalbumImages.size() + " images in subalbum");
                 } catch (IOException e) {
-                    logger.warn("Error while loading subalbum " + subUrl, e);
+                    LOGGER.warn("Error while loading subalbum " + subUrl, e);
                 }
 
             } else if (thumb.attr("href").contains("/comics/picture/")) {
-                logger.info("This page is a album");
-                logger.info("Ripping image");
+                LOGGER.info("This page is an album");
+                LOGGER.info("Ripping image");
                 if (super.isStopped()) break;
                 // Find thumbnail image source
                 String image = null;
@@ -122,7 +122,7 @@ public class EightmusesRipper extends AbstractHTMLRipper {
                         imageHref = "https://www.8muses.com" + imageHref;
                     }
                     try {
-                        logger.info("Retrieving full-size image location from " + imageHref);
+                        LOGGER.info("Retrieving full-size image location from " + imageHref);
                         image = getFullSizeImage(imageHref);
                         URL imageUrl = new URL(image);
                         if (Utils.getConfigBoolean("8muses.use_short_names", false)) {
@@ -134,7 +134,7 @@ public class EightmusesRipper extends AbstractHTMLRipper {
                         x++;
 
                     } catch (IOException e) {
-                        logger.error("Failed to get full-size image from " + imageHref);
+                        LOGGER.error("Failed to get full-size image from " + imageHref);
                         continue;
                     }
                 }
@@ -152,7 +152,7 @@ public class EightmusesRipper extends AbstractHTMLRipper {
 
     private String getFullSizeImage(String imageUrl) throws IOException {
         sendUpdate(STATUS.LOADING_RESOURCE, imageUrl);
-        logger.info("Getting full sized image from " + imageUrl);
+        LOGGER.info("Getting full sized image from " + imageUrl);
         Document doc = new Http(imageUrl).get(); // Retrieve the webpage  of the image URL
         String imageName = doc.select("input[id=imageName]").attr("value"); // Select the "input" element from the page
         return "https://www.8muses.com/image/fm/" + imageName;
@@ -166,14 +166,14 @@ public class EightmusesRipper extends AbstractHTMLRipper {
     }
 
     private String getSubdir(String rawHref) {
-        logger.info("Raw title: " + rawHref);
+        LOGGER.info("Raw title: " + rawHref);
         String title = rawHref;
         title = title.replaceAll("8muses - Sex and Porn Comics", "");
         title = title.replaceAll("\t\t", "");
         title = title.replaceAll("\n", "");
         title = title.replaceAll("\\| ", "");
         title = title.replace(" ", "-");
-        logger.info(title);
+        LOGGER.info(title);
         return title;
     }
 
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java
index 737b8092..d64e9600 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java
@@ -103,7 +103,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
                 return getHost() + "_" + getGID(url) + "_" + title.trim();
             } catch (IOException e) {
                 // Fall back to default album naming convention
-                logger.info("Unable to find title at " + url);
+                LOGGER.info("Unable to find title at " + url);
             }
             return super.getAlbumTitle(url);
         }
@@ -139,7 +139,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
             try {
                 video_page = Http.url("eroshae.com" + link.attr("href")).get();
             } catch (IOException e) {
-                logger.warn("Failed to log link in Jsoup");
+                LOGGER.warn("Failed to log link in Jsoup");
                 video_page = null;
                 e.printStackTrace();
             }
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/EromeRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/EromeRipper.java
index 84e63e76..a01c134a 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/EromeRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/EromeRipper.java
@@ -52,7 +52,7 @@ public class EromeRipper extends AbstractHTMLRipper {
                 return getHost() + "_" + getGID(url) + "_" + title.trim();
             } catch (IOException e) {
                 // Fall back to default album naming convention
-                logger.info("Unable to find title at " + url);
+                LOGGER.info("Unable to find title at " + url);
             }
             return super.getAlbumTitle(url);
     }
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java
index 4ade270b..6591dd01 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java
@@ -153,7 +153,7 @@ public class FivehundredpxRipper extends AbstractJSONRipper {
 
     /** Convert username to UserID. */
     private String getUserID(String username) throws IOException {
-        logger.info("Fetching user ID for " + username);
+        LOGGER.info("Fetching user ID for " + username);
         JSONObject json = new Http("https://api.500px.com/v1/" +
                     "users/show" +
                     "?username=" + username +
@@ -165,7 +165,7 @@ public class FivehundredpxRipper extends AbstractJSONRipper {
     @Override
     public JSONObject getFirstPage() throws IOException {
         URL apiURL = new URL(baseURL + "&consumer_key=" + CONSUMER_KEY);
-        logger.debug("apiURL: " + apiURL);
+        LOGGER.debug("apiURL: " + apiURL);
         JSONObject json = Http.url(apiURL).getJSON();
 
         if (baseURL.contains("/galleries?")) {
@@ -185,7 +185,7 @@ public class FivehundredpxRipper extends AbstractJSONRipper {
                      + "?rpp=100"
                      + "&image_size=5"
                      + "&consumer_key=" + CONSUMER_KEY;
-                logger.info("Loading " + blogURL);
+                LOGGER.info("Loading " + blogURL);
                 sendUpdate(STATUS.LOADING_RESOURCE, "Gallery ID " + galleryID + " for userID " + userID);
                 JSONObject thisJSON = Http.url(blogURL).getJSON();
                 JSONArray thisPhotos = thisJSON.getJSONArray("photos");
@@ -216,7 +216,7 @@ public class FivehundredpxRipper extends AbstractJSONRipper {
                      + "&rpp=100"
                      + "&image_size=5"
                      + "&consumer_key=" + CONSUMER_KEY;
-                logger.info("Loading " + blogURL);
+                LOGGER.info("Loading " + blogURL);
                 sendUpdate(STATUS.LOADING_RESOURCE, "Story ID " + blogid + " for user " + username);
                 JSONObject thisJSON = Http.url(blogURL).getJSON();
                 JSONArray thisPhotos = thisJSON.getJSONArray("photos");
@@ -268,20 +268,20 @@ public class FivehundredpxRipper extends AbstractJSONRipper {
             Document doc;
             Elements images = new Elements();
             try {
-                logger.debug("Loading " + rawUrl);
+                LOGGER.debug("Loading " + rawUrl);
                 super.retrievingSource(rawUrl);
                 doc = Http.url(rawUrl).get();
                 images = doc.select("div#preload img");
             }
             catch (IOException e) {
-                logger.error("Error fetching full-size image from " + rawUrl, e);
+                LOGGER.error("Error fetching full-size image from " + rawUrl, e);
             }
             if (!images.isEmpty()) {
                 imageURL = images.first().attr("src");
-                logger.debug("Found full-size non-watermarked image: " + imageURL);
+                LOGGER.debug("Found full-size non-watermarked image: " + imageURL);
             }
             else {
-                logger.debug("Falling back to image_url from API response");
+                LOGGER.debug("Falling back to image_url from API response");
                 imageURL = photo.getString("image_url");
                 imageURL = imageURL.replaceAll("/4\\.", "/5.");
                 // See if there's larger images
@@ -289,14 +289,14 @@ public class FivehundredpxRipper extends AbstractJSONRipper {
                     String fsURL = imageURL.replaceAll("/5\\.", "/" + imageSize + ".");
                     sleep(10);
                     if (urlExists(fsURL)) {
-                        logger.info("Found larger image at " + fsURL);
+                        LOGGER.info("Found larger image at " + fsURL);
                         imageURL = fsURL;
                         break;
                     }
                 }
             }
             if (imageURL == null) {
-                logger.error("Failed to find image for photo " + photo.toString());
+                LOGGER.error("Failed to find image for photo " + photo.toString());
             }
             else {
                 imageURLs.add(imageURL);
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FlickrRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FlickrRipper.java
index a1a1c2b8..10e786d3 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/FlickrRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FlickrRipper.java
@@ -251,7 +251,7 @@ public class FlickrRipper extends AbstractHTMLRipper {
                 Document doc = getLargestImagePageDocument(this.url);
                 Elements fullsizeImages = doc.select("div#allsizes-photo img");
                 if (fullsizeImages.isEmpty()) {
-                    logger.error("Could not find flickr image at " + doc.location() + " - missing 'div#allsizes-photo img'");
+                    LOGGER.error("Could not find flickr image at " + doc.location() + " - missing 'div#allsizes-photo img'");
                 }
                 else {
                     String prefix = "";
@@ -263,7 +263,7 @@ public class FlickrRipper extends AbstractHTMLRipper {
                     }
                 }
             } catch (IOException e) {
-                logger.error("[!] Exception while loading/parsing " + this.url, e);
+                LOGGER.error("[!] Exception while loading/parsing " + this.url, e);
             }
         }
 
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java
index ec8fc5cf..440e9ae5 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java
@@ -81,7 +81,7 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
 
     private String getImageFromPost(String url) {
         try {
-            logger.info("found url " + Http.url(url).cookies(cookies).get().select("meta[property=og:image]").attr("content"));
+            LOGGER.info("found url " + Http.url(url).cookies(cookies).get().select("meta[property=og:image]").attr("content"));
             return Http.url(url).cookies(cookies).get().select("meta[property=og:image]").attr("content");
         } catch (IOException e) {
             return "";
@@ -103,7 +103,7 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
         Elements urlElements = page.select("figure.t-image > b > u > a");
         for (Element e : urlElements) {
             urls.add(urlBase + e.select("a").first().attr("href"));
-            logger.debug("Desc2 " + urlBase + e.select("a").first().attr("href"));
+            LOGGER.debug("Desc2 " + urlBase + e.select("a").first().attr("href"));
         }
         return urls;
     }
@@ -122,21 +122,21 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
             // Try to find the description
             Elements els = resp.parse().select("td[class=alt1][width=\"70%\"]");
             if (els.isEmpty()) {
-                logger.debug("No description at " + page);
+                LOGGER.debug("No description at " + page);
                 throw new IOException("No description found");
             }
-            logger.debug("Description found!");
+            LOGGER.debug("Description found!");
             Document documentz = resp.parse();
             Element ele = documentz.select("td[class=alt1][width=\"70%\"]").get(0); // This is where the description is.
             // Would break completely if FurAffinity changed site layout.
             documentz.outputSettings(new Document.OutputSettings().prettyPrint(false));
             ele.select("br").append("\\n");
             ele.select("p").prepend("\\n\\n");
-            logger.debug("Returning description at " + page);
+            LOGGER.debug("Returning description at " + page);
             String tempPage = Jsoup.clean(ele.html().replaceAll("\\\\n", System.getProperty("line.separator")), "", Whitelist.none(), new Document.OutputSettings().prettyPrint(false));
             return documentz.select("meta[property=og:title]").attr("content") + "\n" + tempPage; // Overridden saveText takes first line and makes it the file name.
         } catch (IOException ioe) {
-            logger.info("Failed to get description " + page + " : '" + ioe.getMessage() + "'");
+            LOGGER.info("Failed to get description " + page + " : '" + ioe.getMessage() + "'");
             return null;
         }
     }
@@ -171,12 +171,12 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
             out.write(text.getBytes());
             out.close();
         } catch (IOException e) {
-            logger.error("[!] Error creating save file path for description '" + url + "':", e);
+            LOGGER.error("[!] Error creating save file path for description '" + url + "':", e);
             return false;
         }
-        logger.debug("Downloading " + url + "'s description to " + saveFileAs);
+        LOGGER.debug("Downloading " + url + "'s description to " + saveFileAs);
         if (!saveFileAs.getParentFile().exists()) {
-            logger.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent()));
+            LOGGER.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent()));
             saveFileAs.getParentFile().mkdirs();
         }
         return true;
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FuskatorRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FuskatorRipper.java
index 5ebb7297..45ce2b92 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/FuskatorRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FuskatorRipper.java
@@ -67,7 +67,7 @@ public class FuskatorRipper extends AbstractHTMLRipper {
         try {
             baseUrl = URLDecoder.decode(baseUrl, "UTF-8");
         } catch (UnsupportedEncodingException e) {
-            logger.warn("Error while decoding " + baseUrl, e);
+            LOGGER.warn("Error while decoding " + baseUrl, e);
         }
         if (baseUrl.startsWith("//")) {
             baseUrl = "http:" + baseUrl;
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/GirlsOfDesireRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/GirlsOfDesireRipper.java
index 271d0313..2afc79d1 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/GirlsOfDesireRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/GirlsOfDesireRipper.java
@@ -40,7 +40,7 @@ public class GirlsOfDesireRipper extends AbstractHTMLRipper {
             return getHost() + "_" + elems.first().text();
         } catch (Exception e) {
             // Fall back to default album naming convention
-            logger.warn("Failed to get album title from " + url, e);
+            LOGGER.warn("Failed to get album title from " + url, e);
         }
         return super.getAlbumTitle(url);
     }
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HbrowseRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HbrowseRipper.java
index d45cbed5..a3403b9a 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/HbrowseRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HbrowseRipper.java
@@ -56,7 +56,7 @@ public class HbrowseRipper extends AbstractHTMLRipper {
                 return getHost() + "_" + title + "_" + getGID(url);
             } catch (Exception e) {
                 // Fall back to default album naming convention
-                logger.warn("Failed to get album title from " + url, e);
+                LOGGER.warn("Failed to get album title from " + url, e);
             }
             return super.getAlbumTitle(url);
         }
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/Hentai2readRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/Hentai2readRipper.java
index e0dbff17..cb521523 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/Hentai2readRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/Hentai2readRipper.java
@@ -38,7 +38,7 @@ public class Hentai2readRipper extends AbstractHTMLRipper {
 
     @Override
     public boolean pageContainsAlbums(URL url) {
-        logger.info("Page contains albums");
+        LOGGER.info("Page contains albums");
         Pattern pat = Pattern.compile("https?://hentai2read\\.com/([a-zA-Z0-9_-]*)/?");
         Matcher mat = pat.matcher(url.toExternalForm());
         if (mat.matches()) {
@@ -95,7 +95,7 @@ public class Hentai2readRipper extends AbstractHTMLRipper {
                 return getHost() + "_" + getGID(url);
             } catch (Exception e) {
                 // Fall back to default album naming convention
-                logger.warn("Failed to get album title from " + url, e);
+                LOGGER.warn("Failed to get album title from " + url, e);
             }
             return super.getAlbumTitle(url);
         }
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoundryRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoundryRipper.java
index a81b47f9..33a5a964 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoundryRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoundryRipper.java
@@ -98,7 +98,7 @@ public class HentaifoundryRipper extends AbstractHTMLRipper {
             cookies.putAll(resp.cookies());
         }
         else {
-            logger.info("unable to find csrf_token and set filter");
+            LOGGER.info("unable to find csrf_token and set filter");
         }
 
         resp = Http.url(url)
@@ -139,19 +139,19 @@ public class HentaifoundryRipper extends AbstractHTMLRipper {
             }
             Matcher imgMatcher = imgRegex.matcher(thumb.attr("href"));
             if (!imgMatcher.matches()) {
-                logger.info("Couldn't find user & image ID in " + thumb.attr("href"));
+                LOGGER.info("Couldn't find user & image ID in " + thumb.attr("href"));
                 continue;
             }
             Document imagePage;
             try {
 
-                logger.info("grabbing " + "http://www.hentai-foundry.com" + thumb.attr("href"));
+                LOGGER.info("grabbing " + "http://www.hentai-foundry.com" + thumb.attr("href"));
                 imagePage = Http.url("http://www.hentai-foundry.com" + thumb.attr("href")).cookies(cookies).get();
             }
 
             catch (IOException e) {
-                logger.debug(e.getMessage());
-                logger.debug("Warning: imagePage is null!");
+                LOGGER.debug(e.getMessage());
+                LOGGER.debug("Warning: imagePage is null!");
                 imagePage = null;
             }
             // This is here for when the image is resized to a thumbnail because ripme doesn't report a screensize
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HitomiRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HitomiRipper.java
index 3e92cc61..c5f0bbd4 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/HitomiRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HitomiRipper.java
@@ -9,9 +9,7 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import org.json.JSONArray;
-import org.json.JSONObject;
 import org.jsoup.nodes.Document;
-import org.jsoup.nodes.Element;
 
 import com.rarchives.ripme.ripper.AbstractHTMLRipper;
 import com.rarchives.ripme.utils.Http;
@@ -57,7 +55,7 @@ public class HitomiRipper extends AbstractHTMLRipper {
     public List<String> getURLsFromPage(Document doc) {
         List<String> result = new ArrayList<>();
         String json = doc.text().replaceAll("var galleryinfo =", "");
-        logger.info(json);
+        LOGGER.info(json);
         JSONArray json_data = new JSONArray(json);
         for (int i = 0; i < json_data.length(); i++) {
             result.add("https://0a.hitomi.la/galleries/" + galleryId + "/" + json_data.getJSONObject(i).getString("name"));
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HypnohubRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HypnohubRipper.java
index 1eabefb9..5b481258 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/HypnohubRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HypnohubRipper.java
@@ -14,8 +14,6 @@ import org.jsoup.nodes.Element;
 import com.rarchives.ripme.ripper.AbstractHTMLRipper;
 import com.rarchives.ripme.utils.Http;
 
-import javax.print.Doc;
-
 public class HypnohubRipper extends AbstractHTMLRipper {
 
     public HypnohubRipper(URL url) throws IOException {
@@ -55,14 +53,14 @@ public class HypnohubRipper extends AbstractHTMLRipper {
     }
 
     private String ripPost(String url) throws IOException {
-        logger.info(url);
+        LOGGER.info(url);
         Document doc = Http.url(url).get();
         return "https:" +  doc.select("img.image").attr("src");
 
     }
 
     private String ripPost(Document doc) {
-        logger.info(url);
+        LOGGER.info(url);
         return "https:" +  doc.select("img.image").attr("src");
 
     }
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagebamRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagebamRipper.java
index b33f5624..3aca67cf 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagebamRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagebamRipper.java
@@ -99,16 +99,16 @@ public class ImagebamRipper extends AbstractHTMLRipper {
             // Attempt to use album title as GID
             Elements elems = getFirstPage().select("legend");
             String title = elems.first().text();
-            logger.info("Title text: '" + title + "'");
+            LOGGER.info("Title text: '" + title + "'");
             Pattern p = Pattern.compile("^(.*)\\s\\d* image.*$");
             Matcher m = p.matcher(title);
             if (m.matches()) {
                 return getHost() + "_" + getGID(url) + " (" + m.group(1).trim() + ")";
             }
-            logger.info("Doesn't match " + p.pattern());
+            LOGGER.info("Doesn't match " + p.pattern());
         } catch (Exception e) {
             // Fall back to default album naming convention
-            logger.warn("Failed to get album title from " + url, e);
+            LOGGER.warn("Failed to get album title from " + url, e);
         }
         return super.getAlbumTitle(url);
     }
@@ -148,14 +148,14 @@ public class ImagebamRipper extends AbstractHTMLRipper {
                     //the direct link to the image seems to always be linked in the <meta> part of the html.
                     if (metaTag.attr("property").equals("og:image")) {
                         imgsrc = metaTag.attr("content");
-                        logger.info("Found URL " + imgsrc);
+                        LOGGER.info("Found URL " + imgsrc);
                         break;//only one (useful) image possible for an "image page".
                     }
                 }
                
                 //for debug, or something goes wrong.
                 if (imgsrc.isEmpty()) {
-                    logger.warn("Image not found at " + this.url);
+                    LOGGER.warn("Image not found at " + this.url);
                     return;
                 }
                
@@ -167,7 +167,7 @@ public class ImagebamRipper extends AbstractHTMLRipper {
                 
                 addURLToDownload(new URL(imgsrc), prefix);
             } catch (IOException e) {
-                logger.error("[!] Exception while loading/parsing " + this.url, e);
+                LOGGER.error("[!] Exception while loading/parsing " + this.url, e);
             }
         }
     }
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagefapRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagefapRipper.java
index 83d4f098..1a658c59 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagefapRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagefapRipper.java
@@ -43,7 +43,7 @@ public class ImagefapRipper extends AbstractHTMLRipper {
             newURL += "p";
         }
         newURL += "gid=" + gid + "&view=2";
-        logger.debug("Changed URL from " + url + " to " + newURL);
+        LOGGER.debug("Changed URL from " + url + " to " + newURL);
         return new URL(newURL);
     }
 
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagevenueRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagevenueRipper.java
index f9175656..f50a84a0 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagevenueRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagevenueRipper.java
@@ -102,7 +102,7 @@ public class ImagevenueRipper extends AbstractHTMLRipper {
                 // Find image
                 Elements images = doc.select("a > img");
                 if (images.isEmpty()) {
-                    logger.warn("Image not found at " + this.url);
+                    LOGGER.warn("Image not found at " + this.url);
                     return;
                 }
                 Element image = images.first();
@@ -115,7 +115,7 @@ public class ImagevenueRipper extends AbstractHTMLRipper {
                 }
                 addURLToDownload(new URL(imgsrc), prefix);
             } catch (IOException e) {
-                logger.error("[!] Exception while loading/parsing " + this.url, e);
+                LOGGER.error("[!] Exception while loading/parsing " + this.url, e);
             }
         }
     }
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImgurRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImgurRipper.java
index 280060eb..d3f94456 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImgurRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ImgurRipper.java
@@ -108,24 +108,24 @@ public class ImgurRipper extends AlbumRipper {
                 String title = null;
                 final String defaultTitle1 = "Imgur: The most awesome images on the Internet";
                 final String defaultTitle2 = "Imgur: The magic of the Internet";
-                logger.info("Trying to get album title");
+                LOGGER.info("Trying to get album title");
                 elems = albumDoc.select("meta[property=og:title]");
                 if (elems != null) {
                     title = elems.attr("content");
-                    logger.debug("Title is " + title);
+                    LOGGER.debug("Title is " + title);
                 }
                 // This is here encase the album is unnamed, to prevent
                 // Imgur: The most awesome images on the Internet from being added onto the album name
                 if (title.contains(defaultTitle1) || title.contains(defaultTitle2)) {
-                    logger.debug("Album is untitled or imgur is returning the default title");
+                    LOGGER.debug("Album is untitled or imgur is returning the default title");
                     // We set the title to "" here because if it's found in the next few attempts it will be changed
                     // but if it's nto found there will be no reason to set it later
                     title = "";
-                    logger.debug("Trying to use title tag to get title");
+                    LOGGER.debug("Trying to use title tag to get title");
                     elems = albumDoc.select("title");
                     if (elems != null) {
                         if (elems.text().contains(defaultTitle1) || elems.text().contains(defaultTitle2)) {
-                            logger.debug("Was unable to get album title or album was untitled");
+                            LOGGER.debug("Was unable to get album title or album was untitled");
                         }
                         else {
                             title = elems.text();
@@ -159,29 +159,29 @@ public class ImgurRipper extends AlbumRipper {
             case ALBUM:
                 // Fall-through
             case USER_ALBUM:
-                logger.info("Album type is USER_ALBUM");
+                LOGGER.info("Album type is USER_ALBUM");
                 // Don't call getAlbumTitle(this.url) with this
                 // as it seems to cause the album to be downloaded to a subdir.
                 ripAlbum(this.url);
                 break;
             case SERIES_OF_IMAGES:
-                logger.info("Album type is SERIES_OF_IMAGES");
+                LOGGER.info("Album type is SERIES_OF_IMAGES");
                 ripAlbum(this.url);
                 break;
             case SINGLE_IMAGE:
-                logger.info("Album type is SINGLE_IMAGE");
+                LOGGER.info("Album type is SINGLE_IMAGE");
                 ripSingleImage(this.url);
                 break;
             case USER:
-                logger.info("Album type is USER");
+                LOGGER.info("Album type is USER");
                 ripUserAccount(url);
                 break;
             case SUBREDDIT:
-                logger.info("Album type is SUBREDDIT");
+                LOGGER.info("Album type is SUBREDDIT");
                 ripSubreddit(url);
                 break;
             case USER_IMAGES:
-                logger.info("Album type is USER_IMAGES");
+                LOGGER.info("Album type is USER_IMAGES");
                 ripUserImages(url);
                 break;
         }
@@ -241,7 +241,7 @@ public class ImgurRipper extends AlbumRipper {
             String[] imageIds = m.group(1).split(",");
             for (String imageId : imageIds) {
                 // TODO: Fetch image with ID imageId
-                logger.debug("Fetching image info for ID " + imageId);
+                LOGGER.debug("Fetching image info for ID " + imageId);
                 try {
                     JSONObject json = Http.url("https://api.imgur.com/2/image/" + imageId + ".json").getJSON();
                     if (!json.has("image")) {
@@ -259,7 +259,7 @@ public class ImgurRipper extends AlbumRipper {
                     ImgurImage theImage = new ImgurImage(new URL(original));
                     album.addImage(theImage);
                 } catch (Exception e) {
-                    logger.error("Got exception while fetching imgur ID " + imageId, e);
+                    LOGGER.error("Got exception while fetching imgur ID " + imageId, e);
                 }
             }
         }
@@ -271,7 +271,7 @@ public class ImgurRipper extends AlbumRipper {
         if (!strUrl.contains(",")) {
             strUrl += "/all";
         }
-        logger.info("    Retrieving " + strUrl);
+        LOGGER.info("    Retrieving " + strUrl);
         Document doc = getDocument(strUrl);
         // Try to use embedded JSON to retrieve images
         Matcher m = getEmbeddedJsonMatcher(doc);
@@ -283,7 +283,7 @@ public class ImgurRipper extends AlbumRipper {
                                        .getJSONArray("images");
                 return createImgurAlbumFromJsonArray(url, jsonImages);
             } catch (JSONException e) {
-                logger.debug("Error while parsing JSON at " + url + ", continuing", e);
+                LOGGER.debug("Error while parsing JSON at " + url + ", continuing", e);
             }
         }
 
@@ -291,10 +291,10 @@ public class ImgurRipper extends AlbumRipper {
         // http://i.rarchives.com/search.cgi?cache=http://imgur.com/a/albumID
         // At the least, get the thumbnails.
 
-        logger.info("[!] Falling back to /noscript method");
+        LOGGER.info("[!] Falling back to /noscript method");
 
         String newUrl = url.toExternalForm() + "/noscript";
-        logger.info("    Retrieving " + newUrl);
+        LOGGER.info("    Retrieving " + newUrl);
         doc = Jsoup.connect(newUrl)
                             .userAgent(USER_AGENT)
                             .get();
@@ -311,7 +311,7 @@ public class ImgurRipper extends AlbumRipper {
                 image = "http:" + thumb.select("img").attr("src");
             } else {
                 // Unable to find image in this div
-                logger.error("[!] Unable to find image in div: " + thumb.toString());
+                LOGGER.error("[!] Unable to find image in div: " + thumb.toString());
                 continue;
             }
             if (image.endsWith(".gif") && Utils.getConfigBoolean("prefer.mp4", false)) {
@@ -368,7 +368,7 @@ public class ImgurRipper extends AlbumRipper {
      * @throws IOException
      */
     private void ripUserAccount(URL url) throws IOException {
-        logger.info("Retrieving " + url);
+        LOGGER.info("Retrieving " + url);
         sendUpdate(STATUS.LOADING_RESOURCE, url.toExternalForm());
         Document doc = Http.url(url).get();
         for (Element album : doc.select("div.cover a")) {
@@ -383,7 +383,7 @@ public class ImgurRipper extends AlbumRipper {
                 ripAlbum(albumURL, albumID);
                 Thread.sleep(SLEEP_BETWEEN_ALBUMS * 1000);
             } catch (Exception e) {
-                logger.error("Error while ripping album: " + e.getMessage(), e);
+                LOGGER.error("Error while ripping album: " + e.getMessage(), e);
             }
         }
     }
@@ -420,7 +420,7 @@ public class ImgurRipper extends AlbumRipper {
                 }
                 Thread.sleep(1000);
             } catch (Exception e) {
-                logger.error("Error while ripping user images: " + e.getMessage(), e);
+                LOGGER.error("Error while ripping user images: " + e.getMessage(), e);
                 break;
             }
         }
@@ -435,7 +435,7 @@ public class ImgurRipper extends AlbumRipper {
                 pageURL += "/";
             }
             pageURL += "page/" + page + "/miss?scrolled";
-            logger.info("    Retrieving " + pageURL);
+            LOGGER.info("    Retrieving " + pageURL);
             Document doc = Http.url(pageURL).get();
             Elements imgs = doc.select(".post img");
             for (Element img : imgs) {
@@ -456,7 +456,7 @@ public class ImgurRipper extends AlbumRipper {
             try {
                 Thread.sleep(1000);
             } catch (InterruptedException e) {
-                logger.error("Interrupted while waiting to load next album: ", e);
+                LOGGER.error("Interrupted while waiting to load next album: ", e);
                 break;
             }
         }
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/InstagramRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/InstagramRipper.java
index 94331650..890a165d 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/InstagramRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/InstagramRipper.java
@@ -23,7 +23,6 @@ import com.rarchives.ripme.ripper.AbstractJSONRipper;
 import com.rarchives.ripme.utils.Http;
 
 import org.jsoup.Connection;
-import org.jsoup.Jsoup;
 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;
 import com.rarchives.ripme.ui.RipStatusMessage;
@@ -67,7 +66,7 @@ public class InstagramRipper extends AbstractJSONRipper {
     @Override
     public URL sanitizeURL(URL url) throws MalformedURLException {
        URL san_url = new URL(url.toExternalForm().replaceAll("\\?hl=\\S*", ""));
-       logger.info("sanitized URL is " + san_url.toExternalForm());
+       LOGGER.info("sanitized URL is " + san_url.toExternalForm());
         return san_url;
     }
 
@@ -184,7 +183,7 @@ public class InstagramRipper extends AbstractJSONRipper {
     @Override
     public JSONObject getFirstPage() throws IOException {
         Connection.Response resp = Http.url(url).response();
-        logger.info(resp.cookies());
+        LOGGER.info(resp.cookies());
         csrftoken = resp.cookie("csrftoken");
         Document p = resp.parse();
         // Get the query hash so we can download the next page
@@ -197,7 +196,7 @@ public class InstagramRipper extends AbstractJSONRipper {
             Document doc = Http.url("https://www.instagram.com/p/" + videoID).get();
             return doc.select("meta[property=og:video]").attr("content");
         } catch (IOException e) {
-            logger.warn("Unable to get page " + "https://www.instagram.com/p/" + videoID);
+            LOGGER.warn("Unable to get page " + "https://www.instagram.com/p/" + videoID);
         }
         return "";
     }
@@ -279,9 +278,9 @@ public class InstagramRipper extends AbstractJSONRipper {
                                 addURLToDownload(new URL(toAdd.get(slideShowInt)), image_date + data.getString("shortcode"));
                             }
                         } catch (MalformedURLException e) {
-                            logger.error("Unable to download slide show, URL was malformed");
+                            LOGGER.error("Unable to download slide show, URL was malformed");
                         } catch (IOException e) {
-                            logger.error("Unable to download slide show");
+                            LOGGER.error("Unable to download slide show");
                         }
                     }
                 }
@@ -312,7 +311,7 @@ public class InstagramRipper extends AbstractJSONRipper {
             }
 
         } else { // We're ripping from a single page
-            logger.info("Ripping from single page");
+            LOGGER.info("Ripping from single page");
             imageURLs = getPostsFromSinglePage(json);
         }
 
@@ -321,7 +320,7 @@ public class InstagramRipper extends AbstractJSONRipper {
 
     private String getIGGis(String variables) {
         String stringToMD5 = rhx_gis + ":" + variables;
-        logger.debug("String to md5 is \"" + stringToMD5 + "\"");
+        LOGGER.debug("String to md5 is \"" + stringToMD5 + "\"");
         try {
             byte[] bytesOfMessage = stringToMD5.getBytes("UTF-8");
 
@@ -355,7 +354,7 @@ public class InstagramRipper extends AbstractJSONRipper {
                      toreturn = getPage("https://www.instagram.com/graphql/query/?query_hash=" + qHash +
                                      "&variables=" + vars, ig_gis);
                     // Sleep for a while to avoid a ban
-                    logger.info(toreturn);
+                    LOGGER.info(toreturn);
                     if (!pageHasImages(toreturn)) {
                         throw new IOException("No more pages");
                     }
@@ -371,7 +370,7 @@ public class InstagramRipper extends AbstractJSONRipper {
                 sleep(2500);
                 String vars = "{\"id\":\"" + userID + "\",\"first\":50,\"after\":\"" + nextPageID + "\"}";
                 String ig_gis = getIGGis(vars);
-                logger.info(ig_gis);
+                LOGGER.info(ig_gis);
 
                 toreturn = getPage("https://www.instagram.com/graphql/query/?query_hash=" + qHash + "&variables=" + vars, ig_gis);
                 if (!pageHasImages(toreturn)) {
@@ -419,11 +418,11 @@ public class InstagramRipper extends AbstractJSONRipper {
             return new JSONObject(sb.toString());
 
         } catch (MalformedURLException e) {
-            logger.info("Unable to get query_hash, " + url + " is a malformed URL");
+            LOGGER.info("Unable to get query_hash, " + url + " is a malformed URL");
             return null;
         } catch (IOException e) {
-            logger.info("Unable to get query_hash");
-            logger.info(e.getMessage());
+            LOGGER.info("Unable to get query_hash");
+            LOGGER.info(e.getMessage());
             return null;
         }
     }
@@ -444,11 +443,11 @@ public class InstagramRipper extends AbstractJSONRipper {
             in.close();
 
         } catch (MalformedURLException e) {
-            logger.info("Unable to get query_hash, " + jsFileURL + " is a malformed URL");
+            LOGGER.info("Unable to get query_hash, " + jsFileURL + " is a malformed URL");
             return null;
         } catch (IOException e) {
-            logger.info("Unable to get query_hash");
-            logger.info(e.getMessage());
+            LOGGER.info("Unable to get query_hash");
+            LOGGER.info(e.getMessage());
             return null;
         }
         if (!rippingTag) {
@@ -475,7 +474,7 @@ public class InstagramRipper extends AbstractJSONRipper {
                 return m.group(1);
             }
         }
-        logger.error("Could not find query_hash on " + jsFileURL);
+        LOGGER.error("Could not find query_hash on " + jsFileURL);
         return null;
 
     }
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/JagodibujaRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/JagodibujaRipper.java
index f5782dab..c7f7df71 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/JagodibujaRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/JagodibujaRipper.java
@@ -54,16 +54,16 @@ public class JagodibujaRipper extends AbstractHTMLRipper {
                 sleep(500);
                 Document comicPage = Http.url(comicPageUrl.attr("href")).get();
                 Element elem = comicPage.select("span.full-size-link > a").first();
-                logger.info("Got link " + elem.attr("href"));
+                LOGGER.info("Got link " + elem.attr("href"));
                 try {
                     addURLToDownload(new URL(elem.attr("href")), "");
                 } catch (MalformedURLException e) {
-                    logger.warn("Malformed URL");
+                    LOGGER.warn("Malformed URL");
                     e.printStackTrace();
                 }
                 result.add(elem.attr("href"));
             } catch (IOException e) {
-                logger.info("Error loading " + comicPageUrl);
+                LOGGER.info("Error loading " + comicPageUrl);
             }
         }
         return result;
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java
index 376d1292..68197721 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java
@@ -36,7 +36,7 @@ public class LusciousRipper extends AbstractHTMLRipper {
         // "url" is an instance field of the superclass
         Document page = Http.url(url).get();
         URL firstUrl = new URL("https://luscious.net" +  page.select("div > div.album_cover_item > a").first().attr("href"));
-        logger.info("First page is " + "https://luscious.net" +  page.select("div > div.album_cover_item > a").first().attr("href"));
+        LOGGER.info("First page is " + "https://luscious.net" +  page.select("div > div.album_cover_item > a").first().attr("href"));
         return Http.url(firstUrl).get();
     }
 
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ManganeloRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ManganeloRipper.java
index 8f8f8e68..cabb4188 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/ManganeloRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ManganeloRipper.java
@@ -15,8 +15,6 @@ import org.jsoup.nodes.Element;
 import com.rarchives.ripme.ripper.AbstractHTMLRipper;
 import com.rarchives.ripme.utils.Http;
 
-import javax.print.Doc;
-
 public class ManganeloRipper extends AbstractHTMLRipper {
 
     public ManganeloRipper(URL url) throws IOException {
@@ -67,7 +65,7 @@ public class ManganeloRipper extends AbstractHTMLRipper {
     }
 
     private List<String> getURLsFromChap(String url) {
-        logger.debug("Getting urls from " + url);
+        LOGGER.debug("Getting urls from " + url);
         List<String> result = new ArrayList<>();
         try {
             Document doc = Http.url(url).get();
@@ -82,7 +80,7 @@ public class ManganeloRipper extends AbstractHTMLRipper {
     }
 
     private List<String> getURLsFromChap(Document doc) {
-        logger.debug("Getting urls from " + url);
+        LOGGER.debug("Getting urls from " + url);
         List<String> result = new ArrayList<>();
         for (Element el : doc.select("img.img_content")) {
             result.add(el.attr("src"));
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/MotherlessRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/MotherlessRipper.java
index d8fb3655..b2fee8e5 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/MotherlessRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/MotherlessRipper.java
@@ -75,7 +75,7 @@ public class MotherlessRipper extends AlbumRipper {
             if (isStopped()) {
                 break;
             }
-            logger.info("Retrieving " + nextURL);
+            LOGGER.info("Retrieving " + nextURL);
             sendUpdate(STATUS.LOADING_RESOURCE, nextURL);
             Document doc = Http.url(nextURL)
                                .referrer("http://motherless.com")
@@ -152,10 +152,10 @@ public class MotherlessRipper extends AlbumRipper {
                     }
                     addURLToDownload(new URL(file), prefix);
                 } else {
-                    logger.warn("[!] could not find '__fileurl' at " + url);
+                    LOGGER.warn("[!] could not find '__fileurl' at " + url);
                 }
             } catch (IOException e) {
-                logger.error("[!] Exception while loading/parsing " + this.url, e);
+                LOGGER.error("[!] Exception while loading/parsing " + this.url, e);
             }
         }
     }
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/NatalieMuRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/NatalieMuRipper.java
index f455e58b..952b434e 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/NatalieMuRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/NatalieMuRipper.java
@@ -109,7 +109,7 @@ public class NatalieMuRipper extends AbstractHTMLRipper {
                 imgUrl = imgUrl.replace("list_thumb_inbox","xlarge");
                 // Don't download the same URL twice
                 if (imageURLs.contains(imgUrl)) {
-                    logger.debug("Already attempted: " + imgUrl);
+                    LOGGER.debug("Already attempted: " + imgUrl);
                     continue;
                 }
                 imageURLs.add(imgUrl);
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/NewsfilterRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/NewsfilterRipper.java
index 1c7cf8dc..6454c508 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/NewsfilterRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/NewsfilterRipper.java
@@ -43,7 +43,7 @@ public class NewsfilterRipper extends AlbumRipper {
     public void rip() throws IOException {
         String gid = getGID(this.url);
         String theurl = "http://newsfilter.org/gallery/" + gid;
-        logger.info("Loading " + theurl);
+        LOGGER.info("Loading " + theurl);
 
         Connection.Response resp = Jsoup.connect(theurl)
             .timeout(5000)
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/NfsfwRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/NfsfwRipper.java
index 098f1e45..3585b6bb 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/NfsfwRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/NfsfwRipper.java
@@ -86,7 +86,7 @@ public class NfsfwRipper extends AlbumRipper {
             String nextURL = nextAlbum.first;
             String nextSubalbum = nextAlbum.second;
             sendUpdate(STATUS.LOADING_RESOURCE, nextURL);
-            logger.info("    Retrieving " + nextURL);
+            LOGGER.info("    Retrieving " + nextURL);
             if (albumDoc == null) {
                 albumDoc = Http.url(nextURL).get();
             }
@@ -116,7 +116,7 @@ public class NfsfwRipper extends AlbumRipper {
                         break;
                     }
                 } catch (MalformedURLException mue) {
-                    logger.warn("Invalid URL: " + imagePage);
+                    LOGGER.warn("Invalid URL: " + imagePage);
                 }
             }
             if (isThisATest()) {
@@ -133,7 +133,7 @@ public class NfsfwRipper extends AlbumRipper {
             try {
                 Thread.sleep(1000);
             } catch (InterruptedException e) {
-                logger.error("Interrupted while waiting to load next page", e);
+                LOGGER.error("Interrupted while waiting to load next page", e);
                 throw new IOException(e);
             }
         }
@@ -168,7 +168,7 @@ public class NfsfwRipper extends AlbumRipper {
                                    .get();
                 Elements images = doc.select(".gbBlock img");
                 if (images.isEmpty()) {
-                    logger.error("Failed to find image at " + this.url);
+                    LOGGER.error("Failed to find image at " + this.url);
                     return;
                 }
                 String file = images.first().attr("src");
@@ -181,7 +181,7 @@ public class NfsfwRipper extends AlbumRipper {
                 }
                 addURLToDownload(new URL(file), prefix, this.subdir);
             } catch (IOException e) {
-                logger.error("[!] Exception while loading/parsing " + this.url, e);
+                LOGGER.error("[!] Exception while loading/parsing " + this.url, e);
             }
         }
     }
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/NhentaiRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/NhentaiRipper.java
index 7752f18c..daef205e 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/NhentaiRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/NhentaiRipper.java
@@ -9,7 +9,6 @@ import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;
 import org.jsoup.select.Elements;
 
-import java.io.File;
 import java.io.IOException;
 import java.net.MalformedURLException;
 import java.net.URL;
@@ -102,7 +101,7 @@ public class NhentaiRipper extends AbstractHTMLRipper {
         if (blackListedTags == null) {
             return null;
         }
-        logger.info("Blacklisted tags " + blackListedTags[0]);
+        LOGGER.info("Blacklisted tags " + blackListedTags[0]);
         List<String> tagsOnPage = getTags(doc);
         for (String tag : blackListedTags) {
             for (String pageTag : tagsOnPage) {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/PhotobucketRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/PhotobucketRipper.java
index f5cf5cf7..ad0159b3 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/PhotobucketRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/PhotobucketRipper.java
@@ -35,7 +35,7 @@ public class PhotobucketRipper extends AlbumRipper {
     }
 
     public URL sanitizeURL(URL url) throws MalformedURLException {
-        logger.info(url);
+        LOGGER.info(url);
         String u = url.toExternalForm();
         if (u.contains("?")) {
             u = u.substring(0, u.indexOf("?"));
@@ -100,12 +100,12 @@ public class PhotobucketRipper extends AlbumRipper {
             }
             String nextSub = subsToRip.remove(0);
             rippedSubs.add(nextSub);
-            logger.info("Attempting to rip next subalbum: " + nextSub);
+            LOGGER.info("Attempting to rip next subalbum: " + nextSub);
             try {
                 pageResponse = null;
                 subalbums = ripAlbumAndGetSubalbums(nextSub);
             } catch (IOException e) {
-                logger.error("Error while ripping " + nextSub, e);
+                LOGGER.error("Error while ripping " + nextSub, e);
                 break;
             }
             for (String subalbum : subalbums) {
@@ -131,7 +131,7 @@ public class PhotobucketRipper extends AlbumRipper {
             pageIndex++;
             if (pageIndex > 1 || pageResponse == null) {
                 url = theUrl + String.format("?sort=3&page=%d", pageIndex);
-                logger.info("    Retrieving " + url);
+                LOGGER.info("    Retrieving " + url);
                 pageResponse = Http.url(url).response();
             }
             Document albumDoc = pageResponse.parse();
@@ -153,7 +153,7 @@ public class PhotobucketRipper extends AlbumRipper {
                 }
             }
             if (jsonString == null) {
-                logger.error("Unable to find JSON data at URL: " + url);
+                LOGGER.error("Unable to find JSON data at URL: " + url);
                 break;
             }
             JSONObject json = new JSONObject(jsonString);
@@ -189,7 +189,7 @@ public class PhotobucketRipper extends AlbumRipper {
                 + "&albumPath=" + currentAlbumPath // %2Falbums%2Fab10%2FSpazzySpizzy"
                 + "&json=1";
         try {
-            logger.info("Loading " + apiUrl);
+            LOGGER.info("Loading " + apiUrl);
             JSONObject json = Http.url(apiUrl).getJSON();
             JSONArray subalbums = json.getJSONObject("body").getJSONArray("subAlbums");
             for (int i = 0; i < subalbums.length(); i++) {
@@ -202,7 +202,7 @@ public class PhotobucketRipper extends AlbumRipper {
                 result.add(suburl);
             }
         } catch (IOException e) {
-            logger.error("Failed to get subalbums from " + apiUrl, e);
+            LOGGER.error("Failed to get subalbums from " + apiUrl, e);
         }
         return result;
     }
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/PornhubRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/PornhubRipper.java
index 8b3b8d7d..bffd0f2d 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/PornhubRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/PornhubRipper.java
@@ -46,7 +46,7 @@ public class PornhubRipper extends AlbumRipper {
         try {
             // Attempt to use album title as GID
             if (albumDoc == null) {
-                logger.info("    Retrieving " + url.toExternalForm());
+                LOGGER.info("    Retrieving " + url.toExternalForm());
                 sendUpdate(STATUS.LOADING_RESOURCE, url.toString());
                 albumDoc = Http.url(url).get();
             }
@@ -54,7 +54,7 @@ public class PornhubRipper extends AlbumRipper {
             return HOST + "_" + elems.get(0).text();
         } catch (Exception e) {
             // Fall back to default album naming convention
-            logger.warn("Failed to get album title from " + url, e);
+            LOGGER.warn("Failed to get album title from " + url, e);
         }
         return super.getAlbumTitle(url);
     }
@@ -82,7 +82,7 @@ public class PornhubRipper extends AlbumRipper {
         String nextUrl = this.url.toExternalForm();
 
         if (albumDoc == null) {
-            logger.info("    Retrieving album page " + nextUrl);
+            LOGGER.info("    Retrieving album page " + nextUrl);
             sendUpdate(STATUS.LOADING_RESOURCE, nextUrl);
             albumDoc = Http.url(nextUrl)
                            .referrer(this.url)
@@ -92,8 +92,8 @@ public class PornhubRipper extends AlbumRipper {
         // Find thumbnails
         Elements thumbs = albumDoc.select(".photoBlockBox li");
         if (thumbs.isEmpty()) {
-            logger.debug("albumDoc: " + albumDoc);
-            logger.debug("No images found at " + nextUrl);
+            LOGGER.debug("albumDoc: " + albumDoc);
+            LOGGER.debug("No images found at " + nextUrl);
             return;
         }
 
@@ -113,7 +113,7 @@ public class PornhubRipper extends AlbumRipper {
             try {
                 Thread.sleep(IMAGE_SLEEP_TIME);
             } catch (InterruptedException e) {
-                logger.warn("Interrupted while waiting to load next image", e);
+                LOGGER.warn("Interrupted while waiting to load next image", e);
             }
         }
 
@@ -155,7 +155,7 @@ public class PornhubRipper extends AlbumRipper {
                 Elements images = doc.select("#photoImageSection img");
                 Element image = images.first();
                 String imgsrc = image.attr("src");
-                logger.info("Found URL " + imgsrc + " via " + images.get(0));
+                LOGGER.info("Found URL " + imgsrc + " via " + images.get(0));
 
                 // Provide prefix and let the AbstractRipper "guess" the filename
                 String prefix = "";
@@ -167,7 +167,7 @@ public class PornhubRipper extends AlbumRipper {
                 addURLToDownload(imgurl, prefix);
 
             } catch (IOException e) {
-                logger.error("[!] Exception while loading/parsing " + this.url, e);
+                LOGGER.error("[!] Exception while loading/parsing " + this.url, e);
             }
         }
     }
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/RedditRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/RedditRipper.java
index e8798476..bb60d616 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/RedditRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/RedditRipper.java
@@ -4,13 +4,10 @@ import java.io.File;
 import java.io.IOException;
 import java.net.MalformedURLException;
 import java.net.URL;
-import java.util.ArrayList;
 import java.util.List;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import com.rarchives.ripme.ripper.AbstractRipper;
-import com.rarchives.ripme.ripper.rippers.video.GfycatRipper;
 import org.json.JSONArray;
 import org.json.JSONObject;
 import org.json.JSONTokener;
@@ -20,9 +17,6 @@ import com.rarchives.ripme.ui.UpdateUtils;
 import com.rarchives.ripme.utils.Http;
 import com.rarchives.ripme.utils.RipUtils;
 import com.rarchives.ripme.utils.Utils;
-import org.jsoup.Jsoup;
-import org.jsoup.nodes.Document;
-import org.jsoup.nodes.Element;
 
 public class RedditRipper extends AlbumRipper {
 
@@ -110,7 +104,7 @@ public class RedditRipper extends AlbumRipper {
         try {
             Thread.sleep(2000);
         } catch (InterruptedException e) {
-            logger.warn("Interrupted while sleeping", e);
+            LOGGER.warn("Interrupted while sleeping", e);
         }
         return nextURL;
     }
@@ -122,7 +116,7 @@ public class RedditRipper extends AlbumRipper {
             try {
                 Thread.sleep(timeDiff);
             } catch (InterruptedException e) {
-                logger.warn("[!] Interrupted while waiting to load next page", e);
+                LOGGER.warn("[!] Interrupted while waiting to load next page", e);
                 return new JSONArray();
             }
         }
@@ -141,7 +135,7 @@ public class RedditRipper extends AlbumRipper {
         } else if (jsonObj instanceof JSONArray) {
             jsonArray = (JSONArray) jsonObj;
         } else {
-            logger.warn("[!] Unable to parse JSON: " + jsonString);
+            LOGGER.warn("[!] Unable to parse JSON: " + jsonString);
         }
         return jsonArray;
     }
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/SankakuComplexRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/SankakuComplexRipper.java
index d83d5930..6d14dc30 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/SankakuComplexRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/SankakuComplexRipper.java
@@ -89,10 +89,10 @@ public class SankakuComplexRipper extends AbstractHTMLRipper {
                     String siteURL = "https://" + subDomain + "sankakucomplex.com";
                     // Get the page the full sized image is on
                     Document subPage = Http.url(siteURL + postLink).get();
-                    logger.info("Checking page " + siteURL + postLink);
+                    LOGGER.info("Checking page " + siteURL + postLink);
                     imageURLs.add("https:" + subPage.select("div[id=stats] > ul > li > a[id=highres]").attr("href"));
                 } catch (IOException e) {
-                    logger.warn("Error while loading page " + postLink, e);
+                    LOGGER.warn("Error while loading page " + postLink, e);
                 }
         }
         return imageURLs;
@@ -112,7 +112,7 @@ public class SankakuComplexRipper extends AbstractHTMLRipper {
             // Only logged in users can see past page 25
             // Trying to rip page 26 will throw a no images found error
             if (!nextPage.contains("page=26")) {
-                logger.info("Getting next page: " + pagination.attr("abs:next-page-url"));
+                LOGGER.info("Getting next page: " + pagination.attr("abs:next-page-url"));
                 return Http.url(pagination.attr("abs:next-page-url")).cookies(cookies).get();
             }
         }
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/SinfestRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/SinfestRipper.java
index 9de3d2ae..d6a0f9cb 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/SinfestRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/SinfestRipper.java
@@ -50,7 +50,7 @@ public class SinfestRipper extends AbstractHTMLRipper {
     @Override
     public Document getNextPage(Document doc) throws IOException {
         Element elem = doc.select("td.style5 > a > img").last();
-        logger.info(elem.parent().attr("href"));
+        LOGGER.info(elem.parent().attr("href"));
         if (elem == null || elem.parent().attr("href").equals("view.php?date=")) {
             throw new IOException("No more pages");
         }
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/StaRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/StaRipper.java
index 4cfaf485..b331bbce 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/StaRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/StaRipper.java
@@ -65,9 +65,9 @@ public class StaRipper extends AbstractHTMLRipper {
                     cookies.putAll(resp.cookies());
                     thumbPage = resp.parse();
                 } catch (MalformedURLException e) {
-                    logger.info(thumbPageURL + " is a malformed URL");
+                    LOGGER.info(thumbPageURL + " is a malformed URL");
                 } catch (IOException e) {
-                    logger.info(e.getMessage());
+                    LOGGER.info(e.getMessage());
                 }
                 String imageDownloadUrl = thumbPage.select("a.dev-page-download").attr("href");
                 if (imageDownloadUrl != null && !imageDownloadUrl.equals("")) {
@@ -97,10 +97,10 @@ public class StaRipper extends AbstractHTMLRipper {
                     .followRedirects(false)
                     .execute();
             String imageURL = response.header("Location");
-            logger.info(imageURL);
+            LOGGER.info(imageURL);
             return imageURL;
             } catch (IOException e) {
-                logger.info("Got error message " + e.getMessage() + " trying to download " + url);
+                LOGGER.info("Got error message " + e.getMessage() + " trying to download " + url);
                 return null;
             }
     }
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TapasticRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TapasticRipper.java
index 8ef019a2..369ce741 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/TapasticRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TapasticRipper.java
@@ -56,7 +56,7 @@ public class TapasticRipper extends AbstractHTMLRipper {
         List<String> urls = new ArrayList<>();
         String html = page.data();
         if (!html.contains("episodeList : ")) {
-            logger.error("No 'episodeList' found at " + this.url);
+            LOGGER.error("No 'episodeList' found at " + this.url);
             return urls;
         }
         String jsonString = Utils.between(html, "episodeList : ", ",\n").get(0);
@@ -93,7 +93,7 @@ public class TapasticRipper extends AbstractHTMLRipper {
                 }
             }
         } catch (IOException e) {
-            logger.error("[!] Exception while downloading " + url, e);
+            LOGGER.error("[!] Exception while downloading " + url, e);
         }
 
     }
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TeenplanetRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TeenplanetRipper.java
index 630a0d0f..d25ef345 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/TeenplanetRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TeenplanetRipper.java
@@ -69,7 +69,7 @@ public class TeenplanetRipper extends AlbumRipper {
     @Override
     public void rip() throws IOException {
         int index = 0;
-        logger.info("Retrieving " + this.url);
+        LOGGER.info("Retrieving " + this.url);
         sendUpdate(STATUS.LOADING_RESOURCE, this.url.toExternalForm());
         if (albumDoc == null) {
             albumDoc = Http.url(url).get();
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TsuminoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TsuminoRipper.java
index 70c023d3..4886503a 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/TsuminoRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TsuminoRipper.java
@@ -40,7 +40,7 @@ public class TsuminoRipper extends AbstractHTMLRipper {
             JSONObject json = new JSONObject(jsonInfo);
             return json.getJSONArray("reader_page_urls");
         } catch (IOException e) {
-            logger.info(e);
+            LOGGER.info(e);
             sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_ERRORED, "Unable to download album, please compete the captcha at http://www.tsumino.com/Read/Auth/"
                     + getAlbumID() + " and try again");
             return null;
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TumblrRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TumblrRipper.java
index 41da5d13..b7a437b2 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/TumblrRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TumblrRipper.java
@@ -51,11 +51,11 @@ public class TumblrRipper extends AlbumRipper {
         }
 
         if (useDefaultApiKey || Utils.getConfigString(TUMBLR_AUTH_CONFIG_KEY, "JFNLu3CbINQjRdUvZibXW9VpSEVYYtiPJ86o8YmvgLZIoKyuNX").equals("JFNLu3CbINQjRdUvZibXW9VpSEVYYtiPJ86o8YmvgLZIoKyuNX")) {
-            logger.info("Using api key: " + API_KEY);
+            LOGGER.info("Using api key: " + API_KEY);
             return API_KEY;
         } else {
             String userDefinedAPIKey = Utils.getConfigString(TUMBLR_AUTH_CONFIG_KEY, "JFNLu3CbINQjRdUvZibXW9VpSEVYYtiPJ86o8YmvgLZIoKyuNX");
-            logger.info("Using user tumblr.auth api key: " + userDefinedAPIKey);
+            LOGGER.info("Using user tumblr.auth api key: " + userDefinedAPIKey);
             return userDefinedAPIKey;
         }
 
@@ -66,7 +66,7 @@ public class TumblrRipper extends AlbumRipper {
                 "FQrwZMCxVnzonv90rgNUJcAk4FpnoS0mYuSuGYqIpM2cFgp9L4",
                 "qpdkY6nMknksfvYAhf2xIHp0iNRLkMlcWShxqzXyFJRxIsZ1Zz");
         int genNum = new Random().nextInt(APIKEYS.size());
-        logger.info(genNum);
+        LOGGER.info(genNum);
         final String API_KEY = APIKEYS.get(genNum); // Select random API key from APIKEYS
         return API_KEY;
     }
@@ -96,10 +96,10 @@ public class TumblrRipper extends AlbumRipper {
         if (StringUtils.countMatches(u, ".") > 2) {
             url = new URL(u.replace(".tumblr.com", ""));
             if (isTumblrURL(url)) {
-                logger.info("Detected tumblr site: " + url);
+                LOGGER.info("Detected tumblr site: " + url);
             }
             else {
-                logger.info("Not a tumblr site: " + url);
+                LOGGER.info("Not a tumblr site: " + url);
             }
         }
         return url;
@@ -115,7 +115,7 @@ public class TumblrRipper extends AlbumRipper {
             int status = json.getJSONObject("meta").getInt("status");
             return status == 200;
         } catch (IOException e) {
-            logger.error("Error while checking possible tumblr domain: " + url.getHost(), e);
+            LOGGER.error("Error while checking possible tumblr domain: " + url.getHost(), e);
         }
         return false;
     }
@@ -150,7 +150,7 @@ public class TumblrRipper extends AlbumRipper {
 
 
                 String apiURL = getTumblrApiURL(mediaType, offset);
-                logger.info("Retrieving " + apiURL);
+                LOGGER.info("Retrieving " + apiURL);
                 sendUpdate(STATUS.LOADING_RESOURCE, apiURL);
 
                 JSONObject json = null;
@@ -165,7 +165,7 @@ public class TumblrRipper extends AlbumRipper {
                         if (status.getStatusCode() == HttpURLConnection.HTTP_UNAUTHORIZED && !useDefaultApiKey) {
                             retry = true;
                         } else if (status.getStatusCode() == 429) {
-                            logger.error("Tumblr rate limit has been exceeded");
+                            LOGGER.error("Tumblr rate limit has been exceeded");
                             sendUpdate(STATUS.DOWNLOAD_ERRORED,"Tumblr rate limit has been exceeded");
                             exceededRateLimit = true;
                             break;
@@ -178,7 +178,7 @@ public class TumblrRipper extends AlbumRipper {
                     String apiKey = getApiKey();
 
                     String message = "401 Unauthorized. Will retry with default Tumblr API key: " + apiKey;
-                    logger.info(message);
+                    LOGGER.info(message);
                     sendUpdate(STATUS.DOWNLOAD_WARN, message);
 
                     Utils.setConfigString(TUMBLR_AUTH_CONFIG_KEY, apiKey); // save the default key to the config
@@ -186,7 +186,7 @@ public class TumblrRipper extends AlbumRipper {
                     // retry loading the JSON
 
                     apiURL = getTumblrApiURL(mediaType, offset);
-                    logger.info("Retrieving " + apiURL);
+                    LOGGER.info("Retrieving " + apiURL);
                     sendUpdate(STATUS.LOADING_RESOURCE, apiURL);
 
                     json = Http.url(apiURL).getJSON();
@@ -195,7 +195,7 @@ public class TumblrRipper extends AlbumRipper {
                 try {
                     Thread.sleep(1000);
                 } catch (InterruptedException e) {
-                    logger.error("[!] Interrupted while waiting to load next album:", e);
+                    LOGGER.error("[!] Interrupted while waiting to load next album:", e);
                     break;
                 }
 
@@ -224,7 +224,7 @@ public class TumblrRipper extends AlbumRipper {
 
         posts = json.getJSONObject("response").getJSONArray("posts");
         if (posts.length() == 0) {
-            logger.info("   Zero posts returned.");
+            LOGGER.info("   Zero posts returned.");
             return false;
         }
 
@@ -251,7 +251,7 @@ public class TumblrRipper extends AlbumRipper {
                             addURLToDownload(redirectedURL);
                         }
                     } catch (Exception e) {
-                        logger.error("[!] Error while parsing photo in " + photo, e);
+                        LOGGER.error("[!] Error while parsing photo in " + photo, e);
                     }
                 }
             } else if (post.has("video_url")) {
@@ -259,7 +259,7 @@ public class TumblrRipper extends AlbumRipper {
                     fileURL = new URL(post.getString("video_url").replaceAll("http:", "https:"));
                     addURLToDownload(fileURL);
                 } catch (Exception e) {
-                    logger.error("[!] Error while parsing video in " + post, e);
+                    LOGGER.error("[!] Error while parsing video in " + post, e);
                     return true;
                 }
             }
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TwitterRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TwitterRipper.java
index 60c7a6bb..cd5cf582 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/TwitterRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TwitterRipper.java
@@ -103,13 +103,13 @@ public class TwitterRipper extends AlbumRipper {
                     .getJSONObject(resource)
                     .getJSONObject(api);
             int remaining = stats.getInt("remaining");
-            logger.info("    Twitter " + resource + " calls remaining: " + remaining);
+            LOGGER.info("    Twitter " + resource + " calls remaining: " + remaining);
             if (remaining < 20) {
-                logger.error("Twitter API calls exhausted: " + stats.toString());
+                LOGGER.error("Twitter API calls exhausted: " + stats.toString());
                 throw new IOException("Less than 20 API calls remaining; not enough to rip.");
             }
         } catch (JSONException e) {
-            logger.error("JSONException: ", e);
+            LOGGER.error("JSONException: ", e);
             throw new IOException("Error while parsing JSON: " + body, e);
         }
     }
@@ -142,7 +142,7 @@ public class TwitterRipper extends AlbumRipper {
 
     private List<JSONObject> getTweets(String url) throws IOException {
         List<JSONObject> tweets = new ArrayList<>();
-        logger.info("    Retrieving " + url);
+        LOGGER.info("    Retrieving " + url);
         Document doc = Http.url(url)
                 .ignoreContentType()
                 .header("Authorization", "Bearer " + accessToken)
@@ -171,7 +171,7 @@ public class TwitterRipper extends AlbumRipper {
     private int parseTweet(JSONObject tweet) throws MalformedURLException {
         int parsedCount = 0;
         if (!tweet.has("extended_entities")) {
-            logger.error("XXX Tweet doesn't have entitites");
+            LOGGER.error("XXX Tweet doesn't have entitites");
             return 0;
         }
 
@@ -201,7 +201,7 @@ public class TwitterRipper extends AlbumRipper {
                         addURLToDownload(new URL(url));
                         parsedCount++;
                     } else {
-                        logger.debug("Unexpected media_url: " + url);
+                        LOGGER.debug("Unexpected media_url: " + url);
                     }
                 }
             }
@@ -229,14 +229,14 @@ public class TwitterRipper extends AlbumRipper {
         for (int i = 0; i < MAX_REQUESTS; i++) {
             List<JSONObject> tweets = getTweets(getApiURL(lastMaxID - 1));
             if (tweets.isEmpty()) {
-                logger.info("   No more tweets found.");
+                LOGGER.info("   No more tweets found.");
                 break;
             }
-            logger.debug("Twitter response #" + (i + 1) + " Tweets:\n" + tweets);
+            LOGGER.debug("Twitter response #" + (i + 1) + " Tweets:\n" + tweets);
             if (tweets.size() == 1 &&
                     lastMaxID.equals(tweets.get(0).getString("id_str"))
                     ) {
-                logger.info("   No more tweet found.");
+                LOGGER.info("   No more tweet found.");
                 break;
             }
 
@@ -256,7 +256,7 @@ public class TwitterRipper extends AlbumRipper {
             try {
                 Thread.sleep(WAIT_TIME);
             } catch (InterruptedException e) {
-                logger.error("[!] Interrupted while waiting to load more results", e);
+                LOGGER.error("[!] Interrupted while waiting to load more results", e);
                 break;
             }
         }
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TwodgalleriesRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TwodgalleriesRipper.java
index d7cd8e10..b3f48505 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/TwodgalleriesRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TwodgalleriesRipper.java
@@ -66,7 +66,7 @@ public class TwodgalleriesRipper extends AbstractHTMLRipper {
         try {
             login();
         } catch (IOException e) {
-            logger.error("Failed to login", e);
+            LOGGER.error("Failed to login", e);
         }
         String url = getURL(getGID(this.url), offset);
         return Http.url(url)
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ViewcomicRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ViewcomicRipper.java
index abdb0320..d2ea95fc 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/ViewcomicRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ViewcomicRipper.java
@@ -43,7 +43,7 @@ public class ViewcomicRipper extends AbstractHTMLRipper {
                 return getHost() + "_" + title.trim();
             } catch (IOException e) {
                 // Fall back to default album naming convention
-                logger.info("Unable to find title at " + url);
+                LOGGER.info("Unable to find title at " + url);
             }
             return super.getAlbumTitle(url);
         }
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/VkRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/VkRipper.java
index caf24916..b2472cc9 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/VkRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/VkRipper.java
@@ -71,7 +71,7 @@ public class VkRipper extends AlbumRipper {
         String[] jsonStrings = doc.toString().split("<!>");
         JSONObject json = new JSONObject(jsonStrings[jsonStrings.length - 1]);
         JSONArray videos = json.getJSONArray("all");
-        logger.info("Found " + videos.length() + " videos");
+        LOGGER.info("Found " + videos.length() + " videos");
         for (int i = 0; i < videos.length(); i++) {
             JSONArray jsonVideo = videos.getJSONArray(i);
             int vidid = jsonVideo.getInt(1);
@@ -85,7 +85,7 @@ public class VkRipper extends AlbumRipper {
             try {
                 Thread.sleep(500);
             } catch (InterruptedException e) {
-                logger.error("Interrupted while waiting to fetch next video URL", e);
+                LOGGER.error("Interrupted while waiting to fetch next video URL", e);
                 break;
             }
         }
@@ -96,7 +96,7 @@ public class VkRipper extends AlbumRipper {
         Map<String,String> photoIDsToURLs = new HashMap<>();
         int offset = 0;
         while (true) {
-            logger.info("    Retrieving " + this.url);
+            LOGGER.info("    Retrieving " + this.url);
 
             // al=1&offset=80&part=1
             Map<String,String> postData = new HashMap<>();
@@ -119,7 +119,7 @@ public class VkRipper extends AlbumRipper {
             Set<String> photoIDsToGet = new HashSet<>();
             for (Element a : elements) {
                 if (!a.attr("onclick").contains("showPhoto('")) {
-                    logger.error("a: " + a);
+                    LOGGER.error("a: " + a);
                     continue;
                 }
                 String photoID = a.attr("onclick");
@@ -134,12 +134,12 @@ public class VkRipper extends AlbumRipper {
                     try {
                         photoIDsToURLs.putAll(getPhotoIDsToURLs(photoID));
                     } catch (IOException e) {
-                        logger.error("Exception while retrieving photo id " + photoID, e);
+                        LOGGER.error("Exception while retrieving photo id " + photoID, e);
                         continue;
                     }
                 }
                 if (!photoIDsToURLs.containsKey(photoID)) {
-                    logger.error("Could not find URL for photo ID: " + photoID);
+                    LOGGER.error("Could not find URL for photo ID: " + photoID);
                     continue;
                 }
                 String url = photoIDsToURLs.get(photoID);
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/VscoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/VscoRipper.java
index 00eed1f4..539e9e0c 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/VscoRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/VscoRipper.java
@@ -73,7 +73,7 @@ public class VscoRipper extends AbstractHTMLRipper{
             try {
                 toRip.add(vscoImageToURL(url.toExternalForm()));
             } catch (IOException ex) {
-                logger.debug("Failed to convert " + url.toString() + " to external form.");
+                LOGGER.debug("Failed to convert " + url.toString() + " to external form.");
             }
             
         } else {//want to rip a member profile
@@ -94,12 +94,12 @@ public class VscoRipper extends AbstractHTMLRipper{
                         String relativeURL = vscoImageToURL(link.attr("href"));
                         toRip.add(baseURL + relativeURL);
                     } catch (IOException ex) {
-                        logger.debug("Could not add \"" + link.toString() + "\" to list for ripping.");
+                        LOGGER.debug("Could not add \"" + link.toString() + "\" to list for ripping.");
                     }
                 }
             }
             */
-            logger.debug("Sorry, RipMe currently only supports ripping single images.");
+            LOGGER.debug("Sorry, RipMe currently only supports ripping single images.");
             
             
         }
@@ -121,14 +121,14 @@ public class VscoRipper extends AbstractHTMLRipper{
                 givenURL = givenURL.replaceAll("\\?h=[0-9]+", "");//replace the "?h=xxx" tag at the end of the URL (where each x is a number)
                 
                 result = givenURL;
-                logger.debug("Found image URL: " + givenURL);
+                LOGGER.debug("Found image URL: " + givenURL);
                 break;//immediatly stop after getting URL (there should only be 1 image to be downloaded)
             }
         }
         
         //Means website changed, things need to be fixed.
         if (result.isEmpty()){
-            logger.error("Could not find image URL at: " + url);
+            LOGGER.error("Could not find image URL at: " + url);
         }
         
         return result;
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/XhamsterRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/XhamsterRipper.java
index ce01f1cf..385f6d1f 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/XhamsterRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/XhamsterRipper.java
@@ -36,7 +36,7 @@ public class XhamsterRipper extends AbstractHTMLRipper {
         URLToReturn = URLToReturn.replaceAll("m.xhamster.com", "xhamster.com");
         URLToReturn = URLToReturn.replaceAll("\\w\\w.xhamster.com", "xhamster.com");
         URL san_url = new URL(URLToReturn.replaceAll("xhamster.com", "m.xhamster.com"));
-        logger.info("sanitized URL is " + san_url.toExternalForm());
+        LOGGER.info("sanitized URL is " + san_url.toExternalForm());
         return san_url;
     }
 
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ZizkiRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ZizkiRipper.java
index c5dc447e..d0ca82b2 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/ZizkiRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ZizkiRipper.java
@@ -55,11 +55,11 @@ public class ZizkiRipper extends AbstractHTMLRipper {
 
             Element authorSpan = getFirstPage().select("span[class=creator]").first();
             String author = authorSpan.select("a").first().text();
-            logger.debug("Author: " + author);
+            LOGGER.debug("Author: " + author);
             return getHost() + "_" + author + "_" + title.trim();
         } catch (IOException e) {
             // Fall back to default album naming convention
-            logger.info("Unable to find title at " + url);
+            LOGGER.info("Unable to find title at " + url);
         }
         return super.getAlbumTitle(url);
     }
@@ -78,9 +78,9 @@ public class ZizkiRipper extends AbstractHTMLRipper {
     public List<String> getURLsFromPage(Document page) {
         List<String> imageURLs = new ArrayList<>();
         // Page contains images
-        logger.info("Look for images.");
+        LOGGER.info("Look for images.");
         for (Element thumb : page.select("img")) {
-            logger.info("Img");
+            LOGGER.info("Img");
             if (super.isStopped()) break;
             // Find thumbnail image source
             String image = null;
@@ -89,7 +89,7 @@ public class ZizkiRipper extends AbstractHTMLRipper {
             if (thumb.hasAttr("typeof")) {
                 img_type = thumb.attr("typeof");
                 if (img_type.equals("foaf:Image")) {
-                  logger.debug("Found image with " + img_type);
+                  LOGGER.debug("Found image with " + img_type);
                   if (thumb.parent() != null &&
                       thumb.parent().parent() != null &&
                       thumb.parent().parent().attr("class") != null &&
@@ -97,7 +97,7 @@ public class ZizkiRipper extends AbstractHTMLRipper {
                      )
                   {
                      src = thumb.attr("src");
-                     logger.debug("Found url with " + src);
+                     LOGGER.debug("Found url with " + src);
                      if (!src.contains("zizki.com")) {
                      } else {
                        imageURLs.add(src.replace("/styles/medium/public/","/styles/large/public/"));
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/CliphunterRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/CliphunterRipper.java
index fb38b216..16526945 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/CliphunterRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/CliphunterRipper.java
@@ -55,7 +55,7 @@ public class CliphunterRipper extends VideoRipper {
 
     @Override
     public void rip() throws IOException {
-        logger.info("Retrieving " + this.url);
+        LOGGER.info("Retrieving " + this.url);
         String html = Http.url(url).get().html();
         String jsonString = html.substring(html.indexOf("var flashVars = {d: '") + 21);
         jsonString = jsonString.substring(0, jsonString.indexOf("'"));
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/GfycatRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/GfycatRipper.java
index ccaaa225..75577597 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/GfycatRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/GfycatRipper.java
@@ -65,7 +65,7 @@ public class GfycatRipper extends VideoRipper {
      * @throws IOException
      */
     public static String getVideoURL(URL url) throws IOException {
-        logger.info("Retrieving " + url.toExternalForm());
+        LOGGER.info("Retrieving " + url.toExternalForm());
         
         //Sanitize the URL first
         url = new URL(url.toExternalForm().replace("/gifs/detail", ""));
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/MotherlessVideoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/MotherlessVideoRipper.java
index 9a2e47b1..6af8840b 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/MotherlessVideoRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/MotherlessVideoRipper.java
@@ -52,10 +52,10 @@ public class MotherlessVideoRipper extends VideoRipper {
 
     @Override
     public void rip() throws IOException {
-        logger.info("    Retrieving " + this.url);
+        LOGGER.info("    Retrieving " + this.url);
         String html = Http.url(this.url).get().toString();
         if (html.contains("__fileurl = '")) {
-            logger.error("WTF");
+            LOGGER.error("WTF");
         }
         List<String> vidUrls = Utils.between(html, "__fileurl = '", "';");
         if (vidUrls.isEmpty()) {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/PornhubRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/PornhubRipper.java
index ea98bcfd..c5489870 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/PornhubRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/PornhubRipper.java
@@ -54,7 +54,7 @@ public class PornhubRipper extends VideoRipper {
 
     @Override
     public void rip() throws IOException {
-        logger.info("    Retrieving " + this.url.toExternalForm());
+        LOGGER.info("    Retrieving " + this.url.toExternalForm());
         Document doc = Http.url(this.url).get();
         String html = doc.body().html();
         Pattern p = Pattern.compile("^.*flashvars_[0-9]+ = (.+});.*$", Pattern.DOTALL);
@@ -81,10 +81,10 @@ public class PornhubRipper extends VideoRipper {
                 }
                 addURLToDownload(new URL(vidUrl), HOST + "_" + getGID(this.url));
             } catch (JSONException e) {
-                logger.error("Error while parsing JSON at " + url, e);
+                LOGGER.error("Error while parsing JSON at " + url, e);
                 throw e;
             } catch (Exception e) {
-                logger.error("Error while retrieving video URL at " + url, e);
+                LOGGER.error("Error while retrieving video URL at " + url, e);
                 throw new IOException(e);
             }
         }
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/TwitchVideoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/TwitchVideoRipper.java
index 8a483066..d977708a 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/TwitchVideoRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/TwitchVideoRipper.java
@@ -55,7 +55,7 @@ public class TwitchVideoRipper extends VideoRipper {
 
     @Override
     public void rip() throws IOException {
-        logger.info("Retrieving " + this.url);
+        LOGGER.info("Retrieving " + this.url);
         Document doc = Http.url(url).get();
         
         //Get user friendly filename from page title
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/ViddmeRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/ViddmeRipper.java
index f36d7ce4..078b32a5 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/ViddmeRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/ViddmeRipper.java
@@ -53,7 +53,7 @@ public class ViddmeRipper extends VideoRipper {
 
     @Override
     public void rip() throws IOException {
-        logger.info("    Retrieving " + this.url.toExternalForm());
+        LOGGER.info("    Retrieving " + this.url.toExternalForm());
         Document doc = Http.url(this.url).get();
         Elements videos = doc.select("meta[name=twitter:player:stream]");
         if (videos.isEmpty()) {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/VidearnRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/VidearnRipper.java
index bae7a965..052b2cbe 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/VidearnRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/VidearnRipper.java
@@ -54,7 +54,7 @@ public class VidearnRipper extends VideoRipper {
 
     @Override
     public void rip() throws IOException {
-        logger.info("Retrieving " + this.url);
+        LOGGER.info("Retrieving " + this.url);
         Document doc = Http.url(url).get();
         List<String> mp4s = Utils.between(doc.html(), "file:\"", "\"");
         if (mp4s.isEmpty()) {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/VineRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/VineRipper.java
index d2931b0b..1ca59676 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/VineRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/VineRipper.java
@@ -54,7 +54,7 @@ public class VineRipper extends VideoRipper {
 
     @Override
     public void rip() throws IOException {
-        logger.info("    Retrieving " + this.url.toExternalForm());
+        LOGGER.info("    Retrieving " + this.url.toExternalForm());
         Document doc = Http.url(this.url).get();
         Elements props = doc.select("meta[property=twitter:player:stream]");
         if (props.isEmpty()) {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/VkRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/VkRipper.java
index c610a470..70528727 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/VkRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/VkRipper.java
@@ -52,7 +52,7 @@ public class VkRipper extends VideoRipper {
 
     @Override
     public void rip() throws IOException {
-        logger.info("    Retrieving " + this.url);
+        LOGGER.info("    Retrieving " + this.url);
         String videoURL = getVideoURLAtPage(this.url.toExternalForm());
         addURLToDownload(new URL(videoURL), HOST + "_" + getGID(this.url));
         waitForThreads();
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/XhamsterRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/XhamsterRipper.java
index 09df1c8d..9043bfeb 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/XhamsterRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/XhamsterRipper.java
@@ -54,7 +54,7 @@ public class XhamsterRipper extends VideoRipper {
 
     @Override
     public void rip() throws IOException {
-        logger.info("Retrieving " + this.url);
+        LOGGER.info("Retrieving " + this.url);
         Document doc = Http.url(url).get();
         Elements videos = doc.select("div.player-container > a");
         if (videos.isEmpty()) {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/XvideosRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/XvideosRipper.java
index 6dde798d..ef71a4bb 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/XvideosRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/XvideosRipper.java
@@ -54,12 +54,12 @@ public class XvideosRipper extends VideoRipper {
 
     @Override
     public void rip() throws IOException {
-        logger.info("    Retrieving " + this.url);
+        LOGGER.info("    Retrieving " + this.url);
         Document doc = Http.url(this.url).get();
         Elements scripts = doc.select("script");
         for (Element e : scripts) {
             if (e.html().contains("html5player.setVideoUrlHigh")) {
-                logger.info("Found the right script");
+                LOGGER.info("Found the right script");
                 String[] lines = e.html().split("\n");
                 for (String line: lines) {
                     if (line.contains("html5player.setVideoUrlHigh")) {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/YoupornRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/YoupornRipper.java
index 0c87f175..2399ea87 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/YoupornRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/YoupornRipper.java
@@ -54,7 +54,7 @@ public class YoupornRipper extends VideoRipper {
 
     @Override
     public void rip() throws IOException {
-        logger.info("    Retrieving " + this.url);
+        LOGGER.info("    Retrieving " + this.url);
         Document doc = Http.url(this.url).get();
         Elements videos = doc.select("video");
         if (videos.isEmpty()) {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/YuvutuRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/YuvutuRipper.java
index 34c10947..2891efb5 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/YuvutuRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/YuvutuRipper.java
@@ -55,7 +55,7 @@ public class YuvutuRipper extends VideoRipper {
 
     @Override
     public void rip() throws IOException {
-        logger.info("Retrieving " + this.url);
+        LOGGER.info("Retrieving " + this.url);
         Document doc = Http.url(url).get();
         Element iframe = doc.select("iframe").first();
         String iframeSrc = iframe.attr("src");