Ejemplo n.º 1
0
    def __init__(self, _app: QtWidgets.QApplication):
        """Initialise downloader state, worker pool, UI queues and the Qt window.

        _app: the already-created QApplication instance this downloader
        attaches its main window to.
        """
        self.searchTerm = None  # The currently entered search term
        self.directory = None  # The save directory
        self.downloadImages = False  # download image posts — presumably toggled from the UI
        self.downloadVideos = False  # download video posts — presumably toggled from the UI
        self.saveURLS = False  # save post URLs — exact semantics set elsewhere, TODO confirm
        self.createSubfolder = False  # create a per-search subfolder — presumably
        self.downloadLimit = -1  # -1 appears to mean "no limit" — TODO confirm

        self.r34 = rule34.Sync()  # synchronous rule34 API client
        self.totalExpected = 0  # How many posts are expected
        self.postList = []  # posts gathered for the current search

        self.stopFlag = False  # Tells the currently running threads to stop
        self.done = False  # Set to true when the download completes

        # Two workers — presumably one gatherer and one downloader; confirm against callers.
        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=2)

        # Queues feeding progress / ETA / counter / status updates to the UI thread.
        self.progBarQueue = Queue()
        self.etaQueue = Queue()
        self.lcdQueue = Queue()
        self.currentTaskQueue = Queue()

        self.app = _app
        self.uiWindow = QtWidgets.QMainWindow()
        self.ui = gui.Ui_Rule34Downloader()
Ejemplo n.º 2
0
def test_URLGenPIDLimit():
    """URLGen must reject a PID above the API's accepted range.

    Raises rule34.SelfTest_Failed if the wrong exception — or no exception
    at all — comes back.
    """
    r34 = rule34.Sync()
    try:
        r34.URLGen(tags="gay", PID=2001)
    except rule34.Request_Rejected:
        return  # expected: over-limit PID rejected
    except Exception as e:
        raise rule34.SelfTest_Failed(e)
    # Bug fix: previously the test silently passed when no exception was
    # raised, so a broken PID guard went undetected.
    raise rule34.SelfTest_Failed("PID=2001 was not rejected")
Ejemplo n.º 3
0
def test_Download():
    """A known-good image URL downloads to a real, reasonably sized file."""
    client = rule34.Sync()
    source = 'https://img.rule34.xxx/images/2003/b90ae3f67eaa30669939531292d90e55d58325af.jpg'
    saved = client.download(source)
    assert saved is not None
    assert os.path.isfile(saved)
    assert os.path.getsize(saved) >= 70000
    os.unlink(saved)
Ejemplo n.º 4
0
def test_imageGatherNew_rejectRequest():
    """getImages with an over-limit OverridePID must raise Request_Rejected.

    Raises rule34.SelfTest_Failed if the wrong exception — or no exception
    at all — comes back.
    """
    r34 = rule34.Sync()
    try:
        r34.getImages("straight", fuzzy=True, OverridePID=2001)
    except rule34.Request_Rejected:
        return  # expected: over-limit PID rejected
    except Exception as e:
        raise rule34.SelfTest_Failed(e)
    # Bug fix: previously the test silently passed when no exception was
    # raised, so a missing PID guard went undetected.
    raise rule34.SelfTest_Failed("OverridePID=2001 was not rejected")
Ejemplo n.º 5
0
def test_DownloadErrorCatch():
    """download() must raise Rule34_Error for a bogus image URL.

    Raises rule34.SelfTest_Failed if the wrong exception — or no exception
    at all — comes back.
    """
    r34 = rule34.Sync()
    downloadURL = 'https://img.rule34.xxx/borris/2003/b90ae3f67eaa30669939531292d90e55d58325af.jpg'
    name = None
    try:
        name = r34.download(downloadURL)
    except rule34.Rule34_Error:
        return  # expected failure path
    except Exception as e:
        raise rule34.SelfTest_Failed(e)
    # Bug fix: previously the test silently passed when the bad URL
    # downloaded without error. Clean up the unexpected file, then fail.
    if name is not None and os.path.isfile(name):
        os.unlink(name)
    raise rule34.SelfTest_Failed("bad URL did not raise Rule34_Error")
Ejemplo n.º 6
0
def test_DonwloadNameHandler():
    """Downloading the same URL twice yields two valid on-disk files.

    NOTE: the "Donwload" typo in the name is kept deliberately — renaming
    would change the collected test id.
    """
    client = rule34.Sync()
    source = 'https://img.rule34.xxx/images/2003/b90ae3f67eaa30669939531292d90e55d58325af.jpg'
    saved = [client.download(source) for _ in range(2)]
    for path in saved:
        assert path is not None
        assert os.path.isfile(path)
        assert os.path.getsize(path) >= 70000
        os.unlink(path)
Ejemplo n.º 7
0
def test_imageGatherNew_OverridePID():
    """An explicit in-range OverridePID still returns results."""
    client = rule34.Sync()
    result = client.getImages("straight", singlePage=True, OverridePID=1)
    assert result is not None
Ejemplo n.º 8
0
def test_imageGatherNew_total():
    """A default multi-page gather for a common tag yields exactly 100 posts."""
    client = rule34.Sync()
    posts = client.getImages("straight")
    assert len(posts) == 100
Ejemplo n.º 9
0
def test_imageGatherNew():
    """Gathering images for a common tag returns a non-None result."""
    client = rule34.Sync()
    posts = client.getImages("straight")
    assert posts is not None
Ejemplo n.º 10
0
def test_TotalImages():
    """totalImages for a very common tag reports well over a thousand posts."""
    client = rule34.Sync()
    count = client.totalImages("gay")
    assert count > 1000
Ejemplo n.º 11
0
def test_Sync():
    """The Sync wrapper initialises its internal loop and can fetch images."""
    client = rule34.Sync()
    assert client.l is not None
    assert client.getImages("gay") is not None
Ejemplo n.º 12
0
def test_imageGather_Default():
    """getImageURLS with default options returns something for a common tag."""
    client = rule34.Sync()
    urls = client.getImageURLS("straight")
    assert urls is not None
Ejemplo n.º 13
0
import json
import random
import typing
import urllib.request
import xml.etree.ElementTree
from urllib.request import urlopen

import aiohttp
import requests

import rule34

# Bot version string — presumably reported to users or in request metadata.
botver = "Mewtwo v2.0"
# Desktop-browser User-Agent header, presumably to avoid bot filtering on requests.
user_agent = {
    "User-Agent":
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3803.0 Safari/537.36 Edg/76.0.176.1"
}

# Module-level synchronous rule34 client shared by the helpers below.
r34 = rule34.Sync()


async def r34_s(tags: str):
    """Search rule34 for `tags` and pick a random post from the results.

    NOTE(review): this definition is truncated in the visible source — the
    `try:` below has no matching except/finally here, so only the visible
    portion is documented.
    """
    #--First we connect to the Rule34 API and get search results--#
    async with aiohttp.ClientSession() as session:
        async with session.get(r34.URLGen(tags, 1200, deleted=False)) as url:
            rget = await url.text()
    #--Now we attempt to extract the information--#
    root = xml.etree.ElementTree.fromstring(rget)
    try:
        numeros = []
        num = root.attrib["count"]
        # NOTE(review): XML attribute values are str, so `num == 0` looks
        # like it can never be true — should this compare against "0"?
        # TODO confirm upstream.
        if num == 0:
            return None
        random_pos = random.randint(0, len(root))
Ejemplo n.º 14
0
def test_imageGatherNew_fuzzy():
    """Fuzzy single-page matching still yields results for a common tag."""
    client = rule34.Sync()
    posts = client.getImages("vore", singlePage=True, fuzzy=True)
    assert posts is not None
Ejemplo n.º 15
0
def test_URLGen():
    """URLGen builds the exact expected API query string."""
    client = rule34.Sync()
    expected = "https://rule34.xxx/index.php?page=dapi&s=post&q=index&limit=50&tags=gay&deleted=show&rating:explicit"
    generated = client.URLGen(tags="gay", limit=50, deleted=True)
    assert generated == expected
Ejemplo n.º 16
0
def test_postData():
    """Post #1 exists on the site, so getPostData must return its data."""
    client = rule34.Sync()
    data = client.getPostData(1)
    assert data is not None
Ejemplo n.º 17
0
def test_singleResult():
    """Searching the tag 'rule34' returns at least one post."""
    client = rule34.Sync()
    result = client.getImages("rule34")
    assert result is not None
Ejemplo n.º 18
0
def test_imageGatherNew_nonExist():
    """A nonsense tag that matches nothing must yield None."""
    client = rule34.Sync()
    result = client.getImages("DNATESTMAGICCOODENOTHINGWILLRETURN")
    assert result is None
Ejemplo n.º 19
0
def test_imageGatherNew_Contradiction():
    """Contradictory paging flags (multi-page + randomPID) still return posts."""
    client = rule34.Sync()
    posts = client.getImages("vore", singlePage=False, randomPID=True)
    assert posts is not None
Ejemplo n.º 20
0
 def __init__(self):
     """Initialise the parent class and attach a synchronous rule34 client."""
     super().__init__()
     self.rule34 = rule34.Sync()  # shared API client for this instance
Ejemplo n.º 21
0
def test_URLGenReturnNone():
    """URLGen called without any arguments must return None."""
    client = rule34.Sync()
    url = client.URLGen()
    assert url is None
Ejemplo n.º 22
0
    client = pybooru.Moebooru(random.choice(booruSites))
    try:
        posts = client.post_list(tags=argv, random=True, limit=10)
    except:
        print(traceback.format_exc())
        return 0
    if (len(posts) <= 0):
        return 0
    choice = xrand(len(posts))
    url = posts[0]['file_url']
    data[thr] = [url, 1, choice + 1]


# Dedicated event loop for this module's async work, installed as the
# current loop for this thread.
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
# Module-level synchronous rule34 client shared by the pull helpers below.
rule34_sync = rule34.Sync()


def pull_rule34_xxx(argv, data, thr, delay=5):
    """Fetch rule34 posts for `argv`, retrying for up to `delay` seconds.

    NOTE(review): this definition is truncated in the visible source — the
    outer `try:` below has no matching except/finally here, so only the
    visible portion is documented.
    """
    v1, v2 = 1, 1  # purpose not evident from the visible code — TODO confirm
    try:
        t = time.time()
        # Retry the fetch until it succeeds or the time budget runs out.
        while time.time() - t < delay:
            try:
                sources = rule34_sync.getImages(tags=argv,
                                                fuzzy=True,
                                                randomPID=True,
                                                singlePage=True)
                break
            except TimeoutError:
                pass  # transient timeout: retry within the budget
0
def main(args):
    """Download every rule34 post matching ``args.tags`` into ``args.destination``.

    Expects ``args`` to provide: ``tags`` (list of tag strings), ``limit``
    (int; values <= 0 presumably mean "unlimited" — confirm against the
    argparse setup), ``no_videos`` (bool) and ``destination`` (path string).
    Returns early (logging an error) on any failure.
    """
    # Prepare an instance of the logger for the current script instance
    logger = logging.getLogger("Rule34Downloader")
    logger.setLevel(LOGGING_LEVEL)

    console_handler = logging.StreamHandler()
    console_handler.setLevel(LOGGING_LEVEL)

    # Set the format for the logger output
    formatter = logging.Formatter(
        '[%(asctime)s] %(name)s %(levelname)s: %(message)s')
    console_handler.setFormatter(formatter)

    logger.addHandler(console_handler)

    # =================================================================================

    logger.debug("Arguments: {}".format(args))

    # Synchronous wrapper around the rule34 API
    rule34sync = rule34.Sync()

    # Convert the tags to a more convenient format (space-separated string)
    if len(args.tags) > 1:
        args.tags = ' '.join(args.tags)
    else:
        args.tags = args.tags[0]

    logger.debug("Querying Rule34 apis...")
    images_count = rule34sync.totalImages(args.tags)

    # If the total images count received from the apis is 0, no image is found
    if images_count == 0:
        logger.error("No images found with those tags")
        return

    # Otherwise, let's proceed and download em' all
    logger.info("{} images found!".format(images_count))

    # Since there is a limit set, let's tell that to the user, just to be precise
    if args.limit > 0:
        # Bug fix: this used logging.info (the root logger), bypassing the
        # handler/formatter configured above.
        logger.info("The download limit is capped to {} images".format(
            args.limit))

    logger.info("Gathering data from Rule34... "
                "(this will take approximately {0:.3g} seconds)".format(
                    0.002 * images_count))

    fetch_start = default_timer()  # To measure how much time does it take
    try:
        images = rule34sync.getImages(args.tags, singlePage=False)
    except Exception as e:
        logger.error("There was an error while gathering images.")
        logger.error(
            "There's probably something wrong with this tag, try another one.")
        logger.debug(str(e))
        return

    fetch_end = default_timer()

    # Bug fix: the elapsed time was divided by images_count, so the message
    # claimed a total while reporting the per-image average.
    logger.info("This took exactly {0:.3g} seconds".format(
        fetch_end - fetch_start))

    # If something has gone wrong during the images fetch
    if images is None:
        logger.error("Rule34 didn't give any image, this should not happen")
        return

    # Divide the videos from the images
    videos = [x for x in images if "webm" in x.file_url]

    # If the user doesn't want videos, remove them
    # NOTE(review): the guard is `not args.no_videos`, which strips videos
    # when the flag is False — verify this matches the argparse action.
    if not args.no_videos:
        images = list_diff(images, videos)

    destination_folder = Path(args.destination)

    logger.debug("Checking destination folder existence...")
    if not destination_folder.exists():
        logger.debug("Destination folder doesn't exist! Creating it...")
        destination_folder.mkdir()
        logger.debug("Destination folder created!")
    else:
        logger.debug(
            "Destination folder exists! Checking if it's actually a directory..."
        )
        if not destination_folder.is_dir():
            logger.error("The given destination folder isn't a folder!")
            return
        logger.debug("Destination folder is a directory!")

    downloaded_images = 0
    for image in images:
        # Check for the images limit
        if args.limit > 0 and downloaded_images >= args.limit:
            logger.warning("Downloaded images limit exceeded. Stopping...")
            return

        image_name = image.file_url.split("/")[-1]
        image_extension = image.file_url.rsplit('.', 1)[-1]

        # Save under the post's md5 so distinct posts never collide on name.
        output_name = Path(destination_folder,
                           '.'.join([image.md5, image_extension]))
        logger.debug("Output file: {}".format(output_name))
        logger.info("Downloading {}...".format(image_name))

        response = requests.get(image.file_url, stream=True)
        logger.debug("API response is {}".format(response.status_code))

        if response.status_code != 200:
            logger.error("Error while downloading image! ({})".format(
                response.status_code))
            continue

        # Stream the body to disk in 1 KiB chunks.
        with open(output_name, 'wb') as output_file:
            for chunk in response.iter_content(1024):
                output_file.write(chunk)

        logger.info("{} downloaded!".format(image_name))
        downloaded_images += 1