Example 1
def tools(request: HttpRequest, tool: str = "main") -> HttpResponse:
    """Tools listing."""
    settings_text = ''
    if not request.user.is_staff:
        return render_error(request,
                            "You need to be an admin to use the tools.")
    if tool == "transfer_missing_downloads":
        crawler_thread = CrawlerThread(crawler_logger, crawler_settings,
                                       '-tmd'.split())
        crawler_thread.start()
        return HttpResponseRedirect(request.META["HTTP_REFERER"])
    elif tool == "retry_failed":
        crawler_thread = CrawlerThread(crawler_logger, crawler_settings,
                                       '--retry-failed'.split())
        crawler_thread.start()
        return HttpResponseRedirect(request.META["HTTP_REFERER"])
    elif tool == "update_newer_than":
        p = request.GET
        if p and 'newer_than' in p:
            newer_than_date = p['newer_than']
            try:
                if parse_date(newer_than_date) is not None:
                    crawler_settings.workers.web_queue.enqueue_args_list(
                        ('-unt', newer_than_date))
                    messages.success(
                        request,
                        'Updating galleries posted after ' + newer_than_date)
                else:
                    messages.error(request,
                                   'Invalid date format.',
                                   extra_tags='danger')
            except ValueError:
                messages.error(request, 'Invalid date.', extra_tags='danger')
            return HttpResponseRedirect(request.META["HTTP_REFERER"])
    elif tool == "update_missing_thumbnails":
        p = request.GET
        if p and 'limit_number' in p:

            try:
                limit_number = int(p['limit_number'])

                provider = request.GET.get('provider', '')
                if provider:
                    crawler_settings.workers.web_queue.enqueue_args_list(
                        ('-umt', str(limit_number), '-ip', provider))
                else:
                    crawler_settings.workers.web_queue.enqueue_args_list(
                        ('-umt', str(limit_number)))
                messages.success(
                    request,
                    'Updating galleries missing thumbnails, limiting to the {} oldest'
                    .format(limit_number))

            except ValueError:
                messages.error(request, 'Invalid limit.', extra_tags='danger')
            return HttpResponseRedirect(request.META["HTTP_REFERER"])
    elif tool == "generate_missing_thumbs":
        archives = Archive.objects.filter(thumbnail='')
        for archive in archives:
            frontend_logger.info('Generating thumbs for file: {}'.format(
                archive.zipped.name))
            archive.generate_thumbnails()
        return HttpResponseRedirect(request.META["HTTP_REFERER"])
    elif tool == "calculate_missing_info":
        archives = Archive.objects.filter_by_missing_file_info()
        for archive in archives:
            frontend_logger.info('Calculating file info for file: {}'.format(
                archive.zipped.name))
            archive.recalc_fileinfo()
        return HttpResponseRedirect(request.META["HTTP_REFERER"])
    elif tool == "recalc_all_file_info":
        if thread_exists('fileinfo_worker'):
            return render_error(request,
                                "File info worker is already running.")

        archives = Archive.objects.all()
        frontend_logger.info(
            'Recalculating file info for all archives, count: {}'.format(
                archives.count()))

        image_worker_thread = ImageWorker(crawler_logger, 4)
        for archive in archives:
            if os.path.exists(archive.zipped.path):
                image_worker_thread.enqueue_archive(archive)
        fileinfo_thread = threading.Thread(
            name='fileinfo_worker',
            target=image_worker_thread.start_info_thread)
        fileinfo_thread.start()
        return HttpResponseRedirect(request.META["HTTP_REFERER"])
    elif tool == "set_all_hidden_as_public":

        archives = Archive.objects.filter(gallery__hidden=True)

        for archive in archives:
            if os.path.isfile(archive.zipped.path):
                archive.public = True
                archive.save()
                archive.gallery.public = True
                archive.gallery.save()

        return HttpResponseRedirect(request.META["HTTP_REFERER"])
    elif tool == "regenerate_all_thumbs":
        if thread_exists('thumbnails_worker'):
            return render_error(request,
                                "Thumbnails worker is already running.")

        archives = Archive.objects.all()
        frontend_logger.info(
            'Generating thumbs for all archives, count {}'.format(
                archives.count()))

        image_worker_thread = ImageWorker(crawler_logger, 4)
        for archive in archives:
            if os.path.exists(archive.zipped.path):
                image_worker_thread.enqueue_archive(archive)
        thumbnails_thread = threading.Thread(
            name='thumbnails_worker',
            target=image_worker_thread.start_thumbs_thread)
        thumbnails_thread.start()
        return HttpResponseRedirect(request.META["HTTP_REFERER"])
    elif tool == "generate_possible_matches_internally":
        if thread_exists('match_unmatched_worker'):
            return render_error(request, "Matching worker is already running.")
        provider = request.GET.get('provider', '')
        try:
            cutoff = float(request.GET.get('cutoff', '0.4'))
        except ValueError:
            cutoff = 0.4
        try:
            max_matches = int(request.GET.get('max-matches', '10'))
        except ValueError:
            max_matches = 10
        frontend_logger.info(
            'Looking for possible matches in gallery database '
            'for non-matched archives (cutoff: {}, max matches: {}) '
            'using provider filter "{}"'.format(cutoff, max_matches, provider))
        matching_thread = threading.Thread(
            name='match_unmatched_worker',
            target=generate_possible_matches_for_archives,
            args=(None, ),
            kwargs={
                'logger': frontend_logger,
                'cutoff': cutoff,
                'max_matches': max_matches,
                'filters': (provider, ),
                'match_local': True,
                'match_web': False
            })
        matching_thread.daemon = True
        matching_thread.start()
        messages.success(
            request,
            'Looking for possible matches, filtering providers: {}, cutoff: {}.'
            .format(provider, cutoff))
        return HttpResponseRedirect(request.META["HTTP_REFERER"])
    elif tool == "clear_all_archive_possible_matches":
        ArchiveMatches.objects.all().delete()
        frontend_logger.info('Clearing all possible matches for archives.')
        messages.success(request,
                         'Clearing all possible matches for archives.')
        return HttpResponseRedirect(request.META["HTTP_REFERER"])
    elif tool == "search_wanted_galleries_provider_titles":
        if thread_exists('web_search_worker'):
            messages.error(request,
                           'Web search worker is already running.',
                           extra_tags='danger')
            return HttpResponseRedirect(request.META["HTTP_REFERER"])
        results = WantedGallery.objects.eligible_to_search()

        if not results:
            frontend_logger.info('No wanted galleries eligible to search.')
            messages.success(request,
                             'No wanted galleries eligible to search.')
            return HttpResponseRedirect(request.META["HTTP_REFERER"])

        provider = request.GET.get('provider', '')

        frontend_logger.info(
            'Searching for gallery matches in panda for wanted galleries, starting thread.'
        )
        messages.success(
            request,
            'Searching for gallery matches in panda for wanted galleries, starting thread.'
        )

        panda_search_thread = threading.Thread(
            name='web_search_worker',
            target=create_matches_wanted_galleries_from_providers,
            args=(results, provider),
            kwargs={'logger': frontend_logger})
        panda_search_thread.daemon = True
        panda_search_thread.start()
        return HttpResponseRedirect(request.META["HTTP_REFERER"])
    elif tool == "wanted_galleries_possible_matches":
        if thread_exists('wanted_local_search_worker'):
            return render_error(
                request, "Wanted local matching worker is already running.")

        non_match_wanted = WantedGallery.objects.eligible_to_search()

        if not non_match_wanted:
            frontend_logger.info('No wanted galleries eligible to search.')
            messages.success(request,
                             'No wanted galleries eligible to search.')
            return HttpResponseRedirect(request.META["HTTP_REFERER"])

        frontend_logger.info(
            'Looking for possible matches in gallery database '
            'for wanted galleries (default cutoff 0.4)')

        provider = request.GET.get('provider', '')

        try:
            cutoff = float(request.GET.get('cutoff', '0.4'))
        except ValueError:
            cutoff = 0.4
        try:
            max_matches = int(request.GET.get('max-matches', '10'))
        except ValueError:
            max_matches = 10

        matching_thread = threading.Thread(
            name='wanted_local_search_worker',
            target=create_matches_wanted_galleries_from_providers_internal,
            args=(non_match_wanted, ),
            kwargs={
                'logger': frontend_logger,
                'provider_filter': provider,
                'cutoff': cutoff,
                'max_matches': max_matches
            })
        matching_thread.daemon = True
        matching_thread.start()
        return HttpResponseRedirect(request.META["HTTP_REFERER"])
    elif tool == "restart_viewer":
        crawler_settings.workers.stop_workers_and_wait()
        if hasattr(signal, 'SIGUSR2'):
            os.kill(os.getpid(), signal.SIGUSR2)
        else:
            return render_error(request,
                                "This OS does not support signal SIGUSR2.")
        return HttpResponseRedirect(request.META["HTTP_REFERER"])
    elif tool == "modify_settings":
        p = request.POST
        if p:
            settings_text = p['settings_file']
            if (os.path.isfile(
                    os.path.join(crawler_settings.default_dir,
                                 "settings.ini"))):
                with open(os.path.join(crawler_settings.default_dir,
                                       "settings.ini"),
                          "w",
                          encoding="utf-8") as f:
                    f.write(settings_text)
                    frontend_logger.info(
                        'Modified settings file for Panda Backup')
                    messages.success(
                        request, 'Modified settings file for Panda Backup')
                    return HttpResponseRedirect(request.META["HTTP_REFERER"])
        else:
            if (os.path.isfile(
                    os.path.join(crawler_settings.default_dir,
                                 "settings.ini"))):
                with open(os.path.join(crawler_settings.default_dir,
                                       "settings.ini"),
                          "r",
                          encoding="utf-8") as f:
                    first = f.read(1)
                    if first != '\ufeff':
                        # not a BOM, rewind
                        f.seek(0)
                    settings_text = f.read()
    elif tool == "reload_settings":
        if not request.user.is_staff:
            return render_error(
                request, "You need to be an admin to reload the config.")
        crawler_settings.load_config_from_file()
        frontend_logger.info('Reloaded settings file for Panda Backup')
        messages.success(request, 'Reloaded settings file for Panda Backup')
        return HttpResponseRedirect(request.META["HTTP_REFERER"])
    elif tool == "start_timed_dl":
        crawler_settings.workers.timed_downloader.start_running(
            timer=crawler_settings.timed_downloader_cycle_timer)
        return HttpResponseRedirect(request.META["HTTP_REFERER"])
    elif tool == "stop_timed_dl":
        crawler_settings.workers.timed_downloader.stop_running()
        return HttpResponseRedirect(request.META["HTTP_REFERER"])
    elif tool == "force_run_timed_dl":
        crawler_settings.workers.timed_downloader.stop_running()
        crawler_settings.workers.timed_downloader.force_run_once = True
        crawler_settings.workers.timed_downloader.start_running(
            timer=crawler_settings.timed_downloader_cycle_timer)
        return HttpResponseRedirect(request.META["HTTP_REFERER"])
    elif tool == "start_timed_crawler":
        crawler_settings.workers.timed_crawler.start_running(
            timer=crawler_settings.autochecker.cycle_timer)
        return HttpResponseRedirect(request.META["HTTP_REFERER"])
    elif tool == "stop_timed_crawler":
        crawler_settings.workers.timed_crawler.stop_running()
        return HttpResponseRedirect(request.META["HTTP_REFERER"])
    elif tool == "force_run_timed_crawler":
        crawler_settings.workers.timed_crawler.stop_running()
        crawler_settings.workers.timed_crawler.force_run_once = True
        crawler_settings.workers.timed_crawler.start_running(
            timer=crawler_settings.autochecker.cycle_timer)
        return HttpResponseRedirect(request.META["HTTP_REFERER"])
    elif tool == "start_timed_updater":
        crawler_settings.workers.timed_updater.start_running(
            timer=crawler_settings.autoupdater.cycle_timer)
        return HttpResponseRedirect(request.META["HTTP_REFERER"])
    elif tool == "stop_timed_updater":
        crawler_settings.workers.timed_updater.stop_running()
        return HttpResponseRedirect(request.META["HTTP_REFERER"])
    elif tool == "force_run_timed_updater":
        crawler_settings.workers.timed_updater.stop_running()
        crawler_settings.workers.timed_updater.force_run_once = True
        crawler_settings.workers.timed_updater.start_running(
            timer=crawler_settings.autoupdater.cycle_timer)
        return HttpResponseRedirect(request.META["HTTP_REFERER"])
    elif tool == "start_timed_auto_wanted":
        crawler_settings.workers.timed_auto_wanted.start_running(
            timer=crawler_settings.auto_wanted.cycle_timer)
        return HttpResponseRedirect(request.META["HTTP_REFERER"])
    elif tool == "stop_timed_auto_wanted":
        crawler_settings.workers.timed_auto_wanted.stop_running()
        return HttpResponseRedirect(request.META["HTTP_REFERER"])
    elif tool == "force_run_timed_auto_wanted":
        crawler_settings.workers.timed_auto_wanted.stop_running()
        crawler_settings.workers.timed_auto_wanted.force_run_once = True
        crawler_settings.workers.timed_auto_wanted.start_running(
            timer=crawler_settings.auto_wanted.cycle_timer)
        return HttpResponseRedirect(request.META["HTTP_REFERER"])
    elif tool == "start_web_queue":
        crawler_settings.workers.web_queue.start_running()
        return HttpResponseRedirect(request.META["HTTP_REFERER"])

    threads_status = get_thread_status_bool()

    d = {
        'tool': tool,
        'settings_text': settings_text,
        'threads_status': threads_status
    }

    return render(request, "viewer/tools.html", d)
Example 2
def wanted_galleries(request: HttpRequest) -> HttpResponse:
    p = request.POST
    get = request.GET

    title = get.get("title", '')
    tags = get.get("tags", '')

    try:
        page = int(get.get("page", '1'))
    except ValueError:
        page = 1

    if 'clear' in get:
        form = WantedGallerySearchForm()
    else:
        form = WantedGallerySearchForm(initial={'title': title, 'tags': tags})

    if not request.user.is_staff:
        results = WantedGallery.objects.filter(
            Q(should_search=True)
            & Q(found=False)
            & Q(public=True)).prefetch_related(
                'artists', 'announces').order_by('-release_date')
        return render(request, "viewer/wanted_galleries.html",
                      {'results': results})

    if p and request.user.is_staff:
        if 'delete_galleries' in p:
            pks = []
            for k, v in p.items():
                if k.startswith("sel-"):
                    # k, pk = k.split('-')
                    # results[pk][k] = v
                    pks.append(v)
            results = WantedGallery.objects.filter(id__in=pks).reverse()

            for wanted_gallery in results:
                message = 'Removing wanted gallery: {}'.format(
                    wanted_gallery.title)
                frontend_logger.info(message)
                messages.success(request, message)
                wanted_gallery.delete()
        elif 'search_for_galleries' in p:
            pks = []
            for k, v in p.items():
                if k.startswith("sel-"):
                    # k, pk = k.split('-')
                    # results[pk][k] = v
                    pks.append(v)
            results = WantedGallery.objects.filter(id__in=pks).reverse()
            results.update(should_search=True)

            for wanted_gallery in results:
                message = 'Marking gallery as to search for: {}'.format(
                    wanted_gallery.title)
                frontend_logger.info(message)
                messages.success(request, message)
        elif 'toggle-public' in p:
            pks = []
            for k, v in p.items():
                if k.startswith("sel-"):
                    # k, pk = k.split('-')
                    # results[pk][k] = v
                    pks.append(v)
            results = WantedGallery.objects.filter(id__in=pks).reverse()
            results.update(public=True)

            for wanted_gallery in results:
                message = 'Marking gallery as public: {}'.format(
                    wanted_gallery.title)
                frontend_logger.info(message)
                messages.success(request, message)
        elif 'search_provider_galleries' in p:
            if thread_exists('web_search_worker'):
                messages.error(request,
                               'Web search worker is already running.',
                               extra_tags='danger')
                return HttpResponseRedirect(request.META["HTTP_REFERER"])
            pks = []
            for k, v in p.items():
                if k.startswith("sel-"):
                    # k, pk = k.split('-')
                    # results[pk][k] = v
                    pks.append(v)
            results = WantedGallery.objects.filter(id__in=pks).reverse()

            provider = p.get('provider', '')

            try:
                cutoff = float(p.get('cutoff', '0.4'))
            except ValueError:
                cutoff = 0.4
            try:
                max_matches = int(p.get('max-matches', '10'))
            except ValueError:
                max_matches = 10

            message = 'Searching for gallery matches in providers for wanted galleries.'
            frontend_logger.info(message)
            messages.success(request, message)

            panda_search_thread = threading.Thread(
                name='web_search_worker',
                target=create_matches_wanted_galleries_from_providers,
                args=(results, provider),
                kwargs={
                    'logger': frontend_logger,
                    'cutoff': cutoff,
                    'max_matches': max_matches,
                })
            panda_search_thread.daemon = True
            panda_search_thread.start()
        elif 'search_provider_galleries_internal' in p:
            if thread_exists('wanted_local_search_worker'):
                messages.error(
                    request,
                    'Wanted local matching worker is already running.',
                    extra_tags='danger')
                return HttpResponseRedirect(request.META["HTTP_REFERER"])
            pks = []
            for k, v in p.items():
                if k.startswith("sel-"):
                    # k, pk = k.split('-')
                    # results[pk][k] = v
                    pks.append(v)
            results = WantedGallery.objects.filter(id__in=pks).reverse()

            provider = p.get('provider', '')

            try:
                cutoff = float(p.get('cutoff', '0.4'))
            except ValueError:
                cutoff = 0.4
            try:
                max_matches = int(p.get('max-matches', '10'))
            except ValueError:
                max_matches = 10

            try:
                must_be_used = bool(p.get('must-be-used', False))
            except ValueError:
                must_be_used = False

            message = 'Searching for gallery matches locally in providers for wanted galleries.'
            frontend_logger.info(message)
            messages.success(request, message)

            matching_thread = threading.Thread(
                name='wanted_local_search_worker',  # match the thread_exists() check above
                target=create_matches_wanted_galleries_from_providers_internal,
                args=(results, ),
                kwargs={
                    'logger': frontend_logger,
                    'provider_filter': provider,
                    'cutoff': cutoff,
                    'max_matches': max_matches,
                    'must_be_used': must_be_used
                })
            matching_thread.daemon = True
            matching_thread.start()
        elif 'clear_all_matches' in p:
            GalleryMatch.objects.all().delete()
            message = 'Clearing matches from every wanted gallery.'
            frontend_logger.info(message)
            messages.success(request, message)

    params = {}

    for k, v in get.items():
        params[k] = v

    for k in wanted_gallery_filter_keys:
        if k not in params:
            params[k] = ''

    results = filter_wanted_galleries_simple(params)

    results = results.prefetch_related(
        Prefetch('gallerymatch_set',
                 queryset=GalleryMatch.objects.select_related(
                     'gallery', 'wanted_gallery').prefetch_related(
                         Prefetch('gallery__tags',
                                  queryset=Tag.objects.filter(
                                      scope__exact='artist'),
                                  to_attr='artist_tags')),
                 to_attr='possible_galleries'),
        'possible_galleries__gallery__archive_set', 'artists',
        'announces').order_by('-release_date')

    paginator = Paginator(results, 100)
    try:
        results = paginator.page(page)
    except (InvalidPage, EmptyPage):
        results = paginator.page(paginator.num_pages)

    matchers = crawler_settings.provider_context.get_matchers_name_priority(
        crawler_settings, matcher_type='title')

    d = {'results': results, 'title_matchers': matchers, 'form': form}
    return render(request, "viewer/wanted_galleries.html", d)
Example 3
def archives_not_matched_with_gallery(request: HttpRequest) -> HttpResponse:
    p = request.POST
    get = request.GET

    title = get.get("title", '')
    tags = get.get("tags", '')

    try:
        page = int(get.get("page", '1'))
    except ValueError:
        page = 1

    if 'clear' in get:
        form = ArchiveSearchForm()
    else:
        form = ArchiveSearchForm(initial={'title': title, 'tags': tags})

    if p:
        pks = []
        for k, v in p.items():
            if k.startswith("sel-"):
                # k, pk = k.split('-')
                # results[pk][k] = v
                pks.append(v)

        preserved = Case(
            *[When(pk=pk, then=pos) for pos, pk in enumerate(pks)])

        archives = Archive.objects.filter(id__in=pks).order_by(preserved)
        if 'create_possible_matches' in p:
            if thread_exists('match_unmatched_worker'):
                return render_error(
                    request, "Local matching worker is already running.")
            provider = p['create_possible_matches']
            try:
                cutoff = float(p.get('cutoff', '0.4'))
            except ValueError:
                cutoff = 0.4
            try:
                max_matches = int(p.get('max-matches', '10'))
            except ValueError:
                max_matches = 10

            frontend_logger.info(
                'User {}: Looking for possible matches in gallery database '
                'for non-matched archives (cutoff: {}, max matches: {}) '
                'using provider filter "{}"'.format(request.user.username,
                                                    cutoff, max_matches,
                                                    provider))
            matching_thread = threading.Thread(
                name='match_unmatched_worker',
                target=generate_possible_matches_for_archives,
                args=(archives, ),
                kwargs={
                    'logger': frontend_logger,
                    'cutoff': cutoff,
                    'max_matches': max_matches,
                    'filters': (provider, ),
                    'match_local': True,
                    'match_web': False
                })
            matching_thread.daemon = True
            matching_thread.start()
            messages.success(request, 'Starting internal match worker.')
        elif 'clear_possible_matches' in p:

            for archive in archives:
                archive.possible_matches.clear()

            frontend_logger.info(
                'User {}: Clearing possible matches for archives'.format(
                    request.user.username))
            messages.success(request, 'Clearing possible matches.')

    params = {
        'sort': 'create_date',
        'asc_desc': 'desc',
        'filename': title,
    }

    for k, v in get.items():
        params[k] = v

    for k in archive_filter_keys:
        if k not in params:
            params[k] = ''

    results = filter_archives_simple(params)

    if 'show-matched' not in get:
        results = results.filter(gallery__isnull=True)

    results = results.prefetch_related(
        Prefetch('archivematches_set',
                 queryset=ArchiveMatches.objects.select_related(
                     'gallery', 'archive').prefetch_related(
                         Prefetch('gallery__tags',
                                  queryset=Tag.objects.filter(
                                      scope__exact='artist'),
                                  to_attr='artist_tags')),
                 to_attr='possible_galleries'),
        'possible_galleries__gallery',
    )

    if 'with-possible-matches' in get:
        results = results.annotate(
            n_possible_matches=Count('possible_matches')).filter(
                n_possible_matches__gt=0)

    paginator = Paginator(results, 50)
    try:
        results = paginator.page(page)
    except (InvalidPage, EmptyPage):
        results = paginator.page(paginator.num_pages)

    d = {
        'results':
        results,
        'providers':
        Gallery.objects.all().values_list('provider', flat=True).distinct(),
        'form':
        form
    }
    return render(request, "viewer/collaborators/unmatched_archives.html", d)
Example 4
def archives_not_matched_with_gallery(request: HttpRequest) -> HttpResponse:
    p = request.POST
    get = request.GET

    title = get.get("title", '')
    tags = get.get("tags", '')

    try:
        page = int(get.get("page", '1'))
    except ValueError:
        page = 1

    if 'clear' in get:
        form = ArchiveSearchForm()
    else:
        form = ArchiveSearchForm(initial={'title': title, 'tags': tags})

    if p:
        pks = []
        for k, v in p.items():
            if k.startswith("sel-"):
                # k, pk = k.split('-')
                # results[pk][k] = v
                pks.append(v)
        archives = Archive.objects.filter(id__in=pks).order_by('-create_date')
        if 'delete_archives' in p:
            for archive in archives:
                message = 'Removing archive not matched: {} and deleting file: {}'.format(
                    archive.title, archive.zipped.path)
                frontend_logger.info(message)
                messages.success(request, message)
                archive.delete_all_files()
                archive.delete()
        elif 'delete_objects' in p:
            for archive in archives:
                message = 'Removing archive not matched: {}, keeping file: {}'.format(
                    archive.title, archive.zipped.path)
                frontend_logger.info(message)
                messages.success(request, message)
                archive.delete_files_but_archive()
                archive.delete()
        elif 'create_possible_matches' in p:
            if thread_exists('web_match_worker'):
                return render_error(request,
                                    'Web match worker is already running.')

            matcher_filter = p['create_possible_matches']
            try:
                cutoff = float(p.get('cutoff', '0.4'))
            except ValueError:
                cutoff = 0.4
            try:
                max_matches = int(p.get('max-matches', '10'))
            except ValueError:
                max_matches = 10

            web_match_thread = threading.Thread(
                name='web_match_worker',
                target=generate_possible_matches_for_archives,
                args=(archives, ),
                kwargs={
                    'logger': frontend_logger,
                    'cutoff': cutoff,
                    'max_matches': max_matches,
                    'filters': (matcher_filter, ),
                    'match_local': False,
                    'match_web': True
                })
            web_match_thread.daemon = True
            web_match_thread.start()
            messages.success(request, 'Starting web match worker.')
        elif 'create_possible_matches_internal' in p:
            if thread_exists('match_unmatched_worker'):
                return render_error(
                    request, "Local matching worker is already running.")
            provider = p['create_possible_matches_internal']
            try:
                cutoff = float(p.get('cutoff', '0.4'))
            except ValueError:
                cutoff = 0.4
            try:
                max_matches = int(p.get('max-matches', '10'))
            except ValueError:
                max_matches = 10

            frontend_logger.info(
                'Looking for possible matches in gallery database '
                'for non-matched archives (cutoff: {}, max matches: {}) '
                'using provider filter "{}"'.format(cutoff, max_matches,
                                                    provider))
            matching_thread = threading.Thread(
                name='match_unmatched_worker',
                target=generate_possible_matches_for_archives,
                args=(archives, ),
                kwargs={
                    'logger': frontend_logger,
                    'cutoff': cutoff,
                    'max_matches': max_matches,
                    'filters': (provider, ),
                    'match_local': True,
                    'match_web': False
                })
            matching_thread.daemon = True
            matching_thread.start()
            messages.success(request, 'Starting internal match worker.')

    params = {
        'sort': 'create_date',
        'asc_desc': 'desc',
        'filename': title,
    }

    for k, v in get.items():
        params[k] = v

    for k in archive_filter_keys:
        if k not in params:
            params[k] = ''

    results = filter_archives_simple(params)

    results = results.filter(gallery__isnull=True).prefetch_related(
        Prefetch('archivematches_set',
                 queryset=ArchiveMatches.objects.select_related(
                     'gallery', 'archive').prefetch_related(
                         Prefetch('gallery__tags',
                                  queryset=Tag.objects.filter(
                                      scope__exact='artist'),
                                  to_attr='artist_tags')),
                 to_attr='possible_galleries'),
        'possible_galleries__gallery',
    )

    if 'no-custom-tags' in get:
        results = results.annotate(
            num_custom_tags=Count('custom_tags')).filter(num_custom_tags=0)
    if 'with-possible-matches' in get:
        results = results.annotate(
            n_possible_matches=Count('possible_matches')).filter(
                n_possible_matches__gt=0)

    paginator = Paginator(results, 100)
    try:
        results = paginator.page(page)
    except (InvalidPage, EmptyPage):
        results = paginator.page(paginator.num_pages)

    d = {
        'results':
        results,
        'providers':
        Gallery.objects.all().values_list('provider', flat=True).distinct(),
        'matchers':
        crawler_settings.provider_context.get_matchers(crawler_settings,
                                                       force=True),
        'api_key':
        crawler_settings.api_key,
        'form':
        form
    }
    return render(request, "viewer/archives_not_matched.html", d)
Example 5
def tools(request: HttpRequest, tool: str = "main", tool_arg: str = '') -> HttpResponse:
    """Tools listing."""
    response = {}
    if not request.user.is_staff:
        response['error'] = 'Not allowed'
        return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8", status=401)
    if tool == "transfer_missing_downloads":
        crawler_thread = CrawlerThread(crawler_settings, '-tmd'.split())
        crawler_thread.start()
        return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
    elif tool == "retry_failed":
        crawler_thread = CrawlerThread(crawler_settings, '--retry-failed'.split())
        crawler_thread.start()
        return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
    elif tool == "update_newer_than":
        p = request.GET
        if p and 'newer_than' in p and crawler_settings.workers.web_queue:
            newer_than_date = p['newer_than']
            try:
                if parse_date(newer_than_date) is not None:
                    crawler_settings.workers.web_queue.enqueue_args_list(('-unt', newer_than_date))
                    return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
                else:
                    response['error'] = 'Invalid date format.'
            except ValueError:
                response['error'] = 'Invalid date.'
            return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8", status=401)
    elif tool == "update_missing_thumbnails":
        p = request.GET
        if p and 'limit_number' in p and crawler_settings.workers.web_queue:

            try:
                limit_number = int(p['limit_number'])

                provider = request.GET.get('provider', '')
                if provider:
                    crawler_settings.workers.web_queue.enqueue_args_list(
                        ('-umt', str(limit_number), '-ip', provider)
                    )
                else:
                    crawler_settings.workers.web_queue.enqueue_args_list(
                        ('-umt', str(limit_number))
                    )
                return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")

            except ValueError:
                response['error'] = 'Invalid limit.'
            return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8", status=401)
    elif tool == "generate_missing_thumbs":
        archives = Archive.objects.filter(thumbnail='')
        for archive in archives:
            logger.info(
                'Generating thumbs for file: {}'.format(archive.zipped.name))
            archive.generate_thumbnails()
        return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
    elif tool == "calculate_missing_info":
        archives_missing_file_info = Archive.objects.filter_by_missing_file_info()
        for archive in archives_missing_file_info:
            logger.info(
                'Calculating file info for file: {}'.format(archive.zipped.name))
            archive.recalc_fileinfo()
        return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
    elif tool == "recalc_all_file_info":
        if thread_exists('fileinfo_worker'):
            response['error'] = 'File info worker is already running.'
            return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8", status=401)

        archives = Archive.objects.all()
        logger.info(
            'Recalculating file info for all archives, count: {}'.format(archives.count())
        )

        image_worker_thread = ImageWorker(4)
        for archive in archives:
            if os.path.exists(archive.zipped.path):
                image_worker_thread.enqueue_archive(archive)
        fileinfo_thread = threading.Thread(
            name='fileinfo_worker', target=image_worker_thread.start_info_thread)
        fileinfo_thread.start()
        return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
    elif tool == "set_all_hidden_as_public":

        archives = Archive.objects.filter(gallery__hidden=True)

        for archive in archives:
            if os.path.isfile(archive.zipped.path):
                archive.public = True
                archive.save()
                if archive.gallery:
                    archive.gallery.public = True
                    archive.gallery.save()

        return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
    elif tool == "regenerate_all_thumbs":
        if thread_exists('thumbnails_worker'):
            response['error'] = "Thumbnails worker is already running."
            return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8", status=401)

        archives = Archive.objects.all()
        logger.info(
            'Generating thumbs for all archives, count {}'.format(archives.count()))

        image_worker_thread = ImageWorker(4)
        for archive in archives:
            if os.path.exists(archive.zipped.path):
                image_worker_thread.enqueue_archive(archive)
        thumbnails_thread = threading.Thread(
            name='thumbnails_worker',
            target=image_worker_thread.start_thumbs_thread
        )
        thumbnails_thread.start()
        return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
    elif tool == "generate_possible_matches_internally":
        if thread_exists('match_unmatched_worker'):
            response['error'] = "Matching worker is already running."
            return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8", status=401)
        provider = request.GET.get('provider', '')
        try:
            cutoff = float(request.GET.get('cutoff', '0.4'))
        except ValueError:
            cutoff = 0.4
        try:
            max_matches = int(request.GET.get('max-matches', '10'))
        except ValueError:
            max_matches = 10
        logger.info(
            'Looking for possible matches in gallery database '
            'for non-matched archives (cutoff: {}, max matches: {}) '
            'using provider filter "{}"'.format(cutoff, max_matches, provider)
        )
        matching_thread = threading.Thread(
            name='match_unmatched_worker',
            target=generate_possible_matches_for_archives,
            args=(None,),
            kwargs={
                'cutoff': cutoff, 'max_matches': max_matches, 'filters': (provider,),
                'match_local': True, 'match_web': False
            })
        matching_thread.daemon = True
        matching_thread.start()
        response['message'] = 'Looking for possible matches, filtering providers: {}, cutoff: {}.'.format(
            provider, cutoff
        )
        return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
    elif tool == "clear_all_archive_possible_matches":
        ArchiveMatches.objects.all().delete()
        logger.info('Clearing all possible matches for archives.')
        response['message'] = 'Clearing all possible matches for archives.'
        return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
    elif tool == "search_wanted_galleries_provider_titles":
        if thread_exists('web_search_worker'):
            response['error'] = 'Web search worker is already running.'
            return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8", status=401)
        results = WantedGallery.objects.eligible_to_search()

        if not results:
            logger.info('No wanted galleries eligible to search.')
            response['message'] = 'No wanted galleries eligible to search.'
            return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")

        provider = request.GET.get('provider', '')

        logger.info(
            'Searching for gallery matches in panda for wanted galleries, starting thread.')
        response['message'] = 'Searching for gallery matches in panda for wanted galleries, starting thread.'
        panda_search_thread = threading.Thread(
            name='web_search_worker',
            target=create_matches_wanted_galleries_from_providers,
            args=(results, provider),
        )
        panda_search_thread.daemon = True
        panda_search_thread.start()
        return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
    elif tool == "wanted_galleries_possible_matches":
        if thread_exists('wanted_local_search_worker'):
            response['error'] = "Wanted local matching worker is already running."
            return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8", status=401)

        non_match_wanted = WantedGallery.objects.eligible_to_search()

        if not non_match_wanted:
            logger.info('No wanted galleries eligible to search.')
            response['message'] = 'No wanted galleries eligible to search.'
            return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")

        logger.info(
            'Looking for possible matches in gallery database '
            'for wanted galleries (default cutoff 0.4)'
        )

        provider = request.GET.get('provider', '')

        try:
            cutoff = float(request.GET.get('cutoff', '0.4'))
        except ValueError:
            cutoff = 0.4
        try:
            max_matches = int(request.GET.get('max-matches', '10'))
        except ValueError:
            max_matches = 10

        matching_thread = threading.Thread(
            name='wanted_local_search_worker',
            target=create_matches_wanted_galleries_from_providers_internal,
            args=(non_match_wanted, ),
            kwargs={'provider_filter': provider, 'cutoff': cutoff, 'max_matches': max_matches})
        matching_thread.daemon = True
        matching_thread.start()
        return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
    elif tool == "restart_viewer":
        crawler_settings.workers.stop_workers_and_wait()
        if hasattr(signal, 'SIGUSR2'):
            os.kill(os.getpid(), signal.SIGUSR2)  # type: ignore
        else:
            response['error'] = "This OS does not support signal SIGUSR2"
            return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8", status=401)
        return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
    elif tool == "settings":
        if request.method == 'POST':
            if not request.body:
                response['error'] = 'Empty body'
                return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8", status=401)
            data = json.loads(request.body.decode("utf-8"))
            if 'data' not in data:
                response['error'] = 'Missing data'
                return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8", status=401)
            settings_text = data['data']
            settings_path = os.path.join(crawler_settings.default_dir, "settings.ini")
            if os.path.isfile(settings_path):
                with open(settings_path, "w", encoding="utf-8") as f:
                    f.write(settings_text)
                    logger.info(
                        'Modified settings file for Panda Backup')
                    response['message'] = 'Modified settings file for Panda Backup'
                    return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
        else:
            settings_path = os.path.join(crawler_settings.default_dir, "settings.ini")
            if os.path.isfile(settings_path):
                with open(settings_path, "r", encoding="utf-8") as f:
                    first = f.read(1)
                    if first != '\ufeff':
                        # not a BOM, rewind
                        f.seek(0)
                    settings_text = f.read()
                    response['data'] = settings_text
                    return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
    elif tool == "reload_settings":
        crawler_settings.load_config_from_file()
        logger.info(
            'Reloaded settings file for Panda Backup')
        response['message'] = 'Reloaded settings file for Panda Backup'
        return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
    elif tool == "start_timed_dl":
        if crawler_settings.workers.timed_downloader:
            crawler_settings.workers.timed_downloader.start_running(timer=crawler_settings.timed_downloader_cycle_timer)
        return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
    elif tool == "stop_timed_dl":
        if crawler_settings.workers.timed_downloader:
            crawler_settings.workers.timed_downloader.stop_running()
        return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
    elif tool == "force_run_timed_dl":
        if crawler_settings.workers.timed_downloader:
            crawler_settings.workers.timed_downloader.stop_running()
            crawler_settings.workers.timed_downloader.force_run_once = True
            crawler_settings.workers.timed_downloader.start_running(timer=crawler_settings.timed_downloader_cycle_timer)
        return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
    elif tool == "start_timed_crawler":
        if tool_arg:
            for provider_auto_crawler in crawler_settings.workers.timed_auto_crawlers:
                if provider_auto_crawler.provider_name == tool_arg:
                    provider_auto_crawler.start_running(
                        timer=crawler_settings.providers[provider_auto_crawler.provider_name].autochecker_timer
                    )
                    break
        else:
            for provider_auto_crawler in crawler_settings.workers.timed_auto_crawlers:
                provider_auto_crawler.start_running(
                    timer=crawler_settings.providers[provider_auto_crawler.provider_name].autochecker_timer
                )
        return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
    elif tool == "stop_timed_crawler":
        if tool_arg:
            for provider_auto_crawler in crawler_settings.workers.timed_auto_crawlers:
                if provider_auto_crawler.provider_name == tool_arg:
                    provider_auto_crawler.stop_running()
                    break
        else:
            for provider_auto_crawler in crawler_settings.workers.timed_auto_crawlers:
                provider_auto_crawler.stop_running()
        return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
    elif tool == "force_run_timed_crawler":
        if tool_arg:
            for provider_auto_crawler in crawler_settings.workers.timed_auto_crawlers:
                if provider_auto_crawler.provider_name == tool_arg:
                    provider_auto_crawler.stop_running()
                    provider_auto_crawler.force_run_once = True
                    provider_auto_crawler.start_running(
                        timer=crawler_settings.providers[provider_auto_crawler.provider_name].autochecker_timer
                    )
                    break
        else:
            for provider_auto_crawler in crawler_settings.workers.timed_auto_crawlers:
                provider_auto_crawler.stop_running()
                provider_auto_crawler.force_run_once = True
                provider_auto_crawler.start_running(
                    timer=crawler_settings.providers[provider_auto_crawler.provider_name].autochecker_timer
                )
        return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
    elif tool == "start_timed_updater":
        if crawler_settings.workers.timed_updater:
            crawler_settings.workers.timed_updater.start_running(timer=crawler_settings.autoupdater.cycle_timer)
        return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
    elif tool == "stop_timed_updater":
        if crawler_settings.workers.timed_updater:
            crawler_settings.workers.timed_updater.stop_running()
        return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
    elif tool == "force_run_timed_updater":
        if crawler_settings.workers.timed_updater:
            crawler_settings.workers.timed_updater.stop_running()
            crawler_settings.workers.timed_updater.force_run_once = True
            crawler_settings.workers.timed_updater.start_running(timer=crawler_settings.autoupdater.cycle_timer)
        return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
    elif tool == "start_timed_auto_wanted":
        if crawler_settings.workers.timed_auto_wanted:
            crawler_settings.workers.timed_auto_wanted.start_running(timer=crawler_settings.auto_wanted.cycle_timer)
        return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
    elif tool == "stop_timed_auto_wanted":
        if crawler_settings.workers.timed_auto_wanted:
            crawler_settings.workers.timed_auto_wanted.stop_running()
        return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
    elif tool == "force_run_timed_auto_wanted":
        if crawler_settings.workers.timed_auto_wanted:
            crawler_settings.workers.timed_auto_wanted.stop_running()
            crawler_settings.workers.timed_auto_wanted.force_run_once = True
            crawler_settings.workers.timed_auto_wanted.start_running(timer=crawler_settings.auto_wanted.cycle_timer)
        return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
    elif tool == "start_web_queue":
        if crawler_settings.workers.web_queue:
            crawler_settings.workers.web_queue.start_running()
        return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
    elif tool == "threads_status":
        threads_status = get_thread_status_bool()
        return HttpResponse(json.dumps({'data': threads_status}), content_type="application/json; charset=utf-8")
    elif tool == "autochecker_providers":
        threads_status = get_thread_status_bool()
        # json.dumps cannot serialize a generator, so build a list instead
        autochecker_providers = [
            (provider_name, threads_status['auto_search_' + provider_name])
            for provider_name in crawler_settings.autochecker.providers
            if 'auto_search_' + provider_name in threads_status
        ]
        return HttpResponse(json.dumps({'data': autochecker_providers}), content_type="application/json; charset=utf-8")

    response['error'] = 'Missing parameters'
    return HttpResponse(json.dumps(response), content_type="application/json; charset=utf-8")
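
Since this variant returns JSON bodies instead of redirects, it can be exercised with Django's test client. A minimal sketch; the URL path and the staff credentials are placeholders, not taken from the project.

# Illustrative call against the JSON tools view using the Django test client.
import json

from django.test import Client

client = Client()
client.login(username='admin', password='password')  # the view requires a staff user
resp = client.get('/jsonapi/tools/threads_status/')  # placeholder path
payload = json.loads(resp.content)
print(payload.get('data') or payload.get('error'))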