Example 1
def create_screenshot_from_production_link(production_link_id):
    try:
        prod_link = ProductionLink.objects.get(id=production_link_id)
        if prod_link.production.screenshots.count():
            return  # don't create a screenshot if there's one already

        production_id = prod_link.production_id
        url = prod_link.download_url
        download, file_content = fetch_url(url)
        buf = cStringIO.StringIO(file_content)

        if prod_link.is_zip_file():
            z = zipfile.ZipFile(buf, 'r')
            # catalogue the zipfile contents if we don't have them already
            if not download.archive_members.all():
                download.log_zip_contents(z)
            # select the archive member to extract a screenshot from, if we don't have
            # a candidate already
            if not prod_link.file_for_screenshot:
                file_for_screenshot = download.select_screenshot_file()
                if file_for_screenshot:
                    prod_link.file_for_screenshot = file_for_screenshot
                    prod_link.is_unresolved_for_screenshotting = False
                else:
                    prod_link.is_unresolved_for_screenshotting = True
                prod_link.save()

            image_extension = prod_link.file_for_screenshot.split(
                '.')[-1].lower()
            if image_extension in USABLE_IMAGE_FILE_EXTENSIONS:
                # we encode the filename as iso-8859-1 before retrieving it, because we
                # decoded it that way on insertion into the database to ensure that it had
                # a valid unicode string representation - see mirror/models.py
                member_buf = cStringIO.StringIO(
                    z.read(prod_link.file_for_screenshot.encode('iso-8859-1')))
                z.close()
                img = PILConvertibleImage(
                    member_buf, name_hint=prod_link.file_for_screenshot)
            else:  # image is not a usable format
                z.close()
                return
        else:
            img = PILConvertibleImage(buf, name_hint=url.split('/')[-1])

        screenshot = Screenshot(production_id=production_id,
                                source_download_id=download.id)
        u = download.sha1
        basename = u[0:2] + '/' + u[2:4] + '/' + u[4:8] + '.pl' + str(
            production_link_id) + '.'
        upload_original(img, screenshot, basename, reduced_redundancy=True)
        upload_standard(img, screenshot, basename)
        upload_thumb(img, screenshot, basename)
        screenshot.save()

    except ProductionLink.DoesNotExist:
        # guess it was deleted in the meantime, then.
        pass
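
The basename built above shards uploads by SHA-1 prefix. A minimal sketch of the scheme (the helper name is hypothetical; the real code inlines the string arithmetic):

def screenshot_basename(sha1, production_link_id):
    # e.g. sha1 'abcdef012345...' and link id 42 give 'ab/cd/ef01.pl42.',
    # to which the upload helpers append a size label and file extension
    return '%s/%s/%s.pl%d.' % (sha1[0:2], sha1[2:4], sha1[4:8], production_link_id)
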
Example 2
def create_screenshot_from_production_link(production_link_id):
	try:
		prod_link = ProductionLink.objects.get(id=production_link_id)
		if prod_link.production.screenshots.count():
			return  # don't create a screenshot if there's one already

		production_id = prod_link.production_id
		url = prod_link.download_url
		download, file_content = fetch_url(url)
		buf = cStringIO.StringIO(file_content)

		if prod_link.is_zip_file():
			z = zipfile.ZipFile(buf, 'r')
			# catalogue the zipfile contents if we don't have them already
			if not download.archive_members.all():
				download.log_zip_contents(z)
			# select the archive member to extract a screenshot from, if we don't have
			# a candidate already
			if not prod_link.file_for_screenshot:
				file_for_screenshot = download.select_screenshot_file()
				if file_for_screenshot:
					prod_link.file_for_screenshot = file_for_screenshot
					prod_link.is_unresolved_for_screenshotting = False
				else:
					prod_link.is_unresolved_for_screenshotting = True
				prod_link.save()

			image_extension = prod_link.file_for_screenshot.split('.')[-1].lower()
			if image_extension in USABLE_IMAGE_FILE_EXTENSIONS:
				# we encode the filename as iso-8859-1 before retrieving it, because we
				# decoded it that way on insertion into the database to ensure that it had
				# a valid unicode string representation - see mirror/models.py
				member_buf = cStringIO.StringIO(
					z.read(prod_link.file_for_screenshot.encode('iso-8859-1'))
				)
				z.close()
				img = PILConvertibleImage(member_buf, name_hint=prod_link.file_for_screenshot)
			else:  # image is not a usable format
				z.close()
				return
		else:
			img = PILConvertibleImage(buf, name_hint=url.split('/')[-1])

		screenshot = Screenshot(production_id=production_id, source_download_id=download.id)
		u = download.sha1
		basename = u[0:2] + '/' + u[2:4] + '/' + u[4:8] + '.pl' + str(production_link_id) + '.'
		upload_original(img, screenshot, basename, reduced_redundancy=True)
		upload_standard(img, screenshot, basename)
		upload_thumb(img, screenshot, basename)
		screenshot.save()

	except ProductionLink.DoesNotExist:
		# guess it was deleted in the meantime, then.
		pass
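
download.select_screenshot_file() is not defined in this excerpt. A hypothetical sketch of the kind of heuristic it might apply, assuming it chooses among the catalogued archive member names:

def select_screenshot_file(filenames):
    # keep usable image files, preferring names that suggest a dedicated
    # screenshot over e.g. textures or fonts (this heuristic is assumed)
    candidates = [
        name for name in filenames
        if name.split('.')[-1].lower() in USABLE_IMAGE_FILE_EXTENSIONS
    ]
    candidates.sort(key=lambda name: ('screenshot' not in name.lower(), name.lower()))
    return candidates[0] if candidates else None
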
Example 3
def import_screenshot(production_id, janeway_id, url, suffix):
    blob = fetch_origin_url(url)
    sha1 = blob.sha1
    img = PILConvertibleImage(blob.as_io_buffer(), name_hint=blob.filename)
    basename = sha1[0:2] + '/' + sha1[2:4] + '/' + sha1[4:8] + '.jw' + str(
        janeway_id) + suffix + '.'

    screenshot = Screenshot(production_id=production_id,
                            data_source='janeway',
                            janeway_id=janeway_id,
                            janeway_suffix=suffix)
    upload_original(img, screenshot, basename, reduced_redundancy=False)
    upload_standard(img, screenshot, basename)
    upload_thumb(img, screenshot, basename)
    screenshot.save()
Example 4
def home(request):
    if request.user.is_authenticated:
        banner = Banner.objects.filter(show_for_logged_in_users=True).order_by('-created_at').select_related('banner_image').first()
    else:
        banner = Banner.objects.filter(show_for_anonymous_users=True).order_by('-created_at').select_related('banner_image').first()

    latest_releases = Production.objects.filter(
        has_screenshot=True, release_date_date__isnull=False
    ).exclude(
        tags__slug__in=['xxx', 'nsfw']
    ).only(
        'id', 'title', 'release_date_date', 'release_date_precision', 'supertype',
    ).prefetch_related(
        'author_nicks', 'author_affiliation_nicks', 'platforms', 'types'
    ).order_by('-release_date_date', '-created_at')[:5]
    latest_releases_screenshots = Screenshot.select_for_production_ids([prod.id for prod in latest_releases])
    latest_releases_and_screenshots = [
        (production, latest_releases_screenshots.get(production.id))
        for production in latest_releases
    ]

    one_year_ago = datetime.datetime.now() - datetime.timedelta(365)
    latest_additions = Production.objects.exclude(
        release_date_date__gte=one_year_ago
    ).prefetch_related(
        'author_nicks', 'author_affiliation_nicks', 'platforms', 'types'
    ).order_by('-created_at')[:5]

    comments = Comment.objects.select_related(
        'user'
    ).prefetch_related(
        'commentable'
    ).order_by('-created_at')[:5]

    today = datetime.date.today()
    try:
        three_months_time = today.replace(day=1, month=today.month+3)
    except ValueError:
        three_months_time = today.replace(day=1, month=today.month-9, year=today.year+1)

    upcoming_parties = Party.objects.filter(
        end_date_date__gte=today, start_date_date__lt=three_months_time
    ).exclude(
        start_date_precision='y'
    ).order_by('start_date_date')

    if request.user.is_staff:
        news_stories = NewsStory.objects.select_related('image').order_by('-created_at')[:6]
    else:
        news_stories = NewsStory.objects.filter(is_public=True).select_related('image').order_by('-created_at')[:6]

    return render(request, 'homepage/home.html', {
        'banner': banner,
        'news_stories': news_stories,
        'forum_topics': Topic.objects.order_by('-last_post_at').select_related('created_by_user', 'last_post_by_user')[:5],
        'latest_releases_and_screenshots': latest_releases_and_screenshots,
        'latest_additions': latest_additions,
        'comments': comments,
        'upcoming_parties': upcoming_parties,
    })
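
Screenshot.select_for_production_ids recurs throughout these examples: it fetches screenshots for a whole batch of productions in one query and returns a dict keyed by production id, so .get(production.id) is None for productions without one. A hedged sketch of such a method; the real implementation may rank or randomise which screenshot it keeps:

class Screenshot(models.Model):
    ...

    @classmethod
    def select_for_production_ids(cls, production_ids):
        # one query for the whole batch; keep a single screenshot per production
        screenshots = {}
        for screenshot in cls.objects.filter(production_id__in=production_ids):
            screenshots.setdefault(screenshot.production_id, screenshot)
        return screenshots
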
Example 5
def recommended_production_listing(recommendations,
                                   show_screenshots=False,
                                   show_prod_types=False,
                                   mark_excludable=False):
    if show_screenshots:
        screenshots = Screenshot.select_for_production_ids([
            recommendation.production.id for recommendation in recommendations
        ])
    else:
        screenshots = {}  # pragma: no cover

    rows = [(recommendation, recommendation.production,
             screenshots.get(recommendation.production.id))
            for recommendation in recommendations]
    return {
        'rows': rows,
        'show_screenshots': show_screenshots,
        'show_prod_types': show_prod_types,
        'mark_excludable': mark_excludable,
        'can_remove_recommendations': (
            settings.SITE_IS_WRITEABLE and rows
            and rows[0][0].category.event.recommendations_enabled
        ),
    }
Example 6
def import_screenshot(production_id, janeway_id, url, suffix):
    blob = fetch_origin_url(url)
    sha1 = blob.sha1
    img = PILConvertibleImage(blob.as_io_buffer(), name_hint=blob.filename)
    basename = sha1[0:2] + '/' + sha1[2:4] + '/' + sha1[4:8] + '.jw' + str(
        janeway_id) + suffix + '.'

    screenshot = Screenshot(production_id=production_id,
                            data_source='janeway',
                            janeway_id=janeway_id,
                            janeway_suffix=suffix)
    upload_standard(img, screenshot, basename)
    upload_thumb(img, screenshot, basename)
    # leave upload_original until last to prevent things screwing up if the storage
    # closes the original file handle
    upload_original(img, screenshot, basename)
    screenshot.save()
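
The ordering comment above encodes a real hazard: if the storage backend closes the file handle that upload_original handed it, any later resize from the same image fails. Illustratively, with the names from the example (this shows the failure mode, not the fix):

img = PILConvertibleImage(blob.as_io_buffer(), name_hint=blob.filename)
upload_original(img, screenshot, basename)  # backend may close the underlying file here
upload_thumb(img, screenshot, basename)     # ...which would then try to resize from a closed file
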
Example 7
def create_screenshot_from_remote_file(url, production_id):
	try:
		download, file_content = fetch_url(url)
		screenshot = Screenshot(production_id=production_id, source_download_id=download.id)

		buf = cStringIO.StringIO(file_content)
		img = PILConvertibleImage(buf, name_hint=url.split('/')[-1])

		u = download.sha1
		basename = u[0:2] + '/' + u[2:4] + '/' + u[4:8] + '.p' + str(production_id) + '.'
		upload_original(img, screenshot, basename, reduced_redundancy=True)
		upload_standard(img, screenshot, basename)
		upload_thumb(img, screenshot, basename)
		screenshot.save()

	except (urllib2.URLError, FileTooBig):
		# oh well.
		pass
Example 8
def show(request, party_id):
    party = get_object_or_404(Party, id=party_id)

    # trying to retrieve all competition results in one massive prefetch_related clause:
    #    competitions = party.competitions.prefetch_related('placings__production__author_nicks__releaser', 'placings__production__author_affiliation_nicks__releaser').defer('placings__production__notes', 'placings__production__author_nicks__releaser__notes', 'placings__production__author_affiliation_nicks__releaser__notes').order_by('name', 'id', 'placings__position', 'placings__production__id')
    # - fails with 'RelatedObject' object has no attribute 'rel', where the RelatedObject is <RelatedObject: demoscene:competitionplacing related to competition>. Shame, that...
    # for now, we'll do it one compo at a time (which allows us to use the slightly more sane select_related approach to pull in production records)
    competitions_with_placings = [
        (
            competition,
            competition.placings.order_by('position', 'production__id').prefetch_related('production__author_nicks__releaser', 'production__author_affiliation_nicks__releaser', 'production__platforms', 'production__types').defer('production__notes', 'production__author_nicks__releaser__notes', 'production__author_affiliation_nicks__releaser__notes')
        )
        for competition in party.competitions.order_by('name', 'id')
    ]
    entry_production_ids = [
        placing.production_id
        for _, placings in competitions_with_placings
        for placing in placings
    ]
    screenshot_map = Screenshot.select_for_production_ids(entry_production_ids)
    competitions_with_placings_and_screenshots = [
        (
            competition,
            [(placing, screenshot_map.get(placing.production_id)) for placing in placings]
        )
        for competition, placings in competitions_with_placings
    ]

    invitations = party.invitations.prefetch_related('author_nicks__releaser', 'author_affiliation_nicks__releaser', 'platforms', 'types')

    releases = party.releases.prefetch_related('author_nicks__releaser', 'author_affiliation_nicks__releaser', 'platforms', 'types')

    organisers = party.organisers.select_related('releaser').order_by('-releaser__is_group', Lower('releaser__name'))

    external_links = sorted(party.active_external_links.select_related('party'), key=lambda obj: obj.sort_key)

    if request.user.is_authenticated:
        comment = Comment(commentable=party, user=request.user)
        comment_form = CommentForm(instance=comment, prefix="comment")
    else:
        comment_form = None

    return render(request, 'parties/show.html', {
        'party': party,
        'competitions_with_placings_and_screenshots': competitions_with_placings_and_screenshots,
        'results_files': party.results_files.all(),
        'invitations': invitations,
        'releases': releases,
        'organisers': organisers,
        'editing_organisers': (request.GET.get('editing') == 'organisers'),
        'parties_in_series': party.party_series.parties.order_by('start_date_date', 'name').select_related('party_series'),
        'external_links': external_links,
        'comment_form': comment_form,
    })
Example 9
def create_screenshot_from_remote_file(url, production_id):
    try:
        download, file_content = fetch_url(url)
        screenshot = Screenshot(production_id=production_id,
                                source_download_id=download.id)

        buf = cStringIO.StringIO(file_content)
        img = PILConvertibleImage(buf, name_hint=url.split('/')[-1])

        u = download.sha1
        basename = u[0:2] + '/' + u[2:4] + '/' + u[4:8] + '.p' + str(
            production_id) + '.'
        upload_original(img, screenshot, basename, reduced_redundancy=True)
        upload_standard(img, screenshot, basename)
        upload_thumb(img, screenshot, basename)
        screenshot.save()

    except (urllib2.URLError, FileTooBig):
        # oh well.
        pass
Example 10
def combined_releases(releaser):

    credits = (releaser.credits().select_related('nick').prefetch_related(
        'production__author_nicks__releaser',
        'production__author_affiliation_nicks__releaser',
        'production__platforms', 'production__types').defer(
            'production__notes', 'production__author_nicks__releaser__notes',
            'production__author_affiliation_nicks__releaser__notes').order_by(
                '-production__release_date_date', 'production__title',
                'production__id', 'nick__name', 'nick__id'))

    # reorganise credits queryset into a list of
    # (production, nick, [credits_for_that_nick]) records
    credits_by_production = groupby(credits, lambda credit: credit.production)
    # credits_by_production = list of (production, [credits]) records

    credits_by_production_nick = []
    for (production, credits) in credits_by_production:
        for (nick, credits) in groupby(credits, lambda credit: credit.nick):
            record = (production, nick, list(credits))
            credits_by_production_nick.append(record)

    # fetch productions by this releaser which are not already covered by credits
    production_ids = [
        production.id for production, _, _ in credits_by_production_nick
    ]
    productions = releaser.productions().distinct()\
        .exclude(id__in=production_ids)\
        .prefetch_related('author_nicks__releaser', 'author_affiliation_nicks__releaser', 'platforms', 'types')\
        .defer('notes', 'author_nicks__releaser__notes', 'author_affiliation_nicks__releaser__notes')\
        .order_by('-release_date_date', 'release_date_precision', '-sortable_title')

    credits_with_prods = credits_by_production_nick + [(prod, None, None)
                                                       for prod in productions]
    credits_with_prods.sort(
        key=lambda item:
        (item[0].release_date_date is None, item[0].release_date_date),
        reverse=True)

    # get final list of production IDs
    production_ids = [production.id for production, _, _ in credits_with_prods]
    # fetch screenshots for those prods
    screenshot_map = Screenshot.select_for_production_ids(production_ids)
    # produce final credits struct: (production, nick, [credits], screenshot)
    credits_with_prods_and_screenshots = [
        (prod, nick, credits, screenshot_map.get(prod.id))
        for prod, nick, credits in credits_with_prods
    ]

    return {
        'releaser': releaser,
        'credits': credits_with_prods_and_screenshots,
    }
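
itertools.groupby, used above, only groups adjacent items; that is why the credits queryset is ordered by production (and then nick) before grouping. A toy illustration:

from itertools import groupby

keys = ['A', 'A', 'B', 'A']  # keys in iteration order
print([(key, len(list(group))) for key, group in groupby(keys)])
# [('A', 2), ('B', 1), ('A', 1)] - the trailing 'A' starts a fresh group
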
Example 11
def show(request, party_id):
	party = get_object_or_404(Party, id=party_id)

	# trying to retrieve all competition results in one massive prefetch_related clause:
	#    competitions = party.competitions.prefetch_related('placings__production__author_nicks__releaser', 'placings__production__author_affiliation_nicks__releaser').defer('placings__production__notes', 'placings__production__author_nicks__releaser__notes', 'placings__production__author_affiliation_nicks__releaser__notes').order_by('name', 'id', 'placings__position', 'placings__production__id')
	# - fails with 'RelatedObject' object has no attribute 'rel', where the RelatedObject is <RelatedObject: demoscene:competitionplacing related to competition>. Shame, that...
	# for now, we'll do it one compo at a time (which allows us to use the slightly more sane select_related approach to pull in production records)
	competitions_with_placings = [
		(
			competition,
			competition.placings.order_by('position', 'production__id').prefetch_related('production__author_nicks__releaser', 'production__author_affiliation_nicks__releaser', 'production__platforms', 'production__types').defer('production__notes', 'production__author_nicks__releaser__notes', 'production__author_affiliation_nicks__releaser__notes')
		)
		for competition in party.competitions.order_by('name', 'id')
	]
	entry_production_ids = [
		placing.production_id
		for _, placings in competitions_with_placings
		for placing in placings
	]
	screenshot_map = Screenshot.select_for_production_ids(entry_production_ids)
	competitions_with_placings_and_screenshots = [
		(
			competition,
			[(placing, screenshot_map.get(placing.production_id)) for placing in placings]
		)
		for competition, placings in competitions_with_placings
	]

	invitations = party.invitations.prefetch_related('author_nicks__releaser', 'author_affiliation_nicks__releaser', 'platforms', 'types')

	releases = party.releases.prefetch_related('author_nicks__releaser', 'author_affiliation_nicks__releaser', 'platforms', 'types')

	external_links = sorted(party.external_links.select_related('party'), key=lambda obj: obj.sort_key)

	if request.user.is_authenticated:
		comment = Comment(commentable=party, user=request.user)
		comment_form = CommentForm(instance=comment, prefix="comment")
	else:
		comment_form = None

	return render(request, 'parties/show.html', {
		'party': party,
		'competitions_with_placings_and_screenshots': competitions_with_placings_and_screenshots,
		'results_files': party.results_files.all(),
		'invitations': invitations,
		'releases': releases,
		'parties_in_series': party.party_series.parties.order_by('start_date_date', 'name').select_related('party_series'),
		'external_links': external_links,
		'comment_form': comment_form,
	})
Example 12
def production_listing(productions, show_screenshots=False, show_prod_types=False, mark_excludable=False):
    if show_screenshots:
        screenshots = Screenshot.select_for_production_ids([prod.id for prod in productions])
    else:
        screenshots = {}

    productions_and_screenshots = [
        (production, screenshots.get(production.id))
        for production in productions
    ]
    return {
        'productions_and_screenshots': productions_and_screenshots,
        'show_screenshots': show_screenshots,
        'show_prod_types': show_prod_types,
        'mark_excludable': mark_excludable,
        'site_is_writeable': settings.SITE_IS_WRITEABLE,
    }
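
production_listing and its recommended_* variant return plain context dicts, which is the shape Django inclusion tags expect. Assuming that is how they are registered (the registration is not shown here, and the template path is invented), the wiring would look something like:

from django import template

register = template.Library()

@register.inclusion_tag('productions/tags/production_listing.html')
def production_listing(productions, show_screenshots=False, show_prod_types=False, mark_excludable=False):
    ...

# in a template: {% production_listing prods show_screenshots=True %}
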
Example 13
def combined_releases(releaser):

	credits = releaser.credits().select_related('nick')\
		.prefetch_related('production__author_nicks__releaser', 'production__author_affiliation_nicks__releaser', 'production__platforms', 'production__types')\
		.defer('production__notes', 'production__author_nicks__releaser__notes', 'production__author_affiliation_nicks__releaser__notes')\
		.order_by('-production__release_date_date', 'production__title', 'production__id', 'nick__name', 'nick__id')

	# reorganise credits queryset into a list of
	# (production, nick, [credits_for_that_nick]) records
	credits_by_production = groupby(credits, lambda credit: credit.production)
	# credits_by_production = list of (production, [credits]) records

	credits_by_production_nick = []
	for (production, credits) in credits_by_production:
		for (nick, credits) in groupby(credits, lambda credit: credit.nick):
			record = (production, nick, list(credits))
			credits_by_production_nick.append(record)

	# fetch productions by this releaser which are not already covered by credits
	production_ids = [production.id for production, _, _ in credits_by_production_nick]
	productions = releaser.productions().distinct()\
		.exclude(id__in=production_ids)\
		.prefetch_related('author_nicks__releaser', 'author_affiliation_nicks__releaser', 'platforms', 'types')\
		.defer('notes', 'author_nicks__releaser__notes', 'author_affiliation_nicks__releaser__notes')\
		.order_by('-release_date_date', 'release_date_precision', '-sortable_title')

	credits_with_prods = credits_by_production_nick + [(prod, None, None) for prod in productions]
	credits_with_prods.sort(key=lambda item: (item[0].release_date_date is None, item[0].release_date_date), reverse=True)

	# get final list of production IDs
	production_ids = [production.id for production, _, _ in credits_with_prods]
	# fetch screenshots for those prods
	screenshot_map = Screenshot.select_for_production_ids(production_ids)
	# produce final credits struct: (production, nick, [credits], screenshot)
	credits_with_prods_and_screenshots = [
		(prod, nick, credits, screenshot_map.get(prod.id))
		for prod, nick, credits in credits_with_prods
	]

	return {
		'releaser': releaser,
		'credits': credits_with_prods_and_screenshots,
	}
Example 14
def show(request, competition_id):
    competition = get_object_or_404(Competition, id=competition_id)

    placings = competition.placings.order_by(
        'position', 'production__id').prefetch_related(
            'production__author_nicks__releaser',
            'production__author_affiliation_nicks__releaser').defer(
                'production__notes',
                'production__author_nicks__releaser__notes',
                'production__author_affiliation_nicks__releaser__notes')
    entry_production_ids = [placing.production_id for placing in placings]
    screenshot_map = Screenshot.select_for_production_ids(entry_production_ids)
    placings = [(placing, screenshot_map.get(placing.production_id))
                for placing in placings]

    return render(request, 'competitions/show.html', {
        'competition': competition,
        'placings': placings,
    })
Example 15
def create_screenshot_from_production_link(production_link_id):
    try:
        prod_link = ProductionLink.objects.get(id=production_link_id)
    except ProductionLink.DoesNotExist:
        # guess it was deleted in the meantime, then.
        return

    if prod_link.production.screenshots.count():
        # don't create a screenshot if there's one already
        if prod_link.is_unresolved_for_screenshotting:
            prod_link.is_unresolved_for_screenshotting = False
            prod_link.save()
        return

    if prod_link.has_bad_image:
        return  # don't create a screenshot if a previous attempt has failed during image processing

    production_id = prod_link.production_id
    url = prod_link.download_url
    blob = fetch_link(prod_link)
    sha1 = blob.sha1

    if prod_link.is_zip_file():
        # select the archive member to extract a screenshot from, if we don't have
        # a candidate already
        archive_members = ArchiveMember.objects.filter(archive_sha1=sha1)
        if not prod_link.file_for_screenshot:
            file_for_screenshot = select_screenshot_file(archive_members)
            if file_for_screenshot:
                prod_link.file_for_screenshot = file_for_screenshot
                prod_link.is_unresolved_for_screenshotting = False
            else:
                prod_link.is_unresolved_for_screenshotting = True
            prod_link.save()

        image_extension = prod_link.file_for_screenshot.split('.')[-1].lower()
        if image_extension in USABLE_IMAGE_FILE_EXTENSIONS:
            z = blob.as_zipfile()
            # we encode the filename as iso-8859-1 before retrieving it, because we
            # decoded it that way on insertion into the database to ensure that it had
            # a valid unicode string representation - see mirror/models.py
            try:
                member_buf = cStringIO.StringIO(
                    z.read(prod_link.file_for_screenshot.encode('iso-8859-1')))
            except zipfile.BadZipfile:
                prod_link.has_bad_image = True
                prod_link.save()
                z.close()
                return

            z.close()
            try:
                img = PILConvertibleImage(
                    member_buf, name_hint=prod_link.file_for_screenshot)
            except IOError:
                prod_link.has_bad_image = True
                prod_link.save()
                return
        else:  # image is not a usable format
            return
    else:
        try:
            img = PILConvertibleImage(blob.as_io_buffer(),
                                      name_hint=url.split('/')[-1])
        except IOError:
            prod_link.has_bad_image = True
            prod_link.save()
            return

    screenshot = Screenshot(production_id=production_id)
    basename = sha1[0:2] + '/' + sha1[2:4] + '/' + sha1[4:8] + '.pl' + str(
        production_link_id) + '.'
    try:
        upload_original(img, screenshot, basename, reduced_redundancy=True)
        upload_standard(img, screenshot, basename)
        upload_thumb(img, screenshot, basename)
    except IOError:
        prod_link.has_bad_image = True
        prod_link.save()
        return
    screenshot.save()
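
The iso-8859-1 dance in the zip-reading branch deserves a note: latin-1 maps every byte value 0-255 to a code point, so decoding on insertion can never fail and encoding on the way back out is lossless. A quick demonstration:

raw = b'SCREEN\xdbSHOT.PNG'              # arbitrary bytes from a zip entry name
name = raw.decode('iso-8859-1')          # cannot fail: every byte maps to a char
assert name.encode('iso-8859-1') == raw  # the round-trip is lossless
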
Example 16
    def search(self, page_number=1, count=50):
        query = self.cleaned_data['q']

        # Look for filter expressions within query
        filter_expressions = collections.defaultdict(set)
        tag_names = set()

        def apply_filter(match):
            key, val = match.groups()
            if key in RECOGNISED_FILTER_KEYS:
                filter_expressions[key].add(val)
                return ''
            else:
                # the filter has not been recognised;
                # leave the original string intact to be handled as a search term
                return match.group(0)

        for filter_re in (FILTER_RE_ONEWORD, FILTER_RE_SINGLEQUOTE,
                          FILTER_RE_DOUBLEQUOTE):
            query = filter_re.sub(apply_filter, query)

        def apply_tag(match):
            tag_names.add(match.group(1))
            return ''

        query = TAG_RE.sub(apply_tag, query)

        asciified_query = unidecode(query).strip()
        has_search_term = bool(asciified_query)
        if has_search_term:
            psql_query = SearchQuery(unidecode(query))
            clean_query = generate_search_title(query)
            production_filter_q = Q(search_document=psql_query)
            releaser_filter_q = Q(search_document=psql_query)
            party_filter_q = Q(search_document=psql_query)
            bbs_filter_q = Q(search_document=psql_query)
        else:
            production_filter_q = Q()
            releaser_filter_q = Q()
            party_filter_q = Q()
            bbs_filter_q = Q()

        subqueries_to_perform = set(['production', 'releaser', 'party', 'bbs'])

        if 'platform' in filter_expressions or 'on' in filter_expressions:
            subqueries_to_perform &= set(['production'])
            platforms = filter_expressions['platform'] | filter_expressions['on']

            platform_ids = Platform.objects.none().values_list('id', flat=True)
            for platform_name in platforms:
                platform_ids |= (Platform.objects.filter(
                    Q(name__iexact=platform_name)
                    | Q(aliases__name__iexact=platform_name)).values_list(
                        'id', flat=True))

            production_filter_q &= Q(platforms__id__in=list(platform_ids))

        if 'screenshot' in filter_expressions or 'screenshots' in filter_expressions:
            subqueries_to_perform &= set(['production'])

            for flag in filter_expressions['screenshot'] | filter_expressions['screenshots']:
                if flag in ('yes', 'true'):
                    production_filter_q &= Q(has_screenshot=True)
                elif flag in ('no', 'false'):
                    production_filter_q &= Q(has_screenshot=False)

        if 'by' in filter_expressions or 'author' in filter_expressions:
            subqueries_to_perform &= set(['production'])
            for name in filter_expressions['by'] | filter_expressions['author']:
                clean_name = generate_search_title(name)
                production_filter_q &= (
                    # join back through releaser so that we match any nick variant ever used by the author,
                    # not just the nick used on the prod. Better to err on the side of being too liberal
                    Q(author_nicks__releaser__nicks__variants__search_title=clean_name)
                    | Q(author_affiliation_nicks__releaser__nicks__variants__search_title=clean_name)
                )

        if 'of' in filter_expressions:
            subqueries_to_perform &= set(['releaser'])
            for name in filter_expressions['of']:
                clean_name = generate_search_title(name)
                releaser_filter_q &= Q(
                    is_group=False,
                    group_memberships__group__nicks__variants__search_title=clean_name,
                )

        if 'group' in filter_expressions:
            subqueries_to_perform &= set(['production', 'releaser'])
            for name in filter_expressions['group']:
                clean_name = generate_search_title(name)
                releaser_filter_q &= Q(
                    is_group=False,
                    group_memberships__group__nicks__variants__search_title=clean_name,
                )
                production_filter_q &= (
                    # join back through releaser so that we match any nick variant ever used by the author,
                    # not just the nick used on the prod. Better to err on the side of being too liberal
                    Q(author_nicks__releaser__is_group=True,
                      author_nicks__releaser__nicks__variants__search_title=clean_name)
                    | Q(author_affiliation_nicks__releaser__nicks__variants__search_title=clean_name)
                )

        if tag_names or ('tagged' in filter_expressions):
            subqueries_to_perform &= set(['production'])
            for tag_name in filter_expressions['tagged'] | tag_names:
                production_filter_q &= Q(tags__name=tag_name)

        if 'year' in filter_expressions or 'date' in filter_expressions:
            subqueries_to_perform &= set(['production', 'party'])
            for date_str in filter_expressions['year'] | filter_expressions['date']:
                try:
                    date_expr = FuzzyDate.parse(date_str)
                except ValueError:
                    continue

                production_filter_q &= Q(
                    release_date_date__gte=date_expr.date_range_start(),
                    release_date_date__lte=date_expr.date_range_end())
                party_filter_q &= Q(
                    end_date_date__gte=date_expr.date_range_start(),
                    start_date_date__lte=date_expr.date_range_end())

        if 'before' in filter_expressions:
            subqueries_to_perform &= set(['production', 'party'])
            for date_str in filter_expressions['before']:
                try:
                    date_expr = FuzzyDate.parse(date_str)
                except ValueError:
                    continue

                production_filter_q &= Q(
                    release_date_date__lt=date_expr.date_range_start())
                party_filter_q &= Q(
                    start_date_date__lt=date_expr.date_range_start())

        if 'until' in filter_expressions:
            subqueries_to_perform &= set(['production', 'party'])
            for date_str in filter_expressions['until']:
                try:
                    date_expr = FuzzyDate.parse(date_str)
                except ValueError:
                    continue

                production_filter_q &= Q(
                    release_date_date__lte=date_expr.date_range_end())
                party_filter_q &= Q(
                    start_date_date__lte=date_expr.date_range_end())

        if 'after' in filter_expressions:
            subqueries_to_perform &= set(['production', 'party'])
            for date_str in filter_expressions['after']:
                try:
                    date_expr = FuzzyDate.parse(date_str)
                except ValueError:
                    continue

                production_filter_q &= Q(
                    release_date_date__gt=date_expr.date_range_end())
                party_filter_q &= Q(
                    end_date_date__gt=date_expr.date_range_end())

        if 'since' in filter_expressions:
            subqueries_to_perform &= set(['production', 'party'])
            for date_str in filter_expressions['since']:
                try:
                    date_expr = FuzzyDate.parse(date_str)
                except ValueError:
                    continue

                production_filter_q &= Q(
                    release_date_date__gte=date_expr.date_range_start())
                party_filter_q &= Q(
                    end_date_date__gte=date_expr.date_range_start())

        if 'type' in filter_expressions:
            requested_types = filter_expressions['type']
            subqueries_from_type = set()
            filter_by_prod_supertype = False
            production_supertypes = []

            for supertype in ('production', 'graphics', 'music'):
                if supertype in requested_types:
                    filter_by_prod_supertype = True
                    production_supertypes.append(supertype)

            if filter_by_prod_supertype:
                subqueries_from_type.add('production')
                production_filter_q &= Q(supertype__in=production_supertypes)

            if 'releaser' in requested_types or 'scener' in requested_types or 'group' in requested_types:
                subqueries_from_type.add('releaser')

                if 'scener' in requested_types and not (
                        'releaser' in requested_types
                        or 'group' in requested_types):
                    releaser_filter_q &= Q(is_group=False)

                if 'group' in requested_types and not (
                        'releaser' in requested_types
                        or 'scener' in requested_types):
                    releaser_filter_q &= Q(is_group=True)

            if 'party' in requested_types:
                subqueries_from_type.add('party')

            if 'bbs' in requested_types:
                subqueries_from_type.add('bbs')

            # assume that any otherwise-unrecognised 'type' values indicate a production type
            production_types = set()
            for val in requested_types:
                if val not in ('production', 'graphics', 'music', 'scener',
                               'group', 'releaser', 'party', 'bbs'):
                    production_types.add(val)

            if production_types:
                prod_type_names_q = Q()
                for name in production_types:
                    prod_type_names_q |= Q(name__iexact=name)
                prod_type_ids = ProductionType.objects.filter(
                    prod_type_names_q).values_list('id', flat=True)

                subqueries_from_type.add('production')
                production_filter_q &= Q(types__in=prod_type_ids)

            subqueries_to_perform &= subqueries_from_type

        # Construct the master search query as a union of subqueries that search
        # one model each. Each subquery yields a queryset of dicts with the following fields:
        # 'type': 'production', 'releaser' or 'party'
        # 'pk': primary key of the relevant object
        # 'exactness': magic number used to prioritise exact/prefix title matches in the ordering:
        #     2 = (the cleaned version of) the title exactly matches (the cleaned version of) the search query
        #     1 = (the cleaned version of) the title starts with (the cleaned version of) the search query
        #     0 = neither of the above
        # 'rank': search ranking as calculated by postgres search

        # start with an empty queryset
        if has_search_term:
            rank_annotation = SearchRank(F('search_document'), psql_query)
        else:
            rank_annotation = models.Value('', output_field=models.CharField())

        qs = Production.objects.annotate(
            type=models.Value('empty', output_field=models.CharField()),
            exactness=models.Value(0, output_field=models.IntegerField()),
            rank=rank_annotation).values('pk', 'type', 'exactness',
                                         'rank').none()

        if 'production' in subqueries_to_perform:
            # Search for productions

            if has_search_term:
                rank_annotation = SearchRank(F('search_document'), psql_query)
                exactness_annotation = models.Case(
                    models.When(search_title=clean_query,
                                then=models.Value(2)),
                    models.When(search_title__startswith=clean_query,
                                then=models.Value(1)),
                    default=models.Value(0,
                                         output_field=models.IntegerField()),
                    output_field=models.IntegerField())
            else:
                rank_annotation = F('sortable_title')
                exactness_annotation = models.Value(
                    0, output_field=models.IntegerField())

            qs = qs.union(
                Production.objects.annotate(
                    rank=rank_annotation,
                    type=models.Value('production',
                                      output_field=models.CharField()),
                    exactness=exactness_annotation).
                filter(production_filter_q).order_by(
                    # empty order_by to cancel the Production model's native ordering
                ).distinct().values('pk', 'type', 'exactness', 'rank'))

        if 'releaser' in subqueries_to_perform:
            # Search for releasers

            if has_search_term:
                rank_annotation = SearchRank(F('search_document'), psql_query)
                # Exactness test will be applied to each of the releaser's nick variants;
                # take the highest result
                exactness_annotation = models.Max(
                    models.Case(
                        models.When(nicks__variants__search_title=clean_query,
                                    then=models.Value(2)),
                        models.When(nicks__variants__search_title__startswith=clean_query,
                                    then=models.Value(1)),
                        default=models.Value(
                            0, output_field=models.IntegerField()),
                        output_field=models.IntegerField()))
            else:
                rank_annotation = F('name')
                exactness_annotation = models.Value(
                    0, output_field=models.IntegerField())

            qs = qs.union(
                Releaser.objects.annotate(
                    rank=rank_annotation,
                    type=models.Value('releaser',
                                      output_field=models.CharField()),
                    exactness=exactness_annotation).filter(releaser_filter_q).
                distinct().order_by(
                    # empty order_by to cancel the Releaser model's native ordering
                ).values('pk', 'type', 'exactness', 'rank'))

        if 'party' in subqueries_to_perform:
            # Search for parties

            if has_search_term:
                rank_annotation = SearchRank(F('search_document'), psql_query)
                exactness_annotation = models.Case(
                    models.When(search_title=clean_query,
                                then=models.Value(2)),
                    models.When(search_title__startswith=clean_query,
                                then=models.Value(1)),
                    default=models.Value(0,
                                         output_field=models.IntegerField()),
                    output_field=models.IntegerField())
            else:
                rank_annotation = F('name')
                exactness_annotation = models.Value(
                    0, output_field=models.IntegerField())

            qs = qs.union(
                Party.objects.annotate(
                    rank=rank_annotation,
                    type=models.Value('party',
                                      output_field=models.CharField()),
                    exactness=exactness_annotation,
                ).filter(party_filter_q).order_by(
                    # empty order_by to cancel the Party model's native ordering
                ).values('pk', 'type', 'exactness', 'rank'), )

        if 'bbs' in subqueries_to_perform:
            # Search for BBSes

            if has_search_term:
                rank_annotation = SearchRank(F('search_document'), psql_query)
                exactness_annotation = models.Case(
                    models.When(search_title=clean_query,
                                then=models.Value(2)),
                    models.When(search_title__startswith=clean_query,
                                then=models.Value(1)),
                    default=models.Value(0,
                                         output_field=models.IntegerField()),
                    output_field=models.IntegerField())
            else:
                rank_annotation = F('name')
                exactness_annotation = models.Value(
                    0, output_field=models.IntegerField())

            qs = qs.union(
                BBS.objects.annotate(
                    rank=rank_annotation,
                    type=models.Value('bbs', output_field=models.CharField()),
                    exactness=exactness_annotation,
                ).filter(bbs_filter_q).order_by(
                    # empty order_by to cancel any model-level native ordering
                ).values('pk', 'type', 'exactness', 'rank'), )

        if has_search_term:
            qs = qs.order_by('-exactness', '-rank', 'pk')
        else:
            qs = qs.order_by('-exactness', 'rank', 'pk')

        # Apply pagination to the query before performing the (expensive) real data fetches.

        paginator = Paginator(qs, count)
        # If the requested page is out of range, deliver the last page of results.
        try:
            page = paginator.page(page_number)
        except (EmptyPage, InvalidPage):
            page = paginator.page(paginator.num_pages)

        # Assemble the results into a plan for fetching the actual models -
        # form a dict that maps model/type to a set of PKs
        to_fetch = {}
        for d in page.object_list:
            to_fetch.setdefault(d['type'], set()).add(d['pk'])

        # now do the fetches, and store the results as a mapping of (type, pk) tuple to object
        fetched = {}

        if 'production' in to_fetch:
            production_ids = to_fetch['production']
            productions = Production.objects.filter(
                pk__in=production_ids).prefetch_related(
                    'author_nicks__releaser',
                    'author_affiliation_nicks__releaser')
            if has_search_term:
                productions = productions.annotate(
                    search_snippet=TSHeadline('notes', psql_query))
            screenshots = Screenshot.select_for_production_ids(production_ids)

            for prod in productions:
                prod.selected_screenshot = screenshots.get(prod.pk)
                # Ignore any search snippets that don't actually contain a highlighted term
                prod.has_search_snippet = has_search_term and '<b>' in prod.search_snippet
                fetched[('production', prod.pk)] = prod

        if 'releaser' in to_fetch:
            releasers = Releaser.objects.filter(
                pk__in=to_fetch['releaser']).prefetch_related(
                    'group_memberships__group__nicks', 'nicks')
            if has_search_term:
                releasers = releasers.annotate(
                    search_snippet=TSHeadline('notes', psql_query))
            for releaser in releasers:
                releaser.has_search_snippet = has_search_term and '<b>' in releaser.search_snippet
                fetched[('releaser', releaser.pk)] = releaser

        if 'party' in to_fetch:
            parties = Party.objects.filter(pk__in=to_fetch['party'])
            if has_search_term:
                parties = parties.annotate(
                    search_snippet=TSHeadline('notes', psql_query))
            for party in parties:
                party.has_search_snippet = has_search_term and '<b>' in party.search_snippet
                fetched[('party', party.pk)] = party

        if 'bbs' in to_fetch:
            bbses = BBS.objects.filter(pk__in=to_fetch['bbs'])
            if has_search_term:
                bbses = bbses.annotate(
                    search_snippet=TSHeadline('notes', psql_query))
            for bbs in bbses:
                bbs.has_search_snippet = has_search_term and '<b>' in bbs.search_snippet
                fetched[('bbs', bbs.pk)] = bbs

        # Build final list in same order as returned by the original results query
        results = []
        for d in page.object_list:
            item = fetched.get((d['type'], d['pk'])) or None
            if item:
                item.search_info = d
                results.append(item)

        return (results, page)
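
The FILTER_RE_* and TAG_RE patterns are referenced but not defined in this excerpt. Plausible shapes, consistent with how apply_filter and apply_tag consume their match groups (the exact regexes are assumptions):

import re

FILTER_RE_ONEWORD = re.compile(r'(\w+):([^\s\'"]\S*)')  # e.g. platform:amiga
FILTER_RE_SINGLEQUOTE = re.compile(r"(\w+):'([^']*)'")  # e.g. by:'fairlight'
FILTER_RE_DOUBLEQUOTE = re.compile(r'(\w+):"([^"]*)"')  # e.g. of:"the black lotus"
TAG_RE = re.compile(r'\[([^\]]+)\]')                    # e.g. [cracktro]
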
Example 17
def create_screenshot_from_production_link(production_link_id):
    try:
        prod_link = ProductionLink.objects.get(id=production_link_id)
    except ProductionLink.DoesNotExist:
        # guess it was deleted in the meantime, then.
        return

    if prod_link.production.screenshots.count():
        # don't create a screenshot if there's one already
        if prod_link.is_unresolved_for_screenshotting:
            prod_link.is_unresolved_for_screenshotting = False
            prod_link.save()
        return

    if prod_link.has_bad_image:
        return  # don't create a screenshot if a previous attempt has failed during image processing

    production_id = prod_link.production_id
    url = prod_link.download_url
    blob = fetch_link(prod_link)
    sha1 = blob.sha1

    if prod_link.is_zip_file():
        # select the archive member to extract a screenshot from, if we don't have
        # a candidate already
        archive_members = ArchiveMember.objects.filter(archive_sha1=sha1)
        if not prod_link.file_for_screenshot:
            file_for_screenshot = select_screenshot_file(archive_members)
            if file_for_screenshot:
                prod_link.file_for_screenshot = file_for_screenshot
                prod_link.is_unresolved_for_screenshotting = False
            else:
                prod_link.is_unresolved_for_screenshotting = True
            prod_link.save()

        image_extension = prod_link.file_for_screenshot.split('.')[-1].lower()
        if image_extension in USABLE_IMAGE_FILE_EXTENSIONS:
            z = None
            try:
                z = blob.as_zipfile()
                # decode the filename as stored in the db
                filename = unpack_db_zip_filename(
                    prod_link.file_for_screenshot)

                member_buf = io.BytesIO(z.read(filename))
            except zipfile.BadZipfile:
                prod_link.has_bad_image = True
                prod_link.save()
                if z:  # pragma: no cover
                    z.close()
                return

            z.close()
            try:
                img = PILConvertibleImage(
                    member_buf, name_hint=prod_link.file_for_screenshot)
            except IOError:
                prod_link.has_bad_image = True
                prod_link.save()
                return
        else:  # image is not a usable format
            return
    else:
        try:
            img = PILConvertibleImage(blob.as_io_buffer(),
                                      name_hint=url.split('/')[-1])
        except IOError:
            prod_link.has_bad_image = True
            prod_link.save()
            return

    screenshot = Screenshot(production_id=production_id)
    basename = sha1[0:2] + '/' + sha1[2:4] + '/' + sha1[4:8] + '.pl' + str(
        production_link_id) + '.'
    try:
        upload_standard(img, screenshot, basename)
        upload_thumb(img, screenshot, basename)
        # leave original until last, because if it's already a websafe format it'll just return
        # the original file handle, and the storage backend might close the file after uploading
        # which screws with PIL's ability to create resized versions...
        upload_original(img, screenshot, basename)
    except IOError:  # pragma: no cover
        prod_link.has_bad_image = True
        prod_link.save()
        return
    screenshot.save()
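
unpack_db_zip_filename replaces the inline encode('iso-8859-1') of the earlier versions. A speculative sketch of its body under Python 3, where zipfile decodes member names as cp437 unless the entry is flagged as UTF-8 (this implementation is an assumption, not the real one):

def unpack_db_zip_filename(stored_name):
    # recover the bytes from the latin-1 decoding used at insertion time,
    # then decode them the way Python 3's zipfile names its members
    raw = stored_name.encode('iso-8859-1')
    try:
        return raw.decode('utf-8')
    except UnicodeDecodeError:
        return raw.decode('cp437')
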
Example 18
    def test_str(self):
        pondlife = Production.objects.get(title='Pondlife')
        screenshot = Screenshot(production=pondlife,
                                original_url='http://example.com/pondlife.png')
        self.assertEqual(str(screenshot),
                         "Pondlife - http://example.com/pondlife.png")
Example 19
	def search(self, with_real_names=False, page_number=1, count=50):
		query = self.cleaned_data['q']

		# Look for filter expressions within query
		filter_expressions = collections.defaultdict(set)
		tag_names = set()

		def apply_filter(match):
			key, val = match.groups()
			if key in RECOGNISED_FILTER_KEYS:
				filter_expressions[key].add(val)
				return ''
			else:
				# the filter has not been recognised;
				# leave the original string intact to be handled as a search term
				return match.group(0)

		for filter_re in (FILTER_RE_ONEWORD, FILTER_RE_SINGLEQUOTE, FILTER_RE_DOUBLEQUOTE):
			query = filter_re.sub(apply_filter, query)

		def apply_tag(match):
			tag_names.add(match.group(1))
			return ''

		query = TAG_RE.sub(apply_tag, query)

		psql_query = SearchQuery(unidecode(query))
		clean_query = generate_search_title(query)
		rank_annotation = SearchRank(F('search_document'), psql_query)

		subqueries_to_perform = set(['production', 'releaser', 'party'])

		production_filter_q = Q(search_document=psql_query)

		if with_real_names:
			releaser_filter_q = Q(admin_search_document=psql_query)
			releaser_rank_annotation = SearchRank(F('admin_search_document'), psql_query)
		else:
			releaser_filter_q = Q(search_document=psql_query)
			releaser_rank_annotation = rank_annotation

		party_filter_q = Q(search_document=psql_query)

		if 'platform' in filter_expressions or 'on' in filter_expressions:
			subqueries_to_perform &= set(['production'])
			platforms = filter_expressions['platform'] | filter_expressions['on']

			platform_ids = Platform.objects.none().values_list('id', flat=True)
			for platform_name in platforms:
				platform_ids |= Platform.objects.filter(Q(name__iexact=platform_name) | Q(aliases__name__iexact=platform_name)).values_list('id', flat=True)

			production_filter_q &= Q(platforms__id__in=list(platform_ids))

		if 'by' in filter_expressions or 'author' in filter_expressions:
			subqueries_to_perform &= set(['production'])
			for name in filter_expressions['by'] | filter_expressions['author']:
				clean_name = generate_search_title(name)
				production_filter_q &= (
					# join back through releaser so that we match any nick variant ever used by the author,
					# not just the nick used on the prod. Better to err on the side of being too liberal
					Q(author_nicks__releaser__nicks__variants__search_title=clean_name) |
					Q(author_affiliation_nicks__releaser__nicks__variants__search_title=clean_name)
				)

		if 'of' in filter_expressions:
			subqueries_to_perform &= set(['releaser'])
			for name in filter_expressions['of']:
				clean_name = generate_search_title(name)
				releaser_filter_q &= Q(
					is_group=False, group_memberships__group__nicks__variants__search_title=clean_name
				)

		if 'group' in filter_expressions:
			subqueries_to_perform &= set(['production', 'releaser'])
			for name in filter_expressions['group']:
				clean_name = generate_search_title(name)
				releaser_filter_q &= Q(
					is_group=False, group_memberships__group__nicks__variants__search_title=clean_name
				)
				production_filter_q &= (
					# join back through releaser so that we match any nick variant ever used by the author,
					# not just the nick used on the prod. Better to err on the side of being too liberal
					Q(
						author_nicks__releaser__is_group=True,
						author_nicks__releaser__nicks__variants__search_title=clean_name
					) | Q(
						author_affiliation_nicks__releaser__nicks__variants__search_title=clean_name
					)
				)

		if tag_names or ('tagged' in filter_expressions):
			subqueries_to_perform &= set(['production'])
			for tag_name in filter_expressions['tagged'] | tag_names:
				production_filter_q &= Q(tags__name=tag_name)

		if 'year' in filter_expressions:
			subqueries_to_perform &= set(['production', 'party'])
			for year_str in filter_expressions['year']:
				try:
					year = int(year_str)
				except ValueError:
					continue

				production_filter_q &= Q(release_date_date__year=year)
				party_filter_q &= (Q(start_date_date__year=year) | Q(end_date_date__year=year))

		if 'type' in filter_expressions:
			requested_types = filter_expressions['type']
			subqueries_from_type = set()
			filter_by_prod_supertype = False
			production_supertypes = []

			for supertype in ('production', 'graphics', 'music'):
				if supertype in requested_types:
					filter_by_prod_supertype = True
					production_supertypes.append(supertype)

			if filter_by_prod_supertype:
				subqueries_from_type.add('production')
				production_filter_q &= Q(supertype__in=production_supertypes)

			if 'releaser' in requested_types or 'scener' in requested_types or 'group' in requested_types:
				subqueries_from_type.add('releaser')

				if 'scener' in requested_types and not ('releaser' in requested_types or 'group' in requested_types):
					releaser_filter_q &= Q(is_group=False)

				if 'group' in requested_types and not ('releaser' in requested_types or 'scener' in requested_types):
					releaser_filter_q &= Q(is_group=True)

			if 'party' in requested_types:
				subqueries_from_type.add('party')

			# assume that any otherwise-unrecognised 'type' values indicate a production type
			production_types = set()
			for val in requested_types:
				if val not in ('production', 'graphics', 'music', 'scener', 'group', 'releaser', 'party'):
					production_types.add(val)

			if production_types:
				subqueries_from_type.add('production')
				production_filter_q &= Q(types__name__in=production_types)

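			# keep only the subqueries for the model types that the 'type' filter actually named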
			subqueries_to_perform &= subqueries_from_type

		# Construct the master search query as a union of subqueries that search
		# one model each. Each subquery yields a queryset of dicts with the following fields:
		# 'type': 'production', 'releaser' or 'party'
		# 'pk': primary key of the relevant object
		# 'exactness': magic number used to prioritise exact/prefix title matches in the ordering:
		#     2 = (the cleaned version of) the title exactly matches (the cleaned version of) the search query
		#     1 = (the cleaned version of) the title starts with (the cleaned version of) the search query
		#     0 = neither of the above
		# 'rank': search ranking as calculated by postgres search

		# start with an empty queryset
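		# (annotated so its columns match the 'pk'/'type'/'exactness'/'rank' signature that every union() branch below must share)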
		qs = Production.objects.annotate(
			type=models.Value('empty', output_field=models.CharField()),
			exactness=models.Value(0, output_field=models.IntegerField()),
			rank=rank_annotation
		).values('pk', 'type', 'exactness', 'rank').none()

		if 'production' in subqueries_to_perform:
			# Search for productions
			qs = qs.union(
				Production.objects.annotate(
					rank=rank_annotation,
					type=models.Value('production', output_field=models.CharField()),
					exactness=models.Case(
						models.When(search_title=clean_query, then=models.Value(2)),
						models.When(search_title__startswith=clean_query, then=models.Value(1)),
						default=models.Value(0, output_field=models.IntegerField()),
						output_field=models.IntegerField()
					)
				).filter(
					production_filter_q
				).order_by(
					# empty order_by to cancel the Production model's native ordering
				).distinct().values('pk', 'type', 'exactness', 'rank')
			)

		if 'releaser' in subqueries_to_perform:
			# Search for releasers
			qs = qs.union(
				Releaser.objects.annotate(
					rank=releaser_rank_annotation,
					type=models.Value('releaser', output_field=models.CharField()),
					# Exactness test will be applied to each of the releaser's nick variants;
					# take the highest result
					exactness=models.Max(models.Case(
						models.When(nicks__variants__search_title=clean_query, then=models.Value(2)),
						models.When(nicks__variants__search_title__startswith=clean_query, then=models.Value(1)),
						default=models.Value(0, output_field=models.IntegerField()),
						output_field=models.IntegerField()
					))
				).filter(
					releaser_filter_q
				).order_by(
					# empty order_by to cancel the Releaser model's native ordering
				).values('pk', 'type', 'exactness', 'rank')
			)

		if 'party' in subqueries_to_perform:
			# Search for parties
			qs = qs.union(
				Party.objects.annotate(
					rank=rank_annotation,
					type=models.Value('party', output_field=models.CharField()),
					exactness=models.Case(
						models.When(search_title=clean_query, then=models.Value(2)),
						models.When(search_title__startswith=clean_query, then=models.Value(1)),
						default=models.Value(0, output_field=models.IntegerField()),
						output_field=models.IntegerField()
					)
				).filter(
					party_filter_q
				).order_by(
					# empty order_by to cancel the Party model's native ordering
				).values('pk', 'type', 'exactness', 'rank')
			)

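		# exact title matches sort first, then prefix matches, then by search rank; pk keeps the ordering stable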
		qs = qs.order_by('-exactness', '-rank', 'pk')

		# Apply pagination to the query before performing the (expensive) real data fetches.

		paginator = Paginator(qs, count)
		# If the requested page number is out of range, deliver the last page of results.
		try:
			page = paginator.page(page_number)
		except (EmptyPage, InvalidPage):
			page = paginator.page(paginator.num_pages)

		# Assemble the results into a plan for fetching the actual models -
		# form a dict that maps model/type to a set of PKs
		to_fetch = {}
		for d in page.object_list:
			to_fetch.setdefault(d['type'], set()).add(d['pk'])

		# now do the fetches, and store the results as a mapping of (type, pk) tuple to object
		fetched = {}

		if 'production' in to_fetch:
			production_ids = to_fetch['production']
			productions = Production.objects.filter(pk__in=production_ids).prefetch_related(
				'author_nicks__releaser', 'author_affiliation_nicks__releaser'
			).annotate(
				search_snippet=TSHeadline('notes', psql_query)
			)
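			# screenshots for this page of results, fetched in bulk and keyed by production id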
			screenshots = Screenshot.select_for_production_ids(production_ids)

			for prod in productions:
				prod.selected_screenshot = screenshots.get(prod.pk)
				# Ignore any search snippets that don't actually contain a highlighted term
				prod.has_search_snippet = '<b>' in prod.search_snippet
				fetched[('production', prod.pk)] = prod

		if 'releaser' in to_fetch:
			releasers = Releaser.objects.filter(pk__in=to_fetch['releaser']).prefetch_related(
				'group_memberships__group__nicks', 'nicks'
			).annotate(
				search_snippet=TSHeadline('notes', psql_query)
			)
			for releaser in releasers:
				releaser.has_search_snippet = '<b>' in releaser.search_snippet
				fetched[('releaser', releaser.pk)] = releaser

		if 'party' in to_fetch:
			parties = Party.objects.filter(pk__in=to_fetch['party']).annotate(
				search_snippet=TSHeadline('notes', psql_query)
			)
			for party in parties:
				party.has_search_snippet = '<b>' in party.search_snippet
				fetched[('party', party.pk)] = party

		# Build final list in same order as returned by the original results query
		results = []
		for d in page.object_list:
			item = fetched.get((d['type'], d['pk']))
			if item:
				item.search_info = d
				results.append(item)

		return (results, page)
Example n. 20
def live_search(request):
    query = request.GET.get('q')
    category = request.GET.get('category')
    if query:
        clean_query = generate_search_title(query)

        # start with an empty queryset
        qs = Production.objects.annotate(
            type=models.Value('empty', output_field=models.CharField()),
        ).values('pk', 'type').none()

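        # build a PK-only subquery per searchable category, honouring the optional 'category' parameter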
        if (not category) or category in ('production', 'graphics', 'music'):
            prod_qs = Production.objects.annotate(
                type=models.Value('production', output_field=models.CharField()),
                name=models.Value('', output_field=models.CharField()),
            ).order_by().filter(
                search_title__startswith=clean_query
            ).values('pk', 'type')
            if category in ('production', 'graphics', 'music'):
                prod_qs = prod_qs.filter(supertype=category)
            qs = qs.union(prod_qs)

        if (not category) or category in ('scener', 'group'):
            releaser_qs = Releaser.objects.annotate(
                type=models.Value('releaser', output_field=models.CharField()),
            ).order_by('pk').filter(
                nicks__variants__search_title__startswith=clean_query
            ).values('pk', 'type').distinct()
            if category in ('scener', 'group'):
                releaser_qs = releaser_qs.filter(is_group=(category == 'group'))
            qs = qs.union(releaser_qs)

        if (not category) or category == 'party':
            qs = qs.union(
                Party.objects.annotate(
                    type=models.Value('party', output_field=models.CharField()),
                ).order_by().filter(
                    search_title__startswith=clean_query
                ).values('pk', 'type')
            )

        if (not category) or category == 'bbs':
            qs = qs.union(
                BBS.objects.annotate(
                    type=models.Value('bbs', output_field=models.CharField()),
                ).order_by().filter(
                    search_title__startswith=clean_query
                ).values('pk', 'type')
            )

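        # evaluate the combined query now, keeping only the first 10 matches for the suggestion list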
        search_result_data = list(qs[:10])

        # Assemble the results into a plan for fetching the actual models -
        # form a dict that maps model/type to a set of PKs
        to_fetch = {}
        for d in search_result_data:
            to_fetch.setdefault(d['type'], set()).add(d['pk'])

        # now do the fetches, and store the results as a mapping of (type, pk) tuple to object
        fetched = {}

        if 'production' in to_fetch:
            production_ids = to_fetch['production']
            productions = Production.objects.filter(pk__in=production_ids).prefetch_related(
                'author_nicks__releaser', 'author_affiliation_nicks__releaser'
            )
            screenshots = Screenshot.select_for_production_ids(production_ids)

            for prod in productions:
                prod.selected_screenshot = screenshots.get(prod.pk)
                fetched[('production', prod.pk)] = prod

        if 'releaser' in to_fetch:
            releasers = Releaser.objects.filter(pk__in=to_fetch['releaser']).prefetch_related(
                'group_memberships__group__nicks', 'nicks'
            )
            for releaser in releasers:
                fetched[('releaser', releaser.pk)] = releaser

        if 'party' in to_fetch:
            parties = Party.objects.filter(pk__in=to_fetch['party'])
            for party in parties:
                fetched[('party', party.pk)] = party

        if 'bbs' in to_fetch:
            bbses = BBS.objects.filter(pk__in=to_fetch['bbs'])
            for bbs in bbses:
                fetched[('bbs', bbs.pk)] = bbs

        # Build final list in same order as returned by the original results query
        results = []
        for d in search_result_data:
            item = fetched.get((d['type'], d['pk']))
            if item:
                if d['type'] == 'production':
                    if item.selected_screenshot:
                        screenshot = item.selected_screenshot
                        width, height = screenshot.thumb_dimensions_to_fit(
                            48, 36)
                        thumbnail = {
                            'url': screenshot.thumbnail_url,
                            'width': width,
                            'height': height,
                            'natural_width': screenshot.thumbnail_width,
                            'natural_height': screenshot.thumbnail_height,
                        }
                    else:
                        thumbnail = None

                    results.append({
                        'type': item.supertype,
                        'url': item.get_absolute_url(),
                        'value': item.title_with_byline,
                        'thumbnail': thumbnail
                    })
                elif d['type'] == 'releaser':
                    primary_nick = item.primary_nick
                    if primary_nick.differentiator:
                        differentiator = " (%s)" % primary_nick.differentiator
                    else:
                        differentiator = ""

                    results.append({
                        'type': 'group' if item.is_group else 'scener',
                        'url': item.get_absolute_url(),
                        'value': item.name_with_affiliations() + differentiator,
                    })
                elif d['type'] == 'party':
                    results.append({
                        'type': 'party',
                        'url': item.get_absolute_url(),
                        'value': item.name,
                    })
                elif d['type'] == 'bbs':
                    results.append({
                        'type': 'bbs',
                        'url': item.get_absolute_url(),
                        'value': item.name,
                    })

    else:
        results = []
    return JsonResponse(results, safe=False)
Example n. 21
def create_screenshot_from_production_link(production_link_id):
	try:
		prod_link = ProductionLink.objects.get(id=production_link_id)
	except ProductionLink.DoesNotExist:
		# guess it was deleted in the meantime, then.
		return

	if prod_link.production.screenshots.count():
		# don't create a screenshot if there's one already
		if prod_link.is_unresolved_for_screenshotting:
			prod_link.is_unresolved_for_screenshotting = False
			prod_link.save()
		return

	if prod_link.has_bad_image:
		return  # don't create a screenshot if a previous attempt has failed during image processing

	production_id = prod_link.production_id
	url = prod_link.download_url
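	# fetch the linked file; the returned blob exposes the content's sha1 and the as_zipfile / as_io_buffer helpers used below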
	blob = fetch_link(prod_link)
	sha1 = blob.sha1

	if prod_link.is_zip_file():
		# select the archive member to extract a screenshot from, if we don't have
		# a candidate already
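		# (archive contents are assumed to have been catalogued on download, keyed by the file's sha1)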
		archive_members = ArchiveMember.objects.filter(archive_sha1=sha1)
		if not prod_link.file_for_screenshot:
			file_for_screenshot = select_screenshot_file(archive_members)
			if file_for_screenshot:
				prod_link.file_for_screenshot = file_for_screenshot
				prod_link.is_unresolved_for_screenshotting = False
			else:
				prod_link.is_unresolved_for_screenshotting = True
			prod_link.save()

		image_extension = prod_link.file_for_screenshot.split('.')[-1].lower()
		if image_extension in USABLE_IMAGE_FILE_EXTENSIONS:
			z = blob.as_zipfile()
			# we encode the filename as iso-8859-1 before retrieving it, because we
			# decoded it that way on insertion into the database to ensure that it had
			# a valid unicode string representation - see mirror/models.py
			try:
				member_buf = cStringIO.StringIO(
					z.read(prod_link.file_for_screenshot.encode('iso-8859-1'))
				)
			except zipfile.BadZipfile:
				prod_link.has_bad_image = True
				prod_link.save()
				z.close()
				return

			z.close()
			try:
				img = PILConvertibleImage(member_buf, name_hint=prod_link.file_for_screenshot)
			except IOError:
				prod_link.has_bad_image = True
				prod_link.save()
				return
		else:  # image is not a usable format
			return
	else:
		try:
			img = PILConvertibleImage(blob.as_io_buffer(), name_hint=url.split('/')[-1])
		except IOError:
			prod_link.has_bad_image = True
			prod_link.save()
			return

	screenshot = Screenshot(production_id=production_id)
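	# shard the storage path by sha1 prefix (aa/bb/cccc...) and tag it with the production link id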
	basename = sha1[0:2] + '/' + sha1[2:4] + '/' + sha1[4:8] + '.pl' + str(production_link_id) + '.'
	try:
		upload_original(img, screenshot, basename, reduced_redundancy=True)
		upload_standard(img, screenshot, basename)
		upload_thumb(img, screenshot, basename)
	except IOError:
		prod_link.has_bad_image = True
		prod_link.save()
		return
	screenshot.save()