def parse_query_into_sq(query, fields):
    sq = None
    not_sq = None
    or_flag = False
    for phrase in shlex.split(query.encode('utf-8')):
        if phrase[0] == '-':
            query_part = AutoQuery('"%s"' % uni(phrase[1:]))
            for field in fields:
                if not not_sq:
                    not_sq = SQ(**{field: query_part})
                else:
                    not_sq |= SQ(**{field: query_part})
            or_flag = False
        elif phrase == 'OR':
            or_flag = True
        else:
            query_part = AutoQuery('"%s"' % uni(phrase))
            field_sq = None
            for field in fields:
                if not field_sq:
                    field_sq = SQ(**{field: query_part})
                else:
                    field_sq |= SQ(**{field: query_part})
            if not sq:
                sq = field_sq
            else:
                if or_flag:
                    sq |= field_sq
                else:
                    sq &= field_sq
            or_flag = False
    return sq, not_sq

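
# A minimal usage sketch for the helper above, assuming Haystack's
# SearchQuerySet; the field names 'title' and 'feature' are illustrative,
# not taken from the original module.
def _example_search():
    from haystack.query import SearchQuerySet
    sq, not_sq = parse_query_into_sq('donald OR mickey -daisy',
                                     ['title', 'feature'])
    results = SearchQuerySet()
    if sq:
        results = results.filter(sq)
    if not_sq:
        results = results.exclude(not_sq)
    return results
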
def prepare_sq(phrase, fields, sq):
    new_sq = sq
    query_part = GcdAutoQuery('%s' % uni(phrase))
    if '*' in phrase:
        query_part_2 = AutoQuery('%s' % uni(phrase))
    else:
        query_part_2 = None
    for field in fields:
        if not new_sq:
            new_sq = SQ(**{field: query_part})
        else:
            new_sq |= SQ(**{field: query_part})
        if query_part_2:
            new_sq |= SQ(**{field: query_part_2})
    return new_sq

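
# Both helpers above OR one SQ per field together by hand. The same
# accumulation can be written with functools.reduce -- a sketch, assuming
# a non-empty field list and no pre-existing sq to extend:
def _fields_to_sq(fields, query_part):
    from functools import reduce
    from operator import or_
    # SQ(f1=query_part) | SQ(f2=query_part) | ...
    return reduce(or_, (SQ(**{field: query_part}) for field in fields))
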
def show_issue_credit(issue, credit):
    credit_value = ""
    if credit == "translation":
        translator = issue.issuerole_set.filter(role='t')
        #print "Trans", len(translator)
        #translator = story.supportrole_set.filter(role = 't')
        for i in translator:
            credit_value += i.creator.name + " (" + _('translation') + "); "
        credit_value = credit_value[0:-2]
    elif credit == "colors":
        colors = issue.issuerole_set.filter(role='c')
        # not sure why this doesn't work, it did work in the beginning
        # colors = story.supportrole_set.filter(role = 'c')
        for i in colors:
            credit_value += i.creator.name + "; "
        credit_value = credit_value[0:-2]
    elif credit == "letters":
        letters = issue.issuerole_set.filter(role='l')
        #letters = story.supportrole_set.filter(role = 'l')
        for i in letters:
            credit_value += i.creator.name + "; "
        credit_value = credit_value[0:-2]
    elif credit == "indexer":
        indexer = issue.issuerole_set.filter(role='i')
        for i in indexer:
            credit_value += i.creator.name + "; "
        credit_value = credit_value[0:-2]
    return uni(credit_value)

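
# The "append '; ' then chop the last two characters" idiom above repeats
# for every role; str.join expresses it directly. A sketch only -- the
# helper name is illustrative and the queryset argument assumes the same
# issuerole_set API used above:
def _join_creators(roles, suffix=""):
    return "; ".join(i.creator.name + suffix for i in roles)
# e.g. _join_creators(issue.issuerole_set.filter(role='c'))
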
def get_characters(story):
    characters = story.story_version.appearance_set\
                      .filter(character__gt="")
    appearance = ""
    for i in characters:
        # TODO: there is also entrycharacternames, to have
        #       the name for a given story, this is also
        #       linked to the character
        try:
            tmp = i.character.charactername_set\
                   .filter(language=story.language)\
                   .filter(preferred='Y')
        except MultipleObjectsReturned:
            # some one-shot characters are twice in character-db
            tmp = Character.objects\
                           .filter(character__exact=i.character_code)
            # could use entrycharacternames especially here
        if len(tmp) == 0:
            appearance += i.character.name + "; "
        else:
            appearance += tmp[0].name + "; "
    appearance = appearance[:-2]
    if appearance == "--":
        return ""
    else:
        return uni(appearance)

def issue_flatfile(issue):
    """ For showing the flatfile lines for an issue."""
    if not issue:
        return ""
    activate(issue.series.language_code)  # for translation stuff

    # a bit of general stuff
    stories = list(issue.story_set.order_by('sequence_number'))
    letter_default = show_issue_credit(issue, "letters")
    color_default = show_issue_credit(issue, "colors")
    translation_default = show_issue_credit(issue, "translation")
    #indexer = show_issue_credit(issue, "indexer")

    # first cover sequence
    line = issue.number + "\t" \
        + convert_isv_date(issue.publication_date,
                           issue.series.language_code) \
        + "\t" + show_credit(stories[0], "type") \
        + "\t" + "\t" + get_feature(stories[0]) + "\t" \
        + uni(stories[0].title) \
        + "\t" + show_credit(stories[0], "pencils", True) + "\t" \
        + show_credit(stories[0], "inks",
                      show_credit(stories[0], "pencils")) \
        + "\t" + "\t" + show_credit(stories[0], "colors", True) + "\t" \
        + show_credit(stories[0], "letters", False) + "\t" \
        + "?\t" + issue.page_count + "\t" + issue.price + "\t" \
        + get_characters(stories[0])
    line += "\tIssue data imported from inducks.org. "
    line += "If you edit entries please give the updated information " \
            "also to inducks.org. "
    #line += "Original inducks indexer " + indexer
    line += "[inducks issue-id:" + issue.id \
        + "][inducks story-id:" + stories[0].id \
        + "][inducks storyversion-id:" \
        + stories[0].story_version.id + "]\t"
    line += "\t" + uni(reprints(stories[0])) + "\t" \
        + stories[0].job_number

    for num in range(1, len(stories)):
        story = stories[num]
        force = story.story_version.type == 'n'
        script = show_credit(story, "script",
                             force or story.story_version.type in ['t', 'a'],
                             translation_default)
        notes = ""
        if story.story_version.type in ['c', 'i']:
            #notes += "Illustration idea by " + script
            script = ""
        if story.included_in_story:
            notes += " This sequence is included in " + \
                     story.included_in_story + ", please edit"
        line += "\n\t\t" + show_credit(story, "type", True) + "\t" + "\t" \
            + get_feature(story) + "\t" + uni(story.title) + "\t" \
            + show_credit(story, "pencils", force) + "\t" \
            + show_credit(story, "inks", force,
                          show_credit(story, "pencils")) \
            + "\t" + script + "\t" \
            + show_credit(story, "colors", force, color_default) + "\t" \
            + show_credit(story, "letters", force, letter_default) + "\t" \
            + "\t" + get_page_count(story) + "\t" + "\t" \
            + uni(get_characters(story)) + "\t[inducks story-id:" \
            + story.id + "][inducks storyversion-id:" \
            + story.story_version.id + "]" + uni(story.notes) \
            + uni(notes) + "\t" + "\t" + uni(reprints(story)) + "\t" \
            + story.job_number
    deactivate()
    return line

def gcd_series(issue, publisher):
    try:
        if len(issue.number) == 7 and issue.number[4] == '-':
            number = issue.number[5:7].lstrip('0') + "/" + issue.number[0:4]
        else:
            number = issue.number
        gcd_issue = GCD_Issue.objects\
            .filter(series__name__icontains=issue.series.name)\
            .filter(number=number)\
            .filter(series__country_code__iexact=
                    issue.series.country_code.code)
        if len(gcd_issue) > 1:
            gcd_issue_2 = gcd_issue.filter(
                series__name__istartswith=issue.series.name)
            if len(gcd_issue_2) > 0:
                gcd_issue = gcd_issue_2
        if len(gcd_issue) > 0:
            if len(gcd_issue) > 1:
                gcd_issue = gcd_issue.filter(series__name=issue.series.name)
                #print gcd_issue, issue.series.name
            if len(gcd_issue) == 1:
                return "from " + uni(gcd_issue[0].series.name) + " (" + \
                       uni(gcd_issue[0].series.publisher.name) + ", " + \
                       uni(gcd_issue[0].series.year_began) + " series) #" + \
                       gcd_issue[0].number
        gcd_series = GCD_Series.objects\
            .filter(name__startswith=issue.series.name)\
            .filter(country_code__iexact=issue.series.country_code.code)
        #print issue.series.name, issue.series.country_code
        if len(gcd_series) > 1 and publisher != "":
            gcd_series = gcd_series.filter(
                publisher__name__icontains=publisher)
        if len(gcd_series) == 1:
            #print gcd_series, issue.series.name
            return "from " + gcd_series[0].name + " (" + \
                   gcd_series[0].publisher.name + ", " + \
                   uni(gcd_series[0].year_began) + " series) #" + number
        else:
            for i in gcd_series:
                print(i, i.country_code)
        return ""
    except OperationalError:
        print(issue.series.name)

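
# gcd_series() first normalizes inducks-style "YYYY-MM" issue numbers into
# the "M/YYYY" form used on the GCD side. The same logic as a standalone
# sketch (helper name is illustrative):
def _normalize_inducks_number(number):
    # "1999-07" -> "7/1999"; anything else passes through unchanged
    if len(number) == 7 and number[4] == '-':
        return number[5:7].lstrip('0') + "/" + number[0:4]
    return number
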
def get_feature(story):
    feature = ""
    try:
        # shouldn't there be a different way to check for existence?
        features = story.story_version.base_story.feature_set.all()
    except Exception:  # base_story relation may be missing
        return feature
    for i in features:
        if i.feature.name != "--":
            #feature += i.feature.name + "; "
            try:
                feature += i.feature.charactername_set.filter(
                    language=story.language).filter(
                    preferred='Y')[0].name + "; "
            except IndexError:
                feature += i.feature.name + "; "
    feature = feature[:-2]
    return uni(feature)

def cover_upload(request, cover_id, add_variant=False):
    """
    Handles uploading of covers be it
    - first upload
    - replacement upload
    - variant upload
    """
    upload_template = 'gcd/details/cover_upload.html'
    uploaded_template = 'gcd/details/cover_uploaded.html'
    style = 'default'

    # check for issue and cover
    cover = get_object_or_404(Cover, id=cover_id)
    issue = cover.issue

    if add_variant and not cover.has_image:
        error_text = "No cover present for %s. You cannot upload a variant." \
                     % cover.issue
        return render_error(request, error_text, redirect=False)

    # check that we are actually allowed to upload
    if cover.has_image and not cover.marked and not add_variant:
        tag = get_image_tag(cover, "existing cover", 2)
        covers_needed = Cover.objects.filter(issue__series=issue.series)
        covers_needed = covers_needed.exclude(marked=False, has_image=True)
        # TODO: make the 15 an option
        covers_needed = covers_needed.exclude(marked=None,
                                              has_image=True)[:15]
        return render_to_response(uploaded_template, {
            'cover': cover,
            'covers_needed': covers_needed,
            'issue': issue,
            'tag': tag,
            'already': "is already",
            'style': style},
            context_instance=RequestContext(request))

    # check what kind of upload
    if cover.has_image and cover.marked and not add_variant:
        display_cover = get_image_tag(cover, "cover to replace", 2,
                                      no_cache=True)
        upload_type = 'replacement'
    elif add_variant:
        display_cover = get_image_tag(cover, "first cover", 2, no_cache=True)
        upload_type = 'variant'
    else:
        display_cover = None
        upload_type = ''

    # current request is an upload
    if request.method == 'POST':
        try:
            form = UploadScanForm(request.POST, request.FILES)
        except IOError:
            # sometimes uploads misbehave. connection dropped ?
            error_text = 'Something went wrong with the upload. ' + \
                         'Please <a href="' + request.path + \
                         '">try again</a>.'
            return render_error(request, error_text, redirect=False,
                                is_safe=True)
        if form.is_valid():
            # user has to change defaults and enter something valid
            if request.POST['email'] == '*****@*****.**':
                request.POST['email'] = ''
            if request.POST['name'] == 'Your name':
                request.POST['name'] = ''
            form = UploadScanForm(request.POST, request.FILES)
        # TODO: even if the file is a valid image it does not survive into
        # the next form if name/email is not valid, Django issue ?
        if not form.is_valid():
            return render_to_response(upload_template, {
                'form': form.as_ul(),
                'cover': cover,
                'issue': issue,
                'style': style,
                'display_cover': display_cover,
                'upload_type': upload_type,
                },
                context_instance=RequestContext(request))
        # if file is in form handle it
        if 'scan' in request.FILES:
            upload_datetime = datetime.today()
            scan = request.FILES['scan']
            contributor = '%s (%s)' % (request.POST['name'],
                                       request.POST['email'])
            # at first (in case something goes wrong) put new covers into
            # media/<_local_new_scans>/<monthname>_<year>/
            # with name <cover_id>_<date>_<time>.<ext>
            scan_name = str(cover.id) + "_" + \
                        upload_datetime.strftime('%Y%m%d_%H%M%S')
            upload_dir = settings.MEDIA_ROOT + _local_new_scans + \
                         upload_datetime.strftime('%B_%Y/').lower()
            destination_name = upload_dir + scan_name + \
                               os.path.splitext(scan.name)[1]
            if not os.path.isdir(upload_dir):
                try:
                    os.mkdir(upload_dir)
                except IOError:
                    error_text = "Problem with file storage for uploaded " + \
                                 "cover, please report an error."
                    return render_error(request, error_text, redirect=False)

            # arguably a bit unlikely to happen with the current naming scheme
            if os.path.exists(destination_name):
                destination_name = os.path.splitext(destination_name)[0] + \
                                   "_a" + os.path.splitext(scan.name)[1]
            # write uploaded file
            destination = open(destination_name, 'wb')
            for chunk in scan.chunks():
                destination.write(chunk)
            destination.close()

            try:
                # generate different sizes we are using
                im = Image.open(destination.name)
                if im.size[0] >= 400:
                    # generate the sizes we are using
                    if check_cover_dir(cover) == False:
                        error_text = "Problem with file storage for " + \
                                     "cover with id %d for %s, please " \
                                     "report an error." % (cover.id,
                                                           uni(issue))
                        return render_error(request, error_text,
                                            redirect=False)
                    base_dir = str(int(cover.id/1000))
                    if upload_type == 'replacement':
                        # if we don't have the original file copy it from
                        # w400, otherwise do nothing, old cover file stays
                        # due to datetime string in the filename
                        if not cover.file_extension:
                            sub_dir = "/w400/"
                            extension = ".jpg"
                            current_im_name = settings.MEDIA_ROOT + \
                                _local_scans_by_id + base_dir + \
                                sub_dir + str(cover.id) + extension
                            # check for existence
                            if not os.path.exists(current_im_name):
                                error_text = "Problem with existing file " + \
                                    "for cover with id %d for %s, please " \
                                    "report an error." % (cover.id,
                                                          uni(issue))
                                return render_error(request, error_text,
                                                    redirect=False)
                            # use this for debugging locally
                            # img_url = _server_prefixes[cover.server_version] \
                            #           + suffix
                            # urlretrieve(img_url, current_im_name)
                            backup_name = settings.MEDIA_ROOT + \
                                _local_scans_by_id + base_dir + \
                                "/uploads/" + str(cover.id) + \
                                cover.modified.strftime('_%Y%m%d_%H%M%S') + \
                                extension
                            shutil.copy(current_im_name, backup_name)
                    if add_variant:
                        return render_error(request,
                            'Adding variant covers is not yet implemented.',
                            redirect=False)
                    # generate different sizes
                    generate_sizes(cover, im)
                    # set cover table values
                    cover.server_version = 1
                    cover.has_image = True
                    cover.marked = False
                    cover.contributor = contributor
                    if not cover.series.has_gallery == True:
                        series = cover.series
                        series.has_gallery = True
                        series.save()
                    cover.modified = upload_datetime
                    cover.extension = os.path.splitext(destination_name)[1]
                    im_name = settings.MEDIA_ROOT + _local_scans_by_id + \
                              base_dir + "/uploads/" + str(cover.id) + \
                              upload_datetime.strftime('_%Y%m%d_%H%M%S') + \
                              cover.extension
                    shutil.move(destination_name, im_name)
                    cover.save()
                    store_count = codecs.open(settings.MEDIA_ROOT +
                                              _local_new_scans +
                                              "cover_count", "w", "utf-8")
                    store_count.write(str(Cover.objects
                                               .filter(has_image=True)
                                               .count()))
                    store_count.close()
                    if 'remember_me' in request.POST:
                        request.session['gcd_uploader_name'] = \
                            request.POST['name']
                        request.session['gcd_uploader_email'] = \
                            request.POST['email']
                    else:
                        request.session.pop('gcd_uploader_name', '')
                        request.session.pop('gcd_uploader_email', '')
                    tag = get_image_tag(cover, "uploaded cover", 2)
                else:
                    os.remove(destination.name)
                    info_text = "Image is too small, only " + str(im.size) + \
                                " in size."
                    return render_to_response(upload_template, {
                        'form': form.as_ul(),
                        'info': info_text,
                        'cover': cover,
                        'display_cover': display_cover,
                        'upload_type': upload_type,
                        'issue': issue,
                        'style': style,
                        },
                        context_instance=RequestContext(request))

                # what else do we need
                covers_needed = Cover.objects\
                    .filter(issue__series=issue.series)\
                    .exclude(marked=False, has_image=True)
                covers_needed = covers_needed.exclude(marked=None,
                                                      has_image=True)[:15]
                return render_to_response(uploaded_template, {
                    'cover': cover,
                    'covers_needed': covers_needed,
                    'issue': issue,
                    'tag': tag,
                    'style': style},
                    context_instance=RequestContext(request))
            except IOError:
                # file type *should* be taken care of by django
                os.remove(destination.name)
                return render_to_response(upload_template, {
                    'form': form.as_ul(),
                    'info': 'Error: File "' + scan.name +
                            '" is not a valid picture.',
                    'cover': cover,
                    'issue': issue,
                    'display_cover': display_cover,
                    'upload_type': upload_type,
                    'style': style,
                    },
                    context_instance=RequestContext(request))
        else:
            # there is a pretty good chance we never end up here
            return render_to_response(upload_template, {
                'form': form.as_ul(),
                'cover': cover,
                'issue': issue,
                'style': style},
                context_instance=RequestContext(request))
    else:
        # do we have email/name cached
        if 'gcd_uploader_email' in request.session:
            vars = {'name': request.session['gcd_uploader_name'],
                    'email': request.session['gcd_uploader_email'],
                    'remember_me': True}
        elif request.user.is_authenticated():
            vars = {'name': unicode(request.user.indexer),
                    'email': request.user.email,
                    'remember_me': True}
        else:
            vars = {'name': 'Your name', 'email': '*****@*****.**'}
        form = UploadScanForm(initial=vars)
        # display the form
        return render_to_response(upload_template, {
            'form': form.as_ul(),
            'cover': cover,
            'issue': issue,
            'display_cover': display_cover,
            'upload_type': upload_type,
            'style': style},
            context_instance=RequestContext(request))

def find_migration_candidates(story, string, standard=True):
    """
    Calls parser for reprint notes and returns results.
    returns:
        found issue
        notes found in []
        sequence number of origin story (if found)
        origin story (if found)
        True if story is original, False otherwise
    """
    string = string.strip()
    if string == '?' or string == '' or string[-1] == '?' or \
       string.startswith('from ?') or string.startswith('uit ?') or \
       string.startswith(uni("från ?")):
        return False, '', -1, False, False
    if standard == False:
        # otherwise some Disney stuff takes ages
        for strip in ['donald daily', 'duck daily', 'duck sunday',
                      'donald sunday', 'mouse sunday', 'mickey sunday',
                      'mouse daily', 'mickey daily']:
            if string.lower().find(strip) >= 0:
                return False, '', -1, False, False

    # possible from/in in other languages
    reprint_direction = ["from ", "in "]
    if story.issue.series.language.code.lower() == 'it':  # da, in
        reprint_direction = ["da ", "di "] + reprint_direction
    elif story.issue.series.language.code.lower() in ['es', 'pt']:  # de, en
        reprint_direction = ["de ", "en "] + reprint_direction
    elif story.issue.series.language.code.lower() == 'nl':  # uit, in
        reprint_direction = ["uit "] + reprint_direction
    elif story.issue.series.language.code.lower() in ['sv', 'no']:  # från, i
        reprint_direction = [uni("från "), "i "] + reprint_direction
    elif story.issue.series.language.code.lower() == 'de':  # aus
        reprint_direction = ["aus "] + reprint_direction
    reprint_direction_search = reprint_direction + [""]

    other_fr = True
    for from_to in reprint_direction_search:
        if standard:
            # check for our format
            reprint, notes = parse_reprint(string, from_to)
            # exclude same series if several issues (all with same
            # starting year) are found
            if reprint.count() > 1:
                reprint = reprint.exclude(series__id=story.issue.series.id)
            if reprint.count() > 1 and reprint.count() <= 15:
                a = []
                for i in range(reprint.count()):
                    nr = find_reprint_sequence_in_issue(story, reprint[i].id)
                    if nr >= 0:
                        a.append(i)
                if len(a) == 1:
                    reprint = reprint.filter(id=reprint[a[0]].id)
        else:
            # check for some others
            reprint, notes = parse_reprint_full(string, from_to)
            reprint = reprint.exclude(id=story.issue.id)
            if reprint.count() > 1 and reprint.count() <= 15:
                a = []
                for i in range(reprint.count()):
                    nr = find_reprint_sequence_in_issue(story, reprint[i].id)
                    if nr >= 0:
                        a.append(i)
                if len(a) == 1:
                    reprint = reprint.filter(id=reprint[a[0]].id)
            # one other routine for a few specials
            if reprint.count() != 1:  # == 0 or reprint.count() > 5:
                if other_fr:  # need marker to only do once
                    other_fr = False
                    reprint = parse_reprint_fr(string)
                    reprint = reprint.exclude(id=story.issue.id)
                    if reprint.count() > 1 and reprint.count() <= 15:
                        a = []
                        for i in range(reprint.count()):
                            nr = find_reprint_sequence_in_issue(
                                story, reprint[i].id)
                            if nr >= 0:
                                a.append(i)
                        if len(a) == 1:
                            reprint = reprint.filter(id=reprint[a[0]].id)
                    if reprint.count() == 1:
                        reprint_direction = "from "
        if reprint.count() == 1:
            nr = find_reprint_sequence_in_issue(story, reprint[0].id)
            if from_to in ["from ", "aus ", "da ", "di ", "de ",
                           "uit ", u"från "]:
                origin = False
            elif from_to == "":
                origin = False
                if notes:
                    notes += " Confirm: direction of reprint"
                else:
                    notes = "Confirm: direction of reprint"
            else:
                origin = True
            if notes:
                # normalize the various "original title" markers
                for marker in ['originaltitel', 'Originaltitel',
                               uni('história original'),
                               uni('História original'),
                               'titolo originale', 'titre original',
                               'Titre original', 'originally titled']:
                    if notes.find(marker) >= 0:
                        notes = notes.replace(marker, 'original title')
            if nr >= 0:
                other_story = Story.objects.exclude(deleted=True)\
                                           .filter(issue=reprint[0])
                other_story = other_story.filter(sequence_number=nr)
                return reprint[0], notes, nr, other_story[0], origin
            elif notes and notes.lower().find('original ti') >= 0:
                pos = notes.lower().find('original ti')
                pos2 = notes[pos:].find('"')
                if pos2 > 0:
                    pos3 = notes[pos+pos2+1:].find('"')
                    if pos3 > 0:
                        original_title = notes[pos+pos2+1:pos+pos2+pos3]
                        end_title = pos+pos2+pos3+2
                    else:
                        original_title = notes[pos+pos2:]
                        end_title = len(notes)
                else:
                    pos2 = notes[pos:].find(']')
                    if pos2 > 0:
                        original_title = notes[pos+len('original title'):
                                               pos+pos2]
                        end_title = pos+pos2+1
                    else:
                        pos2 = notes[pos:].find(';')
                        if pos2 > 0:
                            original_title = notes[pos+len('original title'):
                                                   pos+pos2]
                            end_title = pos+pos2+1
                        else:
                            original_title = notes[pos+len('original title'):]
                            end_title = len(notes)
                results = Story.objects.exclude(deleted=True)
                results = results.filter(issue=reprint[0])
                results = results.filter(
                    title__icontains=original_title.strip(' !.":'))
                if results.count() == 1:
                    notes = notes[:pos] + notes[end_title:]
                    return reprint[0], notes, results[0].sequence_number, \
                           results[0], origin
                elif results.count() > 1:
                    results = results.filter(type=story.type)
                    if results.count() == 1:
                        notes = notes[:pos] + notes[end_title:]
                        return reprint[0], notes, \
                               results[0].sequence_number, results[0], origin
                return reprint[0], notes, -1, False, origin
            else:
                return reprint[0], notes, -1, False, origin
    return False, '', -1, False, False

def migrate_reprints_other(select=-1):
    series = Series.objects.exclude(deleted=True).order_by("id")
    if select > 0:
        series = series.filter(id__gt=str((select-1)*1000))
        series = series.filter(id__lte=str(select*1000))
    # exclude Lars
    series = series.exclude(id__in=[18732, 26245, 31648, 36955, 36980,
                                    36973, 36949, 36975, 36964, 36953,
                                    36967, 39648])
    series = series.exclude(id=153)  # Walt Disney's Comics and Stories
    #series = series.exclude(id=687)  # Topolino
    series = series.exclude(id=1923)  # Alan Ford
    series = series.exclude(id=2017)  # Lucky Lukes äventyr
    series = series.exclude(id=2199)  # Little Orphan Annie
    series = series.exclude(id=2672)  # Martin Mystère
    series = series.exclude(id=2695)  # Eternauta, L'
    #series = series.exclude(id=3706)  # Grandi Classici Disney, I
    series = series.exclude(id=3744)  # Spicy Tales
    series = series.exclude(id=3817)  # Sandman, The
    series = series.exclude(id=7052)  # Gustafs bästa sidor
    series = series.exclude(id=7087)  # Serie-paraden
    series = series.exclude(id=7155)  # Heavy Metal Magazine
    series = series.exclude(id=7208)  # Dr. Seuss Goes to War
    #series = series.exclude(id=7536)  # Albi Di Topolino
    #series = series.exclude(id=7539)  # Almanacco Topolino
    #series = series.exclude(id=7540)  # Almanacco Topolino
    series = series.exclude(id=7555)  # Euracomix
    #series = series.exclude(id=7566)  # Classici di Walt Disney, I
    #series = series.exclude(id=7569)  # Grandi Classici Disney, I
    series = series.exclude(id=7582)  # Manga Sun
    series = series.exclude(id=7587)  # Pilot
    series = series.exclude(id=7640)  # 91:an Karlsson [julalbum]
    series = series.exclude(id=7645)  # Asterix
    series = series.exclude(id=7646)  # Asterix [nytryck]
    series = series.exclude(id=7652)  # Biffen och Bananen Ett glatt återseende
    series = series.exclude(id=7657)  # Familjen Svenssons äventyr
    series = series.exclude(id=7670)  # Lilla Fridolf [julalbum]
    series = series.exclude(id=8101)  # James Bond
    series = series.exclude(id=8110)  # Epix
    series = series.exclude(id=8118)  # Tung metall
    series = series.exclude(id=8510)  # Felix - Jan Lööf's Felix
    series = series.exclude(id=8511)  # Felix' äventyr
    series = series.exclude(id=10413)  # Naturens under
    series = series.exclude(id=10458)  # Caliber
    series = series.exclude(id=10562)  # Donald Duck & Co
    series = series.exclude(id=12068)  # Boys Love Girls...More or Less 12068
    series = series.exclude(id=12498)  # Colt
    series = series.exclude(id=14584)  # Journal de Spirou, Le
    series = series.exclude(id=18705)  # Korak
    series = series.exclude(id=19940)  # Fandom's Finest Comics
    series = series.exclude(id=21132)  # Norske Serieperler
    series = series.exclude(id=21200)  # Donald Duck & Co
    series = series.exclude(id=21672)  # Action Serien
    series = series.exclude(id=23238)  # Front serien
    series = series.exclude(id=27342)  # Donald Duck
    series = series.exclude(id=28131)  # Vill Vest
    series = series.exclude(id=29937)  # Pocket Book of Esquire Cartoons, The
    series = series.exclude(id=31372)  # Mummitrollet
    series = series.exclude(id=32745)  # Flippie Flink
    series = series.exclude(id=35202)  # Heathcliff Catch of The Day
    series = series.exclude(id=36911)  # Pyton
    series = series.exclude(id=36912)  # Pyton
    series = series.exclude(id=53)  # Wally - His Cartoons of the A.E.F. 51 51
    series = series.exclude(id=60)  # How They Draw Prohibition 72 72
    series = series.exclude(id=69)  # Aventures De Tintin, Les 55 55
    series = series.exclude(id=73)  # Mickey Mouse Magazine 77 77
    series = series.exclude(id=82)  # King Comics 658 658
    series = series.exclude(id=84)  # Popular Comics 64 64
    series = series.exclude(id=107)  # Super Comics 56 56
    series = series.exclude(id=111)  # Jumbo Comics 53 93
    series = series.exclude(id=119)  # Magic Comics 52 52
    series = series.exclude(id=240)  # USA Comics 51 51
    series = series.exclude(id=538)  # Lone Ranger, The 185 282
    series = series.exclude(id=593)  # Collana Del Tex - Prima Serie 60 60
    series = series.exclude(id=1017)  # Rocky Lane Western 61 78
    series = series.exclude(id=2867)  # Doctor Who 61 61
    series = series.exclude(id=4900)  # New Yorker Book of Lawyer Cartoons, The 85 85
    series = series.exclude(id=5839)  # Smithsonian Book of Newspaper Comics, The 118 118
    series = series.exclude(id=6193)  # Lanciostory Anno XXV 57 58
    series = series.exclude(id=6194)  # Skorpio Anno XXIII 53 53
    series = series.exclude(id=7593)  # Tutto Disney 126 126
    series = series.exclude(id=9787)  # Kalle Anka & C:o 399 414
    series = series.exclude(id=12754)  # Jokebook Comics Digest Annual 148 149
    series = series.exclude(id=14027)  # Green Hornet, The 53 53
    series = series.exclude(id=14988)  # Angel Heart 50 50
    series = series.exclude(id=15308)  # Techno 95 95
    series = series.exclude(id=16632)  # Kalar 94 94
    series = series.exclude(id=16996)  # Sabotør Q5 137 137
    series = series.exclude(id=18403)  # Madhouse Comics Digest 68 68
    series = series.exclude(id=19745)  # Sheriff Classics 62 62
    series = series.exclude(id=19751)  # Helgenen 116 118
    series = series.exclude(id=20751)  # Cartoons The French Way 154 154
    series = series.exclude(id=21769)  # Alarm 170 176
    series = series.exclude(id=22370)  # Brumle 71 71
    series = series.exclude(id=24745)  # Hjerterevyen 375 377
    series = series.exclude(id=26396)  # Jippo 137 137
    series = series.exclude(id=26559)  # Best Cartoons from Argosy 153 153
    series = series.exclude(id=29924)  # Lasso 75 75
    series = series.exclude(id=30264)  # Little Orphan Annie 59 59

    import time
    a = codecs.open("/tmp/reprints_processing", "a", "utf-8")
    for serie in series:
        c_start = time.time()
        #print serie, serie.id
        migrate_reprints_series(serie.id, standard=False)  #, do_save=False
        used_time = time.time() - c_start
        a.write(uni(serie) + uni(", ") + uni(serie.id) + uni(": ") +
                uni(used_time) + uni("\n"))

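
# The manual c_start/used_time bookkeeping above could be wrapped in a
# small context manager -- a sketch only, reusing the module's uni()
# helper (which the loop above already applies to non-string values):
from contextlib import contextmanager
import time as _time

@contextmanager
def _timed(log, label):
    start = _time.time()
    yield
    log.write(uni(label) + uni(": ") + uni(_time.time() - start) + uni("\n"))
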
def copy_covers_new():
    #covers = Cover.objects.filter(has_image=True)\
    #    .filter(modified__gte='2009-10-02 14:00')\
    #    .filter(modified__lt='2009-11-15 00:00')
    #covers = Cover.objects.filter(has_image=True)\
    #    .filter(modified__gte='2009-11-15 00:00')\
    #    .filter(modified__lt='2009-11-23 00:00')
    covers = Cover.objects.filter(has_image=True)\
                          .filter(modified__gte='2009-11-23 00:00')
    cnt = 0
    for cover in covers:
        issue = cover.issue
        scan_name = str(issue.series.id) + "_" + str(issue.id) + "_" + \
                    uni(issue).replace(' ', '_').replace('/', '-') + \
                    "_" + cover.modified.strftime('%Y%m%d_%H%M%S')
        upload_dir = settings.MEDIA_ROOT + _local_new_scans + \
                     cover.modified.strftime('%B_%Y/').lower()
        old_filename = upload_dir + scan_name
        new_filename = settings.MEDIA_ROOT + _local_scans_by_id + \
                       str(int(cover.id/1000)) + '/uploads/' + \
                       str(cover.id) + \
                       cover.modified.strftime('_%Y%m%d_%H%M%S')
        check_cover_dir(cover)
        [extension, old_filename] = find_original_cover(old_filename)
        if not extension:
            # retry with the modified time shifted by one second
            scan_name = str(issue.series.id) + "_" + str(issue.id) + "_" + \
                        uni(issue).replace(' ', '_').replace('/', '-') + \
                        "_" + (cover.modified +
                               timedelta(0, 1)).strftime('%Y%m%d_%H%M%S')
            old_filename = upload_dir + scan_name
            [extension, old_filename] = find_original_cover(old_filename)
            if not extension:
                print(cover.issue, old_filename)
                raise IOError
        new_filename += extension
        shutil.copy(old_filename, new_filename)
        os.chmod(new_filename,
                 stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)

        im = Image.open(new_filename)
        if im.mode != "RGB":
            print("Image Mode:", im.mode)
            im = im.convert("RGB")
        print(cover.id, cover.issue, extension)

        # generate different sizes
        # (OK, we could just copy the files in this case...)
        scaled_name = settings.MEDIA_ROOT + _local_scans_by_id + \
                      str(int(cover.id/1000)) + "/w100/" + \
                      str(cover.id) + ".jpg"
        size = 100, int(100./im.size[0]*im.size[1])
        scaled = im.resize(size, Image.ANTIALIAS)
        scaled.save(scaled_name)

        scaled_name = settings.MEDIA_ROOT + _local_scans_by_id + \
                      str(int(cover.id/1000)) + "/w200/" + \
                      str(cover.id) + ".jpg"
        size = 200, int(200./im.size[0]*im.size[1])
        scaled = im.resize(size, Image.ANTIALIAS)
        scaled.save(scaled_name)

        scaled_name = settings.MEDIA_ROOT + _local_scans_by_id + \
                      str(int(cover.id/1000)) + "/w400/" + \
                      str(cover.id) + ".jpg"
        size = 400, int(400./im.size[0]*im.size[1])
        scaled = im.resize(size, Image.ANTIALIAS)
        scaled.save(scaled_name)

        cover.file_extension = extension
        cover.save()
        cnt += 1
        if cnt % 100 == 0:
            print(cnt)

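
# The three near-identical scaling blocks above could be collapsed into a
# loop. A sketch under the same w100/w200/w400 layout used above; the
# function name is illustrative:
def _generate_scaled_covers(cover, im):
    base = settings.MEDIA_ROOT + _local_scans_by_id + str(int(cover.id/1000))
    for width in (100, 200, 400):
        # keep the aspect ratio while fixing the width
        size = width, int(float(width) / im.size[0] * im.size[1])
        scaled = im.resize(size, Image.ANTIALIAS)
        scaled.save("%s/w%d/%d.jpg" % (base, width, cover.id))
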
def cover_upload(request, issue_id, add_variant=False):
    """
    Handles uploading of covers be it
    - first upload
    - replacement upload
    - variant upload
    """
    upload_template = 'gcd/details/cover_upload.html'
    uploaded_template = 'gcd/details/cover_uploaded.html'
    error_template = 'gcd/error.html'
    style = 'default'

    # check for issue and cover
    # TODO might do get_object_or_404 instead of own error check
    issue = Issue.objects.filter(id=int(issue_id))
    if len(issue) == 0:
        return render_to_response(error_template, {
            'error_text': 'Issue ID #' + issue_id + ' does not exist.',
            },
            context_instance=RequestContext(request))
    else:
        issue = issue[0]

    cover = Cover.objects.filter(issue=issue)
    if len(cover) == 0:
        return render_to_response(error_template, {
            'error_text': 'Something wrong with issue ID #' + issue_id +
                          ' and its cover table, please contact the admins.',
            },
            context_instance=RequestContext(request))
    else:
        cover = cover[0]

    if add_variant and not cover.has_image:
        info_text = "No cover present for %s. You cannot upload a variant." \
                    % cover.issue
        return render_to_response(error_template, {
            'error_text': info_text,
            },
            context_instance=RequestContext(request))

    # check that we are actually allowed to upload
    if cover.has_image and not cover.marked and not add_variant:
        tag = get_image_tag(issue.series.id, cover, "existing cover", 2)
        covers_needed = Cover.objects.filter(issue__series=issue.series)
        covers_needed = covers_needed.exclude(marked=False, has_large=True)
        # TODO: make the 15 an option
        covers_needed = covers_needed.exclude(marked=None,
                                              has_large=True)[:15]
        return render_to_response(uploaded_template, {
            'cover': cover,
            'covers_needed': covers_needed,
            'issue': issue,
            'tag': tag,
            'already': "is already",
            'style': style},
            context_instance=RequestContext(request))

    # check what kind of upload
    if cover.has_image and cover.marked and not add_variant:
        display_cover = get_image_tag(issue.series.id, cover,
                                      "cover to replace", 2, no_cache=True)
        upload_type = 'replacement'
    elif add_variant:
        display_cover = get_image_tag(issue.series.id, cover,
                                      "first cover", 2, no_cache=True)
        upload_type = 'variant'
    else:
        display_cover = None
        upload_type = ''

    # current request is an upload
    if request.method == 'POST':
        try:
            form = UploadScanForm(request.POST, request.FILES)
        except IOError:
            # sometimes uploads misbehave. connection dropped ?
            info_text = 'Something went wrong with the upload. ' + \
                        'Please <a href="' + request.path + \
                        '">try again</a>.'
            return render_to_response(error_template, {
                'error_text': mark_safe(info_text),
                },
                context_instance=RequestContext(request))
        if form.is_valid():
            # user has to change defaults and enter something valid
            if request.POST['email'] == '*****@*****.**':
                request.POST['email'] = ''
            if request.POST['name'] == 'Your name':
                request.POST['name'] = ''
            form = UploadScanForm(request.POST, request.FILES)
        # TODO: even if the file is a valid image it does not survive into
        # the next form if name/email is not valid, Django issue ?
        if not form.is_valid():
            return render_to_response(upload_template, {
                'form': form.as_ul(),
                'issue': issue,
                'style': style,
                'display_cover': display_cover,
                'upload_type': upload_type,
                },
                context_instance=RequestContext(request))
        # if file is in form handle it
        if 'scan' in request.FILES:
            upload_datetime = datetime.today()
            scan = request.FILES['scan']
            contributor = '%s (%s)' % (request.POST['name'],
                                       request.POST['email'])
            # put new covers into media/_local_new_scans/monthname_year/
            # name <issue_id>_<series_name>_#<issue_number>_<date>_<time>.<ext>
            scan_name = str(issue.series.id) + "_" + str(issue.id) + "_" + \
                        uni(issue).replace(' ', '_').replace('/', '-') + \
                        "_" + datetime.today().strftime('%Y%m%d_%H%M%S')
            upload_dir = settings.MEDIA_ROOT + _local_new_scans + \
                         upload_datetime.strftime('%B_%Y/').lower()
            destination_name = upload_dir + scan_name + \
                               os.path.splitext(scan.name)[1]
            if not os.path.isdir(upload_dir):
                try:
                    os.mkdir(upload_dir)
                except IOError:
                    info_text = "Problem with file storage for uploaded " + \
                                "cover please contact GCD-admins."
                    return render_to_response(error_template, {
                        'error_text': info_text,
                        },
                        context_instance=RequestContext(request))

            # arguably a bit unlikely to happen with the current naming scheme
            if os.path.exists(destination_name):
                destination_name = os.path.splitext(destination_name)[0] + \
                                   "_a" + os.path.splitext(scan.name)[1]
            # write uploaded file
            destination = open(destination_name, 'wb')
            for chunk in scan.chunks():
                destination.write(chunk)
            destination.close()

            try:
                # generate different sizes we are using
                im = Image.open(destination.name)
                if im.size[0] >= 400:
                    # generate the sizes we are using
                    if check_series_cover_dir(issue.series) == False:
                        info_text = "Problem with file storage for " + \
                                    "series '%s', id #%d, please contact " \
                                    "webmaster." % (issue.series, issue.id)
                        return render_to_response(error_template, {
                            'error_text': info_text,
                            },
                            context_instance=RequestContext(request))
                    if add_variant or upload_type == 'replacement':
                        suffix = "%d/400/" % issue.series_id
                        suffix = suffix + "%d_4_%s.jpg" % (issue.series.id,
                                                           cover.code)
                        current_im_name = settings.MEDIA_ROOT + \
                                          _local_scans + suffix
                        # check for existence, otherwise get from server
                        if not os.path.exists(current_im_name):
                            img_url = _server_prefixes[cover.server_version] \
                                      + suffix
                            urlretrieve(img_url, current_im_name)
                        im_old = Image.open(current_im_name)
                        # backup current scan
                        backup_name = os.path.splitext(current_im_name)[0] + \
                            "_" + upload_datetime.strftime('%Y%m%d_%H%M%S') + \
                            "_backup.jpg"
                        im_old.save(backup_name)
                    # im_old is kept from before
                    if add_variant:
                        # we need current scan in 400 width
                        temp = tempfile.NamedTemporaryFile(suffix='.jpg')
                        size = 400, int(400. / im.size[0] * im.size[1])
                        scaled = im.resize(size, Image.ANTIALIAS)
                        # and do the stacking
                        h = im_old.size[1] + scaled.size[1]
                        stacked = Image.new('RGBA', (400, h))
                        stacked.paste(im_old, (0, 0))
                        stacked.paste(scaled, (0, im_old.size[1]))
                        im = stacked
                        variants = codecs.open(settings.MEDIA_ROOT +
                                               _local_new_scans +
                                               "variant_covers", "a", "utf-8")
                        save_text = "series: " + str(issue.series.id) + \
                                    " issue: " + str(issue.id) + "\n"
                        save_text = uni(save_text) + "--old: " + \
                                    uni(backup_name) + \
                                    "\n--old-uploader: " + \
                                    uni(cover.contributor) + "\n--add: " + \
                                    uni(destination_name) + "\n"
                        variants.write(save_text)
                        variants.close()

                    # generate different sizes
                    scaled_name = settings.MEDIA_ROOT + _local_scans + \
                                  str(issue.series.id) + "/" + \
                                  str(issue.series.id) + "_" + \
                                  cover.code + ".jpg"
                    size = 100, int(100. / im.size[0] * im.size[1])
                    scaled = im.resize(size, Image.ANTIALIAS)
                    scaled.save(scaled_name)
                    scaled_name = settings.MEDIA_ROOT + _local_scans + \
                                  str(issue.series.id) + "/200/" + \
                                  str(issue.series.id) + "_2_" + \
                                  cover.code + ".jpg"
                    size = 200, int(200. / im.size[0] * im.size[1])
                    scaled = im.resize(size, Image.ANTIALIAS)
                    scaled.save(scaled_name)
                    scaled_name = settings.MEDIA_ROOT + _local_scans + \
                                  str(issue.series.id) + "/400/" + \
                                  str(issue.series.id) + "_4_" + \
                                  cover.code + ".jpg"
                    size = 400, int(400. / im.size[0] * im.size[1])
                    scaled = im.resize(size, Image.ANTIALIAS)
                    scaled.save(scaled_name)

                    # set cover table values
                    cover.server_version = 1
                    cover.has_small = True
                    cover.has_medium = True
                    cover.has_large = True
                    cover.has_image = True
                    cover.marked = False
                    cover.contributor = contributor
                    if not cover.series.has_gallery == True:
                        series = cover.series
                        series.has_gallery = True
                        series.save()
                    cover.modified = upload_datetime
                    cover.save()
                    store_count = codecs.open(settings.MEDIA_ROOT +
                                              _local_scans + "cover_count",
                                              "w", "utf-8")
                    store_count.write(str(Cover.objects
                                               .filter(has_image=True)
                                               .count()))
                    store_count.close()
                    if 'remember_me' in request.POST:
                        request.session['gcd_uploader_name'] = \
                            request.POST['name']
                        request.session['gcd_uploader_email'] = \
                            request.POST['email']
                    else:
                        request.session.pop('gcd_uploader_name', '')
                        request.session.pop('gcd_uploader_email', '')
                    tag = get_image_tag(issue.series.id, cover,
                                        "uploaded cover", 2)
                else:
                    os.remove(destination.name)
                    info_text = "Image is too small, only " + str(im.size) + \
                                " in size."
                    return render_to_response(upload_template, {
                        'form': form.as_ul(),
                        'info': info_text,
                        'display_cover': display_cover,
                        'upload_type': upload_type,
                        'issue': issue,
                        'style': style,
                        },
                        context_instance=RequestContext(request))

                # what else do we need
                covers_needed = Cover.objects\
                    .filter(issue__series=issue.series)\
                    .exclude(marked=False, has_large=True)
                covers_needed = covers_needed.exclude(marked=None,
                                                      has_large=True)[:15]
                return render_to_response(uploaded_template, {
                    'cover': cover,
                    'covers_needed': covers_needed,
                    'issue': issue,
                    'tag': tag,
                    'style': style},
                    context_instance=RequestContext(request))
            except IOError:
                # file type *should* be taken care of by django
                os.remove(destination.name)
                return render_to_response(upload_template, {
                    'form': form.as_ul(),
                    'info': 'Error: File "' + scan.name +
                            '" is not a valid picture.',
                    'issue': issue,
                    'display_cover': display_cover,
                    'upload_type': upload_type,
                    'style': style,
                    },
                    context_instance=RequestContext(request))
        else:
            # there is a pretty good chance we never end up here
            return render_to_response(upload_template, {
                'form': form.as_ul(),
                'issue': issue,
                'style': style},
                context_instance=RequestContext(request))
    else:
        # do we have email/name cached
        if 'gcd_uploader_email' in request.session:
            vars = {'name': request.session['gcd_uploader_name'],
                    'email': request.session['gcd_uploader_email'],
                    'remember_me': True}
        elif request.user.is_authenticated():
            vars = {'name': request.user.indexer.name,
                    'email': request.user.email,
                    'remember_me': True}
        else:
            vars = {'name': 'Your name', 'email': '*****@*****.**'}
        form = UploadScanForm(initial=vars)
        # display the form
        return render_to_response(upload_template, {
            'form': form.as_ul(),
            'issue': issue,
            'display_cover': display_cover,
            'upload_type': upload_type,
            'style': style},
            context_instance=RequestContext(request))

def reprints(story):
    try:
        if story.story_version.base_story:
            pass
    except Exception:  # no base_story relation
        return ""
    original_story = story.story_version.base_story.original_story
    printings, first_printing = get_printings(
        original_story,
        story.story_version.base_story.first_publication_date)
    country_first = False
    reprint = ""
    if first_printing:
        if first_printing.filter(id=story.id):
            if original_story[0] == 'D':
                reprint = "from Egmont (DK); "
            elif original_story[0] == 'Q':
                reprint = ""
            elif original_story[0] == 'H':
                reprint = "from Netherlands; "
            elif original_story[0] == 'S':
                reprint = "Disney Studio (US); "
            elif story.story_version.type != 'c':
                reprint = "ask Jochen to include studio code: " + \
                          original_story[0] + "; "
        # print original_story
        for source in first_printing[0:3]:
            if source.id != story.id:
                if source.id[0:4] in ["us/Z", "us/Y"]:
                    if original_story[1] == 'M':
                        reprint += "from Mickey Mouse"
                    elif original_story[1] == 'D':
                        reprint += "from Donald Duck"
                    elif original_story[1] == 'X':
                        reprint += "from Scamp"
                    else:
                        reprint += "from some strip, find out and tell Jochen"
                        print(original_story)
                    if original_story[0] == 'Y':
                        reprint += " daily"
                    elif original_story[0] == 'Z':
                        reprint += " Sunday"
                    reprint += " (King Features Syndicate) " + \
                               newspaper_date(original_story[3:]) + "; "
                else:
                    publisher = source.issue.publisherjob_set.all()
                    if publisher:
                        publisher_name = publisher[0].publisher.name
                    else:
                        publisher_name = "?"
                    gcd_reprint = gcd_series(source.issue, publisher_name)
                    #print "B", gcd_reprint
                    if gcd_reprint != "":
                        reprint += uni(gcd_reprint) + " (" + \
                                   uni(convert_isv_date(
                                       source.issue.publication_date,
                                       story.issue.series.language_code))
                    else:
                        if len(source.issue.number) == 7 \
                           and source.issue.number[4] == '-':
                            number = source.issue.number[5:7].lstrip('0') + \
                                     "/" + source.issue.number[0:4]
                        else:
                            number = source.issue.number
                        reprint += "from " + uni(source.issue.series.name) + \
                                   " (" + uni(publisher_name) + \
                                   ", ???? series) #" + number + " (" + \
                                   convert_isv_date(
                                       source.issue.publication_date,
                                       story.issue.series.language_code)
                    if source.issue.series.country_code != \
                       story.issue.series.country_code:
                        if source.issue.series.country_code\
                                 .countryname_set\
                                 .filter(language=story.language):
                            reprint += ") [" + \
                                uni(source.issue.series.country_code
                                    .countryname_set
                                    .filter(language=story.language)[0]
                                    .name) + "]; "
                        else:
                            reprint += "); "
                    else:
                        country_first = True
        if len(first_printing) > 3:
            reprint += "another " + str(len(first_printing)-3) + \
                       " issues within 4 months; "
        for source in first_printing:
            if source.issue.series.country_code == \
               story.issue.series.country_code:
                country_first = True
        if country_first == False:
            own_country = printings\
                .filter(issue__series__country_code=
                        story.issue.series.country_code)\
                .filter(issue__series__language_code=
                        story.issue.series.language_code)\
                .order_by('issue__publication_date')
            if own_country:  # and not (own_country[0] == story):
                if own_country[0].id == story.id:
                    pass  # TODO: look for reprints
                else:
                    publisher = own_country[0].issue.publisherjob_set.all()
                    if publisher:
                        publisher_name = publisher[0].publisher.name
                    else:
                        publisher_name = ""
                    gcd_reprint = gcd_series(own_country[0].issue,
                                             publisher_name)
                    if gcd_reprint != "":
                        reprint += uni(gcd_reprint) + " (" + \
                                   uni(convert_isv_date(
                                       own_country[0].issue.publication_date,
                                       story.issue.series.language_code)) + \
                                   "); "
                    else:
                        if len(own_country[0].issue.number) == 7 \
                           and own_country[0].issue.number[4] == '-':
                            number = own_country[0].issue.number[5:7]\
                                     .lstrip('0') + "/" + \
                                     own_country[0].issue.number[0:4]
                        else:
                            number = own_country[0].issue.number
                        reprint += "from " + \
                                   uni(own_country[0].issue.series.name) + \
                                   " (" + uni(publisher_name) + \
                                   ", ???? series) #" + number + " (" + \
                                   uni(convert_isv_date(
                                       own_country[0].issue.publication_date,
                                       story.issue.series.language_code)) + \
                                   "); "
        reprint = reprint[:-2]
        return reprint
    return ""

def show_credit(story, credit, force=False, default=""):
    """
    For showing the credits on the search results page.
    For creator credits force sets '?' for empty story credits.
    For cover type force sets type 'backcovers'
    """
    if not story:
        return ""

    if credit == 'type':
        if story.story_version.type == 'n':
            return 'Story'
        elif story.story_version.type == 'i':
            return 'Pinup'
        elif story.story_version.type == 'c':
            if force == False:
                return 'Cover'
            else:
                return 'Pinup'
        elif story.story_version.type == 'f':
            return 'centerfold'
        elif story.story_version.type == 't':
            return 'Text Story'
        elif story.story_version.type == 'a':
            if story.title.startswith('Letter'):
                return 'Letters'
            else:
                return 'Text Article'
        elif story.story_version.type == 'g':
            return 'Activity'
        elif story.story_version.type == 's':
            return 'strange layout'
        elif story.story_version.type in ['L', 'P']:
            return 'Pinup'
        elif story.story_version.type == 'k':
            return 'newspaper strip'

    if credit == "pencils":
        pencils = story.story_version.creatorrole_set.filter(role='a')
        credit_value = ""
        for i in pencils:
            credit_value += uni(i.creator.name) + "; "
        credit_value = credit_value[0:-2]
    elif credit == "inks":
        inks = story.story_version.creatorrole_set.filter(role='i')
        credit_value = ""
        for i in inks:
            credit_value += uni(i.creator.name) + "; "
        credit_value = credit_value[0:-2]
        if credit_value == "":  # no-inks --> inks=pencils
            credit_value = default
    elif credit == "script":
        script = story.story_version.creatorrole_set.filter(role='w')
        plot = story.story_version.creatorrole_set.filter(role='p')
        credit_value = uni("")
        for i in script:
            if len(plot) > 0 and i not in plot:
                credit_value += uni(i.creator.name) + " (" + \
                                _("script") + "); "
            else:
                credit_value += uni(i.creator.name) + "; "
        for i in plot:
            if i not in script:
                credit_value += uni(i.creator.name) + " (" + \
                                _("plot") + "); "
        translator = SupportRole.objects.filter(story=story.id)\
                                        .filter(role='t')
        #print "Trans", len(translator)
        #translator = story.supportrole_set.filter(role = 't')
        if not __credit_visible(credit_value) and force:
            credit_value = '?; '
        if len(translator) > 0:
            for i in translator:
                credit_value += uni(i.creator.name) + " (" + \
                                _("translation") + "); "
        elif default != "":
            credit_value += default + "; "
        credit_value = credit_value[0:-2]
    elif credit == "colors":
        colors = SupportRole.objects.filter(story__exact=story.id)\
                                    .filter(role='c')
        # not sure why this doesn't work, it did work in the beginning
        # colors = story.supportrole_set.filter(role = 'c')
        credit_value = ""
        for i in colors:
            credit_value += uni(i.creator.name) + "; "
        credit_value = credit_value[0:-2]
        if credit_value == "":
            credit_value = default
    elif credit == "letters":
        letters = SupportRole.objects.filter(story__exact=story.id)\
                                     .filter(role='l')
        #letters = story.supportrole_set.filter(role = 'l')
        credit_value = ""
        for i in letters:
            credit_value += uni(i.creator.name) + "; "
        credit_value = credit_value[0:-2]
        if credit_value == "":
            credit_value = default
    else:
        credit_value = story.__dict__[credit]
        if "no one" in credit_value and len(credit_value) <= 8:
            credit_value = "none"

    if not __credit_visible(credit_value) and force:
        return '?'
    else:
        return uni(credit_value.strip(','))

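
# The long if/elif chain for the 'type' credit is a candidate for a lookup
# table. A sketch only: the 'c' case (Cover vs. Pinup via force) and the
# 'a' case (Letters vs. Text Article via the title) still need the explicit
# branches above, so this table covers only the unconditional mappings.
_TYPE_NAMES = {
    'n': 'Story', 'i': 'Pinup', 'f': 'centerfold', 't': 'Text Story',
    'g': 'Activity', 's': 'strange layout', 'L': 'Pinup', 'P': 'Pinup',
    'k': 'newspaper strip',
}
# usage: _TYPE_NAMES.get(story.story_version.type, '')
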
def reprints(story):
    # bail out if the story has no base_story link
    try:
        story.story_version.base_story
    except:
        return ""
    original_story = story.story_version.base_story.original_story
    printings, first_printing = get_printings(
        original_story,
        story.story_version.base_story.first_publication_date)
    country_first = False
    reprint = ""
    if first_printing:
        if first_printing.filter(id=story.id):
            if original_story[0] == 'D':
                reprint = "from Egmont (DK); "
            elif original_story[0] == 'Q':
                reprint = ""
            elif original_story[0] == 'H':
                reprint = "from Netherlands; "
            elif original_story[0] == 'S':
                reprint = "Disney Studio (US); "
            elif story.story_version.type != 'c':
                reprint = "ask Jochen to include studio code: " + \
                    original_story[0] + "; "
        # print original_story
        for source in first_printing[0:3]:
            if source.id != story.id:
                if source.id[0:4] in ["us/Z", "us/Y"]:
                    if original_story[1] == 'M':
                        reprint += "from Mickey Mouse"
                    elif original_story[1] == 'D':
                        reprint += "from Donald Duck"
                    elif original_story[1] == 'X':
                        reprint += "from Scamp"
                    else:
                        reprint += "from some strip, find out and " \
                                   "tell Jochen"
                        print(original_story)
                    if original_story[0] == 'Y':
                        reprint += " daily"
                    elif original_story[0] == 'Z':
                        reprint += " Sunday"
                    reprint += " (King Features Syndicate) " + \
                        newspaper_date(original_story[3:]) + "; "
                else:
                    publisher = source.issue.publisherjob_set.all()
                    if publisher:
                        publisher_name = publisher[0].publisher.name
                    else:
                        publisher_name = "?"
                    gcd_reprint = gcd_series(source.issue, publisher_name)
                    # print "B", gcd_reprint
                    if gcd_reprint != "":
                        reprint += uni(gcd_reprint) + " (" + \
                            uni(convert_isv_date(
                                source.issue.publication_date,
                                story.issue.series.language_code))
                    else:
                        if len(source.issue.number) == 7 \
                           and source.issue.number[4] == '-':
                            number = source.issue.number[5:7].lstrip('0') \
                                + "/" + source.issue.number[0:4]
                        else:
                            number = source.issue.number
                        reprint += "from " + uni(source.issue.series.name) \
                            + " (" + uni(publisher_name) + \
                            ", ???? series) #" + number + " (" + \
                            convert_isv_date(
                                source.issue.publication_date,
                                story.issue.series.language_code)
                    if source.issue.series.country_code != \
                       story.issue.series.country_code:
                        if source.issue.series.country_code.\
                           countryname_set.filter(language=story.language):
                            reprint += ") [" + \
                                uni(source.issue.series.country_code.
                                    countryname_set.filter(
                                        language=story.language)[0].name) \
                                + "]; "
                        else:
                            reprint += "); "
            else:
                country_first = True
        if len(first_printing) > 3:
            reprint += "another " + str(len(first_printing) - 3) + \
                " issues within 4 months; "
        for source in first_printing:
            if source.issue.series.country_code == \
               story.issue.series.country_code:
                country_first = True
    if country_first == False:
        own_country = printings.filter(
            issue__series__country_code=story.issue.series.country_code) \
            .filter(
                issue__series__language_code=story.issue.series.
                language_code) \
            .order_by('issue__publication_date')
        if own_country:  # and not (own_country[0] == story):
            if own_country[0].id == story.id:
                pass  # TODO: look for reprints
            else:
                publisher = own_country[0].issue.publisherjob_set.all()
                if publisher:
                    publisher_name = publisher[0].publisher.name
                else:
                    publisher_name = ""
                gcd_reprint = gcd_series(own_country[0].issue,
                                         publisher_name)
                if gcd_reprint != "":
                    reprint += uni(gcd_reprint) + " (" + \
                        uni(convert_isv_date(
                            own_country[0].issue.publication_date,
                            story.issue.series.language_code)) + "); "
                else:
                    if len(own_country[0].issue.number) == 7 \
                       and own_country[0].issue.number[4] == '-':
                        number = own_country[0].issue.number[5:7] \
                            .lstrip('0') + "/" + \
                            own_country[0].issue.number[0:4]
                    else:
                        number = own_country[0].issue.number
                    reprint += "from " + \
                        uni(own_country[0].issue.series.name) + \
                        " (" + uni(publisher_name) + ", ???? series) #" + \
                        number + " (" + \
                        uni(convert_isv_date(
                            own_country[0].issue.publication_date,
                            story.issue.series.language_code)) + "); "
    reprint = reprint[:-2]
    return reprint
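# The branching above implies an ISV-style story code whose leading
# characters encode the source: 'D'/'Q'/'H'/'S' for studios, 'Y'/'Z' for
# daily/Sunday newspaper strips, with the second letter naming the
# feature. A sketch of that decoding under the same assumptions; this
# helper is made up for illustration and is not part of the module.
def describe_strip_code(code):
    feature = {'M': 'Mickey Mouse',
               'D': 'Donald Duck',
               'X': 'Scamp'}.get(code[1], 'unknown strip')
    frequency = {'Y': 'daily', 'Z': 'Sunday'}.get(code[0], '')
    return (feature + ' ' + frequency).strip()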
def migrate_reprints_other(select=-1):
    series = Series.objects.exclude(deleted=True).order_by("id")
    if select > 0:
        series = series.filter(id__gt=str((select - 1) * 1000))
        series = series.filter(id__lte=str(select * 1000))
    # exclude Lars
    series = series.exclude(id__in=[18732, 26245, 31648, 36955, 36980,
                                    36973, 36949, 36975, 36964, 36953,
                                    36967, 39648])
    series = series.exclude(id=153)    # Walt Disney's Comics and Stories
    # series = series.exclude(id=687)  # Topolino
    series = series.exclude(id=1923)   # Alan Ford
    series = series.exclude(id=2017)   # Lucky Lukes äventyr
    series = series.exclude(id=2199)   # Little Orphan Annie
    series = series.exclude(id=2672)   # Martin Mystère
    series = series.exclude(id=2695)   # Eternauta, L'
    # series = series.exclude(id=3706) # Grandi Classici Disney, I
    series = series.exclude(id=3744)   # Spicy Tales
    series = series.exclude(id=3817)   # Sandman, The,
    series = series.exclude(id=7052)   # Gustafs bästa sidor
    series = series.exclude(id=7087)   # Serie-paraden
    series = series.exclude(id=7155)   # Heavy Metal Magazine
    series = series.exclude(id=7208)   # Dr. Seuss Goes to War
    # series = series.exclude(id=7536) # Albi Di Topolino
    # series = series.exclude(id=7539) # Almanacco Topolino
    # series = series.exclude(id=7540) # Almanacco Topolino
    series = series.exclude(id=7555)   # Euracomix
    # series = series.exclude(id=7566) # Classici di Walt Disney, I
    # series = series.exclude(id=7569) # Grandi Classici Disney, I
    series = series.exclude(id=7582)   # Manga Sun
    series = series.exclude(id=7587)   # Pilot
    series = series.exclude(id=7640)   # 91:an Karlsson [julalbum]
    series = series.exclude(id=7645)   # Asterix
    series = series.exclude(id=7646)   # Asterix [nytryck]
    series = series.exclude(id=7652)   # Biffen och Bananen Ett glatt återseende
    series = series.exclude(id=7657)   # Familjen Svenssons äventyr
    series = series.exclude(id=7670)   # Lilla Fridolf [julalbum]
    series = series.exclude(id=8101)   # James Bond
    series = series.exclude(id=8110)   # Epix
    series = series.exclude(id=8118)   # Tung metall
    series = series.exclude(id=8510)   # Felix - Jan Lööf's Felix
    series = series.exclude(id=8511)   # Felix' äventyr
    series = series.exclude(id=10413)  # Naturens under
    series = series.exclude(id=10458)  # Caliber
    series = series.exclude(id=10562)  # Donald Duck & Co
    series = series.exclude(id=12068)  # Boys Love Girls...More or Less 12068
    series = series.exclude(id=12498)  # Colt
    series = series.exclude(id=14584)  # Journal de Spirou, Le
    series = series.exclude(id=18705)  # Korak
    series = series.exclude(id=19940)  # Fandom's Finest Comics
    series = series.exclude(id=21132)  # Norske Serieperler
    series = series.exclude(id=21200)  # Donald Duck & Co
    series = series.exclude(id=21672)  # Action Serien
    series = series.exclude(id=23238)  # Front serien
    series = series.exclude(id=27342)  # Donald Duck
    series = series.exclude(id=28131)  # Vill Vest
    series = series.exclude(id=29937)  # Pocket Book of Esquire Cartoons, The
    series = series.exclude(id=31372)  # Mummitrollet
    series = series.exclude(id=32745)  # Flippie Flink
    series = series.exclude(id=35202)  # Heathcliff Catch of The Day
    series = series.exclude(id=36911)  # Pyton
    series = series.exclude(id=36912)  # Pyton
    series = series.exclude(id=53)     # Wally - His Cartoons of the A.E.F. 51 51
    series = series.exclude(id=60)     # How They Draw Prohibition 72 72
    series = series.exclude(id=69)     # Aventures De Tintin, Les 55 55
    series = series.exclude(id=73)     # Mickey Mouse Magazine 77 77
    series = series.exclude(id=82)     # King Comics 658 658
    series = series.exclude(id=84)     # Popular Comics 64 64
    series = series.exclude(id=107)    # Super Comics 56 56
    series = series.exclude(id=111)    # Jumbo Comics 53 93
    series = series.exclude(id=119)    # Magic Comics 52 52
    series = series.exclude(id=240)    # USA Comics 51 51
    series = series.exclude(id=538)    # Lone Ranger, The 185 282
    series = series.exclude(id=593)    # Collana Del Tex - Prima Serie 60 60
    series = series.exclude(id=1017)   # Rocky Lane Western 61 78
    series = series.exclude(id=2867)   # Doctor Who 61 61
    series = series.exclude(id=4900)   # New Yorker Book of Lawyer Cartoons, The 85 85
    series = series.exclude(id=5839)   # Smithsonian Book of Newspaper Comics, The 118 118
    series = series.exclude(id=6193)   # Lanciostory Anno XXV 57 58
    series = series.exclude(id=6194)   # Skorpio Anno XXIII 53 53
    series = series.exclude(id=7593)   # Tutto Disney 126 126
    series = series.exclude(id=9787)   # Kalle Anka & C:o 399 414
    series = series.exclude(id=12754)  # Jokebook Comics Digest Annual 148 149
    series = series.exclude(id=14027)  # Green Hornet, The 53 53
    series = series.exclude(id=14988)  # Angel Heart 50 50
    series = series.exclude(id=15308)  # Techno 95 95
    series = series.exclude(id=16632)  # Kalar 94 94
    series = series.exclude(id=16996)  # Sabotør Q5 137 137
    series = series.exclude(id=18403)  # Madhouse Comics Digest 68 68
    series = series.exclude(id=19745)  # Sheriff Classics 62 62
    series = series.exclude(id=19751)  # Helgenen 116 118
    series = series.exclude(id=20751)  # Cartoons The French Way 154 154
    series = series.exclude(id=21769)  # Alarm 170 176
    series = series.exclude(id=22370)  # Brumle 71 71
    series = series.exclude(id=24745)  # Hjerterevyen 375 377
    series = series.exclude(id=26396)  # Jippo 137 137
    series = series.exclude(id=26559)  # Best Cartoons from Argosy 153 153
    series = series.exclude(id=29924)  # Lasso 75 75
    series = series.exclude(id=30264)  # Little Orphan Annie 59 59

    a = codecs.open("/tmp/reprints_processing", "a", "utf-8")
    import time
    for serie in series:
        c_start = time.time()
        # print serie, serie.id
        migrate_reprints_series(serie.id, standard=False)  # , do_save=False
        used_time = time.time() - c_start
        a.write(uni(serie) + uni(", ") + uni(serie.id) + uni(": ") +
                uni(used_time) + uni("\n"))
def show_credit(story, credit, force=False, default=""):
    """
    For showing the credits on the search results page.
    For creator credits force sets '?' for empty story credits.
    For cover type force sets type 'backcovers'.
    """
    if not story:
        return ""

    if credit == 'type':
        if story.story_version.type == 'n':
            return 'Story'
        elif story.story_version.type == 'i':
            return 'Pinup'
        elif story.story_version.type == 'c':
            if force == False:
                return 'Cover'
            else:
                return 'Pinup'
        elif story.story_version.type == 'f':
            return 'centerfold'
        elif story.story_version.type == 't':
            return 'Text Story'
        elif story.story_version.type == 'a':
            if story.title.startswith('Letter'):
                return 'Letters'
            else:
                return 'Text Article'
        elif story.story_version.type == 'g':
            return 'Activity'
        elif story.story_version.type == 's':
            return 'strange layout'
        elif story.story_version.type in ['L', 'P']:
            return 'Pinup'
        elif story.story_version.type == 'k':
            return 'newspaper strip'

    if credit == "pencils":
        pencils = story.story_version.creatorrole_set.filter(role='a')
        credit_value = ""
        for i in pencils:
            credit_value += uni(i.creator.name) + "; "
        credit_value = credit_value[0:-2]
    elif credit == "inks":
        inks = story.story_version.creatorrole_set.filter(role='i')
        credit_value = ""
        for i in inks:
            credit_value += uni(i.creator.name) + "; "
        credit_value = credit_value[0:-2]
        if credit_value == "":  # no inks --> inks=pencils
            credit_value = default
    elif credit == "script":
        script = story.story_version.creatorrole_set.filter(role='w')
        plot = story.story_version.creatorrole_set.filter(role='p')
        credit_value = uni("")
        for i in script:
            if len(plot) > 0 and i not in plot:
                credit_value += uni(i.creator.name) + " (" + \
                    _("script") + "); "
            else:
                credit_value += uni(i.creator.name) + "; "
        for i in plot:
            if i not in script:
                credit_value += uni(i.creator.name) + " (" + \
                    _("plot") + "); "
        translator = SupportRole.objects.filter(story=story.id).filter(
            role='t')
        # print "Trans", len(translator)
        # translator = story.supportrole_set.filter(role = 't')
        if not __credit_visible(credit_value) and force:
            credit_value = '?; '
        if len(translator) > 0:
            for i in translator:
                credit_value += uni(i.creator.name) + " (" + \
                    _("translation") + "); "
        elif default != "":
            credit_value += default + "; "
        credit_value = credit_value[0:-2]
    elif credit == "colors":
        colors = SupportRole.objects.filter(story__exact=story.id).filter(
            role='c')
        # not sure why this doesn't work, it did work in the beginning
        # colors = story.supportrole_set.filter(role = 'c')
        credit_value = ""
        for i in colors:
            credit_value += uni(i.creator.name) + "; "
        credit_value = credit_value[0:-2]
        if credit_value == "":
            credit_value = default
    elif credit == "letters":
        letters = SupportRole.objects.filter(story__exact=story.id).filter(
            role='l')
        # letters = story.supportrole_set.filter(role = 'l')
        credit_value = ""
        for i in letters:
            credit_value += uni(i.creator.name) + "; "
        credit_value = credit_value[0:-2]
        if credit_value == "":
            credit_value = default
    else:
        credit_value = story.__dict__[credit]
        if "no one" in credit_value and len(credit_value) <= 8:
            credit_value = "none"

    if not __credit_visible(credit_value) and force:
        return '?'
    else:
        return uni(credit_value.strip(','))
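# Illustrative only: how a search-results row might assemble its credit
# columns with show_credit(). The surrounding loop and the `results`
# variable are assumptions for illustration; note the no-inks fallback
# reusing the pencils credit via the `default` parameter.
def build_credit_row(results):
    rows = []
    for story in results:
        rows.append({
            'type': show_credit(story, 'type'),
            'pencils': show_credit(story, 'pencils', force=True),
            'inks': show_credit(story, 'inks',
                                default=show_credit(story, 'pencils')),
            'script': show_credit(story, 'script', force=True),
        })
    return rows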
def cover_upload(request, issue_id, add_variant=False):
    """
    Handles uploading of covers, be it
    - first upload
    - replacement upload
    - variant upload
    """
    upload_template = 'gcd/details/cover_upload.html'
    uploaded_template = 'gcd/details/cover_uploaded.html'
    error_template = 'gcd/error.html'
    style = 'default'

    # check for issue and cover
    # TODO: might do get_object_or_404 instead of own error check
    issue = Issue.objects.filter(id=int(issue_id))
    if len(issue) == 0:
        return render_to_response(error_template, {
            'error_text': 'Issue ID #' + issue_id + ' does not exist.',
        }, context_instance=RequestContext(request))
    else:
        issue = issue[0]

    cover = Cover.objects.filter(issue=issue)
    if len(cover) == 0:
        return render_to_response(error_template, {
            'error_text': 'Something wrong with issue ID #' + issue_id +
                          ' and its cover table, please contact the admins.',
        }, context_instance=RequestContext(request))
    else:
        cover = cover[0]

    if add_variant and not cover.has_image:
        info_text = "No cover present for %s. You cannot upload a variant." \
                    % cover.issue
        return render_to_response(error_template, {
            'error_text': info_text,
        }, context_instance=RequestContext(request))

    # check that we are actually allowed to upload
    if cover.has_image and not cover.marked and not add_variant:
        tag = get_image_tag(issue.series.id, cover, "existing cover", 2)
        covers_needed = Cover.objects.filter(issue__series=issue.series)
        covers_needed = covers_needed.exclude(marked=False, has_large=True)
        # TODO: make the 15 an option
        covers_needed = covers_needed.exclude(marked=None,
                                              has_large=True)[:15]
        return render_to_response(uploaded_template, {
            'cover': cover,
            'covers_needed': covers_needed,
            'issue': issue,
            'tag': tag,
            'already': "is already",
            'style': style,
        }, context_instance=RequestContext(request))

    # check what kind of upload
    if cover.has_image and cover.marked and not add_variant:
        display_cover = get_image_tag(issue.series.id, cover,
                                      "cover to replace", 2, no_cache=True)
        upload_type = 'replacement'
    elif add_variant:
        display_cover = get_image_tag(issue.series.id, cover,
                                      "first cover", 2, no_cache=True)
        upload_type = 'variant'
    else:
        display_cover = None
        upload_type = ''

    # current request is an upload
    if request.method == 'POST':
        try:
            form = UploadScanForm(request.POST, request.FILES)
        except IOError:
            # sometimes uploads misbehave; connection dropped?
            info_text = 'Something went wrong with the upload. ' + \
                        'Please <a href="' + request.path + \
                        '">try again</a>.'
            return render_to_response(error_template, {
                'error_text': mark_safe(info_text),
            }, context_instance=RequestContext(request))

        if form.is_valid():
            # user has to change defaults and enter something valid
            if request.POST['email'] == '*****@*****.**':
                request.POST['email'] = ''
            if request.POST['name'] == 'Your name':
                request.POST['name'] = ''
            form = UploadScanForm(request.POST, request.FILES)
            # TODO: even if the file is a valid image it does not survive
            # into the next form if name/email is not valid, Django issue?
            if not form.is_valid():
                return render_to_response(upload_template, {
                    'form': form.as_ul(),
                    'issue': issue,
                    'style': style,
                    'display_cover': display_cover,
                    'upload_type': upload_type,
                }, context_instance=RequestContext(request))

            # if a file is in the form, handle it
            if 'scan' in request.FILES:
                upload_datetime = datetime.today()
                scan = request.FILES['scan']
                contributor = '%s (%s)' % (request.POST['name'],
                                           request.POST['email'])
                # put new covers into media/_local_new_scans/<monthname>_<year>/
                # name: <issue_id>_<series_name>_#<issue_number>_<date>_<time>.<ext>
                scan_name = str(issue.series.id) + "_" + str(issue.id) + \
                    "_" + uni(issue).replace(' ', '_').replace('/', '-') + \
                    "_" + datetime.today().strftime('%Y%m%d_%H%M%S')
                upload_dir = settings.MEDIA_ROOT + _local_new_scans + \
                    upload_datetime.strftime('%B_%Y/').lower()
                destination_name = upload_dir + scan_name + \
                    os.path.splitext(scan.name)[1]
                if not os.path.isdir(upload_dir):
                    try:
                        os.mkdir(upload_dir)
                    except IOError:
                        info_text = "Problem with file storage for " + \
                            "uploaded cover, please contact GCD-admins."
                        return render_to_response(error_template, {
                            'error_text': info_text,
                        }, context_instance=RequestContext(request))
                # arguably a bit unlikely to happen with the current
                # naming scheme
                if os.path.exists(destination_name):
                    destination_name = \
                        os.path.splitext(destination_name)[0] + \
                        "_a" + os.path.splitext(scan.name)[1]
                # write uploaded file
                destination = open(destination_name, 'wb')
                for chunk in scan.chunks():
                    destination.write(chunk)
                destination.close()

                try:
                    # generate the different sizes we are using
                    im = Image.open(destination.name)
                    if im.size[0] >= 400:
                        if check_series_cover_dir(issue.series) == False:
                            info_text = "Problem with file storage for " + \
                                "series '%s', id #%d, please contact " \
                                "webmaster." % (issue.series, issue.id)
                            return render_to_response(error_template, {
                                'error_text': info_text,
                            }, context_instance=RequestContext(request))

                        if add_variant or upload_type == 'replacement':
                            suffix = "%d/400/" % issue.series_id
                            suffix = suffix + "%d_4_%s.jpg" % \
                                (issue.series.id, cover.code)
                            current_im_name = settings.MEDIA_ROOT + \
                                _local_scans + suffix
                            # check for existence, otherwise get from server
                            if not os.path.exists(current_im_name):
                                img_url = _server_prefixes[
                                    cover.server_version] + suffix
                                urlretrieve(img_url, current_im_name)
                            im_old = Image.open(current_im_name)
                            # backup current scan
                            backup_name = \
                                os.path.splitext(current_im_name)[0] + \
                                "_" + upload_datetime.strftime(
                                    '%Y%m%d_%H%M%S') + "_backup.jpg"
                            im_old.save(backup_name)

                        # im_old is kept from before
                        if add_variant:
                            # we need the current scan in 400 width
                            temp = tempfile.NamedTemporaryFile(suffix='.jpg')
                            size = 400, int(400. / im.size[0] * im.size[1])
                            scaled = im.resize(size, Image.ANTIALIAS)
                            # and do the stacking
                            h = im_old.size[1] + scaled.size[1]
                            stacked = Image.new('RGBA', (400, h))
                            stacked.paste(im_old, (0, 0))
                            stacked.paste(scaled, (0, im_old.size[1]))
                            im = stacked
                            variants = codecs.open(
                                settings.MEDIA_ROOT + _local_new_scans +
                                "variant_covers", "a", "utf-8")
                            save_text = "series: " + str(issue.series.id) + \
                                " issue: " + str(issue.id) + "\n"
                            save_text = uni(save_text) + "--old: " + \
                                uni(backup_name) + "\n--old-uploader: " + \
                                uni(cover.contributor) + "\n--add: " + \
                                uni(destination_name) + "\n"
                            variants.write(save_text)
                            variants.close()

                        # generate the different sizes
                        scaled_name = settings.MEDIA_ROOT + _local_scans + \
                            str(issue.series.id) + "/" + \
                            str(issue.series.id) + "_" + cover.code + ".jpg"
                        size = 100, int(100. / im.size[0] * im.size[1])
                        scaled = im.resize(size, Image.ANTIALIAS)
                        scaled.save(scaled_name)
                        scaled_name = settings.MEDIA_ROOT + _local_scans + \
                            str(issue.series.id) + "/200/" + \
                            str(issue.series.id) + "_2_" + cover.code + \
                            ".jpg"
                        size = 200, int(200. / im.size[0] * im.size[1])
                        scaled = im.resize(size, Image.ANTIALIAS)
                        scaled.save(scaled_name)
                        scaled_name = settings.MEDIA_ROOT + _local_scans + \
                            str(issue.series.id) + "/400/" + \
                            str(issue.series.id) + "_4_" + cover.code + \
                            ".jpg"
                        size = 400, int(400. / im.size[0] * im.size[1])
                        scaled = im.resize(size, Image.ANTIALIAS)
                        scaled.save(scaled_name)

                        # set cover table values
                        cover.server_version = 1
                        cover.has_small = True
                        cover.has_medium = True
                        cover.has_large = True
                        cover.has_image = True
                        cover.marked = False
                        cover.contributor = contributor
                        if not cover.series.has_gallery == True:
                            series = cover.series
                            series.has_gallery = True
                            series.save()
                        cover.modified = upload_datetime
                        cover.save()

                        store_count = codecs.open(
                            settings.MEDIA_ROOT + _local_scans +
                            "cover_count", "w", "utf-8")
                        store_count.write(
                            str(Cover.objects.filter(has_image=True)
                                .count()))
                        store_count.close()

                        if 'remember_me' in request.POST:
                            request.session['gcd_uploader_name'] = \
                                request.POST['name']
                            request.session['gcd_uploader_email'] = \
                                request.POST['email']
                        else:
                            request.session.pop('gcd_uploader_name', '')
                            request.session.pop('gcd_uploader_email', '')

                        tag = get_image_tag(issue.series.id, cover,
                                            "uploaded cover", 2)
                    else:
                        os.remove(destination.name)
                        info_text = "Image is too small, only " + \
                            str(im.size) + " in size."
                        return render_to_response(upload_template, {
                            'form': form.as_ul(),
                            'info': info_text,
                            'display_cover': display_cover,
                            'upload_type': upload_type,
                            'issue': issue,
                            'style': style,
                        }, context_instance=RequestContext(request))

                    # what else do we need
                    covers_needed = Cover.objects.filter(
                        issue__series=issue.series).exclude(
                            marked=False, has_large=True)
                    covers_needed = covers_needed.exclude(
                        marked=None, has_large=True)[:15]
                    return render_to_response(uploaded_template, {
                        'cover': cover,
                        'covers_needed': covers_needed,
                        'issue': issue,
                        'tag': tag,
                        'style': style,
                    }, context_instance=RequestContext(request))
                except IOError:
                    # file type *should* be taken care of by django
                    os.remove(destination.name)
                    return render_to_response(upload_template, {
                        'form': form.as_ul(),
                        'info': 'Error: File "' + scan.name +
                                '" is not a valid picture.',
                        'issue': issue,
                        'display_cover': display_cover,
                        'upload_type': upload_type,
                        'style': style,
                    }, context_instance=RequestContext(request))
            else:
                # there is a pretty good chance we never end up here
                return render_to_response(upload_template, {
                    'form': form.as_ul(),
                    'issue': issue,
                    'style': style,
                }, context_instance=RequestContext(request))
    else:
        # do we have email/name cached
        if 'gcd_uploader_email' in request.session:
            vars = {'name': request.session['gcd_uploader_name'],
                    'email': request.session['gcd_uploader_email'],
                    'remember_me': True}
        elif request.user.is_authenticated():
            vars = {'name': request.user.indexer.name,
                    'email': request.user.email,
                    'remember_me': True}
        else:
            vars = {'name': 'Your name', 'email': '*****@*****.**'}
        form = UploadScanForm(initial=vars)
        # display the form
        return render_to_response(upload_template, {
            'form': form.as_ul(),
            'issue': issue,
            'display_cover': display_cover,
            'upload_type': upload_type,
            'style': style,
        }, context_instance=RequestContext(request))
def fix_all_reprint_notes():
    reprint_notes = [
        ['Marvel Masterworks: Golden Age USA Comics (Marvel, 2007 series)',
         'Marvel Masterworks: Golden Age U.S.A. Comics (Marvel, 2007 series)'],
        ['(Editorial Planeta DeAgostini S.A.', '(Planeta DeAgostini'],
        ['Uncle Scrooge (Gold Key, 1962 series)',
         'Uncle Scrooge (Gold Key, 1963 series)'],
        ['Uncle Scrooge (Gold Key, 1962 Series)',
         'Uncle Scrooge (Gold Key, 1963 series)'],
        ['Drawn and Quarterly', 'Drawn & Quarterly'],
        ['Essential Wolverine #',
         'Essential Wolverine (Marvel, 1996 series) #'],
        ['Apache Kid (Marvel, 1951 series)',
         'Apache Kid (Marvel, 1950 series)'],
        ['Ghost Stories #', 'Ghost Stories (Dell, 1962 series) #'],
        ['in Tip Top Comic Monthly (K. G. Murray, 1963 series)',
         'in Tip Top Comic Monthly (K. G. Murray, 1965 series)'],
        ['from Jessy (Panini Verlag, 2004 series)',
         'from Jessy (Panini, 2004 series)'],
        ['from Jessy (Panini Verlag, 2004) #',
         'from Jessy (Panini, 2004 series) #'],
        ['World\'s Finest Comics (DC, 1940 Series)',
         'World\'s Finest Comics (DC, 1941 Series)'],
        ['Superman: The World\'s Finest Comics Archives',
         'Superman: The World\'s Finest Archives'],
        ['(DC/Vertigo, ', '(DC, '],
        ['Superman(DC,1939 Series)', 'Superman (DC, 1939 Series)'],
        ['In Essential Ghost Rider #',
         'in Essential Ghost Rider (Marvel, 2005 series) #'],
        ['from Captain Marvel Adventures #',
         'from Captain Marvel Adventures (Fawcett, 1941 Series) #'],
        ['X-Men (1963 series)', 'X-Men (Marvel, 1963 series)'],
        ['(Ediciones Zinco, ', '(Zinco, '],
        ['EC Archives: Two-Fisted Tales (Gemstone, 2007 series)',
         'EC Archives: Two-Fisted Tales (Gemstone, 2007 series)'],
        ['rom Commando #', 'rom Commando (D.C. Thomson, 1961) #'],
        [uni('från Detective Comics (DC, 1939)'),
         uni('från Detective Comics (DC, 1937)')],
        ['reprinted from ', 'from '],
        ['Reprinted from ', 'from '],
        [') in Kamp Serien #',
         '); in Kamp Serien (Se-Bladene, 1964 Series) #'],
        [') in Kamp serien #',
         '); in Kamp Serien (Se-Bladene, 1964 Series) #'],
        [') in Kamp Spesial #',
         '); in Kamp Spesial (Se-Bladene, 1986 Series) #'],
        ['in Kamp Serien #', 'in Kamp Serien (Se-Bladene, 1964 Series) #'],
        ['Kamp Spesial #', 'Kamp Spesial (Se-Bladene, 1986 Series) #'],
        ['\r\nfrom Harry die bunte Jugendzeitung (Lehning, 1958 series)#',
         '; from Harry die bunte Jugendzeitung (Lehning, 1958 series) #'],
        ['eries)#', 'eries) #'],
        ['Superman Extra (DC, 1980 Serie)',
         'Superman Extra (Ehapa, 1980 Serie)'],
        ['Superman Taschenbuch (DC, 1976 Serie)',
         'Superman Taschenbuch (Ehapa, 1976 Serie)'],
        [', Superman Taschenbuch (', '; Superman Taschenbuch ('],
        ['Batman(DC,', 'Batman (DC, '],
        ['.Seuqence', '.Sequence'],
        ['(AC Comics, ', '(AC, '],
        [' (DC 19', ' (DC, 19'],
        [' (DC 20', ' (DC, 20'],
        [' (Marvel 19', ' (Marvel, 19'],
        [' (Marvel 20', ' (Marvel, 20'],
        ['From DC Super Stars [DC, 1976 Series)',
         'From DC Super Stars (DC, 1976 Series)'],
        ['Black Cat (Harvey; 1946 series) #',
         'Black Cat (Harvey, 1946 series) #'],
        ['from Fables (DC, 2003 series) #',
         'from Fables (DC, 2002 series) #'],
        ['Testament (DC, 2005 series) #', 'Testament (DC, 2006 series) #'],
        ['from Quantum And Woody (Acclaim, 1997 Series) #',
         'from Quantum & Woody (Acclaim, 1997 Series) #'],
        ['n Nexus Archives (Dark Horse Books, 2005 series) #',
         'n Nexus Archives (Dark Horse, 2006 series) #'],
        ['Flash Album, The (K. G. Murray, 1978 series)',
         'Flash Album, The (K. G. Murray, 1976 series)'],
        ['Essential X-Men #', 'Essential X-Men (Marvel, 1996 series) #'],
        ['Marvel\'s Greatest Comics (Marvel, 1961 series)',
         'Marvel\'s Greatest Comics (Marvel, 1969 series)'],
        ['in Superboy (Ehapa Verlag, 1980 series) #',
         'in Superboy (Egmont, 1980 series) #'],
        ['Da DC: THE NEW FRONTIER #',
         'Da DC: The New Frontier (DC, 2004 series) #'],
        ['Serie-pocket (Semic AS, 1975 series) #38',
         'Serie-pocket (Semic AS, 1977 series) #38'],
        ['in Showcase Presents Martian Manhunter (DC, 2007 series) #',
         'in Showcase Presents: Martian Manhunter (DC, 2007 series) #'],
        ['Marvel Masterworks Atlas Era Heroes (Marvel, 2007 series) #',
         'Marvel Masterworks: Atlas Era Heroes (Marvel, 2007 series) #'],
        ['Da XENOZOIC TALES #', 'Da Xenozoic Tales (Kitchen Sink, 1987) #'],
        ['Super-Team-Family', 'Super-Team Family'],
        ['From Flash, The (DC, 1959 Series) #300, August 1981, 1.Sequence',
         'From Flash, The (DC, 1959 Series) #300, August 1981, 2.Sequence'],
        [uni("All-New Collectors´ Edition"),
         uni("All-New Collectors' Edition")],
        [' (Marvel/DC, 19', ' (Marvel / DC, 19'],
        ['World of Krypton [DC, 1979 ', 'World of Krypton (DC, 1979 '],
        ['DC Comics Persents (DC, 1978 Series)',
         'DC Comics Presents (DC, 1978 Series)'],
        ['from from The New Adventures of Superboy',
         'from The New Adventures of Superboy'],
        ['from from Superboy (DC, 1949 Series) #225',
         'from Superboy (DC, 1949 Series) #225'],
        ['From, Justice League of America',
         'From Justice League of America'],
        ["from, World's Finest Comics", "from World's Finest Comics"],
        ['rom TOP Comics Blitzmann [BSV - Williams, 1970 Serie]',
         'rom TOP Comics Blitzmann (BSV - Williams, 1970 Serie)'],
        ['Superboy(DC,1949 Series)', 'Superboy (DC, 1949 Series)'],
        ['Superman Superband (Ehapa,1974 Serie)',
         'Superman Superband (Ehapa, 1974 Serie)'],
        ['Batman Supermand (Ehapa, 1974 ', 'Batman Superman (Ehapa, 1974 '],
        ['from Legion of Superheroes, the [DC, 1980 Series] # ',
         'from The Legion of Super-heroes (DC, 1980 Series) #'],
        ['from Superboy Spectacular [DC', 'from Superboy Spectacular (DC'],
        [',1.Sequence', ', 1.Sequence'],
        [',2.Sequence', ', 2.Sequence'],
        [',3.Sequence', ', 3.Sequence'],
        [',5.Sequence', ', 5.Sequence'],
        [uni("Superman´s Pal,Jimmy Olsen"), "Superman's Pal, Jimmy Olsen"],
        ['Simon & Schuster', 'Simon and Schuster'],
        ['Acts of Vengeance Omnibus (Marvel, 2010 series)',
         'Acts of Vengeance Omnibus (Marvel, 2011 series)'],
        ['(Semic Press AB, ', '(Semic, '],
        ['2099 A.D. #1 (Marvel Italia, 1995)',
         '2099 A.D. (Marvel Italia, 1995) #1'],
        ['2099 A.D. #2 (Marvel Italia, 1996)',
         '2099 A.D. (Marvel Italia, 1995) #2'],
        ['2099 A.D. #3 (Marvel Italia, 1996)',
         '2099 A.D. (Marvel Italia, 1995) #3'],
        ['2099 A.D. #4 (Marvel Italia, 1996)',
         '2099 A.D. (Marvel Italia, 1995) #4'],
        ['2099 A.D. #5 (Marvel Italia, 1996)',
         '2099 A.D. (Marvel Italia, 1995) #5'],
        ['2099 A.D. #6 (Marvel Italia, 1996)',
         '2099 A.D. (Marvel Italia, 1995) #6'],
        ['2099 A.D. #7 (Mavel Italia, 1996)',
         '2099 A.D. (Marvel Italia, 1995) #7'],
        ['2099 A.D. #8 (Marvel Italia, 1996)',
         '2099 A.D. (Marvel Italia, 1995) #8'],
        ['2099 A.D. #9 (Marvel Italia, 1996)',
         '2099 A.D. (Marvel Italia, 1995) #9'],
        ['2099 A.D. #10 (Marvel Italia, 1996)',
         '2099 A.D. (Marvel Italia, 1995) #10'],
        ['2099 A.D. #11 (Marvel Itaia, 1996)',
         '2099 A.D. (Marvel Italia, 1995) #11'],
        ['2099 A.D. #12 (Marvel Italia, 1996)',
         '2099 A.D. (Marvel Italia, 1995) #12'],
        ['2099 Special #13 (Marvel Italia, 12/1996)',
         '2099 Special (Marvel Italia, 1994) #13'],
        ['2099 Special #15 (Marvel Italia, 04/1997)',
         '2099 Special (Marvel Italia, 1994) #15'],
        ['2099 Special #16 (Marvel Italia, 06/1997)',
         '2099 Special (Marvel Italia, 1994) #16'],
        ['2099 Special #17 (Marvel Italia, 08/1997)',
         '2099 Special (Marvel Italia, 1994) #17'],
        ['Stormwatch: A Finer World (DC, 1999 series) #[nn]',
         'Stormwatch: A Finer World (DC, 1999 series) #[nn]'],
        ['n Fantastic Four [Trade Paperback] (Marvel, 2003 ',
         'n Fantastic Four (Marvel, 2003 '],
        ['n The Batman Archives (DC, 1990 series)',
         'n Batman Archives (DC, 1990 series)'],
        ['n Batman Archives, The (DC, 1990 series)',
         'n Batman Archives (DC, 1990 series)'],
        ['n The Batman archives (DC, 1990 series)',
         'n Batman Archives (DC, 1990 series)'],
        ['Showcase Presents Martian Manhunter (DC, 2007 series)',
         'Showcase Presents: Martian Manhunter (DC, 2007 series)'],
        ['Boy Commandos by Joe Simon and Jack Kirby, The (DC, 2010 series)',
         'The Boy Commandos by Joe Simon & Jack Kirby (DC, 2010 series)'],
        ['Greatest Batman Stories Ever Told, The (DC, 1988 series) #nn [1]',
         'The Greatest Batman Stories Ever Told (DC, 1988 series) '
         '#[nn] [1]'],
        [uni(' (Egmont Serieförlaget AB, '), ' (Egmont, '],
        ['Magic Book (Magic Press, 2002 series)',
         'Magic Book (Magic Press, 2000 series)'],
        ['(BSV-Williams,', '(BSV - Williams,'],
        ['Marvel Masterworks: Spider-Man (',
         'Marvel Masterworks: The Amazing Spider-Man ('],
        ["Uomo Ragno, L' [Collana Super-Eroi] (",
         "L' Uomo Ragno [Collana Super-Eroi] ("],
        ['cover reprinted in G.I. ', 'in G.I. '],
        ['in Starman: Night and Day (DC, 1997 series) #[nn]',
         'in Starman (DC, 1995 series) #2'],
        ['Collected Omaha, The (Kitchen Sink Press, Inc., 1987 series) '
         '#Volume ',
         'The Collected Omaha (Kitchen Sink Press, 1987 series) #'],
        ['(Kitchen Sink Press, Inc., ', '(Kitchen Sink Press, '],
        ['Air Ace Picture Library (Fleetway Publications, 1960 series)',
         'Air Ace Picture Library (IPC Magazines, 1960 series)'],
        ['Air Ace Picture Library (Fleetway Publications, 1960)',
         'Air Ace Picture Library (IPC Magazines, 1960 series)'],
        [') in Front serien (1967 series) #',
         '); in Front serien (Williams Forlag, 1967 series) #'],
        [') in Bajonett serien (1967 series) #',
         '); in Bajonett serien (Williams Forlag, 1967 series) #'],
        [') in Front serien (1965 series) #',
         '); in Front serien (Illustrerte Klassikere, 1965 series) #'],
        ['from ? (UK) in ', 'from ? (UK); in '],
        ['(Dupuis, 1', '(Editions Dupuis, 1'],
        ['(Dupuis, 2', '(Editions Dupuis, 2'],
        ['(Dargaud, 1', uni('(Dargaud éditions, 1')],
        ['(Dargaud, 2', uni('(Dargaud éditions, 2')],
        ['Lucky Luke (Dargaud Publishing, 1968 series)',
         uni('Lucky Luke (Dargaud Benelux, 1968 series)')],
        [uni('Lucky Luke (Dargaud éditions, 1968 series)'),
         'Lucky Luke (Dargaud Benelux, 1968 series)'],
        ['Lucky Luke (Dupuis Publishing, 1949 series)',
         uni('Lucky Luke (Editions Dupuis, 1949 series)')],
        ['Vill Vest (Se-Bladene, 1957 series)',
         'Vill Vest (Se-Bladene, 1955 series)'],
        ['; o; ', '; '],
        ['(Oog & Blick, ', '(Oog & Blik, '],
        [uni('from (À Suivre) (Casterman'),
         uni('from À Suivre (Casterman')],
        ['I.W. Publishing;Super Comics', 'I. W. Publishing; Super Comics'],
        ['I.W. Publishing; Super Comics', 'I. W. Publishing; Super Comics'],
    ]
    for [old, new] in reprint_notes:
        fix_reprint_notes_global(old, new)

    old_reprint_note = ' in Vill Vest (Se-Bladene, 1955 series)'
    new_reprint_note = '; in Vill Vest (Se-Bladene, 1955 series)'
    issues = Issue.objects.filter(
        story__reprint_notes__iregex='The Lone Ranger [0-9]',
        series__id=538,
        story__deleted=False).filter(deleted=False).distinct()
    fix_reprint_notes(issues, old_reprint_note, new_reprint_note)

    series_reprint_notes = [
        [7049, 'From Superman #', 'From Superman (DC, 1939 series) #'],
        [7049, 'From Detective Comics #',
         'From Detective Comics (DC, 1937 series) #'],
        [7049, 'From Batman #', 'From Batman (DC, 1940 series) #'],
        [7049, 'From Superboy #', 'From Superboy (DC, 1949 series) #'],
        [7049, 'From Adventure Comics #',
         'From Adventure Comics (DC, 1938 series) #'],
        [7049, 'From Action Comics #',
         'From Action Comics (DC, 1938 series) #'],
        [7049, 'From World\'s Finest Comics #',
         'From World\'s Finest Comics (DC, 1941 series) #'],
        [19745, 'van ', 'from '],
        [7538, 'da Turok #', 'da Turok, Son of Stone (Gold Key, 1962) #'],
        [7538, '[Gold Key, USA]', ''],
        [20371, 'from STAR-STUDDED COMICS #',
         'from Star-Studded Comics (Texas Trio, 1963 series) #'],
        [10458, 'from Jonah Hex: Two-Gun Mojo #',
         'from Jonah Hex: Two Gun Mojo (DC, 1993 series) #'],
        [10458, 'from Jonah Hex #', 'from Jonah Hex (DC, 1977 series) #'],
        [3960, 'Da KEN PARKER #',
         'da Ken Parker (Sergio Bonelli, 1977 series) #'],
        [7537, 'da Magnus #', 'da Magnus (Gold Key, 1963) #'],
        [7537, '[GOLD KEY, USA]', ''],
        [36980, 'from Wonder Woman Vol.1 (DC, 1942 Series)',
         'from Wonder Woman (DC, 1942 Series)'],
        [18732, '. Sequence', '.Sequence'],
        [3771, 'from BLACK CAT #',
         'from Black Cat (Harvey, 1946 series) #'],
        [687, "Dell Giant Comics", "Dell Giant"],
        [687, "Four Color Comic", "Four Color"],
        [687, "Walt Disney's Stories and Comics",
         "Walt Disney's Comics and Stories"],
        [7540, 'in Walt Disney Stories and Comics (Gemstone, 2003 series) '
         '#668 May 2006',
         'in Walt Disney\'s Comics and Stories (Gemstone, 2003 series) '
         '#668'],
        [7540, 'da Kalle Anka & C:o (Egmont, serie del 1948) #1979-34 '
         '(22 Agosto 1979) [Svezia]',
         'da Kalle Anka & C:o (Hemmets Journal, 1957) #34/1979 '
         '(22 Agosto 1979)'],
        [7540, 'da Kalle Anka & C:o (Egmont, serie del 1948) #1979-35 '
         '(29 Agosto 1979) [Svezia]',
         'da Kalle Anka & C:o (Hemmets Journal, 1957) #35/1979'],
        [7566, 'Topolino (Libretto) 7/12',
         'Topolino (Libretto) 7, Topolino (Libretto) 8, '
         'Topolino (Libretto) 9, Topolino (Libretto) 10, '
         'Topolino (Libretto) 11, Topolino (Libretto) 12'],
        [1701, "Ripley's Believe It or Not True (Gold Key, 1965 series)",
         "Ripley's Believe It or Not (Gold Key, 1965 series)"],
    ]
    for [series, old, new] in series_reprint_notes:
        fix_reprint_notes_series(series, old, new)

    old_reprint_note = "Chip'N'Dale "
    new_reprint_note = "Chip 'n' Dale (Dell, 1955 series) #"
    issues = Issue.objects.filter(
        story__reprint_notes__regex="Chip'N'Dale [0-9]",
        series__id=687,
        story__deleted=False).filter(deleted=False).distinct()
    print issues.count(), old_reprint_note
    fix_reprint_notes(issues, old_reprint_note, new_reprint_note)

    old_reprint_note = "Chip'n'Dale "
    new_reprint_note = "Chip 'n' Dale (Dell, 1955 series) #"
    issues = Issue.objects.filter(
        story__reprint_notes__regex="Chip'n'Dale [0-9]",
        series__id=687,
        story__deleted=False).filter(deleted=False).distinct()
    print issues.count(), old_reprint_note
    fix_reprint_notes(issues, old_reprint_note, new_reprint_note)

    old_reprint_note = "Donald Duck "
    new_reprint_note = "Donald Duck (Dell, 1952 series) #"
    issues = Issue.objects.filter(
        story__reprint_notes__iregex="Donald Duck [0-9]",
        series__id=687,
        story__deleted=False).filter(deleted=False).distinct()
    print issues.count(), old_reprint_note
    fix_reprint_notes(issues, old_reprint_note, new_reprint_note)

    old_reprint_note = "Mickey Mouse "
    new_reprint_note = "Mickey Mouse (Dell, 1952 series) #"
    issues = Issue.objects.filter(
        story__reprint_notes__iregex="Mickey Mouse [0-9]",
        series__id=687,
        story__deleted=False).filter(deleted=False).distinct()
    print issues.count(), old_reprint_note
    fix_reprint_notes(issues, old_reprint_note, new_reprint_note)

    old_reprint_note = "Dell Giant "
    new_reprint_note = "Dell Giant (Dell, 1959 series) #"
    issues = Issue.objects.filter(
        story__reprint_notes__iregex="Dell Giant [0-9]",
        series__id=687,
        story__deleted=False).filter(deleted=False).distinct()
    print issues.count(), old_reprint_note
    fix_reprint_notes(issues, old_reprint_note, new_reprint_note)

    old_reprint_note = "Walt Disney's Comics and Stories "
    new_reprint_note = "Walt Disney's Comics and Stories (Dell, 1940 " \
                       "series) #"
    issues = Issue.objects.filter(
        story__reprint_notes__iregex="Walt Disney's Comics and Stories "
                                     "[0-9]",
        series__id=687,
        story__deleted=False).filter(deleted=False).distinct()
    print issues.count(), old_reprint_note
    fix_reprint_notes(issues, old_reprint_note, new_reprint_note)

    for i in range(86, 126):
        old = '2000 AD (IPC Magazines Ltd, 1977 series) #%d' % i
        new = '2000 AD and Star Lord (IPC Magazines Ltd, 1978 series) ' \
              '#%d' % i
        fix_reprint_notes_global(old, new)
    for i in range(127, 177):
        old = '2000 AD (IPC Magazines Ltd, 1977 series) #%d' % i
        new = '2000 AD and Tornado (IPC Magazines Ltd, 1978 series) ' \
              '#%d' % i
        fix_reprint_notes_global(old, new)

    norwegian_series = [
        [22049, ' in Kamp Serien (Se-Bladene, 1964)',
         '; from Kamp Serien (Se-Bladene, 1964)'],
        [22049, ' in Kamp serien (Se-Bladene, 1964)',
         '; from Kamp Serien (Se-Bladene, 1964)'],
        [22049, uni(' in På Vingene '), uni('; in På Vingene ')],
        [10459, uni(' in På Vingene '), uni('; in På Vingene ')],
        [28131, ' in Vill Vest (Se-Bladene, 1955 series)',
         '; in Vill Vest (Se-Bladene, 1955 series)'],
        [21672, ' in Action serien (1976 series)',
         '; in Action serien (Atlantic, 1976 series)'],
        [21672, 'from Combat Picture Library (1960',
         'from Combat Picture Library (Micron, 1960'],
        [21672, 'from Combat Picture Library #',
         'from Combat Picture Library (Micron, 1960 series) #'],
        [23238, 'in Granat serien (1976 series)',
         '; in Granat serien (Atlantic, 1976 series)'],
        [23238, '; Granat serien (1976 series)',
         '; in Granat serien (Atlantic, 1976 series)'],
        [23238, ' in Front serien (1967 series)',
         '; in Front serien (Williams, 1967 series)'],
        [23238, '; Action serien (1976 series)',
         '; in Action serien (Atlantic, 1976 series)'],
        [23238, ' in Action serien (1976 series)',
         '; in Action serien (Atlantic, 1976 series)'],
        [22804, ' in Front serien (1967 series)',
         '; in Front serien (Williams, 1967 series)'],
        [22804, ' in Action serien (1976 series)',
         '; in Action serien (Atlantic, 1976 series)'],
        [22804, '; Action serien (1976 series)',
         '; in Action serien (Atlantic, 1976 series)'],
        [22804, 'in Granat serien (1976 series)',
         '; in Granat serien (Atlantic, 1976 series)'],
        [22804, '; Granat serien (1976 series)',
         '; in Granat serien (Atlantic, 1976 series)'],
        [23238, 'from Combat Picture Library (1960',
         'from Combat Picture Library (Micron, '],
        [21769, 'in Granat serien (1976 series)',
         '; in Granat serien (Atlantic, 1976 series)'],
        [21961, '; Action serien (1976 series)',
         '; in Action serien (Atlantic, 1976 series)'],
        [21961, 'in Granat serien (1976 series)',
         '; in Granat serien (Atlantic, 1976 series)'],
        [21961, '; Granat serien (1976 series)',
         '; in Granat serien (Atlantic, 1976 series)'],
        [21961, 'from Combat Picture Library #',
         'from Combat Picture Library (Micron, 1960 series) #'],
        [21960, 'in Action serien (1976 series)',
         '; in Action serien (Atlantic, 1976 series)'],
        [21960, '; Action serien (1976 series)',
         '; in Action serien (Atlantic, 1976 series)'],
        [23447, 'in Granat serien (1976 series)',
         '; in Granat serien (Atlantic, 1976 series)'],
        [23447, '; Granat serien (1976 series)',
         '; in Granat serien (Atlantic, 1976 series)'],
        [21672, ' in Bajonett serien (1967 series)',
         '; in Bajonett serien (Williams, 1967 series)'],
        [21672, ' in Front serien (1967 series)',
         '; in Front serien (Williams, 1967 series)'],
        [21672, ' in Front serien (1965 series)',
         '; in Front serien (Illustrerte Klassikere, 1965 series)'],
        [21672, ' in Alarm (1967 series)',
         '; in Alarm (Williams, 1967 series)'],
        [21672, ' in Alarm (1964 series)',
         '; in Alarm (Illustrerte Klassikere, 1964 series)'],
        [21672, ' in Bajonett serien (Williams Forlag A/S',
         '; in Bajonett serien (Williams'],
        [23238, ' in Bajonett serien (1967 series)',
         '; in Bajonett serien (Williams, 1967 series)'],
        [23238, ' in Front serien (1965 series)',
         '; in Front serien (Illustrerte Klassikere, 1965 series)'],
        [23447, ' in Front serien (1967 series)',
         '; in Front serien (Williams, 1967 series)'],
        [21960, ' in Front serien (1967 series)',
         '; in Front serien (Williams, 1967 series)'],
        [23447, ' in Bajonett serien (1967 series)',
         '; in Bajonett serien (Williams, 1967 series)'],
        [23447, ' in Alarm (1967 series)',
         '; in Alarm (Williams, 1967 series)'],
        [23447, ' in Front serien (1965 series)',
         '; in Front serien (Illustrerte Klassikere, 1965 series)'],
        [26337, ' in Jessy ', '; in Jessy '],
        [21200, 'from Egmont (DK) ', 'from Egmont (DK); '],
        [21200, ' in Donald Duck', '; in Donald Duck'],
        [10562, 'Tegneserie Bokklubben #',
         'Tegneserie Bokklubben (Hjemmet, 1985) #'],
        [10562, ' in Walt Disney', '; in Walt Disney'],
        [10562, ' in Donald Duck', '; in Donald Duck'],
        [10562, '] in ', ']; in '],
        [10562, ') in ', '); in '],
        [10562, '? in ', '?; in '],
        [16059, ') in ', '); in '],
        [16197, ') in ', '); in '],
        [16994, ') in ', '); in '],
    ]
    for [series, old, new] in norwegian_series:
        fix_reprint_notes_series(series, old, new, check_double_semi=True)
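# The fix_* helpers driven by the [old, new] pairs above are defined
# elsewhere in the codebase. As a point of reference, a plausible
# implementation boils down to a string replace over the reprint notes of
# matching stories; this sketch is an assumption for illustration, not
# the project's actual helper.
def fix_reprint_notes_sketch(issues, old, new, check_double_semi=False):
    for issue in issues:
        for story in issue.story_set.filter(reprint_notes__contains=old):
            story.reprint_notes = story.reprint_notes.replace(old, new)
            if check_double_semi:
                # collapse accidental ';;' introduced by the replacement
                story.reprint_notes = story.reprint_notes.replace(';;', ';')
            story.save()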
def add_variants(for_real=False):
    file = open('sorted_variants')
    lines = file.readlines()
    issue_old = -1
    for line in lines:
        content = line.split('; ')
        series = int(content[0])
        issue = int(content[1])
        # modified = datetime.strptime(content[2], '%Y%m%d_%H%M%S')
        modified = datetime(*(time.strptime(content[2],
                                            '%Y%m%d_%H%M%S')[0:6]))
        backup_filename = content[3]
        next_filename = content[4]
        contributor = content[5].split('\n')[0]
        cur_cover = Cover.objects.filter(issue__id=issue).latest()
        if issue_old == issue:
            # we only have the old datetime for the first file from the
            # old db; for further variants the datetime needs to come
            # from the last used filename (that's why the lines are
            # sorted by issue id)
            if for_real and cur_cover == cover_old:
                # this shouldn't happen
                modified = datetime(*(time.strptime(
                    os.path.splitext(last_uploaded)[0][-15:],
                    '%Y%m%d_%H%M%S')[0:6]))
                print last_uploaded, modified
                print cur_cover.issue, backup_filename, next_filename
                raise ValueError
            else:
                modified = datetime(*(time.strptime(
                    os.path.splitext(last_uploaded)[0][-15:],
                    '%Y%m%d_%H%M%S')[0:6]))
                print last_uploaded, modified
                print cur_cover.issue, backup_filename, next_filename
            copy_cover_content(cur_cover, for_real)
        # series, issue, code, has_image, server_version stay the same
        if for_real:
            cur_cover.contributor = contributor
            cur_cover.modified = modified
            cur_cover.file_extension = None
        else:
            print "changed contributor", cur_cover.contributor, contributor
            print "changed modified", cur_cover.modified, modified
        if modified > datetime(2009, 10, 2, 14, 0, 0):
            # we have the uploaded file
            scan_filename = str(cur_cover.issue.series.id) + "_" + \
                str(cur_cover.issue.id) + "_" + \
                uni(cur_cover.issue).replace(' ', '_').replace('/', '-') + \
                "_" + cur_cover.modified.strftime('%Y%m%d_%H%M%S')
            upload_dir = settings.MEDIA_ROOT + _local_new_scans + \
                cur_cover.modified.strftime('%B_%Y/').lower()
            [extension, scan_filename] = find_original_cover(upload_dir +
                                                             scan_filename)
            if not for_real:
                print "--------------------", extension, scan_filename
            if not extension:
                raise IOError
            cur_cover.file_extension = extension
            new_filename = settings.MEDIA_ROOT + _local_scans_by_id + \
                str(int(cur_cover.id / 1000)) + '/uploads/' + \
                str(cur_cover.id) + \
                cur_cover.modified.strftime('_%Y%m%d_%H%M%S') + \
                cur_cover.file_extension
            if for_real:
                shutil.copy(scan_filename, new_filename)
                generate_sizes(cur_cover, new_filename)
        else:
            # we don't, so we use the backup
            old_filename = settings.MEDIA_ROOT + '/img/gcd/' + \
                backup_filename
            if not os.path.exists(old_filename):
                print old_filename
                print 'wget images.comics.org/img/gcd/' + backup_filename
                urlretrieve('http://images.comics.org/img/gcd/' +
                            backup_filename, old_filename)
                # raise IOError
            if for_real:
                # not really the uploaded file
                cur_cover.file_extension = '.jpg'
                new_filename = settings.MEDIA_ROOT + _local_scans_by_id + \
                    str(int(cur_cover.id / 1000)) + '/uploads/' + \
                    str(cur_cover.id) + \
                    cur_cover.modified.strftime('_%Y%m%d_%H%M%S') + \
                    cur_cover.file_extension
                shutil.copy(old_filename, new_filename)
                generate_sizes(cur_cover, new_filename)
        if for_real:
            cur_cover.save()
            print cur_cover.issue.id, cur_cover.issue
        issue_old = issue
        last_uploaded = next_filename
        cover_old = cur_cover
def cover_upload(request, cover_id, add_variant=False):
    """
    Handles uploading of covers, be it
    - first upload
    - replacement upload
    - variant upload
    """
    upload_template = 'gcd/details/cover_upload.html'
    uploaded_template = 'gcd/details/cover_uploaded.html'
    style = 'default'

    # check for issue and cover
    cover = get_object_or_404(Cover, id=cover_id)
    issue = cover.issue

    if add_variant and not cover.has_image:
        error_text = "No cover present for %s. You cannot upload a " \
                     "variant." % cover.issue
        return render_error(request, error_text, redirect=False)

    # check that we are actually allowed to upload
    if cover.has_image and not cover.marked and not add_variant:
        tag = get_image_tag(cover, "existing cover", 2)
        covers_needed = Cover.objects.filter(issue__series=issue.series)
        covers_needed = covers_needed.exclude(marked=False, has_image=True)
        # TODO: make the 15 an option
        covers_needed = covers_needed.exclude(marked=None,
                                              has_image=True)[:15]
        return render_to_response(uploaded_template, {
            'cover': cover,
            'covers_needed': covers_needed,
            'issue': issue,
            'tag': tag,
            'already': "is already",
            'style': style,
        }, context_instance=RequestContext(request))

    # check what kind of upload
    if cover.has_image and cover.marked and not add_variant:
        display_cover = get_image_tag(cover, "cover to replace", 2,
                                      no_cache=True)
        upload_type = 'replacement'
    elif add_variant:
        display_cover = get_image_tag(cover, "first cover", 2,
                                      no_cache=True)
        upload_type = 'variant'
    else:
        display_cover = None
        upload_type = ''

    # current request is an upload
    if request.method == 'POST':
        try:
            form = UploadScanForm(request.POST, request.FILES)
        except IOError:
            # sometimes uploads misbehave; connection dropped?
            error_text = 'Something went wrong with the upload. ' + \
                         'Please <a href="' + request.path + \
                         '">try again</a>.'
            return render_error(request, error_text, redirect=False,
                                is_safe=True)

        if form.is_valid():
            # user has to change defaults and enter something valid
            if request.POST['email'] == '*****@*****.**':
                request.POST['email'] = ''
            if request.POST['name'] == 'Your name':
                request.POST['name'] = ''
            form = UploadScanForm(request.POST, request.FILES)
            # TODO: even if the file is a valid image it does not survive
            # into the next form if name/email is not valid, Django issue?
            if not form.is_valid():
                return render_to_response(upload_template, {
                    'form': form.as_ul(),
                    'cover': cover,
                    'issue': issue,
                    'style': style,
                    'display_cover': display_cover,
                    'upload_type': upload_type,
                }, context_instance=RequestContext(request))

            # if a file is in the form, handle it
            if 'scan' in request.FILES:
                upload_datetime = datetime.today()
                scan = request.FILES['scan']
                contributor = '%s (%s)' % (request.POST['name'],
                                           request.POST['email'])
                # at first (in case something goes wrong) put new covers
                # into media/<_local_new_scans>/<monthname>_<year>/
                # with name <cover_id>_<date>_<time>.<ext>
                scan_name = str(cover.id) + "_" + \
                    upload_datetime.strftime('%Y%m%d_%H%M%S')
                upload_dir = settings.MEDIA_ROOT + _local_new_scans + \
                    upload_datetime.strftime('%B_%Y/').lower()
                destination_name = upload_dir + scan_name + \
                    os.path.splitext(scan.name)[1]
                if not os.path.isdir(upload_dir):
                    try:
                        os.mkdir(upload_dir)
                    except IOError:
                        error_text = "Problem with file storage for " + \
                            "uploaded cover, please report an error."
                        return render_error(request, error_text,
                                            redirect=False)
                # arguably a bit unlikely to happen with the current
                # naming scheme
                if os.path.exists(destination_name):
                    destination_name = \
                        os.path.splitext(destination_name)[0] + \
                        "_a" + os.path.splitext(scan.name)[1]
                # write uploaded file
                destination = open(destination_name, 'wb')
                for chunk in scan.chunks():
                    destination.write(chunk)
                destination.close()

                try:
                    # generate the different sizes we are using
                    im = Image.open(destination.name)
                    if im.size[0] >= 400:
                        if check_cover_dir(cover) == False:
                            error_text = "Problem with file storage for " \
                                "cover with id %d for %s, please report " \
                                "an error." % (cover.id, uni(issue))
                            return render_error(request, error_text,
                                                redirect=False)
                        base_dir = str(int(cover.id / 1000))
                        if upload_type == 'replacement':
                            # if we don't have the original file, copy it
                            # from w400; otherwise do nothing, the old
                            # cover file stays due to the datetime string
                            # in the filename
                            if not cover.file_extension:
                                sub_dir = "/w400/"
                                extension = ".jpg"
                                current_im_name = settings.MEDIA_ROOT + \
                                    _local_scans_by_id + base_dir + \
                                    sub_dir + str(cover.id) + extension
                                # check for existence
                                if not os.path.exists(current_im_name):
                                    error_text = "Problem with existing " \
                                        "file for cover with id %d for " \
                                        "%s, please report an error." \
                                        % (cover.id, uni(issue))
                                    return render_error(request,
                                                        error_text,
                                                        redirect=False)
                                # use this for debugging locally
                                # img_url = _server_prefixes[
                                #     cover.server_version] + suffix
                                # urlretrieve(img_url, current_im_name)
                                backup_name = settings.MEDIA_ROOT + \
                                    _local_scans_by_id + base_dir + \
                                    "/uploads/" + str(cover.id) + \
                                    cover.modified.strftime(
                                        '_%Y%m%d_%H%M%S') + extension
                                shutil.copy(current_im_name, backup_name)
                        if add_variant:
                            return render_error(
                                request,
                                'Adding variant covers is not yet '
                                'implemented.',
                                redirect=False)

                        # generate the different sizes
                        generate_sizes(cover, im)

                        # set cover table values
                        cover.server_version = 1
                        cover.has_image = True
                        cover.marked = False
                        cover.contributor = contributor
                        if not cover.series.has_gallery == True:
                            series = cover.series
                            series.has_gallery = True
                            series.save()
                        cover.modified = upload_datetime
                        cover.extension = \
                            os.path.splitext(destination_name)[1]
                        im_name = settings.MEDIA_ROOT + \
                            _local_scans_by_id + base_dir + "/uploads/" + \
                            str(cover.id) + \
                            upload_datetime.strftime('_%Y%m%d_%H%M%S') + \
                            cover.extension
                        shutil.move(destination_name, im_name)
                        cover.save()

                        cover_count = CountStats.objects.filter(
                            name='covers')[0]
                        cover_count.count += 1
                        cover_count.save()

                        if 'remember_me' in request.POST:
                            request.session['gcd_uploader_name'] = \
                                request.POST['name']
                            request.session['gcd_uploader_email'] = \
                                request.POST['email']
                        else:
                            request.session.pop('gcd_uploader_name', '')
                            request.session.pop('gcd_uploader_email', '')

                        tag = get_image_tag(cover, "uploaded cover", 2)
                    else:
                        os.remove(destination.name)
                        info_text = "Image is too small, only " + \
                            str(im.size) + " in size."
                        return render_to_response(upload_template, {
                            'form': form.as_ul(),
                            'info': info_text,
                            'cover': cover,
                            'display_cover': display_cover,
                            'upload_type': upload_type,
                            'issue': issue,
                            'style': style,
                        }, context_instance=RequestContext(request))

                    # what else do we need
                    covers_needed = Cover.objects.filter(
                        issue__series=issue.series).exclude(
                            marked=False, has_image=True)
                    covers_needed = covers_needed.exclude(
                        marked=None, has_image=True)[:15]
                    return render_to_response(uploaded_template, {
                        'cover': cover,
                        'covers_needed': covers_needed,
                        'issue': issue,
                        'tag': tag,
                        'style': style,
                    }, context_instance=RequestContext(request))
                except IOError:
                    # file type *should* be taken care of by django
                    os.remove(destination.name)
                    return render_to_response(upload_template, {
                        'form': form.as_ul(),
                        'info': 'Error: File "' + scan.name +
                                '" is not a valid picture.',
                        'cover': cover,
                        'issue': issue,
                        'display_cover': display_cover,
                        'upload_type': upload_type,
                        'style': style,
                    }, context_instance=RequestContext(request))
            else:
                # there is a pretty good chance we never end up here
                return render_to_response(upload_template, {
                    'form': form.as_ul(),
                    'cover': cover,
                    'issue': issue,
                    'style': style,
                }, context_instance=RequestContext(request))
    else:
        # do we have email/name cached
        if 'gcd_uploader_email' in request.session:
            vars = {'name': request.session['gcd_uploader_name'],
                    'email': request.session['gcd_uploader_email'],
                    'remember_me': True}
        elif request.user.is_authenticated():
            vars = {'name': unicode(request.user.indexer),
                    'email': request.user.email,
                    'remember_me': True}
        else:
            vars = {'name': 'Your name', 'email': '*****@*****.**'}
        form = UploadScanForm(initial=vars)
        # display the form
        return render_to_response(upload_template, {
            'form': form.as_ul(),
            'cover': cover,
            'issue': issue,
            'display_cover': display_cover,
            'upload_type': upload_type,
            'style': style,
        }, context_instance=RequestContext(request))
def copy_covers_new():
    #covers = Cover.objects.filter(has_image=True).filter(
    #    modified__gte='2009-10-02 14:00').filter(
    #    modified__lt='2009-11-15 00:00')
    #covers = Cover.objects.filter(has_image=True).filter(
    #    modified__gte='2009-11-15 00:00').filter(
    #    modified__lt='2009-11-23 00:00')
    covers = Cover.objects.filter(has_image=True).filter(
        modified__gte='2009-11-23 00:00')
    cnt = 0
    for cover in covers:
        issue = cover.issue
        scan_name = str(issue.series.id) + "_" + str(issue.id) + "_" + \
                    uni(issue).replace(' ', '_').replace('/', '-') + \
                    "_" + cover.modified.strftime('%Y%m%d_%H%M%S')
        upload_dir = settings.MEDIA_ROOT + _local_new_scans + \
                     cover.modified.strftime('%B_%Y/').lower()
        old_filename = upload_dir + scan_name
        new_filename = settings.MEDIA_ROOT + _local_scans_by_id + \
                       str(int(cover.id / 1000)) + '/uploads/' + \
                       str(cover.id) + \
                       cover.modified.strftime('_%Y%m%d_%H%M%S')
        check_cover_dir(cover)
        [extension, old_filename] = find_original_cover(old_filename)
        if not extension:
            # the file may have been stamped one second after the cover's
            # modified time, so retry with the timestamp shifted by a second
            scan_name = str(issue.series.id) + "_" + str(issue.id) + "_" + \
                        uni(issue).replace(' ', '_').replace('/', '-') + \
                        "_" + (cover.modified +
                               timedelta(0, 1)).strftime('%Y%m%d_%H%M%S')
            old_filename = upload_dir + scan_name
            [extension, old_filename] = find_original_cover(old_filename)
            if not extension:
                print cover.issue, old_filename
                raise IOError
        new_filename += extension
        shutil.copy(old_filename, new_filename)
        os.chmod(new_filename,
                 stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
        im = Image.open(new_filename)
        if im.mode != "RGB":
            print "Image Mode:", im.mode
            im = im.convert("RGB")
        print cover.id, cover.issue, extension
        # generate the different sizes (OK, we could just copy the files
        # in this case...)
        scaled_name = settings.MEDIA_ROOT + _local_scans_by_id + \
                      str(int(cover.id / 1000)) + "/w100/" + \
                      str(cover.id) + ".jpg"
        size = 100, int(100. / im.size[0] * im.size[1])
        scaled = im.resize(size, Image.ANTIALIAS)
        scaled.save(scaled_name)
        scaled_name = settings.MEDIA_ROOT + _local_scans_by_id + \
                      str(int(cover.id / 1000)) + "/w200/" + \
                      str(cover.id) + ".jpg"
        size = 200, int(200. / im.size[0] * im.size[1])
        scaled = im.resize(size, Image.ANTIALIAS)
        scaled.save(scaled_name)
        scaled_name = settings.MEDIA_ROOT + _local_scans_by_id + \
                      str(int(cover.id / 1000)) + "/w400/" + \
                      str(cover.id) + ".jpg"
        size = 400, int(400. / im.size[0] * im.size[1])
        scaled = im.resize(size, Image.ANTIALIAS)
        scaled.save(scaled_name)
        cover.file_extension = extension
        cover.save()
        cnt += 1
        if cnt % 100 == 0:
            print cnt
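
# The three resize-and-save blocks in copy_covers_new() above differ only in
# the target width. A sketch of how they could be collapsed into one helper;
# _scale_cover_to_width is a hypothetical name shown only to illustrate the
# refactoring and is not used by the existing code.
def _scale_cover_to_width(cover, im, width):
    # keep the aspect ratio: the height follows from the requested width
    size = width, int(float(width) / im.size[0] * im.size[1])
    scaled = im.resize(size, Image.ANTIALIAS)
    scaled_name = settings.MEDIA_ROOT + _local_scans_by_id + \
                  str(int(cover.id / 1000)) + "/w%d/" % width + \
                  str(cover.id) + ".jpg"
    scaled.save(scaled_name)

# inside the loop over covers this would read:
#     for width in (100, 200, 400):
#         _scale_cover_to_width(cover, im, width)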
def fix_all_reprint_notes():
    reprint_notes = [
        ['Marvel Masterworks: Golden Age USA Comics (Marvel, 2007 series)', 'Marvel Masterworks: Golden Age U.S.A. Comics (Marvel, 2007 series)'],
        ['(Editorial Planeta DeAgostini S.A.', '(Planeta DeAgostini'],
        ['Uncle Scrooge (Gold Key, 1962 series)', 'Uncle Scrooge (Gold Key, 1963 series)'],
        ['Uncle Scrooge (Gold Key, 1962 Series)', 'Uncle Scrooge (Gold Key, 1963 series)'],
        ['Drawn and Quarterly', 'Drawn & Quarterly'],
        ['Essential Wolverine #', 'Essential Wolverine (Marvel, 1996 series) #'],
        ['Apache Kid (Marvel, 1951 series)', 'Apache Kid (Marvel, 1950 series)'],
        ['Ghost Stories #', 'Ghost Stories (Dell, 1962 series) #'],
        ['in Tip Top Comic Monthly (K. G. Murray, 1963 series)', 'in Tip Top Comic Monthly (K. G. Murray, 1965 series)'],
        ['from Jessy (Panini Verlag, 2004 series)', 'from Jessy (Panini, 2004 series)'],
        ['from Jessy (Panini Verlag, 2004) #', 'from Jessy (Panini, 2004 series) #'],
        ['World\'s Finest Comics (DC, 1940 Series)', 'World\'s Finest Comics (DC, 1941 Series)'],
        ['Superman: The World\'s Finest Comics Archives', 'Superman: The World\'s Finest Archives'],
        ['(DC/Vertigo, ', '(DC, '],
        ['Superman(DC,1939 Series)', 'Superman (DC, 1939 Series)'],
        ['In Essential Ghost Rider #', 'in Essential Ghost Rider (Marvel, 2005 series) #'],
        ['from Captain Marvel Adventures #', 'from Captain Marvel Adventures (Fawcett, 1941 Series) #'],
        ['X-Men (1963 series)', 'X-Men (Marvel, 1963 series)'],
        ['(Ediciones Zinco, ', '(Zinco, '],
        ['EC Archives: Two-Fisted Tales (Gemstone, 2007 series)', 'EC Archives: Two-Fisted Tales (Gemstone, 2007 series)'],
        ['rom Commando #', 'rom Commando (D.C. Thomson, 1961) #'],
        [uni('från Detective Comics (DC, 1939)'), uni('från Detective Comics (DC, 1937)')],
        ['reprinted from ', 'from '],
        ['Reprinted from ', 'from '],
        [') in Kamp Serien #', '); in Kamp Serien (Se-Bladene, 1964 Series) #'],
        [') in Kamp serien #', '); in Kamp Serien (Se-Bladene, 1964 Series) #'],
        [') in Kamp Spesial #', '); in Kamp Spesial (Se-Bladene, 1986 Series) #'],
        ['in Kamp Serien #', 'in Kamp Serien (Se-Bladene, 1964 Series) #'],
        ['Kamp Spesial #', 'Kamp Spesial (Se-Bladene, 1986 Series) #'],
        ['\r\nfrom Harry die bunte Jugendzeitung (Lehning, 1958 series)#', '; from Harry die bunte Jugendzeitung (Lehning, 1958 series) #'],
        ['eries)#', 'eries) #'],
        ['Superman Extra (DC, 1980 Serie)', 'Superman Extra (Ehapa, 1980 Serie)'],
        ['Superman Taschenbuch (DC, 1976 Serie)', 'Superman Taschenbuch (Ehapa, 1976 Serie)'],
        [', Superman Taschenbuch (', '; Superman Taschenbuch ('],
        ['Batman(DC,', 'Batman (DC, '],
        ['.Seuqence', '.Sequence'],
        ['(AC Comics, ', '(AC, '],
        [' (DC 19', ' (DC, 19'],
        [' (DC 20', ' (DC, 20'],
        [' (Marvel 19', ' (Marvel, 19'],
        [' (Marvel 20', ' (Marvel, 20'],
        ['From DC Super Stars [DC, 1976 Series)', 'From DC Super Stars (DC, 1976 Series)'],
        ['Black Cat (Harvey; 1946 series) #', 'Black Cat (Harvey, 1946 series) #'],
        ['from Fables (DC, 2003 series) #', 'from Fables (DC, 2002 series) #'],
        ['Testament (DC, 2005 series) #', 'Testament (DC, 2006 series) #'],
        ['from Quantum And Woody (Acclaim, 1997 Series) #', 'from Quantum & Woody (Acclaim, 1997 Series) #'],
        ['n Nexus Archives (Dark Horse Books, 2005 series) #', 'n Nexus Archives (Dark Horse, 2006 series) #'],
        ['Flash Album, The (K. G. Murray, 1978 series)', 'Flash Album, The (K. G. Murray, 1976 series)'],
        ['Essential X-Men #', 'Essential X-Men (Marvel, 1996 series) #'],
        ['Marvel\'s Greatest Comics (Marvel, 1961 series)', 'Marvel\'s Greatest Comics (Marvel, 1969 series)'],
        ['in Superboy (Ehapa Verlag, 1980 series) #', 'in Superboy (Egmont, 1980 series) #'],
        ['Da DC: THE NEW FRONTIER #', 'Da DC: The New Frontier (DC, 2004 series) #'],
        ['Serie-pocket (Semic AS, 1975 series) #38', 'Serie-pocket (Semic AS, 1977 series) #38'],
        ['in Showcase Presents Martian Manhunter (DC, 2007 series) #', 'in Showcase Presents: Martian Manhunter (DC, 2007 series) #'],
        ['Marvel Masterworks Atlas Era Heroes (Marvel, 2007 series) #', 'Marvel Masterworks: Atlas Era Heroes (Marvel, 2007 series) #'],
        ['Da XENOZOIC TALES #', 'Da Xenozoic Tales (Kitchen Sink, 1987) #'],
        ['Super-Team-Family', 'Super-Team Family'],
        ['From Flash, The (DC, 1959 Series) #300, August 1981, 1.Sequence', 'From Flash, The (DC, 1959 Series) #300, August 1981, 2.Sequence'],
        [uni("All-New Collectors´ Edition"), uni("All-New Collectors' Edition")],
        [' (Marvel/DC, 19', ' (Marvel / DC, 19'],
        ['World of Krypton [DC, 1979 ', 'World of Krypton (DC, 1979 '],
        ['DC Comics Persents (DC, 1978 Series)', 'DC Comics Presents (DC, 1978 Series)'],
        ['from from The New Adventures of Superboy', 'from The New Adventures of Superboy'],
        ['from from Superboy (DC, 1949 Series) #225', 'from Superboy (DC, 1949 Series) #225'],
        ['From, Justice League of America', 'From Justice League of America'],
        ["from, World's Finest Comics", "from World's Finest Comics"],
        ['rom TOP Comics Blitzmann [BSV - Williams, 1970 Serie]', 'rom TOP Comics Blitzmann (BSV - Williams, 1970 Serie)'],
        ['Superboy(DC,1949 Series)', 'Superboy (DC, 1949 Series)'],
        ['Superman Superband (Ehapa,1974 Serie)', 'Superman Superband (Ehapa, 1974 Serie)'],
        ['Batman Supermand (Ehapa, 1974 ', 'Batman Superman (Ehapa, 1974 '],
        ['from Legion of Superheroes, the [DC, 1980 Series] # ', 'from The Legion of Super-heroes (DC, 1980 Series) #'],
        ['from Superboy Spectacular [DC', 'from Superboy Spectacular (DC'],
        [',1.Sequence', ', 1.Sequence'],
        [',2.Sequence', ', 2.Sequence'],
        [',3.Sequence', ', 3.Sequence'],
        [',5.Sequence', ', 5.Sequence'],
        [uni("Superman´s Pal,Jimmy Olsen"), "Superman's Pal, Jimmy Olsen"],
        ['Simon & Schuster', 'Simon and Schuster'],
        ['Acts of Vengeance Omnibus (Marvel, 2010 series)', 'Acts of Vengeance Omnibus (Marvel, 2011 series)'],
        ['(Semic Press AB, ', '(Semic, '],
        ['2099 A.D. #1 (Marvel Italia, 1995)', '2099 A.D. (Marvel Italia, 1995) #1'],
        ['2099 A.D. #2 (Marvel Italia, 1996)', '2099 A.D. (Marvel Italia, 1995) #2'],
        ['2099 A.D. #3 (Marvel Italia, 1996)', '2099 A.D. (Marvel Italia, 1995) #3'],
        ['2099 A.D. #4 (Marvel Italia, 1996)', '2099 A.D. (Marvel Italia, 1995) #4'],
        ['2099 A.D. #5 (Marvel Italia, 1996)', '2099 A.D. (Marvel Italia, 1995) #5'],
        ['2099 A.D. #6 (Marvel Italia, 1996)', '2099 A.D. (Marvel Italia, 1995) #6'],
        ['2099 A.D. #7 (Mavel Italia, 1996)', '2099 A.D. (Marvel Italia, 1995) #7'],
        ['2099 A.D. #8 (Marvel Italia, 1996)', '2099 A.D. (Marvel Italia, 1995) #8'],
        ['2099 A.D. #9 (Marvel Italia, 1996)', '2099 A.D. (Marvel Italia, 1995) #9'],
        ['2099 A.D. #10 (Marvel Italia, 1996)', '2099 A.D. (Marvel Italia, 1995) #10'],
        ['2099 A.D. #11 (Marvel Itaia, 1996)', '2099 A.D. (Marvel Italia, 1995) #11'],
        ['2099 A.D. #12 (Marvel Italia, 1996)', '2099 A.D. (Marvel Italia, 1995) #12'],
        ['2099 Special #13 (Marvel Italia, 12/1996)', '2099 Special (Marvel Italia, 1994) #13'],
        ['2099 Special #15 (Marvel Italia, 04/1997)', '2099 Special (Marvel Italia, 1994) #15'],
        ['2099 Special #16 (Marvel Italia, 06/1997)', '2099 Special (Marvel Italia, 1994) #16'],
        ['2099 Special #17 (Marvel Italia, 08/1997)', '2099 Special (Marvel Italia, 1994) #17'],
        ['Stormwatch: A Finer World (DC, 1999 series) #[nn]', 'Stormwatch: A Finer World (DC, 1999 series) #[nn]'],
        ['n Fantastic Four [Trade Paperback] (Marvel, 2003 ', 'n Fantastic Four (Marvel, 2003 '],
        ['n The Batman Archives (DC, 1990 series)', 'n Batman Archives (DC, 1990 series)'],
        ['n Batman Archives, The (DC, 1990 series)', 'n Batman Archives (DC, 1990 series)'],
        ['n The Batman archives (DC, 1990 series)', 'n Batman Archives (DC, 1990 series)'],
        ['Showcase Presents Martian Manhunter (DC, 2007 series)', 'Showcase Presents: Martian Manhunter (DC, 2007 series)'],
        ['Boy Commandos by Joe Simon and Jack Kirby, The (DC, 2010 series)', 'The Boy Commandos by Joe Simon & Jack Kirby (DC, 2010 series)'],
        ['Greatest Batman Stories Ever Told, The (DC, 1988 series) #nn [1]', 'The Greatest Batman Stories Ever Told (DC, 1988 series) #[nn] [1]'],
        [uni(' (Egmont Serieförlaget AB, '), ' (Egmont, '],
        ['Magic Book (Magic Press, 2002 series)', 'Magic Book (Magic Press, 2000 series)'],
        ['(BSV-Williams,', '(BSV - Williams,'],
        ['Marvel Masterworks: Spider-Man (', 'Marvel Masterworks: The Amazing Spider-Man ('],
        ["Uomo Ragno, L' [Collana Super-Eroi] (", "L' Uomo Ragno [Collana Super-Eroi] ("],
        ['cover reprinted in G.I. ', 'in G.I. '],
        ['in Starman: Night and Day (DC, 1997 series) #[nn]', 'in Starman (DC, 1995 series) #2'],
        ['Collected Omaha, The (Kitchen Sink Press, Inc., 1987 series) #Volume ', 'The Collected Omaha (Kitchen Sink Press, 1987 series) #'],
        ['(Kitchen Sink Press, Inc., ', '(Kitchen Sink Press, '],
        ['Air Ace Picture Library (Fleetway Publications, 1960 series)', 'Air Ace Picture Library (IPC Magazines, 1960 series)'],
        ['Air Ace Picture Library (Fleetway Publications, 1960)', 'Air Ace Picture Library (IPC Magazines, 1960 series)'],
        [') in Front serien (1967 series) #', '); in Front serien (Williams Forlag, 1967 series) #'],
        [') in Bajonett serien (1967 series) #', '); in Bajonett serien (Williams Forlag, 1967 series) #'],
        [') in Front serien (1965 series) #', '); in Front serien (Illustrerte Klassikere, 1965 series) #'],
        ['from ? (UK) in ', 'from ? (UK); in '],
        ['(Dupuis, 1', '(Editions Dupuis, 1'],
        ['(Dupuis, 2', '(Editions Dupuis, 2'],
        ['(Dargaud, 1', uni('(Dargaud éditions, 1')],
        ['(Dargaud, 2', uni('(Dargaud éditions, 2')],
        ['Lucky Luke (Dargaud Publishing, 1968 series)', uni('Lucky Luke (Dargaud Benelux, 1968 series)')],
        [uni('Lucky Luke (Dargaud éditions, 1968 series)'), 'Lucky Luke (Dargaud Benelux, 1968 series)'],
        ['Lucky Luke (Dupuis Publishing, 1949 series)', uni('Lucky Luke (Editions Dupuis, 1949 series)')],
        ['Vill Vest (Se-Bladene, 1957 series)', 'Vill Vest (Se-Bladene, 1955 series)'],
        ['; o; ', '; '],
        ['(Oog & Blick, ', '(Oog & Blik, '],
        [uni('from (À Suivre) (Casterman'), uni('from À Suivre (Casterman')],
        ['I.W. Publishing;Super Comics', 'I. W. Publishing; Super Comics'],
        ['I.W. Publishing; Super Comics', 'I. W. Publishing; Super Comics'],
    ]
    for [old, new] in reprint_notes:
        fix_reprint_notes_global(old, new)

    old_reprint_note = ' in Vill Vest (Se-Bladene, 1955 series)'
    new_reprint_note = '; in Vill Vest (Se-Bladene, 1955 series)'
    issues = Issue.objects.filter(
        story__reprint_notes__iregex='The Lone Ranger [0-9]',
        series__id=538,
        story__deleted=False).filter(deleted=False).distinct()
    fix_reprint_notes(issues, old_reprint_note, new_reprint_note)

    series_reprint_notes = [
        [7049, 'From Superman #', 'From Superman (DC, 1939 series) #'],
        [7049, 'From Detective Comics #', 'From Detective Comics (DC, 1937 series) #'],
        [7049, 'From Batman #', 'From Batman (DC, 1940 series) #'],
        [7049, 'From Superboy #', 'From Superboy (DC, 1949 series) #'],
        [7049, 'From Adventure Comics #', 'From Adventure Comics (DC, 1938 series) #'],
        [7049, 'From Action Comics #', 'From Action Comics (DC, 1938 series) #'],
        [7049, 'From World\'s Finest Comics #', 'From World\'s Finest Comics (DC, 1941 series) #'],
        [19745, 'van ', 'from '],
        [7538, 'da Turok #', 'da Turok, Son of Stone (Gold Key, 1962) #'],
        [7538, '[Gold Key, USA]', ''],
        [20371, 'from STAR-STUDDED COMICS #', 'from Star-Studded Comics (Texas Trio, 1963 series) #'],
        [10458, 'from Jonah Hex: Two-Gun Mojo #', 'from Jonah Hex: Two Gun Mojo (DC, 1993 series) #'],
        [10458, 'from Jonah Hex #', 'from Jonah Hex (DC, 1977 series) #'],
        [3960, 'Da KEN PARKER #', 'da Ken Parker (Sergio Bonelli, 1977 series) #'],
        [7537, 'da Magnus #', 'da Magnus (Gold Key, 1963) #'],
        [7537, '[GOLD KEY, USA]', ''],
        [36980, 'from Wonder Woman Vol.1 (DC, 1942 Series)', 'from Wonder Woman (DC, 1942 Series)'],
        [18732, '. Sequence', '.Sequence'],
        [3771, 'from BLACK CAT #', 'from Black Cat (Harvey, 1946 series) #'],
        [687, "Dell Giant Comics", "Dell Giant"],
        [687, "Four Color Comic", "Four Color"],
        [687, "Walt Disney's Stories and Comics", "Walt Disney's Comics and Stories"],
        [7540, 'in Walt Disney Stories and Comics (Gemstone, 2003 series) #668 May 2006', 'in Walt Disney\'s Comics and Stories (Gemstone, 2003 series) #668'],
        [7540, 'da Kalle Anka & C:o (Egmont, serie del 1948) #1979-34 (22 Agosto 1979) [Svezia]', 'da Kalle Anka & C:o (Hemmets Journal, 1957) #34/1979 (22 Agosto 1979)'],
        [7540, 'da Kalle Anka & C:o (Egmont, serie del 1948) #1979-35 (29 Agosto 1979) [Svezia]', 'da Kalle Anka & C:o (Hemmets Journal, 1957) #35/1979'],
        [7566, 'Topolino (Libretto) 7/12', 'Topolino (Libretto) 7, Topolino (Libretto) 8, Topolino (Libretto) 9, Topolino (Libretto) 10, Topolino (Libretto) 11, Topolino (Libretto) 12'],
        [1701, "Ripley's Believe It or Not True (Gold Key, 1965 series)", "Ripley's Believe It or Not (Gold Key, 1965 series)"],
    ]
    for [series, old, new] in series_reprint_notes:
        fix_reprint_notes_series(series, old, new)

    old_reprint_note = "Chip'N'Dale "
    new_reprint_note = "Chip 'n' Dale (Dell, 1955 series) #"
    issues = Issue.objects.filter(
        story__reprint_notes__regex="Chip'N'Dale [0-9]",
        series__id=687,
        story__deleted=False).filter(deleted=False).distinct()
    print issues.count(), old_reprint_note
    fix_reprint_notes(issues, old_reprint_note, new_reprint_note)

    old_reprint_note = "Chip'n'Dale "
    new_reprint_note = "Chip 'n' Dale (Dell, 1955 series) #"
    issues = Issue.objects.filter(
        story__reprint_notes__regex="Chip'n'Dale [0-9]",
        series__id=687,
        story__deleted=False).filter(deleted=False).distinct()
    print issues.count(), old_reprint_note
    fix_reprint_notes(issues, old_reprint_note, new_reprint_note)

    old_reprint_note = "Donald Duck "
    new_reprint_note = "Donald Duck (Dell, 1952 series) #"
    issues = Issue.objects.filter(
        story__reprint_notes__iregex="Donald Duck [0-9]",
        series__id=687,
        story__deleted=False).filter(deleted=False).distinct()
    print issues.count(), old_reprint_note
    fix_reprint_notes(issues, old_reprint_note, new_reprint_note)

    old_reprint_note = "Mickey Mouse "
    new_reprint_note = "Mickey Mouse (Dell, 1952 series) #"
    issues = Issue.objects.filter(
        story__reprint_notes__iregex="Mickey Mouse [0-9]",
        series__id=687,
        story__deleted=False).filter(deleted=False).distinct()
    print issues.count(), old_reprint_note
    fix_reprint_notes(issues, old_reprint_note, new_reprint_note)

    old_reprint_note = "Dell Giant "
    new_reprint_note = "Dell Giant (Dell, 1959 series) #"
    issues = Issue.objects.filter(
        story__reprint_notes__iregex="Dell Giant [0-9]",
        series__id=687,
        story__deleted=False).filter(deleted=False).distinct()
    print issues.count(), old_reprint_note
    fix_reprint_notes(issues, old_reprint_note, new_reprint_note)

    old_reprint_note = "Walt Disney's Comics and Stories "
    new_reprint_note = "Walt Disney's Comics and Stories (Dell, 1940 series) #"
    issues = Issue.objects.filter(
        story__reprint_notes__iregex="Walt Disney's Comics and Stories [0-9]",
        series__id=687,
        story__deleted=False).filter(deleted=False).distinct()
    print issues.count(), old_reprint_note
    fix_reprint_notes(issues, old_reprint_note, new_reprint_note)

    for i in range(86, 126):
        old = '2000 AD (IPC Magazines Ltd, 1977 series) #%d' % i
        new = '2000 AD and Star Lord (IPC Magazines Ltd, 1978 series) #%d' % i
        fix_reprint_notes_global(old, new)
    for i in range(127, 177):
        old = '2000 AD (IPC Magazines Ltd, 1977 series) #%d' % i
        new = '2000 AD and Tornado (IPC Magazines Ltd, 1978 series) #%d' % i
        fix_reprint_notes_global(old, new)

    norwegian_series = [
        [22049, ' in Kamp Serien (Se-Bladene, 1964)', '; from Kamp Serien (Se-Bladene, 1964)'],
        [22049, ' in Kamp serien (Se-Bladene, 1964)', '; from Kamp Serien (Se-Bladene, 1964)'],
        [22049, uni(' in På Vingene '), uni('; in På Vingene ')],
        [10459, uni(' in På Vingene '), uni('; in På Vingene ')],
        [28131, ' in Vill Vest (Se-Bladene, 1955 series)', '; in Vill Vest (Se-Bladene, 1955 series)'],
        [21672, ' in Action serien (1976 series)', '; in Action serien (Atlantic, 1976 series)'],
        [21672, 'from Combat Picture Library (1960', 'from Combat Picture Library (Micron, 1960'],
        [21672, 'from Combat Picture Library #', 'from Combat Picture Library (Micron, 1960 series) #'],
        [23238, 'in Granat serien (1976 series)', '; in Granat serien (Atlantic, 1976 series)'],
        [23238, '; Granat serien (1976 series)', '; in Granat serien (Atlantic, 1976 series)'],
        [23238, ' in Front serien (1967 series)', '; in Front serien (Williams, 1967 series)'],
        [23238, '; Action serien (1976 series)', '; in Action serien (Atlantic, 1976 series)'],
        [23238, ' in Action serien (1976 series)', '; in Action serien (Atlantic, 1976 series)'],
        [22804, ' in Front serien (1967 series)', '; in Front serien (Williams, 1967 series)'],
        [22804, ' in Action serien (1976 series)', '; in Action serien (Atlantic, 1976 series)'],
        [22804, '; Action serien (1976 series)', '; in Action serien (Atlantic, 1976 series)'],
        [22804, 'in Granat serien (1976 series)', '; in Granat serien (Atlantic, 1976 series)'],
        [22804, '; Granat serien (1976 series)', '; in Granat serien (Atlantic, 1976 series)'],
        [23238, 'from Combat Picture Library (1960', 'from Combat Picture Library (Micron, '],
        [21769, 'in Granat serien (1976 series)', '; in Granat serien (Atlantic, 1976 series)'],
        [21961, '; Action serien (1976 series)', '; in Action serien (Atlantic, 1976 series)'],
        [21961, 'in Granat serien (1976 series)', '; in Granat serien (Atlantic, 1976 series)'],
        [21961, '; Granat serien (1976 series)', '; in Granat serien (Atlantic, 1976 series)'],
        [21961, 'from Combat Picture Library #', 'from Combat Picture Library (Micron, 1960 series) #'],
        [21960, 'in Action serien (1976 series)', '; in Action serien (Atlantic, 1976 series)'],
        [21960, '; Action serien (1976 series)', '; in Action serien (Atlantic, 1976 series)'],
        [23447, 'in Granat serien (1976 series)', '; in Granat serien (Atlantic, 1976 series)'],
        [23447, '; Granat serien (1976 series)', '; in Granat serien (Atlantic, 1976 series)'],
        [21672, ' in Bajonett serien (1967 series)', '; in Bajonett serien (Williams, 1967 series)'],
        [21672, ' in Front serien (1967 series)', '; in Front serien (Williams, 1967 series)'],
        [21672, ' in Front serien (1965 series)', '; in Front serien (Illustrerte Klassikere, 1965 series)'],
        [21672, ' in Alarm (1967 series)', '; in Alarm (Williams, 1967 series)'],
        [21672, ' in Alarm (1964 series)', '; in Alarm (Illustrerte Klassikere, 1964 series)'],
        [21672, ' in Bajonett serien (Williams Forlag A/S', '; in Bajonett serien (Williams'],
        [23238, ' in Bajonett serien (1967 series)', '; in Bajonett serien (Williams, 1967 series)'],
        [23238, ' in Front serien (1965 series)', '; in Front serien (Illustrerte Klassikere, 1965 series)'],
        [23447, ' in Front serien (1967 series)', '; in Front serien (Williams, 1967 series)'],
        [21960, ' in Front serien (1967 series)', '; in Front serien (Williams, 1967 series)'],
        [23447, ' in Bajonett serien (1967 series)', '; in Bajonett serien (Williams, 1967 series)'],
        [23447, ' in Alarm (1967 series)', '; in Alarm (Williams, 1967 series)'],
        [23447, ' in Front serien (1965 series)', '; in Front serien (Illustrerte Klassikere, 1965 series)'],
        [26337, ' in Jessy ', '; in Jessy '],
        [21200, 'from Egmont (DK) ', 'from Egmont (DK); '],
        [21200, ' in Donald Duck', '; in Donald Duck'],
        [10562, 'Tegneserie Bokklubben #', 'Tegneserie Bokklubben (Hjemmet, 1985) #'],
        [10562, ' in Walt Disney', '; in Walt Disney'],
        [10562, ' in Donald Duck', '; in Donald Duck'],
        [10562, '] in ', ']; in '],
        [10562, ') in ', '); in '],
        [10562, '? in ', '?; in '],
        [16059, ') in ', '); in '],
        [16197, ') in ', '); in '],
        [16994, ') in ', '); in '],
    ]
    for [series, old, new] in norwegian_series:
        fix_reprint_notes_series(series, old, new, check_double_semi=True)
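
# fix_reprint_notes(), fix_reprint_notes_global() and fix_reprint_notes_series()
# are defined elsewhere. As a reading aid, a minimal sketch of what
# fix_reprint_notes() presumably does, assuming a plain str.replace() over the
# reprint_notes of the stories of the passed issues; the reverse accessor
# story_set and the body are assumptions based on the calls above, not the
# actual implementation.
def _fix_reprint_notes_sketch(issues, old_reprint_note, new_reprint_note):
    for issue in issues:
        for story in issue.story_set.filter(deleted=False):
            if old_reprint_note in story.reprint_notes:
                story.reprint_notes = story.reprint_notes.replace(
                    old_reprint_note, new_reprint_note)
                story.save()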