def genre_migration(issues): changes = [] for i in issues: c = migrate_reserve(i, 'issue', 'to migrate genre names') if c: ir = c.issuerevisions.all()[0] sr = c.storyrevisions.all() changed = False for s in sr: s.genre, keywords = convert_genre(s.genre, ir.series.language.code) if keywords: if s.keywords: s.keywords += '; ' + keywords else: s.keywords = keywords if not changed: s.compare_changes() if s.is_changed: changed = True s.save() if changed: changes.append((c, True)) else: print "nothing changed in ", c c.discard(anon) c.delete() else: print "%s is reserved" % i if len(changes) > 25: do_auto_approve(changes, 'migrating genre names') changes = [] do_auto_approve(changes, 'migrating genre names')
def move_leading_article(article): changes = [] issues = Issue.objects.filter( story__feature__endswith=", %s" % article, reserved=False, deleted=False).distinct().exclude( story__feature='Spider-Man and Human Torch, The') issues = issues.distinct() # limit the number processed at a time, there are some odd problems if not for i in issues[:250]: stories = i.active_stories().filter(feature__endswith=", %s" % article) c = migrate_reserve( i, 'issue', 'for moving ", %s" to the beginning of the feature' % article) if c: sr = c.storyrevisions.filter(feature__endswith=", %s" % article) for s in sr: s.feature = "%s %s" % (article, s.feature[:-(len(article) + 2)]) print s.feature s.save() changes.append((c, True)) do_auto_approve(changes, 'move of leading article in feature') return True
def migrate_osd(issues): changes = [] for i in issues[:450]: c = None try: date = i.notes[length:length+10] year = int(date[:4]) month = int(date[5:7]) day = int(date[8:10]) c = migrate_reserve(i, 'issue', 'to move the on-sale date') except ValueError: print "on-sale date not used", i, i.notes if c: ir = c.issuerevisions.all()[0] ir.notes = ir.notes[length+10:].lstrip('.').strip() if ir.notes: print 'new:', ir.notes print 'old:', ir.issue.notes ir.year_on_sale = year ir.month_on_sale = month ir.day_on_sale = day ir.save() changes.append((c, True)) do_auto_approve(changes, 'moving of on-sale date information')
def fix_italian(series_id, series_name): changes = [] issues = Issue.objects.filter(series__id = series_id) for issue in issues: c = migrate_reserve(issue, 'issue', 'to fix reprint notes') if c: crs = c.storyrevisions.exclude(reprint_notes="").exclude(reprint_notes__icontains='?') changed = False for cr in crs: do_italian_fixing(series_name, cr) if not changed: cr.compare_changes() if cr.is_changed: changed = True cr.save() if changed: changes.append((c, True)) else: print "nothing changed in ", c c.discard(anon) c.delete() else: print "%s is reserved" % issue if len(changes) > 10: do_auto_approve(changes, 'fixing reprint notes') changes = [] do_auto_approve(changes, 'fixing reprint notes')
def migrate_osd(issues): changes = [] for i in issues[:450]: c = None try: date = i.notes[length:length + 10] year = int(date[:4]) month = int(date[5:7]) day = int(date[8:10]) c = migrate_reserve(i, 'issue', 'to move the on-sale date') except ValueError: print "on-sale date not used", i, i.notes if c: ir = c.issuerevisions.all()[0] ir.notes = ir.notes[length + 10:].lstrip('.').strip() if ir.notes: print 'new:', ir.notes print 'old:', ir.issue.notes ir.year_on_sale = year ir.month_on_sale = month ir.day_on_sale = day ir.save() changes.append((c, True)) do_auto_approve(changes, 'moving of on-sale date information')
def migrate_reprints_series(number, standard = True, do_save = True): issues = Issue.objects.exclude(deleted=True).filter(series__id = number, reserved=False).filter(story__reprint_notes__contains=' ').distinct() migrated = [] for issue in issues: #print issue if issue.active_stories().exclude(reprint_notes=None).exclude(reprint_notes='').exclude(reprint_notes='from ?').exclude(reprint_notes=u'fr\xe5n ?').exclude(reprint_notes='uit ?').exclude(reprint_notes='da ?').count(): changeset = migrate_reserve(issue, 'issue', 'to migrate reprint notes into links') if not changeset: # this should not happen since we filter out reserved issues raise ValueError, "%s is reserved" % issue things = changeset.storyrevisions.all() #things = issue.active_stories() things = things.exclude(reprint_notes=None).exclude(reprint_notes='').exclude(reprint_notes='from ?').exclude(reprint_notes=u'fr\xe5n ?').exclude(reprint_notes='uit ?').exclude(reprint_notes='da ?') is_changed = False if not standard: things = things.exclude(reprint_notes__icontains=' syndicate)').exclude(reprint_notes__icontains=' egmont (').exclude(reprint_notes__icontains=' egmont [') for i in things: if i.reprint_notes: #print i.reprint_notes is_changed |= migrate_reprint_notes(i, standard=standard, do_save=do_save) #else: #i.save() if is_changed or changeset.reprintrevisions.count(): migrated.append((changeset, True)) else: # nothing migrated print issue, 'nothing migrated' changeset.discard(changeset.indexer) # free reservation changeset.delete() # no need to keep changeset if len(migrated) > 100: do_auto_approve(migrated, 'reprint note migration') migrated = [] if len(migrated): do_auto_approve(migrated, 'reprint note migration')
def migrate_isbn(): series_set = Series.objects.filter(notes__istartswith="ISBN", deleted=False, issue_count=1, issue__isbn='') changes = [] for series in series_set[:200]: change_series = migrate_reserve(series, 'series', 'for the migration of ISBNs') if change_series: issue = series.active_issues().get() change_issue = migrate_reserve(issue, 'issue', 'for the migration of ISBNs') if change_issue is None: # could not reserve issue, no reason to keep changeset in db change_series.discard(anon) # to cleanup reserved state change_series.delete() else: success, auto_approve_series, auto_approve_issue = migrate_isbn_series_issue(change_series, change_issue) if success: changes.append((change_series, auto_approve_series)) changes.append((change_issue, auto_approve_issue)) else: print u"ISBN not valid for %s: %s" % (series, series.notes) # no valid isbn, no reason to keep changeset in db change_series.discard(anon) # to cleanup reserved state change_series.delete() change_issue.discard(anon) # to cleanup reserved state change_issue.delete() do_auto_approve(changes, 'migration of ISBNs')
def migrate_isbn(): series_set = Series.objects.filter(notes__istartswith="ISBN", deleted=False, issue_count=1, issue__isbn='') changes = [] for series in series_set[:200]: change_series = migrate_reserve(series, 'series', 'for the migration of ISBNs') if change_series: issue = series.active_issues().get() change_issue = migrate_reserve(issue, 'issue', 'for the migration of ISBNs') if change_issue is None: # could not reserve issue, no reason to keep changeset in db change_series.discard(anon) # to cleanup reserved state change_series.delete() else: success, auto_approve_series, auto_approve_issue = migrate_isbn_series_issue( change_series, change_issue) if success: changes.append((change_series, auto_approve_series)) changes.append((change_issue, auto_approve_issue)) else: print u"ISBN not valid for %s: %s" % (series, series.notes) # no valid isbn, no reason to keep changeset in db change_series.discard(anon) # to cleanup reserved state change_series.delete() change_issue.discard(anon) # to cleanup reserved state change_issue.delete() do_auto_approve(changes, 'migration of ISBNs')
def fix_reprint_notes(issues, old_reprint_note, new_reprint_note, check_double_semi=False, exact=False): changes = [] for i in issues: c = migrate_reserve(i, 'issue', 'to fix reprint notes') if c: ir = c.issuerevisions.all()[0] if exact: crs = c.storyrevisions.filter(reprint_notes=old_reprint_note) else: crs = c.storyrevisions.filter(reprint_notes__icontains=old_reprint_note) changed = False for cr in crs: cr.reprint_notes = cr.reprint_notes.replace(old_reprint_note, new_reprint_note) if check_double_semi: cr.reprint_notes = cr.reprint_notes.replace(';;', ';') cr.reprint_notes = cr.reprint_notes.replace('; ;', ';') if not changed: cr.compare_changes() if cr.is_changed: changed = True cr.save() if changed: changes.append((c, True)) else: print "nothing changed in ", c c.discard(anon) c.delete() else: print "%s is reserved" % i if len(changes) > 10: do_auto_approve(changes, 'fixing reprint notes') changes = [] do_auto_approve(changes, 'fixing reprint notes')
def migrate_if(issues):
    """Blank indicia_frequency and set the no_indicia_frequency flag on
    the given issues (at most 250 per run), then auto-approve."""
    changes = []
    for i in issues[:250]:
        c = migrate_reserve(i, 'issue',
                            'to set the no_indicia_frequency flag')
        if c:
            ir = c.issuerevisions.all()[0]
            ir.indicia_frequency = u''
            ir.no_indicia_frequency = True
            ir.save()
            changes.append((c, True))
    do_auto_approve(changes, 'moving of indicia_frequency information')
def move_pub_notes():
    """Move publication_notes into the (empty) notes field of series and
    blank the old field.  Processes at most 250 series per run."""
    changes = []
    series = Series.objects.filter(notes="", deleted=False)\
                           .exclude(publication_notes="")
    for s in series[:250]:
        c = migrate_reserve(s, 'series',
                            'for moving publication notes to notes')
        if c:
            sr = c.seriesrevisions.all()[0]
            sr.notes = sr.publication_notes
            sr.publication_notes = ""
            sr.save()
            changes.append((c, True))
    do_auto_approve(changes, 'move of publication notes')
    return True
def mark_as_singleton():
    """Flag one-issue series (issue number '[nn]', no indicia frequency)
    as singletons.  Processes at most 200 series per run."""
    changes = []
    series = Series.objects.filter(issue__number='[nn]',
                                   issue__indicia_frequency='')\
                           .filter(issue_count=1, is_singleton=False)
    for s in series[:200]:
        c = migrate_reserve(s, 'series', 'for marking as singleton')
        if c:
            sr = c.seriesrevisions.get()
            sr.is_singleton = True
            sr.save()
            changes.append((c, True))
    do_auto_approve(changes, 'marking of singleton')
    return True
def fix_reprint_notes(issues, old_reprint_note, new_reprint_note): changes = [] for i in issues[:450]: c = migrate_reserve(i, 'issue', 'to fix reprint notes') if c: ir = c.issuerevisions.all()[0] crs = c.storyrevisions.filter(reprint_notes__icontains=old_reprint_note) for cr in crs: cr.reprint_notes = cr.reprint_notes.replace(old_reprint_note, new_reprint_note) cr.save() changes.append((c, True)) else: print "%s is reserved" % i do_auto_approve(changes, 'fixing reprint notes')
def fix_reprint_notes(issues, old_reprint_note, new_reprint_note): changes = [] for i in issues[:450]: c = migrate_reserve(i, 'issue', 'to fix reprint notes') if c: ir = c.issuerevisions.all()[0] crs = c.storyrevisions.filter( reprint_notes__icontains=old_reprint_note) for cr in crs: cr.reprint_notes = cr.reprint_notes.replace( old_reprint_note, new_reprint_note) cr.save() changes.append((c, True)) else: print "%s is reserved" % i do_auto_approve(changes, 'fixing reprint notes')
def mark_as_singleton():
    """Set is_singleton on series with exactly one issue numbered '[nn]'
    and no indicia frequency.  At most 200 series per run."""
    changes = []
    series = Series.objects.filter(
        issue__number='[nn]', issue__indicia_frequency='').filter(
        issue_count=1, is_singleton=False)
    for s in series[:200]:
        c = migrate_reserve(s, 'series', 'for marking as singleton')
        if c:
            sr = c.seriesrevisions.get()
            sr.is_singleton = True
            sr.save()
            changes.append((c, True))
    do_auto_approve(changes, 'marking of singleton')
    return True
def migrate_reprints_series(number, standard=True, do_save=True): issues = Issue.objects.exclude(deleted=True).filter( series__id=number, reserved=False).filter(story__reprint_notes__contains=' ').distinct() migrated = [] for issue in issues: #print issue if issue.active_stories().exclude(reprint_notes=None).exclude( reprint_notes='').exclude(reprint_notes='from ?').exclude( reprint_notes=u'fr\xe5n ?').exclude( reprint_notes='uit ?').exclude( reprint_notes='da ?').count(): changeset = migrate_reserve(issue, 'issue', 'to migrate reprint notes into links') if not changeset: # this should not happen since we filter out reserved issues raise ValueError, "%s is reserved" % issue things = changeset.storyrevisions.all() #things = issue.active_stories() things = things.exclude(reprint_notes=None).exclude( reprint_notes='').exclude(reprint_notes='from ?').exclude( reprint_notes=u'fr\xe5n ?').exclude( reprint_notes='uit ?').exclude(reprint_notes='da ?') is_changed = False if not standard: things = things.exclude( reprint_notes__icontains=' syndicate)').exclude( reprint_notes__icontains=' egmont (').exclude( reprint_notes__icontains=' egmont [') for i in things: if i.reprint_notes: #print i.reprint_notes is_changed |= migrate_reprint_notes(i, standard=standard, do_save=do_save) #else: #i.save() if is_changed or changeset.reprintrevisions.count(): migrated.append((changeset, True)) else: # nothing migrated print issue, 'nothing migrated' changeset.discard(changeset.indexer) # free reservation changeset.delete() # no need to keep changeset if len(migrated) > 100: do_auto_approve(migrated, 'reprint note migration') migrated = [] if len(migrated): do_auto_approve(migrated, 'reprint note migration')
def move_leading_article(article):  # english
    """Move a trailing ", <article>" to the front of series names and set
    leading_article, e.g. "Avengers, The" -> "The Avengers".  At most 250
    series per run.

    Fixes: removed an unused `errors` list that was never appended to or
    read.
    """
    changes = []
    series = Series.objects.filter(name__endswith=", %s" % article,
                                   reserved=False)
    for s in series[:250]:
        c = migrate_reserve(
            s, 'series',
            'for moving ", %s" to the beginning of the series name' % article)
        if c:
            sr = c.seriesrevisions.all()[0]
            # strip ", article" (len(article) + 2 chars) from the end and
            # prepend "article "
            sr.name = "%s %s" % (article, sr.name[:-(len(article) + 2)])
            sr.leading_article = True
            sr.save()
            changes.append((c, True))
    do_auto_approve(changes, 'move of leading article')
    return True
def move_leading_article(article): changes = [] issues = Issue.objects.filter(story__feature__endswith=", %s" % article, reserved=False, deleted=False).distinct().exclude(story__feature='Spider-Man and Human Torch, The') issues = issues.distinct() # limit the number processed at a time, there are some odd problems if not for i in issues[:250]: stories = i.active_stories().filter(feature__endswith=", %s" % article) c = migrate_reserve(i, 'issue', 'for moving ", %s" to the beginning of the feature' % article) if c: sr = c.storyrevisions.filter(feature__endswith=", %s" % article) for s in sr: s.feature = "%s %s" % (article, s.feature[:-(len(article)+2)]) print s.feature s.save() changes.append((c, True)) do_auto_approve(changes, 'move of leading article in feature') return True
def cleanup_cover_notes(issues, clean_all=False): changes = [] for i in issues[:450]: c = migrate_reserve(i, 'issue', 'to cleanup cover notes') if c: ir = c.issuerevisions.all()[0] cr = c.storyrevisions.get(type=cover_type, notes=ir.notes) cr.notes = '' cr.save() if clean_all: srs = c.storyrevisions.filter(notes=ir.notes) ir.notes='' ir.save() for sr in srs: sr.notes='' sr.save() changes.append((c, True)) else: print "%s is reserved" % i do_auto_approve(changes, 'cleaning cover notes')
def cleanup_cover_notes(issues, clean_all=False): changes = [] for i in issues[:450]: c = migrate_reserve(i, 'issue', 'to cleanup cover notes') if c: ir = c.issuerevisions.all()[0] cr = c.storyrevisions.get(type=cover_type, notes=ir.notes) cr.notes = '' cr.save() if clean_all: srs = c.storyrevisions.filter(notes=ir.notes) ir.notes = '' ir.save() for sr in srs: sr.notes = '' sr.save() changes.append((c, True)) else: print "%s is reserved" % i do_auto_approve(changes, 'cleaning cover notes')
def check_return_links_story(): changesets = [] for i in ReprintFromIssue.objects.filter(target__type__name = 'comic story'): results = Story.objects.exclude(deleted=True).all() results = results.filter(issue = i.origin_issue) results = results.filter(type = i.target.type) if results.count() == 1: if ReprintFromIssue.objects.filter(target__issue=i.target.issue, target__type__name='comic story', origin_issue=i.origin_issue).count() == 1: story = results[0] print i.origin_issue, i.target.issue changeset = migrate_reserve(i.origin_issue, 'issue', 'to merge reprint links') if not changeset: changeset = migrate_reserve(i.target.issue, 'issue', 'to merge reprint links') if not changeset: raise ValueError revision = ReprintRevision.objects.clone_revision(\ reprint=i, changeset=changeset) revision.origin = story revision.origin__issue = None revision.save() changesets.append((changeset, True)) if len(changesets) > 100: do_auto_approve(changesets, 'reprint link merging') changesets = [] do_auto_approve(changesets, 'reprint link merging') changesets = [] for i in ReprintToIssue.objects.filter(origin__type__name = 'comic story'): results = Story.objects.exclude(deleted=True).all() results = results.filter(issue = i.target_issue) results = results.filter(type = i.origin.type) if results.count() == 1: if ReprintToIssue.objects.filter(origin__issue=i.origin.issue, origin__type__name='comic story', target_issue = i.target_issue).count() == 1: story = results[0] print i.origin.issue, i.target_issue changeset = migrate_reserve(i.origin.issue, 'issue', 'to merge reprint links') if not changeset: changeset = migrate_reserve(i.target_issue, 'issue', 'to merge reprint links') if not changeset: raise ValueError revision = ReprintRevision.objects.clone_revision(\ reprint=i, changeset=changeset) revision.target = story revision.target__issue = None revision.save() changesets.append((changeset, True)) if len(changesets) > 100: do_auto_approve(changesets, 'reprint link merging') changesets 
= [] do_auto_approve(changesets, 'reprint link merging') changesets = []
def migrate_upc(issues): changes = [] for i in issues[:200]: c = migrate_reserve(i, 'issue', 'to move UPC from notes to barcode') if c: ir = c.issuerevisions.all()[0] cand_upc = ir.notes[3:].strip(':# ').split()[0].strip('.;') if valid_barcode(cand_upc): ir.barcode = cand_upc pos = ir.notes.find(cand_upc) + len(cand_upc) new_notes = ir.notes[pos:].lstrip(' ;.\r\n') ir.notes = new_notes ir.save() changes.append((c, True)) c.submit() else: # no valid barcode, no reason to keep changeset in db c.discard(anon) # to cleanup issue reserved state c.delete() print u"barcode not valid for %s: %s" % (i, i.notes) do_auto_approve(changes, 'moving of UPC from notes to barcode')
def fix_reprint_notes(issues, old_reprint_note, new_reprint_note, check_double_semi=False, exact=False): changes = [] for i in issues: c = migrate_reserve(i, 'issue', 'to fix reprint notes') if c: ir = c.issuerevisions.all()[0] if exact: crs = c.storyrevisions.filter(reprint_notes=old_reprint_note) else: crs = c.storyrevisions.filter( reprint_notes__icontains=old_reprint_note) changed = False for cr in crs: cr.reprint_notes = cr.reprint_notes.replace( old_reprint_note, new_reprint_note) if check_double_semi: cr.reprint_notes = cr.reprint_notes.replace(';;', ';') cr.reprint_notes = cr.reprint_notes.replace('; ;', ';') if not changed: cr.compare_changes() if cr.is_changed: changed = True cr.save() if changed: changes.append((c, True)) else: print "nothing changed in ", c c.discard(anon) c.delete() else: print "%s is reserved" % i if len(changes) > 10: do_auto_approve(changes, 'fixing reprint notes') changes = [] do_auto_approve(changes, 'fixing reprint notes')
def keydate_migration(issues): changes = [] for i in issues: keydate = parsePubDate(i.publication_date) if keydate: with transaction.commit_on_success(): c = migrate_reserve( i, 'issue', 'to autofill keydates from publication dates') if c: ir = c.issuerevisions.all()[0] ir.key_date = keydate ir.save() changes.append((c, True)) else: print "%s is reserved" % i if len(changes) > 25: with transaction.commit_on_success(): do_auto_approve(changes, 'publication date to keydate conversion') changes = [] with transaction.commit_on_success(): do_auto_approve(changes, 'publication date to keydate conversion')
def move_issues(issues, series, reserve_text, approve_text): changes = [] for issue in issues: if issue.reserved == False: changeset = migrate_reserve(issue, 'issue', reserve_text) if changeset: issue_revision = changeset.issuerevisions.get() if issue_revision.series.publisher != series.publisher: if issue_revision.brand: new_brand = series.publisher.active_brands()\ .filter(name=issue_revision.brand.name) if new_brand.count() == 1: issue_revision.brand = new_brand[0] else: issue_revision.brand = None if issue_revision.indicia_publisher: new_indicia_publisher = series.publisher\ .active_indicia_publishers()\ .filter(name=issue_revision.indicia_publisher.name) if new_indicia_publisher.count() == 1: issue_revision.indicia_publisher = \ new_indicia_publisher[0] else: issue_revision.indicia_publisher = None issue_revision.no_indicia_publisher = False issue_revision.series = series issue_revision.save() changes.append((changeset, True)) else: print "Issue %s could not be reserved" % issue else: print "Issue %s is reserved" % issue if len(changes) > 100: do_auto_approve(changes, approve_text) changes = [] if len(changes): do_auto_approve(changes, approve_text)
def keydate_migration(issues): changes = [] for i in issues: keydate = parsePubDate(i.publication_date) if keydate: with transaction.commit_on_success(): c = migrate_reserve(i, 'issue', 'to autofill keydates from publication dates') if c: ir = c.issuerevisions.all()[0] ir.key_date = keydate ir.save() changes.append((c, True)) else: print "%s is reserved" % i if len(changes) > 25: with transaction.commit_on_success(): do_auto_approve(changes, 'publication date to keydate conversion') changes = [] with transaction.commit_on_success(): do_auto_approve(changes, 'publication date to keydate conversion')
def migrate_reprints_lars():
    """Migrate reprint notes to reprint links for the German series
    indexed by Lars (Superman, Superboy, Batman Sonderheft, etc.).

    Fixes: legacy `raise ValueError, msg` statement replaced by the call
    form (also valid in Python 3).
    """
    # 18732 Superman
    # 26245 Superboy
    # 31648 Batman Sonderheft
    # 36955 Grüne Leuchte
    # 36980 Wundergirl
    # 36973 Superman Sonderausgabe
    # 36949 Batman Sonderausgabe
    # 36975 Superman Superband
    # 36964 Roter Blitz
    # 36953 Gerechtigkeitsliga
    # 36967 Superfreunde
    # 39648 Atom
    issues = Issue.objects.exclude(deleted=True).filter(series__id__in=[
        18732, 26245, 31648, 36955, 36980, 36973, 36949, 36975, 36964,
        36953, 36967, 39648], reserved=False)
    migrated = []
    for migrate_issue in issues:
        changeset = migrate_reserve(migrate_issue, 'issue',
                                    'to migrate reprint notes into links')
        if not changeset:
            # this should not happen since we filter out reserved issues
            raise ValueError("%s is reserved" % migrate_issue)
        things = changeset.storyrevisions.all()
        for i in things:
            text_reprint = ""
            if i.reprint_notes:
                for string in split_reprint_string(i.reprint_notes):
                    if string == " USA":
                        break
                    issue, notes, sequence_number, story = \
                        parse_reprint_lars(string)
                    if issue and issue.series.language.code.lower() == 'en' \
                            and string.startswith('in '):
                        # Lars sometimes copied further US printings,
                        # don't create links
                        issue = None
                    if sequence_number < 0 and issue:
                        if i.sequence_number == 0:
                            # cover sequence: match the target cover (seq 0)
                            story = Story.objects.exclude(
                                deleted=True).filter(issue=issue)
                            story = story.filter(sequence_number=0)
                            if story.count() == 1:
                                story = story[0]
                                sequence_number = 0
                            else:
                                story = False
                    if issue:
                        if sequence_number >= 0 and story:
                            changeset.reprintrevisions.create(
                                origin_story=story, target_story=i.story,
                                notes=notes)
                        else:
                            if issue.series.language.code.lower() == 'de':
                                nr = find_reprint_sequence_in_issue(
                                    i.story, issue.id)
                                if string.lower().startswith('from'):
                                    if nr > 0:
                                        story = issue.active_stories().get(
                                            sequence_number=nr)
                                        changeset.reprintrevisions.create(
                                            origin_story=story,
                                            target_story=i.story,
                                            notes=notes)
                                    else:
                                        changeset.reprintrevisions.create(
                                            origin_issue=issue,
                                            target_story=i.story,
                                            notes=notes)
                                else:
                                    if nr > 0:
                                        story = issue.active_stories().get(
                                            sequence_number=nr)
                                        changeset.reprintrevisions.create(
                                            origin_story=i.story,
                                            target_story=story,
                                            notes=notes)
                                    else:
                                        changeset.reprintrevisions.create(
                                            target_issue=issue,
                                            origin_story=i.story,
                                            notes=notes)
                            else:
                                changeset.reprintrevisions.create(
                                    origin_issue=issue,
                                    target_story=i.story, notes=notes)
                    else:
                        # could not parse this piece: keep it as text
                        text_reprint += string + "; "
                if len(text_reprint) > 0:
                    text_reprint = text_reprint[:-2]
                    # some notes remain: flag for manual inspection
                    i.migration_status.reprint_needs_inspection = True
                    i.migration_status.save()
                i.reprint_notes = text_reprint
                i.save()
        if changeset.reprintrevisions.count():
            migrated.append((changeset, True))
        else:
            # nothing migrated
            changeset.discard(changeset.indexer)  # free reservation
            changeset.delete()  # no need to keep changeset
        if len(migrated) > 100:
            do_auto_approve(migrated, 'reprint note migration')
            migrated = []
    if len(migrated):
        do_auto_approve(migrated, 'reprint note migration')
def fix_stray_imprints(): """ Several rows ended up neither master publishers nor imprints, but they are in fact all associated with an existing master publisher. Set them up as imprints so that our auto-deletion code will find them. Note that we do not maintain change history for imprints, so they are just updated directly. """ # First, see if we've already run this function- it isn't the kind of thing # you can do twice, so just leave if it looks like we have. if Publisher.objects.get(id=4353).parent is not None: print "Already fixed stray imprints, skipping..." return # Handle two stray Hyperion imprints that lost their parent. hyperion = Publisher.objects.get(name='Hyperion', is_master=True) Publisher.objects.filter(id=4353).update(parent=hyperion, name='Jump at the Sun [duplicate]') Publisher.objects.filter(id=3970).update(parent=hyperion) # Stuff the Pocket Books co-publishing "imprint" under Pocket Books pocket = Publisher.objects.get(name='Pocket Books', is_master=True) Publisher.objects.filter(id__in=(2345, 2346, 2347)).update(parent=pocket) # Four imprints ended up also flagged as Master Publishers. Fix that. Publisher.objects.filter(id__in=(455, 746, 2499, 4775)).update(is_master=False) # One of these has an imprint which now needs to be re-parented. # Horror House Press should be under AC, not AC Comics. horror_house = Publisher.objects.get(id=3644) horror_house.parent = horror_house.parent.parent horror_house.save() changes = [] errors = [] # AC Comics is a special case- we just need to fix it to match the adjusted # state of the imprint that we re-parented above. 
s = Series.objects.get(publisher=455, imprint=3644) c = migrate_reserve(s, 'series', 'Current publisher is an imprint- fixing') if c is None: errors.append(s) else: sr = c.seriesrevisions.all()[0] sr.publisher = sr.imprint.parent sr.save() changes.append((sr, True)) # For everything else, the series has no imprint, so bump the former # master publisher down to imprint, and set the master publisher to the # imprint's parent. for series_id in (24204, 25280, # MAGNECOM 13887, 22352, 22353, 22354, # Magazine Publishers, Inc. 35897): # PowerMark Productions s = Series.objects.get(pk=series_id) c = migrate_reserve(s, 'series', 'Current publisher is an imprint- fixing') if c is None: errors.append(s) else: sr = c.seriesrevisions.all()[0] pub = sr.publisher sr.imprint = pub sr.publisher = pub.parent sr.save() changes.append((sr, True)) do_auto_approve(changes, 'Auto-approving imprint vs master publisher fix') return True if errors: print "The following series are reserved and could not be fixed:" for reserved_series in errors: print "\t%s" % reserved_series print "Exiting with changes not committed." sys.exit(-1)
def fix_stray_imprints(): """ Several rows ended up neither master publishers nor imprints, but they are in fact all associated with an existing master publisher. Set them up as imprints so that our auto-deletion code will find them. Note that we do not maintain change history for imprints, so they are just updated directly. """ # First, see if we've already run this function- it isn't the kind of thing # you can do twice, so just leave if it looks like we have. if Publisher.objects.get(id=4353).parent is not None: print "Already fixed stray imprints, skipping..." return # Handle two stray Hyperion imprints that lost their parent. hyperion = Publisher.objects.get(name='Hyperion', is_master=True) Publisher.objects.filter(id=4353).update( parent=hyperion, name='Jump at the Sun [duplicate]') Publisher.objects.filter(id=3970).update(parent=hyperion) # Stuff the Pocket Books co-publishing "imprint" under Pocket Books pocket = Publisher.objects.get(name='Pocket Books', is_master=True) Publisher.objects.filter(id__in=(2345, 2346, 2347)).update(parent=pocket) # Four imprints ended up also flagged as Master Publishers. Fix that. Publisher.objects.filter(id__in=(455, 746, 2499, 4775)).update(is_master=False) # One of these has an imprint which now needs to be re-parented. # Horror House Press should be under AC, not AC Comics. horror_house = Publisher.objects.get(id=3644) horror_house.parent = horror_house.parent.parent horror_house.save() changes = [] errors = [] # AC Comics is a special case- we just need to fix it to match the adjusted # state of the imprint that we re-parented above. 
s = Series.objects.get(publisher=455, imprint=3644) c = migrate_reserve(s, 'series', 'Current publisher is an imprint- fixing') if c is None: errors.append(s) else: sr = c.seriesrevisions.all()[0] sr.publisher = sr.imprint.parent sr.save() changes.append((sr, True)) # For everything else, the series has no imprint, so bump the former # master publisher down to imprint, and set the master publisher to the # imprint's parent. for series_id in ( 24204, 25280, # MAGNECOM 13887, 22352, 22353, 22354, # Magazine Publishers, Inc. 35897): # PowerMark Productions s = Series.objects.get(pk=series_id) c = migrate_reserve(s, 'series', 'Current publisher is an imprint- fixing') if c is None: errors.append(s) else: sr = c.seriesrevisions.all()[0] pub = sr.publisher sr.imprint = pub sr.publisher = pub.parent sr.save() changes.append((sr, True)) do_auto_approve(changes, 'Auto-approving imprint vs master publisher fix') return True if errors: print "The following series are reserved and could not be fixed:" for reserved_series in errors: print "\t%s" % reserved_series print "Exiting with changes not committed." sys.exit(-1)
def migrate_reprints_lars():
    """Migrate reprint notes to reprint links for the German series
    indexed by Lars (Superman, Superboy, Batman Sonderheft, etc.).

    Fixes: legacy `raise ValueError, msg` statement replaced by the call
    form (also valid in Python 3).
    """
    # 18732 Superman
    # 26245 Superboy
    # 31648 Batman Sonderheft
    # 36955 Grüne Leuchte
    # 36980 Wundergirl
    # 36973 Superman Sonderausgabe
    # 36949 Batman Sonderausgabe
    # 36975 Superman Superband
    # 36964 Roter Blitz
    # 36953 Gerechtigkeitsliga
    # 36967 Superfreunde
    # 39648 Atom
    issues = Issue.objects.exclude(deleted=True).filter(series__id__in=[
        18732, 26245, 31648, 36955, 36980, 36973, 36949, 36975, 36964,
        36953, 36967, 39648], reserved=False)
    migrated = []
    for migrate_issue in issues:
        changeset = migrate_reserve(migrate_issue, 'issue',
                                    'to migrate reprint notes into links')
        if not changeset:
            # this should not happen since we filter out reserved issues
            raise ValueError("%s is reserved" % migrate_issue)
        things = changeset.storyrevisions.all()
        for i in things:
            text_reprint = ""
            if i.reprint_notes:
                for string in split_reprint_string(i.reprint_notes):
                    if string == " USA":
                        break
                    issue, notes, sequence_number, story = \
                        parse_reprint_lars(string)
                    if issue and issue.series.language.code.lower() == 'en' \
                            and string.startswith('in '):
                        # Lars sometimes copied further US printings,
                        # don't create links
                        issue = None
                    if sequence_number < 0 and issue:
                        if i.sequence_number == 0:
                            # cover sequence: match the target cover (seq 0)
                            story = Story.objects.exclude(
                                deleted=True).filter(issue=issue)
                            story = story.filter(sequence_number=0)
                            if story.count() == 1:
                                story = story[0]
                                sequence_number = 0
                            else:
                                story = False
                    if issue:
                        if sequence_number >= 0 and story:
                            changeset.reprintrevisions.create(
                                origin_story=story, target_story=i.story,
                                notes=notes)
                        else:
                            if issue.series.language.code.lower() == 'de':
                                nr = find_reprint_sequence_in_issue(
                                    i.story, issue.id)
                                if string.lower().startswith('from'):
                                    if nr > 0:
                                        story = issue.active_stories().get(
                                            sequence_number=nr)
                                        changeset.reprintrevisions.create(
                                            origin_story=story,
                                            target_story=i.story,
                                            notes=notes)
                                    else:
                                        changeset.reprintrevisions.create(
                                            origin_issue=issue,
                                            target_story=i.story,
                                            notes=notes)
                                else:
                                    if nr > 0:
                                        story = issue.active_stories().get(
                                            sequence_number=nr)
                                        changeset.reprintrevisions.create(
                                            origin_story=i.story,
                                            target_story=story,
                                            notes=notes)
                                    else:
                                        changeset.reprintrevisions.create(
                                            target_issue=issue,
                                            origin_story=i.story,
                                            notes=notes)
                            else:
                                changeset.reprintrevisions.create(
                                    origin_issue=issue,
                                    target_story=i.story, notes=notes)
                    else:
                        # could not parse this piece: keep it as text
                        text_reprint += string + "; "
                if len(text_reprint) > 0:
                    text_reprint = text_reprint[:-2]
                    # some notes remain: flag for manual inspection
                    i.migration_status.reprint_needs_inspection = True
                    i.migration_status.save()
                i.reprint_notes = text_reprint
                i.save()
        if changeset.reprintrevisions.count():
            migrated.append((changeset, True))
        else:
            # nothing migrated
            changeset.discard(changeset.indexer)  # free reservation
            changeset.delete()  # no need to keep changeset
        if len(migrated) > 100:
            do_auto_approve(migrated, 'reprint note migration')
            migrated = []
    if len(migrated):
        do_auto_approve(migrated, 'reprint note migration')
def fix_italian_series_687():
    """Normalize the Italian reprint notes of series 687.

    Two passes over each reserved issue's story revisions:
      1. insert missing semicolons between fragments joined by Italian
         "e" ("and") phrasings;
      2. rewrite "ristampata su"/"Ristampa da" phrases into the
         standard "in <series> #<number>" / "from ..." notation, using
         find_italian_disney to map known Disney series names.
    Changesets whose revisions did not actually change anything are
    discarded; the rest are auto-approved in batches of 10.
    """
    changes = []
    do_save = True  # NOTE(review): never read below -- apparent leftover
    issues = Issue.objects.filter(series__id = 687)
    for issue in issues:
        # only touch issues with non-empty, non-questionable reprint notes
        if issue.active_stories().exclude(reprint_notes="").exclude(reprint_notes__icontains='?').count():
            c = migrate_reserve(issue, 'issue', 'to fix reprint notes')
            if c:
                crs = c.storyrevisions.exclude(reprint_notes="").exclude(reprint_notes__icontains='?')
                changed = False
                # fix missing semicolons
                for cr in crs:
                    string = cr.reprint_notes.replace(' e ristampata su ','; ristampata su ')
                    if string.find(' e su ') > 0:
                        string = string.replace(' e su ','; ristampata su ')
                    if string.find(' e ') > 0:
                        string = string.replace(' e ','; ristampata su ')
                    cr.reprint_notes = string
                    cr.save()
                # second pass only for revisions with the known phrasings
                q_obj = Q(reprint_notes__icontains='ristampata su ') | Q(reprint_notes__icontains='Ristampa da ')
                crs = crs.filter(q_obj)
                for cr in crs:
                    # now split via ';'
                    string = cr.reprint_notes.replace('ristampata su ','in ')
                    string = string.replace('Ristampata su ','in ')
                    string = string.replace('Ristampa su ','in ')
                    string = string.replace('Ristampa da ','from ')
                    e = string.strip().split(';')
                    reprints = None
                    for st in e:
                        # expect "<series name> <number>"; fall back to the
                        # raw fragment if the last token is not a number
                        # NOTE(review): bare except also hides errors from
                        # find_italian_disney -- kept as-is in doc-only pass
                        d = st.strip().split(' ')
                        series = ' '.join(d[0:-1])
                        try:
                            num = int(d[-1])
                            res = find_italian_disney(st,'',0)
                            if res:
                                result = res
                            else:
                                result = series + " #" + str(num)
                        except:
                            result = st
                        if reprints:
                            reprints += "; " + result
                        else:
                            reprints = result
                    #print cr.reprint_notes, reprints
                    if reprints:
                        cr.reprint_notes = reprints
                    else:
                        cr.reprint_notes = ''
                    # one revision with real changes is enough to keep the
                    # whole changeset
                    if not changed:
                        cr.compare_changes()
                        if cr.is_changed:
                            changed = True
                    cr.save()
                if changed:
                    changes.append((c, True))
                else:
                    print "nothing changed in ", c
                    c.discard(anon)
                    c.delete()
            else:
                print "%s is reserved" % issue
        if len(changes) > 10:
            do_auto_approve(changes, 'fixing reprint notes')
            changes = []
    do_auto_approve(changes, 'fixing reprint notes')
def check_return_links_story(): changesets = [] for i in ReprintFromIssue.objects.filter(target__type__name='comic story'): results = Story.objects.exclude(deleted=True).all() results = results.filter(issue=i.origin_issue) results = results.filter(type=i.target.type) if results.count() == 1: if ReprintFromIssue.objects.filter( target__issue=i.target.issue, target__type__name='comic story', origin_issue=i.origin_issue).count() == 1: story = results[0] print i.origin_issue, i.target.issue changeset = migrate_reserve(i.origin_issue, 'issue', 'to merge reprint links') if not changeset: changeset = migrate_reserve(i.target.issue, 'issue', 'to merge reprint links') if not changeset: raise ValueError revision = ReprintRevision.objects.clone_revision(\ reprint=i, changeset=changeset) revision.origin = story revision.origin__issue = None revision.save() changesets.append((changeset, True)) if len(changesets) > 100: do_auto_approve(changesets, 'reprint link merging') changesets = [] do_auto_approve(changesets, 'reprint link merging') changesets = [] for i in ReprintToIssue.objects.filter(origin__type__name='comic story'): results = Story.objects.exclude(deleted=True).all() results = results.filter(issue=i.target_issue) results = results.filter(type=i.origin.type) if results.count() == 1: if ReprintToIssue.objects.filter( origin__issue=i.origin.issue, origin__type__name='comic story', target_issue=i.target_issue).count() == 1: story = results[0] print i.origin.issue, i.target_issue changeset = migrate_reserve(i.origin.issue, 'issue', 'to merge reprint links') if not changeset: changeset = migrate_reserve(i.target_issue, 'issue', 'to merge reprint links') if not changeset: raise ValueError revision = ReprintRevision.objects.clone_revision(\ reprint=i, changeset=changeset) revision.target = story revision.target__issue = None revision.save() changesets.append((changeset, True)) if len(changesets) > 100: do_auto_approve(changesets, 'reprint link merging') changesets = [] 
do_auto_approve(changesets, 'reprint link merging') changesets = []