Code example #1
File: cli.py  Project: 2mny/mylar
def actual_issue_data_fetch( match, settings, opts ):

	# now get the particular issue data
	try:
		comicVine = ComicVineTalker()
		comicVine.wait_for_rate_limit = opts.wait_and_retry_on_rate_limit
		cv_md = comicVine.fetchIssueData( match['volume_id'],  match['issue_number'], settings )
	except ComicVineTalkerException:
		print "Network error while getting issue details.  Save aborted"
		return None

	if settings.apply_cbl_transform_on_cv_import:
		cv_md = CBLTransformer( cv_md, settings ).apply()
		
	return cv_md
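
For reference, the only keys this helper reads from match are 'volume_id' and 'issue_number', both filled in by IssueIdentifier.search() (example #4 below). A minimal call sketch with purely illustrative values:

# Illustrative sketch only: the volume ID is made up, and settings/opts are
# assumed to come from the surrounding CLI setup.
match = {
    'volume_id': 18436,      # ComicVine volume ID (hypothetical value)
    'issue_number': "1",     # issue number, as a normalized string
}
cv_md = actual_issue_data_fetch(match, settings, opts)
if cv_md is None:
    print "Fetch failed; nothing to save"
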
Code example #2
def actual_issue_data_fetch( match, settings, opts ):

	# now get the particular issue data
	try:
		comicVine = ComicVineTalker()
		comicVine.wait_for_rate_limit = opts.wait_and_retry_on_rate_limit
		cv_md = comicVine.fetchIssueData( match['volume_id'],  match['issue_number'], settings )
	except ComicVineTalkerException:
		print >> sys.stderr, "Network error while getting issue details.  Save aborted"
		return None

	if settings.apply_cbl_transform_on_cv_import:
		cv_md = CBLTransformer( cv_md, settings ).apply()
		
	return cv_md
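
The only difference from example #1 is that the error message is routed to sys.stderr (the Python 2 form "print >> sys.stderr"), so diagnostics do not mix with tag output on stdout. This variant also needs an "import sys" at module level.
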
Code example #3
File: cli.py  Project: 2mny/mylar
def process_file_cli( filename, opts, settings, match_results ):

	batch_mode = len( opts.file_list ) > 1
		
	ca = ComicArchive(filename, settings.rar_exe_path)
	
	if not os.path.lexists( filename ):
		print "Cannot find "+ filename
		return
		
	if not ca.seemsToBeAComicArchive():
		print "Sorry, but "+ filename + "  is not a comic archive!"
		return
	
	#if not ca.isWritableForStyle( opts.data_style ) and ( opts.delete_tags or opts.save_tags or opts.rename_file ):
	if not ca.isWritable(  ) and ( opts.delete_tags or opts.copy_tags or opts.save_tags or opts.rename_file ):
		print "This archive is not writable for that tag type"
		return

	# record which tag styles are present, indexed by the MetaDataStyle constants
	has = [ False, False, False ]
	if ca.hasCIX(): has[ MetaDataStyle.CIX ] = True
	if ca.hasCBI(): has[ MetaDataStyle.CBI ] = True
	if ca.hasCoMet(): has[ MetaDataStyle.COMET ] = True

	if opts.print_tags:


		if opts.data_style is None:
			page_count = ca.getNumberOfPages()

			brief = ""

			if batch_mode:
				brief = u"{0}: ".format(filename)

			if ca.isZip():      brief += "ZIP archive    "
			elif ca.isRar():    brief += "RAR archive    "
			elif ca.isFolder(): brief += "Folder archive "
				
			brief += "({0: >3} pages)".format(page_count)			
			brief += "  tags:[ "

			if not ( has[ MetaDataStyle.CBI ] or has[ MetaDataStyle.CIX ] or has[ MetaDataStyle.COMET ] ):
				brief += "none "
			else:
				if has[ MetaDataStyle.CBI ]: brief += "CBL "
				if has[ MetaDataStyle.CIX ]: brief += "CR "
				if has[ MetaDataStyle.COMET ]: brief += "CoMet "
			brief += "]"
				
			print brief

		if opts.terse:
			return

		print
		
		if opts.data_style is None or opts.data_style == MetaDataStyle.CIX:
			if has[ MetaDataStyle.CIX ]:
				print "------ComicRack tags--------"
				if opts.raw:
					print u"{0}".format(unicode(ca.readRawCIX(), errors='ignore'))
				else:
					print u"{0}".format(ca.readCIX())
				
		if opts.data_style is None or opts.data_style == MetaDataStyle.CBI:
			if has[ MetaDataStyle.CBI ]:
				print "------ComicBookLover tags--------"
				if opts.raw:
					pprint(json.loads(ca.readRawCBI()))
				else:
					print u"{0}".format(ca.readCBI())
					
		if opts.data_style is None or opts.data_style == MetaDataStyle.COMET:
			if has[ MetaDataStyle.COMET ]:
				print "------CoMet tags--------"
				if opts.raw:
					print u"{0}".format(ca.readRawCoMet())
				else:
					print u"{0}".format(ca.readCoMet())
			
			
	elif opts.delete_tags:
		style_name = MetaDataStyle.name[ opts.data_style ]
		if has[ opts.data_style ]:
			if not opts.dryrun:
				if not ca.removeMetadata( opts.data_style ):
					print u"{0}: Tag removal seemed to fail!".format( filename )
				else:
					print u"{0}: Removed {1} tags.".format( filename, style_name )
			else:
				print u"{0}: dry-run.  {1} tags not removed".format( filename, style_name )		
		else:
			print u"{0}: This archive doesn't have {1} tags to remove.".format( filename, style_name )

	elif opts.copy_tags:
		dst_style_name = MetaDataStyle.name[ opts.data_style ]
		if opts.no_overwrite and has[ opts.data_style ]:
			print u"{0}: Already has {1} tags.  Not overwriting.".format(filename, dst_style_name)
			return
		if opts.copy_source == opts.data_style:
			print u"{0}: Destination and source are same: {1}.  Nothing to do.".format(filename, dst_style_name)
			return
			
		src_style_name = MetaDataStyle.name[ opts.copy_source ]
		if has[ opts.copy_source ]:
			if not opts.dryrun:
				md = ca.readMetadata( opts.copy_source )
				
				if settings.apply_cbl_transform_on_bulk_operation and opts.data_style == MetaDataStyle.CBI:
					md = CBLTransformer( md, settings ).apply()
				
				if not ca.writeMetadata( md, opts.data_style ):
					print u"{0}: Tag copy seemed to fail!".format( filename )
				else:
					print u"{0}: Copied {1} tags to {2} .".format( filename, src_style_name, dst_style_name )
			else:
				print u"{0}: dry-run.  {1} tags not copied".format( filename, src_style_name )		
		else:
			print u"{0}: This archive doesn't have {1} tags to copy.".format( filename, src_style_name )

		
	elif opts.save_tags:

		if opts.no_overwrite and has[ opts.data_style ]:
			print u"{0}: Already has {1} tags.  Not overwriting.".format(filename, MetaDataStyle.name[ opts.data_style ])
			return
		
		if batch_mode:
			print u"Processing {0}...".format(filename)
			
		md = create_local_metadata( opts, ca, has[ opts.data_style ] )
		if md.issue is None or md.issue == "":
			if opts.assume_issue_is_one_if_not_set:
				md.issue = "1"
		
		# now, search online
		if opts.search_online:
			if opts.issue_id is not None:
				# we were given the actual ID to search with
				try:
					comicVine = ComicVineTalker()
					comicVine.wait_for_rate_limit = opts.wait_and_retry_on_rate_limit
					cv_md = comicVine.fetchIssueDataByIssueID( opts.issue_id, settings )
				except ComicVineTalkerException:
					print "Network error while getting issue details.  Save aborted"
					match_results.fetchDataFailures.append(filename)
					return
				
				if cv_md is None:
					print "No match for ID {0} was found.".format(opts.issue_id)
					match_results.noMatches.append(filename)
					return
				
				if settings.apply_cbl_transform_on_cv_import:
					cv_md = CBLTransformer( cv_md, settings ).apply()
			else:
				ii = IssueIdentifier( ca, settings )
	
				if md is None or md.isEmpty:
					print "No metadata given to search online with!"
					match_results.noMatches.append(filename)
					return
	
				def myoutput( text ):
					if opts.verbose:
						IssueIdentifier.defaultWriteOutput( text )
					
				# use our overlayed MD struct to search
				ii.setAdditionalMetadata( md )
				ii.onlyUseAdditionalMetaData = True
				ii.waitAndRetryOnRateLimit = opts.wait_and_retry_on_rate_limit
				ii.setOutputFunction( myoutput )
				ii.cover_page_index = md.getCoverPageIndexList()[0]
				matches = ii.search()
				
				result = ii.search_result
				
				found_match = False
				choices = False
				low_confidence = False
				
				if result == ii.ResultNoMatches:
					pass
				elif result == ii.ResultFoundMatchButBadCoverScore:
					low_confidence = True
					found_match = True
				elif result == ii.ResultFoundMatchButNotFirstPage :
					found_match = True
				elif result == ii.ResultMultipleMatchesWithBadImageScores:
					low_confidence = True
					choices = True
				elif result == ii.ResultOneGoodMatch:
					found_match = True
				elif result == ii.ResultMultipleGoodMatches:
					choices = True
	
				if choices:
					if low_confidence:
						print "Online search: Multiple low confidence matches.  Save aborted"
						match_results.lowConfidenceMatches.append(MultipleMatch(filename,matches))
						return
					else:
						print "Online search: Multiple good matches.  Save aborted"
						match_results.multipleMatches.append(MultipleMatch(filename,matches))
						return
				if low_confidence and opts.abortOnLowConfidence:
					print "Online search: Low confidence match.  Save aborted"
					match_results.lowConfidenceMatches.append(MultipleMatch(filename,matches))
					return
				if not found_match:
					print "Online search: No match found.  Save aborted"
					match_results.noMatches.append(filename)
					return
	
	
				# we got here, so we have a single match
				
				# now get the particular issue data
				cv_md = actual_issue_data_fetch(matches[0], settings, opts)
				if cv_md is None:
					match_results.fetchDataFailures.append(filename)
					return
			
			md.overlay( cv_md )
			
		# ok, done building our metadata. time to save
		if not actual_metadata_save( ca, opts, md ):
			match_results.writeFailures.append(filename)
		else:
			match_results.goodMatches.append(filename)

	elif opts.rename_file:

		msg_hdr = ""
		if batch_mode:
			msg_hdr = u"{0}: ".format(filename)

		if opts.data_style is not None:
			use_tags = has[ opts.data_style ]
		else:
			use_tags = False
			
		md = create_local_metadata( opts, ca, use_tags )
		
		if md.series is None:
			print msg_hdr + "Can't rename without series name"
			return

		new_ext = None  # default
		if settings.rename_extension_based_on_archive:
			if ca.isZip():
				new_ext = ".cbz"
			elif ca.isRar():
				new_ext = ".cbr"
			
		renamer = FileRenamer( md )
		renamer.setTemplate( settings.rename_template )
		renamer.setIssueZeroPadding( settings.rename_issue_number_padding )
		renamer.setSmartCleanup( settings.rename_use_smart_string_cleanup )
		
		new_name = renamer.determineName( filename, ext=new_ext )
			
		if new_name == os.path.basename(filename):
			print msg_hdr + "Filename is already good!"
			return
		
		folder = os.path.dirname( os.path.abspath( filename ) )
		new_abs_path = utils.unique_file( os.path.join( folder, new_name ) )

		suffix = ""
		if not opts.dryrun:
			# rename the file
			os.rename( filename, new_abs_path )
		else:
			suffix = " (dry-run, no change)"

		print u"renamed '{0}' -> '{1}' {2}".format(os.path.basename(filename), new_name, suffix)

	elif opts.export_to_zip:
		msg_hdr = ""
		if batch_mode:
			msg_hdr = u"{0}: ".format(filename)

		if not ca.isRar():
			print msg_hdr + "Archive is not a RAR."
			return
		
		rar_file = os.path.abspath( filename )
		new_file = os.path.splitext(rar_file)[0] + ".cbz"
		
		if opts.abort_export_on_conflict and os.path.lexists( new_file ):
			print msg_hdr + "{0} already exists in that folder.".format(os.path.split(new_file)[1])
			return
		
		new_file = utils.unique_file( new_file )
	
		delete_success = False
		export_success = False
		if not opts.dryrun:
			if ca.exportAsZip( new_file ):
				export_success = True
				if opts.delete_rar_after_export:
					try:
						os.unlink( rar_file )
					except OSError:
						print msg_hdr + "Error deleting original RAR after export"
						delete_success = False
					else:
						delete_success = True
			else:
				# last export failed, so remove the zip, if it exists
				if os.path.lexists( new_file ):
					os.remove( new_file )
		else:
			msg = msg_hdr + u"Dry-run:  Would try to create {0}".format(os.path.split(new_file)[1])
			if opts.delete_rar_after_export:
				msg += u" and delete orginal."
			print msg
			return
			
		msg = msg_hdr
		if export_success:
			msg += u"Archive exported successfully to: {0}".format( os.path.split(new_file)[1] )
			if opts.delete_rar_after_export and delete_success:
				msg += u" (Original deleted) "
		else:
			msg += u"Archive failed to export!"
			
		print msg
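
Both versions of process_file_cli index the has list with MetaDataStyle constants and look up display names through MetaDataStyle.name. A sketch of the shape that class must take (the concrete integer values are assumptions; only the three constants and the name list are implied by the code above):

# Sketch of the MetaDataStyle container assumed by these examples; the exact
# values are guesses, but they must be small integers usable as list indices.
class MetaDataStyle:
    CBI = 0       # ComicBookLover (ComicBookInfo) tags
    CIX = 1       # ComicRack (ComicInfo.xml) tags
    COMET = 2     # CoMet tags
    name = ['ComicBookLover', 'ComicRack', 'CoMet']
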
Code example #4
    def search(self):

        ca = self.comic_archive
        self.match_list = []
        self.cancel = False
        self.search_result = self.ResultNoMatches

        if not pil_available:
            self.log_msg(
                "Python Imaging Library (PIL) is not available and is needed for issue identification.")
            return self.match_list

        if not ca.seemsToBeAComicArchive():
            self.log_msg(
                "Sorry, but " + opts.filename + " is not a comic archive!")
            return self.match_list

        cover_image_data = ca.getPage(self.cover_page_index)
        cover_hash = self.calculateHash(cover_image_data)

        # check the aspect ratio
        # if it's wider than it is high, it's probably a two page spread
        # if so, crop it and calculate a second hash
        narrow_cover_hash = None
        aspect_ratio = self.getAspectRatio(cover_image_data)
        if aspect_ratio < 1.0:
            right_side_image_data = self.cropCover(cover_image_data)
            if right_side_image_data is not None:
                narrow_cover_hash = self.calculateHash(right_side_image_data)

        #self.log_msg("Cover hash = {0:016x}".format(cover_hash))

        keys = self.getSearchKeys()
        # normalize the issue number
        keys['issue_number'] = IssueString(keys['issue_number']).asString()

        # we need, at minimum, a series and issue number
        if keys['series'] is None or keys['issue_number'] is None:
            self.log_msg("Not enough info for a search!")
            return []

        self.log_msg("Going to search for:")
        self.log_msg("\tSeries: " + keys['series'])
        self.log_msg("\tIssue:  " + keys['issue_number'])
        if keys['issue_count'] is not None:
            self.log_msg("\tCount:  " + str(keys['issue_count']))
        if keys['year'] is not None:
            self.log_msg("\tYear:   " + str(keys['year']))
        if keys['month'] is not None:
            self.log_msg("\tMonth:  " + str(keys['month']))

        #self.log_msg("Publisher Blacklist: " + str(self.publisher_blacklist))
        comicVine = ComicVineTalker()
        comicVine.wait_for_rate_limit = self.waitAndRetryOnRateLimit

        comicVine.setLogFunc(self.output_function)

        # self.log_msg("Searching for " + keys['series'] + "...")
        self.log_msg(u"Searching for  {0} #{1} ...".format(
            keys['series'], keys['issue_number']))
        try:
            cv_search_results = comicVine.searchForSeries(keys['series'])
        except ComicVineTalkerException:
            self.log_msg(
                "Network issue while searching for series. Aborting...")
            return []

        #self.log_msg("Found " + str(len(cv_search_results)) + " initial results")
        if self.cancel:
            return []

        if cv_search_results is None:
            return []

        series_second_round_list = []

        #self.log_msg("Removing results with too long names, banned publishers, or future start dates")
        for item in cv_search_results:
            length_approved = False
            publisher_approved = True
            date_approved = True

            # remove any series that starts after the issue year
            if keys['year'] is not None and str(
                    keys['year']).isdigit() and item['start_year'] is not None and str(
                    item['start_year']).isdigit():
                if int(keys['year']) < int(item['start_year']):
                    date_approved = False

            # assume that our search name is close to the actual name, say
            # within, e.g., 5 chars
            shortened_key = utils.removearticles(keys['series'])
            shortened_item_name = utils.removearticles(item['name'])
            if len(shortened_item_name) < (
                    len(shortened_key) + self.length_delta_thresh):
                length_approved = True

            # remove any series from publishers on the blacklist
            if item['publisher'] is not None:
                publisher = item['publisher']['name']
                if publisher is not None and publisher.lower(
                ) in self.publisher_blacklist:
                    publisher_approved = False

            if length_approved and publisher_approved and date_approved:
                series_second_round_list.append(item)

        self.log_msg(
            "Searching in " + str(len(series_second_round_list)) + " series")

        if self.callback is not None:
            self.callback(0, len(series_second_round_list))

        # now sort the list by name length
        series_second_round_list.sort(
            key=lambda x: len(x['name']), reverse=False)

        # build a list of volume IDs
        volume_id_list = list()
        for series in series_second_round_list:
            volume_id_list.append(series['id'])

        try:
            issue_list = comicVine.fetchIssuesByVolumeIssueNumAndYear(
                volume_id_list,
                keys['issue_number'],
                keys['year'])

        except ComicVineTalkerException:
            self.log_msg(
                "Network issue while searching for series details. Aborting...")
            return []

        if issue_list is None:
            return []

        shortlist = list()
        # now re-associate the issues and volumes
        for issue in issue_list:
            for series in series_second_round_list:
                if series['id'] == issue['volume']['id']:
                    shortlist.append((series, issue))
                    break

        if keys['year'] is None:
            self.log_msg(u"Found {0} series that have an issue #{1}".format(
                len(shortlist), keys['issue_number']))
        else:
            self.log_msg(
                u"Found {0} series that have an issue #{1} from {2}".format(
                    len(shortlist),
                    keys['issue_number'],
                    keys['year']))

        # now we have a shortlist of volumes with the desired issue number
        # Do first round of cover matching
        counter = len(shortlist)
        for series, issue in shortlist:
            if self.callback is not None:
                self.callback(counter, len(shortlist) * 3)
                counter += 1

            self.log_msg(u"Examining covers for  ID: {0} {1} ({2}) ...".format(
                series['id'],
                series['name'],
                series['start_year']), newline=False)

            # parse out the cover date
            day, month, year = comicVine.parseDateStr(issue['cover_date'])

            # Now check the cover match against the primary image
            hash_list = [cover_hash]
            if narrow_cover_hash is not None:
                hash_list.append(narrow_cover_hash)

            try:
                image_url = issue['image']['super_url']
                thumb_url = issue['image']['thumb_url']
                page_url = issue['site_detail_url']

                score_item = self.getIssueCoverMatchScore(
                    comicVine,
                    issue['id'],
                    image_url,
                    thumb_url,
                    page_url,
                    hash_list,
                    useRemoteAlternates=False)
            except:
                # any error while fetching or scoring the cover aborts the search
                self.match_list = []
                return self.match_list

            match = dict()
            match['series'] = u"{0} ({1})".format(
                series['name'], series['start_year'])
            match['distance'] = score_item['score']
            match['issue_number'] = keys['issue_number']
            match['cv_issue_count'] = series['count_of_issues']
            match['url_image_hash'] = score_item['hash']
            match['issue_title'] = issue['name']
            match['issue_id'] = issue['id']
            match['volume_id'] = series['id']
            match['month'] = month
            match['year'] = year
            match['publisher'] = None
            if series['publisher'] is not None:
                match['publisher'] = series['publisher']['name']
            match['image_url'] = image_url
            match['thumb_url'] = thumb_url
            match['page_url'] = page_url
            match['description'] = issue['description']

            self.match_list.append(match)

            self.log_msg(" --> {0}".format(match['distance']), newline=False)

            self.log_msg("")

        if len(self.match_list) == 0:
            self.log_msg(":-(no matches!")
            self.search_result = self.ResultNoMatches
            return self.match_list

        # sort list by image match scores
        self.match_list.sort(key=lambda k: k['distance'])

        scores = [i['distance'] for i in self.match_list]

        self.log_msg("Compared to covers in {0} issue(s):".format(
            len(self.match_list)), newline=False)
        self.log_msg(str(scores))

        def print_match(item):
            self.log_msg(u"-----> {0} #{1} {2} ({3}/{4}) -- score: {5}".format(
                item['series'],
                item['issue_number'],
                item['issue_title'],
                item['month'],
                item['year'],
                item['distance']))

        best_score = self.match_list[0]['distance']

        if best_score >= self.min_score_thresh:
            # we have 1 or more low-confidence matches (all bad cover scores)
            # look at a few more pages in the archive, and also alternate
            # covers online
            self.log_msg(
                "Very weak scores for the cover. Analyzing alternate pages and covers...")
            hash_list = [cover_hash]
            if narrow_cover_hash is not None:
                hash_list.append(narrow_cover_hash)
            for i in range(1, min(3, ca.getNumberOfPages())):
                image_data = ca.getPage(i)
                page_hash = self.calculateHash(image_data)
                hash_list.append(page_hash)

            second_match_list = []
            counter = 2 * len(self.match_list)
            for m in self.match_list:
                if self.callback is not None:
                    self.callback(counter, len(self.match_list) * 3)
                    counter += 1
                self.log_msg(
                    u"Examining alternate covers for ID: {0} {1} ...".format(
                        m['volume_id'],
                        m['series']),
                    newline=False)
                try:
                    score_item = self.getIssueCoverMatchScore(
                        comicVine,
                        m['issue_id'],
                        m['image_url'],
                        m['thumb_url'],
                        m['page_url'],
                        hash_list,
                        useRemoteAlternates=True)
                except:
                    self.match_list = []
                    return self.match_list
                self.log_msg("--->{0}".format(score_item['score']))
                self.log_msg("")

                if score_item['score'] < self.min_alternate_score_thresh:
                    second_match_list.append(m)
                    m['distance'] = score_item['score']

            if len(second_match_list) == 0:
                if len(self.match_list) == 1:
                    self.log_msg("No matching pages in the issue.")
                    self.log_msg(
                        u"--------------------------------------------------------------------------")
                    print_match(self.match_list[0])
                    self.log_msg(
                        u"--------------------------------------------------------------------------")
                    self.search_result = self.ResultFoundMatchButBadCoverScore
                else:
                    self.log_msg(
                        u"--------------------------------------------------------------------------")
                    self.log_msg(
                        u"Multiple bad cover matches!  Need to use other info...")
                    self.log_msg(
                        u"--------------------------------------------------------------------------")
                    self.search_result = self.ResultMultipleMatchesWithBadImageScores
                return self.match_list
            else:
                # We did good, found something!
                self.log_msg("Success in secondary/alternate cover matching!")

                self.match_list = second_match_list
                # sort new list by image match scores
                self.match_list.sort(key=lambda k: k['distance'])
                best_score = self.match_list[0]['distance']
                self.log_msg(
                    "[Second round cover matching: best score = {0}]".format(best_score))
                # now drop down into the rest of the processing

        if self.callback is not None:
            self.callback(99, 100)

        # now pare down list, remove any item more than specified distant from
        # the top scores
        for item in reversed(self.match_list):
            if item['distance'] > best_score + self.min_score_distance:
                self.match_list.remove(item)

        # One more test for the case choosing limited series first issue vs a trade with the same cover:
        # if we have a given issue count > 1 and the volume from CV has
        # count==1, remove it from match list
        if len(self.match_list) >= 2 and keys[
                'issue_count'] is not None and keys['issue_count'] != 1:
            new_list = list()
            for match in self.match_list:
                if match['cv_issue_count'] != 1:
                    new_list.append(match)
                else:
                    self.log_msg(
                        "Removing volume {0} [{1}] from consideration (only 1 issue)".format(
                            match['series'],
                            match['volume_id']))

            if len(new_list) > 0:
                self.match_list = new_list

        if len(self.match_list) == 1:
            self.log_msg(
                u"--------------------------------------------------------------------------")
            print_match(self.match_list[0])
            self.log_msg(
                u"--------------------------------------------------------------------------")
            self.search_result = self.ResultOneGoodMatch

        elif len(self.match_list) == 0:
            self.log_msg(
                u"--------------------------------------------------------------------------")
            self.log_msg("No matches found :(")
            self.log_msg(
                u"--------------------------------------------------------------------------")
            self.search_result = self.ResultNoMatches
        else:
            # we've got multiple good matches:
            self.log_msg("More than one likely candidate.")
            self.search_result = self.ResultMultipleGoodMatches
            self.log_msg(
                u"--------------------------------------------------------------------------")
            for item in self.match_list:
                print_match(item)
            self.log_msg(
                u"--------------------------------------------------------------------------")

        return self.match_list
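
Tying examples #3 and #4 together, the essential sequence for driving search() looks like this (condensed from the save_tags branch above; ca, md, settings, and opts are assumed to be already built):

# Condensed from process_file_cli's save_tags branch; a sketch, not a program.
ii = IssueIdentifier(ca, settings)
ii.setAdditionalMetadata(md)          # seed the search with local metadata
ii.onlyUseAdditionalMetaData = True
ii.cover_page_index = md.getCoverPageIndexList()[0]
matches = ii.search()
if ii.search_result == ii.ResultOneGoodMatch:
    cv_md = actual_issue_data_fetch(matches[0], settings, opts)
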
Code example #5
File: cli.py  Project: yonkyunior/mylar
def process_file_cli(filename, opts, settings, match_results):

    batch_mode = len(opts.file_list) > 1

    ca = ComicArchive(filename, settings.rar_exe_path)

    if not os.path.lexists(filename):
        print "Cannot find " + filename
        return

    if not ca.seemsToBeAComicArchive():
        print "Sorry, but " + filename + "  is not a comic archive!"
        return

    #if not ca.isWritableForStyle( opts.data_style ) and ( opts.delete_tags or opts.save_tags or opts.rename_file ):
    if not ca.isWritable() and (opts.delete_tags or opts.copy_tags
                                or opts.save_tags or opts.rename_file):
        print "This archive is not writable for that tag type"
        return

    has = [False, False, False]
    if ca.hasCIX(): has[MetaDataStyle.CIX] = True
    if ca.hasCBI(): has[MetaDataStyle.CBI] = True
    if ca.hasCoMet(): has[MetaDataStyle.COMET] = True

    if opts.print_tags:

        if opts.data_style is None:
            page_count = ca.getNumberOfPages()

            brief = ""

            if batch_mode:
                brief = u"{0}: ".format(filename)

            if ca.isZip(): brief += "ZIP archive    "
            elif ca.isRar(): brief += "RAR archive    "
            elif ca.isFolder(): brief += "Folder archive "

            brief += "({0: >3} pages)".format(page_count)
            brief += "  tags:[ "

            if not (has[MetaDataStyle.CBI] or has[MetaDataStyle.CIX]
                    or has[MetaDataStyle.COMET]):
                brief += "none "
            else:
                if has[MetaDataStyle.CBI]: brief += "CBL "
                if has[MetaDataStyle.CIX]: brief += "CR "
                if has[MetaDataStyle.COMET]: brief += "CoMet "
            brief += "]"

            print brief

        if opts.terse:
            return

        print

        if opts.data_style is None or opts.data_style == MetaDataStyle.CIX:
            if has[MetaDataStyle.CIX]:
                print "------ComicRack tags--------"
                if opts.raw:
                    print u"{0}".format(
                        unicode(ca.readRawCIX(), errors='ignore'))
                else:
                    print u"{0}".format(ca.readCIX())

        if opts.data_style is None or opts.data_style == MetaDataStyle.CBI:
            if has[MetaDataStyle.CBI]:
                print "------ComicBookLover tags--------"
                if opts.raw:
                    pprint(json.loads(ca.readRawCBI()))
                else:
                    print u"{0}".format(ca.readCBI())

        if opts.data_style is None or opts.data_style == MetaDataStyle.COMET:
            if has[MetaDataStyle.COMET]:
                print "------CoMet tags--------"
                if opts.raw:
                    print u"{0}".format(ca.readRawCoMet())
                else:
                    print u"{0}".format(ca.readCoMet())

    elif opts.delete_tags:
        style_name = MetaDataStyle.name[opts.data_style]
        if has[opts.data_style]:
            if not opts.dryrun:
                if not ca.removeMetadata(opts.data_style):
                    print u"{0}: Tag removal seemed to fail!".format(filename)
                else:
                    print u"{0}: Removed {1} tags.".format(
                        filename, style_name)
            else:
                print u"{0}: dry-run.  {1} tags not removed".format(
                    filename, style_name)
        else:
            print u"{0}: This archive doesn't have {1} tags to remove.".format(
                filename, style_name)

    elif opts.copy_tags:
        dst_style_name = MetaDataStyle.name[opts.data_style]
        if opts.no_overwrite and has[opts.data_style]:
            print u"{0}: Already has {1} tags.  Not overwriting.".format(
                filename, dst_style_name)
            return
        if opts.copy_source == opts.data_style:
            print u"{0}: Destination and source are same: {1}.  Nothing to do.".format(
                filename, dst_style_name)
            return

        src_style_name = MetaDataStyle.name[opts.copy_source]
        if has[opts.copy_source]:
            if not opts.dryrun:
                md = ca.readMetadata(opts.copy_source)

                if settings.apply_cbl_transform_on_bulk_operation and opts.data_style == MetaDataStyle.CBI:
                    md = CBLTransformer(md, settings).apply()

                if not ca.writeMetadata(md, opts.data_style):
                    print u"{0}: Tag copy seemed to fail!".format(filename)
                else:
                    print u"{0}: Copied {1} tags to {2} .".format(
                        filename, src_style_name, dst_style_name)
            else:
                print u"{0}: dry-run.  {1} tags not copied".format(
                    filename, src_style_name)
        else:
            print u"{0}: This archive doesn't have {1} tags to copy.".format(
                filename, src_style_name)

    elif opts.save_tags:

        if opts.no_overwrite and has[opts.data_style]:
            print u"{0}: Already has {1} tags.  Not overwriting.".format(
                filename, MetaDataStyle.name[opts.data_style])
            return

        if batch_mode:
            print u"Processing {0}...".format(filename)

        md = create_local_metadata(opts, ca, has[opts.data_style])
        if md.issue is None or md.issue == "":
            if opts.assume_issue_is_one_if_not_set:
                md.issue = "1"

        # now, search online
        if opts.search_online:
            if opts.issue_id is not None:
                # we were given the actual ID to search with
                try:
                    comicVine = ComicVineTalker()
                    comicVine.wait_for_rate_limit = opts.wait_and_retry_on_rate_limit
                    cv_md = comicVine.fetchIssueDataByIssueID(
                        opts.issue_id, settings)
                except ComicVineTalkerException:
                    print "Network error while getting issue details.  Save aborted"
                    match_results.fetchDataFailures.append(filename)
                    return

                if cv_md is None:
                    print "No match for ID {0} was found.".format(
                        opts.issue_id)
                    match_results.noMatches.append(filename)
                    return

                if settings.apply_cbl_transform_on_cv_import:
                    cv_md = CBLTransformer(cv_md, settings).apply()
            else:
                ii = IssueIdentifier(ca, settings)

                if md is None or md.isEmpty:
                    print "No metadata given to search online with!"
                    match_results.noMatches.append(filename)
                    return

                def myoutput(text):
                    if opts.verbose:
                        IssueIdentifier.defaultWriteOutput(text)

                # use our overlayed MD struct to search
                ii.setAdditionalMetadata(md)
                ii.onlyUseAdditionalMetaData = True
                ii.waitAndRetryOnRateLimit = opts.wait_and_retry_on_rate_limit
                ii.setOutputFunction(myoutput)
                ii.cover_page_index = md.getCoverPageIndexList()[0]
                matches = ii.search()

                result = ii.search_result

                found_match = False
                choices = False
                low_confidence = False

                if result == ii.ResultNoMatches:
                    pass
                elif result == ii.ResultFoundMatchButBadCoverScore:
                    low_confidence = True
                    found_match = True
                elif result == ii.ResultFoundMatchButNotFirstPage:
                    found_match = True
                elif result == ii.ResultMultipleMatchesWithBadImageScores:
                    low_confidence = True
                    choices = True
                elif result == ii.ResultOneGoodMatch:
                    found_match = True
                elif result == ii.ResultMultipleGoodMatches:
                    choices = True

                if choices:
                    if low_confidence:
                        print "Online search: Multiple low confidence matches.  Save aborted"
                        match_results.lowConfidenceMatches.append(
                            MultipleMatch(filename, matches))
                        return
                    else:
                        print "Online search: Multiple good matches.  Save aborted"
                        match_results.multipleMatches.append(
                            MultipleMatch(filename, matches))
                        return
                if low_confidence and opts.abortOnLowConfidence:
                    print "Online search: Low confidence match.  Save aborted"
                    match_results.lowConfidenceMatches.append(
                        MultipleMatch(filename, matches))
                    return
                if not found_match:
                    print "Online search: No match found.  Save aborted"
                    match_results.noMatches.append(filename)
                    return

                # we got here, so we have a single match

                # now get the particular issue data
                cv_md = actual_issue_data_fetch(matches[0], settings, opts)
                if cv_md is None:
                    match_results.fetchDataFailures.append(filename)
                    return

            md.overlay(cv_md)

        # ok, done building our metadata. time to save
        if not actual_metadata_save(ca, opts, md):
            match_results.writeFailures.append(filename)
        else:
            match_results.goodMatches.append(filename)

    elif opts.rename_file:

        msg_hdr = ""
        if batch_mode:
            msg_hdr = u"{0}: ".format(filename)

        if opts.data_style is not None:
            use_tags = has[opts.data_style]
        else:
            use_tags = False

        md = create_local_metadata(opts, ca, use_tags)

        if md.series is None:
            print msg_hdr + "Can't rename without series name"
            return

        new_ext = None  # default
        if settings.rename_extension_based_on_archive:
            if ca.isZip():
                new_ext = ".cbz"
            elif ca.isRar():
                new_ext = ".cbr"

        renamer = FileRenamer(md)
        renamer.setTemplate(settings.rename_template)
        renamer.setIssueZeroPadding(settings.rename_issue_number_padding)
        renamer.setSmartCleanup(settings.rename_use_smart_string_cleanup)

        new_name = renamer.determineName(filename, ext=new_ext)

        if new_name == os.path.basename(filename):
            print msg_hdr + "Filename is already good!"
            return

        folder = os.path.dirname(os.path.abspath(filename))
        new_abs_path = utils.unique_file(os.path.join(folder, new_name))

        suffix = ""
        if not opts.dryrun:
            # rename the file
            os.rename(filename, new_abs_path)
        else:
            suffix = " (dry-run, no change)"

        print u"renamed '{0}' -> '{1}' {2}".format(os.path.basename(filename),
                                                   new_name, suffix)

    elif opts.export_to_zip:
        msg_hdr = ""
        if batch_mode:
            msg_hdr = u"{0}: ".format(filename)

        if not ca.isRar():
            print msg_hdr + "Archive is not a RAR."
            return

        rar_file = os.path.abspath(filename)
        new_file = os.path.splitext(rar_file)[0] + ".cbz"

        if opts.abort_export_on_conflict and os.path.lexists(new_file):
            print msg_hdr + "{0} already exists in the that folder.".format(
                os.path.split(new_file)[1])
            return

        new_file = utils.unique_file(new_file)

        delete_success = False
        export_success = False
        if not opts.dryrun:
            if ca.exportAsZip(new_file):
                export_success = True
                if opts.delete_rar_after_export:
                    try:
                        os.unlink(rar_file)
                    except OSError:
                        print msg_hdr + "Error deleting original RAR after export"
                        delete_success = False
                    else:
                        delete_success = True
            else:
                # last export failed, so remove the zip, if it exists
                if os.path.lexists(new_file):
                    os.remove(new_file)
        else:
            msg = msg_hdr + u"Dry-run:  Would try to create {0}".format(
                os.path.split(new_file)[1])
            if opts.delete_rar_after_export:
                msg += u" and delete orginal."
            print msg
            return

        msg = msg_hdr
        if export_success:
            msg += u"Archive exported successfully to: {0}".format(
                os.path.split(new_file)[1])
            if opts.delete_rar_after_export and delete_success:
                msg += u" (Original deleted) "
        else:
            msg += u"Archive failed to export!"

        print msg
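
Finally, both versions of process_file_cli infer batch mode from len(opts.file_list), which implies a caller shaped roughly like this (a sketch; OnlineMatchResults is assumed to be the container whose goodMatches, noMatches, and similar lists are appended to above):

# Hypothetical driver loop implied by the batch_mode check above.
match_results = OnlineMatchResults()      # assumed results container
for filename in opts.file_list:
    process_file_cli(filename, opts, settings, match_results)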