def UndeleteFiles( hashes ):
    """
    Undelete the given files from the trash.
    
    For each file hash that is currently in the trash, an undelete content
    update is issued for every local file domain the file has been deleted
    from. Work is done synchronously in chunks of 64 hashes.
    """
    
    local_domain_keys = HG.client_controller.services_manager.GetServiceKeys( ( HC.LOCAL_FILE_DOMAIN, ) )
    
    for hash_block in HydrusData.SplitIteratorIntoChunks( hashes, 64 ):
        
        block_media_results = HG.client_controller.Read( 'media_results', hash_block )
        
        undeletees = collections.defaultdict( list )
        
        for mr in block_media_results:
            
            locations_manager = mr.GetLocationsManager()
            
            # only files sitting in the trash are eligible for undelete
            if CC.TRASH_SERVICE_KEY not in locations_manager.GetCurrent():
                
                continue
                
            
            deleted_local_domains = locations_manager.GetDeleted().intersection( local_domain_keys )
            
            for deleted_service_key in deleted_local_domains:
                
                undeletees[ deleted_service_key ].append( mr.GetHash() )
                
            
        
        # one synchronous write per destination service for this chunk
        for ( destination_service_key, destination_hashes ) in undeletees.items():
            
            undelete_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_UNDELETE, destination_hashes )
            
            HG.client_controller.WriteSynchronous( 'content_updates', { destination_service_key : [ undelete_update ] } )
            
        
    
def GetHashIdsAndNonZeroTagCounts( self, tag_display_type: int, location_context: ClientLocation.LocationContext, tag_search_context: ClientSearch.TagSearchContext, hash_ids, namespace_wildcard = None, job_key = None ):
    """
    For each of the given hash_ids that has at least one matching tag, return
    ( hash_id, tag_count ) rows.
    
    Counts are taken across the mapping tables covering the location context
    and tag search context; if namespace_wildcard is given (and is not '*'),
    only tags in matching namespaces are counted.
    
    If job_key is cancelled mid-way, the partial results gathered so far are
    returned rather than raising.
    """
    
    # '*' means 'any namespace', which is the same as no filter at all
    if namespace_wildcard == '*':
        
        namespace_wildcard = None
        
    
    if namespace_wildcard is None:
        
        namespace_ids = []
        
    else:
        
        namespace_ids = self.modules_tag_search.GetNamespaceIdsFromWildcard( namespace_wildcard )
        
    
    # temp table of the namespace filter; empty (and unused in the SQL) when there is no filter
    with self._MakeTemporaryIntegerTable( namespace_ids, 'namespace_id' ) as temp_namespace_ids_table_name:
        
        ( file_service_keys, file_location_is_cross_referenced ) = location_context.GetCoveringCurrentFileServiceKeys()
        
        # gather every ( mappings_table, tags_table ) pair we have to consult for this search
        mapping_and_tag_table_names = set()
        
        for file_service_key in file_service_keys:
            
            mapping_and_tag_table_names.update( self.modules_tag_search.GetMappingAndTagTables( tag_display_type, file_service_key, tag_search_context ) )
            
        
        # reason why I (JOIN each table) rather than (join the UNION) is based on previous hell with having query planner figure out a "( a UNION b UNION c ) NATURAL JOIN stuff" situation
        # although the following sometimes makes certifiable 2KB ( 6 UNION * 4-table ) queries, it actually works fast
        
        # OK, a new problem is mass UNION leads to terrible cancelability because the first row cannot be fetched until the first n - 1 union queries are done
        # I tried some gubbins to try to do a pseudo table-union rather than query union and do 'get files->distinct tag count for this union of tables, and fetch hash_ids first on the union', but did not have luck
        # so NOW we are just going to do it in bits of files mate. this also reduces memory use from the distinct-making UNION with large numbers of hash_ids
        
        results = []
        
        BLOCK_SIZE = max( 64, int( len( hash_ids ) ** 0.5 ) ) # go for square root for now
        
        for group_of_hash_ids in HydrusData.SplitIteratorIntoChunks( hash_ids, BLOCK_SIZE ):
            
            with self._MakeTemporaryIntegerTable( group_of_hash_ids, 'hash_id' ) as hash_ids_table_name:
                
                # CROSS JOIN here is the SQLite idiom to pin join order for the planner, not a cartesian product
                if namespace_wildcard is None:
                    
                    # temp hashes to mappings
                    select_statements = [ 'SELECT hash_id, tag_id FROM {} CROSS JOIN {} USING ( hash_id )'.format( hash_ids_table_name, mappings_table_name ) for ( mappings_table_name, tags_table_name ) in mapping_and_tag_table_names ]
                    
                else:
                    
                    # temp hashes to mappings to tags to namespaces
                    select_statements = [ 'SELECT hash_id, tag_id FROM {} CROSS JOIN {} USING ( hash_id ) CROSS JOIN {} USING ( tag_id ) CROSS JOIN {} USING ( namespace_id )'.format( hash_ids_table_name, mappings_table_name, tags_table_name, temp_namespace_ids_table_name ) for ( mappings_table_name, tags_table_name ) in mapping_and_tag_table_names ]
                    
                
                # UNION (not UNION ALL) also dedupes ( hash_id, tag_id ) rows across the source tables before counting
                unions = '( {} )'.format( ' UNION '.join( select_statements ) )
                
                query = 'SELECT hash_id, COUNT( tag_id ) FROM {} GROUP BY hash_id;'.format( unions )
                
                cursor = self._Execute( query )
                
                cancelled_hook = None
                
                if job_key is not None:
                    
                    cancelled_hook = job_key.IsCancelled
                    
                
                loop_of_results = HydrusDB.ReadFromCancellableCursor( cursor, 64, cancelled_hook = cancelled_hook )
                
                # on cancel, hand back whatever we have accumulated so far
                if job_key is not None and job_key.IsCancelled():
                    
                    return results
                    
                
                results.extend( loop_of_results )
                
            
        
        return results
        
    
def UndeleteMedia( win, media ):
    """
    GUI flow to undelete the given media objects.
    
    Works out which local file domains the media have been deleted from. If
    there is more than one candidate, asks the user to pick one (or 'all the
    above', i.e. the combined local media domain); if exactly one, optionally
    confirms per the 'confirm_trash' option. Then fires undelete content
    updates in chunks of 64.
    
    Fixes vs previous version: removed the unused enumerate index and a
    redundant always-true 'len( choice_tuples ) > 1' guard (it was only
    reachable when more than one undeletable service existed).
    """
    
    media_deleted_service_keys = HydrusData.MassUnion( ( m.GetLocationsManager().GetDeleted() for m in media ) )
    
    local_file_services = HG.client_controller.services_manager.GetServices( ( HC.LOCAL_FILE_DOMAIN, ) )
    
    undeletable_services = [ local_file_service for local_file_service in local_file_services if local_file_service.GetServiceKey() in media_deleted_service_keys ]
    
    if len( undeletable_services ) > 0:
        
        do_it = False
        
        if len( undeletable_services ) > 1:
            
            choice_tuples = [ ( service.GetName(), service, 'Undelete back to {}.'.format( service.GetName() ) ) for service in undeletable_services ]
            
            # more than one real destination, so also offer 'everything' via the combined local media domain
            service = HG.client_controller.services_manager.GetService( CC.COMBINED_LOCAL_MEDIA_SERVICE_KEY )
            
            choice_tuples.append( ( 'all the above', service, 'Undelete back to all services the files have been deleted from.' ) )
            
            try:
                
                undelete_service = ClientGUIDialogsQuick.SelectFromListButtons( win, 'Undelete for?', choice_tuples )
                
                do_it = True
                
            except HydrusExceptions.CancelledException:
                
                return
                
            
        else:
            
            ( undelete_service, ) = undeletable_services
            
            if HC.options[ 'confirm_trash' ]:
                
                result = ClientGUIDialogsQuick.GetYesNo( win, 'Undelete this file back to {}?'.format( undelete_service.GetName() ) )
                
                if result == QW.QDialog.Accepted:
                    
                    do_it = True
                    
                
            else:
                
                do_it = True
                
            
        
        if do_it:
            
            for chunk_of_media in HydrusData.SplitIteratorIntoChunks( media, 64 ):
                
                service_keys_to_content_updates = collections.defaultdict( list )
                
                service_key = undelete_service.GetServiceKey()
                
                # only undelete files actually deleted from the chosen service
                undeletee_hashes = [ m.GetHash() for m in chunk_of_media if service_key in m.GetLocationsManager().GetDeleted() ]
                
                service_keys_to_content_updates[ service_key ] = [ HydrusData.ContentUpdate( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_UNDELETE, undeletee_hashes ) ]
                
                HG.client_controller.Write( 'content_updates', service_keys_to_content_updates )
                
            
        
    