def normalized_mime_type(self):
    """Return the normalized mime-type for this entry, as resolved by the
    utility layer.

    Raises whatever get_normalized_mime_type() raises; the failure is
    logged with this entry's ID before being re-raised.
    """

    try:
        return get_utility().get_normalized_mime_type(self)
    except Exception:
        # Narrowed from a bare `except:` so that system-exiting exceptions
        # (KeyboardInterrupt, SystemExit) are not intercepted just to log.
        self.__log.exception("Could not render a mime-type for entry with"
                             " ID [%s], for read." % (self.id))
        raise
def __init__(self, gd_resource_type, raw_data):
    """Normalize a raw Google Drive API record into our internal info
    dictionary.

    raw_data: the dict decoded from the API response. A missing required
    key is logged (with the key name) before the KeyError propagates.
    """

    # LESSONLEARNED: We had these set as properties, but CPython was
    # reusing the reference between objects.
    self.__log = logging.getLogger().getChild('NormalEntry')
    self.__info = {}
    self.__parents = []
    self.__raw_data = raw_data
    self.__cache_data = None
    self.__cache_mimetypes = None
    self.__cache_dict = {}

    try:
        # True if reading from this file should return info and deposit
        # the data elsewhere. This is predominantly determined by whether
        # we can get a file-size up-front, or we have to decide on a
        # specific mime-type in order to do so.
        #
        # NOTE(review): this was previously computed before the try-block
        # (so a missing 'mimeType' key escaped unlogged) and the text
        # above was a stray mid-function docstring statement; both fixed.
        requires_mimetype = (u'fileSize' not in self.__raw_data and
                             raw_data[u'mimeType'] !=
                                self.__directory_mimetype)

        self.__info['requires_mimetype'] = requires_mimetype

        # Required keys: a KeyError on any of these is logged below.
        self.__info['title'] = raw_data[u'title']
        self.__info['mime_type'] = raw_data[u'mimeType']
        self.__info['labels'] = raw_data[u'labels']
        self.__info['id'] = raw_data[u'id']
        self.__info['last_modifying_user_name'] = \
            raw_data[u'lastModifyingUserName']
        self.__info['writers_can_share'] = raw_data[u'writersCanShare']
        self.__info['owner_names'] = raw_data[u'ownerNames']
        self.__info['editable'] = raw_data[u'editable']
        self.__info['user_permission'] = raw_data[u'userPermission']

        # Optional keys fall back to a sensible default.
        self.__info['download_links'] = raw_data.get(u'exportLinks', {})
        self.__info['link'] = raw_data.get(u'embedLink')
        self.__info['file_size'] = int(raw_data.get(u'fileSize', 0))
        self.__info['file_extension'] = raw_data.get(u'fileExtension')
        self.__info['md5_checksum'] = raw_data.get(u'md5Checksum')
        self.__info['image_media_metadata'] = \
            raw_data.get(u'imageMediaMetadata')

        if u'downloadUrl' in raw_data:
            self.__info['download_links'][self.__info['mime_type']] = \
                raw_data[u'downloadUrl']

        # This is encoded for displaying locally.
        self.__info['title_fs'] = \
            get_utility().translate_filename_charset(self.__info['title'])

        for parent in raw_data[u'parents']:
            self.__parents.append(parent[u'id'])
    except KeyError as e:
        self.__log.exception("Could not normalize entry on raw key [%s]. "
                             "Does not exist in source." % (str(e)))
        raise
def __init__(self, gd_resource_type, raw_data):
    """Wrap a raw Google Drive API record, exposing a normalized public
    info dictionary and the list of parent IDs.

    Raises KeyError (after logging the missing key name) when a required
    field is absent from raw_data.
    """

    # LESSONLEARNED: We had these set as properties, but CPython was
    # reusing the reference between objects.
    self.__log = logging.getLogger().getChild('NormalEntry')

    self.info = {}
    self.parents = []
    self.raw_data = raw_data

    try:
        info = self.info

        # Fields that must be present in every record.
        info['mime_type'] = raw_data[u'mimeType']
        info['labels'] = raw_data[u'labels']
        info['id'] = raw_data[u'id']
        info['title'] = raw_data[u'title']
        info['last_modifying_user_name'] = \
            raw_data[u'lastModifyingUserName']
        info['writers_can_share'] = raw_data[u'writersCanShare']
        info['owner_names'] = raw_data[u'ownerNames']
        info['editable'] = raw_data[u'editable']
        info['user_permission'] = raw_data[u'userPermission']

        # Timestamps, kept both as parsed datetimes and as epoch seconds.
        modified = dateutil.parser.parse(raw_data[u'modifiedDate'])
        info['modified_date'] = modified
        info['modified_date_epoch'] = int(mktime(modified.timetuple()))

        created = dateutil.parser.parse(raw_data[u'createdDate'])
        info['created_date'] = created
        info['created_date_epoch'] = int(mktime(created.timetuple()))

        # Fields that may be absent; substitute defaults.
        info['download_links'] = raw_data.get(u'exportLinks', {})
        info['link'] = raw_data.get(u'embedLink')
        info['modified_by_me_date'] = raw_data.get(u'modifiedByMeDate')
        info['last_viewed_by_me_date'] = \
            raw_data.get(u'lastViewedByMeDate')
        info['file_size'] = int(raw_data.get(u'fileSize', 0))
        info['file_extension'] = raw_data.get(u'fileExtension')
        info['md5_checksum'] = raw_data.get(u'md5Checksum')
        info['image_media_metadata'] = raw_data.get(u'imageMediaMetadata')

        if u'downloadUrl' in raw_data:
            info['download_links'][info['mime_type']] = \
                raw_data[u'downloadUrl']

        # This is encoded for displaying locally.
        info['title_fs'] = \
            get_utility().translate_filename_charset(raw_data[u'title'])

        for parent_info in raw_data[u'parents']:
            self.parents.append(parent_info[u'id'])

        self.__log.debug("Entry with ID [%s] is visible? %s" %
                         (self.id, self.is_visible))
    except KeyError as e:
        self.__log.exception("Could not normalize entry on raw key [%s]. "
                             "Does not exist in source." % (str(e)))
        raise
def __find_path_components(self, path):
    """Given a path, return a list of all Google Drive entries that
    comprise each component, or as many as can be found. As we've
    ensured that all sibling filenames are unique, there can not be
    multiple matches.

    Returns a 3-tuple (entry_ids, path_parts, all_found); all_found is
    True only when every component was resolved, and in that case the
    result is also stored in self.path_cache / self.path_cache_byid.
    """

    self.__log.debug("Searching for path components of [%s]. Now "
                     "resolving entry_clause." % (path))

    # Canonicalize: drop a leading slash and a trailing slash so the cache
    # key and the split('/') below are stable.
    if path[0] == '/':
        path = path[1:]

    if len(path) and path[-1] == '/':
        path = path[:-1]

    # Fast path: previously fully-resolved paths are cached.
    if path in self.path_cache:
        return self.path_cache[path]

    with PathRelations.rlock:
        self.__log.debug("Locating entry information for path [%s]." %
                         (path))

        try:
            root_id = AccountInfo.get_instance().root_id
        except:
            self.__log.exception("Could not get root-ID.")
            raise

        # Ensure that the root node is loaded.
        try:
            self.__get_entry_clause_by_id(root_id)
        except:
            self.__log.exception("Could not ensure root-node with entry-ID "
                                 "[%s]." % (root_id))
            raise

        path_parts = path.split('/')

        # Walk component-by-component starting at the root; entry_ptr is
        # the entry-ID of the node currently being searched.
        entry_ptr = root_id
        parent_id = None
        i = 0
        num_parts = len(path_parts)
        results = [ ]
        while i < num_parts:
            # Translate the component into the local filesystem charset
            # before comparing against stored child names.
            child_filename_to_search_fs = get_utility(). \
                translate_filename_charset(path_parts[i])

            self.__log.debug("Checking for part (%d) [%s] under parent with "
                             "ID [%s]." % (i, child_filename_to_search_fs,
                                           entry_ptr))

            try:
                current_clause = self.entry_ll[entry_ptr]
            except:
                # TODO: If entry with ID entry_ptr is not registered, update
                # children of parent parent_id. Throttle how often this
                # happens.
                self.__log.exception("Could not find current subdirectory. "
                                     "ENTRY_ID= [%s]" % (entry_ptr))
                raise

            # Search this entry's children for the next filename further down
            # in the path among this entry's children. Any duplicates should've
            # already beeen handled as entries were stored. We name the variable
            # just to emphasize that no ambiguity -as well as- no error will
            # occur in the traversal process.
            # NOTE(review): first_matching_child_clause is never read below;
            # retained verbatim.
            first_matching_child_clause = None
            children = current_clause[2]

            # If they just wanted the "" path (root), return the root-ID.
            if path == "":
                found = [ root_id ]
            else:
                # child_tuple is (filename_fs, entry_clause); entry_clause[3]
                # appears to be the entry-ID — TODO confirm clause layout.
                found = [ child_tuple[1][3]
                          for child_tuple
                          in children
                          if child_tuple[0] == child_filename_to_search_fs ]

            if found:
                self.__log.debug("Found matching child with ID [%s]." %
                                 (found[0]))
                results.append(found[0])
            else:
                # Partial resolution: report what we matched so far.
                self.__log.debug("Did not find matching child.")
                return (results, path_parts, False)

            # Have we traveled far enough into the linked list?
            if (i + 1) >= num_parts:
                self.__log.debug("Path has been completely resolved: %s" %
                                 (', '.join(results)))

                # Cache the fully-resolved path in both directions.
                self.path_cache[path] = (results, path_parts, True)
                final_entry_id = results[-1]
                self.path_cache_byid[final_entry_id] = path
                return self.path_cache[path]

            # Descend one level.
            parent_id = entry_ptr
            entry_ptr = found[0]
            i += 1
def create_directory(self, filename, **kwargs):
    """Create a directory named `filename` by inserting a new entry with
    the directory mime-type.
    """

    dir_mimetype = get_utility().mimetype_directory
    return self.__insert_entry(filename, dir_mimetype, **kwargs)
def is_directory(self):
    """Whether this entry represents a directory (True) or a file."""
    util = get_utility()
    return util.is_directory(self)
def __init__(self, gd_resource_type, raw_data):
    """Normalize a raw Google Drive API record into our internal info
    dictionary.

    raw_data: the dict decoded from the API response. A missing required
    key is logged (with the key name) before the KeyError propagates.
    """

    # LESSONLEARNED: We had these set as properties, but CPython was
    # reusing the reference between objects.
    self.__log = logging.getLogger().getChild('NormalEntry')
    self.__info = {}
    self.__parents = []
    self.__raw_data = raw_data
    self.__cache_data = None
    self.__cache_mimetypes = None
    self.__cache_dict = {}

    try:
        # True if reading from this file should return info and deposit
        # the data elsewhere. This is predominantly determined by whether
        # we can get a file-size up-front, or we have to decide on a
        # specific mime-type in order to do so.
        #
        # NOTE(review): this was previously computed before the try-block
        # (so a missing 'mimeType' key escaped unlogged) and the text
        # above was a stray mid-function docstring statement; both fixed.
        requires_mimetype = (u'fileSize' not in self.__raw_data and
                             raw_data[u'mimeType'] !=
                                self.__directory_mimetype)

        self.__info['requires_mimetype'] = requires_mimetype

        # Required keys: a KeyError on any of these is logged below.
        self.__info['title'] = raw_data[u'title']
        self.__info['mime_type'] = raw_data[u'mimeType']
        self.__info['labels'] = raw_data[u'labels']
        self.__info['id'] = raw_data[u'id']
        self.__info['last_modifying_user_name'] = \
            raw_data[u'lastModifyingUserName']
        self.__info['writers_can_share'] = raw_data[u'writersCanShare']
        self.__info['owner_names'] = raw_data[u'ownerNames']
        self.__info['editable'] = raw_data[u'editable']
        self.__info['user_permission'] = raw_data[u'userPermission']

        # Optional keys fall back to a sensible default.
        self.__info['download_links'] = raw_data.get(u'exportLinks', {})
        self.__info['link'] = raw_data.get(u'embedLink')
        self.__info['file_size'] = int(raw_data.get(u'fileSize', 0))
        self.__info['file_extension'] = raw_data.get(u'fileExtension')
        self.__info['md5_checksum'] = raw_data.get(u'md5Checksum')
        self.__info['image_media_metadata'] = \
            raw_data.get(u'imageMediaMetadata')

        if u'downloadUrl' in raw_data:
            self.__info['download_links'][self.__info['mime_type']] = \
                raw_data[u'downloadUrl']

        # This is encoded for displaying locally.
        self.__info['title_fs'] = get_utility().translate_filename_charset(
            self.__info['title'])

        for parent in raw_data[u'parents']:
            self.__parents.append(parent[u'id'])
    except KeyError as e:
        self.__log.exception(
            "Could not normalize entry on raw key [%s]. Does not exist in source." % (str(e)))
        raise
def __find_path_components(self, path):
    """Given a path, return a list of all Google Drive entries that
    comprise each component, or as many as can be found. As we've
    ensured that all sibling filenames are unique, there can not be
    multiple matches.

    Returns a 3-tuple (entry_ids, path_parts, all_found); all_found is
    True only when every component was resolved, and in that case the
    result is also stored in self.path_cache / self.path_cache_byid.
    """

    self.__log.debug("Searching for path components of [%s]. Now "
                     "resolving entry_clause." % (path))

    # Canonicalize: drop a leading slash and a trailing slash so the cache
    # key and the split('/') below are stable.
    if path[0] == '/':
        path = path[1:]

    if len(path) and path[-1] == '/':
        path = path[:-1]

    # Fast path: previously fully-resolved paths are cached.
    if path in self.path_cache:
        return self.path_cache[path]

    with PathRelations.rlock:
        self.__log.debug("Locating entry information for path [%s]." %
                         (path))

        try:
            root_id = AccountInfo.get_instance().root_id
        except:
            self.__log.exception("Could not get root-ID.")
            raise

        # Ensure that the root node is loaded.
        try:
            self.__get_entry_clause_by_id(root_id)
        except:
            self.__log.exception(
                "Could not ensure root-node with entry-ID "
                "[%s]." % (root_id))
            raise

        path_parts = path.split('/')

        # Walk component-by-component starting at the root; entry_ptr is
        # the entry-ID of the node currently being searched.
        entry_ptr = root_id
        parent_id = None
        i = 0
        num_parts = len(path_parts)
        results = []
        while i < num_parts:
            # Translate the component into the local filesystem charset
            # before comparing against stored child names.
            child_filename_to_search_fs = get_utility(). \
                translate_filename_charset(path_parts[i])

            self.__log.debug(
                "Checking for part (%d) [%s] under parent with "
                "ID [%s]." % (i, child_filename_to_search_fs, entry_ptr))

            try:
                current_clause = self.entry_ll[entry_ptr]
            except:
                # TODO: If entry with ID entry_ptr is not registered, update
                # children of parent parent_id. Throttle how often this
                # happens.
                self.__log.exception(
                    "Could not find current subdirectory. "
                    "ENTRY_ID= [%s]" % (entry_ptr))
                raise

            # Search this entry's children for the next filename further down
            # in the path among this entry's children. Any duplicates should've
            # already beeen handled as entries were stored. We name the variable
            # just to emphasize that no ambiguity -as well as- no error will
            # occur in the traversal process.
            # NOTE(review): first_matching_child_clause is never read below;
            # retained verbatim.
            first_matching_child_clause = None
            children = current_clause[2]

            # If they just wanted the "" path (root), return the root-ID.
            if path == "":
                found = [root_id]
            else:
                # child_tuple is (filename_fs, entry_clause); entry_clause[3]
                # appears to be the entry-ID — TODO confirm clause layout.
                found = [
                    child_tuple[1][3]
                    for child_tuple
                    in children
                    if child_tuple[0] == child_filename_to_search_fs
                ]

            if found:
                self.__log.debug("Found matching child with ID [%s]." %
                                 (found[0]))
                results.append(found[0])
            else:
                # Partial resolution: report what we matched so far.
                self.__log.debug("Did not find matching child.")
                return (results, path_parts, False)

            # Have we traveled far enough into the linked list?
            if (i + 1) >= num_parts:
                self.__log.debug("Path has been completely resolved: %s" %
                                 (', '.join(results)))

                # Cache the fully-resolved path in both directions.
                self.path_cache[path] = (results, path_parts, True)
                final_entry_id = results[-1]
                self.path_cache_byid[final_entry_id] = path
                return self.path_cache[path]

            # Descend one level.
            parent_id = entry_ptr
            entry_ptr = found[0]
            i += 1