def write_index(self, entries):
    """Serialize *entries* into .git/index using the same layout as real git.

    File layout:
      * 12-byte header: signature b'DIRC', version 2, entry count;
      * one record per entry: a 62-byte fixed header packed big-endian,
        followed by the path and NUL padding so each record's total length
        is a multiple of 8 bytes (at least one NUL terminates the path);
      * a trailing 20-byte SHA-1 digest of everything before it.
    """
    records = []
    for e in entries:
        # 62-byte fixed portion of the record.
        head = struct.pack(
            '!LLLLLLLLLL20sH',
            e.ctime_s, e.ctime_n, e.mtime_s, e.mtime_n,
            e.dev, e.ino, e.mode, e.uid, e.gid, e.size,
            e.sha1, e.flags)
        encoded_path = e.path.encode()
        # Round 62 + len(path) + 1..8 up to a multiple of 8 for alignment.
        padded_len = ((62 + len(encoded_path) + 8) // 8) * 8
        padding = b'\x00' * (padded_len - 62 - len(encoded_path))
        records.append(head + encoded_path + padding)
    header = struct.pack('!4sLL', b'DIRC', 2, len(entries))
    body = header + b''.join(records)
    checksum = hashlib.sha1(body).digest()
    write_file(os.path.join(self.git_path, 'index'), body + checksum)
def save_post_data_to_csv_by_profile(dir_name, file_name, posts):
    """Append up to ``p_counts`` posts, one CSV row each, to dir_name/file_name.

    ``read_file`` reports (via its first element) whether the CSV already
    existed; ``write_file`` appends a single row dict.

    BUG FIX: the original code wrote the raw ``post.location`` object into
    all five ``_location_*``/``_lat``/``_lng`` columns; the individual
    fields are now extracted (the intent shown by the previously
    commented-out lines).
    """
    file_data = read_file(dir_name, file_name)
    if file_data[0]:
        print('Successfully opened the _Profile_Posts_Export.csv file')
    else:
        print('Created _Profile_Posts_Export.csv file')
    for post in islice(posts, ceil(p_counts)):
        loc = post.location  # None when the post has no tagged location
        new_row = {
            '_username': post.owner_username,
            '_media_id': post.mediaid,
            '_short_url': post.shortcode,
            '_date': post.date_local,
            '_date(GMT)': post.date_utc,
            '_caption': post.caption,
            '_comments_count': post.comments,
            '_likes_count': post.likes,
            '_video_views': post.video_view_count,
            '_video_url': post.video_url,
            '_thumbnail_url': post.url,
            '_image_url': post.url,
            # Attribute names follow instaloader's PostLocation —
            # TODO(review): confirm against the installed library version.
            '_location_id': loc.id if loc else None,
            '_location_name': loc.name if loc else None,
            '_location_url': loc.slug if loc else None,
            '_lat': loc.lat if loc else None,
            '_lng': loc.lng if loc else None
        }
        write_file(dir_name, file_name, new_row)
def action_save_resource_record(self, resource_list, type='all'):
    """Write the resource-list snapshot file and record its timestamp.

    type == 'all':    full, manually triggered snapshot
    type == 'partly': partial automatic refresh after excluding resource files

    Returns ``lib.returnMsg(0, msg, timestamp)`` on success.
    """
    payload = lib.line_sep.join(resource_list) + '\n'
    lib.write_file(self.file_resource_list, payload)
    # Take the current time directly instead of the file's mtime
    # (%A in the format yields the English weekday name, e.g. Wednesday).
    now = datetime.datetime.now()
    self.resource_list_final_modified_time = now.strftime(
        self.resource_list_final_modified_time_format)
    prefix = '(自动)' if type == 'partly' else ''
    msg = '%s生成资源文件快照成功!文件总数:%d' % (prefix, len(resource_list))
    return lib.returnMsg(0, msg, self.resource_list_final_modified_time)
def init() -> None:
    """Initialize a .git working directory under the current directory.

    Creates .git with objects/, refs/ and refs/heads/ subdirectories and
    points HEAD at refs/heads/master. Does nothing if .git already exists.
    """
    git_path = os.path.join(os.getcwd(), ".git")
    if os.path.exists(git_path):
        click.echo("工作目录已存在")
        return
    os.mkdir(git_path)
    # refs must exist before refs/heads, hence the ordered tuple.
    for sub in ("objects", "refs", "refs/heads"):
        os.mkdir(os.path.join(git_path, sub))
    write_file(os.path.join(git_path, "HEAD"), b"ref: refs/heads/master")
    click.echo(f"完成初始化 git 工作目录 {git_path}")
def save_new_posts_by_hashtag(hashtag):
    """Fetch the newest posts for *hashtag* and append one CSV row per post
    to csvs/Hash_Tag_Export.csv (up to ``p_counts`` rows).

    BUG FIX: the original location columns used the expression
    ``post.location == None and post.location or None``, which evaluates to
    ``None`` in *every* case (location None -> ``True and None`` -> None;
    location set -> ``False or None`` -> None). Individual location fields
    are now extracted properly.
    """
    hashtag = hashtag.replace("#", "")
    posts = Hashtag.from_name(IL.context, hashtag).get_posts()
    posts_sorted_by_time = sorted(posts, key=lambda p: p.date_utc, reverse=True)
    file_data = read_file('csvs', 'Hash_Tag_Export.csv')
    if file_data[0]:
        print('Successfully opened the Hash_Tag_Export.csv file')
    else:
        print('Created Hash_Tag_Export.csv file')
    for post in islice(posts_sorted_by_time, ceil(p_counts)):
        loc = post.location  # None when the post has no tagged location
        new_row = {
            '_Hash_Tag': hashtag,
            '_media_id': post.mediaid,
            '_short_url': post.shortcode,
            '_date': post.date_local,
            '_date(GMT)': post.date_utc,
            '_caption': post.caption,
            '_comments_count': post.comments,
            '_likes_count': post.likes,
            '_video_views': post.video_view_count,
            '_video_url': post.video_url,
            '_thumbnail_url': post.url,
            '_image_url': post.url,
            # Attribute names follow instaloader's PostLocation —
            # TODO(review): confirm against the installed library version.
            '_location_id': loc.id if loc else None,
            '_location_name': loc.name if loc else None,
            '_location_url': loc.slug if loc else None,
            '_lat': loc.lat if loc else None,
            '_lng': loc.lng if loc else None,
            '_user_id': post.owner_profile.userid,
            '_username': post.owner_username,
            '_full_name': post.owner_profile.full_name,
            '_profile_pic_url': post.owner_profile.profile_pic_url,
            '_profile_url': post.owner_profile.external_url,
            '_Num_of_Followers': post.owner_profile.followers,
            '_Num_of_Posts': post.owner_profile.mediacount,
            '_Num_Following': post.owner_profile.followees,
            '_Profile_Text': post.owner_profile.biography
        }
        write_file('csvs', 'Hash_Tag_Export.csv', new_row)
def _(self, data: bytes) -> str:
    """Store *data* as a git object of type ``self.TYPE``; return its hex SHA-1.

    The object is header + payload, zlib-compressed, written under
    ``objs_path/<sha1[:2]>/<sha1[2:]>``. Content-addressed storage is
    immutable, so writing is skipped when the object file already exists.

    IMPROVEMENT: the original ``os.path.exists`` pre-check before
    ``os.makedirs(..., exist_ok=True)`` was redundant (``exist_ok`` already
    tolerates an existing directory), and the existing-object early return
    now happens before any filesystem mutation.
    """
    assert self.TYPE, f"类型错误 {self.TYPE}"
    full_data = self._build_head(self.TYPE, len(data)) + data
    sha1 = hashlib.sha1(full_data).hexdigest()
    path = os.path.join(self.objs_path, sha1[:2], sha1[2:])
    if os.path.exists(path):
        # Object already stored; its parent directory necessarily exists.
        return sha1
    os.makedirs(os.path.dirname(path), exist_ok=True)
    write_file(path, zlib.compress(full_data))
    return sha1
def commit(message, author):
    """Create a commit object for the current tree and advance master to it.

    Builds a git-style commit body (tree, optional parent, author/committer
    with a local timestamp, blank line, message), stores it via
    ``Commit.compress`` and writes the resulting SHA-1 to the master ref.

    Returns the new commit's hex SHA-1.
    """
    path = find_path()
    tree_obj = Tree(path)
    commit_obj = Commit(path)
    tree = tree_obj.write_tree()
    parent = commit_obj.get_local_master_hash()  # falsy for the root commit
    auth_time = time.strftime("%Y-%m-%d %H:%M:%S")
    lines = ['tree ' + tree]
    if parent:
        lines.append('parent ' + parent)
    lines.append('author {} {}'.format(author, auth_time))
    lines.append('committer {} {}'.format(author, auth_time))
    lines.append('')
    lines.append(message)
    lines.append('')
    data = '\n'.join(lines).encode()
    sha1 = commit_obj.compress(data)
    # get_master_path() already returns a usable path; the original
    # single-argument os.path.join() wrapper was a no-op and is removed.
    master_path = commit_obj.get_master_path()
    write_file(master_path, (sha1 + '\n').encode())
    print('committed to master: {:7}'.format(sha1))
    return sha1
def save_unique_comments_by_user(pfn):
    """Append one CSV row per comment on the newest ``p_counts`` posts of
    profile *pfn* to csvs/Profile_Unique_Likes_n_Comments.csv.

    ``read_file`` reports (via its first element) whether the CSV already
    existed; ``write_file`` appends a single row dict per comment.
    """
    file_data = read_file('csvs', 'Profile_Unique_Likes_n_Comments.csv')
    if file_data[0]:
        print(
            'Successfully opened the Profile_Unique_Likes_n_Comments.csv file')
    else:
        print('Created Profile_Unique_Likes_n_Comments.csv file')
    profile = Profile.from_username(IL.context, pfn)
    recent_first = sorted(
        profile.get_posts(), key=lambda p: p.date_utc, reverse=True)
    for post in islice(recent_first, ceil(p_counts)):
        for comment in post.get_comments():
            owner = comment.owner
            row = {
                '_Profile_Handle': pfn,
                '_user_id': owner.userid,
                '_username': owner.username,
                '_full_name': owner.full_name,
                '_is_private': owner.is_private,
                '_is_verified': owner.is_verified,
                '_Date_of_Last_Like_or_Comment': '',
                '_Total_Comments_N_Likes': comment.likes_count,
                '_Total_Comments': '',
                # NOTE(review): this key has a leading space in the original;
                # preserved byte-for-byte since it becomes a CSV header —
                # confirm whether downstream consumers expect it.
                ' _Total_Likes': comment.likes_count,
                '_profile_pic_url': owner.profile_pic_url,
                '_profile_url': owner.external_url,
                '_Num_of_Followers': owner.followers,
                '_Num_of_Posts': owner.mediacount,
                '_Num_Following': owner.followees,
                '_Profile_Text': owner.biography
            }
            write_file('csvs', 'Profile_Unique_Likes_n_Comments.csv', row)
def action_update_file_config(self):
    """Persist the in-memory config dict to the config file as JSON."""
    serialized = json.dumps(self.config)
    lib.write_file(self.file_config, serialized)