def create_dataframe_faces():
    """Read all mask images and build a DataFrame of flattened frames.

    Walks every (video, json, contour-dir) triple from the settings, randomly
    samples mask images from each contour directory, flattens each binarised
    frame into a row, and pickles the resulting DataFrame to ./faces.pckl.
    Collection stops once more than 2500 rows have been gathered.
    """
    faces = pd.DataFrame([])
    i = 0  # count of images actually added
    arg = setting.args_class()
    for video_path, json_path, contour_path in zip(arg.videodir,
                                                   arg.tracked_json,
                                                   arg.contdir):
        img_dict = contour_path
        # NOTE(review): loaded but never used here — kept for parity with the
        # original code; confirm whether load_json has required side effects.
        data = contour_utils.load_json(json_path)
        for img_name in tqdm(os.listdir(img_dict)):
            # Only keep image files ('...g' matches .png / .jpg / .jpeg).
            if not (img_name[-1] == 'g'):
                print(img_name + ' is not used')
                continue
            # Random subsampling: standard-normal draw exceeds 0.8 ~21% of
            # the time, so roughly one in five masks is kept.
            if np.random.randn() > 0.8:
                # BUG FIX: was len(faces+1), which adds 1 elementwise and
                # reports len(faces) instead of the new row count.
                print('add %d th image ' % (len(faces) + 1))
                # os.path.join is robust whether or not contour_path has a
                # trailing separator (original used plain concatenation).
                frame = plt.imread(os.path.join(img_dict, img_name))
                frame = np.asarray(frame[:, :, 0], dtype='uint8')
                frame[frame == 255] = 1  # binarise: white (255) -> 1
                face = pd.Series(frame.flatten(), name=img_name)
                # DataFrame.append was removed in pandas 2.0; pd.concat of the
                # Series transposed to a one-row frame is the equivalent.
                faces = pd.concat([faces, face.to_frame().T])
                i += 1
            if len(faces) > 2500:
                break
        if len(faces) > 2500:
            break
    import pickle
    print('writing file...')
    with open('./faces.pckl', 'wb') as f:
        pickle.dump(faces, f)
def pca_for_faces(show_fig=False):
    """Fit a PCA model on the pickled faces DataFrame and save it.

    Loads ./faces.pckl, fits a PCA retaining 90% of the variance, optionally
    saves diagnostic figures (top-10 components and the cumulative explained
    variance curve), pickles the fitted model to ./pca.pckl, and returns the
    transformed data.

    Args:
        show_fig: if True, write 'love_pc.png' and 'love_variance.png'.

    Returns:
        ndarray of shape (n_samples, n_components): faces projected onto the
        principal components. (Originally computed but discarded; returning it
        is backward compatible since no caller used the old None result.)
    """
    # NOTE(review): arg is never read in this function — kept in case
    # args_class() has required side effects; confirm and drop if not.
    arg = setting.args_class()
    print('reading...')
    with open('./faces.pckl', 'rb') as f:
        faces = pickle.load(f)
    print('calculating...')
    # n_components=0.9 keeps as many eigenvectors as needed to explain
    # 90% of the variance in the dataset.
    faces_pca = PCA(n_components=0.9)
    faces_pca.fit(faces)
    if show_fig:
        fig, axes = plt.subplots(2, 5, figsize=(9, 3),
                                 subplot_kw={'xticks': [], 'yticks': []},
                                 gridspec_kw=dict(hspace=0.01, wspace=0.01))
        for i, ax in enumerate(axes.flat):
            # assumes masks are 200x200 pixels — TODO confirm against the
            # contour images actually written by the sampling step.
            ax.imshow(faces_pca.components_[i].reshape(200, 200), cmap='gray')
        plt.savefig('love_pc.png')
        plt.close()
        plt.figure()
        plt.plot(np.cumsum(faces_pca.explained_variance_ratio_))
        plt.xlabel('number of components')
        plt.ylabel('cumulative explained variance')
        plt.savefig('love_variance.png')
        plt.close()
    print('writing...')
    with open('./pca.pckl', 'wb') as f:
        pickle.dump(faces_pca, f)
    # Removed leftover debug `print(faces)`; return the projection instead of
    # discarding it.
    components = faces_pca.transform(faces)
    return components
            # NOTE(review): fragment — the enclosing function's `def` line
            # (apparently get_samples' frame-reading loop) is outside this
            # chunk; the indentation below is reconstructed, not original.
            count_used += 1
            continue
        # Advance to the next frame of the capture.
        read_flag, frame = cap.read()
        i += 1
    cap.release()
    # Summary of how many frames were filtered out vs. kept.
    print(
        '%d frames dont have 2 mice, %d frames have points outside pic. %d frame will be used, %d frame in total'
        % (count_lessMouse, count_out, count_used, count_frame))


if __name__ == '__main__':
    # dir_name = '/disk1/zexin/project/mice/clustering_sequencial/forZexin/results/0603/1411_black_two/'
    # video_name = '1411_black_two.mov'
    # json_name= '/disk1/zexin/project/mice/clustering_sequencial/forZexin/results/0603/1411_black_two/alphapose-results-forvis-tracked.json'
    arg = setting.args_class()
    # Optional CLI arg selects a single video index; otherwise process all.
    if (len(sys.argv) != 1):
        all_video_paths_id = [int(sys.argv[1])]
        print('only deal with the %d th video path: \n ' % (int(sys.argv[1])))
    else:
        all_video_paths_id = [i for i in range(len(arg.videodir))]
    for i in all_video_paths_id:
        video_path = arg.videodir[i]
        json_path = arg.tracked_json[i]
        contour_path = arg.contdir[i]
        # Extract samples once per mouse (IDs 0 and 1).
        get_samples(video_path, json_path, contour_path, arg, targetMouseID=0)
        get_samples(video_path, json_path, contour_path, arg, targetMouseID=1)