# If your experiment is egocentric by design (e.g. a head-fixed experiment on a treadmill),
# you can use the following to convert your .csv to a .npy array, ready to train VAME on:
#vame.csv_to_numpy(config, datapath='C:\\Research\\VAME\\vame_alpha_release-Mar16-2021\\videos\\pose_estimation\\')

# Step 1.3:
# Create the training set for the VAME model
vame.create_trainset(config)

# Step 2:
# Train VAME:
vame.train_model(config)

# Step 3:
# Evaluate model
vame.evaluate_model(config)

# Step 4:
# Segment motifs/pose
vame.pose_segmentation(config)

#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# The following are optional choices to create motif videos, communities/hierarchies
# of behavior, and community videos.

# OPTIONAL: Create motif videos to get insights about the fine-grained poses
vame.motif_videos(config, videoType='.mp4')

# OPTIONAL: Create behavioural hierarchies via community detection
vame.community(config, show_umap=False, cut_tree=2)
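# The optional steps above also mention community videos. A minimal, hedged sketch
# of that call follows; vame.community_videos() ships with recent VAME releases,
# so verify it exists in your installed version before relying on it.

# OPTIONAL: Create community videos on the hierarchy detected by vame.community()
vame.community_videos(config)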
# Note: vame.align() is currently only applicable if your data is similar to our demo data.
# If this is not the case, please make sure to align your data egocentrically and put it into
# the data folder for every video. The name of this file is the video name + -PE-seq.npy:
# /Your-VAME-Project/data/video-1/video-1-PE-seq.npy
vame.create_trainset(config)

# Step 2:
# Train rnn model:
vame.rnn_model(config, model_name='VAME', pretrained_weights=False, pretrained_model='pretrained')

# Step 3:
# Evaluate model
vame.evaluate_model(config, model_name='VAME')

# Step 4:
# Quantify Behavior
vame.behavior_segmentation(config, model_name='VAME', cluster_method='kmeans', n_cluster=[30])

# Step 5:
# Get behavioral transition matrix, model usage and graph
vame.behavior_quantification(config, model_name='VAME', cluster_method='kmeans', n_cluster=30)
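# If vame.align() does not fit your data (see the note above), here is a minimal
# sketch of saving a manually aligned pose array where VAME expects it. The input
# filename is hypothetical, and the array layout (coordinates x frames) is an
# assumption based on the demo data:
import os
import numpy as np

video = 'video-1'
aligned = np.load('my_aligned_poses.npy')  # hypothetical egocentrically aligned array
target = os.path.join('/Your-VAME-Project', 'data', video)
os.makedirs(target, exist_ok=True)
np.save(os.path.join(target, video + '-PE-seq.npy'), aligned)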
for file in poseFiles:
    if file.endswith('.csv'):
        sampleName = file.split('-DC')[0]
        if not os.path.exists(projectPath + '/data/' + sampleName + '/' + sampleName + '-PE-seq.npy'):
            egocentric_time_series = av.alignVideo(projectPath, sampleName, file_format, crop_size,
                                                   use_video=False, check_video=False)
            np.save(projectPath + '/data/' + sampleName + '/' + sampleName + '-PE-seq.npy',
                    egocentric_time_series)

#Create training dataset:
vame.create_trainset(config)

#Train RNN:
vame.rnn_model(config, model_name=modelName, pretrained_weights=False, pretrained_model=None)

#Evaluate RNN:
vame.evaluate_model(config, model_name=modelName)

#Segment Behaviors:
vame.behavior_segmentation(config, model_name=modelName, cluster_method='kmeans', n_cluster=[15,30,45])

#Quantify behaviors:
vame.behavior_quantification(config, model_name=modelName, cluster_method='kmeans', n_cluster=10)

#Make Example Videos:
vame.motif_videos(config, model_name=modelName, cluster_method="kmeans", n_cluster=[10])

#Define groups & experimental setup:
group1 = ['C1-RT', 'C3-RB', 'C5-NP', 'C5-RT', 'C9_LT', 'C12_NP', 'C13_RT', 'C14_LT', 'C14_LB', 'C15_RT', 'C16_RB']
group2 = ['C2-RB', 'C3-LT', 'C4-NP', 'C4-RT', 'C10_NP', 'C12_RT', 'C13_NP', 'C14_RT', 'C15_NP', 'C16_NP']
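# A hedged sanity check for the group definitions above: confirm that every
# sample has a per-video results folder before any group-level comparison.
# The results/<sample> layout is an assumption about how VAME organizes output:
for label, group in [('group1', group1), ('group2', group2)]:
    for sample in group:
        if not os.path.exists(os.path.join(projectPath, 'results', sample)):
            print('%s: no results folder for %s' % (label, sample))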
# (The opening of the statement below is truncated in the source; a directory-creation
# call such as os.mkdir() is assumed to complete it.)
os.mkdir(os.path.join(projectPath,
                      'videos/pose_estimation/egocentric/'))
df.to_csv(os.path.join(projectPath,
                       'videos/pose_estimation/egocentric/' + sampleName + '_egocentric.csv'))

#Create training dataset:
vame.create_trainset(config)

#Train RNN:
vame.rnn_model(config, model_name=modelName, pretrained_weights=True,
               pretrained_model='VG2_RTA_with6Hz_vGluT2_RTA_Epoch203_Feb16')

#Evaluate RNN:
vame.evaluate_model(config, model_name=modelName, suffix=None)

#Segment Behaviors:
vame.behavior_segmentation(config, model_name=modelName, cluster_method='GMM',
                           n_cluster=[9, 12, 15, 18, 20])

#Quantify behaviors:
vame.behavior_quantification(config, model_name=modelName, cluster_method='kmeans', n_cluster=15)

#Plot transition matrices
files = os.listdir(os.path.join(projectPath, 'results/'))
n_cluster = 10
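# A minimal sketch completing the transition-matrix plotting set up above. The
# per-sample .npy filename is an assumption; point it at wherever
# behavior_quantification() actually wrote its output:
import numpy as np
import matplotlib.pyplot as plt

for file in files:
    path = os.path.join(projectPath, 'results', file,
                        'transition_matrix_' + str(n_cluster) + '.npy')  # assumed filename
    if os.path.exists(path):
        tm = np.load(path)
        plt.figure(figsize=(5, 4))
        plt.imshow(tm, cmap='viridis')
        plt.colorbar(label='transition probability')
        plt.title(file)
plt.show()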