# #plots.xlim(xlims) # #leg = plots.legend(loc=0) # #for legobj in leg.legendHandles: # # legobj.set_linewidth(2.0) # # This simply sets the x-axis data to diagonal so it fits better. # f.autofmt_xdate() # # plt.ylabel("Fraction of occurence") # plt.title("Level " + str(lev+1)) # #plt.tight_layout() # plt.show() # pdb.set_trace() #Aggregate sound files for date in dates: print "Starting: Sound aggregation for day " + str(date) has_video_array=has_video(disconnect_file_loc + sbj_id + "_" + str(date) + ".txt") aggregate.aggregate(sbj_id, date, sound_loc + str(date) + "\\", sound_loc + str(date) + "\\", has_video_array) #Aggregate mvmt files for date in dates: print "Starting: Mvmt aggregation for day " + str(date) has_video_array=has_video(disconnect_file_loc + sbj_id + "_" + str(date) + ".txt") aggregate.aggregate(sbj_id, date, mvmt_loc, mvmt_loc, has_video_array) #Correlate clusters for date in dates: print "Starting: Correlations for day " + str(date) best_corr_clusters.append(correlate_signals.correlate( sound_loc + str(date) + "\\" + sbj_id + "_" + str(date) + ".p",
# NOTE(review): this chunk begins mid-function -- the `else:` below is the
# tail of a per-frame loop whose matching `if` and `for` lie outside the
# visible source, so the indentation here is reconstructed, not original.
        else:
            # Feature stream exhausted before the video mask ended: warn
            # once (printed acts as a one-shot flag), then fall through to
            # the missing-data sentinel.
            if printed == 0:
                printed = 1
                print("run out of videos at frame #" + str(f) + " out of " +
                      str(has_video_array.shape[0]) + " for subject " +
                      sbj_id + " on day " + str(day))
            # -1 is the sentinel for "no feature available for this frame".
            total_features[f] = -1
#plt.plot(total_features)
#plt.show()
# Persist the full per-frame feature vector for this subject/day.
# NOTE(review): the file handle from open(...) is never closed explicitly.
pickle.dump( total_features, open(output_file_loc + sbj_id + "_" + str(day) + ".p", "wb"))

if __name__ == "__main__":
    # Script entry point: run aggregation for one hard-coded subject/day.
    sbj_id = "fcb01f7a"
    day = 13
    sr = 30  # sample rate passed to has_video -- presumably frames/sec; confirm
    input_file_loc = "E:\\mvmt\\%s\\" % sbj_id
    output_file_loc = input_file_loc
    # Build the per-frame video-availability mask from the disconnect-times
    # file for this subject/day, then aggregate features against it.
    has_video_array = has_video(
        "C:\\Users\\wangnxr\\Documents\\rao_lab\\video_analysis\\disconnect_times\\%s_%i.txt" % (sbj_id, day),
        samp_rate=sr)
    aggregate(sbj_id, day, input_file_loc, output_file_loc, has_video_array)
    # --- Commented-out plotting of the first 270 minutes of features. ---
    # plt.plot(np.array(range(270*30*60))/(60*30.0), total_features[:270*30*60])
    # #plt.ylim([10**11,6*10**15])
    # plt.xlabel("Time (Min)")
    # plt.ylabel("Speech sound level")
    # plt.show()
# NOTE(review): this chunk begins mid-function -- total_features, feature_obj,
# printed, sbj_id, day and output_file_loc are bound by an enclosing
# aggregate() definition outside the visible source (Python 2: xrange).
# Indentation is reconstructed; the original chunk was collapsed onto one line.

# Walk every video frame: frames with no video get the -1 sentinel; frames
# with video consume the next value from the feature stream.
for f in xrange(has_video_array.shape[0]):
    if has_video_array[f] == 0:
        total_features[f] = -1
    else:
        if feature_obj.has_next():
            total_features[f] = feature_obj.next()
        else:
            # Feature stream ran dry before the mask ended: warn once
            # (printed is a one-shot flag), then mark the frame missing.
            if printed == 0:
                printed = 1
                print ("run out of videos at frame #" + str(f) + " out of " +
                       str(has_video_array.shape[0]) + " for subject " +
                       sbj_id + " on day " + str(day))
            total_features[f] = -1
#plt.plot(total_features)
#plt.show()
# Persist the per-frame feature vector for this subject/day as a pickle.
# NOTE(review): the file handle from open(...) is never closed explicitly.
pickle.dump(total_features, open(output_file_loc + sbj_id + "_" + str(day) + ".p" , "wb"))

if __name__ == "__main__":
    # Script entry point: run aggregation for subject e70923c4, day 5.
    sbj_id = "e70923c4"
    day = 5
    sr=30  # sample rate passed to has_video -- presumably frames/sec; confirm
    input_file_loc = "E:\\mvmt\\e70923c4\\"
    output_file_loc = input_file_loc
    # Per-frame video-availability mask from the disconnect-times file.
    has_video_array = has_video(
        "C:\\Users\\wangnxr\\Documents\\rao_lab\\video_analysis\\disconnect_times\\"
        + sbj_id + "_" + str(day) + ".txt", samp_rate = sr)
    aggregate(sbj_id, day, input_file_loc, output_file_loc, has_video_array)
    # --- Commented-out plotting of the first 270 minutes of features. ---
    # plt.plot(np.array(range(270*30*60))/(60*30.0), total_features[:270*30*60])
    # #plt.ylim([10**11,6*10**15])
    # plt.xlabel("Time (Min)")
    # plt.ylabel("Speech sound level")
    # plt.show()
# #plots.xlim(xlims) # #leg = plots.legend(loc=0) # #for legobj in leg.legendHandles: # # legobj.set_linewidth(2.0) # # This simply sets the x-axis data to diagonal so it fits better. # f.autofmt_xdate() # # plt.ylabel("Fraction of occurence") # plt.title("Level " + str(lev+1)) # #plt.tight_layout() # plt.show() # pdb.set_trace() #Aggregate sound files for date in dates: print "Starting: Sound aggregation for day " + str(date) has_video_array = has_video(disconnect_file_loc + sbj_id + "_" + str(date) + ".txt") aggregate.aggregate(sbj_id, date, sound_loc + str(date) + "\\", sound_loc + str(date) + "\\", has_video_array) #Aggregate mvmt files for date in dates: print "Starting: Mvmt aggregation for day " + str(date) has_video_array = has_video(disconnect_file_loc + sbj_id + "_" + str(date) + ".txt") aggregate.aggregate(sbj_id, date, mvmt_loc, mvmt_loc, has_video_array) #Correlate clusters for date in dates: print "Starting: Correlations for day " + str(date) best_corr_clusters.append(
# NOTE(review): this chunk begins mid-function -- total_features, feature_obj,
# printed, sbj_id, day and output_file_loc are bound by an enclosing
# aggregate() definition outside the visible source (Python 2: xrange).
# Indentation is reconstructed; the original chunk was collapsed onto one line.

# Walk every video frame: frames without video get the -1 sentinel; frames
# with video consume the next value from the feature stream.
for f in xrange(has_video_array.shape[0]):
    if has_video_array[f] == 0:
        total_features[f] = -1
    else:
        if feature_obj.has_next():
            total_features[f] = feature_obj.next()
        else:
            # Feature stream ran dry before the mask ended: warn once
            # (printed is a one-shot flag), then mark the frame missing.
            if printed == 0:
                printed = 1
                print ("run out of videos at frame #" + str(f) + " out of " +
                       str(has_video_array.shape[0]) + " for subject " +
                       sbj_id + " on day " + str(day))
            total_features[f] = -1
#plt.plot(total_features)
#plt.show()
# Persist the per-frame feature vector for this subject/day as a pickle.
# NOTE(review): the file handle from open(...) is never closed explicitly.
pickle.dump(total_features, open(output_file_loc + sbj_id + "_" + str(day) + ".p" , "wb"))

if __name__ == "__main__":
    # Script entry point: run aggregation for subject fcb01f7a, day 13.
    sbj_id = "fcb01f7a"
    day = 13
    sr=30  # sample rate passed to has_video -- presumably frames/sec; confirm
    input_file_loc = "E:\\mvmt\\%s\\" % sbj_id
    output_file_loc = input_file_loc
    # Per-frame video-availability mask from the disconnect-times file.
    has_video_array = has_video(
        "C:\\Users\\wangnxr\\Documents\\rao_lab\\video_analysis\\disconnect_times\\%s_%i.txt" % (sbj_id, day),
        samp_rate = sr)
    aggregate(sbj_id, day, input_file_loc, output_file_loc, has_video_array)
    # --- Commented-out plotting of the first 270 minutes of features. ---
    # plt.plot(np.array(range(270*30*60))/(60*30.0), total_features[:270*30*60])
    # #plt.ylim([10**11,6*10**15])
    # plt.xlabel("Time (Min)")
    # plt.ylabel("Speech sound level")
    # plt.show()
# NOTE(review): this chunk begins mid-loop -- f, feature_obj, total_features,
# printed, sbj_id, day and has_video_array are bound by enclosing code
# outside the visible source; the indentation below is reconstructed.
        # Frame has video: consume the next value from the feature stream.
        if feature_obj.has_next():
            total_features[f] = feature_obj.next()
        else:
            # Feature stream exhausted before the mask ended: warn once
            # (printed is a one-shot flag), then mark the frame missing (-1).
            if printed == 0:
                printed = 1
                print("run out of videos at frame #" + str(f) + " out of " +
                      str(has_video_array.shape[0]) + " for subject " +
                      sbj_id + " on day " + str(day))
            total_features[f] = -1
#plt.plot(total_features)
#plt.show()
# Persist the per-frame feature vector for this subject/day as a pickle.
# NOTE(review): the file handle from open(...) is never closed explicitly.
pickle.dump( total_features, open(output_file_loc + sbj_id + "_" + str(day) + ".p", "wb"))

if __name__ == "__main__":
    # Script entry point: run aggregation for subject e70923c4, day 5.
    sbj_id = "e70923c4"
    day = 5
    sr = 30  # sample rate passed to has_video -- presumably frames/sec; confirm
    input_file_loc = "E:\\mvmt\\e70923c4\\"
    output_file_loc = input_file_loc
    # Per-frame video-availability mask from the disconnect-times file.
    has_video_array = has_video(
        "C:\\Users\\wangnxr\\Documents\\rao_lab\\video_analysis\\disconnect_times\\"
        + sbj_id + "_" + str(day) + ".txt", samp_rate = sr)
    aggregate(sbj_id, day, input_file_loc, output_file_loc, has_video_array)
    # --- Commented-out plotting of the first 270 minutes of features. ---
    # plt.plot(np.array(range(270*30*60))/(60*30.0), total_features[:270*30*60])
    # #plt.ylim([10**11,6*10**15])
    # plt.xlabel("Time (Min)")
    # plt.ylabel("Speech sound level")
    # plt.show()