def get_light_times_from_behavior_file(session=None, logfile=None):
    """Return the times the light goes on and off in a session's logfile."""
    if session is not None:
        lines = BeWatch.db.get_logfile_lines(session)
    elif logfile is not None:
        lines = TrialSpeak.read_lines_from_file(logfile)
    else:
        raise ValueError("must provide either session or logfile")

    # The light turns on in ERROR (14) and INTER_TRIAL_INTERVAL (13),
    # and off in ROTATE_STEPPER1 (2)
    parsed_df_by_trial = TrialSpeak.parse_lines_into_df_split_by_trial(lines)
    light_on = TrialSpeak.identify_state_change_times(
        parsed_df_by_trial, state1=[13, 14], show_warnings=False)
    light_off = TrialSpeak.identify_state_change_times(
        parsed_df_by_trial, state0=2)

    return light_on, light_off
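# A minimal usage sketch, not part of the existing API: `example_session` is a
# hypothetical session name, and pairing each onset with the next offset
# assumes light_on and light_off come back as equal-length, aligned sequences.
def example_get_light_epochs(example_session):
    """Illustrative wrapper: return (on, off) pairs for one session's light."""
    light_on, light_off = get_light_times_from_behavior_file(
        session=example_session)
    return zip(light_on, light_off)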
def check_ardulines(logfile):
    """Error check the log file.

    Here are the things that would be useful to check:
    * File can be loaded with the loading function without error.
    * Reported lick times match reported choice
    * Reported choice and stimulus match reported outcome
    * All lines are (time, arg, XXX) where arg is known
    * All state transitions are legal, though this is a lot of work

    Diagnostics to return:
    * Values of params over trials
    * Various stats on time spent in each state
    * Empirical state transition probabilities

    Descriptive metrics of each trial:
    * Lick times locked to response window opening
    * State transition times locked to response window opening
    """
    # Make sure the loading functions work without error
    lines = TrialSpeak.read_lines_from_file(logfile)
    pldf = TrialSpeak.parse_lines_into_df(lines)
    plst = TrialSpeak.parse_lines_into_df_split_by_trial(lines)
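# Hedged sketch of applying the first check listed above ("file can be loaded
# without error") across many logfiles. `example_check_many_logfiles` and
# `logfile_list` are illustrative names, not part of the existing code.
def example_check_many_logfiles(logfile_list):
    """Run check_ardulines on each logfile and collect any exceptions."""
    failures = {}
    for logfile in logfile_list:
        try:
            check_ardulines(logfile)
        except Exception as exc:
            failures[logfile] = exc
    return failures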
def get_logfile_lines(session):
    """Look up the logfile for a session and return its lines."""
    # Find the filename
    bdf = get_behavior_df()
    rows = bdf[bdf.session == session]
    if len(rows) != 1:
        raise ValueError("cannot find unique session for %s" % session)
    filename = rows.iloc[0]['filename']

    # Read lines
    lines = TrialSpeak.read_lines_from_file(filename)

    # Split by trial
    #~ splines = split_by_trial(lines)

    return lines
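# Hedged usage sketch: the lines returned by get_logfile_lines feed directly
# into the TrialSpeak parsers used elsewhere in this codebase. The wrapper
# name `example_parse_session` is illustrative only.
def example_parse_session(session):
    """Fetch a session's logfile lines and parse them split by trial."""
    lines = get_logfile_lines(session)
    return TrialSpeak.parse_lines_into_df_split_by_trial(lines)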
if SHOW_WEBCAM:
    cmd = 'xdotool search --name %s windowmove %d %d' % (
        window_title, video_window_position[0], video_window_position[1])
    while os.system(cmd) != 0:
        # Should test here if it's been too long and then give up
        print "Waiting for webcam window"
        time.sleep(.5)

while True:
    ## Chat updates
    # Update chatter
    chatter.update(echo_to_stdout=ECHO_TO_STDOUT)

    # Read lines and split by trial
    # Could we skip this step if chatter reports no new device lines?
    logfile_lines = TrialSpeak.read_lines_from_file(logfilename)
    splines = TrialSpeak.split_by_trial(logfile_lines)

    # Run the trial setting logic
    # This try/except is disabled because it conflates an actual
    # ValueError (like sending a zero) with the no-lines case
    #~ try:
    translated_trial_matrix = ts_obj.update(splines, logfile_lines)
    #~ except ValueError:
    #~     raise ValueError("cannot get any lines; try reuploading protocol")

    ## Update UI
    if RUN_UI:
        ui.update_data(logfile_lines=logfile_lines)
        ui.get_and_handle_keypress()
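# Hedged sketch of the timeout suggested in the SHOW_WEBCAM block above: keep
# retrying the xdotool command but give up after `max_wait` seconds. The name
# `position_webcam_window` and both timing parameters are assumptions for
# illustration; this relies on the same `os` and `time` modules used above.
def position_webcam_window(cmd, max_wait=30., poll_interval=.5):
    """Retry `cmd` until it succeeds or `max_wait` seconds elapse.

    Returns True if the command eventually succeeded, False if we gave up.
    """
    start_time = time.time()
    while os.system(cmd) != 0:
        if time.time() - start_time > max_wait:
            return False
        time.sleep(poll_interval)
    return True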
def generate_mplayer_guesses_and_sync(metadata,
    user_results=None, guess=(1., 0.), N=4, pre_time=10):
    """Generates best times to check video, and potentially also syncs.

    metadata : a row from bv_files to sync. Needs to specify the following:
        'filename' : behavioral filename
        'guess_vvsb_start'
        'duration_video'
        'filename_video'

    The fit is between these datasets:
        X : time of retraction from the behavior file, minus the
            test_guess_vvsb in the metadata.
        Y : user-supplied times of retraction from the video

    The purpose of `guess` is to generate better guesses for the user
    to look in the video, but the returned data always use the combined fit
    that includes any initial guess. However, test_guess_vvsb is not
    accounted for in the returned value.

    N times to check in the video are printed out. Typically this is run
    twice: once before checking the video, and then again with the video
    times supplied in `user_results`.

    If the initial guess is very wrong, you may need to find a large gap
    in the video, match it up to the trials info manually, and use this
    to set `guess` closer to the truth.
    """
    initial_guess = np.asarray(guess)

    # Load trials info
    trials_info = TrialMatrix.make_trial_matrix_from_file(metadata['filename'])
    splines = TrialSpeak.load_splines_from_file(metadata['filename'])
    lines = TrialSpeak.read_lines_from_file(metadata['filename'])
    parsed_df_split_by_trial = \
        TrialSpeak.parse_lines_into_df_split_by_trial(lines)

    # Insert servo retract time
    trials_info['time_retract'] = TrialSpeak.identify_servo_retract_times(
        parsed_df_split_by_trial)

    # Apply the delta-time guess to the retraction times
    test_guess_vvsb = metadata['guess_vvsb_start'] #/ np.timedelta64(1, 's')
    trials_info['time_retract_vbase'] = \
        trials_info['time_retract'] - test_guess_vvsb

    # Choose test times for the user to check
    video_duration = metadata['duration_video'] / np.timedelta64(1, 's')
    test_times, test_next_times = generate_test_times_for_user(
        trials_info['time_retract_vbase'], video_duration,
        initial_guess=initial_guess, N=N)

    # Print mplayer commands
    for test_time, test_next_time in zip(test_times, test_next_times):
        pre_test_time = int(test_time) - pre_time
        print 'mplayer -ss %d %s # guess %0.1f, next %0.1f' % (pre_test_time,
            metadata['filename_video'], test_time, test_next_time)

    # If no usable user data provided, just return the test times
    if user_results is None:
        return {'test_times': test_times}
    if len(user_results) != N:
        print "warning: len(user_results) should be %d not %d" % (
            N, len(user_results))
        return {'test_times': test_times}

    # Otherwise, fit a correction to the original guess
    new_fit = np.polyfit(test_times.values, user_results, deg=1)
    resids = np.polyval(new_fit, test_times.values) - user_results

    # Composite the two fits
    # Note that the order of composition matters; this one appears correct.
    combined_fit = np.polyval(np.poly1d(new_fit), np.poly1d(initial_guess))

    # Diagnostics
    print os.path.split(metadata['filename'])[-1]
    print os.path.split(metadata['filename_video'])[-1]
    print "combined_fit: %r" % np.asarray(combined_fit)
    print "resids: %r" % np.asarray(resids)

    return {'test_times': test_times, 'resids': resids,
        'combined_fit': combined_fit}
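# Hedged sketch of the two-pass workflow described in the docstring above.
# `example_sync_one_video`, `metadata_row`, and `observed_times` are
# illustrative placeholders, not part of the existing API or real data.
def example_sync_one_video(metadata_row, observed_times):
    """Illustrative two-pass sync: inspect the video, then refit.

    observed_times : the N retraction times actually seen in the video at
        the mplayer commands printed by the first pass.
    """
    # First pass: print mplayer commands at the guessed times
    generate_mplayer_guesses_and_sync(metadata_row)

    # ... watch the video at the printed times and record observed_times ...

    # Second pass: fit the correction from the observed times
    return generate_mplayer_guesses_and_sync(
        metadata_row, user_results=observed_times)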