def per_frame(basedir, func, config):
    """Apply `func` to every frame under `basedir`, collecting per-frame results.

    Parameters
    ----------
    basedir : str
        Directory containing ``config.pickle`` and the frame data read by
        ``organizedata.get_frames``.
    func : callable
        Called as ``func(frame, env, **config)``; must return a
        ``(real_x, real_y, confidence)`` triple.
    config : dict
        Extra keyword arguments forwarded to ``func``.

    Returns
    -------
    numpy.ndarray
        Structured array of dtype ``DTYPE_POS_CONF`` with one record per
        frame, fields ``'x'``, ``'y'``, ``'confidence'`` filled in.
    """
    config_file = os.path.join(basedir, "config.pickle")
    # FIX: open in binary mode and close the handle deterministically; the
    # original used py2-style pickle.load(open(path)), leaking the file object.
    with open(config_file, 'rb') as f:
        cf = pickle.load(f)

    env = util.Environmentz(cf['field_dim_m'], cf['frame_dim_pix'])

    # Total number of frames covered by this recording (inclusive range).
    FRAMEN = cf['end_f'] - cf['start_f'] + 1

    # BUG FIX: the original computed FRAMEN but then referenced an undefined
    # name FRAMES_TO_ANALYZE below — a half-finished rename that would raise
    # NameError (or silently use a stale module global). Use FRAMEN throughout.
    d = np.zeros(FRAMEN, dtype=DTYPE_POS_CONF)

    # Pull frames from disk in small batches to bound memory use.
    FRAMES_AT_A_TIME = 10
    frames = np.arange(FRAMEN)
    for frame_subset in util.chunk(frames, FRAMES_AT_A_TIME):
        fs = organizedata.get_frames(basedir, frame_subset)
        for fi, frame_no in enumerate(frame_subset):
            real_x, real_y, conf = func(fs[fi], env, **config)
            d[frame_no]['x'] = real_x
            d[frame_no]['y'] = real_y
            d[frame_no]['confidence'] = conf
    return d
# Fragment of a larger routine: `positions`, `invalid_sep`, `basedir`, and
# `env` are defined by the enclosing (out-of-view) scope.
# NOTE(review): indentation was lost in this source; structure below is
# reconstructed — in particular `framepos += 1` is placed once per frame
# (after both LEDs), matching how `framepos` indexes `ledimgs`. Confirm
# against the original file.

# Blank out records flagged as having an invalid LED separation; the record
# layout appears to be ((x_f, y_f), (x_b, y_b), scalar, scalar) — TODO confirm
# against the dtype declaration.
positions_cleaned = positions.copy()
positions_cleaned[invalid_sep] = ((np.nan, np.nan), (np.nan, np.nan),
                                  np.nan, np.nan)
# just take the sanitized data, don't bother interpolating.
# ignore all other positions
valid_frames = np.argwhere(np.isfinite(positions_cleaned['x']))[:-3, 0]
# the -3 above is to deal with some strange offset issues we have where
# len(positions) != total-number-of-frames
PIX_REGION = 24  # num pixels on either side

# One (2*PIX_REGION+1)-square patch per valid frame per LED (front, back).
ledimgs = np.zeros(
    (len(valid_frames), 2, PIX_REGION * 2 + 1, PIX_REGION * 2 + 1),
    dtype=np.uint8)

framepos = 0
# Fetch frames from disk in chunks of 100 to bound memory use.
for frame_chunk in util.chunk(valid_frames, 100):
    frames = organizedata.get_frames(basedir, frame_chunk)
    for frame_idx, frame in zip(frame_chunk, frames):
        for led, field in [(0, 'led_front'), (1, 'led_back')]:
            real_pos = positions_cleaned[field][frame_idx]
            # Convert world coordinates to pixel coordinates; note the
            # (row, col) = (y, x) order expected by extract_region_safe.
            x, y = env.gc.real_to_image(real_pos[0], real_pos[1])
            # Last arg 0 is presumably the fill value for out-of-bounds
            # regions — TODO confirm against util.extract_region_safe.
            ledimgs[framepos, led, :, :] = util.extract_region_safe(
                frame, int(y), int(x), PIX_REGION, 0)
        framepos += 1

# Mean LED patch across all valid frames (float32 to avoid uint8 overflow).
ledimgs_mean = np.mean(ledimgs.astype(np.float32), axis=0)

subsamp = 60
# NOTE(review): this dict literal continues beyond the visible chunk.
led_params_dict = {
# Fragment of a larger routine (near-duplicate of an earlier chunk in this
# source): `positions`, `invalid_sep`, `basedir`, and `env` come from the
# enclosing (out-of-view) scope.
# NOTE(review): indentation was lost in this source; structure below is
# reconstructed — `framepos +=1` is placed once per frame (after both LEDs),
# matching how `framepos` indexes `ledimgs`. Confirm against the original.

# Blank out records flagged as having an invalid LED separation.
positions_cleaned = positions.copy()
positions_cleaned[invalid_sep] = ((np.nan, np.nan), (np.nan, np.nan),
                                  np.nan, np.nan)
# just take the sanitized data, don't bother interpolating.
# ignore all other positions
valid_frames = np.argwhere(np.isfinite(positions_cleaned['x']))[:-3, 0]
# the -3 above is to deal with some strange offset issues we have where
# len(positions) != total-number-of-frames
PIX_REGION = 24  # num pixels on either side

# One (2*PIX_REGION+1)-square patch per valid frame per LED (front, back).
ledimgs = np.zeros((len(valid_frames), 2, PIX_REGION*2+1, PIX_REGION*2+1),
                   dtype = np.uint8)

framepos = 0
# Fetch frames from disk in chunks of 100 to bound memory use.
for frame_chunk in util.chunk(valid_frames, 100):
    frames = organizedata.get_frames(basedir, frame_chunk)
    for frame_idx, frame in zip(frame_chunk, frames):
        for led, field in [(0, 'led_front'), (1, 'led_back')]:
            real_pos = positions_cleaned[field][frame_idx]
            # Convert world coordinates to pixel coordinates; note the
            # (row, col) = (y, x) order expected by extract_region_safe.
            x, y = env.gc.real_to_image(real_pos[0], real_pos[1])
            # Last arg 0 is presumably the out-of-bounds fill value — TODO
            # confirm against util.extract_region_safe.
            ledimgs[framepos, led, :, :] = util.extract_region_safe(
                frame, int(y), int(x), PIX_REGION, 0)
        framepos +=1

# Mean LED patch across all valid frames (float32 to avoid uint8 overflow).
ledimgs_mean = np.mean(ledimgs.astype(np.float32), axis=0)

subsamp = 60