Code example #1
    def create_trajectory(self, nframes, h5filebase, start, finish):
    
        # build artificial kalman rows:
        # kalman rows = [obj_id, frame, time of acquisition, positions[3], velocities[3], ...]
        self.n_artificial_trajecs += 1
        
        # get extra, pretty much just for the time model
        ca = core_analysis.get_global_CachingAnalyzer()
        (obj_ids, use_obj_ids, is_mat_file, data_file, extra) = ca.initial_file_load(h5filebase)
        
        
        start = np.array(start)
        finish = np.array(finish)

        # seconds per frame, from the file's time model
        delta_t = extra['time_model'].framestamp2timestamp(2) - extra['time_model'].framestamp2timestamp(1)

        # constant velocity required to travel from start to finish in nframes
        mean_vel = (finish - start) / (nframes * delta_t)

        kalman_rows = np.zeros([nframes, 9])
        for n in range(nframes):
            kalman_rows[n,0] = 1                                # obj_id
            kalman_rows[n,1] = n                                # frame
            kalman_rows[n,2] = 0                                # timestamp (unused here)
            kalman_rows[n,3:6] = start + mean_vel*(n*delta_t)   # position
            kalman_rows[n,6:9] = mean_vel                       # velocity
            
        traj_id = 'artificial_' + str(self.n_artificial_trajecs)
        self.trajecs.setdefault(traj_id, Trajectory(kalman_rows, extra, stimulus = self.stimulus) )
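
A minimal usage sketch for create_trajectory(); the Dataset class name and file name are hypothetical, and the instance is assumed to provide self.trajecs, self.stimulus, and self.n_artificial_trajecs:

d = Dataset()  # hypothetical container class
d.create_trajectory(nframes=100,
                    h5filebase='data.h5',  # placeholder; only its time model is used
                    start=(0., 0., 0.),
                    finish=(0.5, 0., 0.))
print d.trajecs.keys()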
Code example #2
    def load_data(self, filename, kalman_smoothing=False, dynamic_model=None, fps=None, info={}, save_covariance=False):
        # use info to pass information to trajectory instances as a dictionary. 
        # eg. info={"post_type": "black"}
        # save_covariance: set to True if you need access to the covariance data. Keep as False if this is not important for analysis (takes up lots of space)
        
        # set up analyzer
        ca = core_analysis.get_global_CachingAnalyzer()
        (obj_ids, use_obj_ids, is_mat_file, data_file, extra) = ca.initial_file_load(filename)
        data_file.flush()

        # data set defaults
        if fps is None:
            fps = result_utils.get_fps(data_file)
        if dynamic_model is None:
            try:
                dyn_model = extra['dynamic_model_name']
            except KeyError:
                print 'cannot find dynamic model'
                print 'using EKF mamarama, units: mm'
                dyn_model = 'EKF mamarama, units: mm'
        else:
            dyn_model = dynamic_model

        # if kalman smoothing is on, then we cannot use the EKF model - strip the 'EKF ' prefix from the model name
        print '** Kalman Smoothing is: ', kalman_smoothing, ' **'
        if kalman_smoothing is True and dyn_model.startswith('EKF '):
            dyn_model = dyn_model[4:]
        print 'using dynamic model: ', dyn_model
        print 'framerate: ', fps
        print 'loading data.... '
        
        self.dynamic_model = dyn_model
        self.fps = fps
        
        # load object id's and save as Trajectory instances
        for obj_id in use_obj_ids:
            print 'processing: ', obj_id
            try:
                kalman_rows = ca.load_data( obj_id, data_file,
                                 dynamic_model_name = dyn_model,
                                 use_kalman_smoothing= kalman_smoothing,
                                 frames_per_second= fps)
            except Exception:
                print 'object id failed to load (probably no data): ', obj_id
                continue

            # couple object ID dictionary with trajectory objects
            trajec_id = str(obj_id) # this is not necessarily redundant with the obj_id, it allows for making a unique trajectory id when merging multiple datasets
            tmp = Trajectory(trajec_id, kalman_rows, info=info, fps=fps, save_covariance=save_covariance, extra=extra)
            self.trajecs.setdefault(trajec_id, tmp)
            
        return
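
A hedged usage sketch for the loader above; Dataset is a hypothetical class name and the .h5 filename is a placeholder:

d = Dataset()
d.load_data('DATA_example.kalmanized.h5',   # placeholder filename
            kalman_smoothing=True,
            info={"post_type": "black"},
            save_covariance=False)
print len(d.trajecs), 'trajectories loaded'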
Code example #3
def consider_stimulus(h5file, verbose_problems=False,
                      fanout_name="fanout.xml"):
    """ 
        Parses the corresponding fanout XML and finds IDs to use as well 
        as the stimulus.
        Returns 3 values: valid, use_objs_ids, stimulus.  
        valid is false if something was wrong
    """
   
    try:
        dirname = os.path.dirname(h5file)
        fanout_xml = os.path.join(dirname, fanout_name)
        if not os.path.exists(fanout_xml):
            if verbose_problems:
                logger.error("Stim_xml path not found '%s' for file '%s'" %
                             (fanout_xml, h5file))
            return False, None, None

        ca = core_analysis.get_global_CachingAnalyzer()
        (_, use_obj_ids, _, _, _) = ca.initial_file_load(h5file) 

        file_timestamp = timestamp_string_from_filename(h5file)

        fanout = xml_stimulus.xml_fanout_from_filename(fanout_xml)
        include_obj_ids, exclude_obj_ids = \
            fanout.get_obj_ids_for_timestamp(timestamp_string=file_timestamp)
        if include_obj_ids is not None:
            use_obj_ids = include_obj_ids
        if exclude_obj_ids is not None:
            use_obj_ids = list(set(use_obj_ids).difference(exclude_obj_ids))

        episode = fanout._get_episode_for_timestamp(
                                            timestamp_string=file_timestamp) 
        (_, _, stim_fname) = episode 
        return True, use_obj_ids, stim_fname

    except xml_stimulus.WrongXMLTypeError:
        if verbose_problems:
            logger.error("Caught WrongXMLTypeError for '%s'" % file_timestamp)
        return False, None, None
    except ValueError as ex:
        if verbose_problems:
            logger.error("Caught ValueError for '%s': %s" % 
                         (file_timestamp, ex))
        return False, None, None 
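
A short usage sketch, assuming the .h5 file sits next to its fanout.xml (paths are placeholders):

valid, use_obj_ids, stimulus = consider_stimulus('/data/session/file.h5',
                                                 verbose_problems=True)
if valid:
    print 'stimulus: %s, %d object ids' % (stimulus, len(use_obj_ids))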
Code example #4
    def __init__(self, kalman_filename):
        import flydra.a2.core_analysis as core_analysis
        import flydra.analysis.result_utils as result_utils

        self.ca = core_analysis.get_global_CachingAnalyzer()
        (obj_ids, use_obj_ids, is_mat_file, data_file,
         extra) = self.ca.initial_file_load(kalman_filename)
        self.data_file = data_file

        dynamic_model = extra['dynamic_model_name']
        print 'detected file loaded with dynamic model "%s"'%dynamic_model
        if dynamic_model.startswith('EKF '):
            dynamic_model = dynamic_model[4:]
        print '  for smoothing, will use dynamic model "%s"'%dynamic_model
        self.dynamic_model = dynamic_model
        self.fps = result_utils.get_fps( data_file )
        self.use_kalman_smoothing = True
        self.up_dir = (0,0,1)
Code example #5
 def test(self, filename, kalman_smoothing=False, dynamic_model=None, fps=None, info={}, save_covariance=False):
     ca = core_analysis.get_global_CachingAnalyzer()
     (obj_ids, use_obj_ids, is_mat_file, data_file, extra) = ca.initial_file_load(filename)
     return ca, obj_ids, use_obj_ids, is_mat_file, data_file, extra
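
The helper above simply exposes the raw tuple returned by initial_file_load(); a hypothetical call might look like (obj and the filename are placeholders):

ca, obj_ids, use_obj_ids, is_mat_file, data_file, extra = obj.test('data.h5')
print '%d object ids (%d usable)' % (len(obj_ids), len(use_obj_ids))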
Code example #6
    def __init__(self, h5):

        self._ca = core_analysis.get_global_CachingAnalyzer()
        obj_ids, use_obj_ids, is_mat_file, data_file, extra = self._ca.initial_file_load(h5)

        # collect raw (unsmoothed) 3D positions for every object id
        x = []
        y = []
        z = []

        for obj_id_enum, obj_id in enumerate(use_obj_ids):
            rows = self._ca.load_data(obj_id,
                                      data_file,
                                      use_kalman_smoothing=False)
            verts = np.array([rows['x'], rows['y'], rows['z']]).T
            x.append(verts[:, 0])
            y.append(verts[:, 1])
            z.append(verts[:, 2])

        x = np.concatenate(x)
        y = np.concatenate(y)
        z = np.concatenate(z)

        coords = np.array([x, y, z]).T.astype(np.float32)  # python-pcl's from_array expects a float32 array

        self.pubcyl = None
        self.pubpt = None
        self.p = pcl.PointCloud()
        self.p.from_array(coords)
        self.p.to_file('/tmp/flydracalib_points.pcd')  # scratch copy of the point cloud
        self.np = len(coords)

        self._cx, self._cy, self._cz, self._ax, self._ay, self._az, self._radius = (
            0, 0, 0, 0, 0, 0, 0)

        self.pubpts, pts = calib.visualization.create_point_cloud_message_publisher(
            coords,
            topic_name='/flydracalib/points',
            publish_now=True,
            latch=True)
        self.pubptsinliers = calib.visualization.create_point_cloud_message_publisher(
            [(0, 0, 0)],
            topic_name='/flydracalib/pointsinliers',
            publish_now=False,
            latch=True)

        self.pubcyl = calib.visualization.create_cylinder_publisher(
            self._cx,
            self._cy,
            self._cz,
            self._ax,
            self._ay,
            self._az,
            self._radius,
            topic_name='/flydracalib/cyl',
            publish_now=False,
            latch=True,
            length=5,
            color=(0, 1, 1, 0.4))
        self.pubpt = calib.visualization.create_point_publisher(
            self._cx,
            self._cy,
            self._cz,
            r=0.1,
            topic_name='/flydracalib/cylcenter',
            publish_now=False,
            latch=True,
            color=(1, 0, 0, 0.5))

        thisdir = os.path.dirname(os.path.abspath(__file__))
        self.ui = Gtk.Builder()
        self.ui.add_from_file(os.path.join(thisdir, "tune.ui"))

        self._ks = -1.0
        self.ui.get_object("ks_adjustment").props.value = self._ks
        self._sr = 0.5
        self.ui.get_object("sr_adjustment").props.value = self._sr
        self._nd = 0.5
        self.ui.get_object("nd_adjustment").props.value = self._nd
        self._dt = 0.10
        self.ui.get_object("dt_adjustment").props.value = self._dt
        self._minr = 0.5
        self.ui.get_object("minr_adjustment").props.value = self._minr
        self._maxr = 1.5
        self.ui.get_object("maxr_adjustment").props.value = self._maxr

        self._exbtn = self.ui.get_object("execute_btn")
        self._exbtn.connect("clicked", self._calculate)

        self._axadj = self.ui.get_object("ax_adjustment")
        self._ayadj = self.ui.get_object("ay_adjustment")
        self._azadj = self.ui.get_object("az_adjustment")
        self._cxadj = self.ui.get_object("cx_adjustment")
        self._cyadj = self.ui.get_object("cy_adjustment")
        self._czadj = self.ui.get_object("cz_adjustment")
        self._radj = self.ui.get_object("r_adjustment")
        self._in = self.ui.get_object("inliersentry")

        self.ui.connect_signals(self)
        w = self.ui.get_object("window1")
        w.connect("delete-event", rosgobject.main_quit)
        w.show_all()
Code example #7
    def load_data(self, filename, kalman_smoothing=False, dynamic_model=None, fps=None, info={}, save_covariance=False,
                  experiment_length=19*60*60):  # experiment_length: maximum duration to load, in seconds (default 19 hours)
        # use info to pass information to trajectory instances as a dictionary. 
        # eg. info={"post_type": "black"}
        # save_covariance: set to True if you need access to the covariance data. Keep as False if this is not important for analysis (takes up lots of space)
        
        # set up analyzer
        ca = core_analysis.get_global_CachingAnalyzer()
        (obj_ids, use_obj_ids, is_mat_file, data_file, extra) = ca.initial_file_load(filename)
        data_file.flush()

        # data set defaults
        if fps is None:
            fps = result_utils.get_fps(data_file)
        if dynamic_model is None:
            try:
                dyn_model = extra['dynamic_model_name']
            except KeyError:
                print 'cannot find dynamic model'
                print 'using EKF mamarama, units: mm'
                dyn_model = 'EKF mamarama, units: mm'
        else:
            dyn_model = dynamic_model

        # if kalman smoothing is on, then we cannot use the EKF model - strip the 'EKF ' prefix from the model name
        print '** Kalman Smoothing is: ', kalman_smoothing, ' **'
        if kalman_smoothing is True and dyn_model.startswith('EKF '):
            dyn_model = dyn_model[4:]
        print 'using dynamic model: ', dyn_model
        print 'framerate: ', fps
        print 'loading data.... '
        
        self.dynamic_model = dyn_model
        self.fps = fps
        
        time_start = None
        # load object id's and save as Trajectory instances
        for obj_id in use_obj_ids:
            print 'processing: ', obj_id
            try:
                kalman_rows = ca.load_data( obj_id, data_file,
                                 dynamic_model_name = dyn_model,
                                 use_kalman_smoothing= kalman_smoothing,
                                 frames_per_second= fps)
            except Exception:
                print 'object id failed to load (probably no data): ', obj_id
                continue

            # couple object ID dictionary with trajectory objects
            # note: str.rstrip/lstrip remove character *sets*, not substrings,
            # so strip the DATA prefix and .kalmanized/.h5 suffixes explicitly
            filenamebase = os.path.basename(filename)
            trajecbase = filenamebase
            if trajecbase.startswith('DATA'):
                trajecbase = trajecbase[len('DATA'):]
            for suffix in ('.h5', '.kalmanized'):
                if trajecbase.endswith(suffix):
                    trajecbase = trajecbase[:-len(suffix)]
            trajec_id = trajecbase + '_' + str(obj_id) # filename details + original object id - this is unique
            tmp = Trajectory(trajec_id, kalman_rows, info=info, fps=fps, save_covariance=save_covariance, extra=extra)
            
            tmp.h5 = os.path.basename(filename)
            
            if time_start is None:
                time_start = tmp.timestamp_epoch
            if tmp.timestamp_epoch - time_start > experiment_length:
                break
            self.trajecs.setdefault(trajec_id, tmp)
            
            
        self.h5_files_loaded.append(os.path.basename(filename))
            
        return
Code example #8
File: readh5.py  Project: florisvb/floris
 def load_data (self,    filename, 
                         calibration_file = None, 
                         objs = None, 
                         obj_filelist = None, 
                         kalman_smoothing = True, 
                         fps = None,     
                         dynamic_model = None, 
                         load_2d=True,):
 
     self.datasets.append(len(self.datasets)+1)
     
     # raw h5 - this is the 2d raw camera data:
     if load_2d is True:
         h5 = PT.openFile( filename, mode='r' )
         camn2cam_id, cam_id2camns = result_utils.get_caminfo_dicts(h5)
         cam_ids = cam_id2camns.keys()
         cams2d = []
         for cam_id in cam_ids:
             cams2d.append( Cam2d(cam_id, h5) )
         self.cams2d.append(cams2d)
         
     # set up analyzer
     ca = core_analysis.get_global_CachingAnalyzer()
     (obj_ids, use_obj_ids, is_mat_file, data_file, extra) = ca.initial_file_load(filename)
     
     # data set defaults
     if fps is None:
         fps = result_utils.get_fps(data_file)
     if dynamic_model is None:
         try:
             dyn_model = extra['dynamic_model_name']
         except KeyError:
             dyn_model = 'EKF mamarama, units: mm'
     else:
         dyn_model = dynamic_model
     
     # if kalman smoothing is on, then we cannot use the EKF model - strip the 'EKF ' prefix from the model name
     print '** Kalman Smoothing is: ', kalman_smoothing, ' **'
     if kalman_smoothing is True and dyn_model.startswith('EKF '):
         dyn_model = dyn_model[4:]
     print 'using dynamic model: ', dyn_model
         
     if objs is None and obj_filelist is None:
         print "running through all object id's, this might take a while..."
         obj_only = use_obj_ids # this is all the unique object id's
     elif obj_filelist is not None:
         tmp = np.loadtxt(obj_filelist,delimiter=',')
         obj_only = np.array(tmp[:,0], dtype='int')
     else:
         obj_only = np.array(objs)
         
     print 'loading data.... '
     for obj_id in obj_only:
         try: 
             kalman_rows =  ca.load_data( obj_id, data_file,
                                  dynamic_model_name = dyn_model,
                                  use_kalman_smoothing= kalman_smoothing,
                                  frames_per_second= fps)
         except Exception:
             print 'object id failed to load (probably no data): ', obj_id
             continue
         
         # couple object ID dictionary with trajectory objects
         traj_id = (str(self.datasets[-1])+'_'+str(obj_id))
         self.trajecs.setdefault(traj_id, ffa.Trajectory(kalman_rows, extra, stimulus = self.stimulus, fps = fps) )
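
A sketch of loading only selected object ids with the method above; the class name and file names are hypothetical. obj_filelist is expected to be a comma-delimited text file whose first column holds object ids:

d = Dataset()  # hypothetical container class
d.load_data('data.h5',                # placeholder
            obj_filelist='objs.csv',  # placeholder CSV; first column = obj ids
            kalman_smoothing=True)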
Code example #9
def get_good_smoothed_tracks(filename, obj_ids,
                             min_frames_per_track,
                             use_smoothing,
                             dynamic_model_name):
    ''' Yields (obj_id, rows) for each track in obj_ids in the file
        that has the given minimum number of frames. '''
            
    frames_per_second = 60.0
    dt = 1 / frames_per_second

    ca = core_analysis.get_global_CachingAnalyzer()

    # ca.load_data() accepts the filename directly, so initial_file_load is not needed here
    data_file = filename
    
    for obj_id in obj_ids:
        try:
            frows = ca.load_data(obj_id, data_file, use_kalman_smoothing=False)

            # don't consider tracks too small
            if len(frows) < min_frames_per_track:
                continue

            # write timestamp entry
            
            # The 'timestamp' field returned by flydra is the time
            # when the computation was made, not the actual data timestamp.
            # For computing the actual timestamp, use the frame number
            # and multiply by dt
            
            global warned_fixed_dt
            if not warned_fixed_dt:
                warned_fixed_dt = True
                logger.info('Warning: We are assuming that the data is ' \
                      'equally spaced, and fps = %s.' % frames_per_second)

            # overwrite the 'timestamp' column with frame * dt
            frows['timestamp'] = frows['frame'] * dt

            # sanity check: timestamps must increase within a track
            for i in range(len(frows) - 1):
                if frows['obj_id'][i] == frows['obj_id'][i + 1]:
                    if not (frows['timestamp'][i] < frows['timestamp'][i + 1]):
                        print("fishy behavior at index %d" % i)
                        
            # return raw data if smoothing is not requested
            if not use_smoothing:
                yield obj_id, frows
                continue

            # otherwise, run the smoothing
            srows = ca.load_data(obj_id, data_file, use_kalman_smoothing=True,
                     frames_per_second=frames_per_second,
                     dynamic_model_name=dynamic_model_name)

            # make a copy, just in case
            srows = srows.copy()

            srows['timestamp'] = srows['frame'] * dt
            
            
            # From Andrew:
            
            # I'm pretty sure there is an inconsistency in some of this 
            # unit stuff. Basically, I used to do the camera calibrations 
            # all in mm (so that the 3D coords would come out in mm). Then,
            # I started doing analyses in meters... And I think some of
            # the calibration and dynamic model stuff got defaulted to meters.
            # And basically there are inconsistencies in there.
            # Anyhow, I think the extent of the issue is that you'll be off 
            # by 1000, so hopefully you can just determine that by looking at the data.

            # quick fix for the units inconsistency described above
            if dynamic_model_name == "mamarama, units: mm":
                # numpy.hypot() only takes two input arrays (a third positional
                # argument is an output buffer), so compute the 3D speed explicitly
                v = numpy.sqrt(srows['xvel'] ** 2 +
                               srows['yvel'] ** 2 +
                               srows['zvel'] ** 2)

                perc = [1, 5, 50, 95, 99]
                scores = [scipy.stats.scoreatpercentile(v, x) for x in perc]
                print "scores: %s" % ', '.join('%f' % x for x in scores)

                score95 = scipy.stats.scoreatpercentile(v, 95)

                if score95 < 100.0:
                    logger.debug(" score95 = %f, assuming m" % score95)
                else:
                    logger.debug(" score95 = %f, assuming mm" % score95)
                    srows['xvel'] *= 0.001
                    srows['yvel'] *= 0.001
                    srows['zvel'] *= 0.001

                v = numpy.sqrt(srows['xvel'] ** 2 +
                               srows['yvel'] ** 2 +
                               srows['zvel'] ** 2)
                final_score95 = scipy.stats.scoreatpercentile(v, 95)

                logger.info('After much deliberation, 95%% score is %f.' %
                            final_score95)
                
            yield obj_id, srows 
            
        except core_analysis.NotEnoughDataToSmoothError:
            logger.warning('not enough data to smooth obj_id %d, skipping.' % (obj_id,))
            continue 
        
    ca.close()
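
A minimal consumption sketch for the generator above; filename and obj_ids are placeholders:

for obj_id, rows in get_good_smoothed_tracks(
        filename='data.kh5',   # placeholder
        obj_ids=[1, 2, 3],     # placeholder
        min_frames_per_track=400,
        use_smoothing=True,
        dynamic_model_name="mamarama, units: mm"):
    print obj_id, len(rows)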
Code example #10
def main():
    np.seterr(all='raise')
                 
    parser = OptionParser()

    parser.add_option("--output_dir", default='saccade_detect_output',
                      help="Output directory")

    parser.add_option("--min_frames_per_track", default=400,
        help="Minimum number of frames per track [= %default]")

    parser.add_option("--confirm_problems",
                      help="Stop interactively on problems with log files'\
                      '(e.g.: cannot find valid obj_ids) [default: %default]",
                      default=False, action="store_true")

    parser.add_option("--dynamic_model_name",
                      help="Smoothing dynamical model [default: %default]",
                      default="mamarama, units: mm")
    
    parser.add_option("--debug_output", help="Creates debug figures.",
                      default=False, action="store_true")

    parser.add_option("--nocache", help="Ignores already computed results.",
                      default=False, action="store_true")

    parser.add_option("--smoothing", help="Uses Kalman-smoothed data.",
                      default=False, action="store_true")

    # detection parameters
    dt = 1.0 / 60
    parser.add_option("--deltaT_inner_sec", default=4 * dt, type='float',
                      help="Inner interval [= %default]")
    parser.add_option("--deltaT_outer_sec", default=10 * dt, type='float',
                      help="Outer interval [= %default]")
    parser.add_option("--min_amplitude_deg", default=25, type='float',
                      help="Minimum saccade amplitude (deg) [= %default]")
    parser.add_option("--min_linear_velocity", default=0.1, type='float',
                      help="Minimum linear velocity when saccading (m/s) "
                            "[= %default]")
    parser.add_option("--max_linear_acceleration", default=20, type='float',
                      help="Maximum linear acceleration when saccading "
                      "(m/s^2) [= %default]")
    parser.add_option("--max_angular_velocity", default=8000, type='float',
                      help="Maximum angular velocity when saccading (deg/s) "
                      "[= %default]")
    parser.add_option("--max_orientation_dispersion_deg", default=15,
                      type='float',
                      help="Maximum dispersion (deg) [= %default]")
    parser.add_option("--minimum_interval_sec", default=10 * dt, type='float',
                      help="Minimum interval between saccades. [= %default]")
    
    (options, args) = parser.parse_args()
    
    if not args:
        logger.error('No files or directories specified.')
        sys.exit(-1)
        
    # Create processed string
    processed = 'geometric_saccade_detector %s %s %s@%s Python %s' % \
                (__version__, datetime.now().strftime("%Y%m%d_%H%M%S"),
                 get_user(), platform.node(), platform.python_version())
        
    if not os.path.exists(options.output_dir):
        os.makedirs(options.output_dir)

    good_files = get_good_files(where=args, pattern="*.kh5",
                                confirm_problems=options.confirm_problems)

    if len(good_files) == 0:
        logger.error("No good files to process.")
        sys.exit(1)

    try:
        n = len(good_files)
        for i in range(n):
            (filename, obj_ids, stim_fname) = good_files[i]
            # only maintain basename
            stim_fname = os.path.splitext(os.path.basename(stim_fname))[0]
            basename = os.path.splitext(os.path.basename(filename))[0]
            
            output_basename = os.path.join(options.output_dir,
                                           basename + '-saccades')        
            output_saccades_hdf = output_basename + '.h5'
                
            if os.path.exists(output_saccades_hdf) and not options.nocache:
                logger.info('File %r exists; skipping. '
                            '(use --nocache to ignore)' % 
                                 output_saccades_hdf)
                continue
            
            logger.info("File %d/%d %s %s %s " % 
                        (i, n, str(filename), str(obj_ids), stim_fname))
            
            # concatenate all in one track
            all_data = None
    
            for _, rows in get_good_smoothed_tracks(
                    filename=filename,
                    obj_ids=obj_ids,
                    min_frames_per_track=options.min_frames_per_track,
                    dynamic_model_name=options.dynamic_model_name,
                    use_smoothing=options.smoothing):
    
                all_data = rows.copy() if all_data is None \
                            else np.concatenate((all_data, rows))                
            
            if all_data is None:
                logger.info('Not enough data found for %s; skipping.' % 
                            filename)
                continue
            
            params = {
              'deltaT_inner_sec': options.deltaT_inner_sec,
              'deltaT_outer_sec': options.deltaT_outer_sec,
              'min_amplitude_deg': options.min_amplitude_deg,
              'max_orientation_dispersion_deg': 
                        options.max_orientation_dispersion_deg,
              'minimum_interval_sec': options.minimum_interval_sec,
              'max_linear_acceleration': options.max_linear_acceleration,
              'min_linear_velocity': options.min_linear_velocity,
              'max_angular_velocity': options.max_angular_velocity,
            }
            saccades, annotated_data = geometric_saccade_detect(all_data,
                                                                params)
    
            for saccade in saccades:
                check_saccade_is_well_formed(saccade)
                
            # other fields used for managing different samples, 
            # used in the analysis
            saccades['species'] = 'Dmelanogaster'
            saccades['stimulus'] = stim_fname
            sample_name = 'DATA' + timestamp_string_from_filename(filename)
            saccades['sample'] = sample_name
            saccades['sample_num'] = -1  # will be filled in by someone else
            saccades['processed'] = processed    
        
            logger.info("Writing to %s {h5,mat,pickle}" % output_basename)
            saccades_write_all(output_basename, saccades)
            
            # Write debug figures
            if options.debug_output:
                debug_output_dir = os.path.join(options.output_dir, basename)
                logger.info("Writing HTML+png to %s" % debug_output_dir)    
                write_debug_output(debug_output_dir, basename,
                                   annotated_data, saccades)
 
    except Exception as e:
        logger.error('Error while processing. Exception and traceback follow.')
        logger.error(str(e))
        logger.error(traceback.format_exc())
        sys.exit(-2)
        
    finally:
        print('Closing flydra cache')
        ca = core_analysis.get_global_CachingAnalyzer()
        ca.close()
        
    sys.exit(0)
Code example #11
def get_good_smoothed_tracks(filename, obj_ids,
                             min_frames_per_track,
                             use_smoothing,
                             dynamic_model_name):
    ''' Yields (obj_id, rows) for each track in obj_ids in the file
        that has the given minimum number of frames. '''
            
    frames_per_second = 60.0
    dt = 1 / frames_per_second

    ca = core_analysis.get_global_CachingAnalyzer()

    warned = False

    # ca.load_data() accepts the filename directly, so initial_file_load is not needed here
    data_file = filename
    
    for obj_id in obj_ids:
        try:
            frows = ca.load_data(obj_id, data_file, use_kalman_smoothing=False)

            # don't consider tracks too small
            if len(frows) < min_frames_per_track:
                continue

            # write timestamp entry
            
            # The 'timestamp' field returned by flydra is the time
            # when the computation was made, not the actual data timestamp.
            # For computing the actual timestamp, use the frame number
            # and multiply by dt
            
            global warned_fixed_dt
            if not warned_fixed_dt:
                warned_fixed_dt = True
                logger.info('Warning: We are assuming that the data is ' \
                      'equally spaced, and fps = %s.' % frames_per_second)

            # overwrite the 'timestamp' column with frame * dt
            frows['timestamp'] = frows['frame'] * dt
                
            for i in range(len(frows) - 1):
                if frows['obj_id'][i] == frows['obj_id'][i + 1]:
                    assert frows['timestamp'][i] < frows['timestamp'][i + 1]
                
            # return raw data if smoothing is not requested
            if not use_smoothing:
                yield (obj_id,
                       extract_interesting_fields(frows,
                                                  np.dtype(rows_dtype)))
                continue

            # otherwise, run the smoothing
            srows = ca.load_data(obj_id, data_file, use_kalman_smoothing=True,
                     frames_per_second=frames_per_second,
                     dynamic_model_name=dynamic_model_name)
                     
            # make a copy, just in case
            srows = srows.copy()

            srows['timestamp'] = srows['frame'] * dt
            
            # From Andrew:
            # I'm pretty sure there is an inconsistency in some of this 
            # unit stuff. Basically, I used to do the camera calibrations 
            # all in mm (so that the 3D coords would come out in mm). Then,
            # I started doing analyses in meters... And I think some of
            # the calibration and dynamic model stuff got defaulted to meters.
            # And basically there are inconsistencies in there.
            # Anyhow, I think the extent of the issue is that you'll be off 
            # by 1000, so hopefully you can just determine that by looking 
            # at the data.
            # quick fix
            if dynamic_model_name == "mamarama, units: mm" and not warned:
                warned = True
                logger.info("Warning: Implementing simple workaround"
                            " for flydra's " 
                            "units inconsistencies "
                            "(multiplying xvel,yvel by 1000).")
                srows['xvel'] *= 1000
                srows['yvel'] *= 1000
                
            yield obj_id, extract_interesting_fields(srows,
                                                     np.dtype(rows_dtype))
            
        except core_analysis.NotEnoughDataToSmoothError:
            # not enough data to smooth this obj_id; skip it
            continue
        
    ca.close()
Code example #12
 def load_data (self, filename, calibration_file = None, objs = None, obj_filelist = None, kalman_smoothing = True, fps = None, dynamic_model = None, gender = None, post_type=None):
 
     self.datasets.append(len(self.datasets)+1)
     self.filename.append(filename)
     
     if calibration_file is not None:
         print "Calibration Files not yet supported!!!"
         
     # set up analyzer
     ca = core_analysis.get_global_CachingAnalyzer()
     (obj_ids, use_obj_ids, is_mat_file, data_file, extra) = ca.initial_file_load(filename)
     
     # data set defaults
     if fps is None:
         fps = result_utils.get_fps(data_file)
     if dynamic_model is None:
         try:
             dyn_model = extra['dynamic_model_name']
         except KeyError:
             dyn_model = 'EKF mamarama, units: mm'
     else:
         dyn_model = dynamic_model
     
     # if kalman smoothing is on, then we cannot use the EKF model - strip the 'EKF ' prefix from the model name
     print '** Kalman Smoothing is: ', kalman_smoothing, ' **'
     if kalman_smoothing is True and dyn_model.startswith('EKF '):
         dyn_model = dyn_model[4:]
     print 'using dynamic model: ', dyn_model
 
     if objs is None and obj_filelist is None:
         print "running through all object id's, this might take a while..."
         obj_only = use_obj_ids # this is all the unique object id's
     elif obj_filelist is not None:
         tmp = np.loadtxt(obj_filelist,delimiter=',')
         obj_only = np.array(tmp[:,0], dtype='int')
     else:
         if type(objs) is not list:
             objs = [objs]
         obj_only = np.array(objs)
         
     print 'framerate: ', fps
     print 'loading data.... '
     for obj_id in obj_only:
         print 'processing: ', obj_id
         try:
             # kalman rows = [obj_id, frame, time of acquisition, positions[3], velocities[3], ...]
             kalman_rows =  ca.load_data( obj_id, data_file,
                                  dynamic_model_name = dyn_model,
                                  use_kalman_smoothing= kalman_smoothing,
                                  frames_per_second= fps)
         except Exception:
             print 'object id failed to load (probably no data): ', obj_id
             continue
       
         # couple object ID dictionary with trajectory objects
         traj_id = (str(self.datasets[-1])+'_'+str(obj_id))
         self.trajecs.setdefault(traj_id, Trajectory(kalman_rows, extra, stimulus = self.stimulus, fps = fps, post_type=post_type) )