Example #1
File: readh5.py  Project: florisvb/floris
    def __init__(self, cam_id, h5):
    
        ''' data2d_distorted structure:
        /data2d_distorted (Table(3091,)) '2d data'
          description := {
          0: "camn": UInt16Col(shape=(), dflt=0, pos=0),
          1: "frame": UInt64Col(shape=(), dflt=0, pos=1),
          2: "timestamp": Float64Col(shape=(), dflt=0.0, pos=2),
          3: "cam_received_timestamp": Float64Col(shape=(), dflt=0.0, pos=3),
          4: "x": Float32Col(shape=(), dflt=0.0, pos=4),
          5: "y": Float32Col(shape=(), dflt=0.0, pos=5),
          6: "area": Float32Col(shape=(), dflt=0.0, pos=6),
          7: "slope": Float32Col(shape=(), dflt=0.0, pos=7),
          8: "eccentricity": Float32Col(shape=(), dflt=0.0, pos=8),
          9: "frame_pt_idx": UInt8Col(shape=(), dflt=0, pos=9),
          10: "cur_val": UInt8Col(shape=(), dflt=0, pos=10),
          11: "mean_val": Float32Col(shape=(), dflt=0.0, pos=11),
          12: "sumsqf_val": Float32Col(shape=(), dflt=0.0, pos=12)}
          byteorder := 'little'
          chunkshape := (585,)

        '''
        
        self.cam_id = cam_id
        camn2cam_id, cam_id2camns = result_utils.get_caminfo_dicts(h5)
        self.camn = cam_id2camns[self.cam_id]
        self.fps = result_utils.get_fps( h5 )
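        # note: the resolution is read here from a hard-coded image node name
        # rather than from the node corresponding to self.cam_id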
        self.resolution = getattr(h5.root.images,'sericomyia-mobile.local_3').read().shape

        # pull out only the rows that belong to this camera
        # note: a single frame can contribute multiple rows if more than one 2D object was detected in it
        all_data = h5.root.data2d_distorted[:]
        this_idx = numpy.nonzero( all_data['camn']==self.camn )[0]
        self.data = all_data[this_idx]
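        # A minimal alternative sketch (not in the original code): assuming h5 is
        # the open PyTables File used above and self.camn is a single integer camn,
        # the same per-camera selection could be done in-kernel:
        #
        #   self.data = h5.root.data2d_distorted.readWhere('camn == %d' % self.camn)
        #
        # readWhere evaluates the condition inside HDF5 instead of reading the
        # whole table into memory first.
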
    def load_data(self, filename, kalman_smoothing=False, dynamic_model=None, fps=None, info={}, save_covariance=False):
        # use info to pass information to trajectory instances as a dictionary,
        # e.g. info={"post_type": "black"}
        # save_covariance: set to True if you need access to the covariance data;
        # keep it False if that is not important for the analysis (the covariance takes up a lot of space)
        
        # set up analyzer
        ca = core_analysis.get_global_CachingAnalyzer()
        (obj_ids, use_obj_ids, is_mat_file, data_file, extra) = ca.initial_file_load(filename)
        data_file.flush()

        # data set defaults
        if fps is None:
            fps = result_utils.get_fps(data_file)
        if dynamic_model is None:
            try:
                dyn_model = extra['dynamic_model_name']
            except:
                print 'cannot find dynamic model'
                print 'using EKF mamarama, units: mm'
                dyn_model = 'EKF mamarama, units: mm'
        if dynamic_model is not None:
            dyn_model = dynamic_model

        # if kalman smoothing is on, then we cannot use the EKF model - remove that from the model name
        print '** Kalman Smoothing is: ', kalman_smoothing, ' **'
        if kalman_smoothing is True:
            dyn_model = dyn_model[4:]
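            # (this assumes the model name carries the 4-character 'EKF ' prefix;
            #  Example #3 below guards the same step with startswith('EKF '))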
        print 'using dynamic model: ', dyn_model
        print 'framerate: ', fps
        print 'loading data.... '
        
        self.dynamic_model = dyn_model
        self.fps = fps
        
        # load object id's and save as Trajectory instances
        for obj_id in use_obj_ids:
            print 'processing: ', obj_id
            try: 
                print obj_id
                kalman_rows = ca.load_data( obj_id, data_file,
                                 dynamic_model_name = dyn_model,
                                 use_kalman_smoothing= kalman_smoothing,
                                 frames_per_second= fps)
            except:
                print 'object id failed to load (probably no data): ', obj_id
                continue

            # couple object ID dictionary with trajectory objects
            trajec_id = str(obj_id) # not simply redundant with obj_id: a string id allows building unique trajectory ids when merging multiple datasets
            tmp = Trajectory(trajec_id, kalman_rows, info=info, fps=fps, save_covariance=save_covariance, extra=extra)
            self.trajecs.setdefault(trajec_id, tmp)
            
        return
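
A minimal usage sketch (not from the source; the filename and the dataset variable are hypothetical), assuming dataset is an instance of the class that defines load_data above:

    dataset.load_data('DATA20101010.kalmanized.h5',
                      kalman_smoothing=True,
                      info={'post_type': 'black'},
                      save_covariance=False)
    # each successfully loaded obj_id is then available as dataset.trajecs[str(obj_id)]
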
Example #3
    def __init__(self,kalman_filename):
        import flydra.a2.core_analysis as core_analysis
        import flydra.analysis.result_utils as result_utils

        self.ca = core_analysis.get_global_CachingAnalyzer()
        (obj_ids, use_obj_ids, is_mat_file, data_file,
         extra) = self.ca.initial_file_load(kalman_filename)
        self.data_file = data_file
        self.up_dir = None

        if 1:
            dynamic_model = extra['dynamic_model_name']
            print 'detected file loaded with dynamic model "%s"'%dynamic_model
            if dynamic_model.startswith('EKF '):
                dynamic_model = dynamic_model[4:]
            print '  for smoothing, will use dynamic model "%s"'%dynamic_model
        self.dynamic_model = dynamic_model
        self.fps = result_utils.get_fps( data_file )
        self.use_kalman_smoothing = True
        self.up_dir = (0,0,1)
    def load_data(self, filename, kalman_smoothing=False, dynamic_model=None, fps=None, info={}, save_covariance=False, experiment_length=19*60*60):
        # use info to pass information to trajectory instances as a dictionary,
        # e.g. info={"post_type": "black"}
        # save_covariance: set to True if you need access to the covariance data;
        # keep it False if that is not important for the analysis (the covariance takes up a lot of space)
        
        # set up analyzer
        ca = core_analysis.get_global_CachingAnalyzer()
        (obj_ids, use_obj_ids, is_mat_file, data_file, extra) = ca.initial_file_load(filename)
        data_file.flush()

        # data set defaults
        if fps is None:
            fps = result_utils.get_fps(data_file)
        if dynamic_model is None:
            try:
                dyn_model = extra['dynamic_model_name']
            except:
                print 'cannot find dynamic model'
                print 'using EKF mamarama, units: mm'
                dyn_model = 'EKF mamarama, units: mm'
        if dynamic_model is not None:
            dyn_model = dynamic_model

        # if kalman smoothing is on, then we cannot use the EKF model - remove that from the model name
        print '** Kalman Smoothing is: ', kalman_smoothing, ' **'
        if kalman_smoothing is True:
            dyn_model = dyn_model[4:]
        print 'using dynamic model: ', dyn_model
        print 'framerate: ', fps
        print 'loading data.... '
        
        self.dynamic_model = dyn_model
        self.fps = fps
        
        time_start = None
        # load object id's and save as Trajectory instances
        for obj_id in use_obj_ids:
            print 'processing: ', obj_id
            try: 
                print obj_id
                kalman_rows = ca.load_data( obj_id, data_file,
                                 dynamic_model_name = dyn_model,
                                 use_kalman_smoothing= kalman_smoothing,
                                 frames_per_second= fps)
            except:
                print 'object id failed to load (probably no data): ', obj_id
                continue

            # couple object ID dictionary with trajectory objects
            filenamebase = os.path.basename(filename)
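            # note: rstrip/lstrip strip sets of characters, not literal substrings;
            # the chain below is meant to reduce a name like 'DATA20101010.kalmanized.h5'
            # to its date/serial portion for use in the trajectory id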
            trajecbase = filenamebase.rstrip('5').rstrip('.h').lstrip('DATA').rstrip('.kalmanized')
            trajec_id = trajecbase + '_' + str(obj_id) # filename details + original object id - this is unique
            tmp = Trajectory(trajec_id, kalman_rows, info=info, fps=fps, save_covariance=save_covariance, extra=extra)
            
            tmp.h5 = os.path.basename(filename)
            
            # record the epoch timestamp of the first trajectory, then stop
            # loading once a trajectory starts more than experiment_length
            # seconds after it
            if time_start is None:
                time_start = tmp.timestamp_epoch
            if tmp.timestamp_epoch - time_start > experiment_length:
                break
            self.trajecs.setdefault(trajec_id, tmp)
            
            
        self.h5_files_loaded.append(os.path.basename(filename))
            
        return
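
A hedged usage sketch (filenames hypothetical): this variant embeds the source filename in each trajectory id, records every file in self.h5_files_loaded, and stops adding trajectories once one starts more than experiment_length seconds after the first in a given call:

    for fname in ['DATA20101010.kalmanized.h5', 'DATA20101011.kalmanized.h5']:
        dataset.load_data(fname, kalman_smoothing=True,
                          experiment_length=19*60*60)
    print dataset.h5_files_loaded
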
Example #5
File: readh5.py  Project: florisvb/floris
 def load_data(self, filename,
               calibration_file=None,
               objs=None,
               obj_filelist=None,
               kalman_smoothing=True,
               fps=None,
               dynamic_model=None,
               load_2d=True):
 
     self.datasets.append(len(self.datasets)+1)
     
     # raw h5 - this is the 2d raw camera data:
     if load_2d is True:
         h5 = PT.openFile( filename, mode='r' )
         camn2cam_id, cam_id2camns = result_utils.get_caminfo_dicts(h5)
         cam_ids = cam_id2camns.keys()
         cams2d = []
         for cam_id in cam_ids:
             cams2d.append( Cam2d(cam_id, h5) )
         self.cams2d.append(cams2d)
         
     # set up analyzer
     ca = core_analysis.get_global_CachingAnalyzer()
     (obj_ids, use_obj_ids, is_mat_file, data_file, extra) = ca.initial_file_load(filename)
     
     # data set defaults
     if fps is None:
         fps = result_utils.get_fps(data_file)
     if dynamic_model is None:
         try:
             dyn_model = extra['dynamic_model_name']
         except:
             dyn_model = 'EKF mamarama, units: mm'
     if dynamic_model is not None:
         dyn_model = dynamic_model
     
     # if kalman smoothing is on, then we cannot use the EKF model - remove that from the model name
     print '**Kalman Smoothing is: ', kalman_smoothing, ' **'
     if kalman_smoothing is True:
         dyn_model = dyn_model[4:]
     print 'using dynamic model: ', dyn_model
         
     if objs is None and obj_filelist is None:
         print "running through all object id's, this might take a while..."
         obj_only = use_obj_ids # this is all the unique object id's 
     if obj_filelist is not None:
         tmp = np.loadtxt(obj_filelist,delimiter=',')
         obj_only = np.array(tmp[:,0], dtype='int')
     elif objs is not None:
         obj_only = np.array(objs)
         
     print 'loading data.... '
     for obj_id in obj_only:
         try: 
             kalman_rows =  ca.load_data( obj_id, data_file,
                                  dynamic_model_name = dyn_model,
                                  use_kalman_smoothing= kalman_smoothing,
                                  frames_per_second= fps)
         except:
             print 'object id failed to load (probably no data): ', obj_id
             continue
         
         # couple object ID dictionary with trajectory objects
         traj_id = (str(self.datasets[-1])+'_'+str(obj_id))
         self.trajecs.setdefault(traj_id, ffa.Trajectory(kalman_rows, extra, stimulus = self.stimulus, fps = fps) )
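
A hedged usage sketch (file names and contents hypothetical): obj_filelist is parsed with np.loadtxt(..., delimiter=','), so it should be a comma-separated text file whose first column holds the object ids to load (e.g. lines like "17,0" and "42,0"):

    dataset.load_data('DATA20101010.kalmanized.h5',
                      obj_filelist='obj_list.txt',
                      kalman_smoothing=True,
                      load_2d=False)
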
Example #6
 def load_data (self, filename, calibration_file = None, objs = None, obj_filelist = None, kalman_smoothing = True, fps = None, dynamic_model = None, gender = None, post_type=None):
 
     self.datasets.append(len(self.datasets)+1)
     self.filename.append(filename)
     
     if calibration_file is not None:
         print "Calibration Files not yet supported!!!"
         
     # set up analyzer
     ca = core_analysis.get_global_CachingAnalyzer()
     (obj_ids, use_obj_ids, is_mat_file, data_file, extra) = ca.initial_file_load(filename)
     
     # data set defaults
     if fps is None:
         fps = result_utils.get_fps(data_file)
     if dynamic_model is None:
         try:
             dyn_model = extra['dynamic_model_name']
         except:
             dyn_model = 'EKF mamarama, units: mm'
     if dynamic_model is not None:
         dyn_model = dynamic_model
     
     # if kalman smoothing is on, then we cannot use the EKF model - remove that from the model name
     print '**Kalman Smoothing is: ', kalman_smoothing, ' **'
     if kalman_smoothing is True:
         dyn_model = dyn_model[4:]
     print 'using dynamic model: ', dyn_model
 
     if objs is None and obj_filelist is None:
         print "running through all object id's, this might take a while..."
         obj_only = use_obj_ids # this is all the unique object id's 
     if obj_filelist is not None:
         tmp = np.loadtxt(obj_filelist,delimiter=',')
         obj_only = np.array(tmp[:,0], dtype='int')
     elif objs is not None:
         if type(objs) is not list:
             objs = [objs] 
         obj_only = np.array(objs)
         
     print fps
     print 'loading data.... '
     for obj_id in obj_only:
         print 'processing: ', obj_id
         try: 
             print obj_id
             # kalman rows = [
             kalman_rows =  ca.load_data( obj_id, data_file,
                                  dynamic_model_name = dyn_model,
                                  use_kalman_smoothing= kalman_smoothing,
                                  frames_per_second= fps)
         except:
             print 'object id failed to load (probably no data): ', obj_id
             continue
         
         #print kalman_rows[0]
       
         # couple object ID dictionary with trajectory objects
         traj_id = (str(self.datasets[-1])+'_'+str(obj_id))
         self.trajecs.setdefault(traj_id, Trajectory(kalman_rows, extra, stimulus = self.stimulus, fps = fps, post_type=post_type) )
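
A hedged usage sketch (filename hypothetical): objs may be a single object id or a list; a bare int is wrapped in a list before being converted to a numpy array:

    dataset.load_data('DATA20101010.kalmanized.h5',
                      objs=[17, 42],
                      kalman_smoothing=True,
                      post_type='black')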