Пример #1
0
    def __init__(self, data_central, id_robot, id_agent, num_episodes,
                 cumulative=True, interval_print=5):
        """ Prepares bookkeeping for simulating num_episodes episodes.

        In cumulative mode, episodes already recorded in the log index
        for this robot/agent pair are subtracted from the total to do.
        """
        self.data_central = data_central
        self.id_robot = id_robot
        self.cumulative = cumulative

        if not self.cumulative:
            self.num_episodes_todo = num_episodes
            logger.info('Preparing to do %d episodes.' % 
                        self.num_episodes_todo)
        else:
            index = data_central.get_log_index()
            self.done_before = index.get_episodes_for_robot(id_robot,
                                                            id_agent)
            self.num_episodes_done_before = len(self.done_before)
            self.num_episodes_todo = (num_episodes
                                      - self.num_episodes_done_before)
            logger.info('Preparing to do %d episodes (already done %d).' % 
                        (self.num_episodes_todo,
                         self.num_episodes_done_before))

        # Counters updated as the simulation proceeds.
        self.num_episodes_done = 0
        self.num_observations = 0
        self.num_observations_episode = 0
        self.observations_per_episode = []

        self.interval_print = interval_print
        self.tracker = InAWhile(interval_print)
        self.id_episodes = set()

        # Progress reporting is optional: only if compmake is importable.
        try:
            from compmake import progress
            progress('Simulating episodes', (0, self.num_episodes_todo))
        except ImportError:
            pass
Пример #2
0
 def test_hierarchy_flat(self):
     """ Re-reporting the same task keeps exactly one stack entry. """
     init_progress_tracking(lambda _: None)
     self.assert_stack_len(0)
     for step in (0, 1):
         progress('A', (step, 2))
         self.assert_stack_len(1)
Пример #3
0
 def test_hierarchy_flat(self):
     """ Testing basic case. """
     # Normalized to match the sibling version of this test:
     # double-quoted docstring, anonymous-arg lambda, no trailing spaces.
     init_progress_tracking(lambda _: None)
     self.assert_stack_len(0)
     progress('A', (0, 2))
     self.assert_stack_len(1)
     progress('A', (1, 2))
     self.assert_stack_len(1)
Пример #4
0
 def test_hierarchy_flat(self):
     """ Updating task 'A' twice must not grow the progress stack. """
     init_progress_tracking(lambda _: None)
     self.assert_stack_len(0)
     for done in range(2):
         progress('A', (done, 2))
         self.assert_stack_len(1)
Пример #5
0
def mylongfunction():
    """ Runs two sequential tasks, A then B, reporting progress for each. """
    total = 4
    for task_name in ('Task A', 'Task B'):
        for step in range(total):
            progress(task_name, (step, total))
            time.sleep(wait)
Пример #6
0
    def episode_done(self):
        """ Records that one episode finished: archives the observation
        count for the episode, resets it, and (optionally) reports
        progress via compmake. """
        finished_count = self.num_observations_episode
        self.observations_per_episode.append(finished_count)
        self.num_observations_episode = 0
        self.num_episodes_done = self.num_episodes_done + 1

        # compmake is an optional dependency; silently skip if absent.
        try:
            from compmake import progress
            progress("Simulating episodes",
                     (self.num_episodes_done, self.num_episodes_todo))
        except ImportError:
            pass
def compute_saccade_stats(flydra_db_directory, samples, image, conditions):
    '''
    Computes the stats of an image for the saccades that respect a 
    set of conditions.
     
    db: FlydraDB directory
    samples: list of IDs
    
    condition: saccade table -> true/false selector
    
    Returns a Stats tuple (mean, var, min, max, nsamples), computed in
    two passes: mean/min/max first, then variance around the mean.
    '''
    with safe_flydra_db_open(flydra_db_directory) as db:
        
        progress('Computing stats', (0, 2), 'First pass')
        # first compute the mean
        group_mean = Expectation()
        group_min = None
        group_max = None
        # renamed from 'iter' to avoid shadowing the builtin
        it = saccades_iterate_image('computing mean',
                                    db, samples, image, conditions)
        for sample, sample_saccades, values in it: #@UnusedVariable
            sample_mean = numpy.mean(values, axis=0)
            sample_min = numpy.min(values, axis=0)
            sample_max = numpy.max(values, axis=0)
            if group_min is None:
                # first sample initializes the running min/max
                group_min = sample_min
                group_max = sample_max
            else:
                group_min = numpy.minimum(sample_min, group_min)
                group_max = numpy.maximum(sample_max, group_max)
                
            # weight the sample mean by its number of values
            group_mean.update(sample_mean, len(values))
    
        num_samples = group_mean.num_samples
        group_mean = group_mean.get_value()
    
        group_var = Expectation() 
        progress('Computing stats', (1, 2), 'Second pass')
        it = saccades_iterate_image('computing var',
                                    db, samples, image, conditions)
        for sample, sample_saccades, values in it: #@UnusedVariable
            err = values - group_mean
            sample_var = numpy.mean(numpy.square(err), axis=0)
            group_var.update(sample_var, len(values))
        group_var = group_var.get_value()
        
        result = Stats(mean=group_mean, var=group_var,
                       min=group_min, max=group_max, nsamples=num_samples)
        
        return result
 def data_pass(name):
     # Generator: yields (sample_id, image_values) per sample, keeping
     # only the rows whose linear velocity modulus exceeds 0.1.
     # NOTE(review): closes over 'samples', 'db' and 'image' from an
     # enclosing scope not visible here -- presumably a FlydraDB handle;
     # confirm against the surrounding function.
     for i, id in enumerate(samples):
         progress(name, (i, len(samples)), "Sample %s" % id)
     
         if not (db.has_sample(id) and db.has_table(id, image)):
             raise ValueError('No table "%s" for id %s' % (image, id))
         
         rows = db.get_rows(id)
         data = db.get_table(id, image)
         
         # keep only fast-motion rows
         select = rows[:]['linear_velocity_modulus'] > 0.1
         #select = condition(rows)
         values = data[select]['value']
         
         yield id, values
         # tables are released only after the consumer resumes the generator
         db.release_table(data)
         db.release_table(rows)
def saccades_iterate_image(name, db, samples, image, conditions):
    ''' Iterates over the values of an image corresponding to
        the saccades that respect a given condition.
        
        yields  sample, saccades, image_values 
        
        Samples with no selected saccades are skipped (with a message)
        rather than yielded.
    '''
    
    num_saccades = 0
    num_selected = 0
    for i, id in enumerate(samples):
        progress(name, (i, len(samples)), "Sample %s" % id)
    
        if not db.has_saccades(id):
            raise ValueError('No saccades for %s' % id)
        if not (db.has_sample(id) and db.has_table(id, image)):
            raise ValueError('No table "%s" for id %s' % (image, id))
        
        saccades_table = db.get_saccades(id)
        
        saccades = add_position_information(saccades_table)
        
        data = db.get_table(id, image)
        
        # logical AND of all the conditions evaluated on the saccades
        select = reduce(numpy.logical_and,
                        [c(saccades) for c in conditions])
        
        values = data[select]['value']
        
        if len(values) == 0:
            print ("No saccades selected for %s (out of %d)." % 
                   (id, len(saccades)))
        else:
            yield id, saccades[select], values
        
        num_saccades += len(select)
        num_selected += (select * 1).sum()
        db.release_table(data)
        db.release_table(saccades_table)
    # Guard against ZeroDivisionError when there were no samples/saccades.
    if num_saccades > 0:
        ratio = 100.0 / num_saccades * num_selected
        print("Selected %.2f %% of saccades" % ratio)
Пример #10
0
 def test_hierarchy_flat2(self):
     """ A nested task B under A; finishing A pops B off the stack. """
     data = {}

     def record(x):
         data['stack'] = x

     init_progress_tracking(record)
     self.assert_stack_len(0)
     # (task, status) updates with the expected stack depth after each.
     script = [('A', (0, 2), 1),
               ('B', (0, 2), 2),
               ('B', (1, 2), 2),
               ('A', (1, 2), 1)]
     for task, status, depth in script:
         progress(task, status)
         self.assert_stack_len(depth)
Пример #11
0
    def test_hierarchy_flat2(self):
        """ Stack grows for a nested task and shrinks when the parent
        advances. """
        captured = {}

        init_progress_tracking(lambda s: captured.update(stack=s))

        self.assert_stack_len(0)
        progress('A', (0, 2))
        self.assert_stack_len(1)
        progress('B', (0, 2))
        self.assert_stack_len(2)
        progress('B', (1, 2))
        self.assert_stack_len(2)
        progress('A', (1, 2))
        self.assert_stack_len(1)
Пример #12
0
def compute_mean_generic(db, samples, image, operator):
    ''' 
    Computes a per-sample statistic of an image table, and its
    aggregate over all samples.
    
    db: FlydraDB directory
    samples: list of IDs
    image: name of the table to read
    operator: function mapping the table values to a statistic
    
    Returns a dict with 'samples' (id -> statistic) and 'all'
    (aggregate weighted by table length).
    '''
    db = FlydraDB(db, False)
    try:
        results = { 'samples': {} }
        
        ex = Expectation()
        
        # 'sample_id' instead of 'id' to avoid shadowing the builtin.
        for i, sample_id in enumerate(samples):
            progress('Computing mean %s' % image,
                     (i, len(samples)), "Sample %s" % sample_id)
        
            if not (db.has_sample(sample_id) and db.has_table(sample_id, image)):
                raise ValueError('No table "%s" for id %s' % (image, sample_id))
            
            data = db.get_table(sample_id, image)
            try:
                values = data[:]['value']
                
                this = operator(values)
                
                # weight by the number of rows in the table
                ex.update(this, len(data))
                
                results['samples'][sample_id] = this
            finally:
                # release even if operator() raises
                db.release_table(data)
    
        results['all'] = ex.get_value()
    finally:
        # close the DB even if a sample fails
        db.close()
    
    return results
Пример #13
0
def mylongfunction():
    """ Demonstrates two-level progress reporting: directories at the
    outer level, files at the inner level, in two sequential passes. """
    directories = ['a', 'b', 'c', 'd', 'e']
    n = len(directories)
    nfiles = 3

    passes = [('Processing directories (first)', 'Processing files (a)'),
              ('Processing directories (second)', 'Processing files (b)')]
    for dir_task, file_task in passes:
        for i, d in enumerate(directories):
            progress(dir_task, (i, n), 'Directory %s' % d)
            for k in range(nfiles):
                progress(file_task, (k, nfiles), 'file #%d' % k)
                time.sleep(wait)
Пример #14
0
def mylongfunction():
    """ Two passes over a list of directories, reporting nested
    progress (directories, then files inside each). """
    directories = ['a', 'b', 'c', 'd', 'e']
    n = len(directories)

    def scan(dir_task, file_task):
        # One pass: report each directory, then each of its 3 files.
        for idx, name in enumerate(directories):
            progress(dir_task, (idx, n), 'Directory %s' % name)
            num_files = 3
            for f in range(num_files):
                progress(file_task, (f, num_files), 'file #%d' % f)
                time.sleep(1)

    scan('Processing directories (first)', 'Processing files (a)')
    scan('Processing directories (second)', 'Processing files (b)')
Пример #15
0
def compute_environment_autocorrelation(db, samples, image, maxlag=50,
                                        nsensors=1398):
    '''
    Computes the autocorrelation of each sensor's values across all
    samples, processing sensors in blocks to bound memory use.
    
    db: FlydraDB directory
    samples: list of sample IDs
    image: name of the table holding per-sensor values
    maxlag: maximum lag for the autocorrelation
    nsensors: number of sensors (columns) in the image table
             (default 1398, previously hard-coded)
    
    Returns a dict with 'results' (nsensors x (2*maxlag+1)) and 'lags'.
    '''
    results = numpy.ndarray(shape=(nsensors, 2 * maxlag + 1))
    lags = None  # set by the first xcorr() call below
    
    db = FlydraDB(db, create=False)
    try:
        block_size = 50
        num_blocks = int(numpy.ceil(nsensors * 1.0 / block_size))
        for b in range(num_blocks):
            start = block_size * b
            stop = min(start + block_size, nsensors)
            
            progress('Computing autocorrelation', (b, num_blocks))
            
            # data[i] collects, per sensor, the chunks from every sample
            data = [[] for i in range(nsensors)]
            
            for k, sample in enumerate(samples):
                progress('getting data', (k, len(samples)), sample)
                table = db.get_table(sample, image)
                try:
                    chunk = (table[:]['value'][:, start:stop]).copy()
                    for j, i in enumerate(range(start, stop)):
                        data[i].append(chunk[:, j])
                finally:
                    # release even if slicing/copying raises
                    db.release_table(table)
            
            for j, i in enumerate(range(start, stop)):
                progress('Computing correlation', (j, stop - start))
                x = numpy.concatenate(data[i])
                corr, lags = xcorr(x, maxlag=maxlag)
                assert len(lags) == 2 * maxlag + 1
                results[i, :] = corr
    finally:
        # always close the DB, even on failure
        db.close()
    
    data = {
        'results': results,
        'lags': lags
    }
    
    return data
Пример #16
0
def enumerate_data(db, samples, interval_function, image,
                   signal, signal_component,
                   signal_op, what='enumerate_data'):
    '''
    Iterates over samples, yielding (id, actions, image_values) for
    each sample with usable data.
    
    Samples missing the rows table or the image table, or for which
    interval_function fails or selects nothing, are skipped with a
    warning.
    
    Bug fixed: the original released only rows_table on the skip
    paths after acquiring image_table, leaking the image table.
    '''
    for k, id in enumerate(samples):
        progress(what, (k, len(samples)), "Sample %s" % id)
        
        if not db.has_rows(id):
            logger.warning('Could not find rows table for %s; skipping.' % 
                           (id))
            continue
        
        if not db.has_table(id, image):
            logger.warning('Could not find table "%s" for %s; skipping.' % 
                           (image, id))
            continue
        
        rows_table = db.get_rows(id)
        image_table = db.get_table(id, image)
        try:
            image_values = image_table[:]['value']
            
            try:
                interval = interval_function(db, id, rows_table) 
            except Exception as e:
                logger.warning('Cannot compute interval for sample %s: %s '
                               % (id, e))
                continue
            
            if numpy.logical_not(interval).all():
                logger.warning('Sample %s with interval function "%s" '
                               'gives empty subset; skipping. ' % 
                               (id, interval_function.__name__))
                continue
            
            if False:
                percentage = numpy.mean(interval * 1.0) * 100
                logger.info('Sample %s: function "%s" selects %.1f%% of data.' % 
                            (id, interval_function.__name__, percentage)) 
            
            # subset everything
            image_values = image_values[interval]
            rows = rows_table[interval]
            
            # get the action vector
            actions = extract_signal(rows, signal, signal_component)
            
            # remove the tails
            actions, removed_percentage = cut_tails(actions, percent=0.3)
            
            if removed_percentage > 1:
                logger.warning('Too much tail removed (%.3f%%) for %s/%s,' % 
                               (removed_percentage, id, signal))
                        
            actions = signal_op(actions)
        
            yield id, actions, image_values
        finally:
            # release both tables on every path (skip, error, or normal)
            db.release_table(rows_table)
            db.release_table(image_table)
def compute_stats(flydra_db_directory, samples, image):
    '''
    Computes the stats of an image.
    
    *db*
      FlydraDB directory
    
    *samples*
      list of IDs
      
    image: name of a table
    
    Returns a Stats tuple (mean, var, min, max, nsamples) computed in
    two passes over the data: mean/min/max first, then variance.
    '''
    
    with safe_flydra_db_open(flydra_db_directory) as db:
         
        def data_pass(name):
            # Generator: yields (sample_id, values) per sample, keeping
            # only rows with linear velocity modulus above 0.1.
            for i, id in enumerate(samples):
                progress(name, (i, len(samples)), "Sample %s" % id)
            
                if not (db.has_sample(id) and db.has_table(id, image)):
                    raise ValueError('No table "%s" for id %s' % (image, id))
                
                rows = db.get_rows(id)
                data = db.get_table(id, image)
                
                # keep only fast-motion rows
                select = rows[:]['linear_velocity_modulus'] > 0.1
                #select = condition(rows)
                values = data[select]['value']
                
                yield id, values
                # released only when the consumer resumes the generator
                db.release_table(data)
                db.release_table(rows)
    
        progress('Computing stats', (0, 2), 'First pass')
        # first compute the mean
        group_mean = Expectation()
        group_min = None
        group_max = None
        for sample, values in data_pass('computing mean'): #@UnusedVariable
            sample_mean = numpy.mean(values, axis=0)
            sample_min = numpy.min(values, axis=0)
            sample_max = numpy.max(values, axis=0)
            if group_min is None:
                # first sample initializes the running min/max
                group_min = sample_min
                group_max = sample_max
            else:
                group_min = numpy.minimum(sample_min, group_min)
                group_max = numpy.maximum(sample_max, group_max)
                
            # weight the sample mean by its number of values
            group_mean.update(sample_mean, len(values))
    
        num_samples = group_mean.num_samples
        group_mean = group_mean.get_value()
    
        group_var = Expectation() 
        progress('Computing stats', (1, 2), 'Second pass')
        # second pass: variance around the global mean
        for sample, values in data_pass('computing var'): #@UnusedVariable
            err = values - group_mean
            sample_var = numpy.mean(numpy.square(err), axis=0)
            group_var.update(sample_var, len(values))
        group_var = group_var.get_value()
        
        return Stats(mean=group_mean, var=group_var,
                       min=group_min, max=group_max, nsamples=num_samples)
def bet_on_flies(flydra_db_directory, samples, image, saccades_set):
    '''
    Evaluates a family of matched-filter kernels on the saccades of
    the given samples.
    
    For each kernel, collects the sign of each saccade, the kernel
    response, and the overlap with the kernel's support.
    
    Returns dict: kernel name -> {'signs', 'response', 'kernel',
    'overlap'}.
    
    Fixes vs. original: the inner per-image loop no longer reuses the
    outer enumerate index 'i'; 'dir' no longer shadows the builtin;
    the pickle file handle is closed.
    '''
    kernels = {}
    for degrees in [15, 30, 45, 60, 90, 180]:
        kernels['mf%d' % degrees] = create_matched_filter(degrees, [-20, 20], False)

    results = {}
    with safe_flydra_db_open(flydra_db_directory) as db:
        conditions = [saccades_set.args]
        
        # looking for the optimal dividing plane
        ex = {-1: Expectation(), +1: Expectation()}
        for sample, sample_saccades, image_values in saccades_iterate_image(#@UnusedVariable
                'computing optimal kernel', db, samples, image, conditions):
            for s in [-1, +1]:
                values = image_values[sample_saccades['sign'] == s]
                
                if len(values) > 0:
                    ex[s].update(values.mean(axis=0), len(values))
        
        kernels['optimal'] = ex[+1].get_value() - ex[-1].get_value()
    
        # 'images_dir' instead of 'dir', which shadows the builtin.
        images_dir = '${SNPENV_DATA}/flydra_db/out/saccade_view_joint_analysis/lasvegas/images/'
        others = {
         'center': 'lasvegas_contrast_w_posts_center:optimal:kernel.pickle',
         'border': 'lasvegas_contrast_w_posts_border:optimal:kernel.pickle',
         'allsac': 'lasvegas_contrast_w_posts_allsac:optimal:kernel.pickle'
        }
        
        for name, f in others.items():
            filename = os.path.expandvars(os.path.join(images_dir, f))
            # Close the pickle file instead of leaking the handle.
            handle = open(filename, 'rb')
            try:
                kernel = cPickle.load(handle)
            finally:
                handle.close()
            mask_deg = create_matched_filter(75, [-90, 90], True)
            kernel = kernel * numpy.abs(mask_deg)
            
            kernels['common_%s' % name] = kernel
        
        for i, kernel_name in enumerate(kernels.keys()):
            progress('betting', (i, len(kernels)), kernel_name)
            kernel = kernels[kernel_name]
            
            signs = []
            response = []
            overlap = []
            for sample, sample_saccades, image_values in saccades_iterate_image(#@UnusedVariable
                'computing response', db, samples, image, conditions):
                    
                s = sample_saccades[:]['sign']
                    
                signs.extend(s)
                
                # 'j' so we do not clobber the outer enumerate index 'i'
                for j in range(len(image_values)):
                    
                    response_j = (image_values[j, :] * kernel).sum()
                    response.append(response_j)
                    
                    overlap_j = (image_values[j, :] * 
                                 numpy.abs(kernel)).sum()
                    overlap.append(overlap_j)
        
            signs = numpy.array(signs)
            response = numpy.array(response)
            results[kernel_name] = { 'signs': signs, 'response': response,
                                     'kernel': kernel, 'overlap': overlap}

    return results