def _collate_results(self, p):
    """
    Collate the measured tuning-curve responses into a Layout.

    For each measurement source a HoloMap (indexed by time, duration,
    the outer feature dimensions and the x_axis value) is populated
    with one response Image per x_axis value.  If requested, the raw
    stored responses are attached to the Layout as well.
    """
    results = Layout()
    timestamp = self.metadata.timestamp
    axis_name = p.x_axis.capitalize()
    # The feature object corresponding to the requested x-axis.
    axis_feature = [f for f in self.features if f.name.lower() == p.x_axis][0]
    curve_label = ''.join([p.measurement_prefix, axis_name, 'Tuning'])
    dimensions = [features.Time, features.Duration] + [f for f in self.outer] + [axis_feature]
    pattern_dimensions = self.outer + self.inner
    pattern_dim_label = '_'.join(f.name.capitalize() for f in pattern_dimensions)

    for label in self.measurement_product:
        # Deconstruct label into source name and feature_values
        name = label[0]
        f_vals = label[1:]

        # Get data and metadata from the DistributionMatrix objects
        dist_matrix = self._featureresponses[name][f_vals][p.x_axis]
        curve_responses = dist_matrix.distribution_matrix
        output_metadata = self.metadata.outputs[name]
        rows, cols = output_metadata['shape']

        # Create top level NdMapping indexing over time, duration, the outer
        # feature dimensions and the x_axis dimension
        if (curve_label, name) not in results:
            vmap = HoloMap(key_dimensions=dimensions,
                           group=curve_label, label=name)
            vmap.metadata = AttrDict(**output_metadata)
            results.set_path((curve_label, name), vmap)

        metadata = AttrDict(timestamp=timestamp, **output_metadata)

        # Populate the ViewMap with measurements for each x value.
        # NOTE: .keys() (not the Python-2-only .iterkeys()) so this also
        # runs on Python 3.
        for x in curve_responses[0, 0]._data.keys():
            y_axis_values = np.zeros(output_metadata['shape'], activity_dtype)
            for i in range(rows):
                for j in range(cols):
                    y_axis_values[i, j] = curve_responses[i, j].get_value(x)
            key = (timestamp,) + f_vals + (x,)
            im = Image(y_axis_values, output_metadata['bounds'], label=name,
                       group=' '.join([curve_label, 'Response']),
                       value_dimensions=['Response'])
            im.metadata = metadata.copy()
            results[(curve_label, name)][key] = im

        if p.store_responses:
            info = (p.pattern_generator.__class__.__name__,
                    pattern_dim_label, 'Response')
            results.set_path(('%s_%s_%s' % info, name), self._responses[name])

    return results
def grid(self, rows=11, cols=11, lbrt=None, situated=False, **kwargs):
    """
    Return a GridSpace of rows x cols connection-field views sampled
    evenly over the destination sheet, or over the region given by
    ``lbrt`` (snapped to the nearest cell centers).
    """
    dest = self.dest
    l, b, r, t = dest.bounds.lbrt()
    if lbrt is None:
        # Pull the sampling region in by half a matrix cell on every
        # side so the linspace endpoints fall on cell centers.
        half_w = ((r - l) / dest.xdensity) / 2.
        half_h = ((t - b) / dest.ydensity) / 2.
        l, b, r, t = l + half_w, b + half_h, r - half_w, t - half_h
    else:
        l, b = dest.closest_cell_center(lbrt[0], lbrt[1])
        r, t = dest.closest_cell_center(lbrt[2], lbrt[3])

    xs, ys = np.meshgrid(np.linspace(l, r, cols), np.linspace(b, t, rows))
    items = dict(((cx, cy), self.view(cx, cy, situated=situated, **kwargs))
                 for cx, cy in zip(xs.flat, ys.flat))

    cf_grid = GridSpace(items, label=' '.join([dest.name, self.name]),
                        group='CFs')
    cf_grid.metadata = AttrDict(info=self.name,
                                proj_src_name=self.src.name,
                                proj_dest_name=dest.name,
                                timestamp=self.src.simulation.time(),
                                **kwargs)
    return cf_grid
def update_sheet_activity(sheet_name, force=False):
    """
    Update the '_activity_buffer' ViewMap for a given sheet by name.

    If force is False and the existing Activity Image isn't stale,
    the existing view is returned.
    """
    buffer_name = 'ActivityBuffer'
    sheet = topo.sim.objects(Sheet)[sheet_name]
    now = topo.sim.time()
    meta = AttrDict(precedence=sheet.precedence,
                    row_precedence=sheet.row_precedence,
                    src_name=sheet.name,
                    shape=sheet.activity.shape,
                    timestamp=now)

    def snapshot():
        # Capture the current activity array as an annotated Image.
        img = Image(np.array(sheet.activity), sheet.bounds)
        img.metadata = meta
        return img

    view = sheet.views.Maps.get(buffer_name, False)
    if not view:
        # No buffer yet: create one keyed by simulation time.
        view = HoloMap((now, snapshot()), key_dimensions=[Time])
        view.metadata = meta
        sheet.views.Maps[buffer_name] = view
    elif force or view.range('Time')[1] < now:
        # Existing buffer is stale (or a refresh was forced).
        view[now] = snapshot()
    return view
def _cf_grid(self, shape=None, **kwargs):
    "Create GridSpace with the correct metadata."
    # NOTE(review): the `shape` parameter is currently unused here;
    # kept for interface compatibility with callers.
    empty_grid = GridSpace({})
    empty_grid.metadata = AttrDict(info=self.name,
                                   proj_src_name=self.src.name,
                                   proj_dest_name=self.dest.name,
                                   timestamp=self.src.simulation.time(),
                                   **kwargs)
    return empty_grid
def __getitem__(self, coords):
    """Slice the current activity as an annotated Activity Image."""
    annotations = AttrDict(precedence=self.precedence,
                           row_precedence=self.row_precedence,
                           timestamp=self.simulation.time())
    snapshot = Image(self.activity.copy(), self.bounds,
                     label=self.name, group='Activity')
    sliced = snapshot[coords]
    sliced.metadata = annotations
    return sliced
def __getitem__(self, coords):
    """Slice the current activity as an annotated Matrix view."""
    annotations = AttrDict(precedence=self.precedence,
                           row_precedence=self.row_precedence,
                           timestamp=self.simulation.time())
    snapshot = Matrix(self.activity.copy(), self.bounds,
                      label=self.name + ' Activity', value='Activity')
    sliced = snapshot[coords]
    sliced.metadata = annotations
    return sliced
def view(self, sheet_x, sheet_y, timestamp=None, situated=False, **kwargs):
    """
    Return a single connection field Image, for the unit located
    nearest to sheet coordinate (sheet_x, sheet_y).

    When ``situated`` is True the weights are embedded at their proper
    location inside a zero array covering the whole source sheet;
    otherwise only the CF's own bounding region is returned.
    """
    if timestamp is None:
        timestamp = self.src.simulation.time()
    time_dim = Dimension("Time", type=param.Dynamic.time_fn.time_type)

    row, col = self.dest.sheet2matrixidx(sheet_x, sheet_y)
    cf = self.cfs[row, col]
    r1, r2, c1, c2 = cf.input_sheet_slice
    full_bounds = self.src.bounds
    roi_bounds = cf.get_bounds(self.src)

    if situated:
        # Embed the CF weights into a full-sheet-sized array of zeros.
        weights = np.zeros(self.src.activity.shape, dtype=np.float64)
        weights[r1:r2, c1:c2] = cf.weights.copy()
        bounds = full_bounds
    else:
        weights = cf.weights.copy()
        bounds = roi_bounds

    cf_view = CFView(weights, bounds, situated_bounds=full_bounds,
                     input_sheet_slice=(r1, r2, c1, c2),
                     roi_bounds=roi_bounds, label=self.name,
                     group='CF Weight')
    cf_view.metadata = AttrDict(timestamp=timestamp)

    vmap = HoloMap((timestamp, cf_view), kdims=[time_dim])
    vmap.metadata = AttrDict(coords=(sheet_x, sheet_y),
                             dest_name=self.dest.name,
                             precedence=self.src.precedence,
                             proj_name=self.name,
                             src_name=self.src.name,
                             row_precedence=self.src.row_precedence,
                             timestamp=timestamp, **kwargs)
    return vmap
def projection_view(self, timestamp=None):
    """Returns the activity in a single projection"""
    if timestamp is None:
        timestamp = self.src.simulation.time()
    act_image = Image(self.activity.copy(), self.dest.bounds,
                      label=self.name, group='Activity')
    act_image.metadata = AttrDict(proj_src_name=self.src.name,
                                  precedence=self.src.precedence,
                                  proj_name=self.name,
                                  row_precedence=self.src.row_precedence,
                                  src_name=self.dest.name,
                                  timestamp=timestamp)
    return act_image
def projection_view(self, timestamp=None):
    """Returns the activity in a single projection"""
    if timestamp is None:
        timestamp = self.src.simulation.time()
    act_view = Matrix(self.activity.copy(), self.dest.bounds,
                      label='Activity', title='%s {label}' % self.name)
    act_view.metadata = AttrDict(proj_src_name=self.src.name,
                                 precedence=self.src.precedence,
                                 proj_name=self.name,
                                 row_precedence=self.src.row_precedence,
                                 src_name=self.dest.name,
                                 timestamp=timestamp)
    return act_view
def __getitem__(self, coords):
    """Slice the sheet activity, stacking per-channel data when present."""
    annotations = AttrDict(precedence=self.precedence,
                           row_precedence=self.row_precedence,
                           timestamp=self.simulation.time())
    # Stack channel arrays depth-wise when channel data exists,
    # otherwise use a copy of the plain activity array.
    data = (np.dstack(self._channel_data) if self._channel_data
            else self.activity.copy())
    sliced = Image(data, self.bounds, label=self.name + ' Activity',
                   group='Activity')[coords]
    sliced.metadata = annotations
    return sliced
def _initialize_featureresponses(self, p):
    """
    Create an empty DistributionMatrix for each feature and each
    measurement source, in addition to activity buffers and if
    requested, the full matrix.
    """
    self._apply_cmd_overrides(p)
    # Metadata starts from the explicit parameter and is extended by
    # each registered metadata function in turn.
    self.metadata = AttrDict(p.metadata)
    for fn in p.metadata_fns:
        self.metadata.update(fn(p.inputs, p.outputs))

    # Features are split depending on whether a preference_fn is supplied
    # to collapse them
    self.outer = [f for f in self.features if f.preference_fn is None]
    self.inner = [f for f in self.features if f.preference_fn is not None]
    # Unzip (name, values) pairs for the outer features; both default
    # to empty tuples when there are no outer features.
    self.outer_names, self.outer_vals = [(), ()] if not len(self.outer)\
        else zip(*[(f.name.lower(), f.values) for f in self.outer])
    dimensions = [features.Duration] + list(self.outer)

    # Cartesian product of output names, durations and all outer
    # feature values: one entry per independent measurement.
    self.measurement_product = [mp for mp in product(self.metadata.outputs.keys(),
                                                     p.durations, *self.outer_vals)]

    # NOTE: the factory closes over `dimensions`, so every NdMapping
    # created by the defaultdicts shares the same key dimensions.
    ndmapping_fn = lambda: NdMapping(key_dimensions=dimensions)
    self._featureresponses = defaultdict(ndmapping_fn)
    self._activities = defaultdict(ndmapping_fn)

    if p.store_responses:
        # Full responses additionally index by time and inner features.
        response_dimensions = [features.Time]+dimensions+list(self.inner)
        response_map_fn = lambda: HoloMap(key_dimensions=response_dimensions)
        self._responses = defaultdict(response_map_fn)

    for label in self.measurement_product:
        out_label = label[0]
        output_metadata = self.metadata.outputs[out_label]
        f_vals = label[1:]

        # One zeroed activity buffer per (output, feature-value) combo.
        self._activities[out_label][f_vals] = np.zeros(output_metadata['shape'])

        # One DistributionMatrix per inner (collapsible) feature.
        self._featureresponses[out_label][f_vals] = {}
        for f in self.inner:
            self._featureresponses[out_label][f_vals][f.name.lower()] = \
                DistributionMatrix(output_metadata['shape'],
                                   axis_range=f.range, cyclic=f.cyclic)
def __call__(self, **params):
    """
    Present the configured (or explicitly supplied) input patterns,
    measure the responses and return the collated results.
    """
    p = ParamOverrides(self, params, allow_extra_keywords=True)
    self._apply_cmd_overrides(p)
    self.metadata = AttrDict(p.metadata)
    for fn in p.metadata_fns:
        self.metadata.update(fn(p.inputs, p.outputs))

    # list() so the names survive as reusable sequences on Python 3,
    # where dict.keys() returns a view.
    output_names = list(self.metadata.outputs.keys())
    input_names = list(self.metadata.inputs.keys())

    inputs = dict.fromkeys(input_names)
    if p.input_patterns:
        for k, ip in p.input_patterns.items():
            inputs[k] = ip
        # Any input left without an explicit pattern gets a blank
        # Constant pattern.  (Message previously read "defaultingto";
        # space restored.)
        for name in [k for k, ip in inputs.items() if ip is None]:
            self.warning("No pattern specified for input %s, defaulting "
                         "to blank Constant pattern." % name)
            inputs[name] = imagen.Constant(scale=0)
    else:
        for k in inputs.keys():
            inputs[k] = copy.deepcopy(p.pattern_generator)

    for f in p.pre_presentation_hooks: f()
    responses = p.pattern_response_fn(inputs, output_names,
                                      durations=p.durations)
    for f in p.post_presentation_hooks: f()

    # next(iter(...)) instead of values()[0]: dict views are not
    # indexable on Python 3.
    label = next(iter(inputs.values())).__class__.__name__
    results = self._collate_results(responses, label)

    if p.measurement_storage_hook:
        p.measurement_storage_hook(results)

    return results
def topo_metadata_fn(input_names=None, output_names=None):
    """
    Return the shapes of the specified GeneratorSheets and measurement
    sheets, or if none are specified return all that can be found in
    the simulation.
    """
    # None sentinels instead of mutable list defaults; an empty list
    # still means "use all available", preserving the old behavior.
    input_names = [] if input_names is None else input_names
    output_names = [] if output_names is None else output_names

    metadata = AttrDict()
    metadata['timestamp'] = topo.sim.time()

    generator_sheets = topo.sim.objects(GeneratorSheet)
    all_sheets = dict((n, s) for n, s in topo.sim.objects(Sheet).items())
    measurement_sheets = dict((n, s) for n, s in topo.sim.objects(Sheet).items()
                              if hasattr(s, 'measure_maps') and s.measure_maps)
    projections = dict((conn.name, conn) for conn in topo.sim.connections())

    if not input_names:
        input_names = generator_sheets.keys()

    metadata['inputs'] = {}
    for i in input_names:
        if i in generator_sheets:
            gs = generator_sheets[i]
            metadata['inputs'][i] = {'bounds': gs.bounds,
                                     'precedence': gs.precedence,
                                     'row_precedence': gs.row_precedence,
                                     'shape': gs.shape,
                                     'src_name': gs.name}
        else:
            topo.sim.warning('Input sheet {0} not found.'.format(i))

    if not output_names:
        output_names = measurement_sheets.keys()

    metadata['outputs'] = {}
    for o in output_names:
        if o in all_sheets:
            s = all_sheets[o]
            metadata['outputs'][o] = {'bounds': s.bounds,
                                      'precedence': s.precedence,
                                      'row_precedence': s.row_precedence,
                                      'shape': s.shape,
                                      'src_name': s.name}
        elif o in projections:
            p = projections[o]
            metadata['outputs'][o] = {'bounds': p.dest.bounds,
                                      'precedence': p.dest.precedence,
                                      'row_precedence': p.dest.row_precedence,
                                      'shape': p.dest.shape,
                                      'dest_name': p.dest.name,
                                      'src_name': p.src.name}
        else:
            topo.sim.warning('Output sheet {0} not found.'.format(o))

    return metadata
class measure_response(FeatureResponses):
    """
    Present the configured input pattern(s) and collate the raw
    responses of each output into a Layout of HoloMaps indexed by
    time and duration.
    """

    input_patterns = param.Dict(default={}, doc="""
        Assigns patterns to different inputs overriding the
        pattern_generator parameter. If all inputs have not been
        assigned a pattern, remaining inputs will be presented a blank
        pattern.""")

    pattern_generator = param.Callable(default=Gaussian(), instantiate=True, doc="""
        Callable object that will generate input patterns coordinated
        using a list of meta parameters.""")

    def __call__(self, **params):
        """Run the measurement and return the collated results."""
        p = ParamOverrides(self, params, allow_extra_keywords=True)
        self._apply_cmd_overrides(p)
        self.metadata = AttrDict(p.metadata)
        for fn in p.metadata_fns:
            self.metadata.update(fn(p.inputs, p.outputs))

        # list() so these survive as reusable sequences on Python 3.
        output_names = list(self.metadata.outputs.keys())
        input_names = list(self.metadata.inputs.keys())

        inputs = dict.fromkeys(input_names)
        if p.input_patterns:
            for k, ip in p.input_patterns.items():
                inputs[k] = ip
            # Inputs without an explicit pattern get a blank Constant.
            # (Message previously rendered as "defaultingto"; space
            # restored.)
            for name in [k for k, ip in inputs.items() if ip is None]:
                self.warning("No pattern specified for input %s, defaulting "
                             "to blank Constant pattern." % name)
                inputs[name] = imagen.Constant(scale=0)
        else:
            for k in inputs.keys():
                inputs[k] = copy.deepcopy(p.pattern_generator)

        for f in p.pre_presentation_hooks: f()
        responses = p.pattern_response_fn(inputs, output_names,
                                          durations=p.durations)
        for f in p.post_presentation_hooks: f()

        # next(iter(...)) instead of values()[0]: dict views are not
        # indexable on Python 3.
        label = next(iter(inputs.values())).__class__.__name__
        results = self._collate_results(responses, label)

        if p.measurement_storage_hook:
            p.measurement_storage_hook(results)

        return results

    def _collate_results(self, responses, label):
        """
        Collate raw responses into a Layout of HoloMaps, one per
        output, keyed by (time, duration).
        """
        time = self.metadata.timestamp
        dims = [f.Time, f.Duration]
        response_label = label + ' Response'

        results = Layout()
        # Loop variable renamed from `label` to avoid shadowing the
        # method argument.
        for resp_key, response in responses.items():
            name, duration = resp_key
            path = (response_label.replace(' ', ''), name)
            im_label = ' '.join([name, response_label])
            metadata = self.metadata['outputs'][name]
            if path not in results:
                vmap = HoloMap(key_dimensions=dims)
                vmap.metadata = AttrDict(**metadata)
                results.set_path(path, vmap)
            im = Image(response, metadata['bounds'], label=im_label,
                       group='Activity')
            im.metadata = AttrDict(timestamp=time)
            results[path][(time, duration)] = im
        return results

    def _apply_cmd_overrides(self, p):
        """Forward extra keywords to the pattern_response_fn."""
        super(measure_response, self)._apply_cmd_overrides(p)
        for override, value in p.extra_keywords().items():
            if override in p.pattern_response_fn.params():
                p.pattern_response_fn.set_param(override, value)
            else:
                self.warning('%s not a parameter of measure_response '
                             'or the pattern_response_fn.' % override)
def setUp(self):
    ### Simple case: we only pass a dictionary to Plot()
    ### that does not belong to a Sheet:
    views = {}
    time = 0
    metadata = AttrDict(timestamp=time)

    # NOTE: dtype uses the builtin `float` (== np.float64) because the
    # `np.float` alias was deprecated in NumPy 1.20 and later removed.

    ### SheetView1:
    ### Find a way to assign randomly the matrix.
    self.matrix1 = np.zeros((10,10),dtype=float) + np.random.random((10,10))
    self.bounds1 = BoundingBox(points=((-0.5,-0.5),(0.5,0.5)))
    sv = Matrix(self.matrix1, self.bounds1)
    sv.metadata=metadata
    self.sheet_view1 = NdMapping((None, sv))
    self.sheet_view1.metadata = AttrDict(src_name='TestInputParam',
                                         precedence=0.1, row_precedence=0.1,
                                         cyclic_range=None, timestamp=time)
    self.key1 = 'SV1'
    views[self.key1] = self.sheet_view1

    ### SheetView2:
    ### Find a way to assign randomly the matrix.
    self.matrix2 = np.zeros((10,10),dtype=float) + 0.3
    self.bounds2 = BoundingBox(points=((-0.5,-0.5),(0.5,0.5)))
    sv = Matrix(self.matrix2, self.bounds2)
    sv.metadata=metadata
    self.sheet_view2 = NdMapping((None, sv))
    self.sheet_view2.metadata = AttrDict(src_name='TestInputParam',
                                         precedence=0.2, row_precedence=0.2,
                                         cyclic_range=None, timestamp=time)
    self.key2 = 'SV2'
    views[self.key2] = self.sheet_view2

    ### SheetView3:
    ### Find a way to assign randomly the matrix.
    self.matrix3 = np.zeros((10,10),dtype=float) + np.random.random((10,10))
    self.bounds3 = BoundingBox(points=((-0.5,-0.5),(0.5,0.5)))
    sv = Matrix(self.matrix3, self.bounds3)
    sv.metadata=metadata
    self.sheet_view3 = NdMapping((None, sv))
    self.sheet_view3.metadata = AttrDict(src_name='TestInputParam',
                                         precedence=0.3, row_precedence=0.3,
                                         cyclic_range=None, timestamp=time)
    self.key3 = 'SV3'
    views[self.key3] = self.sheet_view3

    ### SheetView4: for testing clipping + different bounding box
    ### Find a way to assign randomly the matrix.
    self.matrix4 = np.zeros((10,10),dtype=float) + 1.6
    self.bounds4 = BoundingBox(points=((-0.7,-0.7),(0.7,0.7)))
    sv = Matrix(self.matrix4, self.bounds4)
    sv.metadata=metadata
    self.sheet_view4 = NdMapping((None, sv))
    self.sheet_view4.metadata = AttrDict(src_name='TestInputParam',
                                         precedence=0.4, row_precedence=0.4,
                                         cyclic_range=None, timestamp=time)
    self.key4 = 'SV4'
    views[self.key4] = self.sheet_view4

    self.view_dict = {'Strength': views, 'Hue': views, 'Confidence': views}

    ### JCALERT! for the moment we can only pass a triple when creating plot
    ### adding more sheetView to test when plot will be fixed for accepting
    ### as much as you want.

    # plot0: empty plot + no sheetviewdict passed: error or empty plot?
    ### JCALERT! It has to be fixed what to do in this case in plot..
    ### disabled test for the moment.
    #self.plot0 = Plot((None,None,None),None,name='plot0')
    ### CATCH EXCEPTION

    plot_channels1 = {'Strength':None,'Hue':None,'Confidence':None}
    # plot1: empty plot
    self.plot1 = make_template_plot(plot_channels1,self.view_dict,density=10.0,name='plot1')

    plot_channels2 = {'Strength':self.key1,'Hue':None,'Confidence':None}
    # plot2: sheetView 1, no normalize, no clipping
    self.plot2 = make_template_plot(plot_channels2,self.view_dict,density=10.0,name='plot2')

    plot_channels3 = {'Strength':self.key1,'Hue':self.key2,'Confidence':None}
    # plot3: sheetView 1+2, no normalize, no clipping
    self.plot3 = make_template_plot(plot_channels3,self.view_dict,density=10.0,name='plot3')

    plot_channels4 = {'Strength':self.key1,'Hue':self.key2,'Confidence':self.key3}
    # plot4: sheetView 1+2+3, no normalize , no clipping
    self.plot4 = make_template_plot(plot_channels4,self.view_dict,density=10.0,name='plot4')

    plot_channels5 = {'Strength':self.key1,'Hue':None,'Confidence':self.key3}
    # plot5: sheetView 1+3, no normalize, no clipping
    self.plot5 = make_template_plot(plot_channels5,self.view_dict,density=10.0,name='plot5')

    plot_channels6 = {'Strength':None,'Hue':self.key2,'Confidence':self.key3}
    # plot6: sheetView 2+3, no normalize , no clipping
    self.plot6 = make_template_plot(plot_channels6,self.view_dict,density=10.0,name='plot6')

    plot_channels7 = {'Strength':self.key4,'Hue':self.key2,'Confidence':self.key3}
    # plot7: sheetView 1+2+3, no normalize , clipping
    self.plot7 = make_template_plot(plot_channels7,self.view_dict,density=10.0,name='plot7')

    plot_channels8 = {'Strength':self.key1,'Hue':self.key2,'Confidence':self.key3}
    # plot8: sheetView 1+2+3, normalize , no clipping
    self.plot8 = make_template_plot(plot_channels8,self.view_dict,density=10.0,normalize=True,name='plot8')

    ### JCALERT! FOR THE MOMENT I TAKE THE DEFAULT FOR NORMALIZE.
    ### WE WILL SEE IF IT REMAINS IN PLOT FIRST.

    ### also makes a sheet to test realease_sheetviews
    self.sheet = Sheet()
    self.sheet.views.Maps[self.key1]=self.sheet_view1
    self.sheet.views.Maps[self.key2]=self.sheet_view2
    self.sheet.views.Maps[self.key3]=self.sheet_view3
    self.sheet.views.Maps[self.key4]=self.sheet_view4
    plot_channels9 = {'Strength':self.key1,'Hue':self.key2,'Confidence':self.key3}
    self.plot9 = make_template_plot(plot_channels9,self.sheet.views.Maps,density=10.0,name='plot9')
class FeatureResponses(PatternDrivenAnalysis):
    """
    Systematically vary input pattern feature values and collate the
    responses.

    A DistributionMatrix for each measurement source and feature is
    created.  The DistributionMatrix stores the distribution of
    activity values for that feature.  For instance, if the features
    to be tested are orientation and phase, we will create a
    DistributionMatrix for orientation and a DistributionMatrix for
    phase for each measurement source.  The orientation and phase of
    the input are then systematically varied (when measure_responses
    is called), and the responses of all units from a measurement
    source to each pattern are collected into the DistributionMatrix.

    The resulting data can then be used to plot feature maps and
    tuning curves, or for similar types of feature-based analyses.
    """

    cmd_overrides = param.Dict(default={}, doc="""
        Dictionary used to overwrite parameters on the
        pattern_response_fn.""")

    durations = param.List(default=[1.0], doc="""Times after presentation,
        when a measurement is taken.""")

    inputs = param.List(default=[], doc="""Names of the input supplied to
        the metadata_fns to filter out desired inputs.""")

    metadata_fns = param.HookList(default=[], instantiate=False, doc="""
        Interface functions for metadata. Should return a dictionary
        that at a minimum must contain the name and dimensions of the
        inputs and outputs for pattern presentation and response
        measurement.""")

    metafeature_fns = param.HookList(default=[], doc="""
        Metafeature functions can be used to coordinate lower level
        features across input devices or depending on a metafeature
        set on the function itself.""")

    measurement_prefix = param.String(default="", doc="""
        Prefix to add to the name under which results are stored.""")

    measurement_storage_hook = param.Callable(default=None, instantiate=True, doc="""
        Interface to store measurements after they have been
        completed.""")

    outputs = param.List(default=[], doc="""
        Names of the output source supplied to metadata_fns to filter
        out desired outputs.""")

    static_features = param.Dict(default={}, doc="""
        Dictionary containing name value pairs of features, which are
        to be held constant across all measurements.""")

    pattern_generator = param.Callable(instantiate=True, default=None, doc="""
        Defines the input pattern to be presented.""")

    pattern_response_fn = param.Callable(default=None, instantiate=True, doc="""
        Presenter command responsible for presenting the input patterns
        provided to it and returning the response for the requested
        measurement sources.""")

    repetitions = param.Integer(default=1, bounds=(1, None), doc="""
        How many times each stimulus will be presented.

        Each stimulus is specified by a particular feature combination,
        and need only be presented once if the network has no other
        source of variability.  If results differ for each presentation
        of an identical stimulus (e.g. due to intrinsic noise), then
        this parameter can be increased so that results will be an
        average over the specified number of repetitions.""")

    store_responses = param.Boolean(default=False, doc="""
        Determines whether or not to return the full set of responses
        to the presented patterns.""")

    # Replaced per-instance by an AttrDict during initialization.
    metadata = {}

    __abstract = True

    def _initialize_featureresponses(self, p):
        """
        Create an empty DistributionMatrix for each feature and each
        measurement source, in addition to activity buffers and if
        requested, the full matrix.
        """
        self._apply_cmd_overrides(p)
        self.metadata = AttrDict(p.metadata)
        for fn in p.metadata_fns:
            self.metadata.update(fn(p.inputs, p.outputs))

        # Features are split depending on whether a preference_fn is
        # supplied to collapse them
        self.outer = [f for f in self.features if f.preference_fn is None]
        self.inner = [f for f in self.features if f.preference_fn is not None]
        self.outer_names, self.outer_vals = [(), ()] if not len(self.outer)\
            else zip(*[(f.name.lower(), f.values) for f in self.outer])
        dimensions = [features.Duration] + list(self.outer)

        self.measurement_product = [mp for mp in product(self.metadata.outputs.keys(),
                                                         p.durations, *self.outer_vals)]

        # Factories close over the shared key dimensions.
        ndmapping_fn = lambda: NdMapping(key_dimensions=dimensions)
        self._featureresponses = defaultdict(ndmapping_fn)
        self._activities = defaultdict(ndmapping_fn)

        if p.store_responses:
            response_dimensions = [features.Time]+dimensions+list(self.inner)
            response_map_fn = lambda: HoloMap(key_dimensions=response_dimensions)
            self._responses = defaultdict(response_map_fn)

        for label in self.measurement_product:
            out_label = label[0]
            output_metadata = self.metadata.outputs[out_label]
            f_vals = label[1:]

            self._activities[out_label][f_vals] = np.zeros(output_metadata['shape'])

            self._featureresponses[out_label][f_vals] = {}
            for f in self.inner:
                self._featureresponses[out_label][f_vals][f.name.lower()] = \
                    DistributionMatrix(output_metadata['shape'],
                                       axis_range=f.range, cyclic=f.cyclic)

    def _measure_responses(self, p):
        """
        Generate feature permutations and present each in sequence.
        """
        # Run hooks before the analysis session
        for f in p.pre_analysis_session_hooks: f()

        features_to_permute = [f for f in self.inner if f.compute_fn is None]
        self.features_to_compute = [f for f in self.inner
                                    if f.compute_fn is not None]

        self.feature_names, values_lists = zip(*[(f.name.lower(), f.values)
                                                 for f in features_to_permute])

        self.permutations = [permutation for permutation in product(*values_lists)]

        # Permute outer or non-collapsed features
        self.outer_permutations = [permutation for permutation
                                   in product(*self.outer_vals)]
        if not self.outer_permutations: self.outer_permutations.append(())
        self.n_outer = len(self.outer_permutations)

        self.total_steps = len(self.permutations) * len(self.outer_permutations)\
                           * p.repetitions - 1
        for permutation_num, permutation in enumerate(self.permutations):
            try:
                self._present_permutation(p, permutation, permutation_num)
            except MeasurementInterrupt as MI:
                self.warning("Measurement was stopped after {0} out of {1} "
                             "presentations. Results may be "
                             "incomplete.".format(MI.current, MI.total))
                break

        # Run hooks after the analysis session
        for f in p.post_analysis_session_hooks: f()

    def _present_permutation(self, p, permutation, permutation_num):
        """Present a pattern with the specified set of feature values."""
        output_names = list(self.metadata['outputs'].keys())
        for label in self.measurement_product:
            out_label = label[0]
            f_vals = label[1:]
            self._activities[out_label][f_vals] *= 0

        # Calculate complete set of settings.
        # list() is required: on Python 3 zip returns a one-shot
        # iterator, which could neither be added to a list below nor
        # safely re-consumed by dict() on every repetition.
        permuted_settings = list(zip(self.feature_names, permutation))
        complete_settings = permuted_settings +\
                            [(f.name, f.compute_fn(permuted_settings))
                             for f in self.features_to_compute]

        for i, op in enumerate(self.outer_permutations):
            for j in range(0, p.repetitions):
                permutation = dict(permuted_settings)
                permutation.update(zip(self.outer_names, op))
                for f in p.pre_presentation_hooks: f()

                presentation_num = p.repetitions * ((self.n_outer*permutation_num)+i) + j

                inputs = self._coordinate_inputs(p, permutation)

                responses = p.pattern_response_fn(inputs, output_names,
                                                  presentation_num,
                                                  self.total_steps,
                                                  durations=p.durations)

                for f in p.post_presentation_hooks: f()

                for response_labels, response in responses.items():
                    name, duration = response_labels
                    self._activities[name][(duration,)+op] += response

            # Average the accumulated responses over the repetitions.
            for response_labels in responses.keys():
                name, duration = response_labels
                self._activities[name][(duration,)+op] /= p.repetitions

        self._update(p, complete_settings)

    def _coordinate_inputs(self, p, feature_values):
        """
        Generates pattern generators for all the requested inputs,
        applies the correct feature values and iterates through the
        metafeature_fns, coordinating complex features.
        """
        input_names = list(self.metadata.inputs.keys())
        feature_values = dict(feature_values, **p.static_features)

        # .items() (not the Python-2-only .iteritems()) so this also
        # runs on Python 3.
        for feature, value in feature_values.items():
            setattr(p.pattern_generator, feature, value)

        if len(input_names) == 0:
            input_names = ['default']

        # Copy the given generator once for every input
        inputs = dict.fromkeys(input_names)
        for k in inputs.keys():
            inputs[k] = copy.deepcopy(p.pattern_generator)

        # Apply metafeature_fns
        for fn in p.metafeature_fns:
            fn(inputs, feature_values)

        return inputs

    def _update(self, p, current_values):
        """
        Update each DistributionMatrix with (activity,bin) and
        populate the full matrix, if enabled.
        """
        timestamp = self.metadata['timestamp']
        for mvals in self.measurement_product:
            name = mvals[0]
            bounds = self.metadata.outputs[name]['bounds']
            f_vals = mvals[1:]
            act = self._activities[name][f_vals]
            for feature, value in current_values:
                self._featureresponses[name][f_vals][feature.lower()].update(act, value)
            if p.store_responses:
                cn, cv = zip(*current_values)
                key = (timestamp,)+f_vals+cv
                self._responses[name][key] = Image(act.copy(), bounds,
                                                   label='Response')

    @bothmethod
    def set_cmd_overrides(self_or_cls, **kwargs):
        """
        Allows setting of cmd_overrides at the class and instance
        level.  cmd_overrides are applied to the pattern_response_fn.
        """
        self_or_cls.cmd_overrides = dict(self_or_cls.cmd_overrides, **kwargs)

    def _apply_cmd_overrides(self, p):
        """
        Applies the cmd_overrides to the pattern_response_fn and the
        pattern_coordinator before launching a measurement.
        """
        for override, value in p.cmd_overrides.items():
            if override in p.pattern_response_fn.params():
                p.pattern_response_fn.set_param(override, value)