class measure_response(FeatureResponses):
    """
    Presents a pattern on each input source and collates the responses
    of the requested outputs into a Layout of HoloMaps indexed by
    (Time, Duration).
    """

    input_patterns = param.Dict(default={}, doc="""
        Assigns patterns to different inputs overriding the pattern_generator
        parameter. If all inputs have not been assigned a pattern, remaining
        inputs will be presented a blank pattern.""")

    pattern_generator = param.Callable(default=Gaussian(), instantiate=True,
                                       doc="""
        Callable object that will generate input patterns coordinated
        using a list of meta parameters.""")

    def __call__(self, **params):
        """
        Present the configured pattern(s), collate the responses into a
        Layout and optionally pass it to measurement_storage_hook.
        """
        p = ParamOverrides(self, params, allow_extra_keywords=True)
        self._apply_cmd_overrides(p)

        # Gather input/output metadata from the registered metadata hooks
        self.metadata = AttrDict(p.metadata)
        for fn in p.metadata_fns:
            self.metadata.update(fn(p.inputs, p.outputs))

        output_names = self.metadata.outputs.keys()
        input_names = self.metadata.inputs.keys()

        # Assign a pattern to every input: either the explicitly supplied
        # input_patterns (with a blank Constant for any input left
        # unassigned), or an independent deep copy of pattern_generator.
        inputs = dict.fromkeys(input_names)
        if p.input_patterns:
            for k, ip in p.input_patterns.items():
                inputs[k] = ip
            for name in [k for k, ip in inputs.items() if ip is None]:
                # BUGFIX: a separating space was missing between the two
                # string fragments, yielding "defaultingto blank".
                self.warning("No pattern specified for input %s, defaulting "
                             "to blank Constant pattern." % name)
                inputs[name] = imagen.Constant(scale=0)
        else:
            for k in inputs.keys():
                inputs[k] = copy.deepcopy(p.pattern_generator)

        # Present the patterns, bracketing the presentation with hooks
        for f in p.pre_presentation_hooks:
            f()
        responses = p.pattern_response_fn(inputs, output_names,
                                          durations=p.durations)
        for f in p.post_presentation_hooks:
            f()

        # Label results by the class of the presented pattern
        label = inputs.values()[0].__class__.__name__
        results = self._collate_results(responses, label)

        if p.measurement_storage_hook:
            p.measurement_storage_hook(results)

        return results


    def _collate_results(self, responses, label):
        """
        Collate the response dictionary (keyed by (name, duration)) into
        a Layout of HoloMaps, one HoloMap per output, each keyed by
        (Time, Duration).
        """
        time = self.metadata.timestamp
        dims = [f.Time, f.Duration]
        response_label = label + ' Response'

        results = Layout()
        # NOTE: loop variable renamed from 'label', which shadowed the
        # method argument used to build response_label above.
        for resp_key, response in responses.items():
            name, duration = resp_key
            path = (response_label.replace(' ', ''), name)
            im_label = ' '.join([name, response_label])
            metadata = self.metadata['outputs'][name]
            if path not in results:
                vmap = HoloMap(key_dimensions=dims)
                vmap.metadata = AttrDict(**metadata)
                results.set_path(path, vmap)
            im = Image(response, metadata['bounds'], label=im_label,
                       group='Activity')
            im.metadata = AttrDict(timestamp=time)
            results[path][(time, duration)] = im
        return results


    def _apply_cmd_overrides(self, p):
        """
        Forward any extra keywords to the pattern_response_fn, warning
        about keywords that neither this command nor the
        pattern_response_fn recognizes.
        """
        super(measure_response, self)._apply_cmd_overrides(p)
        for override, value in p.extra_keywords().items():
            if override in p.pattern_response_fn.params():
                p.pattern_response_fn.set_param(override, value)
            else:
                self.warning('%s not a parameter of measure_response '
                             'or the pattern_response_fn.' % override)
class FeatureResponses(PatternDrivenAnalysis):
    """
    Systematically vary input pattern feature values and collate the
    responses.

    A DistributionMatrix for each measurement source and feature is
    created. The DistributionMatrix stores the distribution of
    activity values for that feature. For instance, if the features
    to be tested are orientation and phase, we will create a
    DistributionMatrix for orientation and a DistributionMatrix for
    phase for each measurement source. The orientation and phase of
    the input are then systematically varied (when measure_responses
    is called), and the responses of all units from a measurement
    source to each pattern are collected into the DistributionMatrix.

    The resulting data can then be used to plot feature maps and
    tuning curves, or for similar types of feature-based analyses.
    """

    cmd_overrides = param.Dict(default={}, doc="""
        Dictionary used to overwrite parameters on the pattern_response_fn.""")

    durations = param.List(default=[1.0], doc="""
        Times after presentation, when a measurement is taken.""")

    inputs = param.List(default=[], doc="""
        Names of the input supplied to the metadata_fns to filter out
        desired inputs.""")

    metadata_fns = param.HookList(default=[], instantiate=False, doc="""
        Interface functions for metadata. Should return a dictionary that
        at a minimum must contain the name and dimensions of the inputs
        and outputs for pattern presentation and response measurement.""")

    metafeature_fns = param.HookList(default=[], doc="""
        Metafeature functions can be used to coordinate lower level
        features across input devices or depending on a metafeature
        set on the function itself.""")

    measurement_prefix = param.String(default="", doc="""
        Prefix to add to the name under which results are stored.""")

    measurement_storage_hook = param.Callable(default=None, instantiate=True,
                                              doc="""
        Interface to store measurements after they have been completed.""")

    outputs = param.List(default=[], doc="""
        Names of the output source supplied to metadata_fns to filter out
        desired outputs.""")

    # BUGFIX: the doc string previously claimed these values are "varied
    # across measurements"; _coordinate_inputs merges them unchanged into
    # every presentation, i.e. they are held fixed.
    static_features = param.Dict(default={}, doc="""
        Dictionary containing name value pairs of a feature, which is to
        be held fixed across measurements.""")

    pattern_generator = param.Callable(instantiate=True, default=None, doc="""
        Defines the input pattern to be presented.""")

    pattern_response_fn = param.Callable(default=None, instantiate=True, doc="""
        Presenter command responsible for presenting the input patterns
        provided to it and returning the response for the requested
        measurement sources.""")

    repetitions = param.Integer(default=1, bounds=(1, None), doc="""
        How many times each stimulus will be presented.

        Each stimulus is specified by a particular feature combination,
        and need only be presented once if the network has no other
        source of variability. If results differ for each presentation
        of an identical stimulus (e.g. due to intrinsic noise), then
        this parameter can be increased so that results will be an
        average over the specified number of repetitions.""")

    store_responses = param.Boolean(default=False, doc="""
        Determines whether or not to return the full set of responses to
        the presented patterns.""")

    # Filled in by _initialize_featureresponses from the metadata_fns
    metadata = {}

    __abstract = True

    def _initialize_featureresponses(self, p):
        """
        Create an empty DistributionMatrix for each feature and each
        measurement source, in addition to activity buffers and if
        requested, the full matrix.
        """
        self._apply_cmd_overrides(p)
        self.metadata = AttrDict(p.metadata)
        for fn in p.metadata_fns:
            self.metadata.update(fn(p.inputs, p.outputs))

        # Features are split depending on whether a preference_fn is supplied
        # to collapse them
        self.outer = [f for f in self.features if f.preference_fn is None]
        self.inner = [f for f in self.features if f.preference_fn is not None]
        self.outer_names, self.outer_vals = [(), ()] if not len(self.outer)\
            else zip(*[(f.name.lower(), f.values) for f in self.outer])

        dimensions = [features.Duration] + list(self.outer)

        # Cross product of outputs, durations and outer feature values,
        # enumerating every (output, duration, outer...) entry to measure
        self.measurement_product = [mp for mp in
                                    product(self.metadata.outputs.keys(),
                                            p.durations, *self.outer_vals)]

        ndmapping_fn = lambda: NdMapping(key_dimensions=dimensions)
        self._featureresponses = defaultdict(ndmapping_fn)
        self._activities = defaultdict(ndmapping_fn)

        if p.store_responses:
            response_dimensions = [features.Time]+dimensions+list(self.inner)
            response_map_fn = lambda: HoloMap(key_dimensions=response_dimensions)
            self._responses = defaultdict(response_map_fn)

        for label in self.measurement_product:
            out_label = label[0]
            output_metadata = self.metadata.outputs[out_label]
            f_vals = label[1:]

            # Zeroed activity buffer per entry, shaped like the output
            self._activities[out_label][f_vals] = np.zeros(output_metadata['shape'])

            # One DistributionMatrix per inner (collapsed) feature
            self._featureresponses[out_label][f_vals] = {}
            for f in self.inner:
                self._featureresponses[out_label][f_vals][f.name.lower()] = \
                    DistributionMatrix(output_metadata['shape'],
                                       axis_range=f.range,
                                       cyclic=f.cyclic)


    def _measure_responses(self, p):
        """
        Generate feature permutations and present each in sequence.
        """
        # Run hooks before the analysis session
        for f in p.pre_analysis_session_hooks:
            f()

        # Inner features are either permuted directly or computed from
        # the permuted settings via their compute_fn
        features_to_permute = [f for f in self.inner if f.compute_fn is None]
        self.features_to_compute = [f for f in self.inner
                                    if f.compute_fn is not None]

        self.feature_names, values_lists = zip(*[(f.name.lower(), f.values)
                                                 for f in features_to_permute])

        self.permutations = [permutation for permutation
                             in product(*values_lists)]

        # Permute outer or non-collapsed features
        self.outer_permutations = [permutation for permutation
                                   in product(*self.outer_vals)]
        if not self.outer_permutations:
            self.outer_permutations.append(())
        self.n_outer = len(self.outer_permutations)

        self.total_steps = len(self.permutations) * len(self.outer_permutations)\
                           * p.repetitions - 1
        for permutation_num, permutation in enumerate(self.permutations):
            try:
                self._present_permutation(p, permutation, permutation_num)
            except MeasurementInterrupt as MI:
                self.warning("Measurement was stopped after {0} out of {1} "
                             "presentations. Results may be "
                             "incomplete.".format(MI.current, MI.total))
                break

        # Run hooks after the analysis session
        for f in p.post_analysis_session_hooks:
            f()


    def _present_permutation(self, p, permutation, permutation_num):
        """Present a pattern with the specified set of feature values."""
        output_names = self.metadata['outputs'].keys()

        # Reset the accumulated activity buffers
        for label in self.measurement_product:
            out_label = label[0]
            f_vals = label[1:]
            self._activities[out_label][f_vals] *= 0

        # Calculate complete set of settings
        permuted_settings = zip(self.feature_names, permutation)
        complete_settings = permuted_settings +\
                            [(f.name, f.compute_fn(permuted_settings))
                             for f in self.features_to_compute]

        for i, op in enumerate(self.outer_permutations):
            for j in range(0, p.repetitions):
                # Settings for this presentation: the inner permutation
                # plus the current outer feature values.
                # (Renamed from 'permutation', which shadowed the argument.)
                presentation_settings = dict(permuted_settings)
                presentation_settings.update(zip(self.outer_names, op))

                for f in p.pre_presentation_hooks:
                    f()

                presentation_num = p.repetitions *\
                                   ((self.n_outer*permutation_num)+i) + j
                inputs = self._coordinate_inputs(p, presentation_settings)

                responses = p.pattern_response_fn(inputs, output_names,
                                                  presentation_num,
                                                  self.total_steps,
                                                  durations=p.durations)

                for f in p.post_presentation_hooks:
                    f()

                # Accumulate each response for averaging over repetitions
                for response_labels, response in responses.items():
                    name, duration = response_labels
                    self._activities[name][(duration,)+op] += response

            # Average the accumulated responses over the repetitions
            for response_labels in responses.keys():
                name, duration = response_labels
                self._activities[name][(duration,)+op] /= p.repetitions

        self._update(p, complete_settings)


    def _coordinate_inputs(self, p, feature_values):
        """
        Generates pattern generators for all the requested inputs, applies
        the correct feature values and iterates through the
        metafeature_fns, coordinating complex features.
        """
        input_names = self.metadata.inputs.keys()
        # static_features are merged in unchanged for every presentation
        feature_values = dict(feature_values, **p.static_features)

        for feature, value in feature_values.iteritems():
            setattr(p.pattern_generator, feature, value)

        if len(input_names) == 0:
            input_names = ['default']

        # Copy the given generator once for every input
        inputs = dict.fromkeys(input_names)
        for k in inputs.keys():
            inputs[k] = copy.deepcopy(p.pattern_generator)

        # Apply metafeature_fns
        for fn in p.metafeature_fns:
            fn(inputs, feature_values)

        return inputs


    def _update(self, p, current_values):
        """
        Update each DistributionMatrix with (activity,bin) and populate
        the full matrix, if enabled.
        """
        timestamp = self.metadata['timestamp']
        for mvals in self.measurement_product:
            name = mvals[0]
            bounds = self.metadata.outputs[name]['bounds']
            f_vals = mvals[1:]

            act = self._activities[name][f_vals]
            for feature, value in current_values:
                self._featureresponses[name][f_vals][feature.lower()].update(act,
                                                                             value)
            if p.store_responses:
                cn, cv = zip(*current_values)
                key = (timestamp,)+f_vals+cv
                self._responses[name][key] = Image(act.copy(), bounds,
                                                   label='Response')


    @bothmethod
    def set_cmd_overrides(self_or_cls, **kwargs):
        """
        Allows setting of cmd_overrides at the class and instance level.
        cmd_overrides are applied to the pattern_response_fn.
        """
        self_or_cls.cmd_overrides = dict(self_or_cls.cmd_overrides, **kwargs)


    def _apply_cmd_overrides(self, p):
        """
        Applies the cmd_overrides to the pattern_response_fn and the
        pattern_coordinator before launching a measurement.
        """
        for override, value in p.cmd_overrides.items():
            if override in p.pattern_response_fn.params():
                p.pattern_response_fn.set_param(override, value)