def eval_samples_first(self, activations_iterator: ActivationsIterator, queues: List[Queue], inner_queues: List[Queue]):
    for activations, x_transformed in activations_iterator.samples_first():
        # broadcast the transformed sample and its per-layer activations to the consumers
        self.put_value(queues, x_transformed)
        self.put_values(inner_queues, activations)
        self.signal_iteration_end(inner_queues)
    self.signal_iteration_end(queues)
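# NOTE: a minimal sketch of the queue helpers assumed by eval_samples_first above
# (put_value, put_values, signal_iteration_end). Only the names come from this code;
# the bodies below are an assumption for illustration: broadcast one value to every
# queue, distribute one value per queue, and push a sentinel to mark the end of an
# iteration. The ITERATION_END sentinel is hypothetical.

from multiprocessing import Queue
from typing import List

ITERATION_END = None  # hypothetical sentinel marking the end of an iteration


def put_value(queues: List[Queue], value):
    # send the same value to every consumer queue
    for q in queues:
        q.put(value)


def put_values(queues: List[Queue], values):
    # send one value to each queue (e.g. one queue per layer)
    for q, v in zip(queues, values):
        q.put(v)


def signal_iteration_end(queues: List[Queue]):
    # tell every consumer that the current iteration is over
    for q in queues:
        q.put(ITERATION_END)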
def eval(self, activations_iterator: ActivationsIterator, verbose=False) -> MeasureResult:
    activations_iterator = activations_iterator.get_both_iterator()
    mean_running = None
    for x, transformation_activations_iterator in activations_iterator.samples_first():
        # transformation_activations_iterator iterates over all transformations of x
        for x_transformed, activations, inverted_activations in transformation_activations_iterator:
            if mean_running is None:
                # initialize lazily: the number of layers is unknown
                # until the first iteration of the activations_iterator
                mean_running = [RunningMeanWelford() for i in range(len(activations))]
            for j, (layer_activations, inverted_layer_activations) in enumerate(zip(activations, inverted_activations)):
                # the distance function updates the running mean of layer j in place
                self.distance_function.distance(layer_activations, inverted_layer_activations, mean_running[j])
    # calculate the final mean over all samples (and layers)
    means = [b.mean() for b in mean_running]
    return MeasureResult(means, activations_iterator.layer_names(), self)
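# NOTE: a minimal sketch of the RunningMeanWelford interface used above, assuming it
# keeps a numerically stable running mean of (possibly array-valued) samples via
# Welford's incremental update. Only update() and mean() are exercised by the eval()
# methods in this file; the class name and implementation below are illustrative.

import numpy as np


class RunningMeanWelfordSketch:
    def __init__(self):
        self.n = 0
        self.current_mean = None

    def update(self, x: np.ndarray):
        # incremental update: mean_n = mean_{n-1} + (x - mean_{n-1}) / n
        self.n += 1
        if self.current_mean is None:
            self.current_mean = np.array(x, dtype=np.float64)
        else:
            self.current_mean += (x - self.current_mean) / self.n

    def mean(self) -> np.ndarray:
        return self.current_mean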
def eval(self, activations_iterator: ActivationsIterator, verbose=False) -> MeasureResult:
    layer_names = activations_iterator.layer_names()
    n_intermediates = len(layer_names)
    mean_running = [RunningMeanWelford() for i in range(n_intermediates)]
    for x, transformation_activations_iterator in activations_iterator.samples_first():
        # transformation_activations_iterator iterates over all transformations of x
        for x_transformed, activations in transformation_activations_iterator:
            for j, layer_activations in enumerate(activations):
                # calculate the distance aggregation only for this batch
                layer_measure = self.distance_aggregation.apply(layer_activations)
                # update the mean over all transformations
                mean_running[j].update(layer_measure)
    # calculate the final mean over all samples (and layers)
    mean_variances = [b.mean() for b in mean_running]
    return MeasureResult(mean_variances, layer_names, self)
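# NOTE: a hypothetical sketch of the distance_aggregation object assumed above. apply()
# receives the activations of one layer for all transformations of a sample (here
# assumed to have shape (n_transformations, n_features)) and collapses the
# transformation axis into a single per-feature value; the standard deviation is used
# purely as an example reduction.

import numpy as np


class DistanceAggregationSketch:
    def apply(self, layer_activations: np.ndarray) -> np.ndarray:
        # layer_activations: (n_transformations, n_features)
        # reduce over the transformation axis, keeping one value per feature
        return layer_activations.std(axis=0)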
def eval(self, activations_iterator: ActivationsIterator) -> MeasureResult:
    layer_names = activations_iterator.layer_names()
    n_intermediates = len(layer_names)
    layer_measures = [self.layer_measure_generator(i, n) for i, n in enumerate(layer_names)]
    for r in layer_measures:
        r.on_begin()
    for activations, x_transformed in activations_iterator.samples_first():
        for r in layer_measures:
            r.on_begin_iteration()
        # activations has the activations for all the transformations
        for j, layer_activations in enumerate(activations):
            layer_measures[j].update_layer(layer_activations)
    results = [r.get_final_result() for r in layer_measures]
    return MeasureResult(results, layer_names, self)
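# NOTE: a sketch of the per-layer measure protocol consumed by eval() above
# (on_begin / on_begin_iteration / update_layer / get_final_result), inferred from the
# calls in that method. The concrete behavior below is an assumption for illustration:
# accumulate a per-sample statistic and average it at the end.

import numpy as np


class LayerMeasureSketch:
    def __init__(self, index: int, name: str):
        self.index = index
        self.name = name

    def on_begin(self):
        # called once before iterating over samples
        self.n = 0
        self.total = None

    def on_begin_iteration(self):
        # called once per sample, before its transformed activations arrive
        pass

    def update_layer(self, layer_activations: np.ndarray):
        # layer_activations: activations of this layer for all transformations of a sample
        value = layer_activations.std(axis=0)
        self.n += 1
        self.total = value if self.total is None else self.total + value

    def get_final_result(self) -> np.ndarray:
        # mean of the per-sample statistic over all samples
        return self.total / self.n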
def eval(self, activations_iterator: ActivationsIterator, verbose=False) -> MeasureResult:
    activations_iterator = activations_iterator.get_inverted_activations_iterator()
    mean_running = None
    for x, transformation_activations_iterator in activations_iterator.samples_first():
        # transformation_activations_iterator iterates over all transformations of x
        for x_transformed, activations in transformation_activations_iterator:
            if mean_running is None:
                # initialize lazily: the number of layers is unknown until the first iteration
                mean_running = [RunningMeanWelford() for i in range(len(activations))]
            for j, layer_activations in enumerate(activations):
                layer_measure = self.distance_aggregation.apply(layer_activations)
                # update the mean over all transformations
                mean_running[j].update(layer_measure)
    # calculate the final mean over all samples (and layers)
    means = [b.mean() for b in mean_running]
    return MeasureResult(means, activations_iterator.layer_names(), self)
def eval(self, activations_iterator: ActivationsIterator, verbose=False) -> MeasureResult:
    activations_iterator = activations_iterator.get_inverted_activations_iterator()
    mean_running = None
    for x, transformation_activations in activations_iterator.samples_first():
        transformation_variances_running = None
        # calculate the running mean/variance/std over all transformations of x
        for x_transformed, activations in transformation_activations:
            if mean_running is None:
                n_layers = len(activations)
                mean_running = [RunningMeanWelford() for i in range(n_layers)]
            if transformation_variances_running is None:
                n_layers = len(activations)
                transformation_variances_running = [RunningMeanAndVarianceWelford() for i in range(n_layers)]
            for i, layer_activations in enumerate(activations):
                # update the running variance over all transformations of this sample
                transformation_variances_running[i].update_all(layer_activations)
        # update the global mean with the std over all transformations of x
        for i, layer_variance in enumerate(transformation_variances_running):
            mean_running[i].update(layer_variance.std())
    # calculate the final mean over all samples (for each layer)
    mean_variances = [b.mean() for b in mean_running]
    return MeasureResult(mean_variances, activations_iterator.layer_names(), self)
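# NOTE: a minimal sketch of RunningMeanAndVarianceWelford as used above, assuming
# Welford's online algorithm over the transformation axis. update_all() is assumed to
# feed every row of a batch to the running statistics; std() returns the per-feature
# standard deviation. Names and implementation below are illustrative only.

import numpy as np


class RunningMeanAndVarianceWelfordSketch:
    def __init__(self):
        self.n = 0
        self.current_mean = None
        self.m2 = None  # running sum of squared deviations from the current mean

    def update(self, x: np.ndarray):
        # standard Welford update for mean and M2
        self.n += 1
        if self.current_mean is None:
            self.current_mean = np.array(x, dtype=np.float64)
            self.m2 = np.zeros_like(self.current_mean)
        else:
            delta = x - self.current_mean
            self.current_mean += delta / self.n
            self.m2 += delta * (x - self.current_mean)

    def update_all(self, batch: np.ndarray):
        # batch: (n_transformations, n_features); feed each row to the running stats
        for row in batch:
            self.update(row)

    def var(self) -> np.ndarray:
        return self.m2 / self.n

    def std(self) -> np.ndarray:
        return np.sqrt(self.var())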