def eval(self, activations_iterator: ActivationsIterator, verbose=False) -> MeasureResult:
    # calculate mean(X_t), the mean activation per transformation, and n_t, the samples per transformation
    u_t, n_t = self.eval_means_per_transformation(activations_iterator)
    n_layers = len(activations_iterator.layer_names())
    # calculate mean(X), the global mean activation
    u = self.eval_global_means(u_t, n_layers)
    # calculate mean_t[ (mean(X_t)-mean(X))^2 ], that is Var( mean(X_t) ), normalized with T-1
    ssb, d_b = self.eval_between_transformations_ssd(u_t, u, n_t)
    # calculate mean_t[ (mean(X_t)-X)^2 ], that is Var(X_t), normalized with N-T
    ssw, d_w = self.eval_within_transformations_ssd(activations_iterator, u_t)
    # calculate the F-score as the ratio of between- and within-transformation variance, per layer
    f_score = self.divide_per_layer(ssb, ssw)
    return MeasureResult(f_score, activations_iterator.layer_names(), self,
                         extra_values={"d_b": d_b, "d_w": d_w})

def eval_within_transformations_ssd(
        self,
        activations_iterator: ActivationsIterator,
        means_per_layer_and_transformation: [ActivationsByLayer],
) -> ([ActivationsByLayer], int):
    n_layers = len(activations_iterator.layer_names())
    ssdw_per_layer = [0] * n_layers
    samples_per_transformation = []
    for means_per_layer, (transformation, transformation_activations) in zip(
            means_per_layer_and_transformation,
            activations_iterator.transformations_first()):
        # accumulate the squared deviations of all samples from this transformation's mean
        n_samples = 0
        for x, batch_activations in transformation_activations:
            n_samples += x.shape[0]
            for j, layer_activations in enumerate(batch_activations):
                for i in range(layer_activations.shape[0]):
                    d = (layer_activations[i, ] - means_per_layer[j]) ** 2
                    ssdw_per_layer[j] = ssdw_per_layer[j] + d
        samples_per_transformation.append(n_samples)
    # divide by the degrees of freedom (N-T, assuming every transformation has the same number of samples)
    degrees_of_freedom = (samples_per_transformation[0] - 1) * len(samples_per_transformation)
    ssdw_per_layer = [s / degrees_of_freedom for s in ssdw_per_layer]
    return ssdw_per_layer, degrees_of_freedom

def eval_means_per_transformation(
        self, activations_iterator: ActivationsIterator
) -> ([ActivationsByLayer], [int]):
    '''
    Calculates, for each transformation, the mean value of every activation in every layer.
    :param activations_iterator:
    :return: A list with the per-layer mean activation values for each transformation,
             and the list of sample counts per transformation
    '''
    n_layers = len(activations_iterator.layer_names())
    means_per_transformation = []
    samples_per_transformation = []
    for transformation, transformation_activations in activations_iterator.transformations_first():
        samples_means_running = [RunningMeanWelford() for i in range(n_layers)]
        # calculate the mean of all samples for this transformation
        n_samples = 0
        for x, batch_activations in transformation_activations:
            n_samples += x.shape[0]
            for j, layer_activations in enumerate(batch_activations):
                for i in range(layer_activations.shape[0]):
                    samples_means_running[j].update(layer_activations[i, ])
        samples_per_transformation.append(n_samples)
        means_per_transformation.append([rm.mean() for rm in samples_means_running])
    return means_per_transformation, samples_per_transformation

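# The methods above accumulate per-layer statistics with RunningMeanWelford, whose
# implementation is not shown here. A minimal sketch of such a class, assuming it keeps an
# incremental mean of NumPy arrays and exposes the update()/mean() interface used above
# (the field names are assumptions):
import numpy as np

class RunningMeanWelford:
    """Online (streaming) mean of a sequence of equally-shaped arrays."""

    def __init__(self):
        self.n = 0
        self.m = None  # running mean, same shape as the observations

    def update(self, x: np.ndarray):
        x = np.asarray(x, dtype=np.float64)
        self.n += 1
        if self.m is None:
            self.m = x.copy()
        else:
            # incremental mean update: m_k = m_{k-1} + (x_k - m_{k-1}) / k
            self.m += (x - self.m) / self.n

    def mean(self) -> np.ndarray:
        return self.m
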
def eval(self, activations_iterator: ActivationsIterator, verbose=False) -> MeasureResult:
    v_transformations = self.numerator_measure.eval(activations_iterator, verbose=False)
    v_samples = self.denominator_measure.eval(activations_iterator, verbose=False)
    v = divide_activations(v_transformations.layers, v_samples.layers)
    layer_names = activations_iterator.layer_names()
    return MeasureResult(v, layer_names, self)

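# The ratio measure above divides its numerator result by its denominator result with
# divide_activations, which is not shown. A minimal sketch, assuming both arguments are
# lists of NumPy arrays of matching shapes and that a small epsilon (an assumption) guards
# against division by zero:
import numpy as np

def divide_activations(numerator_layers, denominator_layers, eps=1e-16):
    # element-wise, layer-by-layer division of two lists of activation arrays
    return [n / (d + eps) for n, d in zip(numerator_layers, denominator_layers)]
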
def eval(self, activations_iterator: ActivationsIterator, verbose=False) -> MeasureResult:
    activations_iterator = activations_iterator.get_both_iterator()
    mean_running = None
    for x, transformation_activations_iterator in activations_iterator.samples_first():
        # transformation_activations_iterator iterates over all transformations of x
        for x_transformed, activations, inverted_activations in transformation_activations_iterator:
            if mean_running is None:
                # initialize here because the number of layers is unknown
                # until the first iteration of the activations_iterator
                mean_running = [RunningMeanWelford() for i in range(len(activations))]
            for j, (layer_activations, inverted_layer_activations) in enumerate(zip(activations, inverted_activations)):
                self.distance_function.distance(layer_activations, inverted_layer_activations, mean_running[j])
    # calculate the final mean over all samples (and layers)
    means = [b.mean() for b in mean_running]
    return MeasureResult(means, activations_iterator.layer_names(), self)

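# The measure above delegates to self.distance_function.distance(...), which receives the
# activations of a transformed sample, the corresponding inverted activations, and the
# per-layer running mean to update. One plausible distance function under that interface
# (the class name and the choice of squared difference are assumptions):
import numpy as np

class SquaredDifferenceDistance:
    """Feeds the element-wise squared difference of two activation batches into a running mean."""

    def distance(self, activations: np.ndarray, inverted_activations: np.ndarray, running_mean):
        d = (np.asarray(activations) - np.asarray(inverted_activations)) ** 2
        # one update per sample in the batch, matching the per-sample updates used elsewhere
        for i in range(d.shape[0]):
            running_mean.update(d[i])
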
def eval_samples_first(self, activations_iterator: ActivationsIterator, queues: [Queue], inner_queues: [Queue]):
    for activations, x_transformed in activations_iterator.samples_first():
        self.put_value(queues, x_transformed)
        self.put_values(inner_queues, activations)
        self.signal_iteration_end(inner_queues)
    self.signal_iteration_end(queues)

def eval_transformations_first(self, activations_iterator: ActivationsIterator, queues: [Queue], inner_queues: [Queue]):
    for transformation, batch_activations in activations_iterator.transformations_first():
        self.put_value(queues, transformation)
        for x, batch_activation in batch_activations:
            self.put_values(inner_queues, batch_activation)
        self.signal_iteration_end(inner_queues)
    self.signal_iteration_end(queues)

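# Both drivers above push values into per-layer queues through put_value, put_values and
# signal_iteration_end, whose bodies are not shown. A plausible sketch, assuming a sentinel
# object marks the end of an iteration (the sentinel and the method bodies are assumptions;
# an object() sentinel only works for thread-backed queues, not multiprocessing ones):
class QueueProtocolSketch:
    END_OF_ITERATION = object()

    def put_value(self, queues, value):
        # broadcast the same value to every per-layer queue
        for q in queues:
            q.put(value)

    def put_values(self, queues, values):
        # send each per-layer queue its own value
        for q, v in zip(queues, values):
            q.put(v)

    def signal_iteration_end(self, queues):
        for q in queues:
            q.put(self.END_OF_ITERATION)
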
def eval(self, activations_iterator: ActivationsIterator, verbose=False) -> MeasureResult:
    layer_names = activations_iterator.layer_names()
    n_layers = len(layer_names)
    mean_running = [RunningMeanWelford() for i in range(n_layers)]
    for transformation, transformation_activations in activations_iterator.transformations_first():
        # aggregate the distances between the samples of this transformation, layer by layer
        for x, batch_activations in transformation_activations:
            for j, layer_activations in enumerate(batch_activations):
                layer_measure = self.distance_aggregation.apply(layer_activations)
                mean_running[j].update(layer_measure)
    # calculate the final mean over all transformations (and layers)
    mean_variances = [b.mean() for b in mean_running]
    return MeasureResult(mean_variances, layer_names, self)

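# The distance-based measures call self.distance_aggregation.apply(layer_activations) on a
# batch of activations and feed the result into a running mean. The aggregation itself is
# not shown; a minimal sketch, assuming it returns the mean pairwise Euclidean distance
# between the rows of the batch (the aggregation choice and class name are assumptions):
import numpy as np

class MeanPairwiseDistanceAggregation:
    """Aggregates a batch of activation rows into a single distance value."""

    def apply(self, layer_activations: np.ndarray) -> float:
        # flatten each row (one row per sample, or per transformed version of a sample)
        x = np.asarray(layer_activations).reshape(layer_activations.shape[0], -1)
        n = x.shape[0]
        distances = [np.linalg.norm(x[i] - x[j]) for i in range(n) for j in range(i + 1, n)]
        return float(np.mean(distances)) if distances else 0.0
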
def eval(self, activations_iterator: ActivationsIterator, verbose=False) -> MeasureResult:
    td_result = self.td.eval(activations_iterator, verbose)
    sd_result = self.sd.eval(activations_iterator, verbose)
    td_result = self.pre_normalization_transformation.apply(td_result)
    sd_result = self.pre_normalization_transformation.apply(sd_result)
    result = divide_activations(td_result.layers, sd_result.layers)
    return MeasureResult(result, activations_iterator.layer_names(), self)

def eval(self, activations_iterator: ActivationsIterator, verbose=False) -> MeasureResult:
    layer_names = activations_iterator.layer_names()
    n_intermediates = len(layer_names)
    mean_running = [RunningMeanWelford() for i in range(n_intermediates)]
    for x, transformation_activations_iterator in activations_iterator.samples_first():
        # transformation_activations_iterator iterates over all transformations of x
        for x_transformed, activations in transformation_activations_iterator:
            for j, layer_activations in enumerate(activations):
                # calculate the distance aggregation only for this batch
                layer_measure = self.distance_aggregation.apply(layer_activations)
                # update the mean over all transformations
                mean_running[j].update(layer_measure)
    # calculate the final mean over all samples (and layers)
    mean_variances = [b.mean() for b in mean_running]
    return MeasureResult(mean_variances, layer_names, self)

def eval(self, activations_iterator: ActivationsIterator) -> MeasureResult:
    layer_names = activations_iterator.layer_names()
    n_intermediates = len(layer_names)
    layer_measures = [self.layer_measure_generator(i, n) for i, n in enumerate(layer_names)]
    for r in layer_measures:
        r.on_begin()
    for activations, x_transformed in activations_iterator.samples_first():
        for r in layer_measures:
            r.on_begin_iteration()
        # activations holds the activations of this sample for all the transformations
        for j, layer_activations in enumerate(activations):
            layer_measures[j].update_layer(layer_activations)
    results = [r.get_final_result() for r in layer_measures]
    return MeasureResult(results, layer_names, self)

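# The per-layer objects created by self.layer_measure_generator must implement the
# on_begin / on_begin_iteration / update_layer / get_final_result hooks used above.
# A toy layer measure under that interface (a sketch; the real generators compute the
# measures of this section) that simply averages every activation it receives:
import numpy as np

class MeanLayerMeasure:
    def __init__(self, index: int, name: str):
        self.index, self.name = index, name

    def on_begin(self):
        self.mean = RunningMeanWelford()  # see the sketch of this class above

    def on_begin_iteration(self):
        pass  # nothing to reset between samples in this toy measure

    def update_layer(self, layer_activations: np.ndarray):
        # one update per row of the batch
        for i in range(layer_activations.shape[0]):
            self.mean.update(layer_activations[i])

    def get_final_result(self) -> np.ndarray:
        return self.mean.mean()
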
def eval(self, activations_iterator: ActivationsIterator, verbose=False) -> MeasureResult:
    activations_iterator: ActivationsIterator = activations_iterator.get_inverted_activations_iterator()
    ts = list(map(str, activations_iterator.get_transformations()))
    mean_variances_running = None
    for transformation, samples_activations_iterator in activations_iterator.transformations_first():
        samples_variances_running = None
        # calculate the variance of all samples for this transformation
        for x, batch_activations in samples_activations_iterator:
            if mean_variances_running is None:
                n_layers = len(batch_activations)
                mean_variances_running = [RunningMeanWelford() for i in range(n_layers)]
            if samples_variances_running is None:
                n_layers = len(batch_activations)
                samples_variances_running = [RunningMeanAndVarianceWelford() for i in range(n_layers)]
            for j, layer_activations in enumerate(batch_activations):
                samples_variances_running[j].update_all(layer_activations)
        # update the mean over all transformations (and layers) with this transformation's std
        for layer_mean_variances_running, layer_samples_variance_running in zip(
                mean_variances_running, samples_variances_running):
            layer_mean_variances_running.update(layer_samples_variance_running.std())
    # calculate the final mean over all transformations (and layers)
    mean_variances = [b.mean() for b in mean_variances_running]
    return MeasureResult(mean_variances, activations_iterator.layer_names(), self)

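# The variance-based measures keep per-layer running statistics with
# RunningMeanAndVarianceWelford, used through update_all (one row of the batch per
# observation) and std(). A minimal sketch based on Welford's online algorithm
# (field names and the population-variance choice are assumptions):
import numpy as np

class RunningMeanAndVarianceWelford:
    def __init__(self):
        self.n = 0
        self.m = None   # running mean
        self.m2 = None  # running sum of squared deviations from the mean

    def update(self, x: np.ndarray):
        x = np.asarray(x, dtype=np.float64)
        if self.m is None:
            self.m = np.zeros_like(x)
            self.m2 = np.zeros_like(x)
        self.n += 1
        delta = x - self.m
        self.m += delta / self.n
        self.m2 += delta * (x - self.m)

    def update_all(self, batch: np.ndarray):
        # treat each row of the batch as one observation
        for i in range(batch.shape[0]):
            self.update(batch[i])

    def mean(self) -> np.ndarray:
        return self.m

    def var(self) -> np.ndarray:
        return self.m2 / self.n

    def std(self) -> np.ndarray:
        return np.sqrt(self.var())
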
def eval(self, activations_iterator: ActivationsIterator, verbose=False) -> MeasureResult:
    activations_iterator = activations_iterator.get_inverted_activations_iterator()
    mean_running = None
    for x, transformation_activations_iterator in activations_iterator.samples_first():
        # transformation_activations_iterator iterates over all transformations of x
        for x_transformed, activations in transformation_activations_iterator:
            if mean_running is None:
                mean_running = [RunningMeanWelford() for i in range(len(activations))]
            for j, layer_activations in enumerate(activations):
                layer_measure = self.distance_aggregation.apply(layer_activations)
                # update the mean over all transformations
                mean_running[j].update(layer_measure)
    # calculate the final mean over all samples (and layers)
    means = [b.mean() for b in mean_running]
    return MeasureResult(means, activations_iterator.layer_names(), self)

def eval(self, activations_iterator: ActivationsIterator, verbose=False) -> MeasureResult:
    activations_iterator = activations_iterator.get_inverted_activations_iterator()
    mean_running = None
    for x, transformation_activations in activations_iterator.samples_first():
        transformation_variances_running = None
        # calculate the running mean/variance/std over all transformations of x
        for x_transformed, activations in transformation_activations:
            if mean_running is None:
                n_layers = len(activations)
                mean_running = [RunningMeanWelford() for i in range(n_layers)]
            if transformation_variances_running is None:
                n_layers = len(activations)
                transformation_variances_running = [RunningMeanAndVarianceWelford() for i in range(n_layers)]
            for i, layer_activations in enumerate(activations):
                # update the running variance over all transformations of this sample
                transformation_variances_running[i].update_all(layer_activations)
        # update the mean with the std over all transformations of x
        for i, layer_variance in enumerate(transformation_variances_running):
            mean_running[i].update(layer_variance.std())
    # calculate the final mean over all samples (for each layer)
    mean_variances = [b.mean() for b in mean_running]
    return MeasureResult(mean_variances, activations_iterator.layer_names(), self)

def eval(self, activations_iterator: ActivationsIterator) -> MeasureResult:
    names = activations_iterator.layer_names()
    layers = len(names)
    layer_measures = [self.generate_layer_measure(i, name) for i, name in enumerate(names)]
    queues = [self.queue_class(self.queue_max_size) for i in range(layers)]
    inner_queues = [self.queue_class(self.queue_max_size) for i in range(layers)]
    result_queues = [self.queue_class(self.queue_max_size) for i in range(layers)]
    threads = [self.process_class(target=c.eval_private, args=[q, qi, qr], daemon=True)
               for c, q, qi, qr in zip(layer_measures, queues, inner_queues, result_queues)]
    self.start_threads(threads)
    if self.activations_order == ActivationsOrder.SamplesFirst:
        self.eval_samples_first(activations_iterator, queues, inner_queues)
    elif self.activations_order == ActivationsOrder.TransformationsFirst:
        self.eval_transformations_first(activations_iterator, queues, inner_queues)
    else:
        raise ValueError(f"Unknown activations order {self.activations_order}")
    self.wait_for_threads(threads)
    results = [qr.get() for qr in result_queues]
    return self.generate_result_from_layer_results(results, names)

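# start_threads and wait_for_threads are thin wrappers around the worker handles created
# above (threads or processes, depending on self.process_class). Their bodies are not
# shown; an obvious sketch, assuming they are methods of the same class:
def start_threads(self, threads):
    for t in threads:
        t.start()

def wait_for_threads(self, threads):
    # block until every per-layer worker has finished consuming its queues
    for t in threads:
        t.join()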