Example #1
class STDLayerMeasure(EventDrivenLayerMeasure):
    def __init__(self, layer_index: int, layer_name: str):
        super().__init__(layer_index, layer_name)
        self.running_mean = RunningMeanWelford()

    def update_layer(self, activations):
        layer_measure = activations.std(axis=0)
        self.running_mean.update(layer_measure)

    def get_final_result(self):
        return self.running_mean.mean()

    def eval(self, q: Queue, inner_q: Queue):
        # accumulate, over all transformations and batches, the fraction of
        # times each unit's activation exceeds the threshold
        running_mean = RunningMeanWelford()
        for transformation in self.queue_as_generator(q):
            for activations in self.queue_as_generator(inner_q):
                if self.sign != 1:
                    activations *= self.sign
                activated = (activations > self.threshold) * 1
                running_mean.update_all(activated)

        self.l = running_mean.mean()
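
All of the examples on this page rely on RunningMeanWelford, whose definition is not shown. Below is a minimal sketch of such a class, assuming only the update(x) / update_all(batch) / mean() interface used above; the incremental (Welford-style) mean update is standard, but the real implementation may differ:

import numpy as np

class RunningMeanWelford:
    """Numerically stable running mean via incremental (Welford-style) updates."""

    def __init__(self):
        self.n = 0
        self._mean = None

    def update(self, x):
        # incorporate a single observation
        self.n += 1
        if self._mean is None:
            self._mean = np.asarray(x, dtype=np.float64).copy()
        else:
            self._mean += (x - self._mean) / self.n

    def update_all(self, batch):
        # incorporate a batch of observations along axis 0
        for x in batch:
            self.update(x)

    def mean(self):
        return self._mean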
Example #3
    def eval(self, q: Queue, inner_q: Queue):

        m = RunningMeanWelford()
        for iteration_info in self.queue_as_generator(q):
            inner_m = RunningMeanAndVarianceWelford()

            for activations in self.queue_as_generator(inner_q):
                activations = self.conv_aggregation.apply(activations)
                inner_m.update_all(activations)

            inner_result = self.measure_function.apply_running(inner_m)
            m.update(inner_result)

        return m.mean()
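
Examples #3, #11 and #13 also use RunningMeanAndVarianceWelford. A minimal sketch under the same caveat, implementing Welford's online mean/variance algorithm with the update_all() and std() methods the examples call:

import numpy as np

class RunningMeanAndVarianceWelford:
    """Welford's online algorithm for elementwise mean and variance."""

    def __init__(self):
        self.n = 0
        self._mean = None
        self._m2 = None  # running sum of squared deviations from the mean

    def update(self, x):
        self.n += 1
        if self._mean is None:
            self._mean = np.asarray(x, dtype=np.float64).copy()
            self._m2 = np.zeros_like(self._mean)
        else:
            delta = x - self._mean
            self._mean += delta / self.n
            self._m2 += delta * (x - self._mean)

    def update_all(self, batch):
        # incorporate a batch of observations along axis 0
        for x in batch:
            self.update(x)

    def mean(self):
        return self._mean

    def std(self):
        return np.sqrt(self._m2 / self.n)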
Example #4
    def eval(self,
             activations_iterator: ActivationsIterator,
             verbose=False) -> MeasureResult:
        running_means = [
            RunningMeanWelford() for i in activations_iterator.layer_names()
        ]

        for x, transformation_activations in tqdm(
                activations_iterator.samples_first(), disable=not verbose):
            for x_transformed, activations in transformation_activations:
                for i, layer_activations in enumerate(activations):

                    if self.sign != 1:
                        layer_activations *= self.sign

                    activated: np.ndarray = (layer_activations >
                                             self.thresholds[i]) * 1.0
                    running_means[i].update_all(activated)

        layers_l = [m.mean() for m in running_means]

        return MeasureResult(layers_l, activations_iterator.layer_names(),
                             self)
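
As a toy illustration of what Example #4 accumulates: thresholding turns activations into 0/1 firing indicators, so the running mean converges to each unit's firing rate. The numbers below are made up:

import numpy as np

layer_activations = np.array([[0.2, 0.9, 0.5],
                              [0.7, 0.1, 0.5],
                              [0.8, 0.4, 0.5],
                              [0.3, 0.6, 0.5]])  # 4 samples, 3 units
thresholds = np.array([0.5, 0.5, 0.4])           # one threshold per unit
activated = (layer_activations > thresholds) * 1.0
print(activated.mean(axis=0))  # firing rate per unit: [0.5 0.5 1. ]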
Example #5
 def eval_means_per_transformation(
     self, activations_iterator: ActivationsIterator
 ) -> ([ActivationsByLayer], [int]):
     '''
     For all activations, calculates the mean activation value for each transformation
     :param activations_iterator:
     :return: For each transformation, the per-layer mean activation values,
              and the number of samples per transformation
     '''
     n_layers = len(activations_iterator.layer_names())
     means_per_transformation = []
     samples_per_transformation = []
     for transformation, transformation_activations in activations_iterator.transformations_first(
     ):
         samples_means_running = [
             RunningMeanWelford() for i in range(n_layers)
         ]
         # calculate the mean of all samples for this transformation
         n_samples = 0
         for x, batch_activations in transformation_activations:
             n_samples += x.shape[0]
             for j, layer_activations in enumerate(batch_activations):
                 for i in range(layer_activations.shape[0]):
                     samples_means_running[j].update(layer_activations[i])
         samples_per_transformation.append(n_samples)
         means_per_transformation.append(
             [rm.mean() for rm in samples_means_running])
     return means_per_transformation, samples_per_transformation
Example #6
    def distance(self, batch: np.ndarray, batch_inverted: np.ndarray,
                 mean_running: RunningMeanWelford):
        n_shape = len(batch.shape)
        assert n_shape >= 2
        n, f = batch.shape[0], batch.shape[1]

        if n_shape > 2 and self.normalize:
            # normalize all extra dimensions
            for i in range(n):
                for j in range(f):
                    batch[i, j, :] /= np.linalg.norm(batch[i, j, :])
                    batch_inverted[i, j, :] /= np.linalg.norm(
                        batch_inverted[i, j, :])

        # squared differences of all values
        distances = (batch - batch_inverted)**2
        if n_shape > 2:
            # aggregate extra dims to keep only the feature dim
            distances = distances.mean(axis=tuple(range(2, n_shape)))
        distances = np.sqrt(distances)
        assert len(distances.shape) == 2
        mean_running.update_all(distances)
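
A self-contained trace of the shape flow in Example #6, with the normalization step omitted and the shapes made up:

import numpy as np

# hypothetical conv-style activations: (samples, features, spatial)
batch = np.random.rand(8, 16, 32)
batch_inverted = np.random.rand(8, 16, 32)

distances = (batch - batch_inverted)**2
# aggregate the extra (spatial) dims, keeping samples and features
distances = distances.mean(axis=tuple(range(2, batch.ndim)))
distances = np.sqrt(distances)
print(distances.shape)  # (8, 16): one distance per sample and feature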
Example #7
    def eval(self, activations_iterator: ActivationsIterator,
             verbose=False) -> MeasureResult:
        activations_iterator = activations_iterator.get_both_iterator()
        mean_running = None

        for x, transformation_activations_iterator in activations_iterator.samples_first():
            # transformation_activations_iterator iterates over all transformations
            for x_transformed, activations, inverted_activations in transformation_activations_iterator:
                if mean_running is None:
                    # do this after the first iteration since we don't know the number
                    # of layers until the first iteration of the activations_iterator
                    mean_running = [RunningMeanWelford() for i in range(len(activations))]
                for j, (layer_activations, inverted_layer_activations) in enumerate(
                        zip(activations, inverted_activations)):
                    self.distance_function.distance(layer_activations,
                                                    inverted_layer_activations,
                                                    mean_running[j])
        # calculate the final mean over all samples (and layers)
        means = [b.mean() for b in mean_running]
        return MeasureResult(means, activations_iterator.layer_names(), self)
Example #8
    def eval_global_means(self, means_per_layer_and_transformation: [
        ActivationsByLayer
    ], n_layers: int) -> ActivationsByLayer:
        '''
        :param means_per_layer_and_transformation: per-layer mean activations, one list per transformation
        :param n_layers: number of layers in the model
        :return: The global means for each layer, averaging out the transformations
        '''
        global_means_running = [RunningMeanWelford() for i in range(n_layers)]
        for transformation_means in means_per_layer_and_transformation:
            # transformation_means holds the per-layer means for a given transformation
            for i, layer_means in enumerate(transformation_means):
                global_means_running[i].update(layer_means)

        return [rm.mean() for rm in global_means_running]
Example #9
    def eval(self,
             activations_iterator: ActivationsIterator,
             verbose=False) -> MeasureResult:
        layer_names = activations_iterator.layer_names()
        n_layers = len(layer_names)
        mean_running = [RunningMeanWelford() for i in range(n_layers)]

        for transformation, transformation_activations in activations_iterator.transformations_first(
        ):
            # aggregate the distances over all samples for this transformation
            for x, batch_activations in transformation_activations:
                for j, layer_activations in enumerate(batch_activations):
                    layer_measure = self.distance_aggregation.apply(
                        layer_activations)
                    mean_running[j].update(layer_measure)

        # calculate the final mean over all transformations (and layers)
        mean_variances = [b.mean() for b in mean_running]
        return MeasureResult(mean_variances, layer_names, self)
Example #10
    def eval(self,
             activations_iterator: ActivationsIterator,
             verbose=False) -> MeasureResult:
        layer_names = activations_iterator.layer_names()
        n_intermediates = len(layer_names)
        mean_running = [RunningMeanWelford() for i in range(n_intermediates)]
        for x, transformation_activations_iterator in activations_iterator.samples_first(
        ):
            # transformation_activations_iterator can iterate over all transforms
            for x_transformed, activations in transformation_activations_iterator:
                for j, layer_activations in enumerate(activations):
                    # calculate the distance aggregation only for this batch
                    layer_measure = self.distance_aggregation.apply(
                        layer_activations)
                    # update the mean over all transformations
                    mean_running[j].update(layer_measure)

        # calculate the final mean over all samples (and layers)
        mean_variances = [b.mean() for b in mean_running]
        return MeasureResult(mean_variances, layer_names, self)
Example #11
    def eval(self,
             activations_iterator: ActivationsIterator,
             verbose=False) -> MeasureResult:
        activations_iterator: ActivationsIterator = activations_iterator.get_inverted_activations_iterator(
        )

        mean_variances_running = None

        for transformation, samples_activations_iterator in activations_iterator.transformations_first(
        ):
            samples_variances_running = None
            # calculate the variance of all samples for this transformation
            for x, batch_activations in samples_activations_iterator:
                if mean_variances_running is None:
                    n_layers = len(batch_activations)
                    mean_variances_running = [
                        RunningMeanWelford() for i in range(n_layers)
                    ]
                if samples_variances_running is None:
                    n_layers = len(batch_activations)
                    samples_variances_running = [
                        RunningMeanAndVarianceWelford()
                        for i in range(n_layers)
                    ]
                for j, layer_activations in enumerate(batch_activations):
                    samples_variances_running[j].update_all(layer_activations)
            # update the mean over all transformations (for each layer)
            for layer_mean_variances_running, layer_samples_variance_running in zip(
                    mean_variances_running, samples_variances_running):
                layer_mean_variances_running.update(
                    layer_samples_variance_running.std())

        # calculate the final mean over all transformations (and layers)

        mean_variances = [b.mean() for b in mean_variances_running]
        return MeasureResult(mean_variances,
                             activations_iterator.layer_names(), self)
Example #12
    def eval(self,
             activations_iterator: ActivationsIterator,
             verbose=False) -> MeasureResult:
        activations_iterator = activations_iterator.get_inverted_activations_iterator(
        )
        mean_running = None

        for x, transformation_activations_iterator in activations_iterator.samples_first(
        ):
            # transformation_activations_iterator can iterate over all transforms
            for x_transformed, activations in transformation_activations_iterator:
                if mean_running is None:
                    mean_running = [
                        RunningMeanWelford() for i in range(len(activations))
                    ]
                for j, layer_activations in enumerate(activations):
                    layer_measure = self.distance_aggregation.apply(
                        layer_activations)
                    # update the mean over all transformations
                    mean_running[j].update(layer_measure)
        # calculate the final mean over all samples (and layers)
        means = [b.mean() for b in mean_running]
        return MeasureResult(means, activations_iterator.layer_names(), self)
Example #13
    def eval(self,
             activations_iterator: ActivationsIterator,
             verbose=False) -> MeasureResult:
        activations_iterator = activations_iterator.get_inverted_activations_iterator(
        )

        mean_running = None
        for x, transformation_activations in activations_iterator.samples_first(
        ):
            transformation_variances_running = None
            # calculate the running mean/variance/std over all transformations of x
            for x_transformed, activations in transformation_activations:
                if mean_running is None:
                    n_layers = len(activations)
                    mean_running = [
                        RunningMeanWelford() for i in range(n_layers)
                    ]
                if transformation_variances_running is None:
                    n_layers = len(activations)
                    transformation_variances_running = [
                        RunningMeanAndVarianceWelford()
                        for i in range(n_layers)
                    ]
                for i, layer_activations in enumerate(activations):
                    # update the running mean/variance over all transformations of this sample
                    transformation_variances_running[i].update_all(
                        layer_activations)
            # update the outer mean with the std over all transformations of x
            for i, layer_variance in enumerate(
                    transformation_variances_running):
                mean_running[i].update(layer_variance.std())

        # calculate the final mean over all samples (for each layer)
        mean_variances = [b.mean() for b in mean_running]
        return MeasureResult(mean_variances,
                             activations_iterator.layer_names(), self)
Example #14
def test_mean_welford(x: np.ndarray):
    compare_means_batched(RunningMeanWelford(), x)
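
compare_means_batched is not defined in these examples. One plausible sketch, assuming it feeds x to the running mean in batches and checks the result against the exact NumPy mean (batch size and tolerance are illustrative):

import numpy as np

def compare_means_batched(running_mean, x, batch_size=16):
    # feed x to the running mean in batches along axis 0
    for start in range(0, x.shape[0], batch_size):
        running_mean.update_all(x[start:start + batch_size])
    # the running estimate should match the exact mean over axis 0
    np.testing.assert_allclose(running_mean.mean(), x.mean(axis=0), rtol=1e-6)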
Example #15
 def __init__(self, layer_index: int, layer_name: str):
     super().__init__(layer_index, layer_name)
     self.running_mean = RunningMeanWelford()