def _augment(self, X):
    """Build the interpolation-path inputs from the reference inputs
    toward the actual inputs, one step per ``self._augment_by_n``."""
    # Repeat the samples as the base augmentation does, then unstack the
    # repetitions into a dedicated axis. (Result is not used further, but
    # the base call and reshapes are kept to preserve graph side effects.)
    repeated = super()._augment(X)
    repeated = [
        ilayers.Reshape(
            (-1, self._augment_by_n) + kbackend.int_shape(t)[1:])(t)
        for t in repeated
    ]

    # Difference between actual and reference inputs; stashed on the
    # instance for later reuse.
    diffs = self._compute_difference(X)
    self._keras_difference = diffs
    # Make broadcastable: insert a singleton step axis.
    diffs = [
        ilayers.Reshape((-1, 1) + kbackend.int_shape(t)[1:])(t)
        for t in diffs
    ]

    # Compute path steps: scale each difference by linspace(0, 1).
    linspace_scaler = ilayers.MultiplyWithLinspace(
        0, 1, n=self._augment_by_n, axis=1)
    steps = [linspace_scaler(d) for d in diffs]

    # Shift every step by its reference input, then fold the step axis
    # back into the batch axis.
    references = self._keras_get_constant_inputs()
    shifted = [
        keras.layers.Add()([ref, step])
        for ref, step in zip(references, steps)
    ]
    return [
        ilayers.Reshape((-1,) + kbackend.int_shape(t)[2:])(t)
        for t in shifted
    ]
def _reduce(self, X):
    """Average the augmented outputs over the interpolation-step axis.

    :param X: Tensor or list of tensors whose leading (batch) axis
        interleaves the ``self._augment_by_n`` path steps of each sample.
    :return: List of tensors with the step axis averaged out.
    """
    # Normalize to a list ONCE and reuse it. The previous code zipped
    # over the raw ``X`` while taking shapes from ``iutils.to_list(X)``,
    # which breaks when a single tensor is passed instead of a list.
    Xs = iutils.to_list(X)
    reshape_layers = [
        ilayers.Reshape((-1, self._augment_by_n) + kbackend.int_shape(x)[1:])
        for x in Xs
    ]
    mean = ilayers.Mean(axis=1)
    return [mean(reshape_x(x)) for x, reshape_x in zip(Xs, reshape_layers)]
def get_layer_neuronwise_io(
    layer: Layer,
    node_index: int = 0,
    Xs: Union[List[Tensor], None] = None,
    Ys: Union[List[Tensor], None] = None,
    return_i: bool = True,
    return_o: bool = True,
) -> Union[Tuple[List[Tensor], List[Tensor]], List[Tensor]]:
    """Returns the input and output for each neuron in a layer

    Returns the symbolic input and output for each neuron in a layer.
    For a dense layer this is the input output itself. For convolutional
    layers this method extracts for each neuron the input output mapping.

    At the moment this function is designed to work with dense and
    conv2d layers.

    :param layer: The targeted layer.
    :param node_index: Index of the layer node to use.
    :param Xs: Ignore the layer's input but use Xs instead.
    :param Ys: Ignore the layer's output but use Ys instead.
    :param return_i: Return the inputs.
    :param return_o: Return the outputs.
    :return: Inputs and outputs, if specified, for each individual neuron.
    """
    if not kchecks.contains_kernel(layer):
        raise NotImplementedError(
            "get_layer_neuronwise_io supports only layers with a kernel.")

    if Xs is None:
        Xs = iutils.to_list(layer.get_input_at(node_index))
    if Ys is None:
        Ys = iutils.to_list(layer.get_output_at(node_index))

    if isinstance(layer, keras.layers.Dense):
        # Xs and Ys are already in shape.
        ret_Xs = Xs
        ret_Ys = Ys
    elif isinstance(layer, keras.layers.Conv2D):
        kernel = get_kernel(layer)
        # Expect filter dimension to be last.
        n_channels = kernel.shape[-1]

        if return_i:
            extract_patches = ilayers.ExtractConv2DPatches(
                kernel.shape[:2],
                kernel.shape[2],
                layer.strides,
                layer.dilation_rate,
                layer.padding,
            )
            # shape [samples, out_row, out_col, weight_size]
            # np.prod replaces np.product, which was removed in NumPy 2.0.
            reshape = ilayers.Reshape((-1, np.prod(kernel.shape[:3])))
            ret_Xs = [reshape(extract_patches(x)) for x in Xs]

        if return_o:
            # Get Ys into shape (samples, channels)
            if K.image_data_format() == "channels_first":
                # Ys shape is [samples, channels, out_row, out_col]
                def _reshape(x):
                    x = ilayers.Transpose((0, 2, 3, 1))(x)
                    x = ilayers.Reshape((-1, n_channels))(x)
                    return x

            else:
                # Ys shape is [samples, out_row, out_col, channels]
                def _reshape(x):
                    x = ilayers.Reshape((-1, n_channels))(x)
                    return x

            ret_Ys = [_reshape(x) for x in Ys]
    else:
        raise NotImplementedError(
            "get_layer_neuronwise_io supports only Dense and Conv2D layers.")

    # Xs is (n, d) and Ys is (d, channels)
    if return_i and return_o:
        return ret_Xs, ret_Ys
    if return_i:
        return ret_Xs
    if return_o:
        return ret_Ys
    raise Exception("At least one of return_i/return_o must be True.")
def _reshape(x):
    # Flatten all spatial positions into the batch axis, leaving one
    # column per output channel (channels-last layout).
    return ilayers.Reshape((-1, n_channels))(x)
def _reshape(x):
    # Move channels to the last axis, then flatten all spatial positions
    # into the batch axis with one column per channel.
    transposed = ilayers.Transpose((0, 2, 3, 1))(x)
    return ilayers.Reshape((-1, n_channels))(transposed)