Example #1
def PrimaryCap(inputs, dim_capsule, n_channels, kernel_size, strides, padding):
    """
    Apply Conv2D `n_channels` times and concatenate all capsules
    :param inputs: 4D tensor, shape=[None, width, height, channels]
    :param dim_capsule: the dim of the output vector of capsule
    :param n_channels: the number of types of capsules
    :param kernel_size: kernel size of the Conv2D layer
    :param strides: strides of the Conv2D layer
    :param padding: padding mode of the Conv2D layer
    :return: output tensor, shape=[None, num_capsule, dim_capsule]
    """
    output = layers.Conv2D(filters=dim_capsule * n_channels,
                           kernel_size=kernel_size,
                           strides=strides,
                           padding=padding,
                           name='primarycap_conv2d')(inputs)
    outputs = layers.Reshape(target_shape=(K.prod(output.shape[1:]) //
                                           dim_capsule, dim_capsule),
                             name='primarycap_reshape')(output)
    return layers.Lambda(squash, name='primarycap_squash')(outputs)
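For a quick shape sanity check (my own sketch, not part of the original snippet), consider typical CapsNet MNIST numbers: a 6x6 feature map with dim_capsule * n_channels = 8 * 32 = 256 filters reshapes into 6 * 6 * 32 = 1152 capsules of dimension 8.

import numpy as np

# Hypothetical CapsNet MNIST numbers: 6x6 feature map, 256 conv filters.
conv_output = np.zeros((1, 6, 6, 256), dtype="float32")
dim_capsule = 8
num_capsule = int(np.prod(conv_output.shape[1:])) // dim_capsule
print(num_capsule)  # 1152 capsules, each an 8-dimensional vector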
Example #2
def complexNormedMultiply(q, mask):
    x = q[0]
    y = q[1]
    r = q[2]
    initX = q[3]
    initY = q[4]
    initR = q[5]
    # Here x is the real part and y is the imaginary part
    if tf.is_tensor(mask):
        # this sets masked values to 1+0i
        mask_cast = K.cast(mask, 'float32')
        expanded = K.expand_dims(mask_cast)
        zeroX = expanded * x
        newY = expanded * y
        newR = expanded * r
        # here I flip the mask (a logical NOT: 1 where masked, 0 where kept)
        antiMask = tf.ones_like(expanded) - expanded
        newX = zeroX + antiMask
    else:
        newX = x
        newY = y
        newR = r
    sumVecs = tf.math.sqrt(tf.multiply(newX,newX)+tf.multiply(newY,newY))
    normedX = newX/sumVecs
    normedY = newY/sumVecs
    normedR = newR
    initSum = tf.math.sqrt(tf.multiply(initX,initX)+tf.multiply(initY,initY))
    inX = initX/initSum
    inY = initY/initSum

    # Using builtin complex numbers
    complexVec = tf.complex(normedX,normedY)
    initVec = tf.complex(inX,inY)
    complexOut = K.prod(complexVec,axis=1)
    newCOut = tf.multiply(complexOut,tf.expand_dims(initVec,0))

    rOut = K.sum(normedR,axis=1)
    newROut = tf.add(rOut,tf.expand_dims(initR,0))

    # This part is new to account for rho
    expR = K.exp(newROut)

    mainReal = tf.math.real(newCOut)
    vecReal = tf.multiply(expR,mainReal)
    singleReal = K.sum(vecReal, axis=1)
    return singleReal
Example #3
    def loss(y_true, y_pred):
        # TODO(KGF): this function is unused and unique to this class
        from plasma.conf import conf
        fac = MaxHingeTarget.fac
        # overall_fac =
        # np.prod(np.array(K.shape(y_pred)[1:]).astype(np.float32))
        overall_fac = K.prod(K.cast(K.shape(y_pred)[1:], K.floatx()))
        max_val = K.max(y_pred, axis=-2)  # temporal axis!
        max_val1 = K.repeat(max_val, K.shape(y_pred)[-2])
        mask = K.cast(K.equal(max_val1, y_pred), K.floatx())
        y_pred1 = mask * y_pred + (1 - mask) * y_true
        weight_mask = K.mean(y_true, axis=-1)
        weight_mask = K.cast(K.greater(weight_mask, 0.0),
                             K.floatx())  # positive label!
        weight_mask = fac * weight_mask + (1 - weight_mask)
        # return weight_mask*squared_hinge(y_true, y_pred1)
        return conf['model']['loss_scale_factor'] * \
            overall_fac * weight_mask * hinge(y_true, y_pred1)
Example #4
    def call(self, inputs, output_shape=None):
        updates, mask = inputs[0], inputs[1]
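        # (assumption, not from the source) `mask` holds the flattened argmax
        # indices from a paired max-pooling-with-argmax layer, so scatter_nd
        # below places each update back at its pre-pooling position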
        mask = K.cast(mask, 'int32')
        input_shape = tf.shape(updates, out_type='int32')

        if output_shape is None:
            output_shape = (input_shape[0], input_shape[1] * self.size[0],
                            input_shape[2] * self.size[1], input_shape[3])

        ret = tf.scatter_nd(K.expand_dims(K.flatten(mask)), K.flatten(updates),
                            [K.prod(output_shape)])

        input_shape = updates.shape
        out_shape = [
            -1, input_shape[1] * self.size[0], input_shape[2] * self.size[1],
            input_shape[3]
        ]
        return K.reshape(ret, out_shape)
Example #5
    def build(self, input_shape):
        input_shape = tensor_shape.TensorShape(input_shape)
        if len(input_shape) != 4:
            raise ValueError('Inputs should have rank 4, '
                             'received input shape: %s' % input_shape)
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape.dims[channel_axis].value is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = int(input_shape[channel_axis])
        self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})

        kernel_shape = (self.filter_size, self.filter_size, input_dim, 1)
        # self.kernel = self.add_weight(
        #     name='kernel',
        #     shape=kernel_shape,
        #     initializer=self.kernel_initializer,
        #     regularizer=self.kernel_regularizer,
        #     constraint=self.kernel_constraint,
        #     trainable=False,
        #     dtype=self.compute_dtype)

        W = K.ones(kernel_shape, dtype=self.compute_dtype)
        W = W / K.cast(K.prod(K.int_shape(W)), dtype=self.compute_dtype)
        self.kernel = W
        # self.set_weights([W])

        if self.use_bias:
            self.bias = self.add_weight(name='bias',
                                        shape=(self.filter_size,
                                               self.filter_size),
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint,
                                        trainable=False,
                                        dtype=self.compute_dtype)
        else:
            self.bias = None

        self.built = True
Example #6
def dice_coef_prod(y_true, y_pred):
    """
    completely same with dice_coef but cahieved with different operations

    TODO : Choose channel (and axis) of background
           Choose other merge methods (sum,avg,etc)
    """
    y_true_f = (Lambda(lambda y_true: y_true[:, :, :, :, 0:])(y_true))
    y_pred_f = (Lambda(lambda y_pred: y_pred[:, :, :, :, 0:])(y_pred))

    product = multiply([y_true_f, y_pred_f])

    red_y_true = K.sum(y_true_f, axis=[0, 1, 2, 3])
    red_y_pred = K.sum(y_pred_f, axis=[0, 1, 2, 3])
    red_product = K.sum(product, axis=[0, 1, 2, 3])

    smooth = 0.0001
    dices = (2. * red_product + smooth) / (red_y_true + red_y_pred + smooth)

    return K.prod(dices)
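A worked numeric sketch (mine, not from the source): K.prod merges the per-channel dice scores multiplicatively, so a single poorly segmented class drags the whole metric down far more than an average would.

import numpy as np

# hypothetical per-channel dice scores for a 3-class segmentation
dices = np.array([0.95, 0.90, 0.40])
print(dices.prod())  # 0.342: the weak class dominates the product
print(dices.mean())  # 0.75: a sum/avg merge (see the TODO) is more forgiving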
Example #7
    def _compute_target_mask(self, inputs, mask=None):
        input_shape = K.shape(inputs)
        input_type = K.dtype(inputs)

        mask_threshold = K.constant(1e8, dtype=input_type)

        channel_num = int(inputs.shape[-1])
        channel_dim = K.prod(input_shape[:-1])
        masked_inputs = inputs
        if mask is not None:
            masked_inputs = K.switch(
                K.cast(mask, K.floatx()) > 0.5, masked_inputs,
                K.ones_like(masked_inputs, dtype=input_type) * mask_threshold)
        norm = K.abs(masked_inputs)
        channeled_norm = K.transpose(
            K.reshape(norm, (channel_dim, channel_num)))
        weight_num = K.sum(
            K.reshape(K.cast(masked_inputs < mask_threshold, K.floatx()),
                      (channel_dim, channel_num)),
            axis=0,
        )
        indices = K.stack(
            [
                K.arange(channel_num, dtype='int32'),
                K.cast(self.target_rate * weight_num, dtype='int32') - 1,
            ],
            axis=-1,
        )
        threshold = -tf.gather_nd(
            tf.nn.top_k(-channeled_norm, k=K.max(indices[:, 1]) + 1).values,
            indices)

        threshold = K.reshape(tf.tile(threshold, [channel_dim]), input_shape)
        target_mask = K.switch(
            norm <= threshold,
            K.ones_like(inputs, dtype=K.floatx()),
            K.zeros_like(inputs, dtype=K.floatx()),
        )
        return target_mask
Example #8
    def __call__(self, y_true: tf.Tensor, y_pred: tf.Tensor) -> tf.Tensor:
        """ Calculate the Laplacian Pyramid Loss.

        Parameters
        ----------
        y_true: :class:`tf.Tensor`
            The ground truth value
        y_pred: :class:`tf.Tensor`
            The predicted value

        Returns
        -------
        :class:`tf.Tensor`
            The loss value
        """
        pyramid_true = self._get_laplacian_pyramid(y_true)
        pyramid_pred = self._get_laplacian_pyramid(y_pred)

        losses = K.stack([K.sum(K.abs(ppred - ptrue)) / K.cast(K.prod(K.shape(ptrue)), "float32")
                          for ptrue, ppred in zip(pyramid_true, pyramid_pred)])
        loss = K.sum(losses * self._weights)

        return loss
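A side observation (mine, not in the source): dividing each level's summed absolute difference by K.prod(K.shape(ptrue)) is exactly a mean over all elements, as this small check shows.

import numpy as np

ptrue = np.random.rand(2, 8, 8, 3).astype("float32")
ppred = np.random.rand(2, 8, 8, 3).astype("float32")
summed = np.abs(ppred - ptrue).sum() / np.prod(ptrue.shape)
meaned = np.abs(ppred - ptrue).mean()
assert np.isclose(summed, meaned)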
Example #9
# a = np.array((None,))
# print(a.shape)
# n : number of trials
# pvals : a sequence of length p giving the probability of each outcome
# size : int or tuple of ints, optional
# Example:
# 1. Roll a fair die 20 times:
#
# np.random.multinomial(20, [1/6.]*6, size=1)
# result = np.random.multinomial(1, [1 / 6.] * 6, size=1)
# print(result, type(np.argmax(
#     result
# )))
# [[10  1  1  4  3  1]]
# i.e. the number of times each face came up

# def sample(preds, temperature=1.0) -> [int]:
#     preds = np.asarray(preds).astype('float64')
#     preds = np.log(preds) / temperature
#     exp_preds = np.exp(preds)
#     preds = exp_preds / np.sum(exp_preds)
#     probas = np.random.multinomial(1, preds, 1)
#     return np.argmax(probas)
#
#
# print('abcd'[sample([1. / 3, 1. / 3, 1. / 3], temperature=0.5)])

a = [[1, 1], [20, 20]]
print(K.prod(K.cast(K.shape(a), 'float32')))
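# note (added): K.shape(a) for the nested 2x2 list is [2, 2], so the line
# above prints 4.0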
Example #10
    def compute_output_shape(self, input_shape):
        boxes_shape, other_shape = input_shape
        n = len(boxes_shape) - 1
        output_shape = tuple(
            list(boxes_shape[:n]) + [K.prod([s for s in other_shape[n:]]) + 4])
        return tensor_shape.TensorShape(output_shape)
Example #11
def gmean(y_true, y_pred):
    """Compute the geometric mean.
    The geometric mean (G-mean) is the root of the product of class-wise
    sensitivity. This measure tries to maximize the accuracy on each of the
    classes while keeping these accuracies balanced.

    Papers

    .. [1] Kubat, M. and Matwin, S. "Addressing the curse of
       imbalanced training sets: one-sided selection" ICML (1997)

    .. [2] Barandela, R., Sánchez, J. S., García, V., & Rangel, E. "Strategies
       for learning in class imbalance problems", Pattern Recognition,
       36(3), (2003), pp 849-851.

    """
    def recall(y_true, y_pred):
        y_pred = backend.round(y_pred)
        tp = backend.sum(backend.cast(y_true * y_pred, tf.float32), axis=0)
        fp = backend.sum(backend.cast((1 - y_true) * y_pred, tf.float32),
                         axis=0)
        fn = backend.sum(backend.cast(y_true * (1 - y_pred), tf.float32),
                         axis=0)
        return tp / (tp + fn + backend.epsilon())

    def element_wise_recall(y_true, y_pred):
        y_pred = backend.round(y_pred)
        tp = backend.cast(y_true * y_pred, tf.float32)
        fp = backend.cast((1 - y_true) * y_pred, tf.float32)
        fn = backend.cast(y_true * (1 - y_pred), tf.float32)
        return tp / (tp + fn + backend.epsilon())

    def number_of_classes(y_pred):
        value = backend.shape(y_pred)[1]
        return tf.cond(tf.equal(value, 0), lambda: tf.constant(0, tf.int32),
                       lambda: value)

    # Create empty recall list
    recalls = tf.constant(1.0, shape=[0, 10])

    def multiply_recalls(x):
        X = tf.cond(tf.equal(x[0], x[1]), lambda: tf.constant(1, tf.int32),
                    lambda: tf.constant(0, tf.int32))
        y = 1
        r = element_wise_recall(y, X)
        indices = x[0]
        tf.scatter_add(recalls, indices, r)

    # flatten y_true
    y_true = tf.reshape(y_true, [-1])
    # get number of classes
    num_classes = number_of_classes(y_pred)
    # class predictions
    y_pred_classes = backend.map_fn(lambda x: backend.argmax(x), y_pred)
    # Concat
    y_true_y_pred = tf.stack([y_true, y_pred_classes])
    # create recall value per class
    backend.map_fn(lambda x: multiply_recalls(x), y_true_y_pred)
    # Multiply recall values
    recall_value = backend.prod(recalls)
    # create exponent
    b = tf.constant(1, tf.float32) / num_classes
    result = tf.pow(recall_value, b)

    with tf.Session() as sess:
        return sess.run(result)
Example #12
    def deep_dream():
        """
        DeepDream is an artistic image-modification technique that uses the representations learned by convnets. First
        released by Google in the summer of 2015, this algorithm is very similar to the gradient ascent technique we
        viewed earlier to represent the patterns learned by individual filters during training (Chapter 5). There are a
        few differences to the algorithm:
            -> With DeepDream you try to maximise the activation of the entire layer rather than one specific filter,
               thus mixing together visualisations of a larger number of filters.
            -> You start not from a blank, slightly noisy input, but rather from an existing image - thus the resulting
               effects latch on to preexisting visual patterns, distorting elements of the image in a somewhat artistic
               fashion.
            -> The input images are processed at different scales (called octaves), which improves the quality of the
               visualisations.
               
               
        This function does not work due to version issues.

        :return: None
        """

        # You won't be training a model for this application, so let's disable all training functionality before
        # starting
        K.set_learning_phase(0)

        model = inception_v3.InceptionV3(weights='imagenet', include_top=False)

        # In Chapter 5 we use the loss value to maximise the output of a specific filter. This time we'll attempt to
        # maximise the weighted sum of the L2 norm of the activations of a set of high-level layers. The set of layers
        # chosen will have a massive impact on the resulting modifications to the image, so make these params very
        # easily configurable.
        layers_contributions = {
            'mixed2': 0.2,
            'mixed3': 3.0,
            'mixed4': 2.0,
            'mixed5': 1.5
        }
        layer_dict = dict([(layer.name, layer) for layer in model.layers])

        # You'll define the loss by adding layer contributions to this scalar value.
        loss = K.variable(0.0)
        for layer_name in layers_contributions:
            coeff = layers_contributions[layer_name]
            # Retrieve the layer's output.
            activation = layer_dict[layer_name].output

            # Define the scaling factor and add the L2 norm of the features of a layer to the loss. You avoid border
            # artifacts by involving only non-border pixels in the loss.
            scaling = K.prod(K.cast(K.shape(activation), 'float32'))
            loss = loss + coeff * K.sum(K.square(
                activation[:, 2:-2, 2:-2, :])) / scaling

        # Now we can set up the gradient ascent process.
        dream = model.input

        # Compute the gradient of the loss w.r.t. the dream, then NORMALISE!!!
        grads = K.gradients(loss, dream)[0]
        grads /= K.maximum(K.mean(K.abs(grads)), 1e-7)

        # Now set up a Keras function to retrieve the value of the loss and gradients given an input image.
        outputs = [loss, grads]
        fetch_loss_and_grads = K.function([dream], outputs)

        def eval_loss_and_grads(x):
            """
            This function is used to call the fetch_loss_and_grads function and package the outputs in an easy to use
            fashion.

            :param x: Input dream
            :return: The loss and the gradient of the layer w.r.t. the dream.
            """
            outs = fetch_loss_and_grads([x])
            loss_value = outs[0]
            grads_value = outs[1]
            return loss_value, grads_value

        def gradient_ascent(x, iterations, step, max_loss=None):
            """
            This function runs gradient ascent for a number of iterations.

            :param x: Input dream
            :param iterations: Number of iterations to run gradient ascent for
            :param step: Step-size of the gradient ascent
            :param max_loss: Maximum loss we'll accept during the gradient ascent before stopping.
            :return: A modified version of the input dream
            """
            for i in range(iterations):
                loss_value, grads_value = eval_loss_and_grads(x)
                if max_loss is not None and loss_value > max_loss:
                    break
                print(f"...Loss value at {i}: {loss_value}")
                x += step * grads_value
            return x

        # Now we can begin programming the DeepDream algorithm itself. First we need to define a set of scales
        # (called octaves) at which to process the image. Each octave is 40% larger than the last. At each scale (from
        # smallest to largest) you run gradient ascent to maximise the loss you previously defined. To prevent artifacts
        # of up-scaling (blurriness and stretching) we'll re-inject the lost detail back into the image, which is possible
        # because you know what the original image should look like at a larger scale.
        step = 0.01
        num_octave = 3
        octave_scale = 1.4
        iterations = 20

        max_loss = 10.0
        base_image_path = 'C:\\Users\\owatkins\\OneDrive - Analog Devices, Inc\\Documents\\Project Folder\\Tutorials and Courses\\Deep Learning with Python\\European_Landscape.jpg'
        print("Loading Base Image...")

        # Load the base image into Numpy array.
        img = preprocess_image_inception(base_image_path)
        print(f"Image Preprocessed: {img.dtype} of size: {img.shape}")

        # Prepare a list of shape tuples defining the different scales at which to run gradient ascent.
        original_shape = img.shape[1:3]
        successive_shapes = [original_shape]
        for i in range(1, num_octave):
            shape = tuple(
                [int(dim / (octave_scale**i)) for dim in original_shape])
            successive_shapes.append(shape)

        # Reverse the list so that they run in ascending order.
        successive_shapes = successive_shapes[::-1]

        # Resize the Numpy array of the image to the smallest size.
        original_img = np.copy(img)
        shrunk_original_image = resize_img(original_img, successive_shapes[0])

        # Run deep dream over all octaves.
        for shape in successive_shapes:
            print(f"Processing Image shape: {shape}")

            # Scales up the deep dream image
            img = resize_img(img, shape)

            # Run gradient ascent, altering the dream.
            img = gradient_ascent(img,
                                  iterations=iterations,
                                  step=step,
                                  max_loss=max_loss)

            # Scales up the smaller version of the original image: it will be pixellated. Compute the high-quality
            # version of the original image at this size. The difference between the two is the detail lost in
            # up-scaling.
            upscaled_shrunk_original_img = resize_img(shrunk_original_image,
                                                      shape)
            same_size_original = resize_img(original_img, shape)
            lost_detail = same_size_original - upscaled_shrunk_original_img

            # Re-inject the lost detail back into the dream. Grab the shrunk_original_image and save the dream at this
            # octave
            img += lost_detail
            shrunk_original_image = resize_img(original_img, shape)
            save_img(
                img,
                fname=
                'C:\\Users\\owatkins\\OneDrive - Analog Devices, Inc\\Documents\\Project Folder\\Tutorials and Courses\\Deep Learning with Python\\dream_at_scale_'
                + str(shape) + '.png')

        # Save the final dream.
        save_img(
            img,
            fname=
            'C:\\Users\\owatkins\\OneDrive - Analog Devices, Inc\\Documents\\Project Folder\\Tutorials and Courses\\Deep Learning with Python\\Final_Dream.png'
        )
Example #13
def feature_loss(y_true, y_pred):
    norm = K.prod(K.cast(K.shape(y_true)[1:], 'float32'))
    return K.sum(K.square(y_pred - y_true), axis=(1, 2, 3)) / norm
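An equivalence worth spelling out (my addition): the norm term is the element count per sample, so the whole expression is a per-sample mean of squared errors, i.e. K.mean(K.square(y_pred - y_true), axis=(1, 2, 3)). A numpy check:

import numpy as np

y_true = np.random.rand(2, 4, 4, 3).astype("float32")
y_pred = np.random.rand(2, 4, 4, 3).astype("float32")
norm = np.prod(y_true.shape[1:])
a = np.square(y_pred - y_true).sum(axis=(1, 2, 3)) / norm
b = np.square(y_pred - y_true).mean(axis=(1, 2, 3))
assert np.allclose(a, b)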
Example #14
    def update_state(self, y_true, y_pred, sample_weight=None):
        # Cast inputs
        y_pred = tf.convert_to_tensor(y_pred)
        y_true = tf.cast(y_true, dtype=y_pred.dtype)

        # Transform inputs
        [y_pred, y_true
         ], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values(
             [y_pred, y_true], sample_weight)

        # Get threshold properties
        if isinstance(self.thresholds, list):
            num_thresholds = len(self.thresholds)
        else:
            num_thresholds = len(list(self.thresholds))

        # Check input values and adjust shapes
        with ops.control_dependencies([
                check_ops.assert_greater_equal(
                    y_pred,
                    tf.cast(0.0, dtype=y_pred.dtype),
                    message='predictions must be >= 0'),
                check_ops.assert_less_equal(y_pred,
                                            tf.cast(1.0, dtype=y_pred.dtype),
                                            message='predictions must be <= 1')
        ]):

            if sample_weight is None:
                y_pred, y_true = tf_losses_utils.squeeze_or_expand_dimensions(
                    y_pred, y_true)
            else:
                y_pred, y_true, sample_weight = (
                    tf_losses_utils.squeeze_or_expand_dimensions(
                        y_pred, y_true, sample_weight=sample_weight))

        # Check shape compatibility
        y_pred.shape.assert_is_compatible_with(y_true.shape)

        # Check if num_classes corresponds to y_pred
        if self.average != 'micro':
            tf.debugging.assert_shapes(
                shapes=[(y_pred, (..., self.num_classes))],
                data=y_pred,
                summarize=10,
                message='num_classes must correspond to the prediction')

        # Filter top k
        if self.top_k is not None:
            y_pred = metrics_utils._filter_top_k(y_pred, self.top_k)

        # Select class id
        if self.class_id is not None:
            y_true = y_true[..., self.class_id]
            y_pred = y_pred[..., self.class_id]

        # Get prediction shape
        pred_shape = tf.shape(y_pred)
        num_predictions = pred_shape[0]

        # Set label shapes
        if y_pred.shape.ndims == 1:
            num_labels = 1
        else:
            num_labels = K.prod(pred_shape[1:], axis=0)

        # Flatten predictions and labels
        predictions_extra_dim = tf.reshape(y_pred, [1, -1])
        labels_extra_dim = tf.reshape(tf.cast(y_true, dtype=tf.bool), [1, -1])

        # Tile the thresholds for every prediction
        thresh_pretile_shape = [num_thresholds, -1]
        thresh_tiles = [1, num_predictions * num_labels]
        data_tiles = [num_thresholds, 1]

        thresh_tiled = tf.tile(
            tf.reshape(tf.constant(self.thresholds, dtype=y_pred.dtype),
                       thresh_pretile_shape), tf.stack(thresh_tiles))

        # Tile the predictions for every threshold
        preds_tiled = tf.tile(predictions_extra_dim, data_tiles)

        # Compare predictions and threshold
        pred_is_pos = tf.greater(preds_tiled, thresh_tiled)

        # Tile labels by number of thresholds
        label_is_pos = tf.tile(labels_extra_dim, data_tiles)

        # Set sample weights
        if sample_weight is not None:
            sample_weight = weights_broadcast_ops.broadcast_weights(
                tf.cast(sample_weight, dtype=y_pred.dtype), y_pred)
            weights_tiled = tf.tile(tf.reshape(sample_weight, thresh_tiles),
                                    data_tiles)
        else:
            weights_tiled = None

        def _weighted_assign_add(label, pred, weights, var):
            label_and_pred = tf.cast(tf.logical_and(label, pred),
                                     dtype=y_pred.dtype)

            if weights is not None:
                label_and_pred *= weights

            if self.average != 'micro':
                label_and_pred = tf.reshape(label_and_pred,
                                            shape=[-1, self.num_classes])

            return var.assign_add(tf.reduce_sum(label_and_pred, self.axis))

        # Set return value
        update_ops = []

        # Update true positives
        update_ops.append(
            _weighted_assign_add(label_is_pos, pred_is_pos, weights_tiled,
                                 self.true_positives))

        # Update false negatives
        pred_is_neg = tf.logical_not(pred_is_pos)
        update_ops.append(
            _weighted_assign_add(label_is_pos, pred_is_neg, weights_tiled,
                                 self.false_negatives))

        # Update false positives
        label_is_neg = tf.logical_not(label_is_pos)
        update_ops.append(
            _weighted_assign_add(label_is_neg, pred_is_pos, weights_tiled,
                                 self.false_positives))

        return tf.group(update_ops)
Example #15
    def call(self, inputs, training=None):
        # These were moved here from build() because tf2 eager was not
        # tracking gradients:
        repeated_gamma = K.reshape(
            K.tile(K.expand_dims(self.gamma, -1), [1, self.n]),
            [-1],
        )
        repeated_beta = K.reshape(
            K.tile(K.expand_dims(self.beta, -1), [1, self.n]),
            [-1],
        )

        repeated_moving_mean = K.reshape(
            K.tile(K.expand_dims(self.moving_mean, -1), [1, self.n]),
            [-1],
        )
        repeated_moving_variance = K.reshape(
            K.tile(K.expand_dims(self.moving_variance, -1), [1, self.n]),
            [-1],
        )

        def unrepeat(w):
            n = 1
            if self.h == 'C4':
                n *= 4
            elif self.h == 'D4':
                n *= 8
            elif self.h == 'Z2':
                n *= 1
            else:
                raise ValueError('Wrong h: %s' % self.h)

            return K.mean(K.reshape(w, (K.int_shape(w)[0] // n, n)), -1)

        input_shape = K.int_shape(inputs)
        # Prepare broadcasting shape.
        ndim = len(input_shape)
        reduction_axes = list(range(len(input_shape)))
        del reduction_axes[self.axis]
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis]

        # Determines whether broadcasting is needed.
        needs_broadcasting = (sorted(reduction_axes) != list(range(ndim))[:-1])

        def normalize_inference():
            if needs_broadcasting:
                # In this case we must explicitly broadcast all parameters.
                broadcast_moving_mean = K.reshape(repeated_moving_mean,
                                                  broadcast_shape)
                broadcast_moving_variance = K.reshape(repeated_moving_variance,
                                                      broadcast_shape)

                broadcast_beta = K.reshape(repeated_beta, broadcast_shape)

                broadcast_gamma = K.reshape(repeated_gamma, broadcast_shape)

                return K.batch_normalization(inputs,
                                             broadcast_moving_mean,
                                             broadcast_moving_variance,
                                             broadcast_beta,
                                             broadcast_gamma,
                                             epsilon=self.epsilon)
            else:
                return K.batch_normalization(inputs,
                                             repeated_moving_mean,
                                             repeated_moving_variance,
                                             repeated_beta,
                                             repeated_gamma,
                                             epsilon=self.epsilon)

        def _get_training_value(training, trainable_flag):
            """
            Return a flag indicating whether a layer should be called in training
            or inference mode.
            Modified from https://git.io/JUGHX
            training: the setting used when the layer is called for inference.
            trainable_flag: flag indicating whether the layer is trainable.
            """
            if training is None:
                training = K.learning_phase()

            if isinstance(training, int):
                training = bool(training)

            # If layer not trainable, override value passed from model.
            if trainable_flag is False:
                training = False

            return training

        # If the learning phase is *static* and set to inference:
        training_val = _get_training_value(training, self.trainable)
        if training_val is False:
            return normalize_inference()

        # If the learning is either dynamic, or set to training:
        normed_training, mean, variance = K.normalize_batch_in_training(
            inputs,
            repeated_gamma,
            repeated_beta,
            reduction_axes,
            epsilon=self.epsilon)

        if K.backend() != 'cntk':
            sample_size = K.prod(
                [K.shape(inputs)[axis] for axis in reduction_axes])
            sample_size = K.cast(sample_size, dtype=K.dtype(inputs))

            # sample variance - unbiased estimator of population variance
            variance *= sample_size / (sample_size - (1.0 + self.epsilon))

        self.add_update([
            K.moving_average_update(self.moving_mean, unrepeat(mean),
                                    self.momentum),
            K.moving_average_update(self.moving_variance, unrepeat(variance),
                                    self.momentum)
        ], inputs)

        # Pick the normalized form corresponding to the training phase.
        return K.in_train_phase(normed_training,
                                normalize_inference,
                                training=training)
Example #16
K.set_learning_phase(0)  # disable all training operation
model = inception_v3.InceptionV3(weights='imagenet', include_top=False)
# model.summary()
layer_contributions = {'mixed2': .2,
                       'mixed3': 3.,
                       'mixed4': 2.,
                       'mixed5': 1.5}
layer_dict = dict([(layer.name, layer) for layer in model.layers])
loss = K.variable(0.)
for layer_name in layer_contributions:
    print(layer_name)
    coeff = layer_contributions[layer_name]
    activation = layer_dict[layer_name].output
    print(activation)
    scaling = K.prod(K.cast(K.shape(activation), 'float32'))
    loss = loss + coeff * K.sum(K.square(activation[:, 2: -2, 2: -2, :])) / scaling


dream = model.input
grads = K.gradients(loss, dream)[0]  # compute the gradient of the loss with respect to the dream
grads /= K.maximum(K.mean(K.abs(grads)), 1e-7)  # normalize gradient
outputs = [loss, grads]
fetch_loss_and_grads = K.function([dream], outputs)  # retrieve value of the loss/gradients given input image


def eval_loss_and_grads(x):
    outs = fetch_loss_and_grads([x])
    loss_value = outs[0]
    grad_values = outs[1]
    return loss_value, grad_values
Example #17
def correct_mean(y):
    return K.prod(K.cast(K.shape(y)[:-1], dtype=K.floatx())) / count_labels(y)
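A usage sketch (mine; count_labels is not shown in the source, so the stand-in below is hypothetical): the ratio is the total number of label positions divided by the number of positive labels, usable as a class-rebalancing factor.

import numpy as np

def count_labels(y):  # hypothetical stand-in for the helper not shown here
    return np.count_nonzero(y)

y = np.zeros((2, 8, 8, 1), dtype="float32")
y[0, :2, :2, 0] = 1.0  # 4 positive positions out of 2 * 8 * 8 = 128
print(np.prod(y.shape[:-1]) / count_labels(y))  # 32.0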
Example #18
dream = model.input
print('Model loaded.')

# Get the symbolic outputs of each "key" layer (we gave them unique names).
layer_dict = dict([(layer.name, layer) for layer in model.layers])

# Define the loss.
loss = K.variable(0.)
for layer_name in settings['features']:
    # Add the L2 norm of the features of a layer to the loss.
    if layer_name not in layer_dict:
        raise ValueError('Layer ' + layer_name + ' not found in model.')
    coeff = settings['features'][layer_name]
    x = layer_dict[layer_name].output
    # We avoid border artifacts by only involving non-border pixels in the loss.
    scaling = K.prod(K.cast(K.shape(x), 'float32'))
    if K.image_data_format() == 'channels_first':
        loss = loss + coeff * K.sum(K.square(x[:, :, 2:-2, 2:-2])) / scaling
    else:
        loss = loss + coeff * K.sum(K.square(x[:, 2:-2, 2:-2, :])) / scaling

# Compute the gradients of the dream wrt the loss.
grads = K.gradients(loss, dream)[0]
# Normalize gradients.
grads /= K.maximum(K.mean(K.abs(grads)), K.epsilon())

# Set up function to retrieve the value
# of the loss and gradients given an input image.
outputs = [loss, grads]
fetch_loss_and_grads = K.function([dream], outputs)
Example #19
def relu(x, alpha=0.0, max_value=None, threshold=0.0, mode="diag"):
    """Rectified Linear Unit.

    Assumed Density Filtering (ADF) version of the Keras `relu` activation.

    Parameters
    ----------
    x : list or tuple
        Input tensors (means and covariances).
    alpha: float, optional
        Slope of negative section. Default is ``0.0``.
        Currently no value other than the default is supported for ADF.
    max_value: float, optional
        Saturation threshold. Default is ``None``.
        Currently no value other than the default is supported for ADF.
    threshold: float, optional
        Threshold value for thresholded activation. Default is ``0.0``.
        Currently no value other than the default is supported for ADF.
    mode: {"diag", "diagonal", "lowrank", "half", "full"}
        Covariance computation mode. Default is "diag".

    Returns
    -------
    list
        List of transformed means and covariances, according to
        the ReLU activation: ``max(x, 0)``.

    """
    if not alpha == 0.0:
        raise NotImplementedError(
            "The relu activation function with alpha other than 0.0 has"
            "not been implemented for ADF layers yet."
        )
    if max_value is not None:
        raise NotImplementedError(
            "The relu activation function with max_value other than `None` "
            "has not been implemented for ADF layers yet."
        )
    if not threshold == 0.0:
        raise NotImplementedError(
            "The relu activation function with threshold other than 0.0 has"
            "not been implemented for ADF layers yet."
        )
    if not (isinstance(x, list) and len(x) == 2):
        raise ValueError(
            "The relu activation function expects a list of "
            "exactly two input tensors, but got: %s" % x
        )
    means, covariances = x
    means_shape = means.get_shape().as_list()
    means_rank = len(means_shape)
    cov_shape = covariances.get_shape().as_list()
    cov_rank = len(cov_shape)
    EPS = K.cast(K.epsilon(), covariances.dtype)
    # treat inputs according to rank and mode
    if means_rank == 1:
        # if rank(mean)=1, treat as single vector, no reshapes necessary
        pass
    elif means_rank == 2:
        # if rank(mean)=2, treat as batch of vectors, no reshapes necessary
        pass
    else:
        # if rank(mean)=2+n, treat as batch of rank=n tensors + channels
        means = K.reshape(means, [-1] + [K.prod(means_shape[1:])],)
        if mode == "diag":
            covariances = K.reshape(
                covariances, [-1] + [K.prod(cov_shape[1:])],
            )
        elif mode == "half":
            covariances = K.reshape(
                covariances, [-1] + [cov_shape[1]] + [K.prod(cov_shape[2:])],
            )
        elif mode == "full":
            covariances = K.reshape(
                covariances,
                [-1]
                + [K.prod(cov_shape[1 : (cov_rank - 1) // 2 + 1])]
                + [K.prod(cov_shape[(cov_rank - 1) // 2 + 1 :])],
            )
    if mode == "diag":
        covariances = covariances + EPS
        std = K.sqrt(covariances)
        div = means / std
        gd_div = _gauss_density(div)
        gc_div = _gauss_cumulative(div)
        new_means = K.maximum(
            means,
            K.maximum(K.zeros_like(means), means * gc_div + std * gd_div),
        )
        new_covariances = (
            K.square(means) * gc_div
            + covariances * gc_div
            + means * std * gd_div
            - K.square(new_means)
        )
        new_covariances = K.maximum(
            K.zeros_like(new_covariances), new_covariances
        )
    elif mode == "half":
        variances = K.sum(K.square(covariances), axis=1) + EPS
        std = K.sqrt(variances)
        div = means / std
        gd_div = _gauss_density(div)
        gc_div = _gauss_cumulative(div)
        new_means = K.maximum(
            means,
            K.maximum(K.zeros_like(means), means * gc_div + std * gd_div),
        )
        gc_div = K.expand_dims(gc_div, 1)
        new_covariances = covariances * gc_div
    elif mode == "full":
        variances = array_ops.matrix_diag_part(covariances) + EPS
        std = K.sqrt(variances)
        div = means / std
        gd_div = _gauss_density(div)
        gc_div = _gauss_cumulative(div)
        new_means = K.maximum(
            means,
            K.maximum(K.zeros_like(means), means * gc_div + std * gd_div),
        )
        gc_div = K.expand_dims(gc_div, 1)
        new_covariances = covariances * gc_div
        new_covariances = K.permute_dimensions(new_covariances, [0, 2, 1])
        new_covariances = new_covariances * gc_div
        new_covariances = K.permute_dimensions(new_covariances, [0, 2, 1])
    # undo reshapes if necessary
    new_means = K.reshape(new_means, [-1] + means_shape[1:])
    new_covariances = K.reshape(new_covariances, [-1] + cov_shape[1:])
    return [new_means, new_covariances]
Example #20
def _tf_prod(x, axis=None, keepdims=False):
    return K.prod(x, axis=axis, keepdims=keepdims)
Example #21
    def call(self, inputs, training=None):
        if self.quant_mode not in [None, 'extrinsic', 'hybrid', 'intrinsic']:
            raise ValueError(
                'Invalid quantization mode. The \'quant_mode\' argument must be one of \'extrinsic\' , \'intrinsic\' , \'hybrid\' or None.'
            )

        if isinstance(self.quantizer, list) and len(self.quantizer) == 3:
            quantizer_input = self.quantizer[0]
            quantizer_weight = self.quantizer[1]
            quantizer_output = self.quantizer[2]
        else:
            quantizer_input = self.quantizer
            quantizer_weight = self.quantizer
            quantizer_output = self.quantizer

        input_shape = K.int_shape(inputs)
        # Prepare broadcasting shape.
        ndim = len(input_shape)
        reduction_axes = list(range(len(input_shape)))
        del reduction_axes[self.axis]
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis]

        # Determines whether broadcasting is needed.
        needs_broadcasting = (sorted(reduction_axes) != list(range(ndim))[:-1])

        def normalize_inference():
            if needs_broadcasting:
                # In this case we must explicitly broadcast all parameters.
                broadcast_moving_mean = K.reshape(self.moving_mean,
                                                  broadcast_shape)
                broadcast_moving_variance = K.reshape(self.moving_variance,
                                                      broadcast_shape)
                if self.center:
                    broadcast_beta = K.reshape(self.beta, broadcast_shape)
                else:
                    broadcast_beta = None
                if self.scale:
                    broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
                else:
                    broadcast_gamma = None

                if self.quant_mode in ['hybrid', 'intrinsic']:
                    broadcast_moving_mean = quantizer_weight.quantize(
                        broadcast_moving_mean)
                    broadcast_moving_variance = quantizer_weight.quantize(
                        broadcast_moving_variance)
                    if self.center:
                        broadcast_beta = quantizer_weight.quantize(
                            broadcast_beta)
                    if self.scale:
                        broadcast_gamma = quantizer_weight.quantize(
                            broadcast_gamma)

                if self.quant_mode in ['hybrid', 'intrinsic']:
                    quantized_inputs = quantizer_input.quantize(inputs)

                if self.quant_mode == 'intrinsic':
                    return QuantizedBatchNormalizationCore(
                        quantized_inputs, broadcast_moving_mean,
                        broadcast_moving_variance, broadcast_beta,
                        broadcast_gamma, self.epsilon, quantizer_output)
                elif self.quant_mode == 'hybrid':
                    output = K.batch_normalization(quantized_inputs,
                                                   broadcast_moving_mean,
                                                   broadcast_moving_variance,
                                                   broadcast_beta,
                                                   broadcast_gamma,
                                                   axis=self.axis,
                                                   epsilon=self.epsilon)
                    return quantizer_output.quantize(output)
                elif self.quant_mode == 'extrinsic':
                    output = K.batch_normalization(inputs,
                                                   broadcast_moving_mean,
                                                   broadcast_moving_variance,
                                                   broadcast_beta,
                                                   broadcast_gamma,
                                                   axis=self.axis,
                                                   epsilon=self.epsilon)
                    return quantizer_output.quantize(output)
                elif self.quant_mode is None:
                    return K.batch_normalization(inputs,
                                                 broadcast_moving_mean,
                                                 broadcast_moving_variance,
                                                 broadcast_beta,
                                                 broadcast_gamma,
                                                 axis=self.axis,
                                                 epsilon=self.epsilon)

            else:
                if self.quant_mode in ['hybrid', 'intrinsic']:
                    moving_mean = quantizer_weight.quantize(self.moving_mean)
                    moving_variance = quantizer_weight.quantize(
                        self.moving_variance)
                    if self.center:
                        beta = quantizer_weight.quantize(self.beta)
                    else:
                        beta = self.beta
                    if self.scale:
                        gamma = quantizer_weight.quantize(self.gamma)
                    else:
                        gamma = self.gamma

                if self.quant_mode in ['hybrid', 'intrinsic']:
                    quantized_inputs = quantizer_input.quantize(inputs)

                if self.quant_mode == 'intrinsic':
                    return QuantizedBatchNormalizationCore(
                        quantized_inputs, moving_mean, moving_variance, beta,
                        gamma, self.epsilon, quantizer_output)
                elif self.quant_mode == 'hybrid':
                    output = K.batch_normalization(quantized_inputs,
                                                   moving_mean,
                                                   moving_variance,
                                                   beta,
                                                   gamma,
                                                   axis=self.axis,
                                                   epsilon=self.epsilon)
                    return quantizer_output.quantize(output)
                elif self.quant_mode == 'extrinsic':
                    output = K.batch_normalization(inputs,
                                                   self.moving_mean,
                                                   self.moving_variance,
                                                   self.beta,
                                                   self.gamma,
                                                   axis=self.axis,
                                                   epsilon=self.epsilon)
                    return quantizer_output.quantize(output)
                elif self.quant_mode is None:
                    return K.batch_normalization(inputs,
                                                 self.moving_mean,
                                                 self.moving_variance,
                                                 self.beta,
                                                 self.gamma,
                                                 axis=self.axis,
                                                 epsilon=self.epsilon)

        # If the learning phase is *static* and set to inference:
        if not training:
            return normalize_inference()

        # If the learning is either dynamic, or set to training:
        normed_training, mean, variance = K.normalize_batch_in_training(
            inputs,
            self.gamma,
            self.beta,
            reduction_axes,
            epsilon=self.epsilon)

        if K.backend() != 'cntk':
            sample_size = K.prod(
                [K.shape(inputs)[axis] for axis in reduction_axes])
            sample_size = K.cast(sample_size, dtype=K.dtype(inputs))

            # sample variance - unbiased estimator of population variance
            variance *= sample_size / (sample_size - (1.0 + self.epsilon))

        self.add_update([
            K.moving_average_update(self.moving_mean, mean, self.momentum),
            K.moving_average_update(self.moving_variance, variance,
                                    self.momentum)
        ], inputs)

        # Pick the normalized form corresponding to the training phase.
        return K.in_train_phase(normed_training,
                                normalize_inference,
                                training=training)
Example #22
def to1d(t):
    entries = backend.prod(t.shape)
    return backend.reshape(t, (entries, ))
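One caveat (my addition): backend.prod(t.shape) only works when the static shape is fully defined. For tensors with dynamic dimensions, a -1 reshape sidesteps the problem; a minimal sketch:

from tensorflow.keras import backend

def to1d_dynamic(t):
    # shape-agnostic flattening; works even when t.shape contains None
    return backend.reshape(t, (-1,))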
Example #23
    def dice(self, y_true, y_pred):
        """
        compute dice for given Tensors

        """
        if self.crop_indices is not None:
            y_true = utils.batch_gather(y_true, self.crop_indices)
            y_pred = utils.batch_gather(y_pred, self.crop_indices)

        if self.input_type == 'prob':
            # We assume that y_true is probabilistic, but just in case:
            y_true /= K.sum(y_true, axis=-1, keepdims=True)
            y_true = K.clip(y_true, K.epsilon(), 1)

            # make sure pred is a probability
            y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
            y_pred = K.clip(y_pred, K.epsilon(), 1)

        # Prepare the volumes to operate on
        # If we're doing 'hard' Dice, then we will prepare one-hot-based matrices of size
        # [batch_size, nb_voxels, nb_labels], where for each voxel in each batch entry,
        # the entries are either 0 or 1
        if self.dice_type == 'hard':

            # if given predicted probability, transform to "hard max"
            if self.input_type == 'prob':
                if self.approx_hard_max:
                    y_pred_op = _hard_max(y_pred, axis=-1)
                    y_true_op = _hard_max(y_true, axis=-1)
                else:
                    y_pred_op = _label_to_one_hot(K.argmax(y_pred, axis=-1),
                                                  self.nb_labels)
                    y_true_op = _label_to_one_hot(K.argmax(y_true, axis=-1),
                                                  self.nb_labels)

            # if given predicted label, transform to one hot notation
            else:
                assert self.input_type == 'max_label'
                y_pred_op = _label_to_one_hot(y_pred, self.nb_labels)
                y_true_op = _label_to_one_hot(y_true, self.nb_labels)

        # If we're doing soft Dice, require prob output, and the data already is as we need it
        # [batch_size, nb_voxels, nb_labels]
        else:
            assert self.input_type == 'prob', "cannot do soft dice with max_label input"
            y_pred_op = y_pred
            y_true_op = y_true

        # reshape data to [batch_size, nb_voxels, nb_labels]
        flat_shape = tf.stack(
            [-1, K.prod(K.shape(y_true_op)[1:-1]),
             K.shape(y_true_op)[-1]])
        y_true_op = K.reshape(y_true_op, flat_shape)
        y_pred_op = K.reshape(y_pred_op, flat_shape)

        # compute dice for each entry in batch.
        # dice will now be [batch_size, nb_labels]
        top = 2 * K.sum(y_true_op * y_pred_op, 1)
        bottom = K.sum(K.square(y_true_op), 1) + K.sum(K.square(y_pred_op), 1)
        # make sure we have no 0s on the bottom. K.epsilon()
        bottom = K.maximum(bottom, self.area_reg)
        return top / bottom
Example #24
def deep_dream_example():
	base_image_filepath = './Machu_Picchu.jpg'  # Path to the image to transform.
	result_prefix = './deep_dream_results'  # Prefix for the saved results.

	# These are the names of the layers for which we try to maximize activation,
	# as well as their weight in the final loss we try to maximize.
	# You can tweak these settings to obtain new visual effects.
	settings = {
		'features': {
			'mixed2': 0.2,
			'mixed3': 0.5,
			'mixed4': 2.,
			'mixed5': 1.5,
		},
	}

	K.set_learning_phase(0)

	# Build the InceptionV3 network with our placeholder.
	# The model will be loaded with pre-trained ImageNet weights.
	model = inception_v3.InceptionV3(weights='imagenet', include_top=False)
	dream = model.input
	print('Model loaded.')

	# Get the symbolic outputs of each "key" layer (we gave them unique names).
	layer_dict = dict([(layer.name, layer) for layer in model.layers])

	# Define the loss.
	loss = K.variable(0.)
	for layer_name in settings['features']:
		# Add the L2 norm of the features of a layer to the loss.
		if layer_name not in layer_dict:
			raise ValueError('Layer ' + layer_name + ' not found in model.')
		coeff = settings['features'][layer_name]
		x = layer_dict[layer_name].output
		# We avoid border artifacts by only involving non-border pixels in the loss.
		scaling = K.prod(K.cast(K.shape(x), 'float32'))
		if K.image_data_format() == 'channels_first':
			loss = loss + coeff * K.sum(K.square(x[:, :, 2: -2, 2: -2])) / scaling
		else:
			loss = loss + coeff * K.sum(K.square(x[:, 2: -2, 2: -2, :])) / scaling

	# Compute the gradients of the dream wrt the loss.
	grads = K.gradients(loss, dream)[0]
	# Normalize gradients.
	grads /= K.maximum(K.mean(K.abs(grads)), K.epsilon())

	# Set up function to retrieve the value of the loss and gradients given an input image.
	outputs = [loss, grads]
	fetch_loss_and_grads = K.function([dream], outputs)

	"""Process:

	- Load the original image.
	- Define a number of processing scales (i.e. image shapes), from smallest to largest.
	- Resize the original image to the smallest scale.
	- For every scale, starting with the smallest (i.e. current one):
		- Run gradient ascent
		- Upscale image to the next scale
		- Reinject the detail that was lost at upscaling time
	- Stop when we are back to the original size.

	To obtain the detail lost during upscaling, we simply
	take the original image, shrink it down, upscale it,
	and compare the result to the (resized) original image.
	"""

	# Playing with these hyperparameters will also allow you to achieve new effects.
	step = 0.01  # Gradient ascent step size.
	num_octave = 3  # Number of scales at which to run gradient ascent.
	octave_scale = 1.4  # Size ratio between scales.
	iterations = 20  # Number of ascent steps per scale.
	max_loss = 10.

	img = preprocess_image(base_image_filepath)
	if K.image_data_format() == 'channels_first':
		original_shape = img.shape[2:]
	else:
		original_shape = img.shape[1:3]
	successive_shapes = [original_shape]
	for i in range(1, num_octave):
		shape = tuple([int(dim / (octave_scale ** i)) for dim in original_shape])
		successive_shapes.append(shape)
	successive_shapes = successive_shapes[::-1]
	original_img = np.copy(img)
	shrunk_original_img = resize_img(img, successive_shapes[0])

	for shape in successive_shapes:
		print('Processing image shape', shape)
		img = resize_img(img, shape)
		img = gradient_ascent(img, fetch_loss_and_grads, iterations=iterations, step=step, max_loss=max_loss)
		upscaled_shrunk_original_img = resize_img(shrunk_original_img, shape)
		same_size_original = resize_img(original_img, shape)
		lost_detail = same_size_original - upscaled_shrunk_original_img

		img += lost_detail
		shrunk_original_img = resize_img(original_img, shape)

	save_img(result_prefix + '.png', deprocess_image(np.copy(img)))
Example #25
    def call(self, inputs, training=None):
        input_shape = K.int_shape(inputs)
        # Prepare broadcasting shape.
        ndim = len(input_shape)
        reduction_axes = list(range(len(input_shape)))
        del reduction_axes[self.axis]
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis]

        # Determines whether broadcasting is needed.
        needs_broadcasting = (sorted(reduction_axes) != list(range(ndim))[:-1])

        def normalize_inference():
            if needs_broadcasting:
                # In this case we must explicitly broadcast all parameters.
                broadcast_moving_mean = K.reshape(self.moving_mean,
                                                  broadcast_shape)
                broadcast_moving_variance = K.reshape(self.moving_variance,
                                                      broadcast_shape)
                if self.center:
                    broadcast_beta = K.reshape(self.beta, broadcast_shape)
                else:
                    broadcast_beta = None
                if self.scale:
                    broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
                else:
                    broadcast_gamma = None
                return tf.nn.batch_normalization(  #K.batch_normalization(
                    inputs,
                    broadcast_moving_mean,
                    broadcast_moving_variance,
                    broadcast_beta,
                    broadcast_gamma,
                    #axis=self.axis,
                    self.epsilon)  #epsilon=self.epsilon)
            else:
                return tf.nn.batch_normalization(  #K.batch_normalization(
                    inputs,
                    self.moving_mean,
                    self.moving_variance,
                    self.beta,
                    self.gamma,
                    #axis=self.axis,
                    self.epsilon)  #epsilon=self.epsilon)

        # If the learning phase is *static* and set to inference:
        if training in {0, False}:
            return normalize_inference()

        # If the learning is either dynamic, or set to training:
        normed_training, mean, variance = _regular_normalize_batch_in_training(  #K.normalize_batch_in_training(
            inputs,
            self.gamma,
            self.beta,
            reduction_axes,
            epsilon=self.epsilon)

        if K.backend() != 'cntk':
            sample_size = K.prod(
                [K.shape(inputs)[axis] for axis in reduction_axes])
            sample_size = K.cast(sample_size, dtype=K.dtype(inputs))

            # sample variance - unbiased estimator of population variance
            variance *= sample_size / (sample_size - (1.0 + self.epsilon))

        self.add_update([
            K.moving_average_update(self.moving_mean, mean, self.momentum),
            K.moving_average_update(self.moving_variance, variance,
                                    self.momentum)
        ], inputs)

        # Pick the normalized form corresponding to the training phase.
        return K.in_train_phase(normed_training,
                                normalize_inference,
                                training=training)
Example #26
def top_1_categorical_accuracy(y_true, y_pred):
    s = K.prod(K.shape(y_true)[1:])
    y_true = K.reshape(y_true, [-1, s])
    y_pred = K.reshape(y_pred, [-1, s])
    return metrics.top_k_categorical_accuracy(y_true, y_pred, k=1)
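A note on the reshape (my addition): K.prod(K.shape(y_true)[1:]) collapses every non-batch dimension into one categorical axis, so top-1 accuracy is judged on the joint argmax over the whole (e.g. position x class) volume rather than per position.

import numpy as np

y = np.zeros((1, 2, 3), dtype="float32")  # e.g. 2 positions x 3 classes
s = np.prod(y.shape[1:])
print(y.reshape(-1, s).shape)  # (1, 6): a single 6-way categorical problem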
Example #27
    def call(self, x, **kwargs):
        channel_voxel = Reshape(
            (K.prod(self.myinput_shape[1:-1]), self.myinput_shape[-1]))(x)
        result = K.dot(channel_voxel, self.kernel)
        return Reshape(self.myinput_shape[1:-1] +
                       (self.output_channels, ))(result)
Example #28
def to2d(t):
    s = backend.prod(backend.shape(t)[1:])
    t = backend.reshape(t, [-1, s])
    return t
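A tiny usage sketch (my addition) showing that to2d behaves like a Flatten layer, collapsing all non-batch dimensions:

import tensorflow as tf
from tensorflow.keras import backend

t = tf.zeros((2, 4, 4, 3))
print(to2d(t).shape)  # (2, 48)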