Example #1
        def masked():
            # pick cval
            beta = K.sigmoid(self.beta)
            cval = self.min_value * beta + self.max_value * (1 - beta)

            # determine a mask
            ratio = K.sigmoid(self.ratio)

            size = K.random_uniform([], maxval=0.2, dtype='float32')
            offset = K.random_uniform([], maxval=1 - size, dtype='float32')
            '''
            ratio = K.concatenate([self.ratio, [0.]])
            ratio = ratio + K.random_normal([3,], dtype='float32')
            ratio = K.softmax(ratio)
            '''
            mask = K.arange(0., 1., 1 / freq, dtype='float32')
            ge = K.cast(K.greater_equal(mask, offset), dtype='float32')
            le = K.cast(K.less_equal(mask, size + offset), dtype='float32')

            mask = 1 - ge * le
            mask = K.reshape(mask, broadcast_shape)

            outputs = inputs * mask + cval * (1 - mask)

            return outputs
Example #2
 def random_combine_images(X):
     batch_size = K.shape(X[0])[0]
     if self.dimensionality == 2:
         alpha = K.random_uniform(shape=(batch_size, 1, 1, 1))
     else:
         alpha = K.random_uniform(shape=(batch_size, 1, 1, 1, 1))
     return alpha * X[0] + (1.0 - alpha) * X[1]
Example #3
    def call(self,
             inputs: tf.Tensor,
             mask: Optional[tf.Tensor] = None,
             n_symbols: Optional[int] = None):
        """
        Args:
            inputs (tf.Tensor[ndims=2, int]): Tensor of values to mask
            mask (Optional[tf.Tensor[bool]]): Locations in the inputs that are valid
                                                     (i.e. not padding, start tokens, etc.)
        Returns:
            masked_inputs (tf.Tensor[ndims=2, int]): Tensor of masked values
            bert_mask: Locations in the input that were masked
        """

        discrete = inputs.dtype not in [tf.float16, tf.float32, tf.float64]
        mask_shape = K.shape(inputs) if discrete else K.shape(inputs)[:-1]

        if n_symbols is None:
            n_symbols = self.n_symbols

        bert_mask = K.random_uniform(mask_shape) < self.percentage

        if mask is not None:
            bert_mask &= mask

        if not discrete:
            bert_mask = tf.expand_dims(bert_mask, -1)

        masked_inputs = inputs * \
            tf.cast(~bert_mask, inputs.dtype)  # type: ignore

        token_bert_mask = K.random_uniform(K.shape(bert_mask)) < 0.8
        random_bert_mask = (K.random_uniform(K.shape(bert_mask)) <
                            0.1) & ~token_bert_mask
        true_bert_mask = ~token_bert_mask & ~random_bert_mask

        token_bert_mask = tf.cast(token_bert_mask & bert_mask, inputs.dtype)
        random_bert_mask = tf.cast(random_bert_mask & bert_mask, inputs.dtype)
        true_bert_mask = tf.cast(true_bert_mask & bert_mask, inputs.dtype)

        masked_inputs += self.mask_token * token_bert_mask  # type: ignore

        if discrete:
            assert n_symbols is not None
            masked_inputs += K.random_uniform(
                K.shape(bert_mask), 0, n_symbols,
                dtype=inputs.dtype) * random_bert_mask
        else:
            masked_inputs += (K.random_normal(K.shape(masked_inputs)) +
                              inputs) * random_bert_mask

        masked_inputs += inputs * true_bert_mask

        return masked_inputs, bert_mask
Example #4
 def call(self, inputs):
     # Implement Eq.(9)
     perturbed_kernel = self.kernel + \
         self.sigma_kernel * K.random_uniform(shape=self.kernel_shape)
     outputs = K.dot(inputs, perturbed_kernel)
     if self.use_bias:
         perturbed_bias = self.bias + \
             self.sigma_bias * K.random_uniform(shape=self.bias_shape)
         outputs = K.bias_add(outputs, perturbed_bias)
     if self.activation is not None:
         outputs = self.activation(outputs)
     return outputs
Example #5
    def call(self, inputs):
        assert inputs[0].shape[1:] == inputs[1].shape[
            1:], "Inputs must have same shape!"

        if len(inputs[0].shape[1:]) == 4:
            weights = K.random_uniform((self.batch_size, 1, 1, 1, 1))
        elif len(inputs[0].shape[1:]) == 3:
            weights = K.random_uniform((self.batch_size, 1, 1, 1))
        else:
            raise ValueError("Wrong input shape.")

        return (weights * inputs[0]) + ((1 - weights) * inputs[1])
Example #6
    def train_mask(self):
        W2 = self.kernel * self.kernel
        n_filter_weights = np.product(self.kernel_size)
        if self.hamiltonian == 'unstructured':
            Qp = tfp.stats.percentile(K.flatten(W2), self.p*100, interpolation='linear')
            P0 = 1/(1+K.exp(self.beta*(W2-Qp)))
            R = K.random_uniform(K.shape(P0))
            return K.cast(R > P0, 'float32')
        elif self.hamiltonian == 'kernel':
            # Prune kernels by finding A and B for hamiltonian H(x) = x^TAx +
            # b^Tx, and sampling directly for each kernel
            flat_W2 = K.reshape(W2, (n_filter_weights, self.n_channels, self.filters))
            Qp = tfp.stats.percentile(K.sum(flat_W2,axis=0)/n_filter_weights, self.p*100, interpolation='linear')
            b = Qp - flat_W2
            A = -self.c * K.constant(np.ones((n_filter_weights, n_filter_weights, self.n_channels, self.filters)))
            A_mask = np.ones((n_filter_weights,n_filter_weights))
            np.fill_diagonal(A_mask, False)
            A = A * A_mask[:,:,None,None]
            M = K.reshape(tf_sample_gibbs(A, b, self.beta, n_filter_weights), K.shape(W2))
            return (M+1)/2
        elif self.hamiltonian == 'filter':
            # Prune filters with chromatic gibbs sampling
            flat_W2 = K.reshape(W2, (n_filter_weights, self.n_channels, self.filters))
            Qp = tfp.stats.percentile(tf.reduce_sum(flat_W2,axis=[0,1])/n_filter_weights/self.n_channels, self.p*100, interpolation='linear')
            b = Qp - flat_W2
            A = -self.c * K.constant(np.ones((n_filter_weights, n_filter_weights, self.n_channels, self.filters)))
            A_mask = np.ones((n_filter_weights,n_filter_weights))
            np.fill_diagonal(A_mask, False)
            A = A * A_mask[:,:,None,None]

            filt_avgs = tf.reduce_sum(flat_W2,axis=[0,1])/n_filter_weights/self.n_channels
            x_cvg = K.cast(filt_avgs > Qp, 'float32')
            colour_b = b - self.c * (self.n_channels//2) * n_filter_weights * (x_cvg*2-1)[None,None,:]

            split = self.n_channels//2
            colour_b = colour_b[:,0:split,:]
            for i in range(self.mcmc_steps):
                P0 = 1/(1+K.exp(-self.beta*colour_b))
                R = K.random_uniform(K.shape(P0))
                M0 = K.cast(R > P0, 'float32')*2-1
                filter_sums = tf.reduce_sum(M0, axis=[0,1])
                colour_b = b[:,split:,:] - self.c*filter_sums[None,None,:]
                P0 = 1/(1+K.exp(-self.beta*colour_b))
                R = K.random_uniform(K.shape(P0))
                M1 = K.cast(R > P0, 'float32')*2-1
                filter_sums = tf.reduce_sum(M1, axis=[0,1])
                colour_b = b[:,0:split,:] - self.c*filter_sums[None,None,:]
            M = K.reshape(K.concatenate((M0,M1), axis=1), K.shape(W2))
            return (M+1)/2
Example #7
    def concrete_dropout(self, x):
        """Concrete dropout.
        Args:
          x: input tensor.
        Returns:
          A tensor with the relaxed (soft) dropout mask applied to x.
        """
        eps = K.cast_to_floatx(K.epsilon())

        # Uniform noise
        u = K.random_uniform(shape=K.shape(x))

        # the Concrete distribution relaxation
        z_tilde = (K.log(self.p + eps) - K.log(1.0 - self.p + eps) +
                   K.log(u + eps) - K.log(1.0 - u + eps))
        z_tilde = K.sigmoid(z_tilde / self.temperature)

        # Random (keep) tensor: z_tilde is the soft drop indicator
        random_tensor = 1.0 - z_tilde

        retain_prob = 1.0 - self.p

        x *= random_tensor
        x /= retain_prob

        return x
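The relaxation above can be sanity-checked outside the layer. A standalone numpy sketch (an illustration, not part of the source): for a small temperature the soft mask z_tilde behaves like a Bernoulli(p) drop indicator.

import numpy as np

p, temperature, eps = 0.3, 0.1, 1e-7
u = np.random.uniform(size=100_000)
z_tilde = 1.0 / (1.0 + np.exp(-(np.log(p + eps) - np.log(1.0 - p + eps) +
                                np.log(u + eps) - np.log(1.0 - u + eps)) / temperature))
print(z_tilde.mean())  # close to p = 0.3: roughly a fraction p of units is (softly) dropped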
Example #8
    def call(self, inputs, training=None):
        """
		The function that takes the inputs of the layer and conducts the
		Dense layer multiplication with concrete dropout.

		Parameters:
			inputs (tf.Keras.Layer): The inputs to the Dense layer.
			training (bool): A required input for call. Setting training to
				true or false does nothing because concrete dropout behaves the
				same way in both cases.

		Returns:
			(tf.Keras.Layer): The output of the Dense layer.
		"""
        # Small epsilon parameter needed for stable optimization
        eps = K.cast_to_floatx(K.epsilon())

        # Build the random tensor for dropout from uniform noise. This
        # formulation allows for a derivative with respect to p.
        input_shape = K.shape(inputs)
        noise_shape = (input_shape[0], 1, 1, input_shape[3])
        unif_noise = K.random_uniform(shape=noise_shape, seed=self.random_seed)
        drop_prob = (K.log(K.sigmoid(self.p_logit) + eps) -
                     K.log(1.0 - K.sigmoid(self.p_logit) + eps) +
                     K.log(unif_noise + eps) - K.log(1.0 - unif_noise + eps))
        drop_prob = K.sigmoid(drop_prob / self.temp)
        inputs *= (1.0 - drop_prob)
        inputs /= (1.0 - K.sigmoid(self.p_logit))

        # Now just carry out the basic operations of a Dense layer.
        return super(SpatialConcreteDropout, self).call(inputs)
Example #9
def evaluate_lip_const(model: Model, x, eps=1e-4, seed=None):
    """
    Evaluate the Lipschitz constant of a model, with the naive method.
    Please note that the estimation of the lipschitz constant is done locally around
    input sample. This may not correctly estimate the behaviour in the whole domain.

    Args:
        model: built keras model used to make predictions
        x: inputs used to compute the lipschitz constant
        eps: magnitude of noise to add to input in order to compute the constant
        seed: seed used when generating the noise ( can be set to None )

    Returns:
        the empirically evaluated lipschitz constant. The computation might also be
        inaccurate in high dimensional space.

    """
    y_pred = model.predict(x)
    # x = np.repeat(x, 100, 0)
    # y_pred = np.repeat(y_pred, 100, 0)
    x_var = x + K.random_uniform(
        shape=x.shape, minval=eps * 0.25, maxval=eps, seed=seed
    )
    y_pred_var = model.predict(x_var)
    dx = x - x_var
    dfx = y_pred - y_pred_var
    ndx = K.sqrt(K.sum(K.square(dx), axis=range(1, len(x.shape))))
    ndfx = K.sqrt(K.sum(K.square(dfx), axis=range(1, len(y_pred.shape))))
    lip_cst = K.max(ndfx / ndx)
    print("lip cst: %.3f" % lip_cst)
    return lip_cst
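A hedged usage sketch for the function above; the toy model and data are assumptions made for illustration, not taken from the source.

import numpy as np
from tensorflow.keras import layers, Sequential

model = Sequential([layers.Dense(16, activation='relu', input_shape=(4,)),
                    layers.Dense(1)])
x = np.random.rand(32, 4).astype('float32')
lip = evaluate_lip_const(model, x, eps=1e-4, seed=0)  # prints and returns the local estimate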
Example #10
        def func(val):
            val_name = val.op.name
            if '/W' in val_name and 'conv1' not in val_name and 'fct' not in val_name:
                name_scope, device_scope = val.op.name.split('/W')

                with tf.variable_scope(name_scope, reuse=tf.AUTO_REUSE):
                    if eval(self.quantizer_config['W_opts']['fix_max']) == True:
                        max_x = tf.stop_gradient(
                            tf.get_variable('maxW', shape=(), initializer=tf.ones_initializer, dtype=tf.float32))
                        max_x *= float(self.quantizer_config['W_opts']['max_scale'])
                    else:
                        max_x = tf.stop_gradient(tf.reduce_max(tf.abs(val)))
                    mask = tf.get_variable('maskW', shape=val.shape, initializer=tf.zeros_initializer, dtype=tf.float32)

                probThreshold = (1 + gamma * get_global_step_var()) ** -1

                # Determine which filters shall be updated this iteration
                random_number = K.random_uniform(shape=(1, 1, 1, int(mask.shape[-1])))
                random_number1 = K.cast(random_number < probThreshold, dtype='float32')
                random_number2 = K.cast(random_number < (probThreshold * 0.1), dtype='float32')

                thresh = max_x * ratio * 0.999

                # Incorporate hysteresis into the threshold
                alpha = thresh
                beta = 1.2 * thresh

                # Update the significant weight mask by applying the threshold to the unmasked weights
                abs_kernel = K.abs(x=val)
                new_mask = mask - K.cast(abs_kernel < alpha, dtype='float32') * random_number1
                new_mask = new_mask + K.cast(abs_kernel > beta, dtype='float32') * random_number2
                new_mask = K.clip(x=new_mask, min_value=0., max_value=1.)
                return tf.assign(mask, new_mask, use_locking=False).op
Example #11
    def train_discriminator(self, real_images, conditions=None):
        with tf.GradientTape() as discriminator_tape, tf.GradientTape() as gp_tape:
            discriminator_tape.watch(real_images)
            gp_tape.watch(real_images)

            noise = tf.random.normal(shape=(tf.shape(real_images)[0], self.latent_dim), mean=0, stddev=1)
            if self.is_conditional:

                fake_images = self.generator([noise, conditions])
                fake_images = K.clip(fake_images + self.noise_multiplier * tf.random.normal(tf.shape(fake_images), 0, 1), -1, 1)
                real_images = K.clip(real_images + self.noise_multiplier * tf.random.normal(tf.shape(real_images), 0, 1), -1, 1)
                discriminator_output_real = self.discriminator([real_images, conditions])
                discriminator_output_fake = self.discriminator([fake_images, conditions])
            else:
                fake_images = self.generator(noise)

                discriminator_output_real = self.discriminator(real_images)
                discriminator_output_fake = self.discriminator(fake_images)

            # the gradient-penalty term needs the interpolated images in both branches
            epsilon = K.random_uniform(shape=[tf.shape(real_images)[0], 1, 1, 1], minval=0, maxval=1)
            interpolated_images = epsilon * real_images + (1 - epsilon) * fake_images
            if self.is_conditional:
                discriminator_output_interpolated_images = self.discriminator([interpolated_images, conditions])
            else:
                discriminator_output_interpolated_images = self.discriminator(interpolated_images)

            dis_gp = gp_tape.gradient(discriminator_output_interpolated_images, interpolated_images)
            dis_gp = K.sqrt(K.sum(K.square(dis_gp), axis=[1, 2, 3]))  # norm of gradients for every image
            dis_gp = K.mean(K.square(dis_gp - 1 * K.ones_like(dis_gp)))  # force norm of every gradient to 1

            discriminator_loss = self.dis_loss(discriminator_output_real, discriminator_output_fake) + self.gp_weight * dis_gp
        discriminator_gradients = discriminator_tape.gradient(discriminator_loss, self.discriminator.trainable_weights)
        self.discriminator_optimizer.apply_gradients(zip(discriminator_gradients, self.discriminator.trainable_weights))

        return discriminator_loss, dis_gp
Example #12
 def call(self, x, training=None):
     mask = K.random_uniform(K.shape(x)[:-1], 0.0, 1.0)
     mask = K.expand_dims(mask, -1)
     mask = K.repeat_elements(mask, K.int_shape(x)[-1], -1)
     rand_x = K.switch(K.less(mask, self.rate),
                       K.random_normal(K.shape(x), 0.0, 1.0), x)
     return K.in_train_phase(rand_x, x, training=training)
Example #13
def dlt_coefs(vandermonde, weights=None, returned_type: str = "numpy"):
    """
    compute coefficients of a conic through direct linear mapping.
    vandermonde and weights arguments should be or both np.ndarrays or both tf.tensors


    :param vandermonde: (number of points, number of monomials),np.array or tf.tensor. each row contains monomials (e.g. for a conic: x^2 xy y^2 x y 1) of the corresponding point
    :param weights: (number of points,) np.array or tf.tensor. probability of belonging to the model for each row in the vandermonde matrix.
                    if all points belong to the model don't specify its value.
    :param returned_type: str, "numpy" returns an np.ndarray; "tensorflow" returns a tf.tensor
    :return: np.ndarray or tf.tensor (depending on the value of the parameter "type"), (number of monomials,), the coefficients computed via dlt
    """

    # tmp
    """
    if type(vandermonde) is not tf.constant:
        vandermonde = tf.constant(vandermonde)
    
    if type(weights) is not tf.constant and weights is not None:
        weights = tf.constant(weights)
        
    # this line is inserted only because in my project weights are np.ndarrays, and it conflicts with vandermonde (which is a tensor)
    weights = tf.constant(weights)
    """

    npps = weights.shape[0]

    if returned_type == "numpy":

        print("cani")
        print("{}\n{}".format(type(weights), type(vandermonde)))
        # preprocess weights

        weights = tf.cast(weights, dtype=tf.float64)
        weights = weights + K.random_uniform(
            (npps, ), 0, 1e-9, dtype=tf.float64)  # --> (npps, nm)
        weights = tf.linalg.normalize(weights, axis=0)[0]
        weights = tf.linalg.diag(weights)
        weighted_vander = tf.matmul(weights, vandermonde)

        U, S, V = tf.linalg.svd(
            weighted_vander
        )  # note: tf.linalg.svd returns V directly, not V transposed!

    elif returned_type == "tensorflow":
        weights = weights + np.random.normal(0, 1e-9)
        weights = weights / np.linalg.norm(weights)
        weights = np.diag(weights)
        weighted_vander = np.matmul(weights, vandermonde)
        U, S, VT = np.linalg.svd(weighted_vander)
        V = np.transpose(VT)

    else:
        raise Exception("Invalid argument for return_type")

    dlt_coefficients = V[:, -1]
    dlt_coefficients = dlt_coefficients * (
        1.0 / dlt_coefficients[0]
    )  # want the x^2 and y^2 terms to be close to 1
    return dlt_coefficients
Example #14
    def thiscall(self, y, M):
        M_prime = tf.concat([M[1:], tf.expand_dims(M[0], 0)],
                            0)  # (b,24, 24, 128)

        y_exp = Reshape((1, 1, 64))(y)  # b 1,1,64
        y_exp = tf.tile(y_exp, [1, 24, 24, 1])  # b 24,24 64,

        y_M = tf.concat((M, y_exp), -1)  # b 24,24 192
        y_M_prime = tf.concat((M_prime, y_exp), -1)  # b 24,24 192

        Ej = -K.mean(softplus(-self.LocalD(y_M)))
        Em = K.mean(softplus(self.LocalD(y_M_prime)))
        LOCAL = (Em - Ej) * self.beta

        Ej = -K.mean(softplus(-self.GlobalD(y, M)))
        Em = K.mean(softplus(self.GlobalD(y, M_prime)))
        GLOBAL = (Em - Ej) * self.alpha

        prior = K.random_uniform(shape=(K.shape(y)[0], K.shape(y)[1]))

        term_a = K.mean(K.log(self.PriorD(prior)))
        term_b = K.mean(K.log(1.0 - self.PriorD(y)))
        PRIOR = -(term_a + term_b) * self.gamma

        return GLOBAL, LOCAL, PRIOR
Example #15
 def dropped_mask():
     drop_mask = K.switch(
         K.random_uniform(K.shape(inputs)) < self.drop_rate,
         K.ones_like(inputs, K.floatx()),
         K.zeros_like(inputs, K.floatx()),
     )
     return target_mask * drop_mask
Example #16
 def discriminator_train_step(self, batch):
     '''
     A discriminator train step. Returns losses.
     '''
     c = batch[1]
     data = tf.one_hot(batch[0], depth=seq_dim, axis=-1, on_value=1, off_value=0)
     data = tf.cast(data, dtype=tf.dtypes.float32)
     data_size = data.shape[0]
     e_shape = (data_size,)
     for i in data.shape[1:]:
         e_shape = e_shape + (1,)
     z = self.sample_z(data_size)
     with tf.GradientTape() as disc_tape:
         generated_data = self.generator([z,c], training=True)
         real_output, real_label_output = self.discriminator([data,c], training=True)
         fake_output, fake_label_output = self.discriminator([generated_data,c], training=True)
         epsilon = K.random_uniform(e_shape, dtype=tf.dtypes.float32)
         random_weighted_average = (epsilon * data) + ((1 - epsilon) * generated_data)
         # calculate gradient for penalty
         with tf.GradientTape() as norm_tape:
             norm_tape.watch(random_weighted_average)
             average_output = self.discriminator([random_weighted_average,c], training=True)
         gradient = norm_tape.gradient(average_output, random_weighted_average)
         disc_loss, w_loss, ac_loss = self.discriminator_loss(real_output, fake_output, real_label_output, fake_label_output, c, gradient, L=10)
     gradients_of_discriminator = disc_tape.gradient(disc_loss, self.discriminator.trainable_variables)
     self.discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, self.discriminator.trainable_variables))
     return (disc_loss, w_loss, ac_loss)
Example #17
 def call(self, layer, inputs, *args, **kwargs):
     output = K.in_train_phase(
         K.switch(
             K.random_uniform([]) > self.rate,
             layer(inputs, *args, **kwargs), inputs),
         layer(inputs, *args, **kwargs))
     return output
Example #18
    def call(self,
             inputs: tf.Tensor,
             mask: Optional[tf.Tensor] = None):
        """
        Args:
            inputs (tf.Tensor[ndims=2, int]): Tensor of values to mask
            mask (Optional[tf.Tensor[bool]]): Locations in the inputs that are valid
                                                     (i.e. not padding, start tokens, etc.)
        Returns:
            masked_inputs (tf.Tensor[ndims=2, int]): Tensor of masked values
        """

        random_mask = self._generate_bert_mask(inputs)

        if mask is not None:
            random_mask &= mask

        masked_inputs = inputs * tf.cast(~random_mask, inputs.dtype)

        random_mask = tf.cast(random_mask, inputs.dtype)

        masked_inputs += K.random_uniform(
            K.shape(random_mask), 0, self.n_symbols, dtype=inputs.dtype) * random_mask

        return masked_inputs
Example #19
    def spatial_concrete_dropout(self, x):
        '''
        Concrete dropout - used at training time (gradients can be propagated)
        :param x: input
        :return:  approx. dropped out input
        '''
        eps = K.cast_to_floatx(K.epsilon())
        temp = 2. / 3.
        #temp = 0.1

        input_shape = K.shape(x)
        if self.data_format == 'channels_first':
            noise_shape = (input_shape[0], input_shape[1], 1, 1)
        else:
            noise_shape = (input_shape[0], 1, 1, input_shape[3])
        unif_noise = K.random_uniform(shape=noise_shape)

        drop_prob = (K.log(self.p + eps) - K.log(1. - self.p + eps) +
                     K.log(unif_noise + eps) - K.log(1. - unif_noise + eps))
        drop_prob = K.sigmoid(drop_prob / temp)
        random_tensor = 1. - drop_prob

        retain_prob = 1. - self.p
        x *= random_tensor
        x /= retain_prob
        return x
Example #20
    def train_on_batch(self, epoch, iteration, batch):
        self.global_step.assign_add(1)

        loss_d, loss_g = None, None
        with tf.GradientTape() as tape_generator, tf.GradientTape(
        ) as tape_discriminator:

            z = K.random_uniform(
                (K.int_shape(batch)[0], self.config.input_dim))
            generated = self.generator(z, training=True)
            logits_generated = self.discriminator(generated, training=True)
            logits_real = self.discriminator(batch, training=True)

            loss_d = self.config.discriminator_loss_fn(logits_generated,
                                                       logits_real, generated,
                                                       batch,
                                                       self.discriminator)
            if self.global_step.numpy() % self.config.training_ratio == 0:
                loss_g = self.config.generator_loss_fn(logits_generated,
                                                       logits_real, generated,
                                                       batch, self.generator)

        _update(loss_d, self.discriminator, self.optimizer_discriminator,
                tape_discriminator)
        _update(loss_g, self.generator, self.optimizer_generator,
                tape_generator)

        return loss_g, loss_d
Example #21
    def call(self, inputs, **kwargs):
        # Generate random uniform tensor between [1-alpha, 1+alpha] for training and ones tensor for test (ReLU)
        k = K.in_train_phase(
            K.random_uniform(inputs.shape[1:], 1 - self.alpha, 1 + self.alpha),
            K.ones(inputs.shape[1:]))

        return keras.activations.relu(inputs * k)
Example #22
    def call(self, x):
        x1 = self.conv1(x)
        x2 = self.conv2(x)

        # create alpha and beta
        batch_size = backend.shape(x1)[0]
        alpha = backend.random_uniform((batch_size, 1, 1, 1))
        beta = backend.random_uniform((batch_size, 1, 1, 1))

        # shake-shake during training phase
        def x_shake():
            return beta * x1 + (1 - beta) * x2 + backend.stop_gradient((alpha - beta) * x1 + (beta - alpha) * x2)

        # even-even during testing phase
        def x_even():
            return 0.5 * x1 + 0.5 * x2
        return self.activation(backend.in_train_phase(x_shake, x_even))
Example #23
    def call(self, x):
        # unpack x1 and x2
        assert isinstance(x, list)
        x1, x2 = x
        # create alpha and beta
        batch_size = K.shape(x1)[0]
        alpha = K.random_uniform((batch_size, 1, 1, 1))
        beta = K.random_uniform((batch_size, 1, 1, 1))
        # shake-shake during training phase

        def x_shake():
            return beta * x1 + (1 - beta) * x2 + K.stop_gradient((alpha - beta) * x1 + (beta - alpha) * x2)
        # even-even during testing phase

        def x_even():
            return 0.5 * x1 + 0.5 * x2
        return K.in_train_phase(x_shake, x_even)
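The stop_gradient line in the two shake-shake snippets above decouples the forward and backward mixing coefficients. A minimal standalone check (an illustration with assumed scalar inputs, not part of the source) makes this visible: the forward value is mixed with alpha, while the gradients correspond to beta.

import tensorflow as tf

x1, x2 = tf.constant(2.0), tf.constant(4.0)
alpha, beta = tf.constant(0.3), tf.constant(0.8)

with tf.GradientTape() as tape:
    tape.watch([x1, x2])
    y = beta * x1 + (1 - beta) * x2 + tf.stop_gradient((alpha - beta) * x1 + (beta - alpha) * x2)

print(y.numpy())                   # 3.4 == 0.3 * 2 + 0.7 * 4, i.e. the forward pass uses alpha
print(tape.gradient(y, [x1, x2]))  # [0.8, 0.2], i.e. the backward pass uses beta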
Example #24
def sampling(args):
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    thre = K.random_uniform(shape=(batch, 1))  # sampled but never used below
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
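The sampling function above is the usual VAE reparameterization trick. A hedged wiring sketch follows; the toy encoder and its layer sizes are assumptions made for illustration, not part of the source.

from tensorflow.keras import layers, Model

latent_dim = 2
inp = layers.Input(shape=(8,))
h = layers.Dense(16, activation='relu')(inp)
z_mean = layers.Dense(latent_dim)(h)
z_log_var = layers.Dense(latent_dim)(h)
z = layers.Lambda(sampling)([z_mean, z_log_var])   # calls the sampling() defined above
encoder = Model(inp, [z_mean, z_log_var, z])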
Example #25
 def gradient_penalty(self,generated_samples,real_images,half_batch):
     alpha = backend.random_uniform(shape=[half_batch,1,1,1],minval=0.0,maxval=1.0)
     differences = generated_samples - real_images
     interpolates = real_images + (alpha * differences)
     gradients = tf.gradients(self.critic(interpolates),[interpolates])[0]
     slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients),axis=[1,2,3]))
     gradient_p = tf.reduce_mean((slopes-1.)**2)
     return gradient_p
Example #26
 def noiseAndLatentC(self):
     noise = normal([self.batchSize, self.noiseDim - 1])
     rC = random_uniform(shape=(self.batchSize, 1), minval=0, maxval=10, dtype='int32')  # maxval is exclusive, so 10 covers classes 0-9
     cForLoss = to_categorical(rC, num_classes=10) 
     cForLoss = transpose(cForLoss)
     rC = cast(rC, dtype='float')
     noiseAndC = concat([rC, noise], axis=1)
     return (cForLoss, noiseAndC)
Example #27
 def call(self, inputs: tf.Tensor, delay: tf.Tensor = None) -> tf.Tensor:
     debug_tensor(LOGGER, inputs, 'Del.in')
     if delay is None:
         delay: tf.Tensor = K.random_uniform([self._taps], -1.0, 1.0)
     debug_tensor(LOGGER, delay, 'Del.del')
     result: tf.Tensor = conv1d(inputs, delay)
     debug_tensor(LOGGER, result, 'Del.rslt')
     return result
Example #28
    def test_Dense(self):
        inputs = K.random_uniform([2, 16])

        model = Sequential([Dense(32, input_shape=K.int_shape(inputs)[1:])])
        self.assertEqual(model.output_shape, (None, 32))

        outputs = model(inputs)
        self.assertEqual(K.int_shape(outputs), (2, 32))
Example #29
def sampling_concrete(alpha, temperature=0.67):
    batch = K.shape(alpha)[0]
    dim = K.int_shape(alpha)[1]
    uniform = K.random_uniform(shape=(batch, dim))
    gumbel = -K.log(-K.log(uniform + EPSILON) + EPSILON)
    fenshi = tf.reduce_logsumexp((K.log(alpha + EPSILON) + gumbel) / temperature, 1, keepdims=True)
    logit = (K.log(alpha + EPSILON) + gumbel) / temperature - fenshi
    return logit
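A hedged usage sketch for sampling_concrete, assuming TF 2.x eager execution; EPSILON is the small module-level constant the snippet relies on (set here to 1e-20 as an assumption) and the alpha values are arbitrary. With a low temperature, the argmax of the relaxed sample follows the categorical distribution given by alpha.

import numpy as np
import tensorflow as tf

EPSILON = 1e-20
alpha = tf.constant(np.tile([[0.1, 0.2, 0.7]], (10000, 1)), dtype=tf.float32)
logit = sampling_concrete(alpha, temperature=0.1)
counts = np.bincount(np.argmax(logit.numpy(), axis=1), minlength=3) / 10000.0
print(counts)  # roughly [0.1, 0.2, 0.7]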
Example #30
    def channel(self, zMean):
        batch = K.shape(zMean)[0]
        # Generate Laplace r.v.
        # Source: https://en.wikipedia.org/wiki/Laplace_distribution#Generating_random_variables_according_to_the_Laplace_distribution
        # Code: https://stackoverflow.com/questions/56691436/how-can-one-add-laplacian-noise-to-a-tensor-in-keras
        u = K.random_uniform((batch, self.latent_dim), minval=-0.5, maxval=0.5)
        epsilon = K.sign(u) * K.log(1 - 2 * K.abs(u) + K.epsilon())

        return zMean + self.n0 / self.latent_dim * epsilon
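The inverse-CDF trick used in channel() above can be checked in isolation. A standalone numpy sketch (an illustration, not part of the source): unit-scale Laplace noise has mean 0 and variance 2*b^2 = 2.

import numpy as np

u = np.random.uniform(-0.5, 0.5, size=1_000_000)
epsilon = np.sign(u) * np.log(1.0 - 2.0 * np.abs(u) + 1e-12)
print(epsilon.mean(), epsilon.var())  # roughly 0.0 and 2.0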