Example #1
def uniform_latent_sampling(latent_shape, low=0.0, high=1.0):
    """
    Sample from uniform distribution
    :param latent_shape: batch shape
    :return: normal samples, shape=(n,)+latent_shape
    """
    return Lambda(lambda x: K.random_uniform((K.shape(x)[0],) + latent_shape, low, high),
                  output_shape=lambda x: ((x[0],) + latent_shape))
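A minimal usage sketch (illustrative tensor names; TF-1.x-era Keras assumed): the returned Lambda picks up its batch size from whatever tensor it is called on.

from keras import backend as K
from keras.layers import Input, Lambda

# Hedged sketch: x_in is any placeholder tensor; z borrows its batch size.
x_in = Input(shape=(10,))
z = uniform_latent_sampling((64,))(x_in)  # z: (batch, 64), uniform in [0, 1)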
Example #2
    def get_initial_states(self, x):
        # build a list of all-zero tensors of shape (samples, output_dim)
        initial_state = K.zeros_like(x)  # (samples, timesteps, input_dim)
        initial_state = K.sum(initial_state, axis=1)  # (samples, input_dim)
        reducer = K.random_uniform((self.input_dim, self.units))
        reducer = reducer / K.exp(reducer)

        initial_state = K.dot(initial_state, reducer)  # (samples, output_dim)
        initial_states = [K.stack([initial_state, initial_state]) for _ in range(len(self.states))]
        return initial_states
Example #3
 def call(self, x, training = None):
     
     eps = 0.01
     ax  = K.abs(x)
     M   = K.mean((ax+eps) ** 4, axis = self.channel_axis) ** (1./4)     # Minkowski (power) mean, weighting the few large values more than the many small ones
     
     noise = K.random_uniform(shape = K.shape(x), minval = -1.0, maxval = 1.0, seed = self.seed)
     
     # xr  = ax * K.exp(self.reduction)
     # red = xr / (1 + xr**2)
     red = 1 / (1 + ax)                                          # individual noise reduction for each element of input
     mag = K.exp(-M / K.exp(self.sensitivity)) * self.scale      # global magnitude:  if M = 0.0 -> large magnitude (1.0) ... if M >> 0.0 -> low magnitude (~0.0)
     
     noisy = x + noise * red * mag[...,None]
     
     return noisy
Example #4
    def loss(self, y_true, y_pred):

        # get the value for the true and fake images
        disc_true = self.disc(y_true)
        disc_pred = self.disc(y_pred)

        # sample an x_hat along the line between the true and predicted images
        # z = tf.placeholder(tf.float32, shape=[None, 1])
        # shp = y_true.get_shape()[0]
        # WARNING: SHOULD REALLY BE shape=[batch_size, 1] !!!
        # self.batch_size does not work, since it's not None!!!
        alpha = K.random_uniform(shape=[K.shape(y_pred)[0], 1, 1, 1])
        diff = y_pred - y_true
        interp = y_true + alpha * diff

        # take gradient of D(x_hat)
        gradients = K.gradients(self.disc(interp), [interp])[0]
        grad_pen = K.mean(K.square(K.sqrt(K.sum(K.square(gradients), axis=1))-1))

        # compute loss
        return (K.mean(disc_pred) - K.mean(disc_true)) + self.lambda_gp * grad_pen
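For reference, a hedged, self-contained sketch of the same interpolate-and-penalize step; names are illustrative, and the norm here reduces over all non-batch axes (the usual WGAN-GP formulation), whereas the snippet above reduces over axis=1 only.

import keras.backend as K

def gradient_penalty(disc, y_true, y_pred):
    # One alpha per sample, broadcast over H, W, C.
    alpha = K.random_uniform((K.shape(y_pred)[0], 1, 1, 1))
    interp = y_true + alpha * (y_pred - y_true)      # x_hat on the line
    # K.gradients requires graph-mode (TF-1.x) Keras.
    grads = K.gradients(disc(interp), [interp])[0]
    norm = K.sqrt(K.sum(K.square(grads), axis=[1, 2, 3]))
    return K.mean(K.square(norm - 1.))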
Example #5
    def concrete_dropout(self, x):
        '''
        Concrete dropout - used at training time (gradients can be propagated)
        :param x: input
        :return:  approx. dropped out input
        '''
        eps = K.cast_to_floatx(K.epsilon())
        temp = 0.1

        unif_noise = K.random_uniform(shape=K.shape(x))
        drop_prob = (
            K.log(self.p + eps)
            - K.log(1. - self.p + eps)
            + K.log(unif_noise + eps)
            - K.log(1. - unif_noise + eps)
        )
        drop_prob = K.sigmoid(drop_prob / temp)
        random_tensor = 1. - drop_prob

        retain_prob = 1. - self.p
        x *= random_tensor
        x /= retain_prob
        return x
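The log-ratio-plus-logistic-noise expression above is the Concrete (relaxed Bernoulli) gate; a hedged sketch of it in isolation, assuming p is a scalar tensor in (0, 1) and illustrative names:

import keras.backend as K

def concrete_gate(x, p, temp=0.1):
    # Relaxed Bernoulli: sigmoid((logit(p) + logit(u)) / temp), u ~ U(0, 1).
    eps = K.epsilon()
    u = K.random_uniform(shape=K.shape(x))
    logit = (K.log(p + eps) - K.log(1. - p + eps)
             + K.log(u + eps) - K.log(1. - u + eps))
    drop_prob = K.sigmoid(logit / temp)      # close to 1 where a unit drops
    return x * (1. - drop_prob) / (1. - p)   # rescale to keep E[x] unchanged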
Example #6
 def _merge_function(self, inputs):
     alpha = K.random_uniform((32, 1, 1, 1))
     return (alpha * inputs[0]) + ((1 - alpha) * inputs[1])
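This method conventionally lives on a subclass of Keras's private _Merge base, as in the well-known Keras WGAN-GP examples; a hedged sketch of the enclosing layer, noting the hard-coded 32 must equal the batch size:

from keras.layers.merge import _Merge
import keras.backend as K

class RandomWeightedAverage(_Merge):
    """Per-sample convex combination of two batches (real and fake)."""
    def _merge_function(self, inputs):
        alpha = K.random_uniform((32, 1, 1, 1))  # 32 must match batch size
        return (alpha * inputs[0]) + ((1 - alpha) * inputs[1])

# averaged = RandomWeightedAverage()([real_images, fake_images])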
Example #7
 def _merge_function(self, inputs):
     alpha = K.random_uniform((32, 1, 1, 1))  # why is this shape the batch size?
     return (alpha * inputs[0]) + ((1 - alpha) * inputs[1])
Example #8
 def layer(x):
     weights = K.random_uniform((K.shape(x[0])[0], 1, 1, 1))
     return (weights * x[0]) + ((1 - weights) * x[1])
Example #9
    def copy_generator_network(batch_size,
                               sequence_class,
                               n_classes=1,
                               seq_length=145,
                               supply_inputs=False,
                               master_generator=master_generator,
                               copy_number=copy_number):

        sequence_class_onehots = np.eye(n_classes)

        #Generator network parameters
        latent_size = 100

        #Generator inputs
        latent_input_1, latent_input_2, latent_input_1_out, latent_input_2_out = None, None, None, None
        if not supply_inputs:
            latent_input_1 = Input(tensor=K.ones((batch_size, latent_size)),
                                   name='noise_input_1')
            latent_input_2 = Input(tensor=K.ones((batch_size, latent_size)),
                                   name='noise_input_2')
            latent_input_1_out = Lambda(
                lambda inp: inp * K.random_uniform(
                    (batch_size, latent_size), minval=-1.0, maxval=1.0),
                name='lambda_rand_input_1')(latent_input_1)
            latent_input_2_out = Lambda(
                lambda inp: inp * K.random_uniform(
                    (batch_size, latent_size), minval=-1.0, maxval=1.0),
                name='lambda_rand_input_2')(latent_input_2)
        else:
            latent_input_1 = Input(batch_shape=(batch_size, latent_size),
                                   name='noise_input_1')
            latent_input_2 = Input(batch_shape=(batch_size, latent_size),
                                   name='noise_input_2')
            latent_input_1_out = Lambda(
                lambda inp: inp, name='lambda_rand_input_1')(latent_input_1)
            latent_input_2_out = Lambda(
                lambda inp: inp, name='lambda_rand_input_2')(latent_input_2)

        class_embedding = Lambda(
            lambda x: K.gather(K.constant(sequence_class_onehots),
                               K.cast(x[:, 0], dtype='int32')))(sequence_class)

        seed_input_1 = Concatenate(axis=-1)(
            [latent_input_1_out, class_embedding])
        seed_input_2 = Concatenate(axis=-1)(
            [latent_input_2_out, class_embedding])

        #Policy network definition
        policy_dense_1 = master_generator.get_layer('policy_dense_1')

        policy_dense_1_reshape = Reshape((14, 1, 384))

        policy_deconv_0 = master_generator.get_layer('policy_deconv_0')

        policy_deconv_1 = master_generator.get_layer('policy_deconv_1')

        policy_deconv_2 = master_generator.get_layer('policy_deconv_2')

        policy_conv_3 = master_generator.get_layer('policy_conv_3')

        policy_conv_4 = master_generator.get_layer('policy_conv_4')

        policy_conv_5 = master_generator.get_layer('policy_conv_5')

        #policy_deconv_3 = Conv2DTranspose(4, (7, 1), strides=(1, 1), padding='valid', activation='linear', kernel_initializer='glorot_normal', name='policy_deconv_3')

        batch_norm_0 = master_generator.get_layer('policy_batch_norm_0')
        relu_0 = Lambda(lambda x: K.relu(x))
        batch_norm_1 = master_generator.get_layer('policy_batch_norm_1')
        relu_1 = Lambda(lambda x: K.relu(x))
        batch_norm_2 = master_generator.get_layer('policy_batch_norm_2')
        relu_2 = Lambda(lambda x: K.relu(x))

        batch_norm_3 = master_generator.get_layer('policy_batch_norm_3')
        relu_3 = Lambda(lambda x: K.relu(x))

        batch_norm_4 = master_generator.get_layer('policy_batch_norm_4')
        relu_4 = Lambda(lambda x: K.relu(x))

        policy_out_1 = Reshape((seq_length, 4, 1))(policy_conv_5(
            relu_4(
                batch_norm_4(policy_conv_4(
                    relu_3(
                        batch_norm_3(policy_conv_3(
                            relu_2(
                                batch_norm_2(policy_deconv_2(
                                    relu_1(
                                        batch_norm_1(policy_deconv_1(
                                            relu_0(
                                                batch_norm_0(policy_deconv_0(
                                                    policy_dense_1_reshape(
                                                        policy_dense_1(
                                                            seed_input_1))),
                                                             training=True))),
                                                     training=True))),
                                             training=True))),
                                     training=True))),
                             training=True))))
        policy_out_2 = Reshape((seq_length, 4, 1))(policy_conv_5(
            relu_4(
                batch_norm_4(policy_conv_4(
                    relu_3(
                        batch_norm_3(policy_conv_3(
                            relu_2(
                                batch_norm_2(policy_deconv_2(
                                    relu_1(
                                        batch_norm_1(policy_deconv_1(
                                            relu_0(
                                                batch_norm_0(policy_deconv_0(
                                                    policy_dense_1_reshape(
                                                        policy_dense_1(
                                                            seed_input_2))),
                                                             training=True))),
                                                     training=True))),
                                             training=True))),
                                     training=True))),
                             training=True))))

        return [latent_input_1, latent_input_2], [policy_out_1,
                                                  policy_out_2], []
Example #10
def interpolating(x):
    u = K.random_uniform((K.shape(x[0])[0], ) + (1, ) * (K.ndim(x[0]) - 1))
    return x[0] * u + x[1] * (1 - u)
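Because u has shape (batch, 1, ..., 1), this version works for tensors of any rank and reads the batch size at run time; a brief usage sketch with illustrative shapes:

from keras.layers import Input, Lambda

# Hedged sketch: the same Lambda interpolates images or sequences alike.
real = Input(shape=(28, 28, 1))
fake = Input(shape=(28, 28, 1))
mixed = Lambda(interpolating)([real, fake])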
Example #11
 def call(self, x, mask=None):
     return K.in_train_phase(K.relu(x, K.random_uniform(K.shape(x), self.l, self.u)),
                             K.relu(x, self.average))
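This is the randomized leaky ReLU (RReLU) pattern: a negative slope drawn from [l, u] at training time and the fixed mean slope at inference. A hedged sketch of an enclosing layer, assuming a TF-1.x-era Keras whose K.relu accepts a tensor alpha:

import keras.backend as K
from keras.layers import Layer

class RReLU(Layer):
    def __init__(self, l=1. / 8., u=1. / 3., **kwargs):
        super(RReLU, self).__init__(**kwargs)
        self.l = l
        self.u = u
        self.average = (l + u) / 2.

    def call(self, x, mask=None):
        # Random slope per element in training, mean slope at test time.
        return K.in_train_phase(
            K.relu(x, K.random_uniform(K.shape(x), self.l, self.u)),
            K.relu(x, self.average))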
Example #12
class RandomWeightedAverage(_Merge):
    def _merge_function(self, inputs):
        weights = K.random_uniform((BATCH_SIZE, 1))
        return (weights * inputs[0]) + ((1 - weights) * inputs[1])
Example #13
 def sample(self, batch_size):
     return K.cast(K.less(K.random_uniform(shape=(batch_size, self._size)),
                          self.p),
                   dtype="float32")
Example #14
def categorical_distribution(z):
    uni = K.random_uniform(shape=(K.shape(z)[0], ),
                           low=0,
                           high=6,
                           dtype='int32')
    return K.one_hot(uni, 6)
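Note the low=/high= keywords are the Keras 1.x spelling; Keras 2 renamed them to minval/maxval. A hedged sketch of the same sampler in the Keras 2 API:

import keras.backend as K

def categorical_distribution_k2(z, n_classes=6):
    # Uniform class index per sample, then one-hot encode it.
    idx = K.random_uniform(shape=(K.shape(z)[0],),
                           minval=0, maxval=n_classes, dtype='int32')
    return K.one_hot(idx, n_classes)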
Example #15
    def __call__(self, shape, dtype=None):
        dtype = dtype or K.floatx()

        init_range = 1.0 / np.sqrt(shape[1])
        return K.random_uniform(shape, -init_range, init_range, dtype=dtype)
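__call__(shape, dtype) is the hook Keras initializers implement, so an instance of the enclosing class can be passed straight to a layer. A hedged, self-contained sketch; the class name FanScaledUniform is assumed for illustration:

import numpy as np
import keras.backend as K
from keras.layers import Dense

class FanScaledUniform(object):
    def __call__(self, shape, dtype=None):
        dtype = dtype or K.floatx()
        init_range = 1.0 / np.sqrt(shape[1])  # scale by the second kernel dim
        return K.random_uniform(shape, -init_range, init_range, dtype=dtype)

dense = Dense(128, kernel_initializer=FanScaledUniform())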
Example #16
 def exps(args):
     lamb = args
     eps = K.random_uniform(shape=(bs, ls))
     ans = (-1./lamb) * K.log(-eps + 1)
     return ans
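This is inverse-transform sampling: if U ~ Uniform(0, 1), then -log(1 - U)/lamb ~ Exponential(lamb). A self-contained sketch with the globals bs and ls made explicit, illustrative parameters:

import keras.backend as K

def sample_exponential(lamb, batch_size, latent_size):
    # Inverse CDF of Exp(lamb): x = -log(1 - u) / lamb.
    u = K.random_uniform(shape=(batch_size, latent_size))
    return (-1. / lamb) * K.log(1. - u)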
Example #17
    def build_generator(self):
        L = self.inputShape_[0]
        z = Input(shape=(self.latentDim_, ))
        x = Dense(100 * self.inputShape_[0])(z)
        x = Reshape((
            L,
            100,
        ))(x)

        # res block 1:
        res_in = x
        x = layers.Activation('relu')(x)
        x = Conv1D(100, 5, padding='same')(x)
        x = layers.Activation('relu')(x)
        x = Conv1D(100, 5, padding='same')(x)
        x = Lambda(lambda z: z * 0.3)(x)
        x = Add()([res_in, x])

        # res block 2:
        res_in = x
        x = layers.Activation('relu')(x)
        x = Conv1D(100, 5, padding='same')(x)
        x = layers.Activation('relu')(x)
        x = Conv1D(100, 5, padding='same')(x)
        x = Lambda(lambda z: z * 0.3)(x)
        x = Add()([res_in, x])

        # res block 3:
        res_in = x
        x = layers.Activation('relu')(x)
        x = Conv1D(100, 5, padding='same')(x)
        x = layers.Activation('relu')(x)
        x = Conv1D(100, 5, padding='same')(x)
        x = Lambda(lambda z: z * 0.3)(x)
        x = Add()([res_in, x])

        # res block 4:
        res_in = x
        x = layers.Activation('relu')(x)
        x = Conv1D(100, 5, padding='same')(x)
        x = layers.Activation('relu')(x)
        x = Conv1D(100, 5, padding='same')(x)
        x = Lambda(lambda z: z * 0.3)(x)
        x = Add()([res_in, x])

        # res block 5:
        res_in = x
        x = layers.Activation('relu')(x)
        x = Conv1D(100, 5, padding='same')(x)
        x = layers.Activation('relu')(x)
        x = Conv1D(100, 5, padding='same')(x)
        x = Lambda(lambda z: z * 0.3)(x)
        x = Add()([res_in, x])

        x = Conv1D(self.inputShape_[-1], 1, padding='same')(x)

        logits = x
        if self.gumbel_:
            #             U = Input(tensor=K.random_uniform(K.shape(logits), 0, 1))
            eps = 1e-20
            g = Lambda(lambda y: 1. / (self.tau_) * (y - K.log(-K.log(
                K.random_uniform(K.shape(logits), 0, 1) + eps) + eps)))(logits)
            out = layers.Activation('softmax')(g)
            if self.hardGumbel_:
                k = K.shape(logits)[-1]
                out_hard = Lambda(lambda y: K.tf.cast(
                    K.tf.equal(y, K.tf.reduce_max(y, 1, keepdims=True)),
                    y.dtype))(out)
                out = Lambda(stop_grad)([out_hard, out])
            model = Model(inputs=z, outputs=out)
        else:
            out = Activation('softmax')(logits)
            model = Model(inputs=z, outputs=out)

        return model
Example #18
def sampling_gumbel(shape, eps=1e-8):
    u = K.random_uniform(shape)
    return -K.log(-K.log(u + eps) + eps)
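The usual next step is the Gumbel-softmax relaxation: perturb logits with this noise and apply a tempered softmax. A hedged sketch built on the sampler above; tau is an assumed temperature parameter:

import keras.backend as K

def gumbel_softmax(logits, tau=1.0):
    # Gumbel-perturbed logits followed by a tempered softmax.
    g = sampling_gumbel(K.shape(logits))
    return K.softmax((logits + g) / tau)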
Example #19
 def sample(self, n):
     random_indices = K.cast(K.random_uniform(shape=(n, ), maxval=self.d),
                             'int32')
     return K.one_hot(random_indices, self.d)
Example #20
    def softmax_activation(mem):
        """Softmax activation."""

        return k.cast(
            k.less_equal(k.random_uniform(k.shape(mem)), k.softmax(mem)),
            k.floatx())
Example #21
def initializer(weight_matrix):
    # weight_matrix is the shape tuple Keras passes to initializers
    return K.random_uniform(shape=weight_matrix,
                            minval=-1.2,
                            maxval=0.8,
                            seed=142)
Example #22
 def init_400(self, shape, dtype=None):
     bound = 1 / (400**0.5)
     return K.random_uniform(shape, minval=-bound, maxval=bound, dtype=dtype)
Example #23
def training(epochs=20, batch_size=60):
    image_datagen = ImageDataGenerator(rescale=1 / 255., horizontal_flip=True)
    # build model
    generator = create_generator()
    critic = create_critic()
    #adam(lr=0.0001, beta_1=0.5, beta_2=0.9)
    critic.trainable = False

    generator_input = Input(shape=generator_input_shape)
    generator_layers = generator(generator_input)
    generator_output = critic(generator_layers)

    generator_model = Model(inputs=[generator_input],
                            outputs=[generator_output])
    generator_model.compile(loss=wasserstein_loss, optimizer=opt)

    critic.trainable = True
    generator_predictive_model = Model(inputs=[generator_input],
                                       outputs=[generator_layers])
    generator_predictive_model.compile(loss=perceptual_loss, optimizer=opt)

    real_samples_input = Input(shape=image_shape)
    generator_input_for_critic = Input(shape=generator_input_shape)
    generated_samples_for_critic = generator(generator_input_for_critic)
    random_weights = K.random_uniform((batch_size, 1, 1, 1))
    averaged_samples = Lambda(
        lambda t: random_weights * t[0] + (1 - random_weights) * t[1])(
            inputs=[real_samples_input, generated_samples_for_critic])
    critic_output_avg_samples = critic(averaged_samples)
    critic_output_from_generator = critic(generated_samples_for_critic)
    critic_output_from_real = critic(real_samples_input)

    critic_model = Model(
        inputs=[real_samples_input, generator_input_for_critic],
        outputs=[
            critic_output_from_real, critic_output_from_generator,
            critic_output_avg_samples
        ])
    critic_model.compile(loss=[
        wasserstein_loss, wasserstein_loss,
        gradient_penalty_loss(averaged_samples)
    ],
                         optimizer=opt)

    # loop epochs
    for e in range(1, epochs + 1):
        print(f'Epoch {e}')
        image_generator = image_datagen.flow_from_directory(
            f'./data/{dataset}',
            class_mode=None,
            target_size=(image_shape[0], image_shape[1]),
            batch_size=batch_size)
        num_batches = int(817 / batch_size)

        acc_loss, wr, wf, wa = 0.0, 0.0, 0.0, 0.0
        c_iter_total = num_critic_iter * int(num_batches / num_critic_iter)
        # use tqdm as a progress meter
        for _ in tqdm(range(int(num_batches / num_critic_iter))):
            # allow training discriminator multiple times before engaging gan
            for _ in range(num_critic_iter):
                # generate a random normal vector with 100 dimensions
                noise = np.random.normal(
                    size=[batch_size, generator_input_shape[0]])
                # fake_images = generator.predict(noise)
                real_images = next(image_generator)
                # group our images together so we can run it at once
                loss, a, b, c = critic_model.train_on_batch(
                    [real_images, noise], [
                        -np.ones(batch_size),
                        np.ones(batch_size),
                        np.zeros(batch_size)
                    ])
                acc_loss += loss / c_iter_total
                (wr, wf,
                 wa) = ((wr + a) / c_iter_total, (wf + b) / c_iter_total,
                        (wa + c) / c_iter_total)
            # train the generator using the gan
            # generate a random normal vector with 100 dimensions
            noise = np.random.normal(
                size=[batch_size, generator_input_shape[0]])
            generator_model.train_on_batch(noise, np.ones(batch_size))
        print(f'loss:{acc_loss} real:{wr} fake:{wf} avg:{wa}')
        # occasionally generate images
        if e % 2 == 0:
            test_batch_size = 25
            noise = np.random.normal(
                size=[test_batch_size, generator_input_shape[0]])
            generated_images = generator_predictive_model.predict(noise)
            plot_generated_images(e, generated_images)
Example #24
    def get_graph_edit_fullydifferntiable_model(self):  #Done

        sgs = []
        for i in range(self.num_groups):
            sgs.append(kl.Input((self.num_states, ), name='sg{}'.format(i)))

        #R = kl.Input((self.num_states,))
        #print('self.immunized_nodes.tolist():',self.immunized_nodes.tolist())
        R = np.expand_dims(np.array(self.immunized_nodes.tolist()), axis=0)
        self.temp_var = K.variable(np.ones((1, 1)))
        self.param_schedule.set_temp(self.temp_var, self.temp_decay_factor)
        W = np.expand_dims(np.array(self.net_gs[0]), axis=(0, -1))
        Wd = np.expand_dims(np.zeros(np.array(self.net_gs[0]).shape),
                            axis=(0, -1))
        R_mat = np.expand_dims(np.diag(self.immunized_nodes), axis=(0, -1))
        A = np.expand_dims(np.zeros(self.mask.shape), axis=(0, -1))

        print('W Trx:', np.squeeze(W, axis=(0, -1)))
        W[W > 0] = 1.
        print('W Edge:', np.squeeze(W, axis=(0, -1)))
        print('R:', R)
        print('R_mat:', np.squeeze(R_mat, axis=(0, -1)))
        print('# of editable edges:', np.sum(self.mask))
        print('Budget:', self.budget)
        print('=========================================')

        R = kl.Input(tensor=K.constant(R), name="R")
        temp = kl.Input(tensor=self.temp_var, name="temp")
        W = kl.Input(tensor=K.constant(W), name="W")
        Wd = kl.Input(tensor=K.constant(Wd), name="Wd")
        R_mat = kl.Input(tensor=K.constant(R_mat), name="R_mat")
        E_features = kl.Concatenate(axis=-1)([R_mat, W])
        A = kl.Input(tensor=K.variable(A), name="A")
        self.param_schedule.set_mask(A, self.mask)

        dense_W = kl.Dense(self.num_states, name='dense_W')
        null_input = kl.Lambda(lambda z: 0 * z)(sgs[0])
        dense_W(null_input)
        W_d = dense_W.weights[0]

        yWd = kl.Lambda(lambda z: K.squeeze(z, axis=-1))(Wd)
        yWd = kl.Lambda(lambda z: z + W_d)(yWd)
        yWd = kl.Lambda(lambda z: K.expand_dims(z, axis=-1))(yWd)
        E_features = kl.Concatenate()([E_features, yWd])

        edge_inp = E_features  #kl.Concatenate()([R,])
        for i in range(self.config["edge_num_layers"]):
            edge_inp = kl.Conv2D(self.config['hidden_dims'],
                                 self.config["edge_kernel_size"],
                                 strides=1,
                                 dilation_rate=i + 1,
                                 activation='tanh',
                                 padding='same')(edge_inp)
#         edge_inp = kl.Concatenate()([edge_inp,yWd])
        new_edges = kl.Conv2D(1, 1, strides=1, activation=None,
                              padding='same')(edge_inp)

        new_edges = kl.Lambda(lambda z: z - K.log(-1 * K.log(
            K.random_uniform(shape=K.shape(z), minval=0., maxval=1.0) + K.
            epsilon()) + K.epsilon()))(new_edges)
        new_edges = kl.Lambda(lambda z: z[0] / z[1])([new_edges, temp])

        E = kl.Activation('sigmoid')(new_edges)
        #E = kl.Reshape((self.num_states,self.num_states))(E)
        E = kl.Lambda(lambda z: z[0] * z[1] * (1 - z[2]))([E, A, W])

        W_effect = kl.Lambda(lambda z: K.squeeze(z[0] + z[1], axis=-1))([W, E])
        P = kl.Lambda(lambda z: z / K.sum(z, axis=-1, keepdims=True))(W_effect)
        p_transpose = kl.Permute((2, 1))(P)

        vgs = []
        for i in range(self.num_groups):
            vg = self.get_fd_particle_type_sub_graph(sgs[i], p_transpose, R,
                                                     self.T)
            vgs.append(vg)

        vs = kl.Concatenate(name='value')(vgs)
        num_edits = kl.Lambda(lambda z: K.expand_dims(
            K.sum(z[0] * z[1], axis=(-1, -2, -3)), axis=-1))([A, E])
        num_edits = kl.Layer(name='edit')(num_edits)
        inps = sgs + [temp, R, W, R_mat, A, Wd]

        model = km.Model(inputs=inps, outputs=[vs, num_edits])
        model.layers[-1].trainable_weights.extend([W_d])
        model.summary()

        model.compile(loss={
            'value': self.fair_flow_loss,
            'edit': self.budget_loss,
        },
                      loss_weights={
                          'value': 1,
                          'edit': 1
                      },
                      optimizer=Adam(lr=self.lr),
                      metrics={
                          'value': [
                              self.flow_loss, self.fair_loss,
                              self.diff_bw_groups, self.mean_value
                          ],
                          'edit': [self.num_edits_exceeded]
                      })

        model_predict = km.Model(inputs=inps, outputs=[W_effect, P])
        return model, model_predict
Example #25
 def _merge_function(self, inputs):
     weights = K.random_uniform((K.shape(inputs[0])[0], 1, 1, 1))
     return (weights * inputs[0]) + ((1 - weights) * inputs[1])
Example #26
 def _merge_function(self, inputs):
     weights = K.random_uniform((BATCH_SIZE, 1, 1, 1))
     return (weights * inputs[0]) + ((1 - weights) * inputs[1])
Example #27
 def init_final(self, shape, dtype=None):
     return K.random_uniform(shape, minval=-3e-3, maxval=3e-3, dtype=dtype)
Example #28
 def _merge_function(self, inputs):
     alpha = K.random_uniform((16, IMG_ROWS, IMG_COLS, 3))
     return (alpha * inputs[0]) + ((1 - alpha) * inputs[1])
Example #29
 def _merge_function(self, inputs):
     weights = K.random_uniform((batch_size, 1, 1, 1))
     return (weights * inputs[0]) + ((1 - weights) * inputs[1])
Example #30
def sampling(args):
    '''Sample a single random integer'''
    # TODO: don't hard-code the maxval param
    return K.random_uniform(shape=[], minval=1, maxval=99999, dtype='int32')
Example #31
 def _merge_function(self, inputs):
     # FD: should this be (hps['nn_smallest_unit']*2, 1, 1, 1) now?
     alpha = K.random_uniform((32, 1, 1, 1))
     return (alpha * inputs[0]) + ((1 - alpha) * inputs[1])
Example #32
 def custom_random(self, shape, dtype=None):
     if self.random_init == "normal":
         return K.random_normal(shape, 0.5, 0.05, dtype=dtype, seed=22)
     else:
         return K.random_uniform(shape, 0, 1, dtype=dtype, seed=22)
Example #33
def load_generator_network(batch_size,
                           sequence_class,
                           n_classes=1,
                           seq_length=100,
                           supply_inputs=False):

    sequence_class_onehots = np.eye(n_classes)

    #Generator network parameters
    seq_length = 145
    latent_size = 100

    #Generator inputs
    latent_input_1 = Input(tensor=K.ones((batch_size, latent_size)),
                           name='noise_input_1')
    latent_input_2 = Input(tensor=K.ones((batch_size, latent_size)),
                           name='noise_input_2')
    latent_input_1_out = Lambda(lambda inp: inp * K.random_uniform(
        (batch_size, latent_size), minval=-1.0, maxval=1.0),
                                name='lambda_rand_input_1')(latent_input_1)
    latent_input_2_out = Lambda(lambda inp: inp * K.random_uniform(
        (batch_size, latent_size), minval=-1.0, maxval=1.0),
                                name='lambda_rand_input_2')(latent_input_2)

    class_embedding = Lambda(lambda x: K.gather(
        K.constant(sequence_class_onehots), K.cast(x[:, 0], dtype='int32')))(
            sequence_class)

    seed_input_1 = Concatenate(axis=-1)([latent_input_1_out, class_embedding])
    seed_input_2 = Concatenate(axis=-1)([latent_input_2_out, class_embedding])

    #Policy network definition
    policy_dense_1 = Dense(14 * 384,
                           activation='relu',
                           kernel_initializer='glorot_uniform',
                           name='policy_dense_1')

    policy_dense_1_reshape = Reshape((14, 1, 384))

    policy_deconv_0 = Conv2DTranspose(256, (7, 1),
                                      strides=(2, 1),
                                      padding='valid',
                                      activation='linear',
                                      kernel_initializer='glorot_normal',
                                      name='policy_deconv_0')

    policy_deconv_1 = Conv2DTranspose(192, (6, 1),
                                      strides=(2, 1),
                                      padding='valid',
                                      activation='linear',
                                      kernel_initializer='glorot_normal',
                                      name='policy_deconv_1')

    policy_deconv_2 = Conv2DTranspose(128, (7, 1),
                                      strides=(2, 1),
                                      padding='valid',
                                      activation='linear',
                                      kernel_initializer='glorot_normal',
                                      name='policy_deconv_2')

    policy_conv_3 = Conv2D(128, (8, 1),
                           strides=(1, 1),
                           padding='same',
                           activation='linear',
                           kernel_initializer='glorot_normal',
                           name='policy_conv_3')

    policy_conv_4 = Conv2D(64, (8, 1),
                           strides=(1, 1),
                           padding='same',
                           activation='linear',
                           kernel_initializer='glorot_normal',
                           name='policy_conv_4')

    policy_conv_5 = Conv2D(4, (8, 1),
                           strides=(1, 1),
                           padding='same',
                           activation='linear',
                           kernel_initializer='glorot_normal',
                           name='policy_conv_5')

    #policy_deconv_3 = Conv2DTranspose(4, (7, 1), strides=(1, 1), padding='valid', activation='linear', kernel_initializer='glorot_normal', name='policy_deconv_3')

    batch_norm_0 = BatchNormalization(name='policy_batch_norm_0')
    relu_0 = Lambda(lambda x: K.relu(x))
    batch_norm_1 = BatchNormalization(name='policy_batch_norm_1')
    relu_1 = Lambda(lambda x: K.relu(x))
    batch_norm_2 = BatchNormalization(name='policy_batch_norm_2')
    relu_2 = Lambda(lambda x: K.relu(x))

    batch_norm_3 = BatchNormalization(name='policy_batch_norm_3')
    relu_3 = Lambda(lambda x: K.relu(x))

    batch_norm_4 = BatchNormalization(name='policy_batch_norm_4')
    relu_4 = Lambda(lambda x: K.relu(x))

    policy_out_1 = Reshape((seq_length, 4, 1))(policy_conv_5(
        relu_4(
            batch_norm_4(
                policy_conv_4(
                    relu_3(
                        batch_norm_3(
                            policy_conv_3(
                                relu_2(
                                    batch_norm_2(
                                        policy_deconv_2(
                                            relu_1(
                                                batch_norm_1(
                                                    policy_deconv_1(
                                                        relu_0(
                                                            batch_norm_0(
                                                                policy_deconv_0(
                                                                    policy_dense_1_reshape(
                                                                        policy_dense_1(
                                                                            seed_input_1
                                                                        )))))))
                                            ))))))))))))
    policy_out_2 = Reshape((seq_length, 4, 1))(policy_conv_5(
        relu_4(
            batch_norm_4(
                policy_conv_4(
                    relu_3(
                        batch_norm_3(
                            policy_conv_3(
                                relu_2(
                                    batch_norm_2(
                                        policy_deconv_2(
                                            relu_1(
                                                batch_norm_1(
                                                    policy_deconv_1(
                                                        relu_0(
                                                            batch_norm_0(
                                                                policy_deconv_0(
                                                                    policy_dense_1_reshape(
                                                                        policy_dense_1(
                                                                            seed_input_2
                                                                        )))))))
                                            ))))))))))))

    return [latent_input_1, latent_input_2], [policy_out_1, policy_out_2], []
Example #34
 def build(self, input_shape):
     self.pos = K.random_uniform(shape=(), minval=self.fr, maxval=1.)
     self.built = True
Example #35
 def _merge_function(self, inputs):
     alpha = K.random_uniform((self.batch_size, 1, 1, 1))
     return (alpha * inputs[0]) + ((1 - alpha) * inputs[1])
Example #36
 def _merge_function(self, inputs):
     alpha = K.random_uniform((32, 64, 64, 3))  # sample 32 images on the line between the generated and real batches
     return (alpha * inputs[0]) + ((1 - alpha) * inputs[1])
Example #37
 def _merge_function(self, inputs):
     alpha = K.random_uniform((batch_size, ) + img_shape_d)
     return (alpha * inputs[0]) + (
         (1 - alpha) * inputs[1]
     )  # alpha here represents an epsilon in the paper
Example #38
 def _merge_function(self, inputs):
     weights = K.random_uniform((BATCH_SIZE, 1, 1, 1))
     return (weights * inputs[0]) + ((1 - weights) * inputs[1])