Example No. 1
    def __init__(self, input_size, hidden_size, output_size, lr):
        ''' Build and compile a two-layer classifier for notMNIST data.
        Args:
            input_size: size of the input feature vector
            hidden_size: size of the hidden layer
            output_size: size of the output layer
            lr: learning rate

        Returns:
            nothing
        '''
        self.model = Sequential()
        self.model.add(
            Dense(hidden_size,
                  kernel_initializer=TruncatedNormal(stddev=input_size**-0.5),
                  bias_initializer=Zeros(),
                  activation='relu',
                  input_shape=(input_size, )))
        self.model.add(
            Dense(output_size,
                  kernel_initializer=TruncatedNormal(stddev=hidden_size**-0.5),
                  bias_initializer=Zeros(),
                  activation='softmax'))
        self.model.compile(loss='sparse_categorical_crossentropy',
                           optimizer=SGD(lr=lr),
                           metrics=['accuracy'])
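
For reference, a self-contained sketch of the same pattern with explicit imports, assuming a standalone tf.keras setup and hypothetical notMNIST-style sizes (784 input features for 28x28 images, 128 hidden units, 10 classes); the stddev choices mirror the 1/sqrt(fan_in) scaling used above.

from tensorflow.keras import Sequential
from tensorflow.keras.initializers import TruncatedNormal, Zeros
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD

input_size, hidden_size, output_size, lr = 784, 128, 10, 0.1  # hypothetical sizes

model = Sequential([
    # Hidden layer: weights drawn with stddev 1/sqrt(fan_in), biases start at zero.
    Dense(hidden_size,
          kernel_initializer=TruncatedNormal(stddev=input_size ** -0.5),
          bias_initializer=Zeros(),
          activation='relu',
          input_shape=(input_size,)),
    # Output layer: softmax over the class labels.
    Dense(output_size,
          kernel_initializer=TruncatedNormal(stddev=hidden_size ** -0.5),
          bias_initializer=Zeros(),
          activation='softmax'),
])
model.compile(loss='sparse_categorical_crossentropy',
              optimizer=SGD(learning_rate=lr),
              metrics=['accuracy'])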
Example No. 2
  def construct_model(n_train_cols, n_target_cols, learn_windows=False):
    batch_size  = None # None translates to unknown size
    window_size = None
    reshape_size=(-1, 500)

    il = Input(batch_shape=(batch_size,window_size,1), name='seq_input')
    main_output = Masking(mask_value=-1337)(il)
    main_output = Embedding(input_dim=n_target_cols,
                            output_dim=500,
                            embeddings_initializer=RandomUniform(minval=-0.1, maxval=0.1, seed=None))(main_output)
    main_output = Reshape(target_shape=reshape_size)(main_output)

    # layer sizes that are multiples of 32 tend to map better onto GPU hardware and train faster
    main_output = LSTM(500,
                       batch_input_shape=(batch_size,window_size,1),
                       stateful=False,
                       return_sequences=True,
                       unroll=False,
                       dropout=0.2,
                       kernel_initializer=Zeros())(main_output)
    main_output = LSTM(500,
                       stateful=False,
                       return_sequences=not learn_windows,
                       unroll=False,
                       dropout=0.2,
                       kernel_initializer=Zeros())(main_output)

    main_output = Dense(n_target_cols, activation='softmax', name='dense_final')(main_output)
    full_model = Model(inputs=[il], outputs=[main_output])

    optimizerator = SGD(lr=1)
    full_model.compile(loss='categorical_crossentropy', optimizer=optimizerator, metrics=['categorical_accuracy'])

    return full_model
Example No. 3
def PatchGanDiscriminator(output_dim, patch_size, padding='same', strides=(2,2,2), kernel_size=(4,4,4), batch_norm=True, dropout=True):

    inputs = Input(shape=[patch_size[0], patch_size[1], patch_size[2], output_dim[4]])
    filter_list = [64, 128, 256, 512, 512, 512]

    # Layer1 without Batch Normalization

    disc_out = Conv3D(filters=filter_list[0], kernel_size=kernel_size,
                      kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                      bias_initializer=Zeros(), padding=padding,
                      strides=strides)(inputs)
    disc_out = LeakyReLU(alpha=0.2)(disc_out)
    # disc_out = BatchNormalization(axis=4)(disc_out)  # Original one with Batch normalization


    # build the rest Layers
    # Conv -> BN -> LeakyReLU
    for i, filter_size in enumerate(filter_list[1:]):
        name = 'disc_conv_{}'.format(i+1)


        disc_out = Conv3D(name=name, filters=filter_size,
                          kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                          bias_initializer=Zeros(), kernel_size=kernel_size,
                          padding=padding, strides=strides)(disc_out)

        if batch_norm:
            disc_out = BatchNormalization(axis=4)(disc_out)  # channel_last convention
        if dropout:
            disc_out = SpatialDropout3D(rate=0.5)(disc_out)

        disc_out = LeakyReLU(alpha=0.2)(disc_out)


    x_flat = Flatten()(disc_out)
    x = Dense(2, activation='sigmoid',name="disc_dense")(x_flat)
    patch_GAN_discriminator = Model(inputs=inputs, outputs=x, name="patch_gan")

    return patch_GAN_discriminator
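
A minimal usage sketch of the discriminator above, assuming the tf.keras imports it needs (Input, Conv3D, LeakyReLU, BatchNormalization, SpatialDropout3D, Flatten, Dense, Model, RandomNormal, Zeros) are in scope, and a hypothetical single-channel 64x64x64 patch; note that output_dim is read only at index 4, i.e. it is treated as a full 5D shape whose last entry is the channel count.

# Hypothetical shapes: six stride-2 convolutions reduce 64 -> 32 -> 16 -> 8 -> 4 -> 2 -> 1.
disc = PatchGanDiscriminator(output_dim=(None, 64, 64, 64, 1),
                             patch_size=(64, 64, 64))
disc.summary()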
Example No. 4
    def generate_layers(self):
        self.encode_layers = []
        self.decode_layers = []

        # creating encoding and decoding layers
        prev_size = self.number_features()
        enc_inputSize = 0
        for _ in range(self.depth):
            enc_inputSize = enc_inputSize + prev_size
            self.encode_layers.append(
                Dense(self.layer_width,
                      bias_initializer=RandomUniform(),
                      kernel_initializer=Orthogonal(),
                      input_shape=(enc_inputSize, )))
            prev_size = self.layer_width
        self.IR_layer = Dense(self.IR_size,
                              bias_initializer=Zeros(),
                              kernel_initializer=Orthogonal(),
                              input_shape=(enc_inputSize, ))
        self.encode_layers.append(self.IR_layer)

        prev_size = self.IR_size
        dec_inputSize = 0
        for _ in range(self.depth):
            dec_inputSize = self.number_features()
            dec_inputSize = dec_inputSize + prev_size
            self.decode_layers.append(
                Dense(self.layer_width,
                      bias_initializer=Zeros(),
                      input_shape=(dec_inputSize, )))
            prev_size = self.layer_width
        dec_inputSize = dec_inputSize + prev_size
        self.decode_layers.append(
            Dense(self.number_features(), input_shape=(dec_inputSize, )))
Example No. 5
def residual_block(feature, dropout=False, instance_norm=True):
    x = Conv2D(256,
               kernel_size=3,
               strides=1,
               padding='same',
               kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
               bias_initializer=Zeros())(feature)
    if instance_norm:
        x = InstanceNormalization()(x)
    else:
        x = BatchNormalization()(x)
    x = Activation('relu')(x)
    if dropout:
        x = Dropout(0.5)(x)
    x = Conv2D(256,
               kernel_size=3,
               strides=1,
               padding='same',
               kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
               bias_initializer=Zeros())(x)
    if instance_norm:
        x = InstanceNormalization()(x)
    else:
        x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return Add()([feature, x])
Example No. 6
def box_net(n_anchors, n_filters, act_type, repeats=4, separable_conv=True, survival_prob=None):
    inpt = Input((None, None, n_filters))
    x = inpt
    # conv-bn-swish + id
    for i in range(repeats):
        inpt1 = x
        if separable_conv:
            x = SeparableConv2D(n_filters, kernel_size=3, strides=1, padding='same',
                                depthwise_initializer=VarianceScaling(),
                                pointwise_initializer=VarianceScaling(),
                                bias_initializer=Zeros())(x)
        else:
            x = Conv2D(n_filters, kernel_size=3, strides=1, padding='same',
                       kernel_initializer=RandomNormal(stddev=0.01),
                       bias_initializer=Zeros())(x)
        x = BatchNormalization()(x)
        x = Activation(act_type)(x)
        if i>0 and survival_prob:
            x = Lambda(drop_connect, arguments={'survival_prob': survival_prob, 'is_training': True})(x)
        x = add([x, inpt1])

    # head
    x = Conv2D(4*n_anchors, kernel_size=3, strides=1, padding='same',
               bias_initializer=Zeros())(x)

    model = Model(inpt, x)

    return model
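
box_net above relies on a drop_connect helper that is not shown. A minimal sketch of the usual stochastic-depth formulation (as in the EfficientNet reference code), assuming a TensorFlow backend and NHWC feature maps, matching the keyword arguments passed through the Lambda layer:

import tensorflow as tf

def drop_connect(inputs, is_training, survival_prob):
    """Randomly drop the whole residual branch per sample during training."""
    if not is_training:
        return inputs
    batch_size = tf.shape(inputs)[0]
    # Keep the branch with probability survival_prob, then rescale so the
    # expected output matches inference behaviour.
    random_tensor = survival_prob + tf.random.uniform(
        [batch_size, 1, 1, 1], dtype=inputs.dtype)
    binary_tensor = tf.floor(random_tensor)
    return inputs / survival_prob * binary_tensor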
Example No. 7
def conv_block(feature,
               out_channel,
               downsample=True,
               dropout=False,
               instance_norm=True):
    if downsample:
        x = Conv2D(out_channel,
                   kernel_size=4,
                   strides=2,
                   padding='same',
                   kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                   bias_initializer=Zeros())(feature)
    else:
        x = Conv2DTranspose(out_channel,
                            kernel_size=4,
                            strides=2,
                            padding='same',
                            kernel_initializer=RandomNormal(mean=0.0,
                                                            stddev=0.02),
                            bias_initializer=Zeros())(feature)
    if instance_norm:
        x = InstanceNormalization()(x)
    else:
        x = BatchNormalization()(x)
    x = Activation('relu')(x)
    if dropout:
        x = Dropout(0.5)(x)
    return x
Example No. 8
def define_actor_critic_models(actions=3):
    state_in = Input(batch_shape=(None, 1, 64, 64, 3))
    state = Reshape((64, 64, 3), input_shape=(1, 64, 64, 3))(state_in)
    action_input = Input(shape=(actions, ), name='action_input')

    # Actor
    shared = build_common(state)
    hidden_actor = Dense(64, name='rl_fc_1', activation='relu')(shared)
    actor_output = Dense(actions,
                         name='rl_fc_2_actor',
                         activation='tanh',
                         kernel_initializer=RandomNormal(stddev=0.001),
                         bias_initializer=Zeros())(hidden_actor)
    actor = Model(inputs=state_in, outputs=actor_output)

    # Critic
    # shared = build_common(state)

    critic_input = Concatenate()([action_input, shared])
    hidden_critic = Dense(64, name='rl_fc_1_critic',
                          activation='relu')(critic_input)
    critic_output = Dense(1,
                          name='rl_fc_2_critic',
                          activation='linear',
                          kernel_initializer=RandomNormal(stddev=0.001),
                          bias_initializer=Zeros())(hidden_critic)
    critic = Model(inputs=[action_input, state_in], outputs=critic_output)

    return actor, critic, action_input
Example No. 9
    def build_net(self):

        model = Sequential()
        model.add(
            Conv2D(16, (8, 8),
                   strides=(4, 4),
                   activation='relu',
                   name='conv1',
                   input_shape=(84, 84, 4),
                   kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05),
                   bias_initializer=Zeros()))  ##Output of 20x20x16

        model.add(
            Conv2D(32, (4, 4),
                   strides=(2, 2),
                   activation='relu',
                   name='conv2',
                   kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05),
                   bias_initializer=Zeros()))  ##Output of 9x9x32

        # model.add(Conv2D(32, (3,3), strides=(1,1), activation='relu', name='conv3',
        #                  kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05), bias_initializer=Zeros()))      ## Output of 7x7x32

        model.add(Flatten())

        # model.add(Dense(512, activation='relu', kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05),          ## Flatten with shape 512
        #                 bias_initializer=Zeros()))

        model.add(
            Dense(
                256,
                activation='relu',
                kernel_initializer=TruncatedNormal(
                    mean=0.0, stddev=0.05),  ## Dense layer with 256 units
                bias_initializer=Zeros()))

        model.add(
            Dense(
                self.num_actions,
                kernel_initializer=TruncatedNormal(
                    mean=0.0, stddev=0.05),  ## Output layer with num_actions units
                bias_initializer=Zeros()))

        model.add(Activation('softmax'))

        sgd = SGD(self.learning_rate)
        model.compile(loss='mse', optimizer=sgd, metrics=['accuracy'])

        return model
Example No. 10
    def _build_actor_model(self):
        """
        Simple fully connected network to learn the probability distribution over the action space where the
        most profitable action as defined by the critic will have the highest probability.

        :return: a Keras model capable of learning the probability distribution for a state space of approx 10K
        and an action space of 5 - 10
        """
        ki = RandomUniform(minval=-0.05, maxval=0.05, seed=self.__seed)
        bi = Zeros()
        model = Sequential()
        model.add(Dense(800, input_dim=self.state_size, activation='relu', kernel_initializer=ki, bias_initializer=bi))
        model.add(Dropout(0.1))
        model.add(Dense(400, activation='relu', kernel_initializer=ki, bias_initializer=bi))
        model.add(Dropout(0.1))
        model.add(Dense(200, activation='relu', kernel_initializer=ki, bias_initializer=bi))
        model.add(Dropout(0.05))
        model.add(Dense(units=self.action_size, activation='linear', kernel_initializer=ki, bias_initializer=bi))
        if self.num_gpu > 0:
            model = multi_gpu_model(model, gpus=self.num_gpu)

        def custom_loss(y_true, y_pred):
            return K.sum(K.mean(K.square(y_pred - y_true), axis=-1)) + K.abs(1 - K.sum(y_pred))

        model.compile(loss=custom_loss,  # 'mean_squared_error',
                      optimizer=Adam(lr=self.learning_rate),
                      metrics=['accuracy']
                      )
        return model
Example No. 11
    def build(self, input_shape):
        u = self.layer_config['bins_init_range']
        l = -u
        bins_init = self.layer_config['bins_init']
        if bins_init == 'linspace':
            initer = [
                np.linspace(l, u, self.output_dim).reshape(1, -1)
                for _ in range(input_shape[1])
            ]
            initer = np.concatenate(initer, axis=0)
            init = Constant(initer)
        elif bins_init == 'uniform':
            init = RandomUniform(l, u)
        else:
            raise Exception(bins_init)

        bias_initializer = Constant(self.layer_config['bias_init'])
        if self.layer_config['pre_sm_dropout'] > 0.0:
            self.dropout_mask = self.add_weight(name='dropout_mask',
                                                shape=(input_shape[1],
                                                       self.output_dim),
                                                initializer=Constant(-10000),
                                                trainable=False)

        width_val = 3. * float(u - l) / input_shape[1]
        super(DiscretizationLayerWide, self).build(input_shape)
        self.bins = self.add_weight(name='bins',
                                    shape=(input_shape[1], self.output_dim),
                                    initializer=init,
                                    trainable=True)

        self.widths = self.add_weight(name='widths',
                                      shape=(input_shape[1], self.output_dim),
                                      initializer=TruncatedNormal(
                                          width_val, width_val / 4),
                                      constraint=NonNeg(),
                                      trainable=True)

        self.biases = self.add_weight(name='biases',
                                      shape=(
                                          input_shape[1],
                                          self.output_dim,
                                      ),
                                      initializer=bias_initializer,
                                      trainable=True)

        self.dense_weight = self.add_weight(
            name='w',
            shape=(input_shape[1], self.output_dim),
            initializer='glorot_uniform',  # RandomUniform(-1, 1),#
            trainable=True)

        self.dense_bias = self.add_weight(
            name='b',
            shape=(input_shape[1], ),
            initializer=Zeros(),  # RandomUniform(-0.1, 0.1),  # 'glorot_uniform',
            trainable=True)

        self.built = True
Example No. 12
 def __init__(self, rate=1e-6):
     self.w1 = np.ones(4, dtype='float32')
     self.w2 = np.ones(4, dtype='float32')
     self.model = Sequential()
     self.model.add(
         Dense(1,
               input_shape=(4, ),
               use_bias=False,
               kernel_initializer=Ones(),
               bias_initializer=Zeros()))
     self.model.add(
         Dense(4,
               use_bias=False,
               kernel_initializer=Ones(),
               bias_initializer=Zeros()))
     self.model.compile(optimizer=SGD(learning_rate=rate), loss='mse')
Example No. 13
    def build(self, input_shape):
        input_size = input_shape[-1]
        hidden_units = [int(input_size)] + list(self.hidden_units)
        self.kernels = [
            self.add_weight(name='kernel' + str(i),
                            shape=(hidden_units[i], hidden_units[i + 1]),
                            initializer=glorot_normal(seed=self.seed),
                            regularizer=l2(self.l2_reg),
                            trainable=True)
            for i in range(len(self.hidden_units))
        ]
        self.bias = [
            self.add_weight(name='bias' + str(i),
                            shape=(self.hidden_units[i], ),
                            initializer=Zeros(),
                            trainable=True)
            for i in range(len(self.hidden_units))
        ]
        if self.use_bn:
            self.bn_layers = [
                keras.layers.BatchNormalization()
                for _ in range(len(self.hidden_units))
            ]

        self.dropout_layers = [
            keras.layers.Dropout(self.dropout_rate, seed=self.seed + i)
            for i in range(len(self.hidden_units))
        ]

        self.activation_layers = [
            activation_layer(self.activation)
            for _ in range(len(self.hidden_units))
        ]

        super(DNN, self).build(input_shape)  # Be sure to call this somewhere!
Example No. 14
    def _deconv(self, feature_map, i):
        """The deconvolution operation to upsample the average feature map downstream"""
        x = Input(shape=(None, None, 1))
        if i >= 3:
            k1 = 3
            k2 = 3
            s = 1
        elif i == 2:
            k1 = 6
            k2 = 5
            s = 2
        elif i == 1:
            k1 = 5
            k2 = 6
            s = 2
        elif i <= 0:
            k1 = 6
            k2 = 6
            s = 2
        
        y = Conv2DTranspose(filters=1, 
                            kernel_size=(k1,k2), 
                            padding='valid',
                            bias_initializer=Zeros(),
                            kernel_initializer=Ones(),
                            strides=(s,s))(x)


        deconv_model = Model(inputs=[x], outputs=[y])

        inps = [deconv_model.input, K.learning_phase()]   # input placeholder                                
        outs = [deconv_model.layers[-1].output]           # output placeholder
        deconv_func = K.function(inps, outs)              # evaluation function
        
        return deconv_func([feature_map, 0])[0]
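
The K.function / learning_phase plumbing above is the older Keras-backend style; a roughly equivalent sketch using predict(), assuming tf.keras and a hypothetical feature_map array of shape (batch, height, width, 1):

from tensorflow.keras.initializers import Ones, Zeros
from tensorflow.keras.layers import Conv2DTranspose, Input
from tensorflow.keras.models import Model

def upsample_feature_map(feature_map, kernel_size=(6, 6), strides=(2, 2)):
    # All-ones kernel and zero bias: each input value is spread over a
    # kernel-sized window and overlapping contributions are summed.
    x = Input(shape=(None, None, 1))
    y = Conv2DTranspose(filters=1,
                        kernel_size=kernel_size,
                        padding='valid',
                        kernel_initializer=Ones(),
                        bias_initializer=Zeros(),
                        strides=strides)(x)
    return Model(inputs=[x], outputs=[y]).predict(feature_map)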
Example No. 15
 def build(self, input_shape):
     super(PositionEmbedding, self).build(input_shape)
     self.embeddings = self.add_weight(
         name='embeddings',
         shape=input_shape[1:],
         initializer=Zeros()
     )
Example No. 16
def init():
    model.add(
        Conv2D(input_shape=INPUT_SHAPE,
               filters=CONV1_DEEP,
               kernel_size=(CONV1_SIZE, CONV1_SIZE),
               kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.1),
               activation='relu',
               strides=1,
               padding='same'))

    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="same"))

    model.add(
        Conv2D(filters=CONV2_DEEP,
               kernel_size=(CONV2_SIZE, CONV2_SIZE),
               kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.1),
               activation='relu',
               strides=1,
               padding='same'))

    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="same"))

    model.add(Flatten())

    model.add(
        Dense(units=DENSE1_SIZE,
              activation="relu",
              use_bias=True,
              bias_initializer=Zeros(),
              kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.1)))

    model.add(
        Dense(units=DENSE2_SIZE,
              activation="relu",
              use_bias=True,
              bias_initializer=Zeros(),
              kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.1)))

    model.add(
        Dense(units=OUTPUT_NODE,
              activation="softmax",
              kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.1)))

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer='sgd',
                  metrics=['accuracy'])
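
init() above depends on a module-level model and several constants defined elsewhere; a minimal sketch of the setup it expects, with hypothetical LeNet-style values for a 28x28 grayscale, 10-class problem:

import keras
from keras.initializers import TruncatedNormal, Zeros
from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from keras.models import Sequential

INPUT_SHAPE = (28, 28, 1)        # hypothetical input size
CONV1_DEEP, CONV1_SIZE = 32, 5   # hypothetical filter counts / kernel sizes
CONV2_DEEP, CONV2_SIZE = 64, 5
DENSE1_SIZE, DENSE2_SIZE = 512, 128
OUTPUT_NODE = 10                 # hypothetical number of classes

model = Sequential()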
Example No. 17
 def build(self, input_shape):
     zeros = Zeros()
     constant = Constant(value=1/n_centroid)
     ones = Ones()
     self.u_p = self.add_weight((latent_dim, n_centroid), initializer=zeros, name='u_p')
     self.theta_p = self.add_weight((n_centroid,), initializer=constant, name='theta_p')
     self.lambda_p = self.add_weight((latent_dim, n_centroid), initializer=ones, name='lambda_p')
     self.built = True
Example No. 18
 def build(self, input_shape):
     hidden_dim = input_shape[-1]
     self.scale = self.add_weight('layer_norm_scale',
                                  shape=[hidden_dim],
                                  initializer=Ones())
     self.bias = self.add_weight('layer_norm_bias', [hidden_dim],
                                 initializer=Zeros())
     super().build(input_shape)
Example No. 19
def _unet_to_lstm(model):
    model_without_softmax = Model(inputs=model.input,
                                  outputs=model.layers[-2].output)
    model_without_softmax.build((IMG_HEIGHT, IMG_WIDTH, 3))

    unet_lstm = Sequential()
    unet_lstm.add(
        TimeDistributed(model_without_softmax,
                        input_shape=(FRAMES_PER_SAMPLE, IMG_HEIGHT, IMG_WIDTH,
                                     3)))
    shape = (FRAMES_PER_SAMPLE, IMG_HEIGHT, IMG_WIDTH, CLASS_NUM)
    conv_lstm = ConvLSTM2D(filters=CLASS_NUM, kernel_size=(3, 3), activation='tanh', input_shape=shape, padding='same',\
        kernel_initializer=Zeros(), recurrent_initializer=Zeros(), bias_initializer=Zeros())
    unet_lstm.add(conv_lstm)
    unet_lstm.add(Softmax())
    unet_lstm.build()

    return unet_lstm
Example No. 20
    def build(self, input_shape):

        if self.use_bias:
            self.global_bias = self.add_weight(shape=(1, ),
                                               initializer=Zeros(),
                                               name="global_bias")

        # Be sure to call this somewhere!
        super(PredictionLayer, self).build(input_shape)
Example No. 21
 def build(self, input_shape):
     self._g = self.add_weight(name='gain',
                               shape=input_shape[1:],
                               initializer=Ones(),
                               trainable=True)
     self._b = self.add_weight(name='bias',
                               shape=input_shape[1:],
                               initializer=Zeros(),
                               trainable=True)
Example No. 22
 def build(self, input_shape):
     self.gamma = self.add_weight(name="gamma",
                                  shape=input_shape[-1:],
                                  initializer=Ones(),
                                  trainable=True)
     self.beta = self.add_weight(name="beta",
                                 shape=input_shape[-1:],
                                 initializer=Zeros(),
                                 trainable=True)
     super().build(input_shape)
Example No. 23
 def build(self, input_shape):
     self._g = self.add_weight(name='gain',
                               shape=(input_shape[-1], ),
                               initializer=Ones(),
                               trainable=True)
     self._b = self.add_weight(name='bias',
                               shape=(input_shape[-1], ),
                               initializer=Zeros(),
                               trainable=True)
     super(LayerNormalization, self).build(input_shape)
Example No. 24
 def build(self, input_shape):
     self.gamma = self.add_weight(name='normalize_scale',
                                  shape=input_shape[-1:],
                                  initializer=Ones(),
                                  trainable=True)
     self.beta = self.add_weight(name='normalize_bias',
                                 shape=input_shape[-1:],
                                 initializer=Zeros(),
                                 trainable=True)
     super().build(input_shape)
Example No. 25
 def build(self, input_shape):
     self.gamma = self.add_weight(name='gamma',
                                  shape=input_shape[-1:],
                                  initializer=Ones(),
                                  trainable=True)
     self.beta = self.add_weight(name='beta',
                                 shape=input_shape[-1:],
                                 initializer=Zeros(),
                                 trainable=True)
     super(LayerNormalization, self).build(input_shape)
Example No. 26
def get_generator_unet(n_block=3):
    input = Input(shape=(image_size, image_size, input_channel))
    # encoder
    e0 = Conv2D(64,
                kernel_size=4,
                padding='same',
                kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                bias_initializer=Zeros())(
                    input)  # use reflection padding instead
    e0 = BatchNormalization()(e0)
    e0 = Activation('relu')(e0)
    e1 = conv_block(e0, 128, downsample=True, dropout=False)  # 1/2
    e2 = conv_block(e1, 256, downsample=True, dropout=False)  # 1/4
    e3 = conv_block(e2, 512, downsample=True, dropout=False)  # 1/8
    e4 = conv_block(e3, 512, downsample=True, dropout=False)  # 1/16
    e5 = conv_block(e4, 512, downsample=True, dropout=False)  # 1/32
    e6 = conv_block(e5, 512, downsample=True, dropout=False)  # 1/64
    e7 = conv_block(e6, 512, downsample=True, dropout=False)  # 1/128
    # decoder
    d0 = conv_block(e7, 512, downsample=False, dropout=True)  # 1/64
    d1 = Concatenate(axis=-1)([d0, e6])
    d1 = conv_block(d1, 512, downsample=False, dropout=True)  # 1/32
    d2 = Concatenate(axis=-1)([d1, e5])
    d2 = conv_block(d2, 512, downsample=False, dropout=True)  # 1/16
    d3 = Concatenate(axis=-1)([d2, e4])
    d3 = conv_block(d3, 512, downsample=False, dropout=True)  # 1/8
    d4 = Concatenate(axis=-1)([d3, e3])
    d4 = conv_block(d4, 256, downsample=False, dropout=True)  # 1/4
    d5 = Concatenate(axis=-1)([d4, e2])
    d5 = conv_block(d5, 128, downsample=False, dropout=True)  # 1/2
    d6 = Concatenate(axis=-1)([d5, e1])
    d6 = conv_block(d6, 64, downsample=False, dropout=True)  # 1
    # out
    x = Conv2D(output_channel,
               kernel_size=3,
               padding='same',
               kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
               bias_initializer=Zeros())(d6)  # use reflection padding instead
    x = BatchNormalization()(x)
    x = Activation('tanh')(x)
    generator = Model(inputs=input, outputs=x)
    return generator
Example No. 27
def get_discriminator(name, n_layers=3, use_sigmoid=False, instance_norm=True):
    input = Input(shape=(image_size, image_size, input_channel))
    x = Conv2D(64,
               kernel_size=4,
               padding='same',
               strides=2,
               kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
               bias_initializer=Zeros())(input)
    x = LeakyReLU(alpha=0.2)(x)
    for i in range(1, n_layers):
        x = Conv2D(64 * 2**i,
                   kernel_size=4,
                   padding='same',
                   strides=2,
                   kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                   bias_initializer=Zeros())(x)
        if instance_norm:
            x = InstanceNormalization()(x)
        else:
            x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.2)(x)
    x = Conv2D(64 * 2**n_layers,
               kernel_size=4,
               padding='same',
               strides=1,
               kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
               bias_initializer=Zeros())(x)
    if instance_norm:
        x = InstanceNormalization()(x)
    else:
        x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = Conv2D(1,
               kernel_size=4,
               padding='same',
               strides=1,
               kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
               bias_initializer=Zeros())(x)
    if use_sigmoid:
        x = Activation('sigmoid')(x)
    discriminator = Model(inputs=input, outputs=x, name=name)
    return discriminator
Example No. 28
    def build_model(self):
        # init_seed = evo_seeds[0]
        #init_seed = 3
        #np.random.seed(init_seed) #must set this in addition of seed in kernal_initializer for reproducable results
        #random.seed(init_seed)  # Python
        #set_random_seed(init_seed)  # Tensorflow

        # units = []
        # units.append(80)
        # units.append(80)
        #import sys
        #stdout = sys.stdout
        #sys.stdout = open('/dev/null', 'w')
        input_val = 24
        output_val = 3
        model_layer_units = [80]
        layers = len(model_layer_units)
        layer_count = 0
        test = model_layer_units[layer_count]

        model = Sequential()

        while layer_count < layers - 1:
            model.add(
                LSTM(model_layer_units[layer_count],
                     input_shape=(None, input_val),
                     return_sequences=True,
                     kernel_initializer=Zeros(),
                     recurrent_initializer=Zeros(),
                     bias_initializer=Zeros()))

            layer_count += 1

        model.add(
            LSTM(model_layer_units[layer_count],
                 input_shape=(None, input_val),
                 kernel_initializer=Zeros(),
                 recurrent_initializer=Zeros(),
                 bias_initializer=Zeros()))
        model.add(
            Dense(output_val,
                  kernel_initializer=Zeros(),
                  bias_initializer=Zeros(),
                  activation="softmax"))

        #import tensorflow as tf
        #v = tf.Variable(0, name='my_variable')
        #sess = tf.Session()
        #tf.train.write_graph(sess.graph_def, '/tmp/my-model', 'train.pbtxt')

        return model
Example No. 29
    def build_model(self):
        self.x = Input(shape=self.input_shape)
        self.y = self.x

        # CNN layers
        for layer in ['Layer1', 'Layer2']:
            self.y = Conv2D(filters=self.dimensionality[layer],
                            kernel_size=(1, self.kernel_size[layer]),
                            padding=self.padding[layer],
                            data_format='channels_last',
                            activation='relu',
                            kernel_initializer=glorot_uniform(),
                            bias_initializer=Zeros())(self.y)
            self.y = BatchNormalization()(self.y)

        # WidthLayer2
        if self.padding['Layer2'] == 'same':
            WidthLayer2 = self.CNN_window_size
        else:
            WidthLayer2 = int(
                np.floor((self.CNN_window_size - self.kernel_size['Layer2']) / self.stride['Layer2']) + 1)

        # 1st Dense layer
        self.y = Reshape((self.num_time_steps, self.dimensionality['Layer2'] * WidthLayer2))(self.y)
        self.y = Dense(units=self.dense_units['DenseLayer1'],
                       activation='relu',
                       kernel_regularizer=l2(self.scale_l2_regularization))(self.y)
        self.y = Dropout(rate=1 - self.dropout_probability)(self.y)
        self.y = BatchNormalization()(self.y)

        # LSTM Layer
        self.y = CuDNNLSTM(self.num_LSTM_units, return_sequences=True)(self.y)

        # 2nd Dense layer
        self.y = Dense(units=self.dense_units['DenseLayer2'],
                       activation='relu',
                       kernel_regularizer=l2(self.scale_l2_regularization))(self.y)
        self.y = Dropout(rate=1 - self.dropout_probability)(self.y)
        self.y = BatchNormalization()(self.y)

        # Sigmoid output
        self.y = Dense(units=1, activation='sigmoid')(self.y)

        self.model = Model(inputs=self.x, outputs=self.y, name='model')

        if self.num_gpus > 1:
            print('{} | [INFO] | Training with {} GPUs '.format(datetime.datetime.now(), self.num_gpus))
            self.parallel_model = multi_gpu_model(self.model, gpus=self.num_gpus, cpu_merge=False)
            self.parallel_model.compile(loss='binary_crossentropy', metrics=['accuracy'],
                                        optimizer=Adam(lr=self.initial_lr))
        else:
            self.model.compile(loss='binary_crossentropy', metrics=['accuracy'],
                               optimizer=Adam(lr=self.initial_lr), sample_weight_mode='temporal')
Example No. 30
 def build(self, input_shape):
     # Create trainable weight variables for this layer.
      # gamma and beta are shared across rows
     self.gamma = self.add_weight(name='gamma',
                                  shape=input_shape[-1:],
                                  initializer=Ones(),
                                  trainable=True)
     self.beta = self.add_weight(name='beta',
                                 shape=input_shape[-1:],
                                 initializer=Zeros(),
                                 trainable=True)
     super(LayerNormalization,
           self).build(input_shape)  # Be sure to call this at the end
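
Several of the build() snippets above (Examples No. 18 and No. 21 through No. 25, and this one) are the weight-creation half of a layer-normalization layer. A self-contained sketch of a complete minimal version, assuming tf.keras, with the call() half filled in:

import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.initializers import Ones, Zeros

class LayerNormalization(tf.keras.layers.Layer):
    def __init__(self, eps=1e-6, **kwargs):
        super().__init__(**kwargs)
        self.eps = eps

    def build(self, input_shape):
        # Per-feature gain (gamma) and bias (beta), as in the examples above.
        self.gamma = self.add_weight(name='gamma',
                                     shape=input_shape[-1:],
                                     initializer=Ones(),
                                     trainable=True)
        self.beta = self.add_weight(name='beta',
                                    shape=input_shape[-1:],
                                    initializer=Zeros(),
                                    trainable=True)
        super().build(input_shape)

    def call(self, x):
        # Normalize over the last axis, then rescale and shift.
        mean = K.mean(x, axis=-1, keepdims=True)
        std = K.std(x, axis=-1, keepdims=True)
        return self.gamma * (x - mean) / (std + self.eps) + self.beta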