Example #1
    def build(self, input_shape):
        assert len(input_shape) >= 2
        self.input_dim = input_shape[-1]

        # trainable parameters: mu_w, mu_b, sigma_w, sigma_b

        self.mu_w = self.add_weight(shape=(self.input_dim, self.units),
                                    initializer=self.kernel_initializer,
                                    name='mu_w',
                                    regularizer=None,
                                    constraint=None)

        self.mu_b = self.add_weight(shape=(self.units, ),
                                    initializer=self.bias_initializer,
                                    name='mu_b',
                                    regularizer=None,
                                    constraint=None)

        self.sigma_w = self.add_weight(
            shape=(self.input_dim, self.units),
            initializer=initializers.Constant(0.01),  # constant initialization
            name='sigma_w',
            regularizer=None,
            constraint=None)

        self.sigma_b = self.add_weight(
            shape=(self.units, ),
            initializer=initializers.Constant(0.01),  # constant initialization
            name='sigma_b',
            regularizer=None,
            constraint=None)

        self.input_spec = InputSpec(min_ndim=2, axes={-1: self.input_dim})
        self.built = True
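A minimal sketch of how these four parameters are typically combined at call time in a NoisyNet-style layer; the sampling scheme below is an assumption for illustration, not part of the snippet above:

    def call(self, inputs):
        # Hypothetical forward pass: sample fresh standard-normal noise and
        # perturb the mean weights by the learned per-weight scales.
        eps_w = K.random_normal(shape=(self.input_dim, self.units))
        eps_b = K.random_normal(shape=(self.units, ))
        noisy_kernel = self.mu_w + self.sigma_w * eps_w
        noisy_bias = self.mu_b + self.sigma_b * eps_b
        return K.dot(inputs, noisy_kernel) + noisy_bias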
Example #2
    def __init__(self,
                 N,
                 dim1,
                 dim2,
                 channels,
                 intermediate_dim,
                 base_name='attn',
                 **kwargs):

        super(Contextual_Attention, self).__init__(**kwargs)

        self.dim1 = dim1
        self.dim2 = dim2
        self.channels = channels
        self.N = N
        self.base_name = base_name
        self.intermediate_dim = intermediate_dim

        dists, mean_dists, std_dists = compute_distances(self.dim1, self.dim2)
        dists_new = np.repeat(dists[:, :, np.newaxis], self.N, axis=2)
        self.dists = K.constant(dists_new)

        self.mu_initializer = initializers.Constant(value=mean_dists)
        self.sigma_initializer = initializers.Constant(value=std_dists)
        self.alpha_initializer = initializers.Constant(value=1.0)
        self.mu_constraint = constraints.get(None)
        self.sigma_constraint = constraints.get(None)
        self.alpha_constraint = constraints.get(None)
Example #3
def residual_zeropad_block(X, f, level_number, direction, batchnorm=0, dilations=None):
    suffix = "_" + direction + "_" + str(level_number)
    shortcut = X

    if batchnorm == 2:
        X = BatchNormalization(name="batchnorm" + suffix + "a")(X)
    X = Conv2D(f, 3, padding="same", kernel_initializer="he_normal",
               name="conv" + suffix + "a")(X)
    X = Activation("relu", name="relu" + suffix + "a")(X)

    if batchnorm:
        X = BatchNormalization(name="batchnorm" + suffix + "b")(X)
    X = Conv2D(f, 3, padding="same", kernel_initializer="he_normal",
               name="conv" + suffix + "b")(X)
    X = Activation("relu", name="relu" + suffix + "b")(X)

    X_channels = X.shape.as_list()[-1]
    shortcut_channels = shortcut.shape.as_list()[-1]
    if X_channels >= shortcut_channels:
        identity_weights = np.eye(shortcut_channels, X_channels, dtype=np.float32)
        shortcut = Conv2D(X_channels, kernel_size=1, strides=1, use_bias=False, trainable=False,
                          kernel_initializer=initializers.Constant(value=identity_weights),
                          name="zeropad" + suffix)(shortcut)
    else:
        identity_weights = np.eye(X_channels, shortcut_channels, dtype=np.float32)
        X = Conv2D(shortcut_channels, kernel_size=1, strides=1, use_bias=False, trainable=False,
                   kernel_initializer=initializers.Constant(value=identity_weights),
                   name="zeropad" + suffix)(X)
    X = Add(name="add" + suffix)([X, shortcut])

    return X
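The frozen 1x1 convolution above implements the ResNet-style zero-padding shortcut: np.eye builds a rectangular identity matrix, so existing channels pass through unchanged and the extra channels are filled with zeros. A quick numpy check of that projection (illustrative only; shapes chosen arbitrarily):

import numpy as np

identity_weights = np.eye(2, 4, dtype=np.float32)  # map 2 channels into 4
x = np.array([[3.0, 5.0]], dtype=np.float32)       # one pixel with 2 channels
print(x @ identity_weights)                        # [[3. 5. 0. 0.]] -- copied, then zero-padded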
Example #4
def prelu(x, name='default'):
    if name == 'default':
        return PReLU(alpha_initializer=initializers.Constant(value=0.25))(x)
    else:
        return PReLU(alpha_initializer=initializers.Constant(value=0.25),
                     name=name)(x)
Example #5
    def build(self, input_shape):
        # input_dim = input_shape[-1] - 2 + 2 * self.channels
        # input_dim_signal = input_shape[-1]
        # input_dim = self.channels + 2 * self.units ** 2
        # self.embeddings0 = self.add_weight(shape=(self.locs + 1, 1), initializer='uniform', name='embeddings0')
        # self.embeddings1 = self.add_weight(shape=(self.locs + 1, 1), initializer='uniform', name='embeddings1')
        # self.kernel_signal = self.add_weight(shape=(input_dim_signal, self.channels), initializer='glorot_uniform',
        #                                      name='kernel_signal')
        # self.bias_signal = self.add_weight(shape=(self.channels,), initializer='zeros', name='bias_signal')
        # self.kernel = self.add_weight(shape=(input_dim, self.units ** 2), initializer='glorot_uniform', name='kernel')
        # self.bias = self.add_weight(shape=(self.units ** 2,), initializer='zeros', name='bias')
        # self.embeddings0 = self.add_weight(shape=(self.locs, self.channels), initializer='uniform', name='embeddings0')
        # self.embeddings1 = self.add_weight(shape=(self.locs, self.channels), initializer='uniform', name='embeddings1')
        # self.kernel0 = self.add_weight(shape=(input_dim, self.units), initializer='glorot_uniform', name='kernel0')
        # self.bias0 = self.add_weight(shape=(self.units,), initializer='zeros', name='bias0')
        # self.kernel1 = self.add_weight(shape=(input_dim, self.channels), initializer='glorot_uniform', name='kernel')
        # self.bias1 = self.add_weight(shape=(self.channels,), initializer='zeros', name='bias')
        c = np.linspace(0, self.locs, self.units, endpoint=False)
        c0 = np.kron(c, np.ones_like(c))
        c1 = (c0 + np.kron(np.ones_like(c), c)) % self.locs
        self.c0 = K.constant(c0.astype('int32') + 1)
        self.c1 = K.constant(c1.astype('int32') + 1)
        self.a0 = self.add_weight(
            shape=(1, ),
            initializer=initializers.Constant(value=10.0),
            name='a0')
        self.a1 = self.add_weight(
            shape=(1, ),
            initializer=initializers.Constant(value=10.0),
            name='a1')
        self.built = True
Example #6
def create_neural_network_model(input_dimension, output_dimension):
    opt = Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, amsgrad=False)

    model = Sequential()
    model.add(
        Dense(256,
              input_dim=input_dimension,
              activation='relu',
              kernel_initializer='random_uniform',
              bias_initializer=initializers.Constant(0.1)))
    model.add(
        Dense(256,
              activation='relu',
              kernel_initializer='random_uniform',
              bias_initializer=initializers.Constant(0.1)))
    # model.add(Dense(256, activation='relu', kernel_initializer='random_uniform',
    #                 bias_initializer=initializers.Constant(0.1)))
    model.add(
        Dense(128,
              activation='relu',
              kernel_initializer='random_uniform',
              bias_initializer=initializers.Constant(0.1)))
    model.add(
        Dense(output_dimension,
              activation='sigmoid',
              kernel_initializer='random_uniform',
              bias_initializer='zeros'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    return model
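The effect of the Constant(0.1) bias initializer can be checked right after construction; for a Dense layer, get_weights() returns [kernel, bias]. A usage sketch, assuming arbitrary dimensions:

model = create_neural_network_model(input_dimension=20, output_dimension=4)
kernel, bias = model.layers[0].get_weights()
print(bias[:5])  # [0.1 0.1 0.1 0.1 0.1] -- every bias starts at the constant value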
Example #7
def clasif_model():
    individual = [(150, 0), (100, 0), (100, 0), (50, 0)]

    activation_functions = {
        0: "relu",
        1: "sigmoid",
        2: "softmax",
        3: "tanh",
        # 4: "selu",
        4: "softplus",
        # 6: "softsign",
        5: "linear"
    }

    dimension = 5
    model = Sequential()
    for units, activ_f in individual:
        if units > 5:
            # input_dim only takes effect for the first layer of a Sequential model
            model.add(Dense(units=units,
                            input_dim=dimension,
                            kernel_initializer=initializers.Constant(value=0.025),
                            activation=activation_functions[activ_f]))

    model.add(Dense(units=5, activation="softmax",
                    kernel_initializer=initializers.Constant(value=0.025)))

    # Alternative optimizers tried previously:
    # SGD(lr=0.05, momentum=0.1, decay=0.001, nesterov=False)
    # model.compile(loss='mean_squared_error', optimizer='sgd')
    # Adam defaults: Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

    return model
Example #8
    def create_tf_model(self, name):
        no_hidden_layers = len(self.hidden_layers)
        self.inp = Input(shape=(self.input_size, ))
        for i in range(no_hidden_layers):
            if (i == 0):
                outp = Dense(self.hidden_layers[0],
                             activation='linear',
                             kernel_initializer=initializers.TruncatedNormal(
                                 stddev=0.1),
                             bias_initializer=initializers.Constant(1))(
                                 self.inp)
                outp = Activation('relu')(outp)
            else:
                outp = Dense(self.hidden_layers[i],
                             activation='linear',
                             kernel_initializer=initializers.TruncatedNormal(
                                 stddev=0.1),
                             bias_initializer=initializers.Constant(1))(outp)
                outp = Activation('relu')(outp)
            outp = Dropout(0.5)(outp, training=self.mc_dropout)

        if (no_hidden_layers == 0):
            outp = Dense(self.output_classes, activation='linear')(self.inp)
            self.predictions = Activation('softmax')(outp)
        else:
            outp = Dense(self.output_classes, activation='linear')(outp)
            self.predictions = Activation('softmax')(outp)
        self.model = Model(self.inp, self.predictions, name=name + '_keras')

        self.get_final_layer_model_output = K.function(
            [self.model.layers[0].input], [self.model.layers[-3].output])
        self.model.compile(loss='categorical_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])
Example #9
    def build(self, input_shape):
        self.identifiers = self.add_weight(shape=(self.grn.size(), ),
                                           initializer=GRNInit(
                                               np.copy(self.grn.identifiers)),
                                           name='identifiers')
        self.enhancers = self.add_weight(shape=(self.grn.size(), ),
                                         initializer=GRNInit(
                                             np.copy(self.grn.enhancers)),
                                         name='enhancers')
        self.inhibitors = self.add_weight(shape=(self.grn.size(), ),
                                          initializer=GRNInit(
                                              np.copy(self.grn.inhibitors)),
                                          name='inhibitors')
        self.beta = self.add_weight(
            shape=(1, ),
            initializer=initializers.Constant(value=self.grn.beta),
            name='beta')
        self.delta = self.add_weight(
            shape=(1, ),
            initializer=initializers.Constant(value=self.grn.delta),
            name='delta')

        self.grn.tf_identifiers = self.identifiers
        self.grn.tf_enhancers = self.enhancers
        self.grn.tf_inhibitors = self.inhibitors
        self.grn.tf_beta = self.beta
        self.grn.tf_delta = self.delta
        self.built = True
Example #10
    def _build_model(self):
        # Fixed-seed kernel initializers for reproducible results:
        # https://stackoverflow.com/questions/45230448/how-to-get-reproducible-result-when-running-keras-with-tensorflow-backend
        model = Sequential()
        model.add(
            Dense(self.state_size,
                  input_dim=self.state_size,
                  activation='relu',
                  kernel_initializer=initializers.glorot_normal(seed=1337),
                  bias_initializer=initializers.Constant(value=0)))
        model.add(
            Dense(200,
                  activation='relu',
                  kernel_initializer=initializers.glorot_normal(seed=1337),
                  bias_initializer=initializers.Constant(value=0)))
        model.add(
            Dense(200,
                  activation='relu',
                  kernel_initializer=initializers.glorot_normal(seed=1337),
                  bias_initializer=initializers.Constant(value=0)))
        model.add(
            Dense(self.action_size,
                  activation='linear',
                  kernel_initializer=initializers.glorot_normal(seed=1337),
                  bias_initializer=initializers.Constant(value=0)))
        model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
        return model
Example #11
    def build(self, input_shape):
        assert len(input_shape) >= 2
        self.input_dim = input_shape[-1]

        self.kernel = self.add_weight(shape=(self.input_dim, self.units),
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)

        self.sigma_kernel = self.add_weight(
            shape=(self.input_dim, self.units),
            initializer=initializers.Constant(value=self.sigma_init),
            name='sigma_kernel')

        if self.use_bias:
            self.bias = self.add_weight(shape=(self.units, ),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
            self.sigma_bias = self.add_weight(
                shape=(self.units, ),
                initializer=initializers.Constant(value=self.sigma_init),
                name='sigma_bias')
        else:
            self.bias = None
            self.epsilon_bias = None
        # self.sample_noise()
        super(NoisyDense, self).build(input_shape)
Example #12
def learn_fast_model(num_pixels=256, num_classes=10, initializer_val=0.01):
    model = Sequential()
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               kernel_initializer=initializers.Constant(value=initializer_val),
               activation='relu',
               input_shape=(16, 16, 1)))
    model.add(
        Conv2D(64, (3, 3),
               kernel_initializer=initializers.Constant(value=initializer_val),
               activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(
        Dense(128,
              kernel_initializer=initializers.Constant(value=initializer_val),
              activation='relu'))
    model.add(Dropout(0.5))
    model.add(
        Dense(num_classes,
              kernel_initializer=initializers.Constant(value=initializer_val),
              activation='sigmoid'))
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer='nadam',
                  metrics=['accuracy'])
    return model
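Note that initializing every kernel entry to the same constant makes all units in a layer start out identical and receive identical gradients until something (here, the dropout masks) breaks the symmetry. A quick check that the initializer really fills the kernel with a single value (illustrative):

import numpy as np

model = learn_fast_model()
kernel = model.layers[0].get_weights()[0]  # first Conv2D kernel, shape (3, 3, 1, 32)
print(np.unique(kernel))                   # [0.01] -- one repeated value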
Example #13
    def _build_model(self):

        model = Sequential()
        model.add(
            Dense(100,
                  input_dim=self.state_size,
                  activation='relu',
                  kernel_initializer=initializers.glorot_normal(seed=1337),
                  bias_initializer=initializers.Constant(value=0.1)))
        model.add(
            Dense(100,
                  activation='relu',
                  kernel_initializer=initializers.glorot_normal(seed=1337),
                  bias_initializer=initializers.Constant(value=0.1)))
        # model.add(Dense(50,
        #                 activation='relu',
        #                 kernel_initializer=initializers.glorot_normal(seed=1337),
        #                 bias_initializer=initializers.Constant(value=0.1)))
        model.add(
            Dense(self.action_size,
                  activation='linear',
                  kernel_initializer=initializers.glorot_normal(seed=1337),
                  bias_initializer=initializers.Constant(value=0.1)))
        model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
        return model
Example #14
File: ppo.py Project: v3rm1/dl2_rl
    def _shared_network_structure(self, state_features):
        dense_d = self.dic_agent_conf["D_DENSE"]
        conv1 = Conv2D(16,
                       kernel_size=2,
                       name="conv_shared_1",
                       input_shape=(80, 80, 1))(state_features)  # input_shape has no effect in the functional API
        conv1_leaky = LeakyReLU(alpha=0.1)(conv1)
        conv2 = Conv2D(32, kernel_size=2, name="conv_shared_2")(conv1_leaky)
        conv2_leaky = LeakyReLU(alpha=0.1)(conv2)
        flatten1 = Flatten()(conv2_leaky)
        hidden1 = Dense(
            dense_d,
            kernel_initializer=initializers.RandomNormal(stddev=0.01),
            bias_initializer=initializers.Constant(0.1),
            activation="linear",
            name="hidden_shared_1")(flatten1)
        hidden1_leaky = LeakyReLU(alpha=0.1)(hidden1)
        hidden2 = Dense(
            dense_d,
            kernel_initializer=initializers.RandomNormal(stddev=0.01),
            bias_initializer=initializers.Constant(0.1),
            activation="linear",
            name="hidden_shared_2")(hidden1_leaky)
        hidden2_leaky = LeakyReLU(alpha=0.1)(hidden2)
        return hidden2_leaky
Example #15
def atrous_block_residual_zeropad(X, f, level_number, direction, batchnorm=0, dilations=(1,)):
    cell_outputs = []
    suffix = "_" + direction + "_" + str(level_number)

    # Atrous convolutions
    for d in dilations:
        cell_outputs.append(atrous_single_cell(X, f, level_number, direction, batchnorm, d))

    # Shortcut
    shortcut_channels = X.shape.as_list()[-1]
    if f >= shortcut_channels:
        identity_weights = np.eye(shortcut_channels, f, dtype=np.float32)
        X = Conv2D(f, kernel_size=1, strides=1, use_bias=False, trainable=False,
                   kernel_initializer=initializers.Constant(value=identity_weights),
                   name="zeropad" + suffix)(X)
    else:
        identity_weights = np.eye(f, shortcut_channels, dtype=np.float32)
        for i in range(len(cell_outputs)):
            cell_outputs[i] = Conv2D(shortcut_channels, kernel_size=1, strides=1, use_bias=False, trainable=False,
                                     kernel_initializer=initializers.Constant(value=identity_weights),
                                     name="zeropad" + suffix + "_" + str(i))(cell_outputs[i])

    cell_outputs.append(X)
    X = Add(name="add" + suffix)(cell_outputs)

    return X
Example #16
    def create_tf_model(self, name):
        # self.model = Sequential()
        no_hidden_layers = len(self.hidden_layers)
        #
        # for i in range(no_hidden_layers):
        #    if(i == 0):
        #        self.model.add(Dense(self.hidden_layers[0], input_dim = self.input_size, activation = 'relu'))
        #    else:
        #        self.model.add(Dense(self.hidden_layers[i], activation = 'relu'))
        #
        # if(no_hidden_layers == 0):
        #    self.model.add(Dense(self.output_classes, input_dim = self.input_size, activation = 'sigmoid'))
        # else:
        #    self.model.add(Dense(self.output_classes, activation = 'sigmoid'))
        #
        self.inp = Input(shape=(self.input_size, ))
        for i in range(no_hidden_layers):
            if (i == 0):
                outp = Dense(self.hidden_layers[0],
                             activation='linear',
                             kernel_initializer=initializers.TruncatedNormal(
                                 stddev=0.1),
                             bias_initializer=initializers.Constant(1))(
                                 self.inp)
                #kernel_regularizer = regularizers.l2(0.01)
                #, activity_regularizer = regularizers.l1(0.01)
                #outp = Dense(self.hidden_layers[0], activation='linear')(self.inp)
                #outp = BatchNormalization()(outp)
                outp = Activation('relu')(outp)
            else:
                outp = Dense(self.hidden_layers[i],
                             activation='linear',
                             kernel_initializer=initializers.TruncatedNormal(
                                 stddev=0.1),
                             bias_initializer=initializers.Constant(1))(outp)
                #kernel_regularizer = regularizers.l2(0.01)
                #, activity_regularizer = regularizers.l1(0.01)
                #outp = Dense(self.hidden_layers[i], activation='linear')(outp)
                #outp = BatchNormalization()(outp)
                outp = Activation('relu')(outp)
            outp = Dropout(0.5)(outp, training=self.mc_dropout)

        if (no_hidden_layers == 0):
            outp = Dense(self.output_classes, activation='linear')(self.inp)
            self.predictions = Activation('softmax')(outp)
        else:
            outp = Dense(self.output_classes, activation='linear')(outp)
            self.predictions = Activation('softmax')(outp)
        #self.model = Model(self.inp, outp, name=name + '_keras')
        self.model = Model(self.inp, self.predictions, name=name + '_keras')

        print(self.model.layers[-3].output.shape)
        print(self.model.layers[-2].output.shape)
        self.get_final_layer_model_output = K.function(
            [self.model.layers[0].input], [self.model.layers[-3].output])
        #self.get_preds = K.function([self.model.layers[0].input], [self.predictions])
        self.model.compile(loss='categorical_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])
Example #17
    def build(self, input_shape):

        self.deltas = self.add_weight(name='deltas',
                                      shape=(self.n_classes - 1, ),
                                      initializer=kinit.Constant(
                                          np.linspace(-1, 1, self.n_classes - 1)),
                                      trainable=True)
        self.offset = self.add_weight(name='offset',
                                      shape=(1, ),
                                      initializer=kinit.Constant(0),
                                      trainable=True)

        super(OrdinalLayer, self).build(input_shape)
Example #18
File: ppo.py Project: v3rm1/dl2_rl
    def _build_actor_network_confidence(self):

        state = Input(shape=self.dic_agent_conf["STATE_DIM"], name="state")
        # print("BUILD ACTOR NETWORK: STATE", state.shape)

        advantage = Input(shape=(1, ), name="Advantage")
        old_prediction = Input(shape=(self.n_actions, ), name="Old_Prediction")

        shared_hidden = self._shared_network_structure(state)

        action_dim = self.dic_agent_conf["ACTION_DIM"]

        act_policy = Dense(
            action_dim,
            kernel_initializer=initializers.RandomNormal(stddev=0.01),
            bias_initializer=initializers.Constant(0.1),
            activation="softmax",
            name="actor_output_layer")(shared_hidden)

        act_plus_shared = Concatenate()([act_policy, shared_hidden])

        conf_policy = Dense(
            1,
            kernel_initializer=initializers.RandomNormal(stddev=0.01),
            bias_initializer=initializers.Constant(0.1),
            activation="sigmoid",
            name="confidence_output_layer")(act_plus_shared)

        policy = Concatenate()([act_policy, conf_policy])
        actor_network = Model(inputs=[state, advantage, old_prediction],
                              outputs=policy)

        if self.dic_agent_conf["OPTIMIZER"] == "Adam":
            actor_network.compile(
                optimizer=Adam(lr=self.dic_agent_conf["ACTOR_LEARNING_RATE"]),
                loss=self.confidence_loss(
                    advantage=advantage,
                    old_prediction=old_prediction,
                ))
        elif self.dic_agent_conf["OPTIMIZER"] == "RMSProp":
            actor_network.compile(optimizer=RMSprop(
                lr=self.dic_agent_conf["ACTOR_LEARNING_RATE"]))
        else:
            print("No such optimizer for the actor network; "
                  "using the Adam optimizer instead.")
            actor_network.compile(optimizer=Adam(
                lr=self.dic_agent_conf["ACTOR_LEARNING_RATE"]))
        print("=== Build Actor Network ===")
        actor_network.summary()

        #time.sleep(1.0)
        return actor_network
Example #19
def network():
    inputs = Input((80, 80, 4))
    a = Input(shape=(ACTIONS, ))
    y = Input(shape=(1, ), dtype='float32')
    conv1 = Conv2D(filters=32,
                   strides=4,
                   activation='relu',
                   padding='same',
                   use_bias=True,
                   kernel_size=[8, 8],
                   kernel_initializer=initializer.TruncatedNormal(stddev=0.01),
                   bias_initializer=initializer.Constant(value=0.01))(inputs)
    maxpool1 = MaxPooling2D(pool_size=2, strides=2, padding='same')(conv1)
    conv2 = Conv2D(filters=64,
                   strides=2,
                   activation='relu',
                   padding='same',
                   use_bias=True,
                   kernel_size=[4, 4],
                   kernel_initializer=initializer.TruncatedNormal(stddev=0.01),
                   bias_initializer=initializer.Constant(value=0.01))(maxpool1)
    #maxpool2 = MaxPooling2D(pool_size=2, strides=2, padding='same')(conv2)
    conv3 = Conv2D(filters=64,
                   strides=1,
                   activation='relu',
                   padding='same',
                   use_bias=True,
                   kernel_size=[1, 1],
                   kernel_initializer=initializer.TruncatedNormal(stddev=0.01),
                   bias_initializer=initializer.Constant(value=0.01))(conv2)
    #maxpool3 = MaxPooling2D(pool_size=2, strides=2, padding='same')(conv3)
    fci = Flatten()(conv3)
    fc1 = Dense(512,
                activation='relu',
                use_bias=True,
                kernel_initializer=initializer.TruncatedNormal(stddev=0.01),
                bias_initializer=initializer.Constant(value=0.01))(fci)
    fc2 = Dense(ACTIONS,
                activation='linear',
                use_bias=True,
                kernel_initializer=initializer.TruncatedNormal(stddev=0.01),
                bias_initializer=initializer.Constant(value=0.01))(fc1)

    mask = Dot(axes=1)([fc2, a])

    model = Model([inputs, a, y], fc2)

    opt = Adam(lr=0.0001)
    model.compile(optimizer=opt, loss=custom_loss(mask, y))

    model.summary()
    return model
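custom_loss is not shown in this snippet; a plausible definition consistent with the DQN-style masking above, where Dot(axes=1) selects the Q-value of the taken one-hot action (this is an assumption, not the project's actual code):

def custom_loss(q_selected, y_target):
    # Hypothetical DQN-style loss: squared error between the Q-value of the
    # taken action (the Dot(axes=1) output) and the bootstrapped target y.
    def loss(y_true, y_pred):  # model outputs are ignored; closed-over tensors are used
        return K.mean(K.square(y_target - q_selected))
    return loss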
Example #20
def create_model():
    classifier = Sequential()

    # Adding a first convolutional layer
    classifier.add(
        Conv2D(16, (5, 5),
               input_shape=(80, 80, 3),
               activation='relu',
               kernel_initializer=initializers.random_normal(stddev=0.04,
                                                             mean=0.00),
               bias_initializer=initializers.Constant(value=0.2)))
    classifier.add(MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(
        Conv2D(32, (5, 5),
               activation='relu',
               kernel_initializer=initializers.random_normal(stddev=0.04,
                                                             mean=0.00),
               bias_initializer=initializers.Constant(value=0.2)))
    classifier.add(MaxPooling2D(pool_size=(2, 2)))

    # Adding a third convolutional layer
    classifier.add(
        Conv2D(48, (4, 4),
               activation='relu',
               kernel_initializer=initializers.random_normal(stddev=0.04,
                                                             mean=0.00),
               bias_initializer=initializers.Constant(value=0.2)))
    classifier.add(MaxPooling2D(pool_size=(2, 2)))

    # Flattening
    classifier.add(Flatten())

    #Full connection
    classifier.add(
        Dense(512,
              activation='relu',
              kernel_initializer=initializers.random_normal(stddev=0.02,
                                                            mean=0.00),
              bias_initializer=initializers.Constant(value=0.1)))

    # output layer
    classifier.add(
        Dense(11,
              activation='softmax',
              kernel_initializer=initializers.random_normal(stddev=0.02,
                                                            mean=0.00),
              bias_initializer=initializers.Constant(value=0.1)))

    return classifier
Example #21
    def _create_model(self):
        input_x = Input(shape=(self.n_features, ))
        x = Dense(10,
                  kernel_initializer=initializers.random_normal(stddev=0.3),
                  bias_initializer=initializers.Constant(0.1),
                  activation='relu')(input_x)
        predictions = Dense(
            self.n_actions,
            kernel_initializer=initializers.random_normal(stddev=0.3),
            bias_initializer=initializers.Constant(0.1))(x)
        model = Model(inputs=input_x, outputs=predictions)
        model.compile(optimizer=optimizers.RMSprop(lr=self.lr),
                      loss='mean_squared_error')
        return model
Example #22
def custom_network(height, width, classes, pre_trained=''):

    input_img = Input(shape=(height, width, 3))
    if pre_trained != '':
        base_model = load_model(pre_trained)
        num_layers = len(base_model.layers)
        base_output = base_model.get_layer(index=num_layers - 2).output
        out = Dense(classes, activation='softmax')(base_output)
        model = Model(inputs=base_model.input, outputs=out)
    else:
        tower_1 = Conv2D(16, (1, 1),
                         padding='same',
                         activation='elu',
                         bias_initializer=initializers.Constant(.1))(input_img)
        tower_x = Conv2D(32, (3, 3),
                         padding='same',
                         activation='elu',
                         bias_initializer=initializers.Constant(.1))(tower_1)
        block1_output = GlobalAveragePooling2D()(tower_1)
        tower_y = MaxPooling2D(pool_size=(2, 2), padding='same')(tower_x)
        tower_y = Dropout(0.1)(tower_y)
        tower_z = Conv2D(32, (1, 1),
                         padding='same',
                         activation='elu',
                         bias_initializer=initializers.Constant(.1))(tower_y)
        tower_a = Conv2D(32, (3, 3),
                         padding='same',
                         activation='elu',
                         bias_initializer=initializers.Constant(.1))(tower_z)
        tower_a = MaxPooling2D(pool_size=(2, 2), padding='same')(tower_a)
        tower_a = Dropout(0.1)(tower_a)

        tower_2 = AveragePooling2D(pool_size=(4, 4), padding='same')(tower_x)
        tower_2 = Dropout(0.1)(tower_2)

        tower_3 = AveragePooling2D(pool_size=(2, 2), padding='same')(tower_z)
        tower_3 = Dropout(0.1)(tower_3)

        output = keras.layers.concatenate([tower_a, tower_2, tower_3], axis=1)
        output = Flatten()(output)
        out1 = keras.layers.concatenate([output, block1_output], axis=1)

        out = Dense(classes, activation='softmax')(out1)

        model = Model(inputs=input_img, outputs=out)

    model.summary()
    return model
Example #23
def target_model(LEARNING_RATE=1e-7):
    target_model = Sequential()
    target_model.add(Conv2D(32, (8, 8), strides=(4, 4), padding='same', input_shape=(64, 64, 4),
                            kernel_initializer=initializers.random_normal(stddev=0.01), bias_initializer=initializers.Constant(value=0.01), activation='relu'))
    target_model.add(Conv2D(64, (4, 4), strides=(2, 2), padding='same', kernel_initializer=initializers.random_normal(
        stddev=0.01), bias_initializer=initializers.Constant(value=0.01), activation='relu'))
    target_model.add(Conv2D(64, (3, 3), strides=(1, 1), padding='same', kernel_initializer=initializers.random_normal(
        stddev=0.01), bias_initializer=initializers.Constant(value=0.01), activation='relu'))
    target_model.add(Flatten())
    target_model.add(Dense(512, kernel_initializer=initializers.random_normal(
        stddev=0.01), bias_initializer=initializers.Constant(value=0.01), activation='relu'))
    target_model.add(Dense(2))
    adam = optimizers.Adam(lr=LEARNING_RATE, beta_1=0.9,
                           beta_2=0.999, epsilon=1e-08)
    target_model.compile(loss=losses.mean_squared_error, optimizer=adam)  # use the Adam instance so LEARNING_RATE takes effect
    return target_model
Example #24
    def build(self, input_shape):
        assert len(input_shape) == 3
        assert input_shape[0] == input_shape[1]
        assert input_shape[0][:-1] == input_shape[2][:-1]

        input_dim, features_dim = input_shape[0][-1], input_shape[2][-1]
        if self.use_intermediate_layer:
            self.first_kernel = self.add_weight(shape=(features_dim,
                                                       self.intermediate_dim),
                                                initializer="random_uniform",
                                                name='first_kernel')
            self.first_bias = self.add_weight(shape=(self.intermediate_dim, ),
                                              initializer="random_uniform",
                                              name='first_bias')
        self.features_kernel = self.add_weight(shape=(features_dim, 1),
                                               initializer="random_uniform",
                                               name='kernel')
        self.features_bias = self.add_weight(shape=(1, ),
                                             initializer=kinit.Constant(
                                                 self.bias_initializer),
                                             name='bias')
        if self.use_dimension_bias:
            self.dimensions_bias = self.add_weight(
                shape=(input_dim, ),
                initializer="random_uniform",
                name='dimension_bias')
        super(WeightedCombinationLayer, self).build(input_shape)
Example #25
    def __init__(self, embedding_matrix):
        super(Extractor_Model, self).__init__()

        #hyperparameters
        num_filters = 100
        sequence_length = 540
        embedding_dimension = 100
        num_words = 4860

        #model
        self.model = Sequential()
        self.model.add(layers.Embedding(input_dim=num_words, output_dim=embedding_dimension,
                                        embeddings_initializer=initializers.Constant(embedding_matrix),
                                        input_length=sequence_length, trainable=False))

        self.conv_2 = layers.Conv1D(filters=num_filters, kernel_size=2, padding='same', activation='relu')
        self.conv_3 = layers.Conv1D(filters=num_filters, kernel_size=3, padding='same', activation='relu')
        self.conv_4 = layers.Conv1D(filters=num_filters, kernel_size=4, padding='same', activation='relu')
        self.conv_5 = layers.Conv1D(filters=num_filters, kernel_size=5, padding='same', activation='relu')
        self.conv_6 = layers.Conv1D(filters=num_filters, kernel_size=6, padding='same', activation='relu')

        self.global_max_pool = layers.GlobalMaxPooling1D()

        #optimizer
        learning_rate = 0.001
        decay_rate = learning_rate/((1 + 10 * np.random.randint(0, 2)) ** 0.75)
        self.optimizer = tf.keras.optimizers.Adam(lr=learning_rate, decay=decay_rate)
Example #26
    def build(self, input_shape):
        self.input_dim = input_shape[-1]

        # See section 3.2 of Fortunato et al., "Noisy Networks for Exploration":
        # sigma is initialized to 0.5 / sqrt(p) and mu ~ U(-1/sqrt(p), +1/sqrt(p)),
        # where p is the number of inputs.
        sqrt_inputs = self.input_dim**(1 / 2)
        self.sigma_initializer = initializers.Constant(value=0.5 / sqrt_inputs)
        self.mu_initializer = initializers.RandomUniform(
            minval=(-1 / sqrt_inputs), maxval=(1 / sqrt_inputs))

        self.mu_weight = self.add_weight(shape=(self.input_dim, self.units),
                                         initializer=self.mu_initializer,
                                         name='mu_weights',
                                         constraint=self.kernel_constraint,
                                         regularizer=self.kernel_regularizer)

        self.sigma_weight = self.add_weight(
            shape=(self.input_dim, self.units),
            initializer=self.sigma_initializer,
            name='sigma_weights',
            constraint=self.kernel_constraint,
            regularizer=self.kernel_regularizer)

        self.mu_bias = self.add_weight(shape=(self.units, ),
                                       initializer=self.mu_initializer,
                                       name='mu_bias',
                                       constraint=self.bias_constraint,
                                       regularizer=self.bias_regularizer)

        self.sigma_bias = self.add_weight(shape=(self.units, ),
                                          initializer=self.sigma_initializer,
                                          name='sigma_bias',
                                          constraint=self.bias_constraint,
                                          regularizer=self.bias_regularizer)

        super(NoisyNetDense, self).build(input_shape=input_shape)
Example #27
    def build(self, input_shape):
        dim = input_shape[self.axis]

        if dim is None:
            raise ValueError('Axis ' + str(self.axis) + ' of '
                             'input tensor should have a defined dimension '
                             'but the layer received an input with shape ' +
                             str(input_shape) + '.')

        self.input_spec = InputSpec(ndim=len(input_shape),
                                    axes={self.axis: dim})
        shape = (dim, )

        self.gamma = self.add_weight(shape=shape,
                                     name='gamma',
                                     initializer=self.gamma_initializer,
                                     regularizer=self.gamma_regularizer,
                                     constraint=self.gamma_constraint)
        self.beta = self.add_weight(shape=shape,
                                    name='beta',
                                    initializer=self.beta_initializer,
                                    regularizer=self.beta_regularizer,
                                    constraint=self.beta_constraint)
        self.epsilon_l = self.add_weight(shape=(1, ),
                                         name='epsilon_l',
                                         initializer=initializers.Constant(
                                             self.epsilon),
                                         regularizer=self.epsilon_regularizer,
                                         constraint=self.epsilon_constraint,
                                         trainable=self.learnable_epsilon)

        self.built = True
Example #28
def test_TerminateOnNaN():
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()

    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    cbks = [callbacks.TerminateOnNaN()]
    model = Sequential()
    initializer = initializers.Constant(value=1e5)
    for _ in range(5):
        model.add(Dense(num_hidden, input_dim=input_dim, activation='relu',
                        kernel_initializer=initializer))
    model.add(Dense(num_classes, activation='linear'))
    model.compile(loss='mean_squared_error',
                  optimizer='rmsprop')

    # case 1 fit
    history = model.fit(X_train, y_train,
                        batch_size=batch_size,
                        validation_data=(X_test, y_test),
                        callbacks=cbks,
                        epochs=20)
    loss = history.history['loss']
    assert len(loss) == 1
    assert loss[0] == np.inf

    history = model.fit_generator(data_generator(X_train, y_train, batch_size),
                                  len(X_train),
                                  validation_data=(X_test, y_test),
                                  callbacks=cbks,
                                  epochs=20)
    loss = history.history['loss']
    assert len(loss) == 1
    assert loss[0] == np.inf or np.isnan(loss[0])
Example #29
    def layer(inputs):
        # Non-trainable convolution: the depthwise 'ones' kernel sums each
        # kernel_size x kernel_size window per channel, and the constant
        # 1/(kernel_size*kernel_size) pointwise kernel scales that result,
        # so each output filter is the window mean of the input summed over channels.
        conv = layers.SeparableConv2D(
            filters=dim_capsules[0] * (dim_capsules[1] * dim_capsules[2] + 1),
            kernel_size=kernel_size, strides=strides, padding=padding,
            depth_multiplier=1, depthwise_initializer='ones',
            pointwise_initializer=initializers.Constant(value=1 / (kernel_size * kernel_size)),
            use_bias=False, name=name)
        conv.trainable = False
        return conv(inputs)
Example #30
    def build_model(self,
                    layer_num=3,
                    init_weight=0,
                    lasso=0.01,
                    summary=False,
                    **kwargs):

        self.model = Sequential()
        self.model.add(
            Dense(int(self.input_dim),
                  kernel_initializer=initializers.Constant(value=init_weight),
                  kernel_regularizer=l1(lasso),  # 'W_regularizer' in the old Keras 1 API
                  input_shape=(self.input_dim, )))

        self.model.add(Activation("tanh"))

        for i in range(layer_num - 1):
            self.model.add(Dense(self.input_dim))
            self.model.add(Activation("tanh"))

        self.model.add(Dense(int(self.output_dim)))
        self.model.add(Activation("softmax"))

        if summary:
            self.model.summary()

        opt = RMSprop(lr=0.0005, rho=0.9, epsilon=1e-08, decay=0.0)

        self.model.compile(loss="categorical_crossentropy",
                           optimizer=opt,
                           metrics=['acc'])
        return self.model