Example #1
def build_ann_classifier(n_hidden_layers,
                         hidden_layer_size,
                         optimizer='adam',
                         input_shape=None):
    ''' function to build the ANN architecture '''

    # initialize a classifier
    classifier = Sequential()

    # input layer
    classifier.add(Input(shape=input_shape))

    # hidden layers
    for n in range(n_hidden_layers):
        classifier.add(
            Dense(units=hidden_layer_size,
                  kernel_initializer='uniform',
                  activation='relu'))
        # each subsequent hidden layer is 50% smaller (integer floor, at least 1 unit)
        hidden_layer_size = max(1, hidden_layer_size // 2)

    # output layers
    classifier.add(
        Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))

    # compile the model
    classifier.compile(optimizer=optimizer,
                       loss='binary_crossentropy',
                       metrics=['accuracy'])

    return classifier
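
A minimal usage sketch for the builder above; the feature count and layer sizes are placeholders, not from the original code:

# Sketch: three hidden layers starting at 64 units for 30 input features;
# widths halve at each layer: 64 -> 32 -> 16.
clf = build_ann_classifier(n_hidden_layers=3,
                           hidden_layer_size=64,
                           optimizer='adam',
                           input_shape=(30,))
clf.summary()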
Example #2
def build_small_cnn(input_shape, output_size):
    model = Sequential([
        # conv1_*
        Convolution2D(32,
                      kernel_size=3,
                      padding="same",
                      input_shape=input_shape),
        Activation("relu"),
        Convolution2D(32, kernel_size=3, padding="same"),
        Activation("relu"),
        MaxPooling2D(pool_size=(2, 2)),

        # conv2_*
        Convolution2D(64, kernel_size=3, padding="same"),
        Activation("relu"),
        Convolution2D(64, kernel_size=3, padding="same"),
        Activation("relu"),
        MaxPooling2D(pool_size=(2, 2)),

        # Fully connected
        Flatten(),
        Dense(512),
        Activation("relu"),
        Dense(512),
        Activation("relu"),
        Dense(output_size),
        Activation("softmax")
    ])

    model.compile(loss="categorical_crossentropy",
                  optimizer="adam",
                  metrics=["accuracy"])

    return model
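
For reference, a hypothetical call with CIFAR-10-shaped inputs; any (height, width, channels) shape and class count would work:

# 32x32 RGB images, 10 classes (placeholder values)
model = build_small_cnn(input_shape=(32, 32, 3), output_size=10)
model.summary()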
Example #3
def build_network(num_actions, agent_history_length, resized_width,
                  resized_height):
    with tf.device("/gpu:0"):
        # TF1-style placeholder for the state, returned alongside the Keras model
        # for use in an external training loop
        state = tf.placeholder(
            "float",
            [None, agent_history_length, resized_width, resized_height])
        inputs = Input(shape=(
            agent_history_length,
            resized_width,
            resized_height,
        ))
        model = Convolution2D(filters=16,
                              kernel_size=(8, 8),
                              strides=(4, 4),
                              activation='relu',
                              padding='same')(inputs)
        model = Convolution2D(filters=32,
                              kernel_size=(4, 4),
                              strides=(2, 2),
                              activation='relu',
                              padding='same')(model)
        model = Flatten()(model)
        model = Dense(256, activation='relu')(model)
        q_values = Dense(num_actions, activation='linear')(model)
        m = Model(inputs, outputs=q_values)
    return state, m
Example #4
    def _fcn(self, features: tf.Tensor,
             mode: tf.estimator.ModeKeys) -> tf.Tensor:
        """
            Sequence of FullyConnected Layers

            :param features: input of the sub network
            :param mode: standard names for Estimator model modes
            :return: output of the sub network

        """
        activation = 'relu'
        kernel_initializer = initializers.TruncatedNormal(mean=0, stddev=0.1)
        bias_initializer = 'zeros'

        fc6 = Dense(units=496,
                    activation=activation,
                    use_bias=True,
                    kernel_initializer=kernel_initializer,
                    bias_initializer=bias_initializer)(features)

        drop7 = Dropout(rate=0.5)(fc6)

        fc7 = Dense(units=496,
                    activation=activation,
                    use_bias=True,
                    kernel_initializer=kernel_initializer,
                    bias_initializer=bias_initializer)(drop7)

        drop8 = Dropout(rate=0.5)(fc7)

        return drop8
Example #5
def get_seq_model():
  """Define three channel input shape depending on image data format."""
  if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
  else:
    input_shape = (img_width, img_height, 3)

  # Initialize CNN by creating a sequential model.
  model = Sequential()
  model.add(Conv2D(32, (3, 3), input_shape=input_shape))
  model.add(Activation('relu'))
  model.add(MaxPooling2D(pool_size=(2, 2)))

  model.add(Conv2D(32, (3, 3)))
  model.add(Activation('relu'))
  model.add(MaxPooling2D(pool_size=(2, 2)))

  model.add(Conv2D(64, (3, 3)))
  model.add(Activation('relu'))
  model.add(MaxPooling2D(pool_size=(2, 2)))

  model.add(Flatten())
  model.add(Dense(64))
  model.add(Activation('relu'))
  model.add(Dropout(0.5))
  model.add(Dense(2))
  model.add(Activation('sigmoid'))

  model.compile(
      loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

  return model
Example #6
    def DNNclassifier_crps(self, p, num_cut, optimizer, seeding):

        tf.set_random_seed(seeding)
        inputs = Input(shape=(p,))
        if isinstance(optimizer, str):
            opt = optimizer
        else:
            # Rebuild the optimizer from its config so each model gets a fresh instance.
            opt_name = optimizer.__class__.__name__
            opt_config = optimizer.get_config()
            opt_class = getattr(optimizers, opt_name)
            opt = opt_class(**opt_config)

        for i, n_neuron in enumerate(self.hidden_list):
            if i == 0:
                net = Dense(n_neuron, kernel_initializer='he_uniform')(inputs)
            else:
                net = Dense(n_neuron, kernel_initializer='he_uniform')(net)
            net = Activation(activation='elu')(net)
            net = BatchNormalization()(net)
            net = Dropout(rate=self.dropout_list[i])(net)

        softmaxlayer = Dense(num_cut + 1, activation='softmax',
                             kernel_initializer='he_uniform')(net)

        output = Lambda(self.tf_cumsum)(softmaxlayer)
        model = Model(inputs=[inputs], outputs=[output])
        model.compile(optimizer=opt, loss=self.crps_loss)

        return model
Example #7
    def _fcn(self, features: tf.Tensor,
             mode: tf.estimator.ModeKeys) -> tf.Tensor:
        """
            Sequence of FullyConnected Layers

            :param features: input of the sub network
            :param mode: standard names for Estimator model modes
            :return: output of the sub network

        """
        activation = 'relu'
        kernel_initializer = tf.compat.v1.keras.initializers.TruncatedNormal(
            mean=0, stddev=0.1)
        bias_initializer = 'zeros'

        # fc3: output is [None, 120]
        fc3 = Dense(units=120,
                    activation=activation,
                    use_bias=True,
                    kernel_initializer=kernel_initializer,
                    bias_initializer=bias_initializer)(features)

        # fc4: output is [None, 84]
        fc4 = Dense(units=84,
                    activation=activation,
                    use_bias=True,
                    kernel_initializer=kernel_initializer,
                    bias_initializer=bias_initializer)(fc3)

        return fc4
Example #8
def build_model(units,
                inputs_dim,
                output="regression",
                sparse_dim=[],
                with_ts=False,
                ts_maxlen=0):
    assert output == "regression" or output == "binary_clf", "This output type is not supported."
    assert len(sparse_dim) == inputs_dim[1], "Dimensions do not match."

    # Inputs for basic features.
    inputs1 = Input(shape=(inputs_dim[0], ), name="basic_input")
    x1 = Dense(units, kernel_regularizer='l2', activation="relu")(inputs1)

    # Inputs for long one-hot features.
    inputs2 = Input(shape=(inputs_dim[1], ), name="one_hot_input")
    for i in range(len(sparse_dim)):
        if i == 0:
            x2 = Embedding(sparse_dim[i], units,
                           mask_zero=True)(slice(inputs2, i))
        else:
            tmp = Embedding(sparse_dim[i], units,
                            mask_zero=True)(slice(inputs2, i))
            x2 = Concatenate()([x2, tmp])
    x2 = tf.reshape(x2, [-1, units * inputs_dim[1]])
    x = Concatenate()([x1, x2])

    if with_ts:
        inputs3 = Input(shape=(
            None,
            inputs_dim[2],
        ), name="ts_input")
        x3 = LSTM(units,
                  input_shape=(ts_maxlen, inputs_dim[2]),
                  return_sequences=False)(inputs3)
        x = Concatenate()([x, x3])

    x = Dense(units, kernel_regularizer='l2', activation="relu")(x)
    x = Dropout(0.5)(x)
    x = Dense(units, kernel_regularizer='l2', activation="relu")(x)
    x = Dropout(0.5)(x)

    if output == "regression":
        x = Dense(1, kernel_regularizer='l2')(x)
        model = Model(inputs=[inputs1, inputs2], outputs=x)
        if with_ts:
            model = Model(inputs=[inputs1, inputs2, inputs3], outputs=x)
        model.compile(optimizer='adam', loss='mean_squared_error')

    elif output == "binary_clf":
        x = Dense(1, kernel_regularizer='l2', activation="sigmoid")(x)
        model = Model(inputs=[inputs1, inputs2], outputs=x)
        if with_ts:
            model = Model(inputs=[inputs1, inputs2, inputs3], outputs=x)
        model.compile(optimizer='adam',
                      loss='binary_crossentropy',
                      metrics=['acc'])

    #model.summary()
    return model
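
The `slice` helper called above is not part of this snippet. A plausible reconstruction (an assumption, not the original helper) that picks the i-th column of the one-hot input so each sparse feature gets its own Embedding:

def slice(x, index):
    # Hypothetical helper (shadows the builtin on purpose, matching the call
    # sites above): keep a length-1 second axis so Embedding receives a
    # tensor of shape (batch, 1).
    return x[:, index:index + 1]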
Example #9
    def create_actor_model(self):
        state_input = Input(shape=(6,))  # Keras expects a shape tuple
        h1 = Dense(400, activation='relu')(state_input)
        h2 = Dense(300, activation='relu')(h1)
        output = Dense(1, activation='tanh')(h2)
        model = Model(inputs=state_input, outputs=output)

        return model
Example #10
def create_dqn():
    # Create a neural network with two hidden layers
    nn = Sequential()
    nn.add(Dense(36, input_dim=OBSERVATION_SPACE_DIMS, activation='tanh'))
    nn.add(Dense(28, activation='relu'))
    nn.add(Dense(len(ACTION_SPACE), activation='linear'))
    nn.compile(loss='mse', optimizer=Adam(lr=ALPHA, decay=ALPHA_DECAY))
    return nn
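
`create_dqn` relies on module-level constants; a sketch with plausible values for a CartPole-like task (all hypothetical):

# Hypothetical constants assumed by create_dqn
OBSERVATION_SPACE_DIMS = 4   # e.g. CartPole observation size
ACTION_SPACE = [0, 1]        # discrete actions
ALPHA = 0.001                # learning rate for Adam
ALPHA_DECAY = 0.0            # learning-rate decay

dqn = create_dqn()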
Example #11
    def network(self):
        """ Assemble Critic network to predict q-values """
        state = Input(shape=self.env_dim)
        x = Dense(32, activation='elu')(state)
        x = Dense(16, activation='elu')(x)
        out = Dense(1, activation='linear',
                    kernel_initializer=RandomUniform())(x)
        return Model(state, out)
Example #12
def build_svrg_nn(input_shape, output_size):
    model = Sequential([
        Flatten(input_shape=input_shape),
        Dense(100, activation="tanh"),
        Dense(10),
        Activation("softmax")
    ])

    model.compile(loss="categorical_crossentropy",
                  optimizer="adam",
                  metrics=["accuracy"])

    return model
Example #13
    def create_actor(self):
        obs_in = Input(shape=[self.obs_dim])  # 3 states

        h1 = Dense(self.hidden_dim, activation='relu')(obs_in)
        h2 = Dense(self.hidden_dim, activation='relu')(h1)
        h3 = Dense(self.hidden_dim, activation='relu')(h2)

        out = Dense(self.act_dim, activation='tanh')(h3)

        model = Model(inputs=obs_in, outputs=out)

        # The actor has no compiled loss; it is updated with gradients
        # supplied by the critic in DDPG-style training.
        return model, model.trainable_weights, obs_in
Example #14
def model_fn_LENET_5(features,
                     activation='relu',
                     kernel_initializer=tf.keras.initializers.TruncatedNormal(
                         mean=0, stddev=0.1),
                     bias_initializer='zeros'):

    # conv1: output is [None, 28, 28, 6]
    conv1 = Conv2D(filters=6,
                   kernel_size=(5, 5),
                   strides=(1, 1),
                   padding='valid',
                   activation=activation,
                   use_bias=True,
                   kernel_initializer=kernel_initializer,
                   bias_initializer=bias_initializer)(features)

    # pool1: output is [None, 14, 14, 6]
    pool1 = MaxPool2D(pool_size=(2, 2))(conv1)

    # conv2: output is [None, 10, 10, 16]
    conv2 = Conv2D(filters=16,
                   kernel_size=(5, 5),
                   strides=(1, 1),
                   padding='valid',
                   activation=activation,
                   use_bias=True,
                   kernel_initializer=kernel_initializer,
                   bias_initializer=bias_initializer)(pool1)

    # pool2: output is [None, 5, 5, 16] -> flattened on input of FC to [None, 400]
    pool2 = MaxPool2D(pool_size=(2, 2))(conv2)
    flatten = Flatten()(pool2)

    # fc3: output is [None, 120]
    fc3 = Dense(units=120,
                activation=activation,
                use_bias=True,
                kernel_initializer=kernel_initializer,
                bias_initializer=bias_initializer)(flatten)

    # fc4: output is [None, 84]
    fc4 = Dense(units=84,
                activation=activation,
                use_bias=True,
                kernel_initializer=kernel_initializer,
                bias_initializer=bias_initializer)(fc3)

    return fc4
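
The per-layer shape comments imply a 32x32 single-channel input (32 - 5 + 1 = 28 after the first 'valid' 5x5 convolution). A sketch that wraps the feature extractor into a complete classifier, assuming the same tf.keras imports as the snippet; the 10-class head is an assumption:

inputs = Input(shape=(32, 32, 1))   # yields the [None, 28, 28, 6] conv1 output noted above
features = model_fn_LENET_5(inputs)
logits = Dense(units=10)(features)  # assumed 10-class output head
model = Model(inputs=inputs, outputs=logits)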
Example #15
def build_mlp(
        obs_spec: Spec,
        act_spec: Spec,
        layer_sizes=(64, 64),
        activation='relu',
        initializer='glorot_uniform',
        value_separate=False,
        obs_shift=False,
        obs_scale=False) -> tf.keras.Model:
    """
    Factory method for a simple fully connected neural network model used in e.g. MuJuCo environment

    If value separate is set to true then a separate path is added for value fn, otherwise branches out of last layer
    If obs shift is set to true then observations are normalized to mean zero with running mean estimate
    If obs scale is set to true then observations are standardized to std.dev one with running std.dev estimate
    """
    inputs = inputs_ = [Input(s.shape, name="input_" + s.name) for s in obs_spec]
    if obs_shift or obs_scale:
        inputs_ = [RunningStatsNorm(obs_shift, obs_scale, name="norm_" + s.name)(x) for s, x in zip(obs_spec, inputs_)]
    inputs_concat = Concatenate()(inputs_) if len(inputs_) > 1 else inputs_[0]

    x = build_fc(inputs_concat, layer_sizes, activation, initializer)
    outputs = [build_logits(space, x, initializer) for space in act_spec]

    if value_separate:
        x = build_fc(inputs_concat, layer_sizes, activation, initializer, 'value_')

    value = Dense(1, name="value_out", kernel_initializer=initializer)(x)
    value = Squeeze(axis=-1)(value)
    outputs.append(value)

    return tf.keras.Model(inputs=inputs, outputs=outputs)
Example #16
def build_decoder_model_without_argmax(seq2seq, input_t, output_t):
    # Remove all initializer.
    input_state = Input(shape=(seq2seq.units, ), name="decoder_state")
    decoder_inputs = Input(shape=(None, ), name="decoder_input")
    decoder_embedding = Embedding(seq2seq.tgt_token_size,
                                  seq2seq.units,
                                  input_length=None,
                                  name="decoder_emb")
    decoder_gru = GRU(seq2seq.units,
                      return_sequences=True,
                      return_state=True,
                      name="decoder_gru")
    decoder_dense = Dense(seq2seq.tgt_token_size,
                          activation="softmax",
                          name="output_dense")

    state = input_state
    for t in range(input_t, output_t):
        inputs = Lambda(slice, arguments={"index": t})(
            decoder_inputs)  # Count encoder output as time 0.
        inputs_embedding = decoder_embedding(inputs)
        decoder_outputs_time, state = decoder_gru(inputs_embedding,
                                                  initial_state=state)
    if input_t == output_t:
        decoder_outputs_time = Lambda(lambda x: K.expand_dims(x, axis=1))(
            state)
    softmax = decoder_dense(decoder_outputs_time)
    decoder_model = Model([decoder_inputs, input_state], [softmax] + [state])

    return decoder_model
Example #17
def build_model(hidden_size):
    inputs = Input(shape=(28, 28))
    x1 = Flatten()(inputs)
    x2 = Dense(hidden_size, activation=tf.nn.relu)(x1)
    x3 = Dropout(0.2)(x2)
    x4 = Dense(10, activation=tf.nn.softmax)(x3)
    model = Model(inputs=inputs, outputs=x4)

    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    # Train and evaluate the model
    model.fit(x_train, y_train, epochs=5)
    [loss, acc] = model.evaluate(x_test, y_test)
    return [model, acc]
Example #18
def gru_keras(max_features,
              maxlen,
              bidirectional,
              dropout_rate,
              embed_dim,
              rec_units,
              mtype='GRU',
              reduction=None,
              classes=4,
              lr=0.001):

    if K.backend() == 'tensorflow':
        K.clear_session()

    input_layer = Input(shape=(maxlen, ))
    embedding_layer = Embedding(max_features,
                                output_dim=embed_dim,
                                trainable=True)(input_layer)
    x = SpatialDropout1D(dropout_rate)(embedding_layer)

    if reduction:
        if mtype == 'GRU':
            if bidirectional:
                x = Bidirectional(
                    CuDNNGRU(units=rec_units, return_sequences=True))(x)
            else:
                x = CuDNNGRU(units=rec_units, return_sequences=True)(x)
        elif mtype == 'LSTM':
            if bidirectional:
                x = Bidirectional(
                    CuDNNLSTM(units=rec_units, return_sequences=True))(x)
            else:
                x = CuDNNLSTM(units=rec_units, return_sequences=True)(x)

        if reduction == 'average':
            x = GlobalAveragePooling1D()(x)
        elif reduction == 'maximum':
            x = GlobalMaxPool1D()(x)
    else:
        if mtype == 'GRU':
            if bidirectional:
                x = Bidirectional(
                    CuDNNGRU(units=rec_units, return_sequences=False))(x)
            else:
                x = CuDNNGRU(units=rec_units, return_sequences=False)(x)
        elif mtype == 'LSTM':
            if bidirectional:
                x = Bidirectional(
                    CuDNNLSTM(units=rec_units, return_sequences=False))(x)
            else:
                x = CuDNNLSTM(units=rec_units, return_sequences=False)(x)

    output_layer = Dense(classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=output_layer)
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(learning_rate=lr, clipvalue=1, clipnorm=1),
                  metrics=['acc'])
    return model
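
A sketch of a typical call (hyperparameter values are placeholders); note that the CuDNN layers require a GPU:

model = gru_keras(max_features=20000, maxlen=100, bidirectional=True,
                  dropout_rate=0.2, embed_dim=128, rec_units=64,
                  mtype='GRU', reduction='average', classes=4, lr=0.001)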
Example #19
def NN_huaweiv1(maxlen, embedding_matrix=None, class_num1=17, class_num2=12):
    emb_layer = Embedding(
        embedding_matrix.shape[0],
        embedding_matrix.shape[1],
        input_length=maxlen,
        weights=[embedding_matrix],
        trainable=False,
    )
    seq1 = Input(shape=(maxlen, ))

    x1 = emb_layer(seq1)
    sdrop = SpatialDropout1D(rate=0.2)
    lstm_layer = Bidirectional(CuDNNGRU(128, return_sequences=True))
    gru_layer = Bidirectional(CuDNNGRU(128, return_sequences=True))
    cnn1d_layer = Conv1D(64,
                         kernel_size=3,
                         padding="same",
                         kernel_initializer="he_uniform")
    x1 = sdrop(x1)
    lstm1 = lstm_layer(x1)
    gru1 = gru_layer(lstm1)
    att_1 = Attention(maxlen)(lstm1)
    att_2 = Attention(maxlen)(gru1)
    cnn1 = cnn1d_layer(lstm1)

    avg_pool = GlobalAveragePooling1D()
    max_pool = GlobalMaxPooling1D()

    x1 = concatenate([
        att_1, att_2,
        Attention(maxlen)(cnn1),
        avg_pool(cnn1),
        max_pool(cnn1)
    ])

    x = Dropout(0.2)(Activation(activation="relu")(BatchNormalization()(
        Dense(128)(x1))))
    x = Activation(activation="relu")(BatchNormalization()(Dense(64)(x)))
    pred1 = Dense(class_num1, activation='sigmoid', name='pred1')(x)
    y = concatenate([x1, x])
    y = Activation(activation="relu")(BatchNormalization()(Dense(64)(y)))
    pred2 = Dense(class_num2, activation='sigmoid', name='pred2')(y)

    model = Model(inputs=seq1, outputs=[pred1, pred2])
    return model
Example #20
    def build_model(self):
        model = Sequential()
        # Input layer and hidden layer 1. kernel_initializer gives random values to weights according to specified dist.
        model.add(
            Dense(128,
                  input_dim=self.state_size,
                  activation='relu',
                  kernel_initializer='he_uniform'))
        # Hidden layer 2
        model.add(Dense(64, activation='relu'))
        # Output layer: linear activation so Q-values are unbounded
        model.add(Dense(self.action_size, activation='linear'))

        # Compile the model
        # model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate, decay=0.00001))
        model.compile(loss='mse',
                      optimizer=Adam(lr=self.learning_rate, decay=0.0))
        return model
Example #21
def build_cnn(input_shape, output_size):
    kwargs = {"kernel_size": 3, "activation": "relu", "padding": "same"}
    model = Sequential([
        # conv1_*
        Convolution2D(64, input_shape=input_shape, **kwargs),
        BatchRenormalization(),
        Convolution2D(64, **kwargs),
        BatchRenormalization(),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),

        # conv2_*
        Convolution2D(128, **kwargs),
        BatchRenormalization(),
        Convolution2D(128, **kwargs),
        BatchRenormalization(),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),

        # conv3_*
        Convolution2D(256, **kwargs),
        BatchRenormalization(),
        Convolution2D(256, **kwargs),
        BatchRenormalization(),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),

        # Fully connected
        Flatten(),
        Dense(1024),
        Activation("relu"),
        Dropout(0.5),
        Dense(512),
        Activation("relu"),
        Dropout(0.5),
        Dense(output_size),
        Activation("softmax")
    ])

    model.compile(loss="categorical_crossentropy",
                  optimizer="adam",
                  metrics=["accuracy"])

    return model
Example #22
def build_fully_conv(obs_spec,
                     act_spec,
                     data_format='channels_first',
                     broadcast_non_spatial=False,
                     fc_dim=256):
    screen, screen_input = spatial_block('screen', obs_spec.spaces[0],
                                         conv_cfg(data_format, 'relu'))
    minimap, minimap_input = spatial_block('minimap', obs_spec.spaces[1],
                                           conv_cfg(data_format, 'relu'))

    non_spatial_inputs = [Input(s.shape) for s in obs_spec.spaces[2:]]

    if broadcast_non_spatial:
        non_spatial, spatial_dim = non_spatial_inputs[1], obs_spec.spaces[
            0].shape[1]
        non_spatial = tf.math.log(non_spatial + 1e-5)
        broadcasted_non_spatial = Broadcast2D(spatial_dim)(non_spatial)
        state = tf.concat([screen, minimap, broadcasted_non_spatial], axis=1)
    else:
        state = tf.concat([screen, minimap], axis=1)

    fc = Flatten(name="state_flat")(state)
    fc = Dense(fc_dim, **dense_cfg('relu'))(fc)

    value = Dense(1, name="value_out", **dense_cfg(scale=0.1))(fc)
    value = tf.squeeze(value, axis=-1)

    logits = []
    for space in act_spec:
        if space.is_spatial():
            logits.append(
                Conv2D(1, 1, **conv_cfg(data_format, scale=0.1))(state))
            logits[-1] = Flatten()(logits[-1])
        else:
            logits.append(Dense(space.size(), **dense_cfg(scale=0.1))(fc))

    mask_actions = Lambda(lambda x: tf.where(non_spatial_inputs[0] > 0, x,
                                             -1000 * tf.ones_like(x)),
                          name="mask_unavailable_action_ids")
    logits[0] = mask_actions(logits[0])

    return Model(inputs=[screen_input, minimap_input] + non_spatial_inputs,
                 outputs=logits + [value])
Example #23
    def Train(self):
        # self.loadDataFeature()
        self.loadDataTxt()
        self.train_and_test_split(0.75)
        # model
        model = Sequential()

        # model.add(Dense(392, activation='relu'))
        # model.add(Dense(128, activation='relu'))
        # model.add(Dense(36, activation='softmax'))

        # CNN model

        model.add(
            Conv2D(64, (3, 3), activation='relu', input_shape=(28, 28, 1)))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D((2, 2)))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D((2, 2)))
        model.add(Flatten())
        model.add(Dense(128, activation='relu'))
        model.add(Dense(128, activation='relu'))
        model.add(Dense(36, activation='softmax'))

        # model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
        model.fit(
            self.train_data['data'],
            self.train_data['class_name'],
            batch_size=25,
            epochs=100,
            verbose=1,
            validation_data=(self.test_data['data'],
                             self.test_data['class_name']),
        )
        self.model = model
        model.save('digit_classification_model1.h5')
        # Y_pred = model.predict(self.test_data['data'])
        # self.metric(self.test_data['class_name'], Y_pred, data_type='binary')
        self.metric()
Example #24
def create_model(layers, activation, input_dim, output_dim):
    '''
    Builds and compiles a Keras Sequential model based on the given
    parameters.

    :param layers: [hiddenlayer1_nodes, hiddenlayer2_nodes, ...]
    :param activation: e.g. relu
    :param input_dim: number of input nodes
    :param output_dim: number of output nodes
    :return: Keras model
    '''
    model = Sequential()
    for i, nodes in enumerate(layers):
        if i == 0:
            model.add(Dense(nodes, input_dim=input_dim, activation=activation))
        else:
            model.add(Dense(nodes, activation=activation))
    model.add(Dense(output_dim, activation='linear'))
    model.compile(loss='mse', optimizer='adam')

    return model
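
A usage sketch (the sizes are placeholders): two hidden layers of 64 and 32 nodes on 10 input features with a single linear output:

model = create_model(layers=[64, 32], activation='relu',
                     input_dim=10, output_dim=1)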
Example #25
    def face_impl(input_shape, output_size):
        x = Input(shape=input_shape)
        e = modelf(input_shape, embedding)(x)
        y = Dense(output_size)(e)
        y = Activation("softmax")(y)

        model = Model(x, y)
        model.compile("adam",
                      "sparse_categorical_crossentropy",
                      metrics=["accuracy"])

        return model
Example #26
def build_lstm_model(input_data, output_size, neurons=20, activ_func='linear',
                     dropout=0.25, loss='mae', optimizer='adam'):
    model = Sequential()
    model.add(CuDNNLSTM(neurons, input_shape=(input_data.shape[1], input_data.shape[2]), return_sequences=True))
    model.add(Dropout(dropout))
    model.add(CuDNNLSTM(neurons, input_shape=(input_data.shape[1], input_data.shape[2])))
    model.add(Dropout(dropout))
    model.add(Dense(units=output_size))
    model.add(Activation(activ_func))

    model.compile(loss=loss, optimizer=optimizer)
    return model
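
The first argument only supplies shape information, so any 3-D array of (samples, timesteps, features) works; a sketch with placeholder dimensions (CuDNNLSTM requires a GPU):

import numpy as np

input_data = np.zeros((256, 30, 8))  # (samples, timesteps, features), placeholder shape
model = build_lstm_model(input_data, output_size=1)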
Example #27
def build_GRU_with_h_gate_model(seq2seq):  # A new one.
    units = seq2seq.units
    h_tm1_input = Input(shape=(units, ), name="h_input")
    x_input = Input(shape=(units, ), name="x_input")
    z_input = Input(shape=(units, ), name="z_input")
    r_input = Input(shape=(units, ), name="r_input")

    x_h = Dense(units, name="wx_h")(
        x_input)  # x_h = K.bias_add(K.dot(inputs, kernel_h), input_bias_h)
    r_h_tm1 = layers.Multiply()([r_input, h_tm1_input])  # r * h_tm1
    recurrent_h = Dense(units, use_bias=False, name="uh_h")(
        r_h_tm1)  # recurrent_h = K.dot(r * h_tm1, recurrent_kernel_h)
    hh_ = layers.Add()([x_h, recurrent_h])
    hh = tanh(hh_)  # hh = tanh(x_h + recurrent_h)
    h1 = layers.Multiply()([z_input, h_tm1_input])
    h2 = layers.Multiply()([1 - z_input, hh])
    h = layers.Add()([h1, h2])  # h = z * h_tm1 + (1 - z) * hh
    GRU_with_h_gate_model = Model([h_tm1_input, x_input, z_input, r_input], h)
    #print("h gate model.")
    #GRU_with_h_gate_model.summary()
    return GRU_with_h_gate_model
Example #28
def neural_network(input_shape):
    inputs = keras.Input(shape=input_shape)

    #Layer 1
    x = MaxPooling2D(pool_size=(2, 2), name='MaxPooling2D_1')(inputs)
    x = Conv2D(32, kernel_size=(5, 5), padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = MaxPooling2D(pool_size=(4, 4))(x)

    #Layer 2
    x = Conv2D(64, kernel_size=(5, 5), padding='same', name='Conv2D_2')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = MaxPooling2D(pool_size=(2, 2), name='MaxPooling2D_3')(x)

    x = Flatten(name='Flatten')(x)

    #Layer 3
    #model.add(Dense(256,name = 'Dense_1'))
    #model.add(BatchNormalization(name = 'BatchNormalization_2'))
    #model.add(LeakyReLU(alpha=0.1))
    #model.add(Dropout(0.5,name = 'Dropout_1'))

    #Layer 4
    x = Dense(128, name='Dense_2')(x)
    x = BatchNormalization(name='BatchNormalization_3')(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Dropout(0.5, name='Dropout_2')(x)

    #Layer 5
    x = Dense(128, name='Dense_3')(x)
    x = BatchNormalization(name='BatchNormalization_4')(x)
    x = LeakyReLU(alpha=0.1)(x)
    #model.add(Dropout(0.5,name = 'Dropout_3'))

    outputs = Dense(1, activation='sigmoid', name='Dense_4')(x)

    model = Model(inputs, outputs)
    return model
Example #29
def build_lstm_mnist(input_shape, output_size):
    """Build a small LSTM to recognize MNIST digits as permuted sequences"""
    model = Sequential([
        CuDNNLSTM(128, input_shape=input_shape),
        Dense(output_size),
        Activation("softmax")
    ])

    model.compile(optimizer="adam",
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])

    return model
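
A usage sketch, assuming each 28x28 image is flattened into a (permuted) sequence of 784 single-pixel timesteps:

model = build_lstm_mnist(input_shape=(784, 1), output_size=10)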
Example #30
def build_lstm_timit(input_shape, output_size):
    """Build a simple LSTM to classify the phonemes in the TIMIT dataset"""
    model = Sequential([
        LSTM(256, unroll=True, input_shape=input_shape),
        Dense(output_size),
        Activation("softmax")
    ])

    model.compile(optimizer="adam",
                  loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])

    return model