Example #1
def mlp_model(x_train, y_train, x_val, y_val, params):
    model = Sequential()
    model.add(
        Dense(params['layer_size'],
              activation=params['activation'],
              input_dim=x_train.shape[1],
              kernel_regularizer=l2(params['regularization'])))
    model.add(Dropout(params['dropout']))
    for i in range(params['layers'] - 1):
        model.add(
            Dense(params['layer_size'],
                  activation=params['activation'],
                  kernel_regularizer=l2(params['regularization'])))
        model.add(Dropout(params['dropout']))
    model.add(Dense(2, activation='softmax'))
    model.compile(
        optimizer=params['optimizer'](params['lr']),
        loss=params['loss_functions'],
        # loss=params['loss_functions']([params['weights1'], params['weights2']]),
        metrics=['accuracy', Recall(), Precision(), f1])
    history = model.fit(x_train,
                        y_train,
                        batch_size=params['batch_size'],
                        validation_data=(x_val, y_val),
                        epochs=100,
                        callbacks=[
                            EarlyStopping(monitor='val_acc',
                                          patience=5,
                                          min_delta=0.01)
                        ],
                        verbose=0)
    return history, model
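A minimal invocation sketch for the tuning wrapper above; the params dict below is hypothetical (the keys mirror those read inside mlp_model, while the values and the Adam import are assumptions, not the original search space):

import numpy as np
from tensorflow.keras.optimizers import Adam

# hypothetical hyperparameter assignment, not the original search space
params = {
    'layer_size': 64, 'activation': 'relu', 'regularization': 1e-4,
    'dropout': 0.3, 'layers': 3, 'optimizer': Adam, 'lr': 1e-3,
    'loss_functions': 'categorical_crossentropy', 'batch_size': 32,
}
x_train = np.random.rand(200, 20)
y_train = np.eye(2)[np.random.randint(0, 2, 200)]  # one-hot, 2 classes
x_val, y_val = x_train[:40], y_train[:40]
history, model = mlp_model(x_train, y_train, x_val, y_val, params)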
Example #2
    def build(width, height, depth, classes):
        # initialize the model along with the input shape to be
        # "channels last"

        model = Sequential()
        # the image input shape
        inputShape = (height, width, depth)

        # if we are using "channels first", update the input shape
        if image_data_format() == "channels_first":
            inputShape = (depth, height, width)

        # every CNN implementation has a build method: it accepts a number
        # of parameters, constructs the network architecture, and returns
        # it to the calling function

        # define the first (and only) CONV => RELU layer
        # this layer has 32 filters, each 3x3; "same" padding ensures the
        # output size of the convolution matches the input size (not strictly
        # necessary for this example, but a good habit to start forming now)
        model.add(Conv2D(32, (3, 3), padding="same", input_shape=inputShape))
        model.add(Activation("relu"))

        #softmax classifier
        model.add(Flatten())
        model.add(Dense(classes))
        model.add(Activation("softmax"))

        # return the constructed network architecture
        return model
Example #3
 def define_gan(self):
     self.discriminator.trainable = False
     model = Sequential()
     model.add(self.generator)
     model.add(self.discriminator)
     model.compile(loss='binary_crossentropy', optimizer='adam')
     return model
Example #4
def create_model():
    model = Sequential()
    model.add(Dense(1, input_shape=(3, ), activation='sigmoid'))
    #   model.add(Dense(NUMBER_OF_ACTIONS, activation='sigmoid'))
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss="mse", optimizer=sgd, metrics=["accuracy"])
    return model
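A quick smoke test under the same imports the snippet assumes (Sequential, Dense, SGD):

import numpy as np

model = create_model()
X = np.random.rand(8, 3)                   # 8 samples, 3 features
y = np.random.randint(0, 2, size=(8, 1))   # binary targets for the MSE loss
model.fit(X, y, epochs=2, verbose=0)
print(model.predict(X[:1]))                # one sigmoid output in [0, 1]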
Example #5
 def _define_composite(generators, discriminators):
     model_list = []
     for i in range(len(discriminators)):
         g_models, d_models = generators[i], discriminators[i]
         # straight-through model
         d_models[0].trainable = False
         model1 = Sequential()
         model1.add(g_models[0])
         model1.add(d_models[0])
         model1.compile(loss=wasserstein_loss,
                        optimizer=Adam(lr=0.001,
                                       beta_1=0,
                                       beta_2=0.99,
                                       epsilon=10e-8))
         # fade-in model
         d_models[1].trainable = False
         model2 = Sequential()
         model2.add(g_models[1])
         model2.add(d_models[1])
         model2.compile(loss=wasserstein_loss,
                        optimizer=Adam(lr=0.001,
                                       beta_1=0,
                                       beta_2=0.99,
                                       epsilon=10e-8))
         # store
         model_list.append([model1, model2])
     return model_list
Example #6
def make_model(input_shape):
    ret = Sequential()
    ret.add(Dense(10, input_shape=input_shape, activation='sigmoid'))
    # ret.add(Dense(3, input_shape=input_shape, activation='sigmoid'))
    # ret.add(Dense(3, activation='relu'))
    ret.add(Dense(1, activation='linear'))
    return ret
Example #7
File: nn.py  Project: zhxuan300/FATE
def build_nn_model(input_shape,
                   nn_define,
                   loss,
                   optimizer,
                   metrics,
                   is_supported_layer=has_builder,
                   default_layer=None) -> KerasNNModel:
    model = Sequential()
    is_first_layer = True
    for layer_config in nn_define:
        layer = layer_config.get("layer", default_layer)
        if layer and is_supported_layer(layer):
            del layer_config["layer"]
            if is_first_layer:
                layer_config["input_shape"] = input_shape
                is_first_layer = False
            builder = get_builder(layer)
            model.add(builder(**layer_config))

        else:
            raise ValueError(f"dnn does not support layer {layer}")

    return from_keras_sequential_model(model=model,
                                       loss=loss,
                                       optimizer=optimizer,
                                       metrics=metrics)
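nn_define is a list of per-layer config dicts; every key except "layer" is forwarded to the layer builder as keyword arguments. A sketch of a possible config, assuming "Dense" is among FATE's registered builders:

# hypothetical two-layer definition; key names follow the loop above
nn_define = [
    {"layer": "Dense", "units": 32, "activation": "relu"},
    {"layer": "Dense", "units": 1, "activation": "sigmoid"},
]
nn_model = build_nn_model(input_shape=(16,), nn_define=nn_define,
                          loss="binary_crossentropy", optimizer="adam",
                          metrics=["accuracy"])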
Example #8
def build_gru_model():
    model = Sequential()
    # use CuDNNGRU instead of GRU for faster training on GPU
    model.add(CuDNNGRU(32, input_shape=(None, float_data.shape[-1])))
    model.add(Dense(1))
    model.compile(optimizer=RMSprop(), loss='mae')
    return model
Example #9
class PPOValueBrain:
    def __init__(
        self,
        learning_rate: float = 0.0001,
        hidden_layers_count: int = 0,
        neurons_per_hidden_layer: int = 0,
    ):
        self.model = Sequential()

        for i in range(hidden_layers_count):
            self.model.add(Dense(neurons_per_hidden_layer, activation=tanh))

        self.model.add(Dense(1, activation=linear, use_bias=True))
        self.model.compile(loss=mse, optimizer=Adam(lr=learning_rate))

    def predict(self, state: np.ndarray) -> np.ndarray:
        return self.model.predict(np.array((state,)))[0]

    def train(self, states: np.ndarray, targets: np.ndarray):
        self.model.train_on_batch(states, targets)

    def save_model(self, filename: str):
        self.model.save(f"{filename}_critic.h5")

    def load_model(self, filename: str):
        self.model = load_model(filename)
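A usage sketch for the critic, assuming an 8-dimensional state vector:

brain = PPOValueBrain(learning_rate=3e-4,
                      hidden_layers_count=2,
                      neurons_per_hidden_layer=64)
state = np.zeros(8)               # hypothetical state
print(brain.predict(state))       # length-1 array: the state-value estimate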
Example #10
def upsample(units,
             input_shape=None,
             apply_dropout=False,
             layer_type='dense',
             output_padding=(1, 1)):
    initializer = random_normal_initializer(0., 0.02)

    seq = Sequential()
    if layer_type == 'dense':
        seq.add(
            layers.Dense(units,
                         input_shape=[
                             input_shape,
                         ],
                         kernel_initializer=initializer,
                         use_bias=False))
    elif layer_type == 'conv':
        seq.add(
            layers.Conv2DTranspose(filters=units,
                                   kernel_size=3,
                                   strides=(2, 2),
                                   padding='same',
                                   input_shape=input_shape,
                                   kernel_initializer=initializer,
                                   use_bias=False,
                                   output_padding=output_padding))
    else:
        raise ValueError('wrong layer_type!')
    seq.add(layers.BatchNormalization())
    if apply_dropout:
        seq.add(layers.Dropout(0.5))
    seq.add(layers.ReLU())

    return seq
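Both branches in use; the shapes below are illustrative:

dense_up = upsample(64, input_shape=32, layer_type='dense')
conv_up = upsample(128, input_shape=(16, 16, 256),
                   layer_type='conv', apply_dropout=True)
# with kernel 3, stride 2, 'same' padding and output_padding (1, 1),
# conv_up maps (batch, 16, 16, 256) -> (batch, 32, 32, 128)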
Example #11
def build_model(layers):
    model = Sequential()
    for layer in layers:
        model.add(layer)
    model.compile(optimizer="adam",
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])
    return model
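The function expects already-constructed layer objects; a sketch assuming Dense is imported:

model = build_model([
    Dense(64, activation='relu', input_shape=(20,)),
    Dense(10, activation='softmax'),
])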
Example #12
    def build_resblock(self, filter_num, blocks, stride=1):

        res_blocks = Sequential()
        # may down sample
        res_blocks.add(Basic_Block(filter_num, stride))
        # do not down sample
        for _ in range(1, blocks):
            res_blocks.add(Basic_Block(filter_num, stride=1))
        return res_blocks
Example #13
    def build_resblock(self, filter_num, blocks, stride=1):
        res_blocks = Sequential()
        # may down-sample: within each Res Block, only the first
        # BasicBlock is allowed to down-sample
        res_blocks.add(BasicBlock(filter_num, stride))

        for _ in range(1, blocks):
            res_blocks.add(BasicBlock(filter_num, stride=1))

        return res_blocks
Example #14
class Discriminator(Model):
    def __init__(self, num_layer: int, num_cat: int):
        super().__init__()

        self.num_layer = num_layer
        self.num_cat = num_cat

    def build(self, input_shape):
        kernel_size = 5
        stride_size = 2

        width = input_shape[2]
        height = input_shape[1]
        num_channel = input_shape[3]
        next_channel = num_channel * 32

        kernel_init = keras.initializers.RandomNormal(stddev=0.02)
        bias_init = keras.initializers.zeros()

        self.conv2ds = Sequential()

        for i in range(self.num_layer - 1):
            conv = Conv2D(next_channel,
                          kernel_size,
                          stride_size,
                          padding="same",
                          activation=tf.nn.leaky_relu,
                          kernel_initializer=kernel_init,
                          bias_initializer=bias_init,
                          input_shape=(height, width, num_channel))
            self.conv2ds.add(conv)

            width = conv.output_shape[2]
            height = conv.output_shape[1]
            num_channel = next_channel
            next_channel *= stride_size

        conv = Conv2D(1 + self.num_cat, (height, width), (height, width),
                      padding="valid",
                      kernel_initializer=kernel_init,
                      bias_initializer=bias_init)
        self.conv2ds.add(conv)

        self.flatten = Flatten()

    def call(self, inputs: keras.layers.Input, **kwargs):
        output = inputs

        output = self.conv2ds(output)
        output = self.flatten(output)

        classify_result: tf.Tensor = output[:, :1]
        cat_result: tf.Tensor = output[:, 1:]

        return classify_result, cat_result
Example #15
def create_model(num_frame, num_joint, num_output):
    model = Sequential()
    model.add(
        CuDNNLSTM(50,
                  input_shape=(num_frame, num_joint),
                  return_sequences=False))
    model.add(Dropout(0.4))  # Dropout gives the model more chances to learn multiple independent representations
    model.add(Dense(60))
    model.add(Dropout(0.4))
    model.add(Dense(num_output, activation='softmax'))
    return model
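The returned model is uncompiled (and CuDNNLSTM requires a GPU runtime); a sketch of wiring it up, with illustrative skeleton-sequence sizes:

model = create_model(num_frame=32, num_joint=50, num_output=10)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])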
Example #16
def fit_mlp(x_train,
            y_train,
            conf,
            seed=3,
            epochs=1500,
            batch_size=300,
            lr=0.05):
    """
    :param x_train: training data
    :param y_train: training target
    :param conf: model structure
    :param seed: seed
    :param epochs: number of epochs (using early stopping)
    :param batch_size: batch size
    :param lr: learning rate
    :return: model and history
    """
    early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
                                               mode='min',
                                               verbose=0,
                                               patience=3,
                                               restore_best_weights=True)
    model = Sequential()
    for n, a in conf:
        model.add(
            Dense(
                n,
                activation=a,
                kernel_initializer=keras.initializers.glorot_normal(seed=seed),
                bias_initializer='zeros'))

    opt = keras.optimizers.Adam(lr=lr)
    if conf[-1][1] == 'linear':
        model.compile(loss=keras.losses.mse, optimizer=opt)
    elif conf[-1][1] == 'sigmoid':
        model.compile(loss=keras.losses.binary_crossentropy,
                      optimizer=opt,
                      metrics=['accuracy'])
    else:
        model.compile(loss=keras.losses.sparse_categorical_crossentropy,
                      optimizer=opt,
                      metrics=['accuracy'])

    history = model.fit(x_train,
                        y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        validation_split=0.2,
                        verbose=0,
                        shuffle=False,
                        callbacks=[early_stop])

    return model, history
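conf is a list of (units, activation) pairs, and the activation of the final pair selects the loss. A sketch for a binary classifier, with x_train and y_train as described in the docstring:

conf = [(64, 'relu'), (32, 'relu'), (1, 'sigmoid')]  # ends in sigmoid -> binary_crossentropy
model, history = fit_mlp(x_train, y_train, conf, epochs=200, lr=0.01)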
Example #17
 def define_generator(self):
     model = Sequential()
     model.add(
         Dense(128,
               activation='relu',
               kernel_initializer='he_uniform',
               input_dim=self.latent_dim))
     model.add(BatchNormalization())
     model.add(Dense(256, activation='relu'))
     model.add(BatchNormalization())
     model.add(Dense(self.n_outputs, activation='linear'))
     return model
Example #18
def define_model(src_vocab, tar_vocab, src_timesteps, tar_timesteps, n_units):
    model = Sequential()
    model.add(
        Embedding(src_vocab,
                  n_units,
                  input_length=src_timesteps,
                  mask_zero=True))
    model.add(LSTM(n_units))
    model.add(RepeatVector(tar_timesteps))
    model.add(LSTM(n_units, return_sequences=True))
    model.add(TimeDistributed(Dense(tar_vocab, activation='softmax')))
    return model
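A sketch of building and compiling the encoder-decoder; the vocabulary sizes and sequence lengths are illustrative:

model = define_model(src_vocab=5000, tar_vocab=6000,
                     src_timesteps=12, tar_timesteps=14, n_units=256)
model.compile(optimizer='adam', loss='categorical_crossentropy')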
Example #19
def gru_test():
    '''
    With return_sequences=True, the outputs of all time steps are returned;
    otherwise only the output of the last step is returned.
    '''
    model = Sequential()
    model.add(CuDNNGRU(128))
    # model.add(CuDNNGRU(128, return_sequences=True))
    model.compile('rmsprop', 'mse')
    input_array = np.random.normal(size=(32, 10, 1))
    output_array = model.predict(input_array)
    print(output_array.shape)
    return model
Example #20
class PointwiseNN(Ranker):
    def __init__(self):
        self.model = Sequential()
        self.model.add(Dense(128))
        self.model.add(Dense(1))
        self.model.compile(optimizer='adam', loss='mse')
        super().__init__()

    def fit(self, X_train, y_train):
        self.model.fit(x=X_train, y=y_train)

    def predict(self, X) -> ndarray:
        return self.model.predict(x=X)
Example #21
def make_cnn(filters,
             kernels,
             strides,
             activation='tanh',
             reg=1e-6,
             flat=False):
    _reg = l2(reg)
    cnn = Sequential([
        Conv2D(f, k, s, activation=activation, kernel_regularizer=_reg)
        for f, k, s in zip(filters, kernels, strides)
    ])
    if flat:
        cnn.add(Flatten())
    return cnn
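One Conv2D is created per zipped (filters, kernel, stride) triple; a sketch:

cnn = make_cnn(filters=[32, 64], kernels=[3, 3], strides=[2, 2],
               activation='relu', flat=True)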
Example #22
    def get_bidirectional_model(self, pre_embeddings, dp_rate=0.0, use_lstm=False):
        """
        follow the common model construction step shown in keras manual
        :param pre_embeddings:
        :param dp_rate: drop out rate
        :param use_lstm: utilize LSTM or GRU unit
        :return: the model
        """
        # the embedding part could try multiple channels, as in the original paper
        embedding_layer = Embedding(self.max_features,  # vocabulary size
                                    self.embedding_dims,  # word-vector dimension
                                    weights=[pre_embeddings],  # pre-trained word vectors
                                    input_length=self.maxlen,  # maximum sentence length
                                    trainable=False  # do not update the vectors during training
                                    )
        model = Sequential()
        model.add(embedding_layer)

        if use_lstm:
            model.add(Bidirectional(LSTM(RNN_DIM, recurrent_dropout=dp_rate)))
        else:
            model.add(Bidirectional(GRU(RNN_DIM, recurrent_dropout=dp_rate)))

        # model.add(Dropout(dp_rate))
        model.add(Dense(self.class_num, activation=self.last_activation))

        return model
Example #23
def downsample(units,
               input_shape=None,
               apply_batchnorm=True,
               layer_type='dense'):

    initializer = random_normal_initializer(0., 0.02)

    seq = Sequential()
    if layer_type == 'dense':
        seq.add(
            layers.Dense(units,
                         input_shape=[
                             input_shape,
                         ],
                         kernel_initializer=initializer,
                         use_bias=False))
    elif layer_type == 'conv':
        seq.add(
            layers.Conv2D(filters=units,
                          kernel_size=3,
                          strides=(2, 2),
                          padding='same',
                          input_shape=input_shape,
                          kernel_initializer=initializer,
                          use_bias=False))
    else:
        raise ValueError('wrong layer type!')
    if apply_batchnorm:
        seq.add(layers.BatchNormalization())

    seq.add(layers.LeakyReLU())
    return seq
Example #24
class BiLSTM(NNBaseModel):
    def train(self):
        batch_size = 64
        units = 100
        embedding_matrix = np.zeros((self.vocab_size, 100))
        for word, index in self.tk.word_index.items():
            embedding_vector = self.word2vec.get(word)
            if embedding_vector is not None:
                embedding_matrix[index] = embedding_vector

        self.model = Sequential()
        self.model.add(
            Embedding(self.vocab_size,
                      units,
                      weights=[embedding_matrix],
                      trainable=False))
        self.model.add(
            Bidirectional(LSTM(units, return_sequences=True, dropout=0.2)))
        self.model.add(Bidirectional(LSTM(units, dropout=0.2)))
        self.model.add(Dense(self.output_size, activation='sigmoid'))
        self.model.summary()  # summary() prints directly and returns None
        self.model.compile(optimizer='adam',
                           loss='sparse_categorical_crossentropy',
                           metrics=['acc'])
        history = self.model.fit(self.X_train,
                                 self.y_train,
                                 epochs=100,
                                 batch_size=batch_size,
                                 verbose=1)
Example #25
        def train_model(lowdata, labels, results, p):

            # Print parameters
            print('\n' + ' '.join(map(str, p)))

            # Split data
            #X_train, X_test, y_train, y_test = train_test_split(lowdata, labels,  test_size=0.2, random_state=29, shuffle=True)

            # Cross validation
            n_split = 5
            scores = []
            for train_index, test_index in KFold(n_split).split(lowdata):

                X_train, X_test = lowdata[train_index], lowdata[test_index]
                y_train, y_test = labels[train_index], labels[test_index]

                # Define model (complexity is a function of input dimensionality)
                if p[5] > 8:
                    k = 32
                elif p[5] >= 3:
                    k = 8
                else:
                    k = 3

                model = Sequential()
                model.add(
                    Dense(2 * k, activation='relu',
                          input_dim=X_train.shape[1]))
                model.add(Dropout(0.5))
                model.add(Dense(k, activation='relu'))
                model.add(Dense(n_classes, activation='softmax'))

                loss = categorical_crossentropy
                #optimizer = Adadelta(lr=0.0005)
                optimizer = Adam(lr=0.0005)
                model.compile(loss=loss,
                              optimizer=optimizer,
                              metrics=['categorical_accuracy'])
                #print(model.summary())

                # Train model
                n_epochs = 30
                history = model.fit(X_train,
                                    y_train,
                                    batch_size=32,
                                    epochs=n_epochs,
                                    verbose=0,
                                    validation_data=(X_test, y_test))

                #plot(history)
                scores.append(eval_metrics(model, X_test, y_test, class_names))

            # Evaluate model
            results = {
                'params': p,
                'history': history.history,
                'score': scores
            }
            return results
Example #26
def get_model():
    model = Sequential()
    model.add(
        Dense(99,
              input_dim=1,
              activation='softmax',
              kernel_initializer='he_uniform'))
    model.add(Dense(120, activation='tanh', kernel_initializer='he_uniform'))
    model.add(Dense(256, activation='tanh', kernel_initializer='he_uniform'))
    model.add(Dense(90, activation='relu', kernel_initializer='he_uniform'))
    model.add(Dense(20, activation='tanh', kernel_initializer='he_uniform'))
    model.add(Dense(10, activation='tanh', kernel_initializer='he_uniform'))
    model.add(Dense(1))
    model.compile(loss='mse', optimizer='adam')
    return model
Example #27
def embedding_test():
    '''
    The model takes as input an integer matrix of shape (batch, input_length).
    No integer in the input (i.e. no word index) may exceed the vocabulary
    size minus one. With the commented-out Embedding(1000, 64,
    input_length=10), model.output_shape == (None, 10, 64), where None is
    the batch dimension.
    '''
    model = Sequential()
    # model.add(Embedding(1000, 64, input_length=10))
    model.add(Embedding(160, 4))
    # input_array = np.random.randint(1000, size=(32, 10))
    input_array = np.random.randint(160, size=(5, 4, 4))
    model.compile('rmsprop', 'mse')
    output_array = model.predict(input_array)
    print(output_array.shape)
    return model
Example #28
 def define_discriminator(self):
     model = Sequential()
     model.add(
         Dense(256,
               activation='relu',
               kernel_initializer='he_uniform',
               input_dim=self.input_dim))
     model.add(Dropout(0.3))
     model.add(Dense(128, activation='relu'))
     model.add(Dropout(0.3))
     model.add(Dense(1, activation='sigmoid'))
     model.compile(loss='binary_crossentropy',
                   optimizer='adam',
                   metrics=['accuracy'])
     return model
Example #29
    def create_rnn_model(self, n_timesteps, n_features, n_outputs, model_type):
        model = Sequential()
        if model_type == "GRU":
            model.add(GRU(100, input_shape=(n_timesteps, n_features)))
        else:
            model.add(LSTM(100, input_shape=(n_timesteps, n_features)))
        model.add(Dropout(0.5))
        model.add(Dense(100, activation='relu'))
        model.add(Dense(n_outputs, activation='softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
        model.summary()

        return model
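Selecting between the two recurrent cells, called on the owning object (clf is hypothetical, as are the sizes):

model = clf.create_rnn_model(n_timesteps=128, n_features=9,
                             n_outputs=6, model_type="GRU")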
Example #30
    def custom_net(self, units, dropout, batch_size, epochs):

        model = Sequential()
        model.add(
            Dense(units[0],
                  activation="elu",
                  input_shape=(len(cat_cols + con_cols), )))
        for i in range(1, len(units)):
            model.add(Dense(units[i], activation="elu"))
            model.add(BatchNormalization())
            model.add(Dropout(dropout))
        model.add(Dense(1))
        model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
                      loss='mse')
        # self.nn_inp_names = model.input_names
        return model
Example #31
    def __init__(self, game: GridGame):
        super().__init__(game)
        # game params
        self.board_height = game.board_height
        self.board_width = game.board_width
        example_board = game.create_board()
        self.action_size = len(game.get_valid_moves(example_board))
        self.epochs_completed = 0
        self.epochs_to_train = 100
        args = Namespace(lr=0.001,
                         dropout=0.3,
                         epochs=10,
                         batch_size=64,
                         num_channels=512)
        self.checkpoint_name = 'random weights'
        self.args = args

        num_channels = 512
        kernel_size = [3, 3]
        dropout = 0.3
        model = Sequential()
        # regularizer = regularizers.l2(0.00006)
        regularizer = regularizers.l2(0.0001)
        model.add(Conv2D(num_channels,
                         kernel_size,
                         padding='same',
                         activation='relu',
                         input_shape=(self.board_height, self.board_width, 1),
                         activity_regularizer=regularizer))
        model.add(Conv2D(num_channels,
                         kernel_size,
                         padding='same',
                         activation='relu',
                         activity_regularizer=regularizer))
        model.add(Conv2D(num_channels,
                         kernel_size,
                         activation='relu',
                         activity_regularizer=regularizer))
        model.add(Conv2D(num_channels,
                         kernel_size,
                         activation='relu',
                         activity_regularizer=regularizer))
        model.add(Dropout(dropout))
        model.add(Dropout(dropout))
        model.add(Flatten())
        model.add(Dense(self.action_size + 1))
        model.compile('adam', 'mean_squared_error')
        self.model = model