def create_model(learning_rate, num_dense_layers, num_dense_nodes):
    '''Create, compile and return a Keras model with the given
    hyperparameters.

    Parameters
    ----------
    learning_rate : float
        The learning rate for the network's optimizer.

    num_dense_layers : int
        The number of hidden layers in the network.

    num_dense_nodes : int
        The number of hidden units per layer.

    Returns
    -------
    model : compiled Keras model
    '''

    model = Sequential()
    # train_data is assumed to be a 2-D (samples, features) array, so the
    # input/output dimension is the feature count on axis 1.
    model.add(InputLayer(input_shape=(np.shape(train_data)[1], )))
    for i in range(num_dense_layers):
        model.add(Dense(num_dense_nodes, activation='selu'))
    model.add(Dense(np.shape(train_data)[1], activation='linear'))
    optimizer = RMSprop(lr=learning_rate)
    model.compile(optimizer=optimizer, loss='mse', metrics=['accuracy'])

    return model
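A minimal usage sketch for the function above; train_data and train_labels are assumed to already exist as NumPy arrays, and all three hyperparameter values are illustrative.

# Hypothetical call with placeholder hyperparameters.
model = create_model(learning_rate=1e-3, num_dense_layers=2, num_dense_nodes=64)
model.fit(train_data, train_labels, epochs=10, batch_size=32, validation_split=0.2)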
Example #2
def build_simple_model():
    model = Sequential()
    model.add(Flatten(input_shape=(lookback // step, float_data.shape[-1])))
    model.add(Dense(32, activation='relu'))
    model.add(Dense(1))
    model.compile(optimizer=RMSprop(), loss='mae')
    return model
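This snippet leans on three globals from the surrounding timeseries setup; a sketch of plausible definitions, with illustrative values only.

# float_data: 2-D (timesteps, features) array of normalized readings.
# lookback: how many past timesteps each sample spans; step: sampling stride.
float_data = np.zeros((200000, 14), dtype='float32')  # placeholder data
lookback = 1440
step = 6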
Example #3
def build_gru_model():
    model = Sequential()
    # Use CuDNNGRU instead of GRU for better speed on GPU
    model.add(CuDNNGRU(32, input_shape=(None, float_data.shape[-1])))
    model.add(Dense(1))
    model.compile(optimizer=RMSprop(), loss='mae')
    return model
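CuDNNGRU only runs on a CUDA-enabled GPU. A hedged training sketch for the model above; train_gen and val_gen are hypothetical generators yielding (samples, targets) batches.

model = build_gru_model()
history = model.fit_generator(train_gen,
                              steps_per_epoch=500,
                              epochs=20,
                              validation_data=val_gen,
                              validation_steps=100)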
Example #4
def create_model():
    # Our input feature map is 150x150x3: 150x150 for the image pixels, and 3 for
    # the three color channels: R, G, and B
    img_input = layers.Input(shape=(150, 150, 3))

    # Flatten feature map to a 1-dim tensor so we can add fully connected layers
    x = layers.Flatten()(img_input)

    # Create a fully connected layer with Sigmoid activation and 500 hidden units
    x = layers.Dense(500, activation='sigmoid')(x)

    # Create a second fully connected layer with Sigmoid activation and 500 hidden units
    x = layers.Dense(500, activation='sigmoid')(x)

    # Add a dropout rate of 0.5
    x = layers.Dropout(0.5)(x)

    # Create output layer with a single node and sigmoid activation
    output = layers.Dense(1, activation='sigmoid')(x)

    # Create model:
    # input = input feature map
    # output = flattened input + two fully connected layers + dropout +
    # sigmoid output layer
    model = Model(img_input, output)

    model.compile(loss='binary_crossentropy',
                  optimizer=RMSprop(lr=0.01),
                  metrics=['acc'])

    # model.summary()

    return model
Example #5
    def build_model_CNN(self):
        ''' CNN '''

        model = Sequential()
        embedding_layer = Embedding(self.vocab_length,
                                    300,
                                    weights=[self.embedding_matrix],
                                    input_length=self.length_long_sentence,
                                    trainable=False)
        model.add(embedding_layer)
        model.add(
            Conv1D(filters=150,
                   kernel_regularizer=l2(0.01),
                   kernel_size=5,
                   strides=1,
                   padding='valid'))
        model.add(MaxPooling1D(2, padding='valid'))
        model.add(
            Conv1D(filters=150,
                   kernel_regularizer=l2(0.01),
                   kernel_size=5,
                   strides=1,
                   padding='valid'))
        model.add(MaxPooling1D(2, padding='valid'))
        model.add(Flatten())
        model.add(Dense(80, kernel_regularizer=l2(0.01), activation='relu'))
        model.add(Dense(40, kernel_regularizer=l2(0.01), activation='relu'))
        model.add(Dense(20, kernel_regularizer=l2(0.01), activation='relu'))
        model.add(Dense(2, activation='sigmoid'))
        model.compile(loss='binary_crossentropy',
                      optimizer=RMSprop(lr=0.001),
                      metrics=['accuracy'])
        model.summary()
        return model
Example #6
    def __init__(self, n_actions, n_features, eval_model, target_model):

        self.params = {
            'n_actions': n_actions,
            'n_features': n_features,
            'learning_rate': 0.01,
            'reward_decay': 0.9,
            'e_greedy': 0.9,
            'replace_target_iter': 300,
            'memory_size': 500,
            'batch_size': 32,
            'e_greedy_increment': None
        }

        # total learning step
        self.learn_step_counter = 0

        self.epsilon = (0 if self.params['e_greedy_increment'] is not None
                        else self.params['e_greedy'])

        # initialize zero memory [s, a, r, s_]
        self.memory = np.zeros(
            (self.params['memory_size'], self.params['n_features'] * 2 + 2))

        self.eval_model = eval_model
        self.target_model = target_model

        self.eval_model.compile(
            optimizer=RMSprop(lr=self.params['learning_rate']), loss='mse')
        self.cost_his = []
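The example stops after __init__; a hypothetical companion method sketching how the fields initialized above would typically drive epsilon-greedy action selection.

    def choose_action(self, observation):
        # Add a batch dimension so the model can score a single state.
        observation = observation[np.newaxis, :]
        if np.random.uniform() < self.epsilon:
            # Exploit: take the action with the highest predicted Q-value.
            action = np.argmax(self.eval_model.predict(observation))
        else:
            # Explore: take a random action.
            action = np.random.randint(0, self.params['n_actions'])
        return action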
Example #7
def create_generator():
    # Create the Generator network structure
    generator = Sequential()

    generator.add(Dense(12544, input_dim=100))
    generator.add(BatchNormalization(momentum=0.9))
    generator.add(Activation('relu'))
    generator.add(Reshape((7, 7, 256)))
    generator.add(Dropout(0.4))

    generator.add(UpSampling2D())
    generator.add(Conv2DTranspose(128, 5, padding='same'))
    generator.add(BatchNormalization(momentum=0.9))
    generator.add(Activation('relu'))
    generator.add(UpSampling2D())
    generator.add(Conv2DTranspose(64, 5, padding='same'))
    generator.add(BatchNormalization(momentum=0.9))
    generator.add(Activation('relu'))
    generator.add(Conv2DTranspose(32, 5, padding='same'))
    generator.add(BatchNormalization(momentum=0.9))
    generator.add(Activation('relu'))

    generator.add(Conv2DTranspose(1, 5, padding='same'))
    generator.add(Activation('sigmoid'))

    generator.compile(optimizer=RMSprop(lr=0.0004, clipvalue=1.0, decay=3e-8),
                      loss='binary_crossentropy',
                      metrics=['accuracy'])

    generator.summary()

    return generator
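A hypothetical sampling step for the generator above: 100-dim noise in, 28x28 grayscale images out (the 7x7 feature maps are upsampled twice).

noise = np.random.uniform(-1.0, 1.0, size=(16, 100))
fake_images = generator.predict(noise)  # shape (16, 28, 28, 1)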
Example #8
        def build_model():

            # epoch, dropout = best_model()
            epoch, dropout = 5, 0.2
            print('EPOCH = ', epoch)
            print('DROPOUT = ', dropout)

            model = Sequential()
            model.add(
                Embedding(input_dim=ger_vocab_size,
                          output_dim=128,
                          input_length=11))
            model.add(LSTM(128))
            model.add(RepeatVector(11))
            model.add(LSTM(128, return_sequences=True))
            model.add(Dropout(dropout))
            model.add(Dense(eng_vocab_size, activation='softmax'))
            model.compile(optimizer=RMSprop(lr=0.01),
                          loss='sparse_categorical_crossentropy',
                          metrics=['acc'])
            model.summary()

            # Train model
            history = model.fit(X_train,
                                y_train.reshape(y_train.shape[0],
                                                y_train.shape[1], 1),
                                epochs=epoch,
                                batch_size=128,
                                verbose=1,
                                validation_split=0.2)
            # Evaluate the model
            loss, accuracy = model.evaluate(X_test,
                                            y_test.reshape(
                                                y_test.shape[0],
                                                y_test.shape[1], 1),
                                            verbose=1)
            print('Accuracy: %f' % (accuracy * 100))

            def display():
                plt.plot(history.history['acc'])
                plt.plot(history.history['val_acc'])

                plt.title('model accuracy')
                plt.ylabel('accuracy')
                plt.xlabel('epoch')
                plt.legend(['train', 'test'], loc='upper left')
                plt.show()

                plt.plot(history.history['loss'])
                plt.plot(history.history['val_loss'])

                plt.title('model loss')
                plt.ylabel('loss')
                plt.xlabel('epoch')
                plt.legend(['train', 'test'], loc='upper left')
                plt.show()

            display()
Example #9
    def _setup(self, config):
        self.train_stories, self.test_stories = read_data()
        model = self.build_model()
        rmsprop = RMSprop(lr=self.config.get("lr", 1e-3),
                          rho=self.config.get("rho", 0.9))
        model.compile(optimizer=rmsprop,
                      loss="sparse_categorical_crossentropy",
                      metrics=["accuracy"])
        self.model = model
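_setup here follows Ray Tune's Trainable API, which populates self.config before calling it. A hypothetical launch, with MyTrainable standing in for the enclosing class:

from ray import tune
tune.run(MyTrainable, config={"lr": 1e-3, "rho": 0.9})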
Example #10
    def discriminator_model(self):
        if self.DM:
            return self.DM
        optimizer = RMSprop(lr=0.0002, decay=6e-8)
        self.DM = Sequential()
        self.DM.add(self.discriminator())
        self.DM.compile(loss='binary_crossentropy',
                        optimizer=optimizer,
                        metrics=['accuracy'])
        return self.DM
Example #11
        def best_model():
            epochs = [5, 10, 15, 20]
            dropout_rate = [0.1, 0.2, 0.3]
            list_of_all_scores = list()
            list_of_scores = list()
            list_of_dropout = list()
            list_of_all_dropouts = list()
            list_of_epochs = list()

            for i in dropout_rate:

                model = Sequential()
                model.add(
                    Embedding(input_dim=ger_vocab_size,
                              output_dim=128,
                              input_length=11))
                model.add(LSTM(128))
                model.add(RepeatVector(11))
                model.add(LSTM(128, return_sequences=True))
                model.add(Dropout(i))
                model.add(Dense(eng_vocab_size, activation='softmax'))
                model.compile(optimizer=RMSprop(lr=0.01),
                              loss='sparse_categorical_crossentropy',
                              metrics=['acc'])

                list_of_dropout.append(i)

                for e in epochs:
                    list_of_all_dropouts.append(i)
                    list_of_epochs.append(e)

                    model.fit(X_train,
                              y_train.reshape(y_train.shape[0],
                                              y_train.shape[1], 1),
                              epochs=e,
                              batch_size=128,
                              verbose=1,
                              validation_split=0.2)
                    score = model.evaluate(X_test,
                                           y_test.reshape(
                                               y_test.shape[0],
                                               y_test.shape[1], 1),
                                           verbose=1)
                    list_of_all_scores.append(score)

                    if score not in list_of_scores:
                        list_of_scores.append(score)

            # print('Dropout:', i, '\n', 'Epoch:', e, '\n', 'Score:', float(score))
            lowest = min(list_of_all_scores)
            num = list_of_all_scores.index(lowest)
            epoch = list_of_epochs[num]
            dropout = list_of_all_dropouts[num]
            print('Lowest score:', lowest, 'Epoch:', epoch, 'Dropout:', dropout)

            return epoch, dropout
Example #12
    def adversarial_model(self):
        if self.AM:
            return self.AM
        optimizer = RMSprop(lr=0.0001, decay=3e-8)
        self.AM = Sequential()
        self.AM.add(self.generator())
        self.AM.add(self.discriminator())
        self.AM.compile(loss='binary_crossentropy',
                        optimizer=optimizer,
                        metrics=['accuracy'])
        return self.AM
Example #13
def textgenrnn_model(num_classes,
                     cfg,
                     context_size=None,
                     weights_path=None,
                     dropout=0.0,
                     optimizer=RMSprop(lr=4e-3, rho=0.99)):
    '''
    Builds the model architecture for textgenrnn_tf and
    loads the specified weights for the model.
    '''

    input = Input(shape=(cfg['max_length'], ), name='input')
    embedded = Embedding(num_classes,
                         cfg['dim_embeddings'],
                         input_length=cfg['max_length'],
                         name='embedding')(input)

    if dropout > 0.0:
        embedded = SpatialDropout1D(dropout, name='dropout')(embedded)

    rnn_layer_list = []
    for i in range(cfg['rnn_layers']):
        prev_layer = embedded if i == 0 else rnn_layer_list[-1]
        rnn_layer_list.append(new_rnn(cfg, i + 1)(prev_layer))

    seq_concat = concatenate([embedded] + rnn_layer_list, name='rnn_concat')
    attention = AttentionWeightedAverage(name='attention')(seq_concat)
    output = Dense(num_classes, name='output', activation='softmax')(attention)

    if context_size is None:
        model = Model(inputs=[input], outputs=[output])
        if weights_path is not None:
            model.load_weights(weights_path, by_name=True)
        model.compile(loss='categorical_crossentropy', optimizer=optimizer)

    else:
        context_input = Input(shape=(context_size, ), name='context_input')
        context_reshape = Reshape((context_size, ),
                                  name='context_reshape')(context_input)
        merged = concatenate([attention, context_reshape], name='concat')
        main_output = Dense(num_classes,
                            name='context_output',
                            activation='softmax')(merged)

        model = Model(inputs=[input, context_input],
                      outputs=[main_output, output])
        if weights_path is not None:
            model.load_weights(weights_path, by_name=True)
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      loss_weights=[0.8, 0.2])

    return model
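The cfg dict must supply the three keys read above; a sketch with illustrative values.

cfg = {
    'max_length': 40,       # length of the input token window
    'dim_embeddings': 100,  # embedding dimensionality
    'rnn_layers': 2,        # number of stacked RNN layers
}
model = textgenrnn_model(num_classes=500, cfg=cfg)  # 500 = assumed vocab size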
Example #14
def create_discriminator():
    # Create the Discriminator network structure
    discriminator = Sequential()

    discriminator.add(
        Conv2D(64,
               kernel_size=(5, 5),
               strides=2,
               activation=LeakyReLU(alpha=0.2),
               padding='same',
               input_shape=(param['img_rows'], param['img_cols'], 1)))
    discriminator.add(Dropout(0.4))
    discriminator.add(
        Conv2D(
            128,
            kernel_size=(5, 5),
            strides=2,
            activation=LeakyReLU(alpha=0.2),
            padding='same',
        ))
    discriminator.add(Dropout(0.4))
    discriminator.add(
        Conv2D(
            256,
            kernel_size=(5, 5),
            strides=2,
            activation=LeakyReLU(alpha=0.2),
            padding='same',
        ))
    discriminator.add(Dropout(0.4))
    discriminator.add(
        Conv2D(
            256,
            kernel_size=(5, 5),
            strides=2,
            activation=LeakyReLU(alpha=0.2),
            padding='same',
        ))
    discriminator.add(Dropout(0.4))

    discriminator.add(Flatten())
    discriminator.add(Dense(1, activation='sigmoid'))

    # Compile it
    discriminator.compile(optimizer=RMSprop(lr=0.0008,
                                            clipvalue=1.0,
                                            decay=6e-8),
                          loss='binary_crossentropy',
                          metrics=['accuracy'])

    discriminator.summary()

    return discriminator
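A hypothetical single discriminator update, pairing this network with the create_generator() defined earlier; real_images is an assumed batch drawn from the dataset.

batch_size = real_images.shape[0]
noise = np.random.uniform(-1.0, 1.0, size=(batch_size, 100))
fake_images = generator.predict(noise)
x = np.concatenate([real_images, fake_images])
# Label real images 1 and generated images 0.
y = np.concatenate([np.ones((batch_size, 1)), np.zeros((batch_size, 1))])
d_loss = discriminator.train_on_batch(x, y)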
Example #15
def build_cnn_1d_model(maxlen=500):
    model = Sequential()
    model.add(Embedding(max_features, 128, input_length=maxlen))
    model.add(Conv1D(32, 7, activation='relu'))
    model.add(MaxPooling1D(5))
    model.add(Conv1D(32, 7, activation='relu'))
    model.add(GlobalMaxPooling1D())
    model.add(Dense(1, activation='sigmoid'))  # sigmoid to match binary_crossentropy
    model.summary()
    model.compile(optimizer=RMSprop(lr=1e-4),
                  loss='binary_crossentropy',
                  metrics=['acc'])
    return model
Example #16
def create_model():

    model = Sequential()
    model.add(
        Dense(64,
              activation='relu',
              kernel_initializer='normal',
              input_shape=(16, )))
    model.add(Dense(64, kernel_initializer='normal', activation='relu'))
    model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
    model.compile(loss="binary_crossentropy",
                  optimizer=RMSprop(),
                  metrics=['accuracy'])
    return model
Example #17
    def trainModel(self, model, epochs, train_generator, valid_generator):

        model.compile(loss='categorical_crossentropy',
                      optimizer=RMSprop(lr=0.001),
                      metrics=['acc'])

        history = model.fit_generator(generator=train_generator,
                                      steps_per_epoch=100,
                                      epochs=epochs,
                                      validation_data=valid_generator,
                                      validation_steps=50,
                                      verbose=1)

        return history  # the history of the trained model
Example #18
    def discriminator_model(self):
        if self.DM:
            return self.DM
        optimizer = RMSprop(lr=2e-4, decay=1e-8)
        self.DM = Sequential()
        self.DM.add(self.discriminator())

        #multi_gpu
        with tf.device('/cpu:0'):
            self.DM = multi_gpu_model(self.DM, gpus=4)

        self.DM.compile(loss='binary_crossentropy',
                        optimizer=optimizer,
                        metrics=['accuracy'])
        return self.DM
Example #19
def create_model():
    model = Sequential()
    model.add(Dense(20, activation='relu', input_shape=(9, )))
    model.add(Dropout(0.2))
    model.add(Dense(15, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(10, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(5, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(),
                  metrics=['accuracy'])

    model.summary()

    return model
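Because the head is a 5-way softmax trained with categorical_crossentropy, integer labels must be one-hot encoded first; X_train and y_train are assumed to exist.

from keras.utils import to_categorical

model = create_model()
model.fit(X_train, to_categorical(y_train, num_classes=5),
          epochs=50, batch_size=64, validation_split=0.2)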
Example #20
def keras_model_fn(hyperparameters):
    """keras_model_fn receives hyperparameters from the training job and returns a compiled keras model.
    The model will be transformed into a TensorFlow Estimator before training and it will be saved in a 
    TensorFlow Serving SavedModel at the end of training.

    Args:
        hyperparameters: The hyperparameters passed to the SageMaker TrainingJob that runs your TensorFlow 
                         training script.
    Returns: A compiled Keras model
    """
    model = Sequential()

    # TensorFlow Serving default prediction input tensor name is PREDICT_INPUTS.
    # We must conform to this naming scheme.
    model.add(
        InputLayer(input_shape=(HEIGHT, WIDTH, DEPTH), name=PREDICT_INPUTS))
    model.add(Conv2D(32, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(NUM_CLASSES))
    model.add(Activation('softmax'))

    _model = tf.keras.Model(inputs=model.input, outputs=model.output)

    opt = RMSprop(lr=hyperparameters['learning_rate'],
                  decay=hyperparameters['decay'])

    _model.compile(loss='categorical_crossentropy',
                   optimizer=opt,
                   metrics=['accuracy'])

    return _model
Example #21
    def __init__(self):
        # Input shape
        self.class_num = config.class_num
        self.img_shape = config.img_shape
        self.img_width = config.img_width
        self.img_height = config.img_height
        self.img_channel = config.img_channel
        self.gf = 32
        self.df = 64
        self.patch = int(config.img_height // (2**4))
        self.patch_size = (self.patch, self.patch, 1)
        self.s1 = Dataset('./t1ce', './tice_label')
        self.s2 = Dataset('./t2', './t2_label')
        self.target = Dataset('./OpenBayes',
                              './Openbayes_label',
                              need_resize=1)
        optimizer = RMSprop(0.0002)
        self.D = self.build_discriminator()
        self.G = self.build_generator()
        self.G.trainable = False
        real_img = Input(shape=config.img_shape)
        real_src, real_cls = self.D(real_img)
        fake_cls = Input(shape=(self.class_num, ))
        fake_img = self.G([real_img, fake_cls])
        fake_src, fake_output = self.D(fake_img)
        self.Train_D = Model([real_img, fake_cls],
                             [real_src, real_cls, fake_src, fake_output])
        self.Train_D.compile(loss=[
            'mse', self.classification_loss, 'mse', self.classification_loss
        ],
                             optimizer=optimizer,
                             loss_weights=[1.0, 1.0, 1.0, 1.0])

        self.G.trainable = True
        self.D.trainable = False
        real_x = Input(shape=self.img_shape)
        now_label = Input(shape=(self.class_num, ))
        target_label = Input(shape=(self.class_num, ))
        fake_x = self.G([real_x, target_label])
        fake_out_src, fake_out_cls = self.D(fake_x)
        x_rec = self.G([fake_x, now_label])
        self.train_G = Model([real_x, now_label, target_label],
                             [fake_out_src, fake_out_cls, x_rec])
        self.train_G.compile(loss=['mse', self.classification_loss, 'mae'],
                             optimizer=optimizer,
                             loss_weights=[1.0, 1.0, 1.0])
Example #22
def model_DenseNet():
    model_dense = RGCSA.ResneXt_IN((1, img_rows, img_cols, img_channels), cardinality=8, classes=9)

    RMS = RMSprop(lr=0.0003)

    # Let's train the model using RMSprop

    def mycrossentropy(y_true, y_pred, e=0.1):
        loss1 = K.categorical_crossentropy(y_true, y_pred)

        loss2 = K.categorical_crossentropy(K.ones_like(y_pred) / nb_classes, y_pred)  # K.ones_like(y_pred) / nb_classes

        return (1 - e) * loss1 + e * loss2

    model_dense.compile(loss=mycrossentropy, optimizer=RMS, metrics=['accuracy'])

    return model_dense
Example #23
def get_optimizer(optimizer='sgd', learning_rate=0.1, momentum=0.9, log=True):
    """Create an optimizer and wrap it for Horovod distributed training. Default is SGD."""
    if log:
        print('Creating optimizer on rank ' + str(hvd.rank()))
    opt = None
    if optimizer == 'sgd+nesterov':
        opt = SGD(lr=learning_rate, momentum=momentum, nesterov=True)
    elif optimizer == 'rmsprop':
        opt = RMSprop(lr=learning_rate, rho=0.9)
    elif optimizer == 'adam':
        opt = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, amsgrad=False)
    elif optimizer == 'adadelta':
        opt = Adadelta(lr=learning_rate, rho=0.95)
    else:
        opt = SGD(lr=learning_rate, momentum=momentum, nesterov=False)
    # Wrap optimizer for data distributed training
    return hvd.DistributedOptimizer(opt)
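Typical Horovod boilerplate around get_optimizer(); a sketch assuming horovod.keras is imported as hvd (as the rank logging above implies) and that model, x_train and y_train exist.

hvd.init()
# A common convention: scale the base learning rate by the worker count.
opt = get_optimizer('rmsprop', learning_rate=0.1 * hvd.size())
model.compile(optimizer=opt, loss='categorical_crossentropy',
              metrics=['accuracy'])
# Broadcast initial weights from rank 0 so all workers start in sync.
callbacks = [hvd.callbacks.BroadcastGlobalVariablesCallback(0)]
model.fit(x_train, y_train, epochs=10, callbacks=callbacks)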
Example #24
def get_unet_64(input_shape, num_classes, filters):
    inputs = Input(shape=input_shape)
    # 128

    down1 = Conv2D(filters, (3, 3), padding='same')(inputs)
    down1 = BatchNormalization()(down1)
    down1 = Activation('relu')(down1)
    down1 = Conv2D(filters, (3, 3), padding='same')(down1)
    down1 = BatchNormalization()(down1)
    down1 = Activation('relu')(down1)
    down1_pool = MaxPooling2D((2, 2), strides=(2, 2))(down1)
    # 64

    center = Conv2D(filters * 2, (3, 3), padding='same')(down1_pool)
    center = BatchNormalization()(center)
    center = Activation('relu')(center)
    center = Conv2D(filters * 2, (3, 3), padding='same')(center)
    center = BatchNormalization()(center)
    center = Activation('relu')(center)
    # center

    up1 = UpSampling2D((2, 2))(center)
    up1 = concatenate([down1, up1], axis=3)
    up1 = Conv2D(filters, (3, 3), padding='same')(up1)
    up1 = BatchNormalization()(up1)
    up1 = Activation('relu')(up1)
    up1 = Conv2D(filters, (3, 3), padding='same')(up1)
    up1 = BatchNormalization()(up1)
    up1 = Activation('relu')(up1)
    up1 = Conv2D(filters, (3, 3), padding='same')(up1)
    up1 = BatchNormalization()(up1)
    up1 = Activation('relu')(up1)

    # 128

    classify = Conv2D(num_classes, (1, 1), activation='sigmoid')(up1)

    model = Model(inputs=inputs, outputs=classify)

    #model.compile(optimizer=RMSprop(lr=0.0001), loss=bce_dice_loss, metrics=[dice_coeff])
    model.compile(optimizer=RMSprop(lr=0.0001),
                  metrics=['accuracy'],
                  loss='binary_crossentropy')

    return model
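A hypothetical instantiation of the single-level U-Net above, for 128x128 RGB inputs and binary masks; with one 2x2 pooling stage, the upsampled path lines up with down1 as required by the concatenate call.

model = get_unet_64(input_shape=(128, 128, 3), num_classes=1, filters=32)
model.summary()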
Example #25
    def create_model_from_ResNet50():

        model = Sequential()

        model.add(vgg_model)
        model.add(Flatten())
        model.add(Dense(1024, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(1024, activation='relu'))
        model.add(Dense(102, activation='softmax'))

        model.layers[0].trainable = False

        model.compile(loss='categorical_crossentropy',
                      optimizer=RMSprop(lr=0.001),
                      metrics=['acc'])  # optimizer=RMSprop(lr=0.001)

        return model
Example #26
def model_DenseNet():
    model_dense = RGCSA.ResneXt_IN((1, img_rows, img_cols, img_channels), classes=16)

    RMS = RMSprop(lr=0.0003)

    def mycrossentropy(y_true, y_pred, e=0.1):
        loss1 = K.categorical_crossentropy(y_true, y_pred)

        loss2 = K.categorical_crossentropy(K.ones_like(y_pred) / nb_classes, y_pred)  # K.ones_like(y_pred) / nb_classes

        return (1 - e) * loss1 + e * loss2

    model_dense.compile(loss=mycrossentropy, optimizer=RMS, metrics=['accuracy'])  # categorical_crossentropy

    model_dense.summary()
    # plot_model(model_dense, show_shapes=True, to_file='./model_ResNeXt_GroupChannel_Space_Attention.png')

    return model_dense
Example #27
    def build_model_LSTM(self):
        ''' LSTM model '''

        model = Sequential()
        embedding_layer = Embedding(self.vocab_length,
                                    300,
                                    weights=[self.embedding_matrix],
                                    input_length=self.length_long_sentence,
                                    trainable=False)
        model.add(embedding_layer)
        model.add(LSTM(84, kernel_regularizer=l2(0.1), dropout=0.2))
        model.add(Dense(64, kernel_regularizer=l2(0.1), activation='relu'))
        model.add(Dense(2, activation='softmax'))
        model.compile(optimizer=RMSprop(lr=0.001),
                      loss='binary_crossentropy',
                      metrics=['accuracy'])
        model.summary()
        return model
Example #28
    def build_model_FeedForward(self):
        ''' Feed forward model '''

        model = Sequential()
        embedding_layer = Embedding(self.vocab_length,
                                    300,
                                    weights=[self.embedding_matrix],
                                    input_length=self.length_long_sentence,
                                    trainable=False)
        model.add(embedding_layer)
        model.add(Flatten())
        model.add(Dense(200, kernel_regularizer=l2(0.01), activation='relu'))
        model.add(Dense(80, activation='relu'))
        model.add(Dense(2, activation='softmax'))
        model.compile(optimizer=RMSprop(lr=0.0001),
                      loss='binary_crossentropy',
                      metrics=['accuracy'])

        return model
Example #29
def check_model_score(model_name, weights, fold):
    model = MODELS[model_name].factory(lock_base_model=True)
    model.load_weights(weights, by_name=True)
    model.compile(optimizer=RMSprop(lr=3e-4),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    dataset = SingleFrameCNNDataset(
        preprocess_input_func=MODELS[model_name].preprocess_input,
        batch_size=MODELS[model_name].batch_size,
        validation_batch_size=4,  # MODELS[model_name].batch_size,
        fold=fold,
        use_non_blank_frames=True)
    gt = []
    pred = []
    steps = dataset.validation_steps() // 500
    print('steps:', steps)
    step = 0
    for X, y in tqdm(dataset.generate_test(verbose=True)):
        gt.append(y)
        pred.append(model.predict_on_batch(X))
        step += 1
        if step >= steps:
            break

    # print(gt)
    # print(pred)

    gt = np.vstack(gt).astype(np.float64)
    pred = np.vstack(pred).astype(np.float64)
    print(gt.shape, pred.shape)
    # print(gt)
    # print(pred)
    checkpoint_name = os.path.basename(weights)
    out_dir = f'../output/check_model_score/gt{model_name}_{fold}_{checkpoint_name}'
    os.makedirs(out_dir, exist_ok=True)
    print(out_dir)
    np.save(f'{out_dir}/gt.npy', gt)
    np.save(f'{out_dir}/pred.npy', pred)

    for clip in [0.0, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1]:
        print(clip, metrics.pri_matrix_loss(gt,
                                            np.clip(pred, clip, 1.0 - clip)))
Example #30
def create_model():
    # Our input feature map is 150x150x3: 150x150 for the image pixels, and 3 for
    # the three color channels: R, G, and B
    img_input = layers.Input(shape=(150, 150, 3))

    # First convolution extracts 128 filters that are 5x5
    # Convolution is followed by max-pooling layer with a 2x2 window
    x = layers.Conv2D(128, 5, activation='relu')(img_input)
    x = layers.MaxPooling2D(2)(x)

    x = layers.Dropout(0.5)(x)

    # Flatten feature map to a 1-dim tensor so we can add fully connected layers
    x = layers.Flatten()(x)

    x = layers.Dense(96, activation='relu')(x)

    # Add a dropout rate of 0.25
    x = layers.Dropout(0.25)(x)

    x = layers.Dense(54, activation='relu')(x)

    # Add a dropout rate of 0.25
    x = layers.Dropout(0.25)(x)

    # Create output layer with a single node and sigmoid activation
    output = layers.Dense(1, activation='sigmoid')(x)

    # Create model:
    # input = input feature map
    # output = input feature map + stacked convolution/maxpooling layers + fully
    # connected layer + sigmoid output layer
    model = Model(img_input, output)

    model.compile(loss='binary_crossentropy',
                  optimizer=RMSprop(lr=0.001),
                  metrics=['acc'])

    # model.summary()

    return model
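A hypothetical training call for the model above, streaming 150x150 RGB images with binary labels from a directory; the path and step counts are placeholders.

from keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = datagen.flow_from_directory('data/train',
                                              target_size=(150, 150),
                                              batch_size=20,
                                              class_mode='binary')
model = create_model()
model.fit_generator(train_generator, steps_per_epoch=100, epochs=15)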