Example #1
    def get_model(self):
        num_words = len(self.dataset.alphabet)
        num_labels = len(self.dataset.labels)

        model = Sequential()

        # MIR: Embedding turns positive integers into dense vectors, used as the
        # first layer of the model. Why? The paper uses one-hot encodings.
        model.add(
            Embedding(
                num_words,  # MIR input dimension
                self.config.embed_size,  # MIR dense embedding
                mask_zero=True))
        model.add(Dropout(self.config.input_dropout))

        for _ in range(self.config.recurrent_stack_depth):
            model.add(
                Bidirectional(
                    LSTM(self.config.num_lstm_units, return_sequences=True)))
            # MIR return_sequences means return full sequence, not just last output

        model.add(Dropout(self.config.output_dropout))
        # MIR Does the paper have both input- and output-dropout?
        model.add(TimeDistributed(Dense(num_labels, activation='softmax')))

        # TODO Add Viterbi decoder here, see Kuru et al.

        optimizer = Adam(lr=self.config.learning_rate, clipnorm=1.0)

        # MIR Add precision, recall, f1 metrics for all labels
        extra_metrics = {
            "precision_null": keras_metrics.precision(label=0),
            "precision_loc": keras_metrics.precision(label=1),
            "precision_org": keras_metrics.precision(label=2),
            "precision_per": keras_metrics.precision(label=3),
            "recall_null": keras_metrics.recall(label=0),
            "recall_loc": keras_metrics.recall(label=1),
            "recall_org": keras_metrics.recall(label=2),
            "recall_per": keras_metrics.recall(label=3),
            "f1_null": keras_metrics.f1_score(label=0),
            "f1_loc": keras_metrics.f1_score(label=1),
            "f1_org": keras_metrics.f1_score(label=2),
            "f1_per": keras_metrics.f1_score(label=3),
        }

        model.compile(optimizer=optimizer,
                      loss='categorical_crossentropy',
                      metrics=[
                          'categorical_accuracy',
                          self.non_null_label_accuracy,
                      ] + list(extra_metrics.values()))
        # MIR non_null_label_accuracy is a func. Note: list.extend() returns
        # None (and extending with a dict would only add its keys), so the
        # extra metrics must be concatenated with + as above.
        return model
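Because the per-label precision/recall/f1 functions above are custom metrics, a model compiled with them cannot be reloaded with a bare load_model call; the deserializer needs the same callables under the names they were saved with. A minimal sketch, assuming a hypothetical saved file "ner_model.h5" (Examples #4 and #24 below use the same custom_objects pattern):

from keras.models import load_model
import keras_metrics

# The custom_objects keys must match the metric names Keras recorded at save
# time; Example #4 shows 'binary_precision'/'binary_recall' for the unlabeled
# variants.
model = load_model(
    "ner_model.h5",
    custom_objects={
        "binary_precision": keras_metrics.precision(),
        "binary_recall": keras_metrics.recall(),
    })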
Example #2
 def construct_model(self):
     self.model = Sequential()
     self.model.add(
         Dense(units=100, activation='relu', input_dim=self.n_inputs))
     self.model.add(Dense(units=64, activation='relu'))
     self.model.add(Dense(units=self.n_classes, activation='softmax'))
     # Precision for label 1 and label 0.
     pa = keras_metrics.precision(label=1)
     pna = keras_metrics.precision(label=0)
     # Recall for label 1 and label 0.
     ra = keras_metrics.recall(label=1)
     rna = keras_metrics.recall(label=0)
     self.model.compile(loss=self.loss,
                        optimizer=self.optimizer,
                        metrics=['accuracy', pa, pna, ra, rna])
Example #3
def build_model_architecture(x_shape):
    model = Sequential()
    model.add(
        Dense(128,
              input_dim=x_shape,
              activation='relu',
              kernel_regularizer=l2(0.01)))
    model.add(Dropout(0.2))
    model.add(Dense(64, activation='relu', kernel_regularizer=l2(0.01)))
    model.add(Dropout(0.2))
    # for binary classifiers
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=[
                      'accuracy',
                      keras_metrics.precision(),
                      keras_metrics.recall()
                  ])
    model.summary()

    # early stop
    early_stop = EarlyStopping(monitor='val_loss',
                               min_delta=0.01,
                               mode='auto',
                               verbose=1,
                               patience=5)

    return early_stop, model
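Since build_model_architecture returns the EarlyStopping callback alongside the model, the caller is expected to wire the callback into fit itself. A minimal usage sketch with hypothetical X_train/y_train arrays (a validation signal is needed for val_loss to exist):

early_stop, model = build_model_architecture(X_train.shape[1])
model.fit(X_train, y_train,
          validation_split=0.2,  # provides the val_loss that early_stop monitors
          epochs=100,
          batch_size=32,
          callbacks=[early_stop])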
Example #4
    def evaluate_model(self):
        if os.path.isfile(self.weights_file):
            print("Saved model found")
            self.model = load_model(
                self.weights_file,
                custom_objects={
                    'binary_recall': keras_metrics.recall(),
                    'binary_precision': keras_metrics.precision(),
                    'categorical_recall': keras_metrics.recall(),
                    'categorical_precision': keras_metrics.precision(),
                    'recall': keras_metrics.recall(),
                    'precision': keras_metrics.precision(),
                })
            print("Saved model loaded successfully")
            self.model.compile(optimizer=self.optimizer,
                               loss=self.args.get('loss'),
                               metrics=["accuracy", keras_metrics.recall(), keras_metrics.precision()])
            test_datagen = ImageDataGenerator(  # note: augmenting a test set (shear/zoom/flips) makes evaluation stochastic
                rescale=1./255,
                shear_range=0.5,
                zoom_range=0.5,
                horizontal_flip=True,
                vertical_flip=True,
                rotation_range=90,
            )
            test_set = test_datagen.flow_from_directory(
                self.test_path,
                target_size=self.target_size,
                batch_size=1,
                class_mode=self.args.get('class_mode'),
            )
            self.history_test = self.model.evaluate_generator(
                test_set, steps=self.steps_per_epoch)

            print(self.model.metrics_names, self.history_test)

        else:
            print("Model not found")
Example #5
 def compile_model_for_fine_tuning(self, lr=0.0001):
   # we need to recompile the model for these modifications to take effect
   self.model.compile(
     optimizer=optimizers.SGD(lr=lr, momentum=0.9), 
     loss='sparse_categorical_crossentropy', 
     metrics=['accuracy', f1_score, keras_metrics.precision(), keras_metrics.recall()]
   )
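The comment above matters because layer.trainable flags are only read when compile is called; toggling them on an already-compiled model changes nothing until recompilation. A minimal sketch of the intended flow, with a hypothetical trainer object exposing .model and the method above (the -4 cutoff is purely illustrative; Example #16 below freezes layers the same way):

def freeze_and_recompile(trainer, num_trainable=4):
    for layer in trainer.model.layers[:-num_trainable]:
        layer.trainable = False  # read at compile time, hence the recompile
    trainer.compile_model_for_fine_tuning(lr=0.0001)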
Example #6
 def compile_model(self, lr=0.001):
   optimizer = optimizers.Adam(learning_rate=lr, beta_1=0.9, beta_2=0.999, amsgrad=False)
   self.model.compile(
     loss='sparse_categorical_crossentropy', 
     optimizer=optimizer,
     metrics=['accuracy', f1_score, keras_metrics.precision(), keras_metrics.recall()]
   )
Example #7
def cnn(filters,
        pooling_size=2,
        epochs=15,
        table_folder="/",
        kernel_size=3,
        input_dim=34,
        batch_size=32,
        nb_filters=34,
        time_from=32,
        time_to=8,
        downsample_ratio=None,
        oversample=None):
    timesteps = time_from - time_to

    X_train, X_test, y_train, y_test, churn_number, total_number, feature_names = import_and_preprocess_table(
        timesteps, time_from, time_to, filters, table_folder, downsample_ratio,
        oversample)

    print("Creating layers...")
    model = Sequential()
    model.add(
        Conv1D(nb_filters,
               kernel_size=kernel_size,
               input_shape=(timesteps, input_dim),
               activation='relu'))
    model.add(MaxPooling1D(pooling_size))
    # model.add(Conv1D(nb_filters, kernel_size=kernel_size, activation='relu'))
    # model.add(Conv1D(nb_filters*2, kernel_size=3, activation='relu'))
    # model.add(GlobalAveragePooling1D())
    # model.add(Dropout(0.5))
    model.add(Flatten())
    # model.add(Dense(128, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))

    print("Compiling model...")
    model.compile(loss='mean_squared_error',
                  optimizer='rmsprop',
                  metrics=[
                      'accuracy',
                      keras_metrics.precision(),
                      keras_metrics.recall(),
                      keras_metrics.f1_score()
                  ])
    print("Fitting model...")
    print(model.summary())
    callback = [EarlyStopping(monitor='loss', patience=5)]

    history = model.fit(X_train,
                        y_train,
                        validation_data=(X_test, y_test),
                        batch_size=batch_size,
                        epochs=epochs)  #, callbacks=callback)
    score = model.evaluate(X_test, y_test, batch_size=batch_size)
    y_pred = model.predict(X_test)

    log_to_csv("cnn", score, history, filters,
               table_folder, input_dim, batch_size, time_from, time_to,
               model.to_json(), nb_filters, kernel_size)

    return [score, history, churn_number, total_number, y_pred]
Example #8
    def test_save_load(self):
        custom_objects = {
            "true_positive": keras_metrics.true_positive(sparse=self.sparse),
            "true_negative": keras_metrics.true_negative(sparse=self.sparse),
            "false_positive": keras_metrics.false_positive(sparse=self.sparse),
            "false_negative": keras_metrics.false_negative(sparse=self.sparse),
            "precision": keras_metrics.precision(sparse=self.sparse),
            "recall": keras_metrics.recall(sparse=self.sparse),
            "f1_score": keras_metrics.f1_score(sparse=self.sparse),
            "sin": keras.backend.sin,
            "abs": keras.backend.abs,
        }

        x, y = self.samples(100)
        self.model.fit(x, y, epochs=10)

        with tempfile.NamedTemporaryFile() as file:
            self.model.save(file.name, overwrite=True)
            model = keras.models.load_model(file.name,
                                            custom_objects=custom_objects)

            expected = self.model.evaluate(x, y)[1:]
            received = model.evaluate(x, y)[1:]

            self.assertEqual(expected, received)
Example #9
def seq_boi(tokenizer):  # builds a Conv1D + bidirectional LSTM network
  from keras.layers import Dense, Activation, Flatten  # hoisted from mid-function

  embedding_matrix = get_embedding_vectors(tokenizer)
  model = Sequential()
  model.add(Embedding(len(tokenizer.word_index)+1,
                      EMBEDDING_SIZE,
                      weights=[embedding_matrix],
                      trainable=False,
                      input_length=SEQUENCE_LENGTH))

  model.add(Dropout(0.8))
  model.add(Conv1D(filters,
                   kernel_size,
                   padding='same',
                   activation='relu',  # softmax over conv channels was almost certainly unintended
                   strides=1))
  model.add(MaxPooling1D(pool_size=pool_size))
  model.add(Bidirectional(LSTM(1024)))  # the bidirectional recurrent layer
  model.add(Dense(2, activation='relu'))
  model.add(Dense(2, activation='softmax'))

  plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
  model.compile(optimizer="adam", loss="categorical_crossentropy",
                  metrics=["accuracy", keras_metrics.precision(), keras_metrics.recall()])
  model.summary()
  return model
Example #10
def get_model(alpha=1, depth_multiplier=1, pooling='avg', lr=0.00001):

    base_mobilenetv2_model = MobileNetV2(alpha=alpha,
                                         depth_multiplier=depth_multiplier,
                                         input_shape=(224, 224, 1),
                                         include_top=False,
                                         weights=None,
                                         classes=1,
                                         pooling=pooling)

    top_model = Sequential()
    top_model.add(Dense(512))
    top_model.add(Dropout(0.5))
    top_model.add(Dense(1, activation='sigmoid'))
    # Create model.
    model = Model(inputs=base_mobilenetv2_model.input,
                  outputs=top_model(base_mobilenetv2_model.output))
    optimizer = Adam(lr=lr)
    model.compile(
        optimizer=optimizer,
        loss='binary_crossentropy',
        metrics=[keras.metrics.binary_accuracy,
                 keras_metrics.precision()])

    return model
Example #11
    def test_metrics(self):
        tp = keras_metrics.true_positive()
        fp = keras_metrics.false_positive()
        fn = keras_metrics.false_negative()

        precision = keras_metrics.precision()
        recall = keras_metrics.recall()

        model = keras.models.Sequential()
        model.add(keras.layers.Dense(1, activation="sigmoid", input_dim=2))
        model.add(keras.layers.Dense(1, activation="softmax"))

        model.compile(optimizer="sgd",
                      loss="binary_crossentropy",
                      metrics=[tp, fp, fn, precision, recall])

        samples = 1000
        x = numpy.random.random((samples, 2))
        y = numpy.random.randint(2, size=(samples, 1))

        model.fit(x, y, epochs=1, batch_size=10)
        metrics = model.evaluate(x, y, batch_size=10)[1:]

        tp_val = metrics[0]
        fp_val = metrics[1]
        fn_val = metrics[2]

        precision = metrics[3]
        recall = metrics[4]

        expected_precision = tp_val / (tp_val + fp_val)
        expected_recall = tp_val / (tp_val + fn_val)

        self.assertAlmostEqual(expected_precision, precision, delta=0.05)
        self.assertAlmostEqual(expected_recall, recall, delta=0.05)
Example #12
def create_model(input_length):
    print ('Creating model...')
    model = Sequential()

    model.add(Dense(64, input_dim=9, kernel_initializer='uniform'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(64, kernel_initializer='uniform'))
    model.add(Activation('tanh'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(1, kernel_initializer='uniform'))
    model.add(Activation('sigmoid'))  # softmax over a single unit is constant 1.0

    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    adam = Adam(lr=0.05)  # defined but unused; compile below builds its own Adam

    print ('Compiling...')
    model.compile(loss='binary_crossentropy',  # binary loss to match the single sigmoid output
                  optimizer=Adam(lr=0.12, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False),
                  metrics=['accuracy', keras_metrics.precision(), keras_metrics.recall(),
                           keras_metrics.false_positive(), keras_metrics.false_negative()])
    return model
Example #13
    def load(self, file_name=None):
        """
        :param file_name: [model_file_name, weights_file_name]
        :return:
        """
        with self.graph.as_default():
            with self.session.as_default():
                try:
                    model_name = file_name[0]
                    weights_name = file_name[1]

                    if model_name is not None:
                        # load the model
                        filepath = os.path.join(self.model_folder, model_name)
                        self.model = tf.keras.models.load_model(
                            filepath,
                            custom_objects={
                                "f1_score": f1_score,
                                "binary_precision": keras_metrics.precision(),
                                "binary_recall": keras_metrics.recall(),
                            })
                    if weights_name is not None:
                        # load the weights
                        weights_path = os.path.join(self.model_folder,
                                                    weights_name)
                        self.model.load_weights(weights_path)
                    print("Neural Network loaded: ")
                    print('\t' + "Neural Network model: " + model_name)
                    print('\t' + "Neural Network weights: " + weights_name)
                    return True
                except Exception as e:
                    print(e)
                    return False
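A minimal usage sketch for load, given the [model_file_name, weights_file_name] convention the docstring describes (the instance and file names here are hypothetical):

# Load the architecture+weights from a full model file, then override the
# weights from a separate checkpoint; either entry may be None to skip it.
ok = net.load(file_name=["intent_model.h5", "intent_weights.h5"])
if not ok:
    print("load failed; training from scratch instead")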
Example #14
    def baseline_model(self):
        model = Sequential()
        model.add(
            Dense(30,
                  input_dim=75,
                  kernel_initializer='normal',
                  activation='elu'))
        # model.add(ActivityRegularization(l1=0.05, l2=0.05))
        model.add(Dropout(0.3, noise_shape=None, seed=None))
        model.add(Dense(20, kernel_initializer='normal', activation='elu'))
        model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
        model.compile(loss='binary_crossentropy',
                      optimizer='adamax',
                      metrics=[
                          'accuracy',
                          keras_metrics.precision(),
                          keras_metrics.recall()
                      ])

        if self.summary:
            print(model.summary())

        if self.plot:
            plot_model(model,
                       to_file='model_plot.png',
                       show_shapes=True,
                       show_layer_names=False)

        return model
Example #15
def get_model(tokenizer, lstm_units):
    """
    Constructs the model,
    Embedding vectors => LSTM => 2 output Fully-Connected neurons with softmax activation
    """
    # get the GloVe embedding vectors
    embedding_matrix = get_embedding_vectors(tokenizer)
    model = Sequential()
    model.add(
        Embedding(
            len(tokenizer.word_index) + 1,
            EMBEDDING_SIZE,
            weights=[embedding_matrix],
            trainable=False,
            input_length=SEQUENCE_LENGTH,
        ))

    model.add(LSTM(lstm_units, recurrent_dropout=0.2))
    model.add(Dropout(0.3))
    model.add(Dense(2, activation="sigmoid"))
    # compile as rmsprop optimizer
    # aswell as with recall metric
    model.compile(
        optimizer="adam",
        loss="categorical_crossentropy",
        metrics=[
            "accuracy",
            keras_metrics.precision(),
            keras_metrics.recall()
        ],
    )
    model.summary()
    return model
Example #16
    def __model__(self):
        """Build & compile model keras.

        :return: (Keras.Sequential) model deep
        """
        # TODO: refactor this code
        mobilenet = MobileNet(weights='imagenet',
                              include_top=False,
                              input_shape=(224, 224, 3))
        init = mobilenet.output
        pool1 = GlobalAveragePooling2D()(init)
        l1 = Dense(1024)(pool1)
        act1 = Activation(activation="relu")(l1)
        drop1 = Dropout(0.2)(act1)
        l2 = Dense(self.number_classes)(drop1)
        output = Activation(activation="softmax")(l2)
        model = Model(inputs=mobilenet.input, outputs=output)
        for layer in model.layers[:-6]:
            layer.trainable = False
        metrics = [
            'accuracy',
            keras_metrics.precision(),
            keras_metrics.recall()
        ]
        model.compile(optimizer='Adam', loss=self.loss, metrics=metrics)

        return model
Example #17
    def __model__(self):
        """Build & compile the keras model.

        :return: (Keras.Sequential) model deep
        """
        model = Sequential()
        model.add(
            Conv2D(64, (5, 5), activation='relu', input_shape=(48, 48, 1)))
        model.add(MaxPooling2D(pool_size=(5, 5), strides=(2, 2)))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2)))
        model.add(Conv2D(128, (3, 3), activation='relu'))
        model.add(Conv2D(128, (3, 3), activation='relu'))
        model.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2)))
        model.add(Flatten())
        model.add(Dense(256, activation='relu'))
        model.add(Dropout(0.2))
        model.add(Dense(128, activation='relu'))
        model.add(Dropout(0.2))
        model.add(Dense(len(self.classes), activation='softmax'))
        metrics = [
            'accuracy',
            keras_metrics.precision(),
            keras_metrics.recall()
        ]
        model.compile(loss='categorical_crossentropy',
                      optimizer="Adam",
                      metrics=metrics)

        return model
Example #18
def build_bert(nclass):
    #config_path = str(config_path)
    #checkpoint_path = str(checkpoint_path)

    # Load the pre-trained BERT model.
    # keras_bert is the best Keras wrapper for BERT:
    # this single line is all it takes to invoke BERT.
    bert_model = load_trained_model_from_checkpoint(config_path, checkpoint_path, seq_len=None)

    for l in bert_model.layers:
        l.trainable = True

    x1_in = Input(shape=(None,))
    x2_in = Input(shape=(None,))

    x = bert_model([x1_in, x2_in])

    # Take the vector corresponding to [CLS] for classification.
    x = Lambda(lambda x: x[:, 0])(x)
    p = Dense(nclass, activation='softmax')(x)

    model = Model([x1_in, x2_in], p)
    model.compile(loss='binary_crossentropy',  # note: categorical_crossentropy is the usual pairing with a softmax output
                  optimizer=Adam(1e-5),
                  metrics=['accuracy', keras_metrics.precision(), keras_metrics.recall()])
    print(model.summary())
    return model
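build_bert expects two inputs, token ids and segment ids, which is why the model is built over [x1_in, x2_in]. A minimal sketch of producing those inputs with keras_bert's tokenizer, assuming a dict_path vocabulary file alongside config_path and checkpoint_path (all names here mirror the globals above and are assumptions):

import numpy as np
from keras_bert import Tokenizer, load_vocabulary

token_dict = load_vocabulary(dict_path)
tokenizer = Tokenizer(token_dict)

# encode() returns (token ids, segment ids) -- the x1/x2 pair the model expects.
x1, x2 = tokenizer.encode('some input text', max_len=128)
pred = model.predict([np.array([x1]), np.array([x2])])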
Example #19
def train_cnn(x1, x2, y, num_epoch, batch_size):
    # Get and compile model
    model = siamese_net()
    model.summary()
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy', keras_metrics.precision(),
                           keras_metrics.recall(), keras_metrics.f1_score()])
    # Shuffle data
    x, y = unison_shuffled_copies(np.array([x1, x2]), y)
    x_train, x_test, y_train, y_test = split_data(x, y, dual_input=True)
    # Train model
    start_time = time()
    history = model.fit(x=x_train, y=y_train,
                        epochs=num_epoch,
                        verbose=1,
                        validation_data=(x_test, y_test),
                        shuffle=True,
                        batch_size=batch_size,
                        callbacks=[callbacks.EarlyStopping(monitor='val_loss', patience=2)])
    end_time = time()
    time_taken = end_time - start_time
    epochs_trained = len(history.history['loss'])
    print("Took {} to train over {} epochs with batch size {}".format(end_time - start_time, epochs_trained, batch_size))
    # Plot train vs test metric graphs
    plot_graph(history.history['loss'], history.history['val_loss'], 'loss', 'Loss', "loss_epoch{}_batch{}.png".format(epochs_trained, batch_size), 200)
    plot_graph(history.history['acc'], history.history['val_acc'], 'accuracy', 'Accuracy', "acc_epoch{}_batch{}.png".format(epochs_trained, batch_size), 201)
    plot_graph(history.history['recall'], history.history['val_recall'], 'recall', 'Recall', "rec_epoch{}_batch{}.png".format(epochs_trained, batch_size), 202)
    plot_graph(history.history['precision'], history.history['val_precision'], 'precision', 'Precision', "pre_epoch{}_batch{}.png".format(epochs_trained, batch_size), 203)
    plot_graph(history.history['f1_score'], history.history['val_f1_score'], 'f1', 'F1 Score', "f1_epoch{}_batch{}.png".format(epochs_trained, batch_size), 204)  
    # Save metrics
    save_metrics(history, time_taken, num_epoch, batch_size, "metrics_epoch{}_batch{}.txt".format(epochs_trained, batch_size))
    # Save model
    save_model(model, 'model_epoch{}_batch{}.h5'.format(epochs_trained, batch_size))
Example #20
    def init_model(self):
        image_shape = (IMAGE_HEIGHT, IMAGE_WIDTH, 3)

        self.datagen = ImageDataGenerator(rotation_range=20,
                                          width_shift_range=0.15,
                                          height_shift_range=0.15,
                                          zoom_range=0.1,
                                          horizontal_flip=True,
                                          vertical_flip=True,
                                          fill_mode='nearest')

        input_a, input_b = Input(shape=image_shape), Input(shape=image_shape)

        if cnt.USE_VGG:
            shared_model = get_shared_model_vgg(image_shape)
        else:
            shared_model = get_shared_model(image_shape)

        if cnt.PRE_TRAIN_CLASSIFIER:
            self.init_classifier()
            self.classifier.load_weights(cnt.CLASSIFIER_MODEL_PATH)

            shared_model.set_weights(self.classifier.layers[1].get_weights())

            for layer in shared_model.layers:
                layer.trainable = False

        nlayer1 = shared_model(input_a)
        nlayer2 = shared_model(input_b)

        n_layer = Flatten()
        nlayer1 = n_layer(nlayer1)
        nlayer2 = n_layer(nlayer2)

        n_layer = Dense(cnt.EMBEDDING_SIZE, activation='relu')
        nlayer1 = n_layer(nlayer1)
        nlayer2 = n_layer(nlayer2)

        n_layer = BatchNormalization()
        nlayer1 = n_layer(nlayer1)
        nlayer2 = n_layer(nlayer2)

        n_layer = Lambda(lambda x: K.l2_normalize(x, axis=1))
        nlayer1 = n_layer(nlayer1)
        nlayer2 = n_layer(nlayer2)

        n_layer = Lambda(lambda x: K.sqrt(
            K.sum(K.square(x[0] - x[1]), axis=1, keepdims=True)))(
                [nlayer1, nlayer2])
        out = Dense(1, activation="sigmoid")(n_layer)

        self.model = Model(inputs=[input_a, input_b], outputs=[out])

        adam = optimizers.Adam(lr=0.001)
        self.model.compile(
            optimizer=adam,
            loss="mean_squared_error",
            metrics=['accuracy',
                     km.precision(label=0),
                     km.recall(label=0)])
Example #21
def MLP(train_x, train_y, test_x, test_y, epoch, batch_size):
    # MLP with hidden layers of 200, 100, 60, 30 and 10 neurons
    model = models.Sequential()
    layer = layers.Dense(200,
                         activation="relu",
                         input_shape=(train_x.shape[1], ),
                         kernel_regularizer=regularizers.l2(1e-10),
                         activity_regularizer=regularizers.l1(1e-10))
    model.add(layer)
    model.add(layers.Dropout(0.1, noise_shape=None, seed=None))
    #model.add(layers.Dropout(0.2, noise_shape=None, seed=None))
    model.add(
        layers.Dense(100,
                     activation="relu",
                     kernel_regularizer=regularizers.l2(1e-10),
                     activity_regularizer=regularizers.l1(1e-10)))
    model.add(layers.Dropout(0.1, noise_shape=None, seed=None))
    model.add(
        layers.Dense(60,
                     activation="relu",
                     kernel_regularizer=regularizers.l2(1e-10),
                     activity_regularizer=regularizers.l1(1e-10)))
    model.add(
        layers.Dense(30,
                     activation="relu",
                     kernel_regularizer=regularizers.l2(1e-10),
                     activity_regularizer=regularizers.l1(1e-10)))
    #model.add(layers.Dropout(0.1, noise_shape=None, seed=None))
    model.add(
        layers.Dense(10,
                     activation="relu",
                     kernel_regularizer=regularizers.l2(1e-10),
                     activity_regularizer=regularizers.l1(1e-10)))
    model.add(layers.Dense(1, activation="sigmoid"))
    model.summary()
    adam = Adam(lr=0.0001,
                beta_1=0.9,
                beta_2=0.999,
                epsilon=None,
                decay=0.0,
                amsgrad=False)
    model.compile(loss='binary_crossentropy',
                  metrics=[
                      'accuracy',
                      keras_metrics.precision(),
                      keras_metrics.recall()
                  ],
                  optimizer=adam)

    # Get the loss and accuracy by using cross validation
    results_mlp = model.fit(train_x,
                            train_y,
                            epochs=epoch,
                            batch_size=batch_size,
                            validation_data=(test_x, test_y))
    #predict_label = model.predict_classes(train_x)
    weights_mlp = layer.get_weights()
    return results_mlp, weights_mlp
Example #22
def train(model, X_train, X_test, y_train, y_test, save_path):
    model.compile(Adam(lr=0.001),
                  loss="sparse_categorical_crossentropy",
                  metrics=['accuracy', keras_metrics.precision(), keras_metrics.recall()])
    model.fit(X_train, y_train,
              validation_data=(X_test, y_test),
              batch_size=32,
              epochs=20)
    model.save(save_path)
Example #23
    def getModel(self):

        num_classes = 10
        optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)

        model = Sequential()

        model.add(
            Conv2D(128, (3, 3),
                   input_shape=(28, 28, 1),
                   activation='relu',
                   padding='same'))
        model.add(AveragePooling2D(pool_size=(2, 2), strides=2))
        model.add(Dropout(0.2))

        model.add(BatchNormalization())

        model.add(
            Conv2D(256, (3, 3),
                   activation='relu',
                   padding='same'))
        model.add(AveragePooling2D(pool_size=(2, 2), strides=2))
        model.add(Dropout(0.2))

        model.add(BatchNormalization())

        model.add(
            Conv2D(512, (3, 3),
                   activation='relu',
                   padding='same'))
        model.add(AveragePooling2D(pool_size=(2, 2), strides=2))
        model.add(Dropout(0.3))

        model.add(BatchNormalization())
        model.add(GlobalMaxPooling2D())

        model.add(Dense(1024, activation='relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(num_classes, activation='softmax', name='predict'))

        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=[
                          'accuracy',
                          keras_metrics.precision(),
                          keras_metrics.recall()
                      ])

        model_json = model.to_json()  # model_from_json import was unused here
        with open("modelBrabo.json", "w") as json_file:
            json_file.write(model_json)

        return model
Example #24
 def load_model(self, verbose=0):
     self.model = load_model(
         self.save_name(),
         custom_objects={'binary_precision': km.precision()})
     if verbose >= 1:
         print(self.model.summary())
     history_name = self.save_name().split('.')[0] + '.json'
     with open(history_name, 'r') as history_file:
         self.history = json.load(history_file)
Example #25
def lstm5(filters,
          epochs=15,
          table_folder="/",
          save_file=None,
          input_dim=34,
          batch_size=32,
          time_from=32,
          time_to=8,
          downsample_ratio=None,
          oversample=None):
    timesteps = time_from - time_to

    X_train, X_test, y_train, y_test, churn_number, total_number, feature_names = import_and_preprocess_table(
        timesteps, time_from, time_to, filters, table_folder, downsample_ratio,
        oversample)

    print("Creating layers...")

    model = Sequential()
    model.add(
        LSTM(input_dim, input_shape=(timesteps, input_dim),
             return_sequences=True))  # use the input_dim argument rather than a hardcoded 34
    model.add(Dropout(0.2))
    model.add(LSTM(input_dim, return_sequences=True))
    model.add(Dropout(0.2))
    # model.add(LSTM(input_dim, return_sequences=True))
    # model.add(Dropout(0.2))
    # model.add(LSTM(input_dim, return_sequences=True))
    # model.add(Dropout(0.2))
    model.add(LSTM(input_dim))
    model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid'))
    print("Compiling model...")
    model.compile(loss='mean_squared_error',
                  optimizer='rmsprop',
                  metrics=[
                      'accuracy',
                      keras_metrics.precision(),
                      keras_metrics.recall(),
                      keras_metrics.f1_score()
                  ])
    print("Fitting model...")
    print(model.summary())
    callback = [EarlyStopping(monitor='val_loss', patience=5)]

    history = model.fit(X_train,
                        y_train,
                        validation_data=(X_test, y_test),
                        batch_size=batch_size,
                        epochs=epochs)  #, callbacks=callback)
    score = model.evaluate(X_test, y_test, batch_size=batch_size)
    y_pred = model.predict(X_test)

    log_to_csv("lstm", score, history, filters, table_folder, input_dim,
               batch_size, time_from, time_to, model.to_json())

    return [score, history, churn_number, total_number, y_pred]
Example #26
def main(model_name, model_file_path, training_data_path, epochs=20):
    samples, labels = get_samples_labels(training_data_path)
    vocabulary, tokenizer = get_vocabulary_tokenizer(samples)
    num_classes = len(INDEX_NAME_CLASS_MAP)

    num_tokens = len(vocabulary)
    tokenized_samples = [tokenizer.tokenize(sample.text) for sample in samples]
    max_document_length = tokenizer.get_max_length(tokenized_samples)
    transformer = TextTransformer(class_map=NAME_INDEX_CLASS_MAP,
                                  max_sequence_length=max_document_length,
                                  vocabulary=vocabulary,
                                  tokenizer=tokenizer)

    X = [transformer.transform(sample.text) for sample in samples]
    sample_batch_provider = SampleBatchProvider(
        X=X,
        y=labels,
        num_labels=num_classes,
        max_document_length=max_document_length,
        max_token_length=num_tokens)
    X, y = sample_batch_provider.get_batch(X, labels)

    experiment = NgramCNNModel()
    model = experiment.get_model(max_document_length=max_document_length,
                                 num_classes=num_classes,
                                 vocabulary_size=len(vocabulary))
    model.compile(optimizer=Adam(),
                  loss="binary_crossentropy",
                  metrics=[
                      "accuracy",
                      keras_metrics.precision(),
                      keras_metrics.recall()
                  ])
    plot_model(model, show_shapes=True, to_file=model_name + '.png')

    callbacks = [
        GradientDebugger(),
        TensorBoard(log_dir='/tmp/shooting_ngram_cnn'),
        ModelCheckpoint(model_file_path,
                        monitor='val_acc',
                        verbose=1,
                        save_best_only=True,
                        mode='max'),
        EarlyStopping(monitor='val_acc', patience=5, mode='max'),
        ReduceLROnPlateau(factor=0.1, verbose=1),
    ]
    model.fit(x=[X, X, X],
              y=y,
              batch_size=None,
              epochs=epochs,
              verbose=1,
              callbacks=callbacks,
              validation_split=0.2,
              shuffle=True,
              steps_per_epoch=20,
              validation_steps=100)
Example #27
 def load_model(self, filepath):
     print('[Model] Loading model from file %s' % filepath)
     self.model = tf.keras.models.load_model(
         filepath,
         custom_objects={
             "f1_score": f1_score,
             "binary_precision": keras_metrics.precision(),
             "binary_recall": keras_metrics.recall(),
         })
Example #28
def create_model_single():
    opt = keras.optimizers.SGD(lr=0.01)
    model = create_base()
    model.compile(optimizer=opt,
                  loss="categorical_crossentropy",
                  metrics=[
                      "accuracy",
                      keras_metrics.precision(),
                      keras_metrics.recall()
                  ])
    return model
Example #29
def create_model_multi():
    opt = keras.optimizers.SGD(lr=0.01)
    model = create_base()
    parallel_model = multi_gpu_model(model, gpus=2)
    parallel_model.compile(optimizer=opt,
                           loss="categorical_crossentropy",
                           metrics=[
                               "accuracy",
                               keras_metrics.precision(),
                               keras_metrics.recall()
                           ])
    return parallel_model
Example #30
def model_impl():
	x_train, y_train, x_test, y_test = get_train_data()
	model = Sequential()
	#model.add(LSTM(300, activation='relu', input_shape=(15, 300)))
	model.add(LSTM(300, activation='tanh', input_shape=(15, 300)))
	model.add(Dropout(0.2))
	model.add(Dense(1, activation='sigmoid'))
	model.summary()
			
	model.compile(loss='binary_crossentropy', optimizer='sgd', 
		metrics=['accuracy', keras_metrics.precision(), keras_metrics.recall()])
	model.fit(x_train, y_train, verbose=1, epochs=20, validation_data=(x_test, y_test))
	model.save('lstm_model.h5')