Example #1
    def evaluate_model(self):
        if os.path.isfile(self.weights_file):
            print("Saved model found")
            self.model = load_model(
                self.weights_file,
                custom_objects={
                    'binary_recall': keras_metrics.recall(),
                    'binary_precision': keras_metrics.precision(),
                    'categorical_recall': keras_metrics.recall(),
                    'categorical_precision': keras_metrics.precision(),
                    'recall': keras_metrics.recall(),
                    'precision': keras_metrics.precision(),
                })
            print("Saved model loaded successfully")
            self.model.compile(optimizer=self.optimizer,
                               loss=self.args.get('loss'),
                               metrics=["accuracy", keras_metrics.recall(), keras_metrics.precision()])
            test_datagen = ImageDataGenerator(
                rescale=1./255,
                shear_range=0.5,
                zoom_range=0.5,
                horizontal_flip=True,
                vertical_flip=True,
                rotation_range=90,
            )
            test_set = test_datagen.flow_from_directory(
                self.test_path,
                target_size=self.target_size,
                batch_size=1,
                class_mode=self.args.get('class_mode'),
            )
            self.history_test = self.model.evaluate_generator(
                test_set, steps=self.steps_per_epoch)

            print(self.model.metrics_names, self.history_test)

        else:
            print("Model not found")
Example #2
    def get_model(self):
        num_words = len(self.dataset.alphabet)
        num_labels = len(self.dataset.labels)

        model = Sequential()

        # MIR Embedding turns positive integers into dense vectors, for 1st layer of model. Why? Paper uses one-hot
        model.add(
            Embedding(
                num_words,  # MIR input dimension
                self.config.embed_size,  # MIR dense embedding
                mask_zero=True))
        model.add(Dropout(self.config.input_dropout))

        for _ in range(self.config.recurrent_stack_depth):
            model.add(
                Bidirectional(
                    LSTM(self.config.num_lstm_units, return_sequences=True)))
            # MIR return_sequences means return full sequence, not just last output

        model.add(Dropout(self.config.output_dropout))
        # MIR Does the paper have both input- and output-dropout?
        model.add(TimeDistributed(Dense(num_labels, activation='softmax')))

        # TODO Add Viterbi decoder here, see Kuru et al.

        optimizer = Adam(lr=self.config.learning_rate, clipnorm=1.0)

        # MIR Add precision, recall, f1 metrics for all labels
        extra_metrics = {
            "precision_null": keras_metrics.precision(label=0),
            "precision_loc": keras_metrics.precision(label=1),
            "precision_org": keras_metrics.precision(label=2),
            "precision_per": keras_metrics.precision(label=3),
            "recall_null": keras_metrics.recall(label=0),
            "recall_loc": keras_metrics.recall(label=1),
            "recall_org": keras_metrics.recall(label=2),
            "recall_per": keras_metrics.recall(label=3),
            "f1_null": keras_metrics.f1_score(label=0),
            "f1_loc": keras_metrics.f1_score(label=1),
            "f1_org": keras_metrics.f1_score(label=2),
            "f1_per": keras_metrics.f1_score(label=3),
        }

        model.compile(optimizer=optimizer,
                      loss='categorical_crossentropy',
                      # list.extend() returns None, so the extra metrics must
                      # be concatenated rather than extended in place.
                      metrics=[
                          'categorical_accuracy',
                          self.non_null_label_accuracy,
                      ] + list(extra_metrics.values()))
        # MIR non_null_label_accuracy is a func
        return model
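The metrics fix above rests on a plain Python fact: `list.extend()` mutates its receiver and returns `None`, so the original `[...].extend(extra_metrics)` would have compiled the model with `metrics=None`, and extending by a dict adds its *keys*, not the metric objects. A standalone check:

base = ['categorical_accuracy']
extra = {'precision_loc': keras_metrics.precision(label=1)}
assert base.extend(extra) is None                          # extend() returns None
assert base == ['categorical_accuracy', 'precision_loc']   # dict iteration yields keys
metrics = ['categorical_accuracy'] + list(extra.values())  # correct pattern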
Example #3
    def construct_model(self):
        self.model = Sequential()
        self.model.add(
            Dense(units=100, activation='relu', input_dim=self.n_inputs))
        self.model.add(Dense(units=64, activation='relu'))
        self.model.add(Dense(units=self.n_classes, activation='softmax'))
        # Precision for each label (label=1 is the second class, label=0 the first).
        pa = keras_metrics.precision(label=1)
        pna = keras_metrics.precision(label=0)
        # Recall for each label.
        ra = keras_metrics.recall(label=1)
        rna = keras_metrics.recall(label=0)
        self.model.compile(loss=self.loss,
                           optimizer=self.optimizer,
                           metrics=['accuracy', pa, pna, ra, rna])
Example #4
 def compile_model(self, lr=0.001):
   optimizer = optimizers.Adam(learning_rate=lr, beta_1=0.9, beta_2=0.999, amsgrad=False)
   self.model.compile(
     loss='sparse_categorical_crossentropy', 
     optimizer=optimizer,
     metrics=['accuracy', f1_score, keras_metrics.precision(), keras_metrics.recall()]
   )
Example #5
def cnn(filters,
        pooling_size=2,
        epochs=15,
        table_folder="/",
        kernel_size=3,
        input_dim=34,
        batch_size=32,
        nb_filters=34,
        time_from=32,
        time_to=8,
        downsample_ratio=None,
        oversample=None):
    timesteps = time_from - time_to

    X_train, X_test, y_train, y_test, churn_number, total_number, feature_names = import_and_preprocess_table(
        timesteps, time_from, time_to, filters, table_folder, downsample_ratio,
        oversample)

    print("Creating layers...")
    model = Sequential()
    model.add(
        Conv1D(nb_filters,
               kernel_size=kernel_size,
               input_shape=(timesteps, input_dim),
               activation='relu'))
    model.add(MaxPooling1D(pooling_size))
    # model.add(Conv1D(nb_filters, kernel_size=kernel_size, activation='relu'))
    # model.add(Conv1D(nb_filters*2, kernel_size=3, activation='relu'))
    # model.add(GlobalAveragePooling1D())
    # model.add(Dropout(0.5))
    model.add(Flatten())
    # model.add(Dense(128, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))

    print("Compiling model...")
    model.compile(loss='mean_squared_error',
                  optimizer='rmsprop',
                  metrics=[
                      'accuracy',
                      keras_metrics.precision(),
                      keras_metrics.recall(),
                      keras_metrics.f1_score()
                  ])
    print("Fitting model...")
    print(model.summary())
    callback = [EarlyStopping(monitor='loss', patience=5)]

    history = model.fit(X_train,
                        y_train,
                        validation_data=(X_test, y_test),
                        batch_size=batch_size,
                        epochs=epochs)  #, callbacks=callback)
    score = model.evaluate(X_test, y_test, batch_size=batch_size)
    y_pred = model.predict(X_test)

    log_to_csv("cnn", score, history, filters,
               table_folder, input_dim, batch_size, time_from, time_to,
               model.to_json(), nb_filters, kernel_size)

    return [score, history, churn_number, total_number, y_pred]
Example #6
    def test_save_load(self):
        custom_objects = {
            "true_positive": keras_metrics.true_positive(sparse=self.sparse),
            "true_negative": keras_metrics.true_negative(sparse=self.sparse),
            "false_positive": keras_metrics.false_positive(sparse=self.sparse),
            "false_negative": keras_metrics.false_negative(sparse=self.sparse),
            "precision": keras_metrics.precision(sparse=self.sparse),
            "recall": keras_metrics.recall(sparse=self.sparse),
            "f1_score": keras_metrics.f1_score(sparse=self.sparse),
            "sin": keras.backend.sin,
            "abs": keras.backend.abs,
        }

        x, y = self.samples(100)
        self.model.fit(x, y, epochs=10)

        with tempfile.NamedTemporaryFile() as file:
            self.model.save(file.name, overwrite=True)
            model = keras.models.load_model(file.name,
                                            custom_objects=custom_objects)

            expected = self.model.evaluate(x, y)[1:]
            received = model.evaluate(x, y)[1:]

            self.assertEqual(expected, received)
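A brief note on the `[1:]` slices above: `model.evaluate` returns `[loss, metric_1, metric_2, ...]` in compile order, so dropping index 0 compares only the metric values between the original model and the one reloaded through `custom_objects`.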
Example #7
def seq_boi(tokenizer):  # builds a CNN + bidirectional-LSTM network on top of frozen pretrained embeddings
  from keras.layers import Dense, Activation, Flatten

  embedding_matrix = get_embedding_vectors(tokenizer)
  model = Sequential()
  model.add(Embedding(len(tokenizer.word_index)+1,
                      EMBEDDING_SIZE,
                      weights=[embedding_matrix],
                      trainable=False,
                      input_length=SEQUENCE_LENGTH))

  model.add(Dropout(0.8))
  # filters, kernel_size and pool_size are presumably module-level constants
  # in the source project, like EMBEDDING_SIZE and SEQUENCE_LENGTH.
  model.add(Conv1D(filters,
                   kernel_size,
                   padding='same',
                   activation='softmax',
                   strides=1))
  model.add(MaxPooling1D(pool_size=pool_size))
  model.add(Bidirectional(LSTM(1024)))  # the bidirectional RNN of the function name
  model.add(Dense(2, activation='relu'))
  model.add(Dense(2, activation='softmax'))

  plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
  model.compile(optimizer="adam", loss="categorical_crossentropy",
                metrics=["accuracy", keras_metrics.precision(), keras_metrics.recall()])
  model.summary()
  return model
Example #8
 def compile_model_for_fine_tuning(self, lr=0.0001):
   # we need to recompile the model for these modifications to take effect
   self.model.compile(
     optimizer=optimizers.SGD(lr=lr, momentum=0.9), 
     loss='sparse_categorical_crossentropy', 
     metrics=['accuracy', f1_score, keras_metrics.precision(), keras_metrics.recall()]
   )
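The recompile comment above reflects a general Keras rule: changes to `layer.trainable` only take effect at the next `compile()`. A hedged sketch of the freeze step such a fine-tuning setup typically pairs with (the layer slice is illustrative, not taken from the source):

   # Hypothetical freeze step run before compile_model_for_fine_tuning():
   for layer in self.model.layers[:-4]:   # slice chosen for illustration only
       layer.trainable = False            # applied at the next compile()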
Example #9
    def init_model(self):
        image_shape = (IMAGE_HEIGHT, IMAGE_WIDTH, 3)

        self.datagen = ImageDataGenerator(rotation_range=20,
                                          width_shift_range=0.15,
                                          height_shift_range=0.15,
                                          zoom_range=0.1,
                                          horizontal_flip=True,
                                          vertical_flip=True,
                                          fill_mode='nearest')

        input_a, input_b = Input(shape=image_shape), Input(shape=image_shape)

        if cnt.USE_VGG:
            shared_model = get_shared_model_vgg(image_shape)
        else:
            shared_model = get_shared_model(image_shape)

        if cnt.PRE_TRAIN_CLASSIFIER:
            self.init_classifier()
            self.classifier.load_weights(cnt.CLASSIFIER_MODEL_PATH)

            shared_model.set_weights(self.classifier.layers[1].get_weights())

            for layer in shared_model.layers:
                layer.trainable = False

        nlayer1 = shared_model(input_a)
        nlayer2 = shared_model(input_b)

        n_layer = Flatten()
        nlayer1 = n_layer(nlayer1)
        nlayer2 = n_layer(nlayer2)

        n_layer = Dense(cnt.EMBEDDING_SIZE, activation='relu')
        nlayer1 = n_layer(nlayer1)
        nlayer2 = n_layer(nlayer2)

        n_layer = BatchNormalization()
        nlayer1 = n_layer(nlayer1)
        nlayer2 = n_layer(nlayer2)

        n_layer = Lambda(lambda x: K.l2_normalize(x, axis=1))
        nlayer1 = n_layer(nlayer1)
        nlayer2 = n_layer(nlayer2)

        n_layer = Lambda(lambda x: K.sqrt(
            K.sum(K.square(x[0] - x[1]), axis=1, keepdims=True)))(
                [nlayer1, nlayer2])
        out = Dense(1, activation="sigmoid")(n_layer)

        self.model = Model(inputs=[input_a, input_b], outputs=[out])

        adam = optimizers.Adam(lr=0.001)
        self.model.compile(
            optimizer=adam,
            loss="mean_squared_error",
            metrics=['accuracy',
                     km.precision(label=0),
                     km.recall(label=0)])
Example #10
    def __model__(self):
        """Build & compile the keras model.

        :return: (Keras.Sequential) model deep
        """
        model = Sequential()
        model.add(
            Conv2D(64, (5, 5), activation='relu', input_shape=(48, 48, 1)))
        model.add(MaxPooling2D(pool_size=(5, 5), strides=(2, 2)))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2)))
        model.add(Conv2D(128, (3, 3), activation='relu'))
        model.add(Conv2D(128, (3, 3), activation='relu'))
        model.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2)))
        model.add(Flatten())
        model.add(Dense(256, activation='relu'))
        model.add(Dropout(0.2))
        model.add(Dense(128, activation='relu'))
        model.add(Dropout(0.2))
        model.add(Dense(len(self.classes), activation='softmax'))
        metrics = [
            'accuracy',
            keras_metrics.precision(),
            keras_metrics.recall()
        ]
        model.compile(loss='categorical_crossentropy',
                      optimizer="Adam",
                      metrics=metrics)

        return model
Example #11
def create_model(input_length):
    print('Creating model...')
    model = Sequential()

    model.add(Dense(64, input_dim=9, kernel_initializer='uniform'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(64, kernel_initializer='uniform'))
    model.add(Activation('tanh'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(1, kernel_initializer='uniform'))
    # A single-unit softmax always outputs 1.0; a sigmoid with
    # binary_crossentropy is the working choice for one output neuron.
    model.add(Activation('sigmoid'))

    print('Compiling...')
    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(lr=0.12, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False),
                  metrics=['accuracy', keras_metrics.precision(), keras_metrics.recall(),
                           keras_metrics.false_positive(), keras_metrics.false_negative()])
    return model
Example #12
def get_model(tokenizer, lstm_units):
    """
    Constructs the model:
    Embedding vectors => LSTM => 2 fully-connected output neurons with softmax activation.
    """
    # get the GloVe embedding vectors
    embedding_matrix = get_embedding_vectors(tokenizer)
    model = Sequential()
    model.add(
        Embedding(
            len(tokenizer.word_index) + 1,
            EMBEDDING_SIZE,
            weights=[embedding_matrix],
            trainable=False,
            input_length=SEQUENCE_LENGTH,
        ))

    model.add(LSTM(lstm_units, recurrent_dropout=0.2))
    model.add(Dropout(0.3))
    # softmax (not sigmoid) so the two outputs sum to 1, matching the
    # docstring and the categorical_crossentropy loss.
    model.add(Dense(2, activation="softmax"))
    # compile with the Adam optimizer, tracking precision and recall
    model.compile(
        optimizer="adam",
        loss="categorical_crossentropy",
        metrics=[
            "accuracy",
            keras_metrics.precision(),
            keras_metrics.recall()
        ],
    )
    model.summary()
    return model
Example #13
    def load(self, file_name=None):
        """
        :param file_name: [model_file_name, weights_file_name]
        :return:
        """
        with self.graph.as_default():
            with self.session.as_default():
                try:
                    model_name = file_name[0]
                    weights_name = file_name[1]

                    if model_name is not None:
                        # load the model
                        filepath = os.path.join(self.model_folder, model_name)
                        self.model = tf.keras.models.load_model(
                            filepath,
                            custom_objects={
                                "f1_score": f1_score,
                                "binary_precision": keras_metrics.precision(),
                                "binary_recall": keras_metrics.recall(),
                            })
                    if weights_name is not None:
                        # load the weights
                        weights_path = os.path.join(self.model_folder,
                                                    weights_name)
                        self.model.load_weights(weights_path)
                    print("Neural Network loaded: ")
                    print('\t' + "Neural Network model: " + model_name)
                    print('\t' + "Neural Network weights: " + weights_name)
                    return True
                except Exception as e:
                    print(e)
                    return False
Example #14
def build_model_architecture(x_shape):
    model = Sequential()
    model.add(
        Dense(128,
              input_dim=x_shape,
              activation='relu',
              kernel_regularizer=l2(0.01)))
    model.add(Dropout(0.2))
    model.add(Dense(64, activation='relu', kernel_regularizer=l2(0.01)))
    model.add(Dropout(0.2))
    # for binary classifiers
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=[
                      'accuracy',
                      keras_metrics.precision(),
                      keras_metrics.recall()
                  ])
    model.summary()

    # early stop
    early_stop = EarlyStopping(monitor='val_loss',
                               min_delta=0.01,
                               mode='auto',
                               verbose=1,
                               patience=5)

    return early_stop, model
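A hedged usage sketch for the pair returned above (the `X_train`/`y_train` names are placeholders, not from the source):

early_stop, model = build_model_architecture(X_train.shape[1])
model.fit(X_train, y_train,
          validation_split=0.2,   # produces the val_loss that EarlyStopping monitors
          epochs=50,
          callbacks=[early_stop])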
Example #15
def build_bert(nclass):
    #config_path = str(config_path)
    #checkpoint_path = str(checkpoint_path)

    # Load the pretrained BERT model.
    # keras_bert is the cleanest BERT wrapper available for Keras;
    # this single call is all it takes to load BERT.
    bert_model = load_trained_model_from_checkpoint(config_path, checkpoint_path, seq_len=None)

    for l in bert_model.layers:
        l.trainable = True

    x1_in = Input(shape=(None,))
    x2_in = Input(shape=(None,))

    x = bert_model([x1_in, x2_in])

    # Take the vector at the [CLS] position for classification.
    x = Lambda(lambda x: x[:, 0])(x)
    p = Dense(nclass, activation='softmax')(x)

    model = Model([x1_in, x2_in], p)
    # categorical_crossentropy pairs with the nclass-way softmax output
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(1e-5),
                  metrics=['accuracy', keras_metrics.precision(), keras_metrics.recall()])
    print(model.summary())
    return model
Example #16
    def __model__(self):
        """Build & compile model keras.

        :return: (Keras.Sequential) model deep
        """
        # TODO refactor this shit code
        mobilenet = MobileNet(weights='imagenet',
                              include_top=False,
                              input_shape=(224, 224, 3))
        init = mobilenet.output
        pool1 = GlobalAveragePooling2D()(init)
        l1 = Dense(1024)(pool1)
        act1 = Activation(activation="relu")(l1)
        drop1 = Dropout(0.2)(act1)
        l2 = Dense(self.number_classes)(drop1)
        output = Activation(activation="softmax")(l2)
        model = Model(inputs=mobilenet.input, outputs=output)
        for layer in model.layers[:-6]:
            layer.trainable = False
        metrics = [
            'accuracy',
            keras_metrics.precision(),
            keras_metrics.recall()
        ]
        model.compile(optimizer='Adam', loss=self.loss, metrics=metrics)

        return model
Example #17
    def test_metrics(self):
        tp = keras_metrics.true_positive()
        fp = keras_metrics.false_positive()
        fn = keras_metrics.false_negative()

        precision = keras_metrics.precision()
        recall = keras_metrics.recall()

        model = keras.models.Sequential()
        model.add(keras.layers.Dense(1, activation="sigmoid", input_dim=2))

        model.compile(optimizer="sgd",
                      loss="binary_crossentropy",
                      metrics=[tp, fp, fn, precision, recall])

        samples = 1000
        x = numpy.random.random((samples, 2))
        y = numpy.random.randint(2, size=(samples, 1))

        model.fit(x, y, epochs=1, batch_size=10)
        metrics = model.evaluate(x, y, batch_size=10)[1:]

        tp_val = metrics[0]
        fp_val = metrics[1]
        fn_val = metrics[2]

        precision = metrics[3]
        recall = metrics[4]

        expected_precision = tp_val / (tp_val + fp_val)
        expected_recall = tp_val / (tp_val + fn_val)

        self.assertAlmostEqual(expected_precision, precision, delta=0.05)
        self.assertAlmostEqual(expected_recall, recall, delta=0.05)
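A worked instance of the identities this test asserts, with illustrative counts: for tp=40, fp=10, fn=20, precision = 40/(40+10) = 0.8 and recall = 40/(40+20) ≈ 0.667, exactly the tp/(tp+fp) and tp/(tp+fn) computations performed on the evaluated metric values above.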
Example #18
    def baseline_model(self):
        model = Sequential()
        model.add(
            Dense(30,
                  input_dim=75,
                  kernel_initializer='normal',
                  activation='elu'))
        # model.add(ActivityRegularization(l1=0.05, l2=0.05))
        model.add(Dropout(0.3, noise_shape=None, seed=None))
        model.add(Dense(20, kernel_initializer='normal', activation='elu'))
        model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
        model.compile(loss='binary_crossentropy',
                      optimizer='adamax',
                      metrics=[
                          'accuracy',
                          keras_metrics.precision(),
                          keras_metrics.recall()
                      ])

        if self.summary:
            print(model.summary())

        if self.plot:
            plot_model(model,
                       to_file='model_plot.png',
                       show_shapes=True,
                       show_layer_names=False)

        return model
Example #19
def MLP(train_x, train_y, test_x, test_y, epoch, batch_size):
    # Five hidden layers (200, 100, 60, 30, 10 neurons), lightly regularized
    model = models.Sequential()
    layer = layers.Dense(200,
                         activation="relu",
                         input_shape=(train_x.shape[1], ),
                         kernel_regularizer=regularizers.l2(1e-10),
                         activity_regularizer=regularizers.l1(1e-10))
    model.add(layer)
    model.add(layers.Dropout(0.1, noise_shape=None, seed=None))
    #model.add(layers.Dropout(0.2, noise_shape=None, seed=None))
    model.add(
        layers.Dense(100,
                     activation="relu",
                     kernel_regularizer=regularizers.l2(1e-10),
                     activity_regularizer=regularizers.l1(1e-10)))
    model.add(layers.Dropout(0.1, noise_shape=None, seed=None))
    model.add(
        layers.Dense(60,
                     activation="relu",
                     kernel_regularizer=regularizers.l2(1e-10),
                     activity_regularizer=regularizers.l1(1e-10)))
    model.add(
        layers.Dense(30,
                     activation="relu",
                     kernel_regularizer=regularizers.l2(1e-10),
                     activity_regularizer=regularizers.l1(1e-10)))
    #model.add(layers.Dropout(0.1, noise_shape=None, seed=None))
    model.add(
        layers.Dense(10,
                     activation="relu",
                     kernel_regularizer=regularizers.l2(1e-10),
                     activity_regularizer=regularizers.l1(1e-10)))
    model.add(layers.Dense(1, activation="sigmoid"))
    model.summary()
    adam = Adam(lr=0.0001,
                beta_1=0.9,
                beta_2=0.999,
                epsilon=None,
                decay=0.0,
                amsgrad=False)
    model.compile(loss='binary_crossentropy',
                  metrics=[
                      'accuracy',
                      keras_metrics.precision(),
                      keras_metrics.recall()
                  ],
                  optimizer=adam)

    # Fit, tracking loss and accuracy on the held-out validation set
    results_mlp = model.fit(train_x,
                            train_y,
                            epochs=epoch,
                            batch_size=batch_size,
                            validation_data=(test_x, test_y))
    #predict_label = model.predict_classes(train_x)
    weights_mlp = layer.get_weights()
    return results_mlp, weights_mlp
Example #20
    def getModel(self):

        num_classes = 10
        optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)

        model = Sequential()

        model.add(
            Conv2D(128, (3, 3),
                   input_shape=(28, 28, 1),
                   activation='relu',
                   padding='same'))
        model.add(AveragePooling2D(pool_size=(2, 2), strides=2))
        model.add(Dropout(0.2))

        model.add(BatchNormalization())

        model.add(
            Conv2D(256, (3, 3),
                   activation='relu',
                   padding='same'))
        model.add(AveragePooling2D(pool_size=(2, 2), strides=2))
        model.add(Dropout(0.2))

        model.add(BatchNormalization())

        model.add(
            Conv2D(512, (3, 3),
                   activation='relu',
                   padding='same'))
        model.add(AveragePooling2D(pool_size=(2, 2), strides=2))
        model.add(Dropout(0.3))

        model.add(BatchNormalization())
        model.add(GlobalMaxPooling2D())

        model.add(Dense(1024, activation='relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(num_classes, activation='softmax', name='predict'))

        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=[
                          'accuracy',
                          keras_metrics.precision(),
                          keras_metrics.recall()
                      ])

        model_json = model.to_json()
        with open("modelBrabo.json", "w") as json_file:
            json_file.write(model_json)

        return model
Example #21
def train(model, X_train, X_test, y_train, y_test, save_path):
    model.compile(Adam(lr=0.001), loss="sparse_categorical_crossentropy",
                  metrics=['accuracy', keras_metrics.precision(), keras_metrics.recall()])
    model.fit(X_train, y_train,
              validation_data=(X_test, y_test),
              batch_size=32,
              epochs=20)
    model.save(save_path)
Example #22
def main(model_name, model_file_path, training_data_path, epochs=20):
    samples, labels = get_samples_labels(training_data_path)
    vocabulary, tokenizer = get_vocabulary_tokenizer(samples)
    num_classes = len(INDEX_NAME_CLASS_MAP)

    num_tokens = len(vocabulary)
    tokenized_samples = [tokenizer.tokenize(sample.text) for sample in samples]
    max_document_length = tokenizer.get_max_length(tokenized_samples)
    transformer = TextTransformer(class_map=NAME_INDEX_CLASS_MAP,
                                  max_sequence_length=max_document_length,
                                  vocabulary=vocabulary,
                                  tokenizer=tokenizer)

    X = [transformer.transform(sample.text) for sample in samples]
    sample_batch_provider = SampleBatchProvider(
        X=X,
        y=labels,
        num_labels=num_classes,
        max_document_length=max_document_length,
        max_token_length=num_tokens)
    X, y = sample_batch_provider.get_batch(X, labels)

    experiment = NgramCNNModel()
    model = experiment.get_model(max_document_length=max_document_length,
                                 num_classes=num_classes,
                                 vocabulary_size=len(vocabulary))
    model.compile(optimizer=Adam(),
                  loss="binary_crossentropy",
                  metrics=[
                      "accuracy",
                      keras_metrics.precision(),
                      keras_metrics.recall()
                  ])
    plot_model(model, show_shapes=True, to_file=model_name + '.png')

    callbacks = [
        GradientDebugger(),
        TensorBoard(log_dir='/tmp/shooting_ngram_cnn'),
        ModelCheckpoint(model_file_path,
                        monitor='val_acc',
                        verbose=1,
                        save_best_only=True,
                        mode='max'),
        EarlyStopping(monitor='val_acc', patience=5, mode='max'),
        ReduceLROnPlateau(factor=0.1, verbose=1),
    ]
    model.fit(x=[X, X, X],
              y=y,
              batch_size=None,
              epochs=epochs,
              verbose=1,
              callbacks=callbacks,
              validation_split=0.2,
              shuffle=True,
              steps_per_epoch=20,
              validation_steps=100)
Example #23
def lstm5(filters,
          epochs=15,
          table_folder="/",
          save_file=None,
          input_dim=34,
          batch_size=32,
          time_from=32,
          time_to=8,
          downsample_ratio=None,
          oversample=None):
    timesteps = time_from - time_to

    X_train, X_test, y_train, y_test, churn_number, total_number, feature_names = import_and_preprocess_table(
        timesteps, time_from, time_to, filters, table_folder, downsample_ratio,
        oversample)

    print("Creating layers...")

    model = Sequential()
    model.add(
        LSTM(34, input_length=timesteps, input_dim=34, return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(input_dim, return_sequences=True))
    model.add(Dropout(0.2))
    # model.add(LSTM(input_dim, return_sequences=True))
    # model.add(Dropout(0.2))
    # model.add(LSTM(input_dim, return_sequences=True))
    # model.add(Dropout(0.2))
    model.add(LSTM(input_dim))
    model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid'))
    print("Compiling model...")
    model.compile(loss='mean_squared_error',
                  optimizer='rmsprop',
                  metrics=[
                      'accuracy',
                      keras_metrics.precision(),
                      keras_metrics.recall(),
                      keras_metrics.f1_score()
                  ])
    print("Fitting model...")
    print(model.summary())
    callback = [EarlyStopping(monitor='val_loss', patience=5)]

    history = model.fit(X_train,
                        y_train,
                        validation_data=(X_test, y_test),
                        batch_size=batch_size,
                        epochs=epochs)  #, callbacks=callback)
    score = model.evaluate(X_test, y_test, batch_size=batch_size)
    y_pred = model.predict(X_test)

    log_to_csv("lstm", score, history, filters, table_folder, input_dim,
               batch_size, time_from, time_to, model.to_json())

    return [score, history, churn_number, total_number, y_pred]
Example #24
def create_model_single():
    opt = keras.optimizers.SGD(lr=0.01)
    model = create_base()
    model.compile(optimizer=opt,
                  loss="categorical_crossentropy",
                  metrics=[
                      "accuracy",
                      keras_metrics.precision(),
                      keras_metrics.recall()
                  ])
    return model
Example #25
    def load_model(self, filepath):
        print('[Model] Loading model from file %s' % filepath)
        self.model = tf.keras.models.load_model(
            filepath,
            custom_objects={
                "f1_score": f1_score,
                "binary_precision": keras_metrics.precision(),
                "binary_recall": keras_metrics.recall(),
            })
Example #26
    def get_class_predictions(self):
        if os.path.isfile(self.weights_file):
            print("Saved model found")
            self.model = load_model(
                self.weights_file,
                custom_objects={
                    'binary_recall': keras_metrics.recall(),
                    'binary_precision': keras_metrics.precision(),
                    'categorical_recall': keras_metrics.recall(),
                    'categorical_precision': keras_metrics.precision(),
                    'recall': keras_metrics.recall(),
                    'precision': keras_metrics.precision(),
                })
            print("Saved model loaded successfully")
            self.model.compile(optimizer=self.optimizer,
                               loss=self.args.get('loss'),
                               metrics=["accuracy", keras_metrics.recall(), keras_metrics.precision()])
        # Walk the class folders, predict each image, and tabulate
        # (actual, predicted) pairs; assumes numpy as np and
        # keras.preprocessing.image's load_img/img_to_array are imported.
        predictions_table = pd.DataFrame(columns=['actual_class', 'predicted_class'])
        for folder in os.listdir(self.test_path):
            actual_class = folder
            for image_name in os.listdir(os.path.join(self.test_path, folder)):
                image = img_to_array(load_img(
                    os.path.join(self.test_path, folder, image_name),
                    target_size=self.target_size)) / 255.
                predictions = self.model.predict(
                    image[np.newaxis, ...], batch_size=1, verbose=1)
                print(predictions)
                predicted_class = int(np.argmax(predictions[0]))
                predictions_table.loc[len(predictions_table)] = [actual_class, predicted_class]
        print(predictions_table)
Example #27
def create_model_multi():
    opt = keras.optimizers.SGD(lr=0.01)
    model = create_base()
    parallel_model = multi_gpu_model(model, gpus=2)
    parallel_model.compile(optimizer=opt,
                           loss="categorical_crossentropy",
                           metrics=[
                               "accuracy",
                               keras_metrics.precision(),
                               keras_metrics.recall()
                           ])
    return parallel_model
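One caveat on the multi-GPU variant: `multi_gpu_model` shares its weights with the template model, and the Keras documentation recommends saving and checkpointing through the inner `model` passed in, rather than the returned `parallel_model`.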
Example #28
def model_impl():
	x_train, y_train, x_test, y_test = get_train_data()
	model = Sequential()
	#model.add(LSTM(300, activation='relu', input_shape=(15, 300)))
	model.add(LSTM(300, activation='tanh', input_shape=(15, 300)))
	model.add(Dropout(0.2))
	model.add(Dense(1, activation='sigmoid'))
	model.summary()
			
	model.compile(loss='binary_crossentropy', optimizer='sgd', 
		metrics=['accuracy', keras_metrics.precision(), keras_metrics.recall()])
	model.fit(x_train, y_train, verbose=1, epochs=20, validation_data=(x_test, y_test))
	model.save('lstm_model.h5')
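Because the run above saves `lstm_model.h5` with `keras_metrics` objects attached, reloading it later needs those objects registered again, along these lines (a sketch; the exact alias keys depend on the metric names recorded in the file):

model = load_model('lstm_model.h5', custom_objects={
    'binary_precision': keras_metrics.precision(),
    'binary_recall': keras_metrics.recall()})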
Example #29
    def fit(self):
        with self.tensorflow_device:
            try:
                self.keras_model = self.create_model()
                if self.initial_weights_path:
                    self.keras_model.load_weights(self.initial_weights_path,
                                                  by_name=True,
                                                  skip_mismatch=True)
                if self.kernel_regularizer:
                    for layer in self.keras_model.layers:
                        if hasattr(layer, 'kernel_regularizer'):
                            layer.kernel_regularizer = KERAS_REGULARIZERS[
                                self.kernel_regularizer](
                                    self.kernel_regularizer_value)

                metrics = self._get_metrics() + [
                    "acc",
                    keras_metrics.precision(),
                    keras_metrics.recall()
                ]

                self.keras_model.compile(optimizer=self._get_optimizer(),
                                         loss=self._get_loss_function(),
                                         metrics=metrics)

                print(self.keras_model.summary())

                with open("%s/summary.txt" % self.output_path,
                          "w") as summary_file:
                    with redirect_stdout(summary_file):
                        self.keras_model.summary()

                try:

                    self.keras_model.fit(x=self.train_generator[0],
                                         y=self.train_generator[1],
                                         epochs=self.epochs,
                                         validation_data=self.val_generator,
                                         verbose=1,
                                         callbacks=self._get_callbacks(
                                             self.keras_model))
                except KeyboardInterrupt:
                    print("Stopping training at the user's request...")

                history_df = pd.read_csv(get_history_path(self.output_path))

                plot_history(history_df).savefig(
                    get_history_plot_path(self.output_path))

            finally:
                K.clear_session()
Example #30
def train_and_save_model(model='xvector',
                         binary_class=False,
                         single_class='glass'):
    model = define_xvector()
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.001),
                  metrics=['acc',
                           km.precision(label=1),
                           km.recall(label=0)])
    model.summary()
    callback_list = [
        ModelCheckpoint(
            'checkpoint-{epoch:02d}.h5',
            monitor='loss',
            verbose=1,
            save_best_only=True,
            period=2
        ),  # checkpoint every 2 epochs, keeping the best model so far
        ReduceLROnPlateau(
            monitor='loss', patience=3, verbose=1, min_lr=1e-6
        ),  # reduce the learning rate when the training loss plateaus
        CSVLogger(filename='training_log.csv'),  # log metrics to CSV
        EarlyStopping(
            monitor='loss',
            patience=5)  # stop early if the loss shows no improvement
    ]
    tr_data, tr_label, ts_data, ts_label = train_test_split()
    encoder = LabelBinarizer()
    tr_label = encoder.fit_transform(tr_label)
    ts_label = encoder.transform(ts_label)
    print(
        "Start Training process \nTraining data shape {} \nTraining label shape {}"
        .format(tr_data.shape, tr_label.shape))
    model.fit(tr_data,
              tr_label,
              batch_size=16,
              epochs=100,
              verbose=1,
              callbacks=callback_list,
              validation_split=0.2)
    model.save('5class_segmentYoutube_model.h5')
    pred = model.predict(ts_data)
    pred = encoder.inverse_transform(pred)
    ts_label = encoder.inverse_transform(ts_label)
    cm = confusion_matrix(y_target=ts_label, y_predicted=pred, binary=False)
    plt.figure(figsize=(10, 10))
    fig, ax = plot_confusion_matrix(conf_mat=cm)
    ax.set_xticklabels([''] + CLASS_TYPE, rotation=40, ha='right')
    ax.set_yticklabels([''] + CLASS_TYPE)
    plt.savefig("ConfusionMatrix_segment_youtube.png")
    plt.show()