# --- Example 1 (scraped snippet; popularity score: 0) ---
def model_train(x_train, y_train):
    """Fine-tune a SqueezeNet backbone with a small dense classifier head.

    Parameters
    ----------
    x_train : image batch shaped (n, IMAGE_SIZE, IMAGE_SIZE, 3).
    y_train : one-hot labels shaped (n, NUM_LABELS).

    Returns
    -------
    The trained Keras ``Model``.
    """
    # ImageNet-pretrained feature extractor; original classifier head removed.
    model = SqueezeNet(weights='imagenet',
                       include_top=False,
                       input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))

    # New classification head stacked on the backbone's feature maps.
    top_model = Sequential()
    top_model.add(Flatten(input_shape=model.output_shape[1:]))
    top_model.add(Dense(256, activation='relu'))
    top_model.add(Dropout(0.5))
    top_model.add(Dense(NUM_LABELS, activation='softmax'))

    model = Model(inputs=model.input, outputs=top_model(model.output))

    # Freeze the first 15 layers so only the later layers are fine-tuned.
    for layer in model.layers[:15]:
        layer.trainable = False

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.summary()

    callback = EarlyStopping(monitor='val_loss',
                             patience=5,
                             verbose=1,
                             mode='auto')
    # BUG FIX: Model.fit expects `callbacks` to be a *list* of callbacks;
    # the original passed the bare EarlyStopping instance, which Keras
    # rejects (it iterates over the argument).
    model.fit(x_train,
              y_train,
              batch_size=BATCH_SIZE,
              epochs=EPOCH,
              validation_split=0.1,
              callbacks=[callback])

    return model
# Assemble the validation set from the two source image groups and
# preprocess it the same way as the training images.
validation_images = np.concatenate(
    [validation_f20_images, validation_axis_images], axis=0)
validation_images = preprocess_images(validation_images)
train_data = (train_images, train_classes)
validation_data = (validation_images, validation_classes)

# Train SqueezeNet from scratch (weights=None) as a 2-class classifier.
model = SqueezeNet(weights=None, input_shape=image_size, classes=2)
# NOTE(review): `lr` is the legacy Keras argument name; newer releases
# spell it `learning_rate` -- confirm against the installed version.
opt = optimizers.SGD(lr=0.001)
model.compile(loss="categorical_crossentropy",
              optimizer=opt,
              metrics=['accuracy'])

model.summary()

# Checkpoint filename encodes epoch and validation accuracy; only the
# best model (by val_acc) is kept on disk.
model_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'
model_checkpoint = ModelCheckpoint(model_names,
                                   'val_acc',
                                   verbose=1,
                                   save_best_only=True)
callbacks = [model_checkpoint]

# Bind batch size and epoch count by keyword so Model.fit's positional
# order cannot silently misassign them.
model.fit(train_images,
          train_classes,
          batch_size=batch_size,
          epochs=num_epochs,
          verbose=1,
          callbacks=callbacks,
          validation_data=validation_data,
          shuffle=True)
# --- Example 3 (scraped snippet; popularity score: 0) ---
    # NOTE(review): this is the body of a training routine whose `def` line
    # lies outside this view; `args` is presumably an argparse Namespace --
    # confirm against the caller.
    model = SqueezeNet(
        include_top=False
    )  # Feature-extraction layers of SqueezeNet only (no classifier head).
    model = add_squeezenet_top(
        model, args.classes,
        False)  # Attach the classification layers for `args.classes` classes.
    data_set = Dataloader(args.data_base,
                          args.label_path)  # Data source rooted at args.data_base.
    data, label = data_set.read_data(args.pos_neg_num,
                                     args.target_img_size)  # Load samples + labels.
    X_train, X_test, y_train, y_test = train_test_split(
        data, label, test_size=args.split_ratio, random_state=42)
    # Split into train/validation; fixed seed makes the split reproducible.
    y_train = keras.utils.to_categorical(y_train, args.classes)
    y_test = keras.utils.to_categorical(y_test, args.classes)
    # Convert integer labels to one-hot encoding.
    batch_size = args.batch_epoch_size[
        0]  # Training batch size, normally 16 or 32.
    nb_epoch = args.batch_epoch_size[1]  # Number of training epochs.

    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              epochs=nb_epoch,
              verbose=1,
              validation_data=(X_test, y_test))
    # Train; the held-out split doubles as the validation set.
    model.save(args.model_path)
    # Persist the trained model to the requested path.
# --- Example 4 (scraped snippet; popularity score: 0) ---
    def execute_model(self, qn):
        """Train SqueezeNet for experiment *qn* with resumable checkpointing,
        then evaluate it and return the results.

        Parameters
        ----------
        qn : str
            Experiment/question name; used as the checkpoint sub-folder.

        Returns
        -------
        tuple
            ``(history, conf_mat, test_loss, test_acc)``.
        """
        # Load the dataset on the CPU so device memory stays free for the model.
        with tf.device('/cpu:0'):
            (train_data, train_labels), (eval_data,
                                         eval_labels) = self.get_data()

        # Build the model: trained from scratch (weights=None), 10 classes.
        model = SqueezeNet(input_shape=train_data[0].shape,
                           weights=None,
                           classes=10)
        model.summary()

        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

        # Checkpoints live in a per-experiment folder under log_dir (or CWD).
        if self.log_dir is not None:
            checkpoint_folder = self.log_dir + "/" + qn
        else:
            checkpoint_folder = "./" + qn

        last_epoch_ran = 0
        from os import path, makedirs
        print("****Checkpoint folder:", checkpoint_folder)
        checkpoint_path = checkpoint_folder + "/train.ckpt"
        if path.exists(checkpoint_path):
            # Resume: restore weights and the epoch counter saved next to them.
            print("\tRestoring checkpoint from %s" % checkpoint_path)
            model.load_weights(checkpoint_path)
            with open(path.join(checkpoint_folder, "epoch.txt"), "r") as f:
                last_epoch_ran = f.read()
            last_epoch_ran = int(last_epoch_ran)
            print("\tInitial epoch: %d" % last_epoch_ran)
        else:
            print("****Creating folder", checkpoint_folder)
            makedirs(checkpoint_folder, exist_ok=True)

        class SaveCheckpoint(keras.callbacks.ModelCheckpoint):
            """ModelCheckpoint that also records the next epoch to run, so a
            later invocation can resume via ``initial_epoch``."""

            def __init__(self,
                         filepath,
                         monitor='val_loss',
                         verbose=0,
                         save_best_only=False,
                         save_weights_only=False,
                         mode='auto',
                         period=1):
                super(SaveCheckpoint,
                      self).__init__(filepath,
                                     monitor=monitor,
                                     verbose=verbose,
                                     save_best_only=save_best_only,
                                     save_weights_only=save_weights_only,
                                     mode=mode,
                                     period=period)

            def on_epoch_end(self, epoch, logs=None):
                super(SaveCheckpoint, self).on_epoch_end(epoch, logs)
                # BUG FIX: `epoch` is the 0-based index of the epoch that just
                # finished. Store epoch + 1 so a resumed run starts at the
                # *next* epoch instead of re-running the completed one.
                with open(path.join(path.dirname(self.filepath), "epoch.txt"),
                          "w") as f:
                    f.write(str(epoch + 1))

        save_checkpoint = SaveCheckpoint(checkpoint_path,
                                         save_weights_only=True,
                                         verbose=1)
        callbacks = [save_checkpoint]

        history = model.fit(train_data,
                            train_labels,
                            batch_size=self.batch_size,
                            epochs=self.epochs,
                            initial_epoch=last_epoch_ran,
                            verbose=1,
                            shuffle=True,
                            validation_split=self.validation_split,
                            callbacks=callbacks)

        # Evaluate on the held-out set.
        print("Training done. Evaluating model")
        test_loss, test_acc = model.evaluate(eval_data,
                                             eval_labels,
                                             batch_size=self.batch_size,
                                             verbose=1)

        print("test_loss: {}. test_acc: {}".format(test_loss, test_acc))

        # Confusion matrix over the evaluation set.
        preds = model.predict(eval_data, batch_size=self.batch_size, verbose=1)
        preds = np.argmax(preds, 1)
        model.summary()

        print("eval_labels: {}. max: {}.\npreds: {}. max: {}.".format(
            eval_labels.shape, np.max(eval_labels), preds.shape,
            np.max(preds)))
        from sklearn.metrics import confusion_matrix
        # BUG FIX: the original called confusion_matrix(eval_data, preds) --
        # comparing raw *inputs* against predictions -- and discarded the
        # result, then returned the undefined name `conf_mat` (NameError).
        # Compare true labels against predictions and keep the matrix.
        # NOTE(review): assumes eval_labels holds integer class ids; if they
        # are one-hot, argmax them first -- confirm against get_data().
        conf_mat = confusion_matrix(eval_labels, preds)
        # Release TF graph/session memory before returning.
        keras.backend.clear_session()

        return history, conf_mat, test_loss, test_acc