Example #1
def model_train(x_train, y_train):
    model = SqueezeNet(weights='imagenet',
                       include_top=False,
                       input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))

    top_model = Sequential()
    top_model.add(Flatten(input_shape=model.output_shape[1:]))
    top_model.add(Dense(256, activation='relu'))
    top_model.add(Dropout(0.5))
    top_model.add(Dense(NUM_LABELS, activation='softmax'))

    model = Model(inputs=model.input, outputs=top_model(model.output))

    # Freeze the first 15 layers (the pretrained SqueezeNet base) so that
    # only the later layers and the new classifier head are fine-tuned.
    for layer in model.layers[:15]:
        layer.trainable = False

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.summary()

    callback = EarlyStopping(monitor='val_loss',
                             patience=5,
                             verbose=1,
                             mode='auto')
    model.fit(x_train,
              y_train,
              batch_size=BATCH_SIZE,
              epochs=EPOCH,
              validation_split=0.1,
              callbacks=[callback])  # Keras expects a list of callbacks

    return model
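The constants IMAGE_SIZE, NUM_LABELS, BATCH_SIZE and EPOCH come from the surrounding module and are not shown. A minimal, hypothetical call site might look like the following; the constant values and the random stand-in data are illustrative assumptions, not the original code:

import numpy as np

IMAGE_SIZE = 227   # assumed input edge length for SqueezeNet
NUM_LABELS = 10    # assumed number of target classes
BATCH_SIZE = 32
EPOCH = 50

# Random smoke-test data; replace with a real dataset.
x_train = np.random.rand(64, IMAGE_SIZE, IMAGE_SIZE, 3).astype('float32')
y_train = np.eye(NUM_LABELS)[np.random.randint(0, NUM_LABELS, 64)]
model = model_train(x_train, y_train)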
Example #2
def get_model(train=True):

  if Path('model.h5').is_file():
    return load_model('model.h5')

  datagen = ImageDataGenerator(
    rotation_range=10,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.1,
    zoom_range=0.2,
    horizontal_flip=False,
    preprocessing_function=gen_preprocess,
    fill_mode='nearest')

  data_generator = datagen.flow_from_directory(
    directory='train_data/',
    target_size=IMG_SIZE,
    class_mode='categorical')
  print(data_generator.classes)

  validgen = ImageDataGenerator(preprocessing_function=gen_preprocess)
  valid_generator = validgen.flow_from_directory(
    directory='valid_data/',
    target_size=IMG_SIZE,
    class_mode='categorical',
    shuffle=False
  )

  test_generator = validgen.flow_from_directory(
    directory='test_data/',
    target_size=IMG_SIZE,
    class_mode='categorical',
    shuffle=False
  )

  model = SqueezeNet()
  print(model.summary())
  # Replace SqueezeNet's 1000-class head: attach a new 1x1 convolution with
  # 4 filters, global average pooling and a softmax, mirroring conv10.
  x = Convolution2D(4, (1, 1), padding='same', name='conv11')(model.layers[-5].output)
  x = Activation('relu', name='relu_conv11')(x)
  x = GlobalAveragePooling2D()(x)
  x = Activation('softmax')(x)
  # x= Dense(4, activation='softmax')(x)
  # x = Dense(4, activation='softmax')(model.layers[-2].output)
  model = Model(model.inputs, x)
  print(model.summary())

  # Following is the original model I was training
  # model = Sequential()
  #
  # model.add(Convolution2D(16, 3, 3,
  #                         border_mode='same',
  #                         input_shape=IMG_SHAPE))
  # model.add(MaxPooling2D(pool_size=(3, 3)))
  # model.add(Activation('relu'))
  # model.add(Dropout(0.2))
  #
  # model.add(Convolution2D(32, 3, 3,
  #                         border_mode='same'))
  # model.add(MaxPooling2D(pool_size=(3, 3)))
  # model.add(Activation('relu'))
  # model.add(Dropout(0.2))
  #
  # model.add(Convolution2D(48, 3, 3,
  #                         border_mode='same'))
  # model.add(MaxPooling2D(pool_size=(2, 2)))
  # model.add(Activation('relu'))
  # model.add(Dropout(0.2))
  # #
  # model.add(Convolution2D(64, 3, 3,
  #                         border_mode='same'))
  # model.add(MaxPooling2D(pool_size=(2, 2)))
  # model.add(Activation('relu'))
  # model.add(Dropout(0.2))
  # #
  # model.add(Convolution2D(64, 3, 3,
  #                         border_mode='same'))
  # model.add(MaxPooling2D(pool_size=(2, 2)))
  # model.add(Activation('relu'))
  # model.add(Dropout(0.2))
  #
  # # 1st Layer - Add a flatten layer
  # model.add(Flatten())
  #
  # model.add(Dense(1164))
  # model.add(Activation('relu'))
  # model.add(Dropout(0.2))
  #
  # model.add(Dense(128))
  # model.add(Activation('tanh'))
  # model.add(Dropout(0.2))
  #
  # # 2nd Layer - Add a fully connected layer
  # model.add(Dense(50))
  # model.add(Activation('relu'))
  # model.add(Dropout(0.2))
  #
  # model.add(Dense(10))
  # model.add(Activation('relu'))
  # model.add(Dropout(0.2))
  #
  # # 4th Layer - Add a fully connected layer
  # model.add(Dense(4))
  # # 5th Layer - Add a ReLU activation layer
  # model.add(Activation('softmax'))
  # TODO: Build a Multi-layer feedforward neural network with Keras here.
  # TODO: Compile and train the model
  filepath = "weights-improvement-{epoch:02d}-{loss:.2f}.hdf5"
  callbacks = [
    EarlyStopping(monitor='loss', min_delta=0.01, patience=2, verbose=1),
    LambdaCallback(on_epoch_end=lambda epoch, logs: evaluate_model(model, test_generator)),
    ModelCheckpoint(filepath=filepath, monitor='loss', save_best_only=True, verbose=1),
  ]

  model.compile(optimizer=keras.optimizers.Adam(lr=0.0001),
                loss='categorical_crossentropy',
                metrics=['accuracy'])
  # Pass the held-out generator built above so validation actually runs.
  model.fit_generator(data_generator, steps_per_epoch=400, epochs=30, verbose=1,
                      callbacks=callbacks, validation_data=valid_generator,
                      validation_steps=len(valid_generator))
  evaluate_model(model, test_generator)

  model.save('model.h5', overwrite=True)

  return model
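gen_preprocess and evaluate_model are referenced but not defined in this example. A minimal sketch of what they might look like, assuming pixel scaling to [-1, 1] and a plain generator evaluation (both assumptions, not the original helpers):

def gen_preprocess(x):
    # Assumed: scale pixel values from [0, 255] to [-1, 1].
    return x / 127.5 - 1.0

def evaluate_model(model, generator):
    # Assumed: report loss and accuracy over the whole generator.
    loss, acc = model.evaluate_generator(generator, steps=len(generator))
    print('test loss: %.4f, test acc: %.4f' % (loss, acc))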
Example #3
train_images = preprocess_images(train_images)
validation_images = np.concatenate(
    [validation_f20_images, validation_axis_images], axis=0)
validation_images = preprocess_images(validation_images)
train_data = (train_images, train_classes)
validation_data = (validation_images, validation_classes)

model = SqueezeNet(weights=None, input_shape=image_size, classes=2)
opt = optimizers.SGD(lr=0.001)
model.compile(loss="categorical_crossentropy",
              optimizer=opt,
              metrics=['accuracy'])
#model.compile(optimizer='adam', loss='categorical_crossentropy',
#                                        metrics=['accuracy'])

model.summary()
model_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'
model_checkpoint = ModelCheckpoint(model_names,
                                   monitor='val_acc',
                                   verbose=1,
                                   save_best_only=True)
callbacks = [model_checkpoint]

model.fit(train_images,
          train_classes,
          batch_size=batch_size,
          epochs=num_epochs,
          verbose=1,
          callbacks=callbacks,
          validation_data=validation_data,
          shuffle=True)
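preprocess_images, image_size, batch_size, num_epochs and trained_models_path are defined elsewhere in the surrounding script. A plausible setup, with illustrative values that are assumptions rather than the original code:

import numpy as np

image_size = (224, 224, 3)   # assumed SqueezeNet input shape
batch_size = 32
num_epochs = 100
trained_models_path = 'trained_models/squeezenet'

def preprocess_images(images):
    # Assumed: cast to float32 and scale to [0, 1].
    return images.astype('float32') / 255.0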
Example #4
    def execute_model(self, qn):
        # Get data
        with tf.device('/cpu:0'):
            (train_data, train_labels), (eval_data,
                                         eval_labels) = self.get_data()

        # Build model
        model = SqueezeNet(input_shape=train_data[0].shape,
                           weights=None,
                           classes=10)
        # model = get_model(train_data[0].shape)
        model.summary()

        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        # Train model
        if self.log_dir is not None:
            checkpoint_folder = self.log_dir + "/" + qn
        else:
            checkpoint_folder = "./" + qn

        last_epoch_ran = 0
        from os import path, makedirs
        print("****Checkpoint folder:", checkpoint_folder)
        checkpoint_path = checkpoint_folder + "/train.ckpt"
        if path.exists(checkpoint_path):
            print("\tRestoring checkpoint from %s" % checkpoint_path)
            model.load_weights(checkpoint_path)
            with open(path.join(checkpoint_folder, "epoch.txt"), "r") as f:
                last_epoch_ran = int(f.read())
            print("\tInitial epoch: %d" % last_epoch_ran)
        else:
            print("****Creating folder", checkpoint_folder)
            makedirs(checkpoint_folder, exist_ok=True)
        # checkpoint_path = checkpoint_folder + "/train-{epoch:04d}.ckpt"

        class SaveCheckpoint(keras.callbacks.ModelCheckpoint):
            def __init__(self,
                         filepath,
                         monitor='val_loss',
                         verbose=0,
                         save_best_only=False,
                         save_weights_only=False,
                         mode='auto',
                         period=1):
                super(SaveCheckpoint,
                      self).__init__(filepath,
                                     monitor=monitor,
                                     verbose=verbose,
                                     save_best_only=save_best_only,
                                     save_weights_only=save_weights_only,
                                     mode=mode,
                                     period=period)

            def on_epoch_end(self, epoch, logs=None):
                super(SaveCheckpoint, self).on_epoch_end(epoch, logs)
                with open(path.join(path.dirname(self.filepath), "epoch.txt"),
                          "w") as f:
                    f.write(str(epoch + 1))  # epoch is 0-based; resume from the next one

        save_checkpoint = SaveCheckpoint(checkpoint_path,
                                         save_weights_only=True,
                                         verbose=1)
        callbacks = [save_checkpoint]

        history = model.fit(train_data,
                            train_labels,
                            batch_size=self.batch_size,
                            epochs=self.epochs,
                            initial_epoch=last_epoch_ran,
                            verbose=1,
                            shuffle=True,
                            validation_split=self.validation_split,
                            callbacks=callbacks)
        # history = model.fit(train_data, train_labels, epochs=self.epochs)
        # Test model
        print("Training done. Evaluating model")
        test_loss, test_acc = model.evaluate(eval_data,
                                             eval_labels,
                                             batch_size=self.batch_size,
                                             verbose=1)

        print("test_loss: {}. test_acc: {}".format(test_loss, test_acc))

        # confusion matrix
        preds = model.predict(eval_data, batch_size=self.batch_size, verbose=1)
        preds = np.argmax(preds, 1)
        model.summary()

        print("eval_labels: {}. max: {}.\npreds: {}. max: {}.".format(
            eval_labels.shape, np.max(eval_labels), preds.shape,
            np.max(preds)))
        # with keras.backend.get_session() as sess:
        #     conf_mat = tf.confusion_matrix(eval_labels, preds)
        #     conf_mat = sess.run(conf_mat)
        from sklearn.metrics import confusion_matrix
        # eval_labels are one-hot (categorical_crossentropy), so take the
        # argmax before comparing them with the predicted class indices.
        conf_mat = confusion_matrix(np.argmax(eval_labels, 1), preds)
        # clear memory
        keras.backend.clear_session()

        return history, conf_mat, test_loss, test_acc
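get_data is not shown here; given classes=10, one-hot labels and image-shaped inputs, a plausible implementation over CIFAR-10 (the dataset choice is an assumption) could look like:

import keras
import numpy as np

def get_data(self):
    # Assumed: CIFAR-10, normalized to [0, 1], labels one-hot encoded to
    # match classes=10 and the categorical_crossentropy loss above.
    (x_tr, y_tr), (x_te, y_te) = keras.datasets.cifar10.load_data()
    x_tr = x_tr.astype('float32') / 255.0
    x_te = x_te.astype('float32') / 255.0
    y_tr = keras.utils.to_categorical(y_tr, 10)
    y_te = keras.utils.to_categorical(y_te, 10)
    return (x_tr, y_tr), (x_te, y_te)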