import time

from keras.callbacks import TensorBoard
from keras.preprocessing.image import ImageDataGenerator

# imageAugmentation, train_from_layer and train_data_dir are assumed to be
# defined earlier in the script.
batch_size = 64
epochs = 1
validation_split = 0.2
step_size_train = 150

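# Encode the key hyperparameters in the run name so runs are easy to tell
# apart in TensorBoard.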
NAME = "Smart_Fridge_BaseSet_imageAugmentation:{}_batchSize:{}_epochs:{}_valSplit:{}_stepSize:{}_trainedFromLayer:{}_time:{}".format(
    imageAugmentation, batch_size, epochs, validation_split, step_size_train,
    train_from_layer, int(time.time()))
tensorboard = TensorBoard(log_dir='logs/{}'.format(NAME))

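# Rescale pixels to [0, 1] and reserve part of the training directory as a
# validation split.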
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   validation_split=validation_split)

if imageAugmentation:
    # Optional augmentation: random shifts, zooms and shears on the training images.
    train_datagen.height_shift_range = 0.2
    train_datagen.width_shift_range = 0.2
    train_datagen.zoom_range = [0.1, 0.3]
    train_datagen.shear_range = 30

train_generator = train_datagen.flow_from_directory(train_data_dir,
                                                    target_size=(224, 224),
                                                    color_mode='rgb',
                                                    batch_size=batch_size,
                                                    class_mode='categorical',
                                                    subset='training',
                                                    shuffle=True)

validation_generator = train_datagen.flow_from_directory(
    train_data_dir,  # same directory as training data
    target_size=(224, 224),
    color_mode='rgb',
    batch_size=batch_size,
    class_mode='categorical',
    subset='validation',  # the held-out validation_split portion
    shuffle=True)
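
Neither generator is consumed in the snippet above; a minimal training call, assuming a compiled Keras model named `model` is already in scope, could look like this:

model.fit_generator(train_generator,
                    steps_per_epoch=step_size_train,
                    epochs=epochs,
                    validation_data=validation_generator,
                    validation_steps=validation_generator.samples // batch_size,
                    callbacks=[tensorboard])
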
Example #2

from __future__ import print_function

import os

import numpy as np
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from keras.models import load_model
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator

# Project-specific helpers; the module paths below are assumptions about the
# surrounding repository layout:
# from dm_keras_ext import DMMetrics, DMAucModelCheckpoint
# from dm_resnet import ResNetBuilder
# from dm_image import resize_img_dat


def run(x_train_fn,
        x_test_fn,
        y_train_fn,
        y_test_fn,
        img_size=[256, 256],
        do_featurewise_norm=True,
        rotation_range=0,
        width_shift_range=.0,
        height_shift_range=.0,
        zoom_range=[1.0, 1.0],
        horizontal_flip=False,
        vertical_flip=False,
        batch_size=32,
        nb_epoch=100,
        pos_cls_weight=1.0,
        nb_init_filter=32,
        init_filter_size=5,
        init_conv_stride=2,
        pool_size=2,
        pool_stride=2,
        weight_decay=.0001,
        alpha=.0001,
        l1_ratio=.0,
        inp_dropout=.0,
        hidden_dropout=.0,
        init_lr=.01,
        lr_patience=20,
        es_patience=40,
        resume_from=None,
        best_model='./modelState/roi_clf.h5',
        final_model="NOSAVE"):
    '''Train a deep learning model for ROI classification.
    '''

    # =========== Load training data =============== #
    X_train = np.load(x_train_fn)
    X_test = np.load(x_test_fn)
    X_train = resize_img_dat(X_train, img_size)
    X_test = resize_img_dat(X_test, img_size)
    y_train = np.load(y_train_fn)
    y_test = np.load(y_test_fn)

    # ============ Train & validation set =============== #
    if do_featurewise_norm:
        imgen = ImageDataGenerator(featurewise_center=True,
                                   featurewise_std_normalization=True)
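        # Compute the dataset-wide mean/std from the training images.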
        imgen.fit(X_train)
    else:
        imgen = ImageDataGenerator(samplewise_center=True,
                                   samplewise_std_normalization=True)
    imgen.rotation_range = rotation_range
    imgen.width_shift_range = width_shift_range
    imgen.height_shift_range = height_shift_range
    imgen.zoom_range = zoom_range
    imgen.horizontal_flip = horizontal_flip
    imgen.vertical_flip = vertical_flip
    train_generator = imgen.flow(X_train,
                                 y_train,
                                 batch_size=batch_size,
                                 shuffle=True,
                                 seed=12345)

    if do_featurewise_norm:
        # Normalize the test set with the training-set statistics.
        X_test -= imgen.mean
        X_test /= imgen.std
    else:
        # Samplewise mode: imgen holds no dataset statistics, so standardize
        # each test image individually to match the training-time transform.
        X_test = (X_test - X_test.mean(axis=(1, 2, 3), keepdims=True)) \
            / X_test.std(axis=(1, 2, 3), keepdims=True)
    validation_set = (X_test, y_test)

    # ================= Model training ============== #
    nb_worker = int(os.getenv('NUM_CPU_CORES', 4))
    if resume_from is not None:
        model = load_model(resume_from,
                           custom_objects={
                               'sensitivity': DMMetrics.sensitivity,
                               'specificity': DMMetrics.specificity
                           })
    else:
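        # Build a channels-first, single-channel ResNet-50 variant from scratch.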
        model = ResNetBuilder.build_resnet_50(
            (1, img_size[0], img_size[1]), 1, nb_init_filter, init_filter_size,
            init_conv_stride, pool_size, pool_stride, weight_decay, alpha,
            l1_ratio, inp_dropout, hidden_dropout)
    sgd = SGD(lr=init_lr, momentum=0.9, decay=0.0, nesterov=True)
    model.compile(optimizer=sgd,
                  loss='binary_crossentropy',
                  metrics=[DMMetrics.sensitivity, DMMetrics.specificity])
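    # Reduce the learning rate and stop early when val_loss plateaus; the AUC
    # checkpointer (per its name) saves the best model by validation AUC.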
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.1,
                                  patience=lr_patience,
                                  verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss',
                                   patience=es_patience,
                                   verbose=1)
    auc_checkpointer = DMAucModelCheckpoint(best_model,
                                            validation_set,
                                            batch_size=batch_size)
    hist = model.fit_generator(
        train_generator,
        samples_per_epoch=len(X_train),
        nb_epoch=nb_epoch,
        class_weight={
            0: 1.0,
            1: pos_cls_weight
        },
        validation_data=validation_set,
        callbacks=[reduce_lr, early_stopping, auc_checkpointer],
        nb_worker=nb_worker,
        pickle_safe=True,  # use multiprocessing workers; avoids an error seen with threads.
        verbose=2)

    # Training report.
    val_loss = np.array(hist.history['val_loss'])
    min_loss_locs, = np.where(val_loss == val_loss.min())
    best_val_loss = hist.history['val_loss'][min_loss_locs[0]]
    best_val_sensitivity = hist.history['val_sensitivity'][min_loss_locs[0]]
    best_val_specificity = hist.history['val_specificity'][min_loss_locs[0]]
    print("\n==== Training summary ====")
    print("Minimum val loss achieved at epoch:", min_loss_locs[0] + 1)
    print("Best val loss:", best_val_loss)
    print("Best val sensitivity:", best_val_sensitivity)
    print("Best val specificity:", best_val_specificity)

    if final_model != "NOSAVE":
        model.save(final_model)

    return hist
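
A hypothetical invocation of run(); the .npy paths and hyperparameter values below are placeholders, not settings from the original project:

hist = run('X_train.npy', 'X_test.npy', 'y_train.npy', 'y_test.npy',
           img_size=[256, 256],
           rotation_range=30,
           horizontal_flip=True,
           vertical_flip=True,
           nb_epoch=50,
           pos_cls_weight=10.0,
           best_model='./modelState/roi_clf.h5')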