Exemplo n.º 1
0
def train_and_test_multi(train_datasets):
    """5-fold CV fine-tuning of VGG16-hybrid on a 3-channel dataset stack.

    Each of the three grayscale datasets becomes one channel of a single
    input array; labels are taken from the first dataset (the datasets are
    assumed to hold the same images in the same order, i.e. shuffle=false).
    Prints the per-fold accuracies and their mean.
    """
    img_size = (224, 224)
    color_mode = 'grayscale'

    # load the three grayscale datasets (rescale=1 for places CNNs)
    X1, Y1 = train_datasets[0].get_data(img_size, color_mode, 1)
    X2, Y2 = train_datasets[1].get_data(img_size, color_mode, 1)
    X3, Y3 = train_datasets[2].get_data(img_size, color_mode, 1)

    # stack each dataset into its own channel of one combined array
    X = np.ndarray(shape=(X1.shape[0:3] + (3, )))
    for channel, data in enumerate((X1, X2, X3)):
        X[:, :, :, channel] = data.squeeze()

    # labels should be the same between datasets (b/c shuffle=false)
    Y = Y1

    # one-hot -> integer class labels for stratification
    labels = np.asarray([np.argmax(y) for y in Y])

    kfold = StratifiedKFold(n_splits=5, shuffle=True)

    scores = []
    for fold, (train_idx, test_idx) in enumerate(kfold.split(X, labels)):
        print('fold ' + str(fold + 1) + ' of 5')

        # fresh model each fold: hybrid VGG16 minus softmax, new head on top
        model = vgg16_hybrid_1365(1)
        model.add(Dense(train_datasets[0].nb_classes, activation='softmax'))
        # train with slow SGD
        model.compile(optimizer=SGD(lr=1e-3,
                                    decay=1e-6,
                                    momentum=0.9,
                                    nesterov=True),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        # more epochs (15) since we're using the new channels schema;
        # approaches RGB performance with e=10
        model.fit(X[train_idx],
                  Y[train_idx],
                  batch_size=32,
                  epochs=15,
                  validation_data=(X[test_idx], Y[test_idx]))
        # evaluate; keep only the accuracy entry of the score
        score = model.evaluate(X[test_idx], Y[test_idx])
        print(score)
        scores.append(score[1])

        K.clear_session()

    print(scores)
    print(np.mean(scores))
    print('done')
Exemplo n.º 2
0
def train_and_test(datasets):
    """Two-stage fine-tuning of VGG16 (hybrid-1365 weights) on datasets[0].

    Stage 1: train a small top model on bottleneck features.
    Stage 2: stitch base + top weights together and fine-tune the upper
    layers end-to-end. Prints the final test score and returns the model.
    """
    # import vgg16 base with places weights
    # (exclude top 4 layers (dense & flatten))
    #  model = places205_vgg16(4)
    model = vgg16_hybrid_1365(4)

    # variable
    batch_size = 16

    # use first listed dataset for training
    train_dataset = datasets[0]

    # standard image size for vgg16
    img_size = (224, 224)
    # 3 channels for vgg16 compatibility
    color_mode = 'rgb'

    # generate numpy arrays of all data
    # use rescale=1 to match places conventions
    x_train, y_train = train_dataset.train_data(img_size, color_mode, 1)
    x_test, y_test = train_dataset.test_data(img_size, color_mode, 1)

    # calculate bottleneck features (output of vgg conv layers)
    bneck_train = model.predict(x_train)

    bneck_test = model.predict(x_test)

    # TRAINING/TESTING
    # train on bottleneck features
    epochs = 50
    lr = 1e-5
    top_model_weights = train_top_model(bneck_train, y_train, bneck_test,
                                        y_test, batch_size, epochs, lr)

    # get model base weights
    base_weights = model.get_weights()

    # append top model to base
    model = add_top_model(model, train_dataset.nb_classes)

    # load base & top weights
    # (joins the base and top weight lists into one full-model weight list)
    model.set_weights(np.concatenate((base_weights, top_model_weights)))

    # freeze all layers except top conv block and dense layers
    nb_layers_trainable = 8
    epochs = 10
    #  epochs = 100
    # train all layers together
    model = train_vgg16(model, x_train, y_train, x_test, y_test,
                        nb_layers_trainable, batch_size, epochs)

    # final evaluation
    score = model.evaluate(x_test, y_test, batch_size=batch_size)

    print(score)

    return model
Exemplo n.º 3
0
def train_and_test(datasets):
    """5-fold CV: linear SVM on VGG16-hybrid bottleneck features of datasets[0].

    Every image is predicted exactly once (in the fold where it is held
    out); prints one [filename, predicted_class] pair per image.
    """
    # use first listed dataset for training
    train_dataset = datasets[0]

    # import vgg16 with hybrid weights, w/o softmax layer
    model = vgg16_hybrid_1365(1)

    # standard
    img_size = (224, 224)
    color_mode = 'rgb'

    # load data as numpy arrays
    # (use rescale=1 for places CNN's)
    # NOTE: unlike other get_data callers in this file, this variant also
    # returns the image filenames
    X, Y, train_fnames = train_dataset.get_data(img_size, color_mode, 1)

    # init 5-fold cross validation
    kfold = StratifiedKFold(n_splits=5, shuffle=True)

    # one-hot -> class labels
    labels = np.asarray([np.argmax(y) for y in Y])

    # generate bottleneck features (output of conv layers)
    X_bneck = model.predict(X)

    # make array to store filename + prediction (-1 = not yet predicted)
    predictions = [[f, -1] for f in train_fnames]

    i = 0
    for train_idx, test_idx in kfold.split(X, labels):
        print('fold ' + str(i + 1) + ' of 5')

        # train linear svm
        svc = SVC(kernel='linear')
        svc.fit(X_bneck[train_idx], labels[train_idx])

        # evaluate
        print('predicting...')
        # presumably the dataset's identifier string; verify against class
        print(train_dataset.str)
        fold_predictions = svc.predict(X_bneck[test_idx])

        # add predictions to final list, by index
        for j in range(len(fold_predictions)):
            # get index from test_idx list
            pred_idx = test_idx[j]
            # set prediction value for that specific image file
            predictions[pred_idx][1] = fold_predictions[j]

        i += 1

    # pretty print predictions, one per line
    for p in predictions:
        print(p)
    print('done')
Exemplo n.º 4
0
def train_and_test(datasets):
  """5-fold CV: linear SVM on VGG16-hybrid bottleneck features of a
  3-channel stack built from three grayscale datasets.

  Labels come from the first dataset; the three datasets are assumed to
  hold the same images in the same order. Prints per-fold train/test
  scores and the mean test score.
  """
  # hybrid VGG16 without its softmax layer, used as a feature extractor
  feature_extractor = vgg16_hybrid_1365(1)

  # standard VGG16 input size
  img_size = (224, 224)
  color_mode = 'grayscale'

  # load the three datasets (rescale=1 for places CNNs)
  X1, Y1 = datasets[0].get_data(img_size, color_mode, 1)
  X2, Y2 = datasets[1].get_data(img_size, color_mode, 1)
  X3, Y3 = datasets[2].get_data(img_size, color_mode, 1)

  # stack each dataset into its own channel
  X = np.ndarray(shape=(X1.shape[0:3] + (3,)))
  for channel, data in enumerate((X1, X2, X3)):
    X[:, :, :, channel] = data.squeeze()

  # 5-fold stratified cross validation
  kfold = StratifiedKFold(n_splits=5, shuffle=True)

  # one-hot -> integer class labels
  labels = np.asarray([np.argmax(y) for y in Y1])

  # bottleneck features (conv-layer output)
  X_bneck = feature_extractor.predict(X)

  scores = []
  for fold, (train_idx, test_idx) in enumerate(kfold.split(X, labels)):
    print('fold ' + str(fold + 1) + ' of 5')

    # train linear svm on this fold's features
    svc = SVC(kernel='linear')
    svc.fit(X_bneck[train_idx], labels[train_idx])

    # evaluate on both splits
    train_score = svc.score(X_bneck[train_idx], labels[train_idx])
    test_score = svc.score(X_bneck[test_idx], labels[test_idx])
    print(train_score, test_score)

    scores.append(test_score)

  print('mean: ' + str(np.mean(scores)))
  print('done')
Exemplo n.º 5
0
def train_and_test_single(train_datasets):
    """5-fold CV fine-tuning of VGG16-hybrid on a single RGB dataset.

    Only train_datasets[0] is used. Prints the per-fold accuracies and
    their mean.
    """
    # only use single training dataset
    train_dataset = train_datasets[0]

    img_size = (224, 224)
    color_mode = 'rgb'

    X, Y = train_dataset.get_data(img_size, color_mode, 1)

    # one-hot -> integer labels for stratification
    labels = np.asarray([np.argmax(y) for y in Y])

    kfold = StratifiedKFold(n_splits=5, shuffle=True)

    scores = []
    for fold, (train_idx, test_idx) in enumerate(kfold.split(X, labels)):
        print('fold ' + str(fold + 1) + ' of 5')

        # fresh hybrid VGG16 (softmax stripped) with a new softmax head
        model = vgg16_hybrid_1365(1)
        model.add(Dense(train_dataset.nb_classes, activation='softmax'))
        # train with slow SGD
        model.compile(optimizer=SGD(lr=1e-3,
                                    decay=1e-6,
                                    momentum=0.9,
                                    nesterov=True),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        # few epochs -- this setup overfits extremely quickly
        model.fit(X[train_idx],
                  Y[train_idx],
                  batch_size=32,
                  epochs=5,
                  validation_data=(X[test_idx], Y[test_idx]))
        # keep only the accuracy entry of the evaluation
        score = model.evaluate(X[test_idx], Y[test_idx])
        print(score)
        scores.append(score[1])

        K.clear_session()

    print(scores)
    print(np.mean(scores))
    print('done')
Exemplo n.º 6
0
def train_and_test(datasets):
    """Fine-tune VGG16-hybrid on datasets[0] using augmented random crops.

    Images flow from train/test directories at 256x256 and are reduced to
    224x224 crops by crop_generator before reaching the network.
    """
    train_dataset = datasets[0]

    # hybrid VGG16 without its softmax; new 67-way head on top
    # (67 classes -- presumably MIT67; TODO confirm against the dataset)
    model = vgg16_hybrid_1365(1)
    model.add(Dense(67, activation='softmax'))
    model.compile(optimizer=SGD(lr=1e-3,
                                decay=1e-6,
                                momentum=0.9,
                                nesterov=True),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # load at 256x256 so random 224x224 crops can be taken later
    img_size = (256, 256)
    color_mode = 'rgb'
    batch_size = 64

    # augmentation for training only; rescale=1 per places conventions
    train_datagen = ImageDataGenerator(rescale=1,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)

    test_datagen = ImageDataGenerator(rescale=1)

    train_gen = train_datagen.flow_from_directory(train_dataset.dir +
                                                  '/train/',
                                                  target_size=img_size,
                                                  batch_size=batch_size,
                                                  class_mode='categorical',
                                                  color_mode=color_mode)

    test_gen = test_datagen.flow_from_directory(train_dataset.dir + '/test/',
                                                target_size=img_size,
                                                batch_size=batch_size,
                                                class_mode='categorical',
                                                color_mode=color_mode)

    # wrap both generators to emit 224x224 crops (3 = presumably crops per
    # image or channel count -- confirm against crop_generator)
    crop_length = 224
    train_crops = crop_generator(train_gen, crop_length, 3)
    test_crops = crop_generator(test_gen, crop_length, 3)

    model.fit_generator(
        train_crops,
        steps_per_epoch=train_dataset.nb_train_samples / batch_size,
        validation_data=test_crops,
        validation_steps=train_dataset.nb_test_samples / batch_size,
        epochs=10)
Exemplo n.º 7
0
def train_and_test(datasets):
    """Linear SVM on VGG16-hybrid bottleneck features of a 3-channel stack.

    The three grayscale datasets in `datasets` are merged channel-wise
    (dataset i -> channel i) for both the train and test splits. Labels
    come from the first dataset, which assumes all three are in the same
    order. Prints the SVM's test accuracy.
    """
    # import vgg16 with hybrid weights, w/o softmax layer
    model = vgg16_hybrid_1365(1)

    # standard
    img_size = (224, 224)
    color_mode = 'grayscale'

    # load 3 datasets separately
    x1_train, y1_train = datasets[0].train_data(img_size, color_mode, 1)
    x2_train, y2_train = datasets[1].train_data(img_size, color_mode, 1)
    # BUG FIX: the second return value is the training labels; the old
    # code unpacked it into `x3_test`, which was then silently overwritten
    # by the real test data below.
    x3_train, y3_train = datasets[2].train_data(img_size, color_mode, 1)

    x1_test, y1_test = datasets[0].test_data(img_size, color_mode, 1)
    x2_test, y2_test = datasets[1].test_data(img_size, color_mode, 1)
    x3_test, y3_test = datasets[2].test_data(img_size, color_mode, 1)

    # combined numpy array to hold data together
    x_train = np.ndarray(shape=(x1_train.shape[0:3] + (3, )))
    x_test = np.ndarray(shape=(x1_test.shape[0:3] + (3, )))
    # put each dataset in respective channels
    x_train[:, :, :, 0] = x1_train.squeeze()
    x_train[:, :, :, 1] = x2_train.squeeze()
    x_train[:, :, :, 2] = x3_train.squeeze()
    x_test[:, :, :, 0] = x1_test.squeeze()
    x_test[:, :, :, 1] = x2_test.squeeze()
    x_test[:, :, :, 2] = x3_test.squeeze()

    # one-hot -> class labels (shared across the three datasets)
    train_labels = np.asarray([np.argmax(y) for y in y1_train])
    test_labels = np.asarray([np.argmax(y) for y in y1_test])

    # generate bottleneck features (output of conv layers)
    bneck_train = model.predict(x_train)
    bneck_test = model.predict(x_test)

    # train linear svm
    svc = SVC(kernel='linear')
    print('training svm...')
    svc.fit(bneck_train, train_labels)

    # evaluate
    score = svc.score(bneck_test, test_labels)
    print(score)

    print('done')
Exemplo n.º 8
0
def trial(x_train, y_train, x_test, y_test, scores, model_str, i):
    """Run one fine-tuning trial and record accuracy on every test set.

    Args:
        x_train, y_train: training images and one-hot labels.
        x_test, y_test: lists of test image arrays / one-hot label arrays,
            one entry per evaluation dataset (index 0 is used for
            validation during fitting).
        scores: 2-D list; scores[j][i] receives the accuracy of trial i on
            test set j (mutated in place).
        model_str: which pre-trained base model to load.
        i: trial index used to store results.

    Exits the process if model_str is not recognized.
    """
    # load proper model
    if model_str == 'vgg16_hybrid_1365':
        model = vgg16_hybrid_1365(1)
    elif model_str == 'vgg16_hybrid_1365_stride':
        model = vgg16_hybrid_1365_stride(1)
    # places line drawing network
    elif model_str == 'places365_vgg11_runaway_weights.h5':
        model = vgg11(365, 1)
        weights_file = 'models/' + model_str
        model.load_weights(weights_file)
        # remove old softmax
        model.pop()
    else:
        # FIX: use the print() function -- the Python 2 print statement is
        # a syntax error under Python 3 and inconsistent with the rest of
        # this file, which uses print() calls.
        print('ERROR: model not implemented')
        sys.exit()

    # append new softmax layer (one unit per class, from the label width)
    model.add(Dense(y_test[0].shape[1], activation='softmax'))

    # fine tuning stats
    model.compile(optimizer=SGD(lr=1e-3,
                                decay=1e-6,
                                momentum=0.9,
                                nesterov=True),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    model.fit(
        x_train,
        y_train,
        batch_size=32,
        # converges very quickly, but need more (10) for mit67
        #  epochs = 5,
        epochs=10,
        validation_data=(x_test[0], y_test[0]))

    # take accuracy score for each dataset
    print('evaluating on all datasets...')
    for j in range(len(x_test)):
        scores[j][i] = model.evaluate(x_test[j], y_test[j])[1]

    return
Exemplo n.º 9
0
def train_and_visualize(dataset):
  """Train a top model on bottleneck features of the whole dataset, then
  save activation-maximization images for 32 filters of layer 19.

  The entire dataset is used for both training and validation, which is
  acceptable here because the goal is visualization, not evaluation.
  """
  # load pre-trained vgg16 hybrid w/o dense layers
  model = vgg16_hybrid_1365(4)

  # standards for VGG16
  img_size = (224, 224)
  color_mode = 'rgb'
  # for places
  rescale = 1


  # load data as numpy arrays
  X,Y = dataset.get_data(img_size, color_mode, rescale)
  
  # generate bottleneck features
  X = model.predict(X)

  # since we're just visualizing activations, we can use the
  # whole dataset for training

  batch_size = 32
  epochs = 50
  lr=1e-5

  # train/validate on the same data (see docstring)
  top_model_weights = train_top_model(X, Y, X, Y, batch_size, epochs, lr)

  # concatenate base and top model
  base_weights = model.get_weights()
  model = add_top_model(model, dataset.nb_classes)
  # load base & top weights
  model.set_weights(np.concatenate((base_weights, top_model_weights)))

  # visualize activations for some filters in dense layer
  # (19 -- presumably the first dense layer; confirm against the model)
  layer_idx = 19
  # directory to save images to 
  dir_name = 'to_rgb_activations/' if dataset.str == 'rgb' else 'to_ld_activations/'
  for i in range(32):
    print(i)
    activation = visualize_activation(model, layer_idx, i)
    img = Image.fromarray(activation)
    # name format: layer_idx, filter_idx
    img.save(dir_name + 'activation_' + str(layer_idx) + '_' + str(i) + '.png')
Exemplo n.º 10
0
def main():
    """Compare plain vs larger-stride VGG16-hybrid on the MIT67 'rgb' data."""
    dataset_str = 'rgb'

    dataset = MIT67Dataset(dataset_str)

    img_size = (224, 224)
    color_mode = 'rgb'

    # rescale=1 per places conventions
    x_train, y_train = dataset.train_data(img_size, color_mode, 1)
    x_test, y_test = dataset.test_data(img_size, color_mode, 1)

    # trial 1: unmodified VGG16
    model = vgg16_hybrid_1365(1)
    train_and_test(model, x_train, y_train, x_test, y_test)

    # free graph/GPU memory between trials
    K.clear_session()

    # trial 2: pooling layers replaced with a larger stride
    model = vgg16_hybrid_1365_stride(1)
    train_and_test(model, x_train, y_train, x_test, y_test)
Exemplo n.º 11
0
def train_and_visualize(dataset):
    """Fine-tune VGG16-hybrid on `dataset`, visualizing filters each epoch.

    Generates visualizations before training (epoch 0) and after each of
    5 single-epoch training rounds, via the external visualize() helper.
    """
    # load vgg16 w/o top layer
    model = vgg16_hybrid_1365(1)

    # add new softmax layer
    model.add(Dense(dataset.nb_classes, activation='softmax'))

    # standards
    img_size = (224, 224)
    color_mode = 'rgb'
    rescale = 1

    x_train, y_train = dataset.train_data(img_size, color_mode, rescale)
    x_test, y_test = dataset.test_data(img_size, color_mode, rescale)

    model.compile(optimizer=SGD(lr=1e-3,
                                decay=1e-6,
                                momentum=0.9,
                                nesterov=True),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # generate images with no training:
    visualize(model, 0, dataset)

    # fit for one epoch at a time, generate images after each epoch
    for epoch in range(5):
        print('epoch ' + str(epoch) + ' of 5')
        # train for single epoch
        model.fit(x_train,
                  y_train,
                  epochs=1,
                  batch_size=32,
                  validation_data=(x_test, y_test))
        # visualize new filters
        visualize(model, epoch + 1, dataset)
Exemplo n.º 12
0
def bottleneck_features():
    """Save combined RGB + line-drawing bottleneck features as .npy files.

    Computes VGG16-hybrid bottleneck features for (a) the MIT67 RGB set
    and (b) a 3-channel stack of three weighted line-drawing variants,
    concatenates the two feature sets (and their labels) along the sample
    axis, and writes bneck_train/bneck_test/train_labels/test_labels.npy.
    """
    model = vgg16_hybrid_1365(1)

    img_size = (224, 224)
    color_mode = 'rgb'

    # Load RGB dataset
    rgb_dataset = MIT67Dataset('rgb')

    x_rgb_train, y_rgb_train = rgb_dataset.train_data(img_size, color_mode, 1)
    x_rgb_test, y_rgb_test = rgb_dataset.test_data(img_size, color_mode, 1)

    # one-hot -> integer class labels
    rgb_train_labels = np.asarray([np.argmax(y) for y in y_rgb_train])
    rgb_test_labels = np.asarray([np.argmax(y) for y in y_rgb_test])

    color_mode = 'grayscale'

    # load weighted line drawings
    intact_dataset = MIT67Dataset('smooth')
    x1_train, y1_train = intact_dataset.train_data(img_size, color_mode, 1)
    x1_test, y1_test = intact_dataset.test_data(img_size, color_mode, 1)

    dR_dataset = MIT67Dataset('dR_weighted')
    x2_train, y2_train = dR_dataset.train_data(img_size, color_mode, 1)
    x2_test, y2_test = dR_dataset.test_data(img_size, color_mode, 1)

    d2R_dataset = MIT67Dataset('d2R_weighted')
    x3_train, y3_train = d2R_dataset.train_data(img_size, color_mode, 1)
    x3_test, y3_test = d2R_dataset.test_data(img_size, color_mode, 1)

    # stack the three grayscale variants channel-wise
    x_train = np.ndarray(shape=(x1_train.shape[0:3] + (3, )))
    x_test = np.ndarray(shape=(x1_test.shape[0:3] + (3, )))
    # put each dataset in respective channels
    x_train[:, :, :, 0] = x1_train.squeeze()
    x_train[:, :, :, 1] = x2_train.squeeze()
    x_train[:, :, :, 2] = x3_train.squeeze()
    x_test[:, :, :, 0] = x1_test.squeeze()
    x_test[:, :, :, 1] = x2_test.squeeze()
    x_test[:, :, :, 2] = x3_test.squeeze()

    # labels shared across the three variants (same images, same order)
    x_train_labels = np.asarray([np.argmax(y) for y in y1_train])
    x_test_labels = np.asarray([np.argmax(y) for y in y1_test])

    # get rgb bottleneck features
    rgb_bneck_train = model.predict(x_rgb_train)
    rgb_bneck_test = model.predict(x_rgb_test)

    # other bneck features
    x_bneck_train = model.predict(x_train)
    x_bneck_test = model.predict(x_test)

    # concatenate features along the sample axis
    bneck_train = np.concatenate((rgb_bneck_train, x_bneck_train), axis=0)
    bneck_test = np.concatenate((rgb_bneck_test, x_bneck_test), axis=0)

    train_labels = np.concatenate((rgb_train_labels, x_train_labels), axis=0)
    test_labels = np.concatenate((rgb_test_labels, x_test_labels), axis=0)

    # persist to .npy files in the working directory
    np.save('bneck_train', bneck_train)
    np.save('bneck_test', bneck_test)
    np.save('train_labels', train_labels)
    np.save('test_labels', test_labels)

    print('done')
Exemplo n.º 13
0
# Visualize activations in pre-trained hybrid VGG16
from vis.visualization import visualize_activation
# NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2 -- this
# script needs an older SciPy (or imageio.imwrite) to run; confirm the
# pinned environment.
from scipy.misc import imsave
import numpy as np
from vgg16_utils import vgg16_hybrid_1365

# fixed seed so the generated activation images are reproducible
np.random.seed(2018)

# import pre-trained vgg16
model = vgg16_hybrid_1365()

# first dense layer
#  layer_idx = 19
# first conv layer
layer_idx = 0

# number of filters to visualize at this layer
nb_filters = 64

# generate activations for all filters
#  activation = visualize_activation(model, layer_idx)

# generate activations for some filters at specified layer
for i in range(nb_filters):
  print(i)
  activation = visualize_activation(model, layer_idx, i)
  # name: layer_idx, filter_idx
  imsave(('vgg16_hybrid_1365_activations/' +
    'act_' + str(layer_idx) + '_' + str(i) + '.png'),
    activation)

Exemplo n.º 14
0
def train_and_test(datasets):
  """5-fold CV: linear SVM on bottleneck features, scored on extra sets.

  Trains on datasets[0]; every additional dataset in datasets[1:] is
  evaluated on the same held-out fold indices. Prints per-fold scores and
  per-dataset means.
  """
  # use first listed dataset for training
  train_dataset = datasets[0]

  # import vgg16 with hybrid weights, w/o softmax layer
  model = vgg16_hybrid_1365(1)

  # standard
  img_size = (224, 224)
  color_mode = 'rgb'

  # load data as numpy arrays
  # (use rescale=1 for places CNN's)
  X,Y = train_dataset.get_data(img_size, color_mode,1)
  # load testing datasets
  nb_test_sets = len(datasets[1:])
  X_test,Y_test = [None]*nb_test_sets, [None]*nb_test_sets
  for i in range(nb_test_sets):
    X_test[i],Y_test[i] = datasets[i+1].get_data(img_size, color_mode,1)
    # generate bottleneck features
    X_test[i] = model.predict(X_test[i])

  # init 5-fold cross validation
  kfold = StratifiedKFold(n_splits=5, shuffle=True)

  # one-hot -> class labels
  labels = np.asarray([np.argmax(y) for y in Y])

  # generate bottleneck features (output of conv layers)
  X_bneck = model.predict(X)

  # store performance of each fold
  scores = [None] * 5
  test_scores = [None] * 5
  for i in range(nb_test_sets): test_scores[i] = [None] * 5
  i=0
  for train_idx, test_idx in kfold.split(X,labels):
    print('fold ' + str(i+1) + ' of 5')

    # train linear svm
    svc = SVC(kernel='linear')
    svc.fit(X_bneck[train_idx], labels[train_idx])

    # evaluate
    train_score = svc.score(X_bneck[train_idx], labels[train_idx])
    test_score = svc.score(X_bneck[test_idx], labels[test_idx]) 
    print('evaluating...')
    print(train_dataset.str)
    print(train_score, test_score)

    # NOTE(review): reuses the training dataset's labels for the other
    # test sets -- assumes every dataset holds the same images in the
    # same order; confirm upstream loaders guarantee this.
    for j in range(nb_test_sets):
      print(datasets[j+1].str)
      score = svc.score(X_test[j][test_idx], labels[test_idx])
      print(score)
      test_scores[j][i] = score


    scores[i] = test_score

    i+=1

  print('\nMeans:')
  print(train_dataset.str)
  print(str(np.mean(scores)))
  for j in range(nb_test_sets):
    print(datasets[j+1].str)
    print(np.mean(test_scores[j]))

  print('done')
Exemplo n.º 15
0
def train_and_test(datasets, model_str):
    """Average 5 fine-tuning trials of `model_str` on a 3-channel stack.

    Each of the three datasets in `datasets` is loaded into its own
    channel; labels are shared (the datasets are assumed to hold the same
    images in the same order). Prints the 5 test accuracies and their
    mean.
    """
    train_dataset = datasets[0]

    img_size = (224, 224)
    color_mode = 'grayscale'

    # load each dataset
    # BUG FIX: the old code loaded datasets[0] three times, making all
    # three channels identical despite the "load each dataset" intent;
    # load the three listed datasets, matching the channel-stacking used
    # elsewhere in this file.
    x1_train, y1_train = datasets[0].train_data(img_size, color_mode, 1)
    x1_test, y1_test = datasets[0].test_data(img_size, color_mode, 1)
    x2_train, y2_train = datasets[1].train_data(img_size, color_mode, 1)
    x2_test, y2_test = datasets[1].test_data(img_size, color_mode, 1)
    x3_train, y3_train = datasets[2].train_data(img_size, color_mode, 1)
    x3_test, y3_test = datasets[2].test_data(img_size, color_mode, 1)

    # put each dataset in its own channel of a combined array
    x_train = np.ndarray(shape=(x1_train.shape[0:3] + (3, )))
    x_train[:, :, :, 0] = x1_train.squeeze()
    x_train[:, :, :, 1] = x2_train.squeeze()
    x_train[:, :, :, 2] = x3_train.squeeze()

    x_test = np.ndarray(shape=(x1_test.shape[0:3] + (3, )))
    x_test[:, :, :, 0] = x1_test.squeeze()
    x_test[:, :, :, 1] = x2_test.squeeze()
    x_test[:, :, :, 2] = x3_test.squeeze()

    # labels are the same for each
    y_train = y1_train
    y_test = y1_test

    # train 5 times and take the mean
    scores = [None] * 5
    for i in range(5):
        print('trial ' + str(i + 1) + ' of 5')

        # load proper model
        if model_str == 'vgg16_hybrid_1365':
            model = vgg16_hybrid_1365(1)
        elif model_str == 'vgg16_hybrid_1365_stride':
            model = vgg16_hybrid_1365_stride(1)
        else:
            # FIX: print() function instead of the Python 2 print
            # statement (syntax error under Python 3; rest of file uses
            # print() calls).
            print('ERROR: model not implemented')
            return

        # append new softmax layer
        model.add(Dense(train_dataset.nb_classes, activation='softmax'))

        # fine tuning stats
        model.compile(optimizer=SGD(lr=1e-3,
                                    decay=1e-6,
                                    momentum=0.9,
                                    nesterov=True),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

        model.fit(
            x_train,
            y_train,
            batch_size=32,
            # converges very quickly, but takes more time than intact
            epochs=10,
            validation_data=(x_test, y_test))

        # keep only the accuracy entry of the evaluation
        scores[i] = model.evaluate(x_test, y_test)[1]

        K.clear_session()

    print(scores)
    print(np.mean(scores))
    print('done')