# Example #1
# 0
def get_model(model, **kwargs):
    """Return a (model instance, preprocess function) pair for a known architecture.

    :param model: architecture name: 'vgg16', 'resnet50v2' or 'mobilenetv2'
    :param kwargs: forwarded unchanged to the model constructor
                   (e.g. include_top, weights, input_shape)
    :return: tuple of (Keras model, matching preprocess_input callable)
    :raises ValueError: if `model` is not one of the recognized names
    """
    if model == 'vgg16':
        return vgg16.VGG16(**kwargs), vgg16.preprocess_input
    if model == 'resnet50v2':
        return resnetv2.ResNet50V2(**kwargs), resnetv2.preprocess_input
    if model == 'mobilenetv2':
        return mobilenetv2.MobileNetV2(**kwargs), mobilenetv2.preprocess_input

    # Name the offending value instead of raising a bare ValueError.
    raise ValueError("Unknown model name: {!r}".format(model))
def train_net(X_train, y_train, params):
    """Build and train a binary classifier on top of a frozen ResNet50V2.

    :param X_train: training images, shape compatible with params['input_shape']
    :param y_train: binary labels for the training images
    :param params: dict with 'input_shape', 'epochs', 'batch_size' and 'steps'
    :return: the trained Sequential model
    """
    # Unpack hyper-parameters
    input_shape = params['input_shape']
    epochs = params['epochs']
    batch_size = params['batch_size']
    steps = params['steps']

    # ImageNet-pretrained backbone, global-average pooled, no classifier head
    backbone = resnet_v2.ResNet50V2(include_top=False,
                                    weights='imagenet',
                                    pooling='avg',
                                    input_shape=input_shape)

    # Freeze every backbone layer so only the new head is trained
    for layer in backbone.layers:
        layer.trainable = False

    # Head: dropout + a single sigmoid unit for binary classification
    net = Sequential()
    net.add(backbone)
    net.add(Dropout(0.5))
    net.add(Dense(1, activation='sigmoid'))

    net.compile(loss='binary_crossentropy',
                optimizer=optimizers.RMSprop(lr=2e-5),
                metrics=['accuracy'])

    # Show the architecture before fitting
    net.summary()

    # Augmentation pipeline used while fitting
    train_datagen = ImageDataGenerator(rotation_range=20,
                                       width_shift_range=0.2,
                                       height_shift_range=0.2,
                                       shear_range=0.15,
                                       horizontal_flip=False,
                                       fill_mode="nearest",
                                       vertical_flip=True)

    train_generator = train_datagen.flow(np.asarray(X_train),
                                         np.asarray(y_train),
                                         batch_size=batch_size)

    net.fit_generator(train_generator, epochs=epochs, steps_per_epoch=steps)

    return net
 def create_model():
     """Build an untrained feature extractor for 32x32 single-channel inputs.

     A learned 1x1 convolution expands the single channel to the 3
     channels ResNet expects; ResNet50V2 (weights=None, avg pooling)
     then produces the pooled feature vector.
     """
     gray_in = keras.Input((32, 32, 1))
     # ResNet requires 3 channels, so map 1 -> 3 with a 1x1 conv
     three_channel = keras.layers.Conv2D(3, (1, 1))(gray_in)
     backbone = resnet_v2.ResNet50V2(include_top=False,
                                     input_tensor=three_channel,
                                     pooling='avg',
                                     weights=None)
     return keras.Model(gray_in, backbone.output)
def extract(mode='classic'):
    """Extract MI4 features for every recorded day and save them as CSV.

    :param mode: 'cnn' to extract features with a pretrained ResNet50V2,
                 anything else to use the classic (signal-based) extractor.
    """
    print('---------- Start MI4 - Feature Extraction ----------')

    # Get all the subjects' folders
    days = os.listdir(params['data']['subject_folder'])

    if mode == 'cnn':
        # Build the CNN once and reuse it for every day.
        # image_size is reversed because Keras wants (height, width, channels).
        cnn = resnet_v2.ResNet50V2(
            include_top=False,
            weights='imagenet',
            pooling='avg',
            input_shape=params['features']['image_size'][::-1] + (3, ))
    else:
        cnn = None

    # For each day extract features
    for day in days:

        # Create paths for files
        day_path = os.path.join(params['data']['subject_folder'], day)
        trials_path = os.path.join(day_path,
                                   params['data']['filenames']['trials'])
        info_path = os.path.join(day_path, params['data']['filenames']['info'])

        # Get the current day trials & sample freq.
        # Use context managers so the file handles are always closed
        # (the previous bare open(...) calls leaked them).
        with open(trials_path, 'rb') as trials_file:
            trials = pickle.load(trials_file)
        with open(info_path, 'r') as info_file:
            s_freq = json.load(info_file)['effective_srate']

        # Extract features from each trial
        print("Extracting Features for subject: {}".format(day_path))

        if mode == 'cnn':
            features = extract_features_cnn(trials, cnn)
        else:
            features = extract_features_classic(trials, s_freq)

        # Save the features
        features_path = os.path.join(day_path,
                                     params['data']['filenames']['features'])
        print("Saving features to: {}".format(features_path))
        np.savetxt(features_path, features, delimiter=',')
# Example #5
# 0
def trainModel(train, train_labels, train_model_params):
    """Create a transfer-learning CNN and train it with data augmentation.

    :param train: numpy array of the train images
    :param train_labels: list of the labels of the train images
    :param train_model_params: dict with 'input_shape', 'epochs' and
                               'batch_size'
    :return: a trained network on the relevant data
    """
    print('\nStart creating the CNN model')
    input_shape = train_model_params['input_shape']
    epochs = train_model_params['epochs']
    batch_size = train_model_params['batch_size']

    # Frozen ImageNet-pretrained backbone; pooling='avg' already yields a
    # flat feature vector, so Dense layers can follow directly.
    resnet = resnet_v2.ResNet50V2(include_top=False, weights='imagenet', pooling='avg', input_shape=input_shape)
    for layer in resnet.layers:
        layer.trainable = False

    network = Sequential()
    network.add(resnet)
    # Fix: the old code passed input_dim=input_shape to this Dense layer.
    # input_dim expects an int, not a shape tuple, and Keras ignores it on
    # non-first layers anyway - removed.
    network.add(Dense(512, activation='relu'))
    network.add(Dropout(0.3))
    network.add(Dense(512, activation='relu'))
    network.add(Dropout(0.3))
    network.add(Dense(1, activation='sigmoid'))
    network.compile(loss='binary_crossentropy',
                    optimizer=optimizers.RMSprop(lr=2e-5),
                    metrics=['accuracy'])
    # Print the summary of the model
    network.summary()

    # For using data augmentation
    train_datagen = ImageDataGenerator(rotation_range=50,
                                       width_shift_range=0.2, height_shift_range=0.2, shear_range=0.15,
                                       horizontal_flip=True, fill_mode="nearest")

    train_generator = train_datagen.flow(train, train_labels, batch_size=batch_size)

    network.fit_generator(train_generator, epochs=epochs,
                          steps_per_epoch=math.ceil(len(train_labels) / batch_size))

    return network
# Example #6
# 0
def main():
    """End-to-end pipeline: ResNet features -> PCA -> classifier -> report."""

    # Load the train/test split
    X_train, X_test, y_train, y_test = get_CNN_data()

    # Pretrained extractor producing pooled ResNet50V2 activations
    extractor = resnet_v2.ResNet50V2(include_top=False,
                                     weights='imagenet',
                                     pooling='avg',
                                     input_shape=X_train[0].shape)

    # Feature extraction for both splits
    train_feats = extractor.predict(np.asarray(X_train))
    test_feats = extractor.predict(np.asarray(X_test))

    # Reduce dimensionality: fit PCA on train only, then project test
    pca = PCA(n_components=100)
    train_feats = pca.fit_transform(train_feats)
    test_feats = pca.transform(test_feats)

    # Fit the downstream classifier on the reduced features
    model = train_model(train_feats, y_train)

    # Report predictions against the held-out labels
    print('Predictions: {}'.format(model.predict(test_feats)))
    print('True Labels: {}'.format(np.asarray(y_test)))
    print('Score: {}'.format(model.score(test_feats, y_test)))
# Example #7
# 0
import os
import pickle
import cv2
import numpy as np
import matplotlib.pyplot as plt
from keras.applications import resnet_v2
from sklearn.decomposition import PCA

# Target size as (width, height); reversed below because Keras expects
# (height, width, channels).
image_size = (100, 600)

# ImageNet-pretrained feature extractor with global average pooling
resnet = resnet_v2.ResNet50V2(include_top=False, weights='imagenet', pooling='avg',
                              input_shape=image_size[::-1] + (3,))

# Get the current subject trials.
# NOTE(review): the path mixes '/' and '\\' separators - works on Windows,
# but consider building every component with os.path.join.
subject_path = os.path.join('../bci4als/data\\evyatar', '1')
trials_path = os.path.join(subject_path, 'EEG_trials.pickle')
# Use a context manager so the pickle file handle is closed promptly
# (the previous bare open(...) call leaked it).
with open(trials_path, 'rb') as trials_file:
    trials = pickle.load(trials_file)


# f = extract_features_resnet(trials, resnet)
# Example #8
# 0
def trainWithTuning(train, train_labels, params):
    """Tune epochs and batch size via stratified K-fold cross-validation.

    For every (epoch, batch) combination a fresh frozen-ResNet50V2
    classifier is trained on each fold; the mean classification error
    across folds is stored in ``results`` under the key '(epoch,batch)'.

    :param train: numpy array of the train images
    :param train_labels: list of the labels of the train images
    :param params: parameters for the tuning: 'k', 'epochs_range',
                   'batch_range'
    """

    # Define the K parameter
    k = params['k']

    # Get all the ranges to check
    epochs_range = params['epochs_range']
    batch_range = params['batch_range']
    # Fixed input size expected by the ImageNet-pretrained ResNet
    input_shape = (224, 224, 3)

    # First entry documents the key format used for the grid results below
    results = {'Type': '(Epoch, Batch)'}

    for epoch in epochs_range:
        for batch in batch_range:

            # Run K-Folds algorithm (fixed seed => reproducible splits)
            kfold = StratifiedKFold(n_splits=k, shuffle=True, random_state=7)
            errors = []

            for train_index, test_index in kfold.split(train, train_labels):

                # Initiate the current cross-validation train and test datasets
                # (pre-allocated, then filled row by row from the fold indices)
                cv_train = np.zeros((len(train_index),) + train[0].shape)
                cv_test = np.zeros((len(test_index),) + train[0].shape)

                for i, j in enumerate(train_index):
                    cv_train[i, :] = train[j, :]

                for i, j in enumerate(test_index):
                    cv_test[i, :] = train[j, :]

                cv_train_labels = [train_labels[i] for i in train_index]
                cv_test_labels = [train_labels[i] for i in test_index]

                # For using data augmentation
                train_datagen = ImageDataGenerator(rotation_range=50,
                                                   width_shift_range=0.2, height_shift_range=0.2, shear_range=0.15,
                                                   horizontal_flip=True, fill_mode="nearest")

                train_generator = train_datagen.flow(cv_train, cv_train_labels, batch_size=batch)

                # Create the network: frozen pretrained backbone + dense head
                resnet = resnet_v2.ResNet50V2(include_top=False, weights='imagenet', pooling='avg',
                                              input_shape=input_shape)

                # Freeze the backbone; only the dense head is trained
                for layer in resnet.layers:
                    layer.trainable = False

                network = Sequential()
                network.add(resnet)
                # NOTE(review): input_dim expects an int but receives a shape
                # tuple here; Keras ignores input_dim on non-first layers, so
                # this is harmless but misleading - confirm and remove.
                network.add(Dense(512, activation='relu', input_dim=input_shape))
                network.add(Dropout(0.3))
                network.add(Dense(512, activation='relu'))
                network.add(Dropout(0.3))
                network.add(Dense(1, activation='sigmoid'))
                network.compile(loss='binary_crossentropy',
                                optimizer=optimizers.RMSprop(lr=2e-5),
                                metrics=['accuracy'])

                # network.summary()

                network.fit_generator(train_generator, epochs=epoch,
                                      steps_per_epoch=math.ceil(len(cv_train_labels) / batch))

                # network.fit(cv_train, cv_train_labels, epochs=epoch, batch_size=batch, verbose=1)

                # Evaluate this fold on the held-out samples
                predict = network.predict_classes(cv_test)
                current_error = getError(predict, cv_test_labels)
                errors.append(current_error)
                print(current_error)

            # Mean CV error for this (epoch, batch) combination
            results['(' + str(epoch) + ',' + str(batch) + ')'] = np.mean(errors)