# Imports inferred from the usage in these snippets; the project-local
# helpers (data_preprocessing, CNN_feature_extractor, model_evaluation)
# are assumed to live alongside these files.
import os

import keras
from keras import layers
from keras.applications.vgg16 import VGG16
from keras.callbacks import ModelCheckpoint
from keras.layers import (Conv2D, Dense, Dropout, Flatten, LeakyReLU,
                          MaxPooling2D)
from keras.models import Model, Sequential, load_model
from sklearn.model_selection import train_test_split

import CNN_feature_extractor
import data_preprocessing
import model_evaluation


def model(train_data_whole, train_labels_whole, test_data, test_labels, opt,
          epoch, batch_size_factor, num_classes, result_path,
          feature_extraction, feature_extractor_parameters):

    train_data, valid_data, train_labels, valid_labels = train_test_split(
        train_data_whole, train_labels_whole, test_size=0.1, random_state=13)
    train_labels_one_hot = data_preprocessing.labels_convert_one_hot(
        train_labels)
    valid_labels_one_hot = data_preprocessing.labels_convert_one_hot(
        valid_labels)
    test_labels_one_hot = data_preprocessing.labels_convert_one_hot(
        test_labels)
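    # e.g. 1,000 training samples with batch_size_factor=10 gives batch_size=100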
    batch_size = round(train_data.shape[0] / batch_size_factor)
    
    input_shape = (224, 224, 3)
    vgg_model = VGG16(weights='imagenet',
                      include_top=False,
                      input_shape=input_shape)
    # Creating dictionary that maps layer names to the layers
    layer_dict = dict([(layer.name, layer) for layer in vgg_model.layers])
    # Getting output tensor of the last VGG layer that we want to include
    x = layer_dict['block2_pool'].output
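    # For 224x224 inputs, block2_pool yields a 56x56x128 feature map.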
    
    x = Conv2D(filters=64, kernel_size=(3, 3), activation='relu',
               padding='same')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Conv2D(filters=128, kernel_size=(3, 3), activation='relu',
               padding='same')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Conv2D(filters=128, kernel_size=(3, 3), activation='relu',
               padding='same')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    
    x = Flatten()(x)
    x = Dense(4096, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(4096, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(num_classes, activation='softmax')(x)
    
    # Creating new model. Please note that this is NOT a Sequential() model.
    classification_model = Model(inputs=vgg_model.input, outputs=x)
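    # The first seven layers are the reused VGG16 stem (input through
    # block2_pool); they are kept trainable so they are fine-tuned together
    # with the new head.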
    for layer in classification_model.layers[:7]:
        layer.trainable = True
    
    es = keras.callbacks.EarlyStopping(monitor='val_acc',
                                       min_delta=0,
                                       patience=5000,
                                       verbose=1,
                                       mode='auto')

    mc = ModelCheckpoint(os.path.join(result_path, 'best_model.h5'),
                         monitor='val_acc',
                         mode='auto',
                         save_best_only=True)
    
    classification_model.compile(loss='mean_squared_error',
                                 optimizer=opt,
                                 metrics=['accuracy'])
    classification_train = classification_model.fit(
        train_data,
        train_labels_one_hot,
        batch_size=batch_size,
        epochs=epoch,
        verbose=1,
        validation_data=(valid_data, valid_labels_one_hot),
        callbacks=[mc, es])
    best_model = load_model(os.path.join(result_path, 'best_model.h5'))
    file_name = os.path.split(result_path)[1]
    date = os.path.split(os.path.split(result_path)[0])[1]
    classification_model.save(
        os.path.join(result_path,
                     date + '_' + file_name + '_' + 'VGGpretrained_model.h5'))

    if feature_extraction == 1:
        feature_extractor_parameters['CNN_model'] = classification_model
        CNN_feature_extractor.CNN_feature_extraction_classsification(
            feature_extractor_parameters, result_path)
        return
    model_evaluation.testing_and_printing(classification_model,
                                          classification_train, best_model,
                                          test_data, test_labels_one_hot,
                                          'VGG_pretrained', result_path, epoch)
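

# A minimal usage sketch for these snippets; the optimizer, hyper-parameter
# values and result_path below are illustrative assumptions, not taken from
# the original project:
#
#   opt = keras.optimizers.Adam(lr=1e-4)
#   model(train_images, train_labels, test_images, test_labels, opt,
#         epoch=100, batch_size_factor=10, num_classes=2,
#         result_path='results/2019-01-01/run_1',
#         feature_extraction=0, feature_extractor_parameters={})
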
def model(train_data_whole, train_labels_whole, test_data, test_labels, opt,
          epoch, batch_size_factor, num_classes, result_path,
          feature_extraction, feature_extractor_parameters):

    train_data, valid_data, train_labels, valid_labels = train_test_split(
        train_data_whole, train_labels_whole, test_size=0.1, random_state=13)
    train_labels_one_hot = data_preprocessing.labels_convert_one_hot(
        train_labels)
    valid_labels_one_hot = data_preprocessing.labels_convert_one_hot(
        valid_labels)
    test_labels_one_hot = data_preprocessing.labels_convert_one_hot(
        test_labels)
    '''
    train_data=data_preprocessing.depth_reshapeing(train_data)
    test_data=data_preprocessing.depth_reshapeing(test_data)
    valid_data=data_preprocessing.depth_reshapeing(valid_data)

    train_data = data_preprocessing.size_editing(train_data, 224)
    valid_data= data_preprocessing.size_editing(valid_data, 224)
    test_data = data_preprocessing.size_editing(test_data, 224)
    '''
    batch_size = round(train_data.shape[0] / batch_size_factor)
    input_shape = (224, 224, 3)
    densenet121_model = keras.applications.densenet.DenseNet121(
        include_top=False,
        weights='imagenet',
        input_shape=input_shape,
        pooling=None,
        classes=num_classes)

    layer_dict = dict([(layer.name, layer)
                       for layer in densenet121_model.layers])
    # Getting output tensor of the last DenseNet layer that we want to include
    x = layer_dict[list(layer_dict.keys())[-1]].output
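    # For 224x224 inputs this is DenseNet121's final 7x7x1024 feature map.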
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = Flatten()(x)
    x = Dense(4096, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(4096, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(num_classes, activation='softmax')(x)
    classification_model = Model(inputs=densenet121_model.input, outputs=x)

    for layer in classification_model.layers:
        layer.trainable = True
    classification_model.compile(loss='mean_squared_error',
                                 optimizer=opt,
                                 metrics=['accuracy'])
    es = keras.callbacks.EarlyStopping(monitor='val_acc',
                                       min_delta=0,
                                       patience=500,
                                       verbose=1,
                                       mode='auto')
    mc = ModelCheckpoint(os.path.join(result_path, 'best_model.h5'),
                         monitor='val_acc',
                         mode='auto',
                         save_best_only=True)

    classification_train = classification_model.fit(
        train_data,
        train_labels_one_hot,
        batch_size=batch_size,
        epochs=epoch,
        verbose=1,
        validation_data=(valid_data, valid_labels_one_hot),
        callbacks=[es, mc])
    file_name = os.path.split(result_path)[1]
    date = os.path.split(os.path.split(result_path)[0])[1]
    classification_model.save(
        os.path.join(result_path,
                     date + '_' + file_name + '_' + 'DenseNet121.h5'))
    #best_model=load_model(os.path.join(result_path,'best_model.h5'))
    best_model = classification_model
    if feature_extraction == 1:
        feature_extractor_parameters['CNN_model'] = classification_model
        CNN_feature_extractor.CNN_feature_extraction_classsification(
            feature_extractor_parameters, result_path)
        return
    model_evaluation.testing_and_printing(classification_model,
                                          classification_train, best_model,
                                          test_data, test_labels_one_hot,
                                          'DenseNet121', result_path,
                                          epoch)
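

# data_preprocessing.labels_convert_one_hot is not shown in these snippets;
# a minimal sketch of what it presumably does, assuming integer class labels
# starting at 0:
#
#   from keras.utils import to_categorical
#
#   def labels_convert_one_hot(labels):
#       # e.g. [0, 2, 1] -> [[1, 0, 0], [0, 0, 1], [0, 1, 0]]
#       return to_categorical(labels)
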
def model(train_data_whole, train_labels_whole, test_data, test_labels, opt,
          epoch, batch_size_factor, num_classes, result_path,
          feature_extraction, feature_extractor_parameters):

    train_data, valid_data, train_labels, valid_labels = train_test_split(
        train_data_whole, train_labels_whole, test_size=0.1, random_state=13)
    train_labels_one_hot = data_preprocessing.labels_convert_one_hot(
        train_labels)
    valid_labels_one_hot = data_preprocessing.labels_convert_one_hot(
        valid_labels)
    test_labels_one_hot = data_preprocessing.labels_convert_one_hot(
        test_labels)
    batch_size = round(train_data.shape[0] / batch_size_factor)
    classification_model = Sequential()
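    # LeNet-5-style architecture, with ReLU activations and dropout added.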
    # C1 Convolutional Layer
    classification_model.add(
        layers.Conv2D(6,
                      kernel_size=(5, 5),
                      strides=(1, 1),
                      activation='relu',
                      input_shape=(train_data.shape[1], train_data.shape[2],
                                   train_data.shape[3]),
                      padding='same'))

    classification_model.add(Dropout(0.7))

    # S2 Pooling Layer
    classification_model.add(
        layers.AveragePooling2D(pool_size=(2, 2),
                                strides=(1, 1),
                                padding='valid'))
    # C3 Convolutional Layer
    classification_model.add(
        layers.Conv2D(16,
                      kernel_size=(5, 5),
                      strides=(1, 1),
                      padding='valid',
                      activation='relu'))

    classification_model.add(Dropout(0.7))

    # S4 Pooling Layer
    classification_model.add(
        layers.AveragePooling2D(pool_size=(2, 2),
                                strides=(2, 2),
                                padding='valid'))
    # C5 Fully Connected Convolutional Layer
    classification_model.add(
        layers.Conv2D(120,
                      kernel_size=(5, 5),
                      strides=(1, 1),
                      padding='valid',
                      activation='relu'))

    classification_model.add(Dropout(0.8))
    #Flatten the CNN output so that we can connect it with fully connected layers
    classification_model.add(layers.Flatten())
    # FC6 Fully Connected Layer
    classification_model.add(layers.Dense(84, activation='relu'))

    classification_model.add(Dropout(0.8))

    #Output Layer with softmax activation
    classification_model.add(layers.Dense(num_classes, activation='softmax'))

    classification_model.compile(loss=keras.losses.categorical_crossentropy,
                                 optimizer=opt,
                                 metrics=['accuracy'])

    es = keras.callbacks.EarlyStopping(monitor='val_acc',
                                       min_delta=0,
                                       patience=100,
                                       verbose=1,
                                       mode='auto')
    mc = ModelCheckpoint(os.path.join(result_path, 'best_model.h5'),
                         monitor='val_acc',
                         mode='auto',
                         save_best_only=True)
    classification_train = classification_model.fit(
        train_data,
        train_labels_one_hot,
        batch_size=batch_size,
        epochs=epoch,
        verbose=1,
        validation_data=(valid_data, valid_labels_one_hot),
        callbacks=[es, mc])
    best_model = load_model(os.path.join(result_path, 'best_model.h5'))
    file_name = os.path.split(result_path)[1]
    date = os.path.split(os.path.split(result_path)[0])[1]
    classification_model.save(
        os.path.join(result_path,
                     date + '_' + file_name + '_' + 'leNet_model.h5'))
    if feature_extraction == 1:
        feature_extractor_parameters['CNN_model'] = classification_model
        CNN_feature_extractor.CNN_feature_extraction_classsification(
            train_data_whole, train_labels_whole, test_data, test_labels,
            feature_extractor_parameters, result_path)
        return
    model_evaluation.testing_and_printing(classification_model,
                                          classification_train, best_model,
                                          test_data, test_labels_one_hot,
                                          'LeNet', result_path, epoch)
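

# ModelCheckpoint keeps the weights with the best val_acc in best_model.h5;
# a minimal inference sketch for the saved file (variable names are
# illustrative):
#
#   best = load_model(os.path.join(result_path, 'best_model.h5'))
#   probs = best.predict(test_data)   # per-class softmax probabilities
#   preds = probs.argmax(axis=1)      # integer class predictions
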
def model(train_data_whole, train_labels_whole, test_data, test_labels, opt,
          epoch, batch_size_factor, num_classes, result_path,
          feature_extraction, feature_extractor_parameters):

    train_data, valid_data, train_labels, valid_labels = train_test_split(
        train_data_whole, train_labels_whole, test_size=0.1, random_state=13)
    train_labels_one_hot = data_preprocessing.labels_convert_one_hot(
        train_labels)
    valid_labels_one_hot = data_preprocessing.labels_convert_one_hot(
        valid_labels)
    test_labels_one_hot = data_preprocessing.labels_convert_one_hot(
        test_labels)

    batch_size = round(train_data.shape[0] / batch_size_factor)

    #Instantiate an empty model
    classification_model = Sequential()
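    # VGG16-style stack of 3x3 convolutions trained from scratch
    # (no pretrained weights).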

    # 1st Convolutional Layer
    classification_model.add(
        Conv2D(filters=64,
               input_shape=(train_data.shape[1], train_data.shape[2],
                            train_data.shape[3]),
               activation='relu',
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same'))
    classification_model.add(
        Conv2D(filters=64,
               activation='relu',
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same'))
    classification_model.add(Dropout(0.4))
    # Max Pooling
    classification_model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # 2nd Convolutional Layer
    classification_model.add(
        Conv2D(filters=128,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu'))
    classification_model.add(
        Conv2D(filters=128,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu'))
    classification_model.add(Dropout(0.4))
    # Max Pooling
    classification_model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # 3rd Convolutional Layer
    classification_model.add(
        Conv2D(filters=256,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu'))
    classification_model.add(
        Conv2D(filters=256,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu'))
    classification_model.add(
        Conv2D(filters=256,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu'))
    classification_model.add(Dropout(0.4))
    # Max Pooling
    classification_model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # 4th Convolutional Layer
    classification_model.add(
        Conv2D(filters=512,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu'))
    classification_model.add(
        Conv2D(filters=512,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu'))
    classification_model.add(
        Conv2D(filters=512,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu'))
    classification_model.add(Dropout(0.4))
    # 5th Convolutional Layer
    classification_model.add(
        Conv2D(filters=512,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu'))
    classification_model.add(
        Conv2D(filters=512,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu'))
    classification_model.add(
        Conv2D(filters=512,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu'))
    classification_model.add(Dropout(0.4))

    # Max Pooling
    classification_model.add(
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))

    # Passing it to a Fully Connected layer
    classification_model.add(Flatten())
    # 1st Fully Connected Layer
    classification_model.add(Dense(4096, activation='relu'))
    # Add Dropout to prevent overfitting
    classification_model.add(Dropout(0.5))

    # 2nd Fully Connected Layer
    classification_model.add(Dense(4096, activation='relu'))

    # Add Dropout
    classification_model.add(Dropout(0.5))
    # Output Layer
    classification_model.add(Dense(num_classes, activation='softmax'))
    classification_model.summary()
    classification_model.compile(loss='mean_squared_error',
                                 optimizer=opt,
                                 metrics=['accuracy'])

    es = keras.callbacks.EarlyStopping(monitor='val_acc',
                                       min_delta=0,
                                       patience=1000,
                                       verbose=1,
                                       mode='auto')

    mc = ModelCheckpoint(os.path.join(result_path, 'best_model.h5'),
                         monitor='val_acc',
                         mode='auto',
                         save_best_only=True)
    classification_train = classification_model.fit(
        train_data,
        train_labels_one_hot,
        batch_size=batch_size,
        epochs=epoch,
        verbose=1,
        validation_data=(valid_data, valid_labels_one_hot),
        callbacks=[mc, es])
    best_model = load_model(os.path.join(result_path, 'best_model.h5'))
    file_name = os.path.split(result_path)[1]
    date = os.path.split(os.path.split(result_path)[0])[1]
    classification_model.save(
        os.path.join(result_path,
                     date + '_' + file_name + '_' + 'VGG_model.h5'))

    if feature_extraction == 1:
        feature_extractor_parameters['CNN_model'] = classification_model
        CNN_feature_extractor.CNN_feature_extraction_classsification(
            feature_extractor_parameters, result_path)
        return
    model_evaluation.testing_and_printing(classification_model,
                                          classification_train, best_model,
                                          test_data, test_labels_one_hot,
                                          'VGG', result_path, epoch)
def model(train_data_whole, train_labels_whole, test_data, test_labels, opt,
          epoch, batch_size_factor, num_classes, result_path,
          feature_extraction, feature_extractor_parameters):
    train_data, valid_data, train_labels, valid_labels = train_test_split(
        train_data_whole, train_labels_whole, test_size=0.2, random_state=13)

    train_labels_one_hot = data_preprocessing.labels_convert_one_hot(
        train_labels)
    valid_labels_one_hot = data_preprocessing.labels_convert_one_hot(
        valid_labels)
    test_labels_one_hot = data_preprocessing.labels_convert_one_hot(
        test_labels)
    batch_size = round(train_data.shape[0] / batch_size_factor)
    classification_model = Sequential()
    classification_model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               padding='same',
               activation='relu',
               input_shape=(train_data.shape[1], train_data.shape[2],
                            train_data.shape[3])))
    #classification_model.add(BatchNormalization())

    classification_model.add(MaxPooling2D((2, 2), padding='same'))
    classification_model.add(Dropout(0.5))
    classification_model.add(Conv2D(64, (3, 3), padding='same'))
    classification_model.add(LeakyReLU(alpha=0.1))
    #classification_model.add(BatchNormalization())

    classification_model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classification_model.add(Dropout(0.5))
    classification_model.add(Conv2D(128, (3, 3), padding='same'))
    classification_model.add(LeakyReLU(alpha=0.1))
    #classification_model.add(BatchNormalization())
    classification_model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classification_model.add(Dropout(0.5))
    classification_model.add(Flatten())
    classification_model.add(Dense(128))
    classification_model.add(LeakyReLU(alpha=0.1))
    #classification_model.add(BatchNormalization())
    classification_model.add(Dense(128))
    classification_model.add(Dropout(0.5))
    classification_model.add(LeakyReLU(alpha=0.1))

    classification_model.add(Dense(num_classes, activation='softmax'))
    classification_model.compile(loss=keras.losses.categorical_crossentropy,
                                 optimizer=opt,
                                 metrics=['accuracy'])

    es = keras.callbacks.EarlyStopping(monitor='val_acc',
                                       min_delta=0,
                                       patience=50,
                                       verbose=1,
                                       mode='auto',
                                       baseline=0.9)

    mc = ModelCheckpoint(os.path.join(result_path, 'best_model.h5'),
                         monitor='val_acc',
                         mode='auto',
                         save_best_only=True)
    classification_train = classification_model.fit(
        train_data,
        train_labels_one_hot,
        batch_size=batch_size,
        epochs=epoch,
        verbose=1,
        validation_data=(valid_data, valid_labels_one_hot),
        callbacks=[es, mc])
    best_model = load_model(os.path.join(result_path, 'best_model.h5'))
    file_name = os.path.split(result_path)[1]
    date = os.path.split(os.path.split(result_path)[0])[1]

    classification_model.save(
        os.path.join(result_path,
                     date + '_' + file_name + '_' + 'simple_model.h5'))

    if feature_extraction == 1:
        feature_extractor_parameters['CNN_model'] = classification_model
        CNN_feature_extractor.CNN_feature_extraction_classsification(
            train_data_whole, train_labels_whole, test_data, test_labels,
            feature_extractor_parameters, result_path)
        return

    model_evaluation.testing_and_printing(classification_model,
                                          classification_train, best_model,
                                          test_data, test_labels_one_hot,
                                          'simple_architecture', result_path,
                                          epoch)