Example #1
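        # (Excerpt begins mid-script: `visible` is presumably the image input
        # and `drop5` the preceding dropout layer, both defined earlier in the
        # original source.)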
        hidden2 = Dense(1024, activation='relu', kernel_initializer='he_normal')(drop5)
        drop6 = Dropout(0.40)(hidden2)
        output_img = Dense(1, activation='sigmoid')(drop6)

        attributes_input = Input(shape=input_attributes_s)
        hidden3 = Dense(16, activation='relu')(attributes_input)
        drop6 = Dropout(0.10)(hidden3)
        hidden4 = Dense(8, activation='relu')(drop6)
        drop7 = Dropout(0.05)(hidden4)
        output_attributes = Dense(1, activation='sigmoid')(drop7)

        concat = concatenate([output_img, output_attributes])
        hidden5 = Dense(4, activation='relu')(concat)
        output = Dense(1, activation='sigmoid')(hidden5)

    attr.model = Model(inputs=[visible, attributes_input], outputs=output)

    plot_model(attr.model, to_file=attr.summ_basename + '-architecture.png')

    # compile the model with binary cross-entropy loss, the RMSprop optimizer
    # (gradient descent), and accuracy as the main metric
    attr.model.compile(loss='binary_crossentropy',
                  optimizer=RMSprop(lr=0.000001),
                  metrics=['accuracy'])

    # this is the augmentation configuration we will use for training
    train_datagen = create_image_generator(False, True)

    # this is the configuration we will use for testing:
    # no augmentation is applied.
    test_datagen = create_image_generator(False, False)
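# create_image_generator() is defined elsewhere in the original project. A
# minimal sketch of what it might look like, assuming the first flag controls
# pixel rescaling and the second enables augmentation (both flag meanings are
# assumptions inferred from the call sites):
from keras.preprocessing.image import ImageDataGenerator

def create_image_generator(rescale, augment):
    # scale pixel values to [0, 1] only when requested
    scale = 1. / 255 if rescale else None
    if augment:
        # modest geometric augmentation for training
        return ImageDataGenerator(rescale=scale,
                                  shear_range=0.2,
                                  zoom_range=0.2,
                                  horizontal_flip=True)
    # no augmentation for validation/test
    return ImageDataGenerator(rescale=scale)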
Example #2
# attr.path = '/home/amenegotto/dataset/2d/sem_pre_proc_mini/'
attr.path = '/mnt/data/image/2d/com_pre_proc/'
attr.summ_basename = get_base_name(SUMMARY_BASEPATH)
attr.s3_path = NETWORK_FORMAT + '/' + IMAGE_FORMAT
attr.epochs = 100
attr.batch_size = 128
attr.set_dir_names()

if K.image_data_format() == 'channels_first':
    input_s = (3, attr.img_width, attr.img_height)
else:
    input_s = (attr.img_width, attr.img_height, 3)

for i in range(0, CYCLES):
    # define model
    attr.model = Sequential()
    attr.model.add(Conv2D(128, (3, 3), input_shape=input_s, kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(0.0005)))
    attr.model.add(BatchNormalization())
    attr.model.add(Activation('relu'))
    attr.model.add(Dropout(0.25))
    attr.model.add(Conv2D(128, (3, 3), kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(0.0005)))
    attr.model.add(BatchNormalization())
    attr.model.add(Activation('relu'))
    attr.model.add(Dropout(0.25))
    attr.model.add(MaxPooling2D(pool_size=(3, 3)))

    attr.model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors
    attr.model.add(Dense(256, kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(0.0005)))
    attr.model.add(Activation('relu'))
    attr.model.add(Dropout(0.40))
    attr.model.add(Dense(256, kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(0.0005)))
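    # The listing is truncated here. Based on the pattern of the preceding
    # layers, the head would plausibly continue along these lines (a sketch,
    # not part of the original source):
    attr.model.add(Activation('relu'))
    attr.model.add(Dropout(0.40))
    attr.model.add(Dense(2, activation='softmax'))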
Example #3
    # Top Model Block
    glob1 = GlobalAveragePooling2D()(base_model.output)
    hidout = Dense(1024, activation='relu')(glob1)
    if INTERMEDIATE_FUSION:
        attr.fusion = "Intermediate Fusion"

        attributes_input = Input(shape=input_attributes_s)
        concat = concatenate([hidout, attributes_input])
        hidden1 = Dense(128, activation='relu')(concat)
        drop6 = Dropout(0.20)(hidden1)
        hidden2 = Dense(64, activation='relu')(drop6)
        drop7 = Dropout(0.20)(hidden2)
        output = Dense(2, activation='softmax')(drop7)

        attr.model = Model(inputs=[base_model.input, attributes_input],
                           outputs=output)

    if LATE_FUSION:
        attr.fusion = "Late Fusion"
        output_img = Dense(2, activation='softmax')(hidout)

        model_img = Model(inputs=base_model.input, outputs=output_img)

        attributes_input = Input(shape=input_attributes_s)
        hidden3 = Dense(128, activation='relu')(attributes_input)
        drop6 = Dropout(0.20)(hidden3)
        hidden4 = Dense(64, activation='relu')(drop6)
        drop7 = Dropout(0.20)(hidden4)
        output_attributes = Dense(1, activation='sigmoid')(drop7)
        model_attr = Model(inputs=attributes_input, outputs=output_attributes)
예제 #4
0
    # dimensions of our images.
    # Inception input size
    attr.img_width, attr.img_height = 299, 299

    # add a global spatial average pooling layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    # let's add a fully-connected layer
    x = Dense(1024, activation='relu')(x)
    drop = Dropout(0.20)(x)

    # and a logistic layer -- we have 2 classes
    predictions = Dense(2, activation='softmax')(drop)

    # this is the model we will train
    attr.model = Model(inputs=base_model.input, outputs=predictions)

    # first: train only the top layers (which were randomly initialized)
    # i.e. freeze all convolutional InceptionV3 layers
    for layer in base_model.layers:
        layer.trainable = False

    # compile the model (should be done *after* setting layers to non-trainable)
    attr.model.compile(
        optimizer=SGD(lr=0.0001, momentum=0.9),
        loss='categorical_crossentropy',
        metrics=['accuracy'],
    )

    # prepare data augmentation configuration
    train_datagen = create_image_generator(True, True)
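    # Truncated here. Wiring the generator to the training directory would
    # typically follow the pattern of Example #6 (a sketch; attr.train_data_dir
    # and attr.batch_size are assumed by analogy with the other examples):
    attr.train_generator = train_datagen.flow_from_directory(
        attr.train_data_dir,
        target_size=(attr.img_width, attr.img_height),
        batch_size=attr.batch_size,
        class_mode='categorical')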
Example #5
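        # `flat` is defined earlier in the original script (not shown in this
        # excerpt); presumably something along the lines of:
        #     flat = Flatten()(vgg_conv.output)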
        attributes_input = Input(shape=input_attributes_s)
        concat = concatenate([flat, attributes_input])

        hidden1 = Dense(1024,
                        activation='relu',
                        kernel_initializer='he_normal',
                        kernel_regularizer=regularizers.l2(0.0005))(concat)
        drop6 = Dropout(0.30)(hidden1)
        hidden2 = Dense(128, activation='relu')(drop6)
        drop7 = Dropout(0.20)(hidden2)
        hidden3 = Dense(64, activation='relu')(drop7)
        drop8 = Dropout(0.20)(hidden3)
        output = Dense(2, activation='softmax')(drop8)

        attr.model = Model(inputs=[vgg_conv.input, attributes_input],
                           outputs=output)

    if LATE_FUSION:
        attr.fusion = "Late Fusion"
        hidden1 = Dense(1024, activation='relu')(flat)
        drop3 = Dropout(0.30)(hidden1)
        output_img = Dense(2, activation='softmax')(drop3)

        model_img = Model(inputs=vgg_conv.input, outputs=output_img)

        attributes_input = Input(shape=input_attributes_s)
        hidden3 = Dense(128, activation='relu')(attributes_input)
        drop6 = Dropout(0.20)(hidden3)
        hidden4 = Dense(64, activation='relu')(drop6)
        drop7 = Dropout(0.20)(hidden4)
        output_attributes = Dense(1, activation='sigmoid')(drop7)
Example #6
attr.test_generator = test_datagen.flow_from_directory(
    attr.test_data_dir,
    target_size=(attr.img_width, attr.img_height),
    batch_size=1,
    class_mode='categorical',
    shuffle=False)

# Persist execution attributes for session resume
save_execution_attributes(attr, attr.summ_basename + '-execution-attributes.properties')

# training time
# Reload the best weights from the top-model training phase: at this point we
# have the pre-trained weights of the base model and the trained weights of the
# newly added top model. Reloading ensures the best epoch's weights are used,
# not the last epoch's.
attr.model = load_model('/mnt/data/fine-tuning/Xception/20190202-122953-mid-ckweights.h5')

# based_model_last_block_layer_number points to the layer in your model you want to train.
# For example, to train the last block of a 19-layer VGG16 model this should be 15;
# to train the last two blocks of an Inception model it should be 172.
# Layers before this number will use the pre-trained weights; layers at and
# above this number will be re-trained on the new data.
for layer in attr.model.layers[:based_model_last_block_layer_number]:
    layer.trainable = False
for layer in attr.model.layers[based_model_last_block_layer_number:]:
    layer.trainable = True

# re-compile the model so the new trainable flags take effect;
# here the Nadam optimizer is used with its default learning rate.
attr.model.compile(optimizer='nadam',
              loss='categorical_crossentropy',
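              # (the compile call is truncated in this listing; a plausible
              # completion follows:)
              metrics=['accuracy'])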
Example #7
# how many times to execute the training/validation/test cycle
CYCLES = 1

for i in range(0, CYCLES):

    # pre-trained CNN model using ImageNet weights
    base_model = Xception(input_shape=(attr.img_width, attr.img_height, 3), weights='imagenet', include_top=False)

    # Top Model Block
    x = GlobalAveragePooling2D()(base_model.output)
    hidden1 = Dense(1024, activation='relu', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(0.0005))(x)
    drop = Dropout(0.20)(hidden1)
    predictions = Dense(nb_classes, activation='softmax')(drop)

    # add your top layer block to your base model
    attr.model = Model(base_model.input, predictions)
    attr.model.summary()

    # # let's visualize layer names and layer indices to see how many layers/blocks to re-train
    # # uncomment when choosing based_model_last_block_layer
    # for i, layer in enumerate(attr.model.layers):
    #     print(i, layer.name)

    # first: train only the top layers (which were randomly initialized),
    # i.e. freeze all layers of the base model, which is already pre-trained.
    for layer in base_model.layers:
        layer.trainable = False

    # Read the data and augment it: make sure to select augmentations that are
    # appropriate for your images. To save the augmented images, un-comment the
    # save lines and add them to your flow parameters.
    train_datagen = create_image_generator(True, True)
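    # The listing stops here. Before training, the model would be compiled and
    # fit; a sketch using the legacy fit_generator API (attr.train_generator
    # and attr.train_samples are assumed fields, not shown in this excerpt):
    attr.model.compile(optimizer=SGD(lr=0.0001, momentum=0.9),
                       loss='categorical_crossentropy',
                       metrics=['accuracy'])
    attr.model.fit_generator(attr.train_generator,
                             steps_per_epoch=attr.train_samples // attr.batch_size,
                             epochs=attr.epochs)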
Example #8
        attributes_input = Input(shape=input_attributes_s)
        concat = concatenate([flat, attributes_input])

        hidden1 = Dense(512,
                        activation='relu',
                        kernel_initializer='he_normal',
                        kernel_regularizer=regularizers.l2(0.0005))(concat)
        drop5 = Dropout(0.40)(hidden1)
        hidden2 = Dense(1024,
                        activation='relu',
                        kernel_initializer='he_normal',
                        kernel_regularizer=regularizers.l2(0.0005))(drop5)
        drop6 = Dropout(0.40)(hidden2)
        output = Dense(1, activation='sigmoid')(drop6)

        attr.model = Model(inputs=[visible, attributes_input], outputs=output)

    if LATE_FUSION:
        attr.fusion = "Late Fusion"

        hidden1 = Dense(512,
                        activation='relu',
                        kernel_initializer='he_normal',
                        kernel_regularizer=regularizers.l2(0.0005))(flat)
        drop5 = Dropout(0.40)(hidden1)
        hidden2 = Dense(1024,
                        activation='relu',
                        kernel_initializer='he_normal',
                        kernel_regularizer=regularizers.l2(0.0005))(drop5)
        drop6 = Dropout(0.40)(hidden2)
        output_img = Dense(1, activation='sigmoid')(drop6)
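        # Truncated here. Following the late-fusion pattern of Examples #3 and
        # #5, the image branch would next be wrapped in its own Model (a sketch;
        # `visible` is the image input defined earlier in the original script):
        model_img = Model(inputs=visible, outputs=output_img)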
Example #9
for i in range(0, CYCLES):
    
    # Load the VGG model
    vgg_conv = VGG19(weights='imagenet', include_top=False, input_shape=(attr.img_width, attr.img_height, 3))

    # Freeze the layers except the last 4 layers
    for layer in vgg_conv.layers[:-4]:
        layer.trainable = False

    # Check the trainable status of the individual layers
    for layer in vgg_conv.layers:
        print(layer, layer.trainable)


    # Create the model
    attr.model = models.Sequential()
     
    # Add the vgg convolutional base model
    attr.model.add(vgg_conv)
     
    # Add new layers
    attr.model.add(layers.Flatten())
    attr.model.add(layers.Dense(1024, activation='relu'))
    attr.model.add(layers.Dropout(0.3))
    attr.model.add(layers.Dense(2, activation='softmax'))
     
    # Show a summary of the model. Check the number of trainable parameters
    attr.model.summary()
    plot_model(attr.model, to_file=attr.summ_basename + '-architecture.png')

    # prepare data augmentation configuration
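    # (Truncated.) In the other examples this comment is followed by:
    #     train_datagen = create_image_generator(True, True)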