def autoencoder_train(folder, batch_size, epoch_size, model_name):
    """
    Autoencoding, inherently UNET, is a data compression algorithm where the compression and decompression functions are:
    - data specific, ie, only compress data similar to what they have been trained on
    - lossy, ie, decompressed output will be degraded
    - learned automatically from examples.

    Two practical applications of autoencoders are data removal and dimensionality reduction

    There is an implementation from scikit-learn:
    http://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html

    :param folder: image folder for training
    :param batch_size: training batch size
    :param epoch_size: training epoch size
    :param model_name: IR2, InceptionResNetV2; NL, NASNetLarge; NM, NASNetLarge
    :return: None
    """
    image_wh = system_config['image_wh']

    image_size = (image_wh, image_wh)
    image_shape = (image_wh, image_wh, 1)

    train_list, valid_list = create_tv_list(folder)
    print(f'Train size: {len(train_list)}, valid size: {len(valid_list)}')

    train_df = pd.DataFrame(train_list, columns=['fname', 'class'])
    valid_df = pd.DataFrame(valid_list, columns=['fname', 'class'])

    model = None
    if 'NM' in model_name:
        model_name = 'NM'
        model = NASNetMobile(include_top=True,
                             weights=None,
                             input_tensor=None,
                             input_shape=image_shape,
                             pooling='max',
                             classes=6)
    elif 'NL' in model_name:
        model_name = 'NL'
        model = NASNetLarge(include_top=True,
                            weights=None,
                            input_tensor=None,
                            input_shape=image_shape,
                            pooling='max',
                            classes=6)
    elif 'XC' in model_name:
        model_name = 'XC'
        model = Xception(include_top=True,
                         weights=None,
                         input_tensor=None,
                         input_shape=image_shape,
                         pooling='max',
                         classes=6)
    elif 'D21' in model_name:
        model_name = 'D21'
        model = DenseNet201(include_top=True,
                            weights=None,
                            input_tensor=None,
                            input_shape=image_shape,
                            pooling='max',
                            classes=6)
    elif 'IV3' in model_name:
        model_name = 'IV3'
        model = InceptionV3(include_top=True,
                            weights=None,
                            input_tensor=None,
                            input_shape=image_shape,
                            pooling='max',
                            classes=6)
    elif 'SC' in model_name:
        model_name = 'SC'
        model = simple_cnn(input_shape=image_shape, classes=6)
    else:
        model_name = 'IR2'
        model = InceptionResNetV2(include_top=True,
                                  weights=None,
                                  input_tensor=None,
                                  input_shape=image_shape,
                                  pooling='max',
                                  classes=6)

    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=lr_schedule(0)),
                  metrics=['accuracy'])
    model.summary()

    # Image generator does data augmentation:
    datagen = data_generator()
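    # `data_generator` is a project helper that is not shown here; a typical
    # augmenting ImageDataGenerator (an assumption, not the original helper)
    # would look roughly like:
    #     ImageDataGenerator(rescale=1. / 255,
    #                        rotation_range=10,
    #                        width_shift_range=0.1,
    #                        height_shift_range=0.1,
    #                        zoom_range=0.1)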

    train_gen = datagen.flow_from_dataframe(dataframe=train_df,
                                            directory=folder,
                                            x_col="fname",
                                            y_col="class",
                                            class_mode="categorical",
                                            target_size=image_size,
                                            color_mode='grayscale',
                                            batch_size=batch_size,
                                            shuffle=False)

    valid_gen = datagen.flow_from_dataframe(dataframe=valid_df,
                                            directory=folder,
                                            x_col="fname",
                                            y_col="class",
                                            class_mode="categorical",
                                            target_size=image_size,
                                            color_mode='grayscale',
                                            batch_size=batch_size,
                                            shuffle=False)

    # Prepare model saving directory.
    save_dir = Path(os.path.dirname(
        os.path.realpath(__file__))).joinpath('models')
    if not save_dir.is_dir():
        save_dir.mkdir(exist_ok=True)
    filepath = f'{str(save_dir)}/{MODEL_NAMES[model_name]}'
    print(f'{filepath}\n')

    # Prepare callbacks for model saving and for learning rate adjustment.
    checkpoint = ModelCheckpoint(filepath=filepath,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True)

    lr_scheduler = LearningRateScheduler(lr_schedule)
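    # `lr_schedule` is defined elsewhere in this project; a common step-decay
    # schedule (a sketch under that assumption, not the original code) is:
    #     def lr_schedule(epoch):
    #         lr = 1e-3
    #         if epoch > 80:
    #             lr *= 1e-2
    #         elif epoch > 40:
    #             lr *= 1e-1
    #         return lr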

    lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                                   cooldown=0,
                                   patience=5,
                                   min_lr=0.5e-6)

    callbacks = [checkpoint, lr_reducer, lr_scheduler]

    # Fit the model on the batches generated by datagen.flow_from_dataframe().
    steps_per_epoch = int(len(train_list) / batch_size)
    validation_steps = int(len(valid_list) / batch_size)
    history = model.fit_generator(generator=train_gen,
                                  steps_per_epoch=steps_per_epoch,
                                  validation_data=valid_gen,
                                  validation_steps=validation_steps,
                                  epochs=epoch_size,
                                  use_multiprocessing=False,
                                  verbose=1,
                                  workers=4,
                                  callbacks=callbacks)

    # Score the trained model on the validation generator.
    scores = model.evaluate_generator(generator=valid_gen,
                                      steps=validation_steps,
                                      verbose=1)
    print('Validation loss:', scores[0])
    print('Validation accuracy:', scores[1])

    # Save score in configuration file
    system_config[f'{model_name}_Accuracy'] = scores[1]
    save_config()

    return history
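
# The docstring above describes autoencoders in general, while the function itself
# trains a classifier backbone. For comparison, a minimal Keras denoising
# autoencoder (an illustrative sketch, not part of this project) can be written as:

from keras.layers import Input, Dense
from keras.models import Model


def build_denoising_autoencoder(input_dim=784, encoding_dim=32):
    # The encoder compresses the input and the decoder reconstructs it;
    # denoising is obtained by fitting noisy inputs against clean targets.
    inputs = Input(shape=(input_dim,))
    encoded = Dense(encoding_dim, activation='relu')(inputs)
    decoded = Dense(input_dim, activation='sigmoid')(encoded)
    autoencoder = Model(inputs, decoded)
    autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
    return autoencoder

# Usage sketch: build_denoising_autoencoder().fit(x_noisy, x_clean, epochs=10, batch_size=128)
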
from keras import layers
from keras.layers import Input
from keras.applications.inception_v3 import InceptionV3
from keras.applications.densenet import DenseNet201

from keras.optimizers import Adam
from keras.callbacks import ReduceLROnPlateau, EarlyStopping

import cv2
import csv

import matplotlib.pyplot as plt

input_shape = [500, 400, 3]
model_input = Input(shape=input_shape)

denseNet = DenseNet201(input_shape=input_shape,
                       input_tensor=model_input,
                       include_top=False,
                       weights=None)

for layer in denseNet.layers:
    layer.trainable = True

denseNet_last_layer = denseNet.get_layer('relu')
print('last layer output shape:', denseNet_last_layer.output_shape)
denseNet_last_output = denseNet_last_layer.output

# Flatten the output layer to 1 dimension
x_denseNet = layers.GlobalMaxPooling2D()(denseNet_last_output)
# Add a fully connected layer with 512 hidden units and ReLU activation
x_denseNet = layers.Dense(512, activation='relu')(x_denseNet)
# Add a dropout rate of 0.5
x_denseNet = layers.Dropout(0.5)(x_denseNet)
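
# A classification head and Model wrapper would typically follow the dropout layer;
# the 10-class softmax below is a placeholder assumption, not taken from the original code.
from keras.models import Model

x_denseNet = layers.Dense(10, activation='softmax')(x_denseNet)  # placeholder class count
model_denseNet = Model(inputs=model_input, outputs=x_denseNet)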
def get_test_neural_net(type):
    model = None
    if type == 'mobilenet_small':
        from keras.applications.mobilenet import MobileNet
        model = MobileNet((128, 128, 3),
                          depth_multiplier=1,
                          alpha=0.25,
                          include_top=True,
                          weights='imagenet')
    elif type == 'mobilenet':
        from keras.applications.mobilenet import MobileNet
        model = MobileNet((224, 224, 3),
                          depth_multiplier=1,
                          alpha=1.0,
                          include_top=True,
                          weights='imagenet')
    elif type == 'mobilenet_v2':
        from keras.applications.mobilenetv2 import MobileNetV2
        model = MobileNetV2((224, 224, 3),
                            depth_multiplier=1,
                            alpha=1.4,
                            include_top=True,
                            weights='imagenet')
    elif type == 'resnet50':
        from keras.applications.resnet50 import ResNet50
        model = ResNet50(input_shape=(224, 224, 3),
                         include_top=True,
                         weights='imagenet')
    elif type == 'inception_v3':
        from keras.applications.inception_v3 import InceptionV3
        model = InceptionV3(input_shape=(299, 299, 3),
                            include_top=True,
                            weights='imagenet')
    elif type == 'inception_resnet_v2':
        from keras.applications.inception_resnet_v2 import InceptionResNetV2
        model = InceptionResNetV2(input_shape=(299, 299, 3),
                                  include_top=True,
                                  weights='imagenet')
    elif type == 'xception':
        from keras.applications.xception import Xception
        model = Xception(input_shape=(299, 299, 3),
                         include_top=True,
                         weights='imagenet')
    elif type == 'densenet121':
        from keras.applications.densenet import DenseNet121
        model = DenseNet121(input_shape=(224, 224, 3),
                            include_top=True,
                            weights='imagenet')
    elif type == 'densenet169':
        from keras.applications.densenet import DenseNet169
        model = DenseNet169(input_shape=(224, 224, 3),
                            include_top=True,
                            weights='imagenet')
    elif type == 'densenet201':
        from keras.applications.densenet import DenseNet201
        model = DenseNet201(input_shape=(224, 224, 3),
                            include_top=True,
                            weights='imagenet')
    elif type == 'nasnetmobile':
        from keras.applications.nasnet import NASNetMobile
        model = NASNetMobile(input_shape=(224, 224, 3),
                             include_top=True,
                             weights='imagenet')
    elif type == 'nasnetlarge':
        from keras.applications.nasnet import NASNetLarge
        model = NASNetLarge(input_shape=(331, 331, 3),
                            include_top=True,
                            weights='imagenet')
    elif type == 'vgg16':
        from keras.applications.vgg16 import VGG16
        model = VGG16(input_shape=(224, 224, 3),
                      include_top=False,
                      pooling='avg',
                      weights='imagenet')
    elif type == 'vgg19':
        from keras.applications.vgg19 import VGG19
        model = VGG19(input_shape=(224, 224, 3),
                      include_top=False,
                      pooling='avg',
                      weights='imagenet')
    return model
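
# Usage sketch (assuming the ImageNet weights can be downloaded):
#     model = get_test_neural_net('densenet201')
#     model.summary()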
Example 4
def main(args):

    # Hyper-parameters
    BATCH_SIZE = 1
    EPOCHS = 25
    SAVE_DIR = 'results'
    TRAIN_DATA = '/vol/bitbucket/qn14/train_lstm_data.p'
    VAL_DATA = '/vol/bitbucket/qn14/validate_lstm_data.p'
    MODEL_NAME = args[0]

    # Set up network training instance
    # Define CNN model
    base_model = DenseNet201(include_top=False, input_shape=(224,224,3), 
                        weights='imagenet')
    x = GlobalAveragePooling2D()(base_model.output)

    cnn = Model(inputs=base_model.input, outputs = x)

    # define LSTM model
    model = Sequential()
    model.add(TimeDistributed(cnn, input_shape=(5,224,224,3)))
    model.add(LSTM(128, return_sequences=True))
    model.add(LSTM(128))
    model.add(Dense(2))

    opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)

    model.compile(loss='mean_squared_error', optimizer=opt)


    # Prepare data for training
    [X_train,y_train, _ ] = pickle.load(open(TRAIN_DATA, 'rb'))
    [X_val,y_val, _ ] = pickle.load(open(VAL_DATA, 'rb'))
    X_train = np.array(X_train)
    X_val = np.array(X_val)
    
    # reshape input to be [samples, time steps, features]
    # X_train = numpy.reshape(X_train, (X_train.shape[0], 1, X_train.shape[1]))
    # X_val = numpy.reshape(X_val, (X_val.shape[0], 1, X_val.shape[1]))

    y_train = np.array([item[0] for item in y_train])
    y_val = np.array([item[0] for item in y_val])
    
    print(X_train.shape)
    print(y_train.shape)
    print(X_val.shape)
    print(y_val.shape)
    
    # Normalise input
    X_train = X_train.astype('float32')
    X_val = X_val.astype('float32')
    X_train /= 255
    X_val /= 255

    # Normalise output
    y_train = y_train.astype('float32')
    y_val = y_val.astype('float32')
    y_train /= 255
    y_val /= 255

    # Train the CNN
    history = customValidationCallback()
    model.fit(X_train, y_train, 
        epochs=EPOCHS, batch_size=BATCH_SIZE,
        validation_data = (X_val, y_val),
        callbacks = [history]
    )

    history_data = {
        'loss_history': history.losses,
        'val_error_means': history.val_error_means,
        'val_error_stds': history.val_error_stds, 
    }

    model.save(MODEL_NAME + '.h5')


    pickle.dump(history_data, open(SAVE_DIR + '/' + MODEL_NAME + '.p', 'wb'))

    history = None
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
from PIL import Image
from keras.applications.densenet import DenseNet201, preprocess_input
from keras.callbacks import ModelCheckpoint
from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import confusion_matrix, classification_report

base_model = DenseNet201(weights='imagenet', include_top=False)

x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(
    x
)  # we add dense layers so that the model can learn more complex functions and classify for better results.
x = Dense(1024, activation='relu')(x)  # dense layer 2
x = Dense(512, activation='relu')(x)  # dense layer 3
preds = Dense(120,
              activation='softmax')(x)  # final layer with softmax activation

model = Model(inputs=base_model.input, outputs=preds)
# specify the inputs
# specify the outputs
# now a model has been created based on our architecture

for i, layer in enumerate(model.layers):
    # print layer indices and names (useful when deciding which layers to freeze)
    print(i, layer.name)
Example 6
    def make_model(self, hp_value):
        # set architecture
        input_img = Input(shape=self.input_shape)
        x = Conv2D(filters=3, kernel_size=3, padding="same",
                   activation="relu")(input_img)
        if hp_value["network"] == "VGG16":
            transfer = VGG16(include_top=False, weights=None, input_tensor=x)
        if hp_value["network"] == "VGG19":
            transfer = VGG19(include_top=False, weights=None, input_tensor=x)
        if hp_value["network"] == "DenseNet121":
            transfer = DenseNet121(include_top=False,
                                   weights=None,
                                   input_tensor=x)
        if hp_value["network"] == "DenseNet169":
            transfer = DenseNet169(include_top=False,
                                   weights=None,
                                   input_tensor=x)
        if hp_value["network"] == "DenseNet201":
            transfer = DenseNet201(include_top=False,
                                   weights=None,
                                   input_tensor=x)
        if hp_value["network"] == "InceptionV3":
            transfer = InceptionV3(include_top=False,
                                   weights=None,
                                   input_tensor=x)
        if hp_value["network"] == "ResNet50":
            transfer = ResNet50(include_top=False,
                                weights=None,
                                input_tensor=x)
        if hp_value["network"] == "Xception":
            transfer = Xception(include_top=False,
                                weights=None,
                                input_tensor=x)

        top_model = Sequential()
        top_model.add(Flatten(input_shape=transfer.output_shape[1:]))
        top_model.add(Dense(hp_value["dense_units1"], activation='relu'))
        #    top_model.add(Dropout(0.5))
        if hp_value["dense_layer_num"] > 1:
            top_model.add(Dense(2, activation='softmax'))

        model = Model(inputs=input_img, outputs=top_model(transfer.output))

        #    model = Model(input_img, output)
        # set optimizer
        if hp_value["optimizer"] == "Adam":
            opt_generator = Adam(lr=hp_value["learning_rate"],
                                 beta_1=0.9,
                                 beta_2=0.999,
                                 epsilon=1e-08)
        elif hp_value["optimizer"] == "SGD":
            opt_generator = SGD(lr=hp_value["learning_rate"],
                                momentum=hp_value["momentum"],
                                decay=0.0,
                                nesterov=False)

        # set loss function
        if hp_value["if_loss_ambiguous"]:

            def loss_amb(y_true, y_pred):
                return loss_ambiguous(y_true, y_pred, eps=hp_value["eps"])

            loss = loss_amb
        else:
            loss = "binary_crossentropy"

        model.summary()

        if int(self.nb_gpus) > 1:
            model_multiple_gpu = multi_gpu_model(model, gpus=self.nb_gpus)
        else:
            model_multiple_gpu = model

        # compile the model
        model_multiple_gpu.compile(loss=loss,
                                   optimizer=opt_generator,
                                   metrics=['acc'])

        return model, model_multiple_gpu
Example 7
    from keras.applications.densenet import preprocess_input
    preprocessing_function = preprocess_input
    base_model = DenseNet121(weights='imagenet',
                             include_top=False,
                             input_shape=(HEIGHT, WIDTH, 3))
elif args.model == "DenseNet169":
    from keras.applications.densenet import preprocess_input
    preprocessing_function = preprocess_input
    base_model = DenseNet169(weights='imagenet',
                             include_top=False,
                             input_shape=(HEIGHT, WIDTH, 3))
elif args.model == "DenseNet201":
    from keras.applications.densenet import preprocess_input
    preprocessing_function = preprocess_input
    base_model = DenseNet201(weights='imagenet',
                             include_top=False,
                             input_shape=(HEIGHT, WIDTH, 3))
elif args.model == "NASNetLarge":
    from keras.applications.nasnet import preprocess_input
    preprocessing_function = preprocess_input
    base_model = NASNetLarge(weights='imagenet',
                             include_top=True,
                             input_shape=(HEIGHT, WIDTH, 3))
elif args.model == "NASNetMobile":
    from keras.applications.nasnet import preprocess_input
    preprocessing_function = preprocess_input
    base_model = NASNetMobile(weights='imagenet',
                              include_top=False,
                              input_shape=(HEIGHT, WIDTH, 3))
else:
    raise ValueError("The model you requested is not supported in Keras")
Example 8
def get_tst_neural_net(type):
    model = None
    custom_objects = dict()
    if type == 'mobilenet_small':
        try:
            from keras.applications.mobilenet import MobileNet
        except:
            from tensorflow.keras.applications.mobilenet import MobileNet
        model = MobileNet((128, 128, 3), depth_multiplier=1, alpha=0.25, include_top=True, weights='imagenet')
    elif type == 'mobilenet':
        try:
            from keras.applications.mobilenet import MobileNet
        except:
            from tensorflow.keras.applications.mobilenet import MobileNet
        model = MobileNet((224, 224, 3), depth_multiplier=1, alpha=1.0, include_top=True, weights='imagenet')
    elif type == 'mobilenet_v2':
        try:
            from keras.applications.mobilenet_v2 import MobileNetV2
        except:
            from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2
        model = MobileNetV2((224, 224, 3), alpha=1.4, include_top=True, weights='imagenet')
    elif type == 'resnet50':
        try:
            from keras.applications.resnet50 import ResNet50
        except:
            from tensorflow.keras.applications.resnet50 import ResNet50
        model = ResNet50(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'inception_v3':
        try:
            from keras.applications.inception_v3 import InceptionV3
        except:
            from tensorflow.keras.applications.inception_v3 import InceptionV3
        model = InceptionV3(input_shape=(299, 299, 3), include_top=True, weights='imagenet')
    elif type == 'inception_resnet_v2':
        try:
            from keras.applications.inception_resnet_v2 import InceptionResNetV2
        except:
            from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2
        model = InceptionResNetV2(input_shape=(299, 299, 3), include_top=True, weights='imagenet')
    elif type == 'xception':
        try:
            from keras.applications.xception import Xception
        except:
            from tensorflow.keras.applications.xception import Xception
        model = Xception(input_shape=(299, 299, 3), include_top=True, weights='imagenet')
    elif type == 'densenet121':
        try:
            from keras.applications.densenet import DenseNet121
        except:
            from tensorflow.keras.applications.densenet import DenseNet121
        model = DenseNet121(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'densenet169':
        try:
            from keras.applications.densenet import DenseNet169
        except:
            from tensorflow.keras.applications.densenet import DenseNet169
        model = DenseNet169(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'densenet201':
        try:
            from keras.applications.densenet import DenseNet201
        except:
            from tensorflow.keras.applications.densenet import DenseNet201
        model = DenseNet201(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'nasnetmobile':
        try:
            from keras.applications.nasnet import NASNetMobile
        except:
            from tensorflow.keras.applications.nasnet import NASNetMobile
        model = NASNetMobile(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'nasnetlarge':
        try:
            from keras.applications.nasnet import NASNetLarge
        except:
            from tensorflow.keras.applications.nasnet import NASNetLarge
        model = NASNetLarge(input_shape=(331, 331, 3), include_top=True, weights='imagenet')
    elif type == 'vgg16':
        try:
            from keras.applications.vgg16 import VGG16
        except:
            from tensorflow.keras.applications.vgg16 import VGG16
        model = VGG16(input_shape=(224, 224, 3), include_top=False, pooling='avg', weights='imagenet')
    elif type == 'vgg19':
        try:
            from keras.applications.vgg19 import VGG19
        except:
            from tensorflow.keras.applications.vgg19 import VGG19
        model = VGG19(input_shape=(224, 224, 3), include_top=False, pooling='avg', weights='imagenet')
    elif type == 'multi_io':
        model = get_custom_multi_io_model()
    elif type == 'multi_model_layer_1':
        model = get_custom_model_with_other_model_as_layer()
    elif type == 'multi_model_layer_2':
        model = get_small_model_with_other_model_as_layer()
    elif type == 'Conv2DTranspose':
        model = get_Conv2DTranspose_model()
    elif type == 'RetinaNet':
        model, custom_objects = get_RetinaNet_model()
    elif type == 'conv3d_model':
        model = get_simple_3d_model()
    elif type == 'conv1d_model':
        model = get_simple_1d_model()
    return model, custom_objects
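
# Usage sketch: unlike get_test_neural_net above, this variant also returns the
# custom objects needed to reload the model (illustrative call, not from the source):
#     model, custom_objects = get_tst_neural_net('resnet50')
#     model.summary()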
Example 9
    train_dir,
    target_size=(IMG_WIDTH, IMG_HEIGHT),
    batch_size=BS,
    class_mode='categorical',
    subset='training',
    shuffle=True)  # set as training data

validation_generator = train_datagen.flow_from_directory(
    train_dir,  # same directory as training data
    target_size=(IMG_WIDTH, IMG_HEIGHT),
    batch_size=BS,
    class_mode='categorical',
    subset='validation')  # set as validation data

base_model = DenseNet201(include_top=False,
                         weights='imagenet',
                         input_shape=(IMG_WIDTH, IMG_HEIGHT, 3))

x = base_model.output
x = AveragePooling2D((7, 7))(x)
x = Flatten()(x)

concat1 = GlobalAveragePooling2D()(
    base_model.get_layer("conv5_block22_concat").output)
x = Concatenate()([x, concat1])

predictions = Dense(196, activation='softmax')(x)
model = Model(inputs=base_model.inputs, outputs=predictions)

opt = Adam(lr=3e-4,
           beta_1=0.9,
Example 10
    # and a logistic layer
    predictions = Dense(14, activation="sigmoid")(x)
    # this is the model we will train
    model = Model(inputs=densenet_121_base_model.input, outputs=predictions)
elif model_structure == '169':
    densenet_169_base_model = DenseNet169(include_top=False,
                                          weights='imagenet',
                                          input_shape=(imageDim, imageDim, 3))
    x = densenet_169_base_model.output
    x = GlobalAveragePooling2D()(x)
    predictions = Dense(14, activation="sigmoid")(x)
    # this is the model we will train
    model = Model(inputs=densenet_169_base_model.input, outputs=predictions)
elif model_structure == '201':
    densenet_201_base_model = DenseNet201(include_top=False,
                                          weights='imagenet',
                                          input_shape=(imageDim, imageDim, 3))
    x = densenet_201_base_model.output
    x = GlobalAveragePooling2D()(x)
    predictions = Dense(14, activation="sigmoid")(x)
    # this is the model we will train
    model = Model(inputs=densenet_201_base_model.input, outputs=predictions)
else:
    print('error model structure : ' + model_structure)
    exit()

#model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[])

#mp = sys.argv[4]
#model = keras.models.load_model(mp)
#fig, ax = plt.subplots(nrows=2, ncols=3)
#ax = ax.ravel()
#plt.tight_layout(pad=0.2, h_pad=2)
#
#for i in range(6):
#    ax[i].imshow(x_train[i])
#    ax[i].set_title('has_cactus = {}'.format(y_train.iloc[i]))

# create CNN model using DenseNet
batch_size = 32
epochs = 15
steps = x_train.shape[0] // batch_size
# Input dimensions
inputs = Input(shape=img_dim)
# DenseNet
densenet201 = DenseNet201(include_top=False)(inputs)
# Fully connected layer
flat1 = Flatten()(densenet201)
dense1 = Dense(units=256, use_bias=True)(flat1)
batchnorm1 = BatchNormalization()(dense1)
act1 = Activation(activation='relu')(batchnorm1)
drop1 = Dropout(rate=0.5)(act1)
# Output
out = Dense(units=1, activation='sigmoid')(drop1)

# Create Model
model = Model(inputs=inputs, outputs=out)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])

# Fix plateau for learning rate
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
Example 12
    def __init__(self, net='VGG16', weights='imagenet', size=None):

        print('[+] initialize: {}, weights: {}'.format(net, weights))

        if net == 'Xception':
            from keras.applications.xception import Xception
            from keras.applications.xception import preprocess_input
            self.model = Xception(weights=weights,
                                  include_top=False,
                                  pooling='avg')
            self.input_size = size if size is not None else (224, 224)
        elif net == 'VGG16':
            print('[+] initialize using VGG16...')
            from keras.applications.vgg16 import VGG16
            from keras.applications.vgg16 import preprocess_input
            self.model = VGG16(weights=weights,
                               include_top=False,
                               pooling='avg')
            self.input_size = size if size is not None else (224, 224)
        elif net == 'VGG19':
            from keras.applications.vgg19 import VGG19
            from keras.applications.vgg19 import preprocess_input
            self.model = VGG19(weights=weights,
                               include_top=False,
                               pooling='avg')
            self.input_size = size if size is not None else (224, 224)
        elif net == 'ResNet50':
            from keras.applications.resnet50 import ResNet50
            from keras.applications.resnet50 import preprocess_input
            self.model = ResNet50(weights=weights,
                                  include_top=False,
                                  pooling='avg')
            self.input_size = size if size is not None else (224, 224)
        elif net == 'InceptionV3':
            from keras.applications.inception_v3 import InceptionV3
            from keras.applications.inception_v3 import preprocess_input
            self.model = InceptionV3(weights=weights,
                                     include_top=False,
                                     pooling='avg')
            self.input_size = size if size is not None else (299, 299)
        elif net == 'InceptionResNetV2':
            from keras.applications.inception_resnet_v2 import InceptionResNetV2
            from keras.applications.inception_resnet_v2 import preprocess_input
            self.model = InceptionResNetV2(weights=weights,
                                           include_top=False,
                                           pooling='avg')
            self.input_size = size if size is not None else (299, 299)
        elif net == 'DenseNet121':
            from keras.applications.densenet import DenseNet121
            from keras.applications.densenet import preprocess_input
            self.model = DenseNet121(weights=weights,
                                     include_top=False,
                                     pooling='avg')
            self.input_size = size if size is not None else (224, 224)
        elif net == 'DenseNet169':
            from keras.applications.densenet import DenseNet169
            from keras.applications.densenet import preprocess_input
            self.model = DenseNet169(weights=weights,
                                     include_top=False,
                                     pooling='avg')
            self.input_size = size if size is not None else (224, 224)
        elif net == 'DenseNet201':
            from keras.applications.densenet import DenseNet201
            from keras.applications.densenet import preprocess_input
            self.model = DenseNet201(weights=weights,
                                     include_top=False,
                                     pooling='avg')
            self.input_size = size if size is not None else (224, 224)
        elif net == 'NASNetLarge':
            from keras.applications.nasnet import NASNetLarge
            from keras.applications.nasnet import preprocess_input
            self.model = NASNetLarge(weights=weights,
                                     include_top=False,
                                     pooling='avg')
            self.input_size = size if size is not None else (224, 224)
        elif net == 'NASNetMobile':
            from keras.applications.nasnet import NASNetMobile
            from keras.applications.nasnet import preprocess_input
            self.model = NASNetMobile(weights=weights,
                                      include_top=False,
                                      pooling='avg')
            self.input_size = size if size is not None else (224, 224)
        else:
            print("FeatureExtractor: Net not found")

        self.preprocess_input = preprocess_input
Example 13
train_df["image"] = train_df["image"].apply(lambda x: x + ".jpg")
print(train_df.head())

train_df["label"] = train_df["label"].apply(lambda x: str(x))
print(train_df.head())

test_df = pd.read_csv(test_df_path)

test_df["image"] = test_df["image"].apply(lambda x: x + ".jpg")
print(test_df.head())

test_df["label"] = test_df["label"].apply(lambda x: str(x))
print(test_df.head())

pre_trained_model = DenseNet201(input_shape=(500, 400, 3),
                                include_top=False,
                                weights="imagenet")

for layer in pre_trained_model.layers:
    print(layer.name)
    layer.trainable = False

print(len(pre_trained_model.layers))

last_layer = pre_trained_model.get_layer('relu')
print('last layer output shape:', last_layer.output_shape)
last_output = last_layer.output

# Flatten the output layer to 1 dimension
x = layers.GlobalMaxPooling2D()(last_output)
# Add a fully connected layer with 512 hidden units and ReLU activation
Example 14
def get_base_model(model_type, train_type):  
    def add_regularizers_l2(model):
        for layer in model.layers:
            if type(layer)==keras.engine.training.Model:
                add_regularizers_l2(layer)
            elif hasattr(layer, 'kernel_regularizer'):
                layer.kernel_regularizer = regularizers.l2(0.01)
    #             print(layer, layer.kernel_regularizer)
    
    def lock_some_layer(model):
        for layer in model.layers:
            if type(layer)==keras.engine.training.Model:
                for l in layer.layers[:115]:
                    l.trainable = False
                for l in layer.layers[115:]:
                    l.trainable = True
    base_model = None
    if model_type=="MobileNet":
        base_model = MobileNet(include_top=False, 
                               weights=None,
                               input_tensor=None,
                               pooling=None)
    
    # elif model_type=="DenseNet":
    #     !git clone https://github.com/titu1994/DenseNet.git
    #     import sys
    #     sys.path.append('DenseNet')
    #     import densenet
    #     base_model = densenet.DenseNet((64,64,3), include_top=False, depth=40, nb_dense_block=4,
    #                               growth_rate=32, nb_filter=12, dropout_rate=0.0,
    #                               bottleneck=True, reduction=0.5, weights=None)
    #     base_model = models.Model(base_model.input, base_model.layers[-2].output)
        
    elif model_type=="DenseNet121":
        base_model = DenseNet121(include_top=False,
                                 weights=None,
                                 input_tensor=None)

    elif model_type=="DenseNet201":
        base_model = DenseNet201(include_top=False,
                                 weights=None,
                                 input_tensor=None)
    
    elif model_type=="Xception":
        base_model = Xception(include_top=False, 
                              weights=None,
                              input_tensor=None)

    elif model_type=="ResNet50":
        base_model = ResNet50(include_top=False, 
                                  weights=None,
                                  input_tensor=None)
        
    elif model_type=="VGG16":
        base_model = VGG16(include_top=False, 
                                  weights=None,
                                  input_tensor=None)
        
    elif model_type=="NASNet":
        base_model = NASNetLarge(include_top=False, 
                                  weights=None,
                                  input_tensor=None)
    elif model_type=="ResNet":
        base_model = resnet()
    return base_model
Example 15
# ## Build the network
width = height = 512
import keras
import keras.callbacks
from keras.layers import Dense, GlobalAveragePooling2D, Dropout
from keras.models import Model
from keras.optimizers import Adam, rmsprop, SGD
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
# from keras.applications.xception import Xception,preprocess_input
# from keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input
from keras.applications.densenet import DenseNet201, preprocess_input

base_model = DenseNet201(weights=None,
                         input_shape=(width, height, 3),
                         include_top=False)
model_out = base_model.output
avg = GlobalAveragePooling2D()(model_out)
dense = Dense(256, activation='relu')(avg)
dense = Dropout(0.5)(dense)
predictions = Dense(11, activation='softmax')(dense)
model_test = Model(inputs=base_model.input, outputs=predictions)


# ## Prediction
def build_csv(df_test, test_np, csv_name):
    print(test_np.shape)  # keep going
    all_prob = []
    for i in range(n_test):
        prob = str(test_np[i]).split('[')[1].split(']')[0].split(' ')
Example 16
#!/usr/bin/env python
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.applications.densenet import preprocess_input
from keras.applications.densenet import decode_predictions
from keras.applications.densenet import DenseNet201

# iteration count
_iter = 1
"""
    Main
"""
if __name__ == '__main__':
    # load the model
    model = DenseNet201()
    # load an image from file
    image = load_img('mug.jpg', target_size=(224, 224))
    # convert the image pixels to a numpy array
    image = img_to_array(image)
    # reshape data for the model
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
    # prepare the image for the DenseNet model
    image = preprocess_input(image)

    # predict the probability across all output classes
    for i in range(_iter):
        input('{} iteration, press any key to perform...'.format(i))
        yhat = model.predict(image)

    # return if not iter
    if not _iter: exit()
Example 17
    # Example to fine-tune on 3000 samples from Cifar10

    img_rows, img_cols = 224, 224  # Resolution of inputs
    channel = 3
    num_classes = 10
    batch_size = 16
    nb_epoch = 10

    # Load Cifar10 data. Please implement your own load_data() module for your own dataset
    X_train, Y_train, X_valid, Y_valid = load_cifar10_data(img_rows, img_cols)

    # Load our model
    # model = densenet169_model(img_rows=img_rows, img_cols=img_cols, color_type=channel, num_classes=num_classes)

    # load keras model
    model = DenseNet201(weights=None, classes=10)
    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # Start Fine-tuning
    model.fit(
        X_train,
        Y_train,
        batch_size=batch_size,
        epochs=nb_epoch,
        shuffle=True,
        verbose=1,
        validation_data=(X_valid, Y_valid),
    )
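
    # An evaluation step would typically follow the fine-tuning above (a sketch,
    # assuming X_valid/Y_valid are the held-out split loaded earlier):
    #     scores = model.evaluate(X_valid, Y_valid, verbose=0)
    #     print('Validation loss and accuracy:', scores)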
Example 18
                                                    target_size=(img_height,
                                                                 img_width),
                                                    batch_size=n_batch_size,
                                                    shuffle=True,
                                                    subset='training')

vali_generator = train_datagen.flow_from_directory('./train',
                                                   target_size=(img_height,
                                                                img_width),
                                                   batch_size=n_batch_size,
                                                   shuffle=True,
                                                   subset='validation')

# Build the model on top of a pre-trained DenseNet201
net = DenseNet201(input_shape=(img_height, img_width, 3),
                  include_top=False,
                  weights='imagenet',
                  pooling='max')

# Add Dense layers and a softmax output to produce per-class probabilities
x = net.output
x = Dense(2048, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(256, activation='relu')(x)
output_layer = Dense(6, activation='softmax', name='softmax')(x)

# Configure which network layers will be trained
model = Model(inputs=net.input, outputs=output_layer)

# Keep the ImageNet weights as the starting point rather than random initialization, so freeze the bottom layers
FREEZE_LAYERS = 1
for layer in model.layers[:FREEZE_LAYERS]:
    layer.trainable = False
Example 19
# Flask utils
from flask import Flask, redirect, url_for, request, render_template
from werkzeug.utils import secure_filename
from gevent.pywsgi import WSGIServer

# Define a flask app
app = Flask(__name__)

# Using a pretrained model from Keras
# In this case we are using DenseNet201

from keras.applications.densenet import DenseNet201
model = DenseNet201(include_top=True,
                    weights='imagenet',
                    input_tensor=None,
                    input_shape=None,
                    pooling=None,
                    classes=1000)
print('Model loaded. Check http://127.0.0.1:5000/')


def model_predict(img_path, model):
    img = image.load_img(img_path, target_size=(224, 224))

    # Preprocessing the image
    x = image.img_to_array(img)
    # x = np.true_divide(x, 255)
    x = np.expand_dims(x, axis=0)

    #Changed mode from caffe to 'torch' so the model makes accurate predictions with DenseNet
    x = preprocess_input(x, mode='torch')
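    # A prediction call would typically complete this helper (an assumed
    # continuation, not original code):
    #     preds = model.predict(x)
    #     return preds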
from keras.applications.densenet import DenseNet201
from keras.preprocessing import image
from keras.applications.densenet import preprocess_input, decode_predictions
import numpy as np
import os
import sys

data_dir = sys.argv[1]

model = DenseNet201(weights='imagenet')

for filename in os.listdir(data_dir):
    imgfile = data_dir + '/' + filename
    img = image.load_img(imgfile, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    preds = model.predict(x)
    print(filename + ' --- ' + str(decode_predictions(preds, top=5)[0]))

Example 21
def main(args):

    # Hyper-parameters
    BATCH_SIZE = 32
    EPOCHS = 25
    SAVE_DIR = 'results'
    TRAIN_DATA = 'train_raw_data.p'
    VAL_DATA = 'validate_raw_data.p'
    MODEL_NAME = args[0]
    RANDOM_CROP_NUMBER = 1
    RESIZE_DIM = 256
    RANDOM_CROP_DIM = 224

    # Set up network training instance
    base_model = DenseNet201(include_top=False,
                             input_shape=(224, 224, 3),
                             weights='imagenet')

    # x = AveragePooling2D((7, 7), name='avg_pool')(base_model.output)
    x = GlobalAveragePooling2D()(base_model.output)
    # x = Flatten()(x)
    x = Dense(1000, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(2, activation='linear')(x)

    model = Model(inputs=base_model.input, outputs=x)

    opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)

    model.compile(loss='mean_squared_error', optimizer=opt)

    # Prepare data for training
    [X_train, y_train] = pickle.load(open(TRAIN_DATA, 'rb'))
    [X_val, y_val] = pickle.load(open(VAL_DATA, 'rb'))
    X_train = np.array(X_train)
    X_val = np.array(X_val)

    y_train = np.array([item[0] for item in y_train])
    y_val = np.array([item[0] for item in y_val])

    X_train, y_train, X_val, y_val = augment(X_train, y_train, X_val, y_val,
                                             args, RANDOM_CROP_NUMBER,
                                             RESIZE_DIM, RANDOM_CROP_DIM)

    print(X_train.shape)
    print(y_train.shape)
    print(X_val.shape)
    print(y_val.shape)

    # Normalise input
    n = X_train.shape[0]
    d1 = X_train[:n // 2, :].astype('float32')
    print(d1.shape)
    d2 = X_train[n // 2:, :].astype('float32')
    print(d2.shape)
    X_train = np.vstack((d1, d2))
    # X_train = X_train.astype('float32')
    X_val = X_val.astype('float32')
    X_train /= 255
    X_val /= 255

    # Normalise output
    y_train = y_train.astype('float32')
    y_val = y_val.astype('float32')
    y_train /= 255
    y_val /= 255

    # Train the CNN
    history = customValidationCallback()
    model.fit(X_train,
              y_train,
              epochs=EPOCHS,
              batch_size=BATCH_SIZE,
              validation_data=(X_val, y_val),
              callbacks=[history])

    history_data = {
        'loss_history': history.losses,
        'val_error_means': history.val_error_means,
        'val_error_stds': history.val_error_stds,
    }

    model.save(MODEL_NAME + '.h5')

    pickle.dump(history_data, open(SAVE_DIR + '/' + MODEL_NAME + '.p', 'wb'))

    history = None
Example 22
def build_model(params_dict, n_class):
    if '/' in params_dict['model_params']['pretrain_model']:
        model = load_model(params_dict['model_params']['pretrain_model'])
    else:
        imagenet_pretrain_model = params_dict['model_params']['pretrain_model'].split(':')[1]

        if imagenet_pretrain_model=='ResNet50':
            base_model = ResNet50(weights='imagenet', include_top=False, input_shape=params_dict['train_params']['img_shape'])
        elif imagenet_pretrain_model=='VGG16':
            base_model = VGG16(weights='imagenet', include_top=False, input_shape=params_dict['train_params']['img_shape'])
        elif imagenet_pretrain_model=='InceptionV3':
            base_model = InceptionV3(weights='imagenet', include_top=False, input_shape=params_dict['train_params']['img_shape'])
        elif imagenet_pretrain_model=='InceptionResNetV2':
            base_model = InceptionResNetV2(weights='imagenet', include_top=False, input_shape=params_dict['train_params']['img_shape'])
        elif imagenet_pretrain_model=='DenseNet201':
            base_model = DenseNet201(weights='imagenet', include_top=False, input_shape=params_dict['train_params']['img_shape'])
        elif imagenet_pretrain_model=='NASNetLarge':
            base_model = NASNetLarge(weights='imagenet', include_top=False, input_shape=params_dict['train_params']['img_shape'])
        else:
            print('parameter in config.model_params.pretrain_model should be either a path or imagenet:\'pretrain_model_name\'')

        # layer freeze:
        if params_dict['train_params']['freeze_layer']:
            for i in params_dict['train_params']['freeze_layer'] :
                base_model.layers[i].trainable = False

        x = base_model.output
        x = Flatten()(x)
        x = Dropout(params_dict['train_params']['dropout_rate'])(x)

        if params_dict['train_params']['weight_regularization']:
            reg = keras.regularizers.l1_l2(l1 = params_dict['train_params']['weight_regularization']['l1'],
                                                   l2 = params_dict['train_params']['weight_regularization']['l2'])
        else:
            reg = None
        predictions = Dense(n_class, activation='softmax', kernel_regularizer=reg)(x)


        model = Model(inputs=base_model.input, outputs=predictions)

    if params_dict['train_params']['optimizer']['optimizer']=='SGD':
        optimizer = keras.optimizers.SGD(lr=params_dict['train_params']['optimizer']['lr'],
                                         momentum=params_dict['train_params']['optimizer']['momentum'],
                                         decay=params_dict['train_params']['optimizer']['decay'],
                                         nesterov=params_dict['train_params']['optimizer']['nesterov'])
    elif params_dict['train_params']['optimizer']['optimizer']=='RMSprop':
        optimizer = keras.optimizers.RMSprop(lr=params_dict['train_params']['optimizer']['lr'],
                                             rho=params_dict['train_params']['optimizer']['rho'],
                                             epsilon=params_dict['train_params']['optimizer']['epsilon'],
                                             decay=params_dict['train_params']['optimizer']['decay'])
    elif params_dict['train_params']['optimizer']['optimizer']=='Adagrad':
        optimizer = keras.optimizers.Adagrad(lr=params_dict['train_params']['optimizer']['lr'],
                                             epsilon=params_dict['train_params']['optimizer']['epsilon'],
                                             decay=params_dict['train_params']['optimizer']['decay'])
    elif params_dict['train_params']['optimizer']['optimizer']=='Adam':
        optimizer = keras.optimizers.Adam(lr=params_dict['train_params']['optimizer']['lr'],
                                          epsilon=params_dict['train_params']['optimizer']['epsilon'],
                                          decay=params_dict['train_params']['optimizer']['decay'])

    model.compile(loss=params_dict['train_params']['loss'],
                  optimizer=optimizer, metrics=['accuracy'])
    
    model_path = params_dict['model_params']['model_dir']+'/{}.h5'.format(params_dict['model_params']['model_name'])

    if params_dict['train_params']['early_stop_round']!=None:
        earlystop = EarlyStopping(monitor='val_loss', patience=params_dict['train_params']['early_stop_round'], verbose=1)
        checkpoint = ModelCheckpoint(model_path, monitor='val_loss', save_best_only=True, verbose=1)
        callbacks = [checkpoint, earlystop]
    else:
        checkpoint = ModelCheckpoint(model_path, monitor='loss', save_best_only=True, verbose=1)
        callbacks = [checkpoint]
        
    return model, callbacks
Example 23
    def create_model():
        base_model = None
        if params["model"] == "InceptionV3":
            params["train_threshold"] = 249
            base_model = InceptionV3(weights='imagenet',
                                     include_top=False,
                                     input_tensor=None,
                                     input_shape=(params["img_width"],
                                                  params["img_height"], 3))
        elif params["model"] == "xception":
            params["train_threshold"] = 106
            base_model = Xception(weights='imagenet',
                                  include_top=False,
                                  input_tensor=None,
                                  input_shape=(params["img_width"],
                                               params["img_height"], 3))
        elif params["model"] == "InceptionResNetV2":
            params["train_threshold"] = 727
            base_model = InceptionResNetV2(weights='imagenet',
                                           include_top=False,
                                           input_tensor=None,
                                           input_shape=(params["img_width"],
                                                        params["img_height"],
                                                        3))
        elif params["model"] == "DenseNet121":
            params["train_threshold"] = 403
            base_model = DenseNet121(weights='imagenet',
                                     include_top=False,
                                     input_tensor=None,
                                     input_shape=(params["img_width"],
                                                  params["img_height"], 3))
        elif params["model"] == "DenseNet169":
            params["train_threshold"] = 571
            base_model = DenseNet169(weights='imagenet',
                                     include_top=False,
                                     input_tensor=None,
                                     input_shape=(params["img_width"],
                                                  params["img_height"], 3))
        elif params["model"] == "DenseNet201":
            params["train_threshold"] = 683
            base_model = DenseNet201(weights='imagenet',
                                     include_top=False,
                                     input_tensor=None,
                                     input_shape=(params["img_width"],
                                                  params["img_height"], 3))
        elif params["model"] == "ResNet50":
            params["train_threshold"] = 140
            base_model = ResNet50(weights='imagenet',
                                  include_top=False,
                                  input_tensor=None,
                                  pooling=None,
                                  input_shape=(params["img_width"],
                                               params["img_height"], 3))
        else:
            print("unknown model")

        count = 0
        modelx = base_model.output

        while count < params["dense_num"]:
            count += 1
            string = "dense" + str(count)

            if "pool" in params[string]:
                if params[string]["pool"] == "avg_poolx":
                    modelx = GlobalAveragePooling2D(
                        name=params[string]["pool"])(modelx)

            modelx = Dense(params[string]["num"],
                           activation=params[string]["activation"])(modelx)

            if "dropout" in params[string]:
                modelx = Dropout(params[string]["dropout"])(modelx)

        model = Model(inputs=base_model.input, outputs=modelx)

        for layer in base_model.layers:
            layer.trainable = False

        model.compile(loss=params["loss"],
                      optimizer=params["phase1_optimizer"],
                      metrics=params["metrics"])

        return model
Example 24
# -*- coding: utf-8 -*-

from keras.applications.xception import Xception
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras.applications.resnet50 import ResNet50
from keras.applications.inception_v3 import InceptionV3
from keras.applications.inception_resnet_v2 import InceptionResNetV2
from keras.applications.mobilenet import MobileNet
from keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201
from keras.applications.nasnet import NASNetLarge, NASNetMobile

xception = Xception()
vgg16 = VGG16()
vgg19 = VGG19()
res50 = ResNet50()
inception3 = InceptionV3()
inception_res2 = InceptionResNetV2()
mobile = MobileNet()
dense121 = DenseNet121()
dense169 = DenseNet169()
dense201 = DenseNet201()
nasnet_l = NASNetLarge()
nasnet_m = NASNetMobile()
Example 25
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

import tensorflow as tf
physical_devices = tf.config.experimental.list_physical_devices('GPU')
for physical_device in physical_devices:
    tf.config.experimental.set_memory_growth(physical_device, True)

models = [
    NASNetLarge(weights='imagenet', include_top=False),
    InceptionV3(weights='imagenet', include_top=False),
    MobileNetV2(weights='imagenet', include_top=False),
    Xception(weights='imagenet', include_top=False),
    DenseNet201(weights='imagenet', include_top=False),
    InceptionResNetV2(weights='imagenet', include_top=False)
]
models_name = [
    'NASNetLarge', 'InceptionV3', 'MobileNetV2', 'Xception', 'DenseNet201',
    'InceptionResNetV2'
]

csv_ok = pd.read_csv('d:\\dane\\HARRISON\\data_list.txt', header=None)
for model_id in range(len(models)):
    base_model = models[model_id]
    #print(models_name[model_id])
    x = base_model.output
    x = GlobalAveragePooling2D()(x)

    model = Model(inputs=base_model.input, outputs=x)
Example 26
BASE_MODEL = None
preprocessing_function = None

if NAME == "VGG19":
    HEIGHT, WIDTH = 224, 224
    BASE_MODEL = VGG19(include_top=False, weights="imagenet",
                       input_shape=(HEIGHT, WIDTH, DEPTH))
    preprocessing_function = vgg19_preprocess_input
elif NAME == "VGG16":
    HEIGHT, WIDTH = 224, 224
    BASE_MODEL = VGG16(include_top=False, weights="imagenet",
                       input_shape=(HEIGHT, WIDTH, DEPTH))
    preprocessing_function = vgg16_preprocess_input
elif NAME == "Densenet":
    HEIGHT, WIDTH = 128, 128
    BASE_MODEL = DenseNet201(include_top=False, weights="imagenet",
                             input_shape=(HEIGHT, WIDTH, DEPTH))
    preprocessing_function = densenet_preprocess_input
elif NAME == "efficientnet":
    HEIGHT, WIDTH = 128, 128
    BASE_MODEL = EfficientNetB5(include_top=False, weights="imagenet",
                                input_shape=(HEIGHT, WIDTH, DEPTH))
    preprocessing_function = preprocess_input
else:
    HEIGHT, WIDTH = 224, 224
    BASE_MODEL = VGG19(include_top=False, weights="imagenet",
                       input_shape=(HEIGHT, WIDTH, DEPTH))
    preprocessing_function = vgg19_preprocess_input


##################################################################################################
# Read details from CSV
Example 27
                                    height_shift_range=0.2,
                                    zoom_range=0.2,
                                    shear_range=0.2)
train_generator = train_data_gen.flow_from_directory(directory='assets/train/',
                                                     target_size=(224, 224),
                                                     batch_size=BATCH_SIZE,
                                                     class_mode='categorical')

valid_data_gen = ImageDataGenerator(rescale=1. / 255)
valid_generator = valid_data_gen.flow_from_directory(directory='assets/valid/',
                                                     target_size=(224, 224),
                                                     batch_size=BATCH_SIZE,
                                                     class_mode='categorical')

base_model = DenseNet201(weights='imagenet',
                         include_top=False,
                         input_shape=(224, 224, 3))
main = TimeDistributed(Bidirectional(CuDNNGRU(512)))(base_model.output)
main = SpatialDropout1D(0.5)(main)
main = Bidirectional(CuDNNGRU(512))(main)
main = Dropout(0.5)(main)
predictions = Dense(128,
                    activation='softmax',
                    kernel_regularizer=regularizers.l2(0.0001))(main)

model = Model(inputs=base_model.input, outputs=predictions)

model.summary()
model.compile(optimizer=Adam(lr=0.00003),
              loss='categorical_crossentropy',
              metrics=[top1_loss])
Example 28
                                           include_top=False)
        elif cur_model_name == 'nasnet':
            base_model = NASNetLarge(input_tensor=input_tensor,
                                     weights='imagenet',
                                     include_top=False)
        elif cur_model_name == 'densenet121':
            base_model = DenseNet121(input_tensor=input_tensor,
                                     weights='imagenet',
                                     include_top=False)
        elif cur_model_name == 'densenet169':
            base_model = DenseNet169(input_tensor=input_tensor,
                                     weights='imagenet',
                                     include_top=False)
        elif cur_model_name == 'densenet201':
            base_model = DenseNet201(input_tensor=input_tensor,
                                     weights='imagenet',
                                     include_top=False)
        x = base_model.output
        x = GlobalAveragePooling2D()(x)
        x = Dense(1024, activation='relu',
                  name='dense1024-' + cur_model_name)(x)
        feature_total.append(x)

    predictions = Dense(num_classes, activation='softmax',name='last-layer')\
        (concatenate(feature_total))

    if gpu_number > 1:
        with tf.device("/cpu:0"):
            model = Model(inputs=input_tensor, outputs=predictions)
        model = multi_gpu_model(model, gpus=gpu_number)
    else:
Example 29
    print('resume from checkpoint')
    message = job_name + ' b_end'
    send_signal.send(args.node, 10002, message)
    model = keras.models.load_model(save_file)
    message = job_name + ' c_end'
    send_signal.send(args.node, 10002, message)
else:
    print('train from start')
    model = models.Sequential()
    
    if '121' in args_model:
        base_model = DenseNet121(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
    elif '169' in args_model:
        base_model = DenseNet169(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
    elif '201' in args_model:
        base_model = DenseNet201(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
        
       
    model.add(base_model)
    #model.add(layers.Flatten())
    #model.add(layers.BatchNormalization())
    #model.add(layers.Dense(128, activation='relu'))
    #model.add(layers.Dropout(0.5))
    #model.add(layers.BatchNormalization())
    #model.add(layers.Dense(64, activation='relu'))
    #model.add(layers.Dropout(0.5))
    #model.add(layers.BatchNormalization())
    model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
    
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=args_lr),
Example 30
training_gen = generate_processed_batch(train_imdir, train_label, batchsize)
val_gen = generate_processed_batch(val_imdir, val_label, batchsize)

from keras.applications.densenet import DenseNet201

img_cols = img_rows  # Resolution of inputs (square images)
channel = 3
num_classes = 4
batch_size = batchsize
nb_epoch = 50

n = batchsize
# Load our model
base_model = DenseNet201(include_top=False,
                         weights='imagenet',
                         input_shape=(img_rows, img_cols, channel),
                         pooling=None,
                         classes=None)

x = base_model.output
x_avg = GlobalAveragePooling2D()(x)
zz = Dense(10, activation='softmax')(x_avg)

model = Model(inputs=base_model.input, outputs=zz)

xx = Dense(4, activation='softmax')(x_avg)
model_new = Model(inputs=base_model.input, outputs=xx)

model_new.summary()
sgd = SGD(lr=1e-03, decay=1e-6, momentum=0.9, nesterov=True)
model_new.compile(optimizer=sgd,