Code Example #1
File: utils_models.py Project: valeriomieuli/LLwE
def build_expert(model_name, input_shape, n_classes, weight_decay):
    if model_name == 'InceptionResNetV2':
        base_model = applications.inception_resnet_v2.InceptionResNetV2(include_top=False, weights='imagenet',
                                                                        input_shape=input_shape)
        head_model = layers.GlobalAveragePooling2D()(base_model.output)
        head_model = layers.Dense(units=n_classes, activation="softmax")(head_model)

    elif model_name == 'VGG16':
        base_model = applications.VGG16(weights='imagenet', include_top=False, input_shape=input_shape)
        head_model = layers.Flatten()(base_model.output)
        head_model = layers.Dense(units=1024, activation='relu')(head_model)
        head_model = layers.Dense(units=1024, activation='relu')(head_model)
        head_model = layers.Dense(units=n_classes, activation='softmax')(head_model)

    elif model_name == 'VGG19':
        base_model = applications.VGG19(include_top=False, weights='imagenet',
                                        input_shape=input_shape)
        head_model = layers.Flatten()(base_model.output)
        head_model = layers.Dense(units=1024, activation='relu')(head_model)
        head_model = layers.Dense(units=1024, activation='relu')(head_model)
        head_model = layers.Dense(units=n_classes, activation='softmax')(head_model)
    else:
        raise ValueError("Specified base model is not available!")

    model = keras.Model(inputs=base_model.input, outputs=head_model)
    if weight_decay != -1:
        # note: assigning kernel_regularizer to already-built layers only takes
        # effect once the model is rebuilt from its config (see the sketch below)
        for layer in model.layers:
            if hasattr(layer, 'kernel_regularizer'):
                layer.kernel_regularizer = keras.regularizers.l2(weight_decay)

    return model
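A caveat on the weight-decay loop above: Keras registers kernel_regularizer losses only when a layer is built, so assigning the attribute on an existing model has no effect by itself. A minimal workaround sketch (an assumption, not part of the original project) rebuilds the model from its serialized config:

import tensorflow.keras as keras

def apply_weight_decay(model, weight_decay):
    # Attach an L2 penalty to every kernel-bearing layer.
    for layer in model.layers:
        if hasattr(layer, 'kernel_regularizer'):
            layer.kernel_regularizer = keras.regularizers.l2(weight_decay)
    # Round-trip through the config so the regularizers are wired into the graph.
    weights = model.get_weights()
    rebuilt = keras.models.model_from_json(model.to_json())
    rebuilt.set_weights(weights)
    return rebuilt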
Code Example #2
def collect_models():
    models = dict()
    models["MobileNetV2"] = tfapp.MobileNetV2(input_shape=IMG_SHAPE,
                                              include_top=False,
                                              weights='imagenet')
    models["NASNetMobile"] = tfapp.NASNetMobile(input_shape=(130, 386, 3),
                                                include_top=False,
                                                weights='imagenet')
    models["DenseNet121"] = tfapp.DenseNet121(input_shape=IMG_SHAPE,
                                              include_top=False,
                                              weights='imagenet')
    models["VGG16"] = tfapp.VGG16(input_shape=IMG_SHAPE,
                                  include_top=False,
                                  weights='imagenet')
    models["Xception"] = tfapp.Xception(input_shape=(134, 390, 3),
                                        include_top=False,
                                        weights='imagenet')
    models["ResNet50V2"] = tfapp.ResNet50V2(input_shape=IMG_SHAPE,
                                            include_top=False,
                                            weights='imagenet')
    models["NASNetLarge"] = tfapp.NASNetLarge(input_shape=(130, 386, 3),
                                              include_top=False,
                                              weights='imagenet')

    # omit non 2^n shape
    # models["InceptionV3"] = tfapp.InceptionV3(input_shape=IMG_SHAPE, include_top=False, weights='imagenet')
    # models["InceptionResNetV2"] = \
    #     tfapp.InceptionResNetV2(input_shape=IMG_SHAPE, include_top=False, weights='imagenet')
    return models
Code Example #3
File: train.py Project: rubentea16/dvc-versioning
def save_bottlebeck_features():
    datagen = ImageDataGenerator(rescale=1. / 255)

    # build the VGG16 network
    model = applications.VGG16(include_top=False, weights='imagenet')

    generator = datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode=None,
        shuffle=False)
    bottleneck_features_train = model.predict_generator(
        generator, nb_train_samples // batch_size)
    np.save(open('bottleneck_features_train.npy', 'wb'),
            bottleneck_features_train)

    generator = datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode=None,
        shuffle=False)
    bottleneck_features_validation = model.predict_generator(
        generator, nb_validation_samples // batch_size)
    np.save(open('bottleneck_features_validation.npy', 'wb'),
            bottleneck_features_validation)
Code Example #4
File: models.py Project: seculayer/AI_Competitions_2
def vgg_func(input_shape, classes_num):
    vgg16 = applications.VGG16(include_top=False,
                               weights='imagenet',
                               input_shape=input_shape)

    # freeze layer
    vgg16.trainable = False

    add_model = Sequential()
    add_model.add(vgg16)
    add_model.add(Flatten())
    add_model.add(Dense(256, activation='relu'))
    add_model.add(Dropout(0.2))
    add_model.add(Dense(classes_num, activation='softmax'))

    optimizer = optimizers.SGD(lr=0.01,
                               decay=1e-5,
                               momentum=0.9,
                               nesterov=True)

    add_model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'])

    add_model.summary()
    return add_model
Code Example #5
    def _build_model(self):
        base_model = applications.VGG16(weights='imagenet',
                                        include_top=False,
                                        input_tensor=Input(shape=(128, 128, 3)),
                                        classes=2)  # classes is ignored when include_top=False

        for layer in base_model.layers:
            layer.trainable = False

        x = base_model.output
        x = Flatten()(x)
        x = Dense(10, activation='elu')(x)
        x = Dropout(0.4)(x)
        x = Dense(10, activation='elu')(x)
        x = Dropout(0.1)(x)
        predictions = Dense(2, activation='softmax', name='predictions')(x)

        model = Model(inputs=base_model.input, outputs=predictions)

        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizers.Adam(lr=1e-3),
                      metrics=['accuracy'])

        return model
Code Example #6
File: utils.py Project: marshka/ml-20-21
def load_vgg16(filename='nn_task2.pkl', img_h=224, img_w=224):
    """
    Loads the model saved with save_vgg16.

    :param filename: string, path to the file storing the model.
    :param img_h: int, the height of the input image.
    :param img_w: int, the width of the input image.
    :return: the model.
    """
    K.clear_session()

    vgg16 = applications.VGG16(weights='imagenet',
                               include_top=False,
                               input_shape=(img_h, img_w, 3))
    model = Sequential()
    model.add(vgg16)

    with open(filename, 'rb') as fp:
        layers = pickle.load(fp)
    for l in layers:
        cls = getattr(keras_layers, l['class'])
        layer = cls(**l['kwargs'])
        model.add(layer)
        if 'weights' in l:
            model.layers[-1].set_weights(l['weights'])

    model.trainable = False
    return model
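The save_vgg16 counterpart mentioned in the docstring is not shown. A plausible sketch (an assumption: it pickles each head layer's class name, config, and weights in exactly the structure the loader reads back) would be:

import pickle

def save_vgg16(model, filename='nn_task2.pkl'):
    # Serialize only the layers stacked on top of the VGG16 base (model.layers[1:]).
    head = []
    for layer in model.layers[1:]:
        entry = {'class': type(layer).__name__, 'kwargs': layer.get_config()}
        if layer.get_weights():
            entry['weights'] = layer.get_weights()
        head.append(entry)
    with open(filename, 'wb') as fp:
        pickle.dump(head, fp)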
Code Example #7
def save_bottlebeck_features():
    datagen = ImageDataGenerator(rescale=1. / 255)

    # build the VGG16 network
    print("Loading VGG16 model")
    model_vgg16 = applications.VGG16(include_top=False, weights='imagenet')
    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    # compiling is optional for pure feature extraction; predict_generator ignores the loss
    model_vgg16.compile(optimizer=sgd, loss='categorical_crossentropy')

    generator = datagen.flow_from_directory(train_data_dir,
                                            target_size=(img_width,
                                                         img_height),
                                            batch_size=batch_size,
                                            class_mode=None,
                                            shuffle=False)
    bottleneck_features_train = model_vgg16.predict_generator(
        generator, nb_train_samples // batch_size)
    with open('models/bottleneck_features_train.npy', 'wb') as f:
        np.save(f, bottleneck_features_train)

    generator = datagen.flow_from_directory(validation_data_dir,
                                            target_size=(img_width,
                                                         img_height),
                                            batch_size=batch_size,
                                            class_mode=None,
                                            shuffle=False)
    bottleneck_features_validation = model_vgg16.predict_generator(
        generator, nb_validation_samples // batch_size)
    with open('models/bottleneck_features_validation.npy', 'wb') as f:
        np.save(f, bottleneck_features_validation)
Code Example #8
def save_bottleneck_features():
    '''
    Use the pre-trained VGG16 model's convolutional layers to run predictions on training 
    and validation data without providing it with the categories.
    The results are saved into external numpy files to be processed into the top model classifier.
    '''
    datagen = ImageDataGenerator(rescale=1./255)

    # build the VGG16 network without the fully-connected layers
    model = applications.VGG16(include_top=False, weights='imagenet')

    train_generator = datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode=None,
        shuffle=False)
    bottleneck_features_train = model.predict(train_generator, steps=(nb_train_samples//batch_size), verbose=1)
    
    np.save(open('train.npy', 'wb'), bottleneck_features_train)

    validation_generator = datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode=None,
        shuffle=False)
    bottleneck_features_validation = model.predict(validation_generator, steps=(nb_validation_samples//batch_size), verbose=1)
    np.save(open('validation.npy', 'wb'), bottleneck_features_validation)
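As the docstring says, the saved arrays are meant to feed a separate top-model classifier. A sketch of that consumer step in the classic Keras-tutorial style (assumptions: a binary problem with the class folders evenly split, and the same nb_train_samples/nb_validation_samples/batch_size globals):

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense, Dropout

def train_top_model():
    train_data = np.load(open('train.npy', 'rb'))
    # labels assume the generator walked the two class folders in order, half and half
    train_labels = np.array([0] * (nb_train_samples // 2) + [1] * (nb_train_samples // 2))
    validation_data = np.load(open('validation.npy', 'rb'))
    validation_labels = np.array([0] * (nb_validation_samples // 2) + [1] * (nb_validation_samples // 2))

    model = Sequential()
    model.add(Flatten(input_shape=train_data.shape[1:]))
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
    model.fit(train_data, train_labels, epochs=50, batch_size=batch_size,
              validation_data=(validation_data, validation_labels))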
Code Example #9
def predict():
    # load the class_indices saved in the earlier step
    class_dictionary = np.load('class_indices.npy', allow_pickle=True).item()

    num_classes = len(class_dictionary)

    # add the path to your test image below
    image_path = 'path/to/your/test_image'

    orig = cv2.imread(image_path)

    print("[INFO] loading and preprocessing image...")
    image = load_img(image_path, target_size=(224, 224))
    image = img_to_array(image)

    # important! otherwise the predictions will be '0'
    image = image / 255

    image = np.expand_dims(image, axis=0)

    # build the VGG16 network
    model = applications.VGG16(include_top=False, weights='imagenet')

    # get the bottleneck prediction from the pre-trained VGG16 model
    bottleneck_prediction = model.predict(image)

    # build top model
    model = Sequential()
    model.add(Flatten(input_shape=bottleneck_prediction.shape[1:]))
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='sigmoid'))

    model.load_weights(top_model_weights_path)

    # use the bottleneck prediction on the top model to get the final
    # classification
    class_predicted = model.predict_classes(bottleneck_prediction)

    probabilities = model.predict_proba(bottleneck_prediction)

    inID = class_predicted[0]

    inv_map = {v: k for k, v in class_dictionary.items()}

    label = inv_map[inID]

    # get the prediction label
    print("Image ID: {}, Label: {}".format(inID, label))

    # display the predictions with the image
    cv2.putText(orig, "Predicted: {}".format(label), (10, 30),
                cv2.FONT_HERSHEY_PLAIN, 1.5, (43, 99, 255), 2)

    cv2.imshow("Classification", orig)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
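Note that Sequential.predict_classes and predict_proba were removed in TensorFlow 2.6. On current versions the equivalent calls (a sketch) are:

probabilities = model.predict(bottleneck_prediction)
class_predicted = np.argmax(probabilities, axis=1)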
Code Example #10
def compute_mean_and_std(model_name, X, input_shape):
    if model_name == 'Xception':
        model = applications.Xception(weights='imagenet',
                                      include_top=False,
                                      input_shape=input_shape)
    elif model_name == 'VGG16':
        model = applications.VGG16(weights='imagenet',
                                   include_top=False,
                                   input_shape=input_shape)
    elif model_name == 'VGG19':
        model = applications.VGG19(weights='imagenet',
                                   include_top=False,
                                   input_shape=input_shape)
    elif model_name == 'ResNet50':
        model = applications.ResNet50(weights='imagenet',
                                      include_top=False,
                                      input_shape=input_shape)
    elif model_name == 'InceptionResNetV2':
        model = applications.InceptionResNetV2(weights='imagenet',
                                               include_top=False,
                                               input_shape=input_shape)
    elif model_name == 'InceptionV3':
        model = applications.InceptionV3(weights='imagenet',
                                         include_top=False,
                                         input_shape=input_shape)
    elif model_name == 'MobileNet':
        model = applications.MobileNet(weights='imagenet',
                                       include_top=False,
                                       input_shape=input_shape)
    elif model_name == 'DenseNet121':
        model = applications.DenseNet121(weights='imagenet',
                                         include_top=False,
                                         input_shape=input_shape)
    elif model_name == 'DenseNet169':
        model = applications.DenseNet169(weights='imagenet',
                                         include_top=False,
                                         input_shape=input_shape)
    elif model_name == 'DenseNet201':
        model = applications.DenseNet201(weights='imagenet',
                                         include_top=False,
                                         input_shape=input_shape)
    elif model_name == 'NASNetMobile':
        model = applications.NASNetMobile(weights='imagenet',
                                          include_top=False,
                                          input_shape=input_shape)
    elif model_name == 'NASNetLarge':
        model = applications.NASNetLarge(weights='imagenet',
                                         include_top=False,
                                         input_shape=input_shape)
    else:
        raise ValueError("Specified base model is not available!")

    features = model.predict(X)[:, 0, 0, :]

    return features.mean(axis=0), features.std(axis=0)
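The long if/elif ladder above can be collapsed into a lookup table; a compact alternative sketch using the same keras.applications constructors:

_BACKBONES = {
    'Xception': applications.Xception,
    'VGG16': applications.VGG16,
    'VGG19': applications.VGG19,
    'ResNet50': applications.ResNet50,
    'InceptionResNetV2': applications.InceptionResNetV2,
    'InceptionV3': applications.InceptionV3,
    'MobileNet': applications.MobileNet,
    'DenseNet121': applications.DenseNet121,
    'DenseNet169': applications.DenseNet169,
    'DenseNet201': applications.DenseNet201,
    'NASNetMobile': applications.NASNetMobile,
    'NASNetLarge': applications.NASNetLarge,
}

def build_backbone(model_name, input_shape):
    if model_name not in _BACKBONES:
        raise ValueError("Specified base model is not available!")
    return _BACKBONES[model_name](weights='imagenet', include_top=False,
                                  input_shape=input_shape)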
Code Example #11
    def __init__(self):
        super(AdvClassifierMk2, self).__init__()
        self.base_model = applications.VGG16(weights='imagenet',
                                             include_top=False)
        # self.base_model.trainable = False
        self.top_layer = models.Sequential([
            layers.Dense(20),
            layers.Activation(tf.nn.leaky_relu),
            layers.Dropout(0.5),
            layers.Dense(2),
            layers.Activation(tf.nn.softmax),
        ])
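The snippet declares the layers but omits the subclassed model's forward pass. A minimal call method (a sketch; the global average pooling between backbone and head is an assumption) could be:

    def call(self, inputs, training=False):
        x = self.base_model(inputs, training=training)
        x = tf.reduce_mean(x, axis=[1, 2])  # global average pooling over H and W
        return self.top_layer(x, training=training)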
Code Example #12
File: utils_models.py Project: valeriomieuli/LLwE
def build_features_extractor(model_name, input_shape):
    if model_name == 'VGG16':
        model = applications.VGG16(weights='imagenet', include_top=False, input_shape=input_shape)
    elif model_name == 'VGG19':
        model = applications.VGG19(weights='imagenet', include_top=False, input_shape=input_shape)
    elif model_name == 'ResNet50':
        model = applications.ResNet50(weights='imagenet', include_top=False, input_shape=input_shape)
    else:
        raise ValueError("Specified base model is not available!")

    x = model.output
    x = layers.Flatten()(x)
    return keras.Model(inputs=model.input, outputs=x)
Code Example #13
def build_model_quoc(input_size, d_model, learning_rate=3e-4):
    """
    Reference: attention layer as per Quoc. https://github.com/pbcquoc/vietnamese_ocr

    """

    input_data = Input(name='input', shape=input_size, dtype='float32')
    base_model = applications.VGG16(weights='imagenet', include_top=False)
    base_model = maxpooling(base_model)
    inner = base_model(input_data)

    #Adding attention
    shape = inner.get_shape()
    attn = Reshape((shape[1], shape[2] * shape[3]))(inner)

    # attn = Reshape(target_shape=(int(cnn.shape[1]), -1), name='reshape')(cnn)
    attn = Dense(512,
                 activation='relu',
                 kernel_initializer='he_normal',
                 name='dense1')(attn)
    attn = Dropout(0.25)(attn)
    attn = attention_rnn(attn)

    blstm = Bidirectional(
        LSTM(units=256,
             return_sequences=True,
             kernel_initializer='he_normal',
             dropout=0.5))(attn)
    blstm = Bidirectional(
        LSTM(units=256,
             return_sequences=True,
             kernel_initializer='he_normal',
             dropout=0.5))(blstm)
    blstm = Bidirectional(
        LSTM(units=256,
             return_sequences=True,
             kernel_initializer='he_normal',
             dropout=0.5))(blstm)
    #     blstm = Bidirectional(LSTM(units=256, return_sequences=True, dropout=0.5))(blstm)
    #     blstm = Bidirectional(LSTM(units=256, return_sequences=True, dropout=0.5))(blstm)

    blstm = Dropout(rate=0.5)(blstm)
    output_data = Dense(units=d_model, activation="softmax")(blstm)

    #     optimizer = RMSprop(learning_rate=learning_rate)
    optimizer = Adam(learning_rate=learning_rate)

    model = Model(inputs=input_data, outputs=output_data)
    model.compile(optimizer=optimizer, loss=ctc_loss_lambda_func)

    return model
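ctc_loss_lambda_func (like maxpooling and attention_rnn) is an external helper not shown here. One common implementation in OCR projects, offered as a sketch rather than the author's exact code, infers sequence lengths from the softmax output and zero-padded labels:

import tensorflow as tf
from tensorflow.keras import backend as K

def ctc_loss_lambda_func(y_true, y_pred):
    # y_pred: (batch, time, d_model) softmax scores; y_true: zero-padded int labels.
    if len(y_true.shape) > 2:
        y_true = tf.squeeze(y_true)
    # Each timestep's scores sum to 1, so summing twice recovers the time length.
    input_length = tf.math.reduce_sum(y_pred, axis=-1, keepdims=False)
    input_length = tf.math.reduce_sum(input_length, axis=-1, keepdims=True)
    label_length = tf.math.count_nonzero(y_true, axis=-1, keepdims=True, dtype='int64')
    loss = K.ctc_batch_cost(y_true, y_pred, input_length, label_length)
    return tf.reduce_mean(loss)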
Code Example #14
File: test_model.py Project: hin1/ProductDetection
def initialise_model(model_path):
    '''
    Initialise and load the classifier created and trained under train_model.py, 
    on top of the ImageNet pre-trained VGG16 base model.
    '''
    base_model = applications.VGG16(include_top=False, weights='imagenet')
    top_model = models.load_model(model_path)
    top_model.summary()

    model = Sequential()
    model.add(base_model)
    model.add(top_model)

    return model
Code Example #15
File: vgg_model.py Project: kduy410/FaceRecognition
def convnet_model_(input_shape):
    global x
    vgg_model = applications.VGG16(weights=None,
                                   include_top=False,
                                   input_shape=input_shape)
    x = vgg_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(4096, activation='relu')(x)
    x = Dropout(rate=0.6)(x)
    x = Dense(4096, activation='relu')(x)
    x = Dropout(rate=0.6)(x)
    x = Lambda(lambda x: K.l2_normalize(x, axis=1))(x)

    convnet_model = Model(inputs=vgg_model.input, outputs=x)
    return convnet_model
Code Example #16
File: Models.py Project: PavlosChatz/chickn
def vgg16_model():
    dims = (64, 64, 3)
    conv_model = applications.VGG16(include_top=False, weights='imagenet',
                                    input_tensor=None, input_shape=dims, pooling='max')
    conv_model.trainable = False
    inputs = Input(shape=dims)
    vgg16_conv_outputs = conv_model(inputs)
    x = Flatten(name='flatten')(vgg16_conv_outputs)
    x = Dense(1000, activation='relu', name='fc1')(x)
    x = Dense(1000, activation='relu', name='fc2')(x)
    x = Dense(10, activation='softmax', name='predictions')(x)

    model = Model(inputs=inputs, outputs=x)
    model.summary()
    return model
Code Example #17
def prediction_using_saved_model(model_path=top_model_weights_path,
                                 img_path=TARGET_IMAGES):
    img = load_img(img_path, target_size=(img_width, img_height))
    img = img_to_array(img) / 255
    img = img.reshape((1, ) + img.shape)

    #print(img.shape)
    vgg_model = applications.VGG16(include_top=False, weights='imagenet')

    img_features = vgg_model.predict(img)

    model = create_top_model(img_features.shape[1:])

    model.load_weights(top_model_weights_path)

    # img_features already includes the batch dimension returned by predict()

    print(model.predict(img_features))
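create_top_model is defined elsewhere in the script. A plausible reconstruction (hypothetical: the usual bottleneck top model; the unit counts and the num_classes default are assumptions):

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense, Dropout

def create_top_model(input_shape, num_classes=2):  # num_classes default is hypothetical
    model = Sequential()
    model.add(Flatten(input_shape=input_shape))
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))
    return model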
Code Example #18
def save_bottlebeck_features():
    # build the VGG16 network
    # note: include_top=True expects 224x224 inputs and yields 1000-way ImageNet
    # predictions; include_top=False is the usual choice for bottleneck features
    model = applications.VGG16(include_top=True, weights='imagenet')

    datagen = ImageDataGenerator(rescale=1. / 255)

    generator = datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode=None,
        shuffle=False)

    print(len(generator.filenames))
    print(generator.class_indices)
    print(len(generator.class_indices))

    nb_train_samples = len(generator.filenames)
    num_classes = len(generator.class_indices)

    predict_size_train = int(math.ceil(nb_train_samples / batch_size))

    bottleneck_features_train = model.predict_generator(
        generator, predict_size_train)

    np.save('bottleneck_features_train.npy', bottleneck_features_train)

    generator = datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode=None,
        shuffle=False)

    nb_validation_samples = len(generator.filenames)

    predict_size_validation = int(
        math.ceil(nb_validation_samples / batch_size))

    bottleneck_features_validation = model.predict_generator(
        generator, predict_size_validation)

    np.save('bottleneck_features_validation.npy',
            bottleneck_features_validation)
Code Example #19
def create_vgg16_features(filedir, img_size_vertical, img_size_horizontal, batch_size, train_val):
    
    feature_extractor = applications.VGG16(include_top=False, weights='imagenet',
                                        input_shape=(img_size_vertical, img_size_horizontal, 3))

    vgg_data_gen = ImageDataGenerator(preprocessing_function=preprocess_input)

    generator = vgg_data_gen.flow_from_directory(
            filedir + train_val,
            target_size=(img_size_vertical, img_size_horizontal),
            batch_size=batch_size,
            class_mode='binary',
            classes=['other', 'car'],
            seed=12345,
            shuffle=False)

    cnn_features = feature_extractor.predict(generator)

    with open(train_val+'.txt', 'wb') as f:
        np.save(f, cnn_features)
Code Example #20
    def __init__(self,
                 n_outputs=7,
                 pretrained=False,
                 freeze=False,
                 size=256,
                 depth=3):

        super(MyModel, self).__init__()

        if pretrained:
            self.model_weights = 'imagenet'
        else:
            self.model_weights = None

        # VGG16 backbone (ImageNet weights only if pretrained=True)
        self.vgg = applications.VGG16(include_top=False,
                                      weights=self.model_weights,
                                      input_shape=(size, size, depth))

        # Taking the output of the last convolution block in VGG16
        self.res_out = self.vgg.output
        self.res_in = self.vgg.input

        self.conv2d = Conv2D(1024, 3, padding='same', activation='relu')
        self.GlobPoll = GlobalAveragePooling2D()
        #self.drop = Dropout(0.2)

        # Adding a fully connected layer having 1024 neurons
        self.fc1 = Dense(1024, activation='relu')
        self.fc2 = Dense(512, activation='relu')
        #self.flatten = Flatten()

        # Sigmoid output
        self.out = Dense(n_outputs, activation='sigmoid')

        if freeze:
            # Train only the top layers, i.e. the ones we added at the end
            self.vgg.trainable = False
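Here too the forward pass is omitted. A minimal call method chaining the declared layers (a sketch) would be:

    def call(self, inputs, training=False):
        x = self.vgg(inputs, training=training)
        x = self.conv2d(x)
        x = self.GlobPoll(x)
        x = self.fc1(x)
        x = self.fc2(x)
        return self.out(x)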
Code Example #21
def cnn_model(X_train, X_test , y_train, y_test):

    base_model = applications.VGG16(include_top=False, input_shape=X_train.shape[1:],
                                    weights='imagenet', classes=CLASSES)  # classes is ignored when include_top=False

    # Freeze the VGG16 layers
    for layer in base_model.layers:
        layer.trainable = False
    
    # include_top=False already ends the network at block5_pool
    model = Sequential()

    model.add(base_model)      # Stack vgg16 

    # VGG16 outputs channels_last tensors; Sequential infers the input shape
    model.add(Conv2D(128, (3, 3), activation="relu"))
    model.add(MaxPooling2D(2, 2))

    # model.add(Conv2D(128,(3,3),activation="relu"))
    # model.add(MaxPooling2D(2,2))

    model.add(Flatten())        # Flatten the output

    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.2))

    # Output layer (softmax to match the sparse_categorical_crossentropy loss)
    model.add(Dense(CLASSES, activation="softmax"))

    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    model.fit(X_train, y_train, batch_size=bs, epochs=ep, validation_data = (X_test, y_test),  callbacks=[tensorboard])

    # Save model
    model.save('cnn.model')


    model.summary()
Code Example #22
def save_bottlebeck_features():
    datagen = ImageDataGenerator(rescale=1. / 255)

    # build the VGG16 network
    model = applications.VGG16(include_top=False, weights='imagenet')
    print('Generating Train Images...')
    generator = datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode=None,
        shuffle=False)
    
    print('Predicting Train')
    bottleneck_features_train = model.predict(
        generator, steps=nb_train_samples // batch_size, verbose=1)

    np.save(open('bottleneck_features_train.npy', 'wb'),
            bottleneck_features_train)
    print('Train Bottleneck Features Saved...')

    print('Generating Test Images')
    generator = datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode=None,
        shuffle=False)

    print('Predicting Test')
    bottleneck_features_validation = model.predict(
        generator, steps=nb_validation_samples // batch_size, verbose=1)

    np.save(open('bottleneck_features_validation.npy', 'wb'),
            bottleneck_features_validation)
    print('Test Bottleneck Features Saved...')
Code Example #23
def wnet(input_shape=(None, None, 3)):
    # Difference from the original paper: 'same' padding is used instead of 'valid'
    conv_kernel_initializer = initializers.RandomNormal(stddev=0.01)

    input_flow = layers.Input(input_shape)
    # Encoder
    x = layers.Conv2D(
        64,
        (3, 3),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
    )(input_flow)
    x = layers.Activation("relu")(x)
    x = layers.Conv2D(
        64,
        (3, 3),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
    )(x)
    x = layers.Activation("relu")(x)

    x = layers.MaxPooling2D((2, 2))(x)
    x = layers.Conv2D(
        128,
        (3, 3),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
    )(x)
    x = layers.Activation("relu")(x)
    x_1 = layers.Conv2D(
        128,
        (3, 3),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
    )(x)
    x_1 = layers.Activation("relu")(x_1)

    x = layers.MaxPooling2D((2, 2))(x_1)
    x = layers.Conv2D(
        256,
        (3, 3),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
    )(x)
    x = layers.Activation("relu")(x)
    x = layers.Conv2D(
        256,
        (3, 3),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
    )(x)
    x = layers.Activation("relu")(x)
    x_2 = layers.Conv2D(
        256,
        (3, 3),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
    )(x)
    x_2 = layers.Activation("relu")(x_2)

    x = layers.MaxPooling2D((2, 2))(x_2)
    x = layers.Conv2D(
        512,
        (3, 3),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
    )(x)
    x = layers.Activation("relu")(x)
    x = layers.Conv2D(
        512,
        (3, 3),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
    )(x)
    x = layers.Activation("relu")(x)
    x_3 = layers.Conv2D(
        512,
        (3, 3),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
    )(x)
    x_3 = layers.Activation("relu")(x_3)

    x = layers.MaxPooling2D((2, 2))(x_3)
    x = layers.Conv2D(
        512,
        (3, 3),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
    )(x)
    x = layers.Activation("relu")(x)
    x = layers.Conv2D(
        512,
        (3, 3),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
    )(x)
    x = layers.Activation("relu")(x)
    x_4 = layers.Conv2D(
        512,
        (3, 3),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
    )(x)
    x_4 = layers.Activation("relu")(x_4)

    # Decoder 1
    x = layers.UpSampling2D((2, 2))(x_4)
    x = layers.concatenate([x_3, x])
    x = layers.Conv2D(
        256,
        (1, 1),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
    )(x)
    x = layers.Activation("relu")(x)
    x = layers.Conv2D(
        256,
        (3, 3),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
    )(x)
    x = layers.Activation("relu")(x)

    x = layers.UpSampling2D((2, 2))(x)
    x = layers.concatenate([x_2, x])
    x = layers.Conv2D(
        128,
        (1, 1),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
    )(x)
    x = layers.Activation("relu")(x)
    x = layers.Conv2D(
        128,
        (3, 3),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
    )(x)
    x = layers.Activation("relu")(x)

    x = layers.UpSampling2D((2, 2))(x)
    x = layers.concatenate([x_1, x])
    x = layers.Conv2D(
        64,
        (1, 1),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
    )(x)
    x = layers.Activation("relu")(x)
    x = layers.Conv2D(
        64,
        (3, 3),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
    )(x)
    x = layers.Activation("relu")(x)
    x = layers.Conv2D(
        32,
        (3, 3),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
    )(x)
    x = layers.Activation("relu")(x)

    # Decoder 2
    x_rb = layers.UpSampling2D((2, 2))(x_4)
    x_rb = layers.concatenate([x_3, x_rb])
    x_rb = layers.Conv2D(
        256,
        (1, 1),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
    )(x_rb)
    x_rb = layers.Activation("relu")(x_rb)
    x_rb = layers.Conv2D(
        256,
        (3, 3),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
    )(x_rb)
    x_rb = layers.Activation("relu")(x_rb)

    x_rb = layers.UpSampling2D((2, 2))(x_rb)
    x_rb = layers.concatenate([x_2, x_rb])
    x_rb = layers.Conv2D(
        128,
        (1, 1),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
    )(x_rb)
    x_rb = layers.Activation("relu")(x_rb)
    x_rb = layers.Conv2D(
        128,
        (3, 3),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
    )(x_rb)
    x_rb = layers.Activation("relu")(x_rb)

    x_rb = layers.UpSampling2D((2, 2))(x_rb)
    x_rb = layers.concatenate([x_1, x_rb])
    x_rb = layers.Conv2D(
        64,
        (1, 1),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
    )(x_rb)
    x_rb = layers.Activation("relu")(x_rb)
    x_rb = layers.Conv2D(
        64,
        (3, 3),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
    )(x_rb)
    x_rb = layers.Activation("relu")(x_rb)
    x_rb = layers.Conv2D(
        32,
        (3, 3),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
    )(x_rb)
    x_rb = layers.Activation("relu")(x_rb)
    x_rb = layers.Conv2D(
        1,
        (1, 1),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
        activation="sigmoid",
    )(x_rb)  # Sigmoid activation

    # Multiplication
    x = layers.multiply([x, x_rb])
    x = layers.Conv2D(
        1,
        (1, 1),
        strides=(1, 1),
        padding="same",
        kernel_initializer=conv_kernel_initializer,
        activation="relu",
    )(x)

    model = models.Model(inputs=input_flow, outputs=x)

    front_end = app.VGG16(
        weights="imagenet", include_top=False
    )  # apply vgg weights to model, maybe we don't need this
    weights_front_end = []
    for layer in front_end.layers:
        if "conv" in layer.name:
            weights_front_end.append(layer.get_weights())
    counter_conv = 0
    for i in range(len(model.layers)):
        if counter_conv >= 13:
            break
        if "conv" in model.layers[i].name:
            model.layers[i].set_weights(weights_front_end[counter_conv])
            counter_conv += 1

    return model
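To verify the transfer, a quick sanity check (a sketch; layers[1] is the first encoder conv in this architecture) can compare against VGG16's block1_conv1:

import numpy as np

model = wnet()
reference = app.VGG16(weights="imagenet", include_top=False)
np.testing.assert_allclose(
    model.layers[1].get_weights()[0],
    reference.get_layer("block1_conv1").get_weights()[0],
)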
Code Example #24
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)
        plt.imshow(image_sample[i].reshape(32, 32, 3)[:, :, 0],
                   cmap=plt.cm.binary)
        plt.xlabel(label2character[label_sample[i]], fontproperties=prop)
    plt.show()


visualize_images(train_images, train_labels)

# ## Transfer learning

# In[7]:

model = applications.VGG16(include_top=False, weights='imagenet')

train_features = model.predict(train_images)
val_features = model.predict(val_images)

train_features.shape

# # Visualize features

# In[9]:

class_0_mask = np.where(train_labels == 0)[0][:4]
class_1_mask = np.where(train_labels == 1)[0][:4]


def visualize_features(mask):
Code Example #25
                                            num_parallel_calls=tf.data.experimental.AUTOTUNE)
validation_dataset = validation_dataset.map(process_and_not_augment_image,
                                            num_parallel_calls=tf.data.experimental.AUTOTUNE)
validation_dataset = validation_dataset.batch(BATCH_SIZE, drop_remainder=True)


# ## Reuse a pre-trained CNN
#
# Another option is to reuse a pretrained network. Here we'll use the
# [VGG16](https://keras.io/applications/#vgg16) network architecture
# with weights learned using Imagenet. We remove the top layers and
# freeze the pre-trained weights.
#
# ### Initialization

vgg16 = applications.VGG16(weights='imagenet', include_top=False,
                           input_shape=INPUT_IMAGE_SIZE)
vgg16.trainable = False

# We then stack our own, randomly initialized layers on top of the
# VGG16 network.

model = Sequential([vgg16,
                    Flatten(),
                    Dense(256, activation='relu'),
                    Dense(43, activation='softmax')])

model.compile(loss='sparse_categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

print(model.summary())
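# The excerpt ends before training; the fit call would presumably look like
# this (the epoch count is an assumption):

history = model.fit(train_dataset,
                    validation_data=validation_dataset,
                    epochs=10)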
Code Example #26
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        rotation_range=30, 
        width_shift_range=0.1,
        height_shift_range=0.1,
        samplewise_center = True,
        )

datagen_test = ImageDataGenerator(rescale=1./255, samplewise_center = True)

train_flow = datagen.flow_from_dataframe(train, x_col = 'Filepath', y_col = 'Target', target_size=(224, 224), batch_size = 32, interpolation = 'lanczos', validate_filenames = False)
test_flow = datagen_test.flow_from_dataframe(test, x_col = 'Filepath', y_col = 'Target', target_size=(224, 224), batch_size = 32,  interpolation = 'lanczos', validate_filenames = False)


model = applications.VGG16(weights='imagenet', include_top = False, input_shape=(224, 224, 3))

#model.get_layer('block1_conv1').trainable = False
#model.get_layer('block1_conv2').trainable = False
#model.get_layer('block2_conv1').trainable = False
#model.get_layer('block2_conv2').trainable = False

flat1 = tf.keras.layers.Flatten()(model.output)
dropout1 = Dropout(0.1)(flat1)
class1 = tf.keras.layers.Dense(256, activation='relu')(dropout1)
dropout2 = Dropout(0.1)(class1)
output = tf.keras.layers.Dense(10, activation='softmax')(dropout2)
model = Model(inputs = model.inputs, outputs = output)

reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor = 'val_loss', factor = 0.2, patience = 1, verbose = 1, min_delta=0.0001, min_lr = 1e-8)
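The snippet stops before compiling and fitting. A plausible continuation (assumptions: Adam optimizer, 20 epochs; flow_from_dataframe defaults to categorical labels) wires in the reduce_lr callback defined above:

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

history = model.fit(train_flow,
                    validation_data=test_flow,
                    epochs=20,
                    callbacks=[reduce_lr])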
Code Example #27
    return op.get(type)


all_combinations = [(bs, it, op, lr) for bs in BATCH_SIZE
                    for it in INTERPOLATION for op in OPTIMIZER_TYPE
                    for lr in LEARNING_RATE]

for bs, it, op, lr in all_combinations:
    train_data, valid_data = load_preprocess(DIRECTORY, bs, IMAGE_SIZE, it)

    print(
        "Parameter Tuning: Batch_size = {}, Interpolation = {}, Optimizer = {}, Learning_rate = {}"
        .format(bs, it, op, lr))

    base_model = applications.VGG16(include_top=False,
                                    weights="imagenet",
                                    input_shape=(224, 224, 3))

    base_model.summary()
    train_feat, train_labels = extract_features(base_model, train_data, 1442,
                                                bs)
    valid_feat, valid_labels = extract_features(base_model, valid_data, 618,
                                                bs)

    inputs = keras.Input(shape=(7, 7, 512))
    x = layers.Flatten()(inputs)
    x = layers.Dense(2048, activation='relu')(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Dense(512, activation='relu')(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Dense(128, activation='relu')(x)
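The classifier head is cut off here; it presumably finishes with an output layer and compilation, e.g. (a sketch assuming a 3-class problem as in the next example and the truncated getOptimizer helper above):

    outputs = layers.Dense(3, activation='softmax')(x)
    head = keras.Model(inputs, outputs)
    head.compile(optimizer=getOptimizer(op, lr),
                 loss='categorical_crossentropy',
                 metrics=['accuracy'])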
Code Example #28
                                  for it in INTERPOLATION
                                  for op in OPTIMIZER_TYPE
                                  for lr in LEARNING_RATE]

for bs, it, op, lr in all_combinations:
    train_data, valid_data = load_preprocess(DIRECTORY, bs, IMAGE_SIZE, it)
    print("Parameter Tuning: Batch_size = {}, Interpolation = {}, Optimizer = {}, Learning_rate = {}".format(bs, it, op, lr))

    model = applications.VGG16(
        include_top=True,
        weights=None,
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=3,
        classifier_activation="softmax",
    )

    model.compile(
        optimizer=getOptimizer(op, lr),
        loss=losses.CategoricalCrossentropy(),
        metrics=[metrics.CategoricalAccuracy()]
    )

    history = model.fit(train_data, epochs=15, verbose=2, validation_data=valid_data)

    with open('../output/vgg16_2nd/output_{}_{}_{}_{}.json'.format(bs, it, op, lr), 'w') as f:
        json.dump(history.history, f)
Code Example #29
    def _build_front(self, input_images):
        base_model = applications.VGG16(input_tensor=input_images, weights='imagenet',
                                        include_top=False, input_shape=(256, 256, 3))
        BOTTLENECK_TENSOR_NAME = 'block4_conv3'  # this is the 13th layer in VGG16
        front = self._create_non_trainable_model(base_model, BOTTLENECK_TENSOR_NAME)  # frontend
        return front
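_create_non_trainable_model is defined elsewhere in the class. A hypothetical reconstruction (a sketch: cut the graph at the bottleneck tensor and freeze everything up to it):

    def _create_non_trainable_model(self, base_model, bottleneck_tensor_name):
        front = Model(inputs=base_model.input,
                      outputs=base_model.get_layer(bottleneck_tensor_name).output)
        for layer in front.layers:
            layer.trainable = False
        return front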
Code Example #30
File: agg16.py Project: erinqhu/pokemon
# In[22]:

import tensorflow as tf
tf.keras.backend.clear_session()

# In[23]:

nrow = 150
ncol = 150

# In[24]:

# Load the VGG16 network
base_model = applications.VGG16(weights='imagenet',
                                include_top=False,
                                input_shape=(nrow, ncol, 3))

# In[25]:

# Create a new model
model = Sequential()

# Loop over base_model.layers and add each layer to model
for layer in base_model.layers:
    model.add(layer)

# In[26]:

for layer in model.layers:
    layer.trainable = False