Example #1
def model(lr):
    # load the VGG16 convolutional base
    base_model = vgg16.VGG16(weights='imagenet',
                             include_top=False,
                             input_shape=(32, 32, 3))

    # freeze the base layers
    for layer in base_model.layers:
        layer.trainable = False

    # build the new classifier head
    model = Sequential()
    model.add(base_model)
    model.add(GlobalAveragePooling2D())
    model.add(BatchNormalization())
    model.add(Flatten())  # no-op here: GlobalAveragePooling2D output is already flat
    model.add(Dense(256, activation='relu'))
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.6))
    model.add(Dense(10, activation='softmax'))

    # compile with the given learning rate
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr),
                  metrics=['accuracy'])

    return model
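A minimal usage sketch for this builder; the CIFAR-10 data and the learning rate below are illustrative assumptions, not part of the original example:

# Hypothetical usage: train the transfer model on CIFAR-10-shaped data.
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical

(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
y_train, y_test = to_categorical(y_train, 10), to_categorical(y_test, 10)

m = model(lr=1e-3)
m.fit(x_train, y_train, batch_size=64, epochs=5,
      validation_data=(x_test, y_test))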
Example #2
    def __init__(self, name):
        self.input_shape = (32, 32, 3)
        if name == "LeNet.h5":
            model = self.createLeNet()
            print("Built: LeNet")
        elif name == "ResNet56.h5":
            self.input_shape = (32, 32, 3)
            model = self.resnet_v2(self.input_shape, depth=56, num_classes=10)
            model.summary()
            print("Built: ResNet56")
        elif name == "VGG19.h5":
            model = vgg16.VGG16(include_top=False, weights=None, input_tensor=None, input_shape=self.input_shape, pooling=None,
                        classes=1000)
            model.summary()
            print("Built: VGG19")
        elif name == "wrn.h5":
            init = (32, 32, 3)
            model = self.create_wide_residual_network(init, nb_classes=10, N=4, k=8, dropout=0.0)
            model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["acc"])
            model.summary()
            print("Built: WRN")
        elif name == "custom.h5":
            model = self.createModel()
            print("Built: Custom")
        elif name == "AlexNet.h5":
            model = self.createAlexNet()
            print("Built: AlexNet")
        else:
            print("Build Failed")
            return

        model_path = os.path.join(save_dir, name)
        model.save(model_path)
Example #3
def create_model(model="vgg19", pool="avg", padding="valid"):
    if model == "vgg19":
        default_model = vgg19.VGG19(weights="imagenet", include_top=False)
    elif model == "vgg16":
        default_model = vgg16.VGG16(weights="imagenet", include_top=False)
    new_layers = []
    for i, layer in enumerate(default_model.layers):
        if i == 0:
            new_layers.append(keras.layers.Input((None, None, 3)))
        else:
            if isinstance(layer, keras.layers.Conv2D):
                config = layer.get_config()
                config["padding"] = padding
                new_layers.append(keras.layers.Conv2D.from_config(config))
            elif isinstance(layer, keras.layers.MaxPooling2D):
                config = layer.get_config()
                config["padding"] = padding
                if pool == "avg":
                    new_layers.append(
                        keras.layers.AveragePooling2D.from_config(config))
                else:
                    new_layers.append(
                        keras.layers.MaxPooling2D.from_config(config))
    inputs = new_layers[0]  # avoid shadowing the built-in `input`
    output = inputs
    for i in range(1, len(new_layers)):
        output = new_layers[i](output)
    model = keras.models.Model(inputs, output)
    for new, old in zip(model.layers, default_model.layers):
        new.set_weights(old.get_weights())
    return model
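Because the rebuilt network keeps a fully convolutional (None, None, 3) input, it accepts images of any size. A quick usage sketch (the input size below is illustrative):

# Hypothetical usage: a "same"-padding, average-pooled VGG19 feature extractor.
import numpy as np

feature_net = create_model(model="vgg19", pool="avg", padding="same")
batch = np.random.rand(1, 320, 480, 3).astype("float32")
features = feature_net.predict(batch)
print(features.shape)  # spatial dimensions follow the input size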
Example #4
def search(files, db_path, mask_path, video_path, params):
    memory_graph = MemoryGraph(db_path, params)
    cnn = vgg16.VGG16(weights="imagenet", include_top=False, input_shape=(32, 32, 3))
    orb = cv2.ORB_create(nfeatures=100000, fastThreshold=7)

    for file in files:
        search_file(file, memory_graph, cnn, orb, mask_path, video_path, params)
Example #5
def un_activation_test():
    vgg_model = vgg16.VGG16(weights='imagenet')
    vgg_model.summary()
    rand_data = np.random.randint(low=-10, high=10, size=(1, 112, 112, 128))
    dlog.debug(rand_data)
    rand_data = un_activation(dget_layer(vgg_model, "block2_conv2"), rand_data)
    dlog.debug(rand_data)
Example #6
def keras_fn():
    '''
    The gradient values are modified here -- how does the result stay within
    the valid pixel range of the image?
    :return: the updated input values
    '''
    conv_base = vgg16.VGG16(include_top=False, weights='imagenet')
    im = Image.open('/Users/yongli/Pictures/flower.jpeg')
    im = im.resize((224, 224))
    im_arr = np.array(im)
    im_arr = np.expand_dims(im_arr, axis=0)
    im_arr = im_arr.astype('float32')

    layer_name = 'block3_conv1'
    filter_idx = 0

    layer_output = conv_base.get_layer(layer_name).output
    loss = K.mean(layer_output[:, :, :, filter_idx])

    grads = K.gradients(loss, conv_base.input)[0]

    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
    iterate = K.function([conv_base.input], [loss, grads])

    step = 1
    for i in range(40):
        loss_value, grads_value = iterate([im_arr])
        im_arr += grads_value * step  # gradient-ascent step on the input
    plot.imshow(im_arr[0])
    plot.show()
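K.gradients and K.function only work with graph-mode Keras; under eager TF2 the same gradient-ascent loop is usually written with tf.GradientTape. A sketch under that assumption, reusing conv_base and im_arr from the example above:

# TF2 equivalent of the filter-maximization loop, using tf.GradientTape.
import tensorflow as tf

feature_model = tf.keras.Model(conv_base.input,
                               conv_base.get_layer('block3_conv1').output)
img = tf.Variable(im_arr)
for _ in range(40):
    with tf.GradientTape() as tape:
        activation = feature_model(img)
        loss = tf.reduce_mean(activation[:, :, :, 0])
    grads = tape.gradient(loss, img)
    grads /= tf.sqrt(tf.reduce_mean(tf.square(grads))) + 1e-5
    img.assign_add(grads)  # one gradient-ascent step on the image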
Example #7
def create_model(num_trainable_layers=None):
    vgg_conv = vgg16.VGG16(weights='imagenet',
                           include_top=False,
                           input_shape=(224, 224, 3))
    if num_trainable_layers is None:
        # Freeze all the layers
        for layer in vgg_conv.layers[:]:
            layer.trainable = False
    else:
        # Freeze all the layers
        for layer in vgg_conv.layers[:-num_trainable_layers]:
            layer.trainable = False

    # Create the model
    model = Sequential()
    model.add(vgg_conv)
    model.add(Flatten())
    # model.add(Dense(512, activation='relu', input_dim=7 * 7 * 512))
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.25))
    model.add(Dense(3, activation='softmax'))


    return model
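A usage sketch; unfreezing the last four layers and the compile settings below are assumptions:

# Hypothetical usage: fine-tune the last 4 VGG16 layers for 3 classes.
model = create_model(num_trainable_layers=4)
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()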
Example #8
def main():
    parser = argparser()
    args = parser.parse_args()
    image_path = args.image
    layer_name = args.layer_name
    feature_to_visualize = args.feature
    visualize_mode = args.mode

    model = vgg16.VGG16(weights='imagenet', include_top=True)
    layer_dict = dict([(layer.name, layer) for layer in model.layers])
    if layer_name not in layer_dict:  # dict.has_key() is Python 2 only
        print('Wrong layer name')
        sys.exit()

    # Load data and preprocess
    img = Image.open(image_path)
    img = img.resize((224, 224))
    img_array = np.array(img)
    img_array = np.transpose(img_array, (2, 0, 1))
    img_array = img_array[np.newaxis, :]
    img_array = img_array.astype(float)  # np.float was removed in NumPy 1.24
    img_array = imagenet_utils.preprocess_input(img_array)

    deconv = visualize(model, img_array, layer_name, feature_to_visualize,
                       visualize_mode)

    # postprocess and save image
    deconv = np.transpose(deconv, (1, 2, 0))
    deconv = deconv - deconv.min()
    deconv *= 1.0 / (deconv.max() + 1e-8)
    deconv = deconv[:, :, ::-1]
    uint8_deconv = (deconv * 255).astype(np.uint8)
    img = Image.fromarray(uint8_deconv, 'RGB')
    img.save('results/{}_{}_{}.png'.format(layer_name, feature_to_visualize,
                                           visualize_mode))
Example #9
    def make_pred(self):
        class_labels = ["bread", "rice", "roti", "noodles", "kottu", "pizza"]

        # builds the model using the saved structure and weights
        f = Path("ds_comp/model_structure.json")
        model_structure = f.read_text()
        model = model_from_json(model_structure)
        model.load_weights("ds_comp/model_weights.h5")

        # converts the input image to a normalised numpy array
        img = image.load_img(self.file, target_size=(224, 224))
        image_array = image.img_to_array(img)
        images = np.expand_dims(image_array, axis=0)
        images = vgg16.preprocess_input(images)
        # extracts the features of the new image
        feature_extraction = vgg16.VGG16(weights='imagenet',
                                         include_top=False,
                                         input_shape=(224, 224, 3))
        features = feature_extraction.predict(images)

        # Using the extracted features the model predicts what the image is
        results = model.predict(features)

        # prints the class label of the prediction using results produced
        single_result = results[0]
        most_likely_class_index = int(np.argmax(single_result))
        class_label = class_labels[most_likely_class_index]

        print("{}".format(class_label))
        return "{}".format(class_label)
Example #10
    def __init__(self):
        self.IMG_SIZE = 224
        model_base = vgg16.VGG16(weights='imagenet',
                                 include_top=True,
                                 input_shape=(self.IMG_SIZE, self.IMG_SIZE, 3))
        self.model = keras.Model(inputs=model_base.layers[0].input,
                                 outputs=model_base.layers[-2].output)
        self.model.summary()
Example #11
def un_max_pooling_test():
    A = np.random.randint(low=-10, high=10, size=(1, 4, 4, 4))
    dlog.debug(A, flag=True, channels_first=True)
    R = get_switch(A, (2, 2))
    vgg_model = vgg16.VGG16(weights='imagenet')
    data = max_pool(A, (2, 2))
    output = un_max_pooling(vgg_model, 'block2_pool', data, R)
    dlog.debug(output, flag=True, channels_first=True)
Example #12
    def network(inputs):
        previous = vgg16.VGG16(
            include_top=False,
            input_tensor=inputs,
            pooling=pooling,
            weights=weights,
        ).output
        return tf.keras.layers.Flatten(name="flatten")(previous)
Example #13
    def __init__(self, settings: Settings, view_id, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.settings = settings
        self.view_id = view_id

        # region Use VGG16
        if self.settings.svcnn_model == 'vgg16':
            vgg16_model = vgg16.VGG16(include_top=False,
                                      input_shape=settings.input_shape,
                                      pooling=None,
                                      weights=None)
            x = vgg16_model.layers[-1].output
            x = Flatten(name='flatten')(x)
            x = Dense(4096, activation='relu', name='fc1')(x)
            x = Dense(4096, activation='relu', name='fc2')(x)
            x = Dense(settings.num_classes,
                      activation='softmax',
                      name='predictions')(x)
            self.model_input = vgg16_model.input
            self.model_output = x
            self.model = Model(inputs=self.model_input,
                               outputs=self.model_output)
        # endregion

        # region Use VGG19
        elif self.settings.svcnn_model == 'vgg19':
            vgg19_model = vgg19.VGG19(include_top=False,
                                      input_shape=settings.input_shape,
                                      pooling=None,
                                      weights=None)
            x = vgg19_model.layers[-1].output
            x = Flatten(name='flatten')(x)
            x = Dense(4096, activation='relu', name='fc1')(x)
            x = Dense(4096, activation='relu', name='fc2')(x)
            x = Dense(settings.num_classes,
                      activation='softmax',
                      name='predictions')(x)
            self.model_input = vgg19_model.input
            self.model_output = x
            self.model = Model(inputs=self.model_input,
                               outputs=self.model_output)
        # endregion

        # region Use ResNet50
        elif self.settings.svcnn_model == 'resnet50':
            resnet50_model = resnet50.ResNet50(
                include_top=False,
                input_shape=settings.input_shape,
                pooling=None,
                weights=None)
            x = resnet50_model.layers[-1].output
            x = GlobalAveragePooling2D(name='avg_pool')(x)
            x = Dense(settings.num_classes, activation='softmax',
                      name='probs')(x)
            self.model_input = resnet50_model.input
            self.model_output = x
            self.model = Model(inputs=self.model_input,
                               outputs=self.model_output)
Example #14
    def build(self):

        base_model = None
        output = None

        if self.include_top:
            if self.input_width_height != 224 or self.channels != 3:
                print(
                    "IF include_top=True, input_shape MUST be (224,224,3), exiting..."
                )
                exit()
            else:
                if self.name == "VGG" or self.name == "VGG16":
                    base_model = vgg16.VGG16(weights=self.weights,
                                             include_top=True,
                                             classes=self.num_classes)
                else:
                    print("Invalid name, accepted 'VGG1619', exiting...")
                    exit()
                output = base_model.output
        else:
            inputs = Input(shape=(self.input_width_height,
                                  self.input_width_height, self.channels))
            if self.name == "VGG" or self.name == "VGG16":
                base_model = vgg16.VGG16(weights=self.weights,
                                         include_top=False,
                                         input_tensor=inputs)
            else:
                print("Invalid name, accepted 'VGG16', exiting...")
                exit()
            flatten = Flatten(name='my_flatten')
            output_layer = Dense(self.num_classes,
                                 activation='softmax',
                                 name='my_predictions')
            output = output_layer(flatten(base_model.output))

        input_layer = base_model.input

        model = Model(input_layer, output)
        # model.summary(line_length=50)
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['acc'])
        return model
Example #15
def get_vgg_activation(layer_name, width, height):
    tensor = K.placeholder((1, height, width, 3))
    model = vgg16.VGG16(input_tensor=tensor,
                        weights='imagenet',
                        include_top=False)
    outputs_dict = {}
    for layer in model.layers:
        outputs_dict[layer.name] = layer.output
        layer.trainable = False
    return K.function([tensor], [outputs_dict[layer_name]])
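A usage sketch for the activation getter, assuming the graph-mode Keras backend that K.placeholder requires; the layer name and sizes are illustrative:

import numpy as np

get_act = get_vgg_activation('block4_conv2', width=224, height=224)
img = np.random.rand(1, 224, 224, 3).astype('float32')
activation = get_act([img])[0]  # feature map of the requested layer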
Example #16
def unet_resnet(
        input_shape=(256, 256, 3), num_classes=8, encoder_weights='imagenet'):

    base_model = resnet50.ResNet50(input_shape=input_shape,
                                   include_top=False,
                                   weights=encoder_weights)

    for l in base_model.layers:
        l.trainable = True

    conv0 = base_model.get_layer("activation").output
    conv1 = base_model.get_layer("activation_1").output
    conv2 = base_model.get_layer("activation_10").output
    conv3 = base_model.get_layer("activation_22").output
    conv4 = base_model.get_layer("activation_40").output
    conv5 = base_model.get_layer("activation_48").output

    # (None, 128, 128, 64) (None, 64, 64, 128) (None, 32, 32, 256) (None, 16, 16, 512) (None, 16, 16, 2048)
    # print(conv1.shape, conv2.shape, conv3.shape, conv4.shape, conv5.shape)

    # use keras.layers' concatenate (not K.concatenate) so the results stay
    # valid Keras layer tensors when building the Model
    up6 = concatenate([conv5, conv4], axis=-1)
    conv6 = conv_block_simple(up6, 256, "conv6_1")
    conv6 = conv_block_simple(conv6, 256, "conv6_2")

    up7 = concatenate([UpSampling2D()(conv6), conv3], axis=-1)
    conv7 = conv_block_simple(up7, 192, "conv7_1")
    conv7 = conv_block_simple(conv7, 192, "conv7_2")

    up8 = concatenate([UpSampling2D()(conv7), conv2], axis=-1)
    conv8 = conv_block_simple(up8, 128, "conv8_1")
    conv8 = conv_block_simple(conv8, 128, "conv8_2")

    up9 = concatenate([UpSampling2D()(conv8), conv1], axis=-1)
    conv9 = conv_block_simple(up9, 64, "conv9_1")
    conv9 = conv_block_simple(conv9, 64, "conv9_2")

    up9x = concatenate([UpSampling2D()(conv9), conv0], axis=-1)
    conv9x = conv_block_simple(up9x, 64, "conv9x_1")
    conv9x = conv_block_simple(conv9x, 64, "conv9x_2")

    vgg = vgg16.VGG16(input_shape=input_shape,
                      input_tensor=base_model.input,
                      include_top=False)
    for l in vgg.layers:
        l.trainable = False

    vgg_first_conv = vgg.get_layer("block1_conv2").output
    up10 = concatenate([UpSampling2D()(conv9x), vgg_first_conv], axis=-1)
    conv10 = conv_block_simple(up10, 32, "conv10_1")
    conv10 = conv_block_simple(conv10, 32, "conv10_2")
    conv10 = SpatialDropout2D(0.2)(conv10)

    x = Conv2D(num_classes, (1, 1), activation=None, name="prediction")(conv10)
    model = Model(base_model.input, x)
    return model
Example #17
    def __init__(self, style_layers=['block1_conv2',
                                     'block2_conv2',
                                     'block3_conv3',
                                     'block4_conv3']):
        super(LossNetwork, self).__init__()
        vgg = vgg16.VGG16(include_top=False, weights='imagenet')
        vgg.trainable = False
        model_outputs = [vgg.get_layer(name).output for name in style_layers]
        self.model = tf.keras.models.Model(vgg.input, model_outputs)
        # mixed precision: force float32 output
        self.linear = layers.Activation('linear', dtype='float32')
Example #18
def UNet(nClasses, input_height, input_width):
    assert input_height % 32 == 0
    assert input_width % 32 == 0

    img_input = Input(shape=(input_height, input_width, 3))

    vgg_streamlined = vgg16.VGG16(include_top=False,
                                  weights='imagenet',
                                  input_tensor=img_input)
    assert isinstance(vgg_streamlined, Model)

    o = UpSampling2D((2, 2))(vgg_streamlined.output)
    o = concatenate([vgg_streamlined.get_layer(name="block4_pool").output, o],
                    axis=-1)
    o = Conv2D(512, (3, 3), padding="same")(o)
    o = BatchNormalization()(o)

    o = UpSampling2D((2, 2))(o)
    o = concatenate([vgg_streamlined.get_layer(name="block3_pool").output, o],
                    axis=-1)
    o = Conv2D(256, (3, 3), padding="same")(o)
    o = BatchNormalization()(o)

    o = UpSampling2D((2, 2))(o)
    o = concatenate([vgg_streamlined.get_layer(name="block2_pool").output, o],
                    axis=-1)
    o = Conv2D(128, (3, 3), padding="same")(o)
    o = BatchNormalization()(o)

    o = UpSampling2D((2, 2))(o)
    o = concatenate([vgg_streamlined.get_layer(name="block1_pool").output, o],
                    axis=-1)
    o = Conv2D(64, (3, 3), padding="same")(o)
    o = BatchNormalization()(o)

    # The UNet pipeline halves the resolution relative to the input, so the
    # final output would come out 2x smaller; upsample once more here to
    # restore the original size.
    o = UpSampling2D((2, 2))(o)
    o = Conv2D(64, (3, 3), padding="same")(o)
    o = BatchNormalization()(o)

    o = Conv2D(1, (1, 1), padding="same")(o)
    o = BatchNormalization()(o)
    o = Activation("sigmoid")(o)

    # o = Conv2D(nClasses, (1, 1), padding="same")(o)
    # o = BatchNormalization()(o)
    # o = Activation("relu")(o)

    # o = Reshape((-1, nClasses))(o)
    # o = Activation("softmax")(o)

    model = Model(inputs=img_input, outputs=o)
    return model
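A usage sketch for this UNet; note the final layer is a fixed 1-channel sigmoid (the multi-class head is commented out), so it is wired for binary masks regardless of nClasses. The compile settings are assumptions:

model = UNet(nClasses=1, input_height=256, input_width=256)
model.compile(loss='binary_crossentropy', optimizer='adam',
              metrics=['accuracy'])
model.summary()  # expects (256, 256, 3) images and (256, 256, 1) masks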
Example #19
def get_feature(x, layer_name):
    g_model = vgg16.VGG16(include_top=False, weights='imagenet')
    output1 = g_model.get_layer(layer_name).output

    fn = K.function([g_model.input], [output1])
    print(x)
    # K.function expects a batched input of shape (1, 224, 224, 3)
    x = np.reshape(x[0], (1, 224, 224, 3))
    output1_val = fn([x])[0]

    return output1_val
Example #20
def getPerceptionModel(windowsize):
    model = vgg16.VGG16(weights="imagenet",
                        include_top=False,
                        input_shape=(windowsize, windowsize, 3))
    model.trainable = False
    structureOutput = model.get_layer("block4_conv3").output
    perceptionModel = Model(inputs=model.inputs, outputs=structureOutput)
    perceptionModel.trainable = False
    for layer in perceptionModel.layers:
        layer.trainable = False
    return perceptionModel
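A frozen extractor like this is typically used to compare images in feature space. A usage sketch (the window size and random inputs are illustrative):

# Hypothetical usage: a perceptual distance between two image windows.
import numpy as np

pm = getPerceptionModel(windowsize=128)
a = np.random.rand(1, 128, 128, 3).astype("float32")
b = np.random.rand(1, 128, 128, 3).astype("float32")
dist = np.mean((pm.predict(a) - pm.predict(b)) ** 2)  # feature-space MSE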
Example #21
def get_feature_val(layer_name, im):
    '''
    Compared with using a global variable this is arguably redundant, but to
    read the activations of an intermediate layer you must use K.function or
    a Model.
    '''
    vgg = vgg16.VGG16(include_top=False, weights='imagenet')
    output1 = vgg.get_layer(layer_name).output
    fn1 = K.function([vgg.input], [output1])
    fn1_value = fn1([im])[0]
    fn1_value = fn1_value[0]
    print(fn1_value.shape)
    return fn1_value
Example #22
def conv_filter_visualization_example():
    # The name of the layer we want to visualize.
    # See model definition at keras/applications/vgg16.py.
    LAYER_NAME = 'block5_conv1'

    # Build the VGG16 network with ImageNet weights.
    vgg = vgg16.VGG16(weights='imagenet', include_top=False)
    print('Model loaded.')
    vgg.summary()

    # Visualize.
    visualize_layer(vgg, LAYER_NAME)
Example #23
def perceptual_loss_wrapper(vgg_layer="block2_conv2"):
    vgg = vgg16.VGG16(include_top=False, weights="imagenet")
    perceptual_layer_model = Model(inputs=vgg.input,
                                   outputs=vgg.get_layer(vgg_layer).output)
    perceptual_layer_model.trainable = False
    perceptual_layer_model.compile(loss="mse", optimizer="adam")

    def perceptual_loss(y_true, y_pred):
        y_true_vgg = perceptual_layer_model(y_true)
        y_pred_vgg = perceptual_layer_model(y_pred)
        return tf.reduce_mean(tf.square(y_pred_vgg - y_true_vgg))

    return perceptual_loss
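The returned closure plugs straight into compile; the generator below is hypothetical:

# Hypothetical usage: train an image-to-image generator with the VGG loss.
generator = build_generator()  # assumed to exist; outputs 3-channel images
generator.compile(loss=perceptual_loss_wrapper("block2_conv2"),
                  optimizer="adam")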
Example #24
def vgg16(input_shape, frozen_layers=0, weights=None, pooling='avg', **kwargs):
    # create the base model
    base_model = keras_vgg16.VGG16(weights=weights,
                                   include_top=False,
                                   input_shape=input_shape,
                                   pooling=pooling)
    predictions = add_classifier(base_model.output, **kwargs)
    model = models.Model(inputs=base_model.input, outputs=predictions)

    # Freeze some layers:
    freeze_layers(model, frozen_layers)

    return model
Example #25
def VGG16_Feature(img_shape=(256, 256, 3)):
    vgg16_model = vgg16.VGG16(include_top=False,
                              weights='imagenet',
                              input_shape=img_shape)
    model = Model(
        inputs=[vgg16_model.input],
        outputs=[
            vgg16_model.layers[1].output, vgg16_model.layers[2].output,
            vgg16_model.layers[4].output, vgg16_model.layers[5].output,
            vgg16_model.layers[7].output, vgg16_model.layers[8].output,
            vgg16_model.layers[9].output
        ])
    model.trainable = False
    return model
Example #26
def transfer_learning():
    vgg_conv = vgg16.VGG16(weights='imagenet', include_top=False, input_shape=(IMG_SIZE, IMG_SIZE, CHANNELS))
    for layer in vgg_conv.layers[:]:
        layer.trainable = False
    model = Sequential()
    model.add(vgg_conv)
    model.add(Flatten())
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))
    model.summary()
    return model
Example #27
def un_filter_test():
    vgg_model = vgg16.VGG16(weights='imagenet')
    inter_model = dget_intermediate_model(vgg_model, 'block3_conv3')

    # load test image
    img = load_img(TEST_IMAGE_PATH, target_size=(224, 224))
    numpy_image = img_to_array(img)
    image_batch = np.expand_dims(numpy_image, axis=0)
    # prepare the image for the VGG model
    processed_image = vgg16.preprocess_input(image_batch)
    pred = inter_model.predict(processed_image)
    pred = np.squeeze(pred)
    output = un_filter(pred, inter_model, 'block3_conv3')
    dlog.debug(output)
Example #28
def get_prediction_class(filename):
    """

    Gets the prediction class from VGG16 pretrained model

    Input Args:

        filename = absolute path of an image

    Output:

        output_class

    """
    model_file_path = "saved_models/vgg16_weights_tf_dim_ordering_tf_kernels.h5"

    if not os.path.exists(model_file_path):
        print('downloading the vgg model')
        download_vgg16(model_file_path)
        print('download of vgg model finished')

    vgg_model = vgg16.VGG16(weights=model_file_path)
    # load an image in PIL format
    original = load_img(filename, target_size=(224, 224))

    # convert the PIL image to a numpy array
    # IN PIL - image is in (width, height, channel)
    # In Numpy - image is in (height, width, channel)
    numpy_image = img_to_array(original)

    # Convert the image / images into batch format
    # expand_dims will add an extra dimension to the data at a particular axis
    # We want the input matrix to the network to be of the form (batchsize, height, width, channels)
    # Thus we add the extra dimension to the axis 0.
    image_batch = np.expand_dims(numpy_image, axis=0)

    # prepare the image for the VGG model
    processed_image = vgg16.preprocess_input(image_batch.copy())

    # get the predicted probabilities for each class
    predictions = vgg_model.predict(processed_image)
    # print predictions
    # convert the probabilities to class labels
    # we will get top 5 predictions which is the default
    label_vgg = decode_predictions(predictions)
    # print VGG16 predictions
    output_class = label_vgg[0][0][1]

    return output_class
Example #29
def UNet(nClasses, input_height, input_width):
    assert input_height % 32 == 0
    assert input_width % 32 == 0

    img_input = layers.Input(shape=(input_height, input_width, 3))

    base = vgg16.VGG16(include_top=False,
                       weights='imagenet',
                       input_tensor=img_input)

    base_out = base.output
    b4 = base.get_layer(name="block4_pool").output
    b3 = base.get_layer(name="block3_pool").output
    b2 = base.get_layer(name="block2_pool").output
    b1 = base.get_layer(name="block1_pool").output

    x = layers.UpSampling2D((2, 2))(base_out)
    x = layers.concatenate([b4, x], axis=-1)
    x = layers.Conv2D(512, (3, 3), padding="same")(x)
    x = layers.BatchNormalization()(x)

    x = layers.UpSampling2D((2, 2))(x)
    x = layers.concatenate([b3, x], axis=-1)
    x = layers.Conv2D(256, (3, 3), padding="same")(x)
    x = layers.BatchNormalization()(x)

    x = layers.UpSampling2D((2, 2))(x)
    x = layers.concatenate([b2, x], axis=-1)
    x = layers.Conv2D(128, (3, 3), padding="same")(x)
    x = layers.BatchNormalization()(x)

    x = layers.UpSampling2D((2, 2))(x)
    x = layers.concatenate([b1, x], axis=-1)
    x = layers.Conv2D(64, (3, 3), padding="same")(x)
    x = layers.BatchNormalization()(x)

    x = layers.UpSampling2D((2, 2))(x)
    x = layers.Conv2D(64, (3, 3), padding="same")(x)
    x = layers.BatchNormalization()(x)

    x = layers.Conv2D(nClasses, (1, 1), padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    x = layers.Reshape((-1, nClasses))(x)
    x = layers.Activation("softmax")(x)

    return Model(inputs=img_input, outputs=x)
Example #30
def get_grad(gImArr):
    """
    Calculate the gradient of the loss function with respect to the generated image
    K.gradient
    """
    g_model = vgg16.VGG16(include_top=False, weights='imagenet')
    target_width = 224
    target_height = 224
    if gImArr.shape != (1, target_width, target_height, 3):
        gImArr = gImArr.reshape((1, target_width, target_height, 3))
    grad_fcn1 = K.gradients(get_loss([g_model.input]), g_model.input)[0]
    grad_fcn = K.function([g_model.input], [grad_fcn1])  # outputs must be a list

    print(type(grad_fcn))

    grad = grad_fcn([gImArr])[0].flatten().astype('float64')
    return grad
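Gradient functions flattened to float64 like this are typically consumed by scipy's L-BFGS optimizer in neural-style-transfer loops. A hedged sketch, assuming a companion get_loss_value helper (not shown in the original) that returns the scalar loss for a flattened image:

from scipy.optimize import fmin_l_bfgs_b
import numpy as np

x0 = np.random.rand(1 * 224 * 224 * 3)  # flattened starting image
# get_loss_value is a hypothetical helper returning a float64 scalar loss.
x_opt, final_loss, info = fmin_l_bfgs_b(get_loss_value, x0,
                                        fprime=get_grad, maxiter=20)
result = x_opt.reshape(224, 224, 3)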