Example No. 1
def create_model(model="vgg19", pool="avg", padding="valid"):
    if model == "vgg19":
        default_model = vgg19.VGG19(weights="imagenet", include_top=False)
    elif model == "vgg16":
        default_model = vgg16.VGG16(weights="imagenet", include_top=False)
    else:
        raise ValueError("Unsupported model: %s" % model)
    new_layers = []
    for i, layer in enumerate(default_model.layers):
        if i == 0:
            new_layers.append(keras.layers.Input((None, None, 3)))
        else:
            if isinstance(layer, keras.layers.Conv2D):
                config = layer.get_config()
                config["padding"] = padding
                new_layers.append(keras.layers.Conv2D.from_config(config))
            elif isinstance(layer, keras.layers.MaxPooling2D):
                config = layer.get_config()
                config["padding"] = padding
                if pool == "avg":
                    new_layers.append(
                        keras.layers.AveragePooling2D.from_config(config))
                else:
                    new_layers.append(
                        keras.layers.MaxPooling2D.from_config(config))
    inputs = new_layers[0]
    output = inputs
    for i in range(1, len(new_layers)):
        output = new_layers[i](output)
    model = keras.models.Model(inputs, output)
    for new, old in zip(model.layers, default_model.layers):
        new.set_weights(old.get_weights())
    return model
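A quick sanity check of the rebuilt network (a sketch; the input size is an assumption, and it must be large enough to survive five blocks of "valid" padding):

import numpy as np

# Build a fully convolutional VGG19 clone with "valid" padding and
# average pooling, then run a dummy image through it.
model = create_model(model="vgg19", pool="avg", padding="valid")
dummy = np.random.rand(1, 512, 512, 3).astype("float32")
print(model.predict(dummy).shape)  # spatial dims shrink under "valid" padding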
Example No. 2
def vgg_layers(layer_names):
    vgg = vgg19.VGG19(include_top=False, weights='imagenet')
    # Freeze the weights of the pretrained network.
    vgg.trainable = False
    outputs = [vgg.get_layer(name).output for name in layer_names]

    return models.Model([vgg.input], outputs)
Example No. 3
def create_vgg_model():
    model = vgg19.VGG19(include_top=False, pooling='avg', weights='imagenet')
    model.trainable = False
    content_layers = CONTENT_LAYERS
    style_layers = STYLE_LAYERS
    output_layers = [model.get_layer(layer).output
                     for layer in (content_layers + style_layers)]
    return tf.keras.models.Model(model.input, output_layers)
Example No. 4
def styleTransfer(cData, sData, tData):
    print("   Building transfer model.")
    contentTensor = K.variable(cData)
    styleTensor = K.variable(sData)
    genTensor = K.placeholder((1, CONTENT_IMG_H, CONTENT_IMG_W, 3))
    inputTensor = K.concatenate([contentTensor, styleTensor, genTensor],
                                axis=0)
    model = vgg19.VGG19(include_top=False,
                        weights="imagenet",
                        input_tensor=inputTensor)
    outputDict = dict([(layer.name, layer.output) for layer in model.layers])
    print("   VGG19 model loaded.")
    loss = 0.0
    styleLayerNames = [
        "block1_conv1", "block2_conv1", "block3_conv1", "block4_conv1",
        "block5_conv1"
    ]
    contentLayerName = "block5_conv2"
    print("   Calculating content loss.")
    contentLayer = outputDict[contentLayerName]
    contentOutput = contentLayer[0, :, :, :]
    genOutput = contentLayer[2, :, :, :]
    loss += CONTENT_WEIGHT * contentLoss(contentOutput, genOutput)
    print("   Calculating style loss.")
    for layerName in styleLayerNames:
        styleLayer = outputDict[layerName]
        styleOutput = styleLayer[1, :, :, :]
        genOutput = styleLayer[2, :, :, :]
        loss += (STYLE_WEIGHT / len(styleLayerNames)) * styleLoss(
            styleOutput, genOutput)
    loss += TOTAL_WEIGHT * totalLoss(genTensor)
    # Build a Keras function mapping the generated image to the loss and
    # its gradients, for use by the L-BFGS evaluator.
    gradient = K.gradients(loss, genTensor)
    outputs = [loss]
    outputs += gradient
    global f_outputs
    global x
    f_outputs = K.function([genTensor], outputs)
    x = cData
    print("   Beginning transfer.")
    for i in range(TRANSFER_ROUNDS):
        print("   Step %d." % i)
        # Perform gradient descent using fmin_l_bfgs_b; `evaluator` is
        # assumed to be a global Evaluator built around f_outputs.
        start_time = time.time()
        x, tLoss, info = fmin_l_bfgs_b(evaluator.loss,
                                       x.flatten(),
                                       fprime=evaluator.grads,
                                       maxfun=20)

        print("      Loss: %f." % tLoss)
        img = deprocessImage(x.copy())
        filename = 'hello' + str(i)
        saveFile = filename + '.jpg'
        imsave(saveFile, img)
        end_time = time.time()
        print("      Image saved to \"%s\"." % saveFile)
        print('Iteration %d completed in %ds' % (i, end_time - start_time))
Example No. 5
def styleTransfer(cData, sData, tData):
    print("   Building transfer model.")
    contentTensor = K.variable(cData)
    styleTensor = K.variable(sData)
    genTensor = K.placeholder((1, CONTENT_IMG_H, CONTENT_IMG_W, 3))
    inputTensor = K.concatenate([contentTensor, styleTensor, genTensor], axis=0)
    
    model = vgg19.VGG19(include_top=False, weights="imagenet", input_tensor=inputTensor)
    outputDict = dict([(layer.name, layer.output) for layer in model.layers])
    print("   VGG19 model loaded.")
    
    styleLayerNames = ["block1_conv1", "block2_conv1", "block3_conv1", "block4_conv1", "block5_conv1"]
    contentLayerName = "block5_conv2"
    loss = compute_loss(genTensor, outputDict, styleLayerNames, contentLayerName)
    
    # Setup gradients or use K.gradients().
    grads = K.gradients(loss, genTensor)
    kFunction = K.function([genTensor], [loss] + grads)
    evaluator = Evaluator(kFunction)
    print("   Beginning transfer.")
    x = tData
    for i in range(TRANSFER_ROUNDS):
        print("   Step %d." % i)
        # perform gradient descent using fmin_l_bfgs_b.
        x, tLoss, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(), fprime=evaluator.grads, maxiter=20)
        print("      Loss: %f." % tLoss)
        img = deprocessImage(x.copy())
        saveFile = CONTENT_IMG_PATH[:-4] + STYLE_IMG_PATH[:-4] + str(i) + ".jpg"
        imageio.imwrite(saveFile, img)
        print("      Image saved to \"%s\"." % saveFile)
    print("   Transfer complete.")
Example No. 6
    def make_model(self):
        # Build a VGG19 model loaded with pre-trained ImageNet weights
        self.model = vgg19.VGG19(weights="imagenet", include_top=False)

        # Get the symbolic outputs of each "key" layer (we gave them unique names).
        self.outputs_dict = dict([(layer.name, layer.output)
                                  for layer in self.model.layers])

        # Set up a model that returns the activation values for every layer in VGG19 (as a dict).
        self.feature_extractor = keras.Model(inputs=self.model.inputs,
                                             outputs=self.outputs_dict)

        # List of layers to use for the style loss.
        self.style_layer_names = [
            "block1_conv1",
            "block2_conv1",
            "block3_conv1",
            "block4_conv1",
            "block5_conv1",
        ]

        # The layer to use for the content loss.
        self.content_layer_name = "block5_conv2"

        self.optimizer = keras.optimizers.SGD(
            keras.optimizers.schedules.ExponentialDecay(
                initial_learning_rate=100.0, decay_steps=100, decay_rate=0.96))
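This setup follows the TF2 eager style, so the usual companion is a gradient step on the combination image rather than an L-BFGS loop. A minimal sketch, assuming a loss_fn helper (not part of the snippet) that maps the activations dict to a scalar:

import tensorflow as tf

def train_step(feature_extractor, optimizer, combination_image, loss_fn):
    # One optimizer step on the combination image (a tf.Variable).
    with tf.GradientTape() as tape:
        features = feature_extractor(combination_image)
        loss = loss_fn(features)
    grads = tape.gradient(loss, combination_image)
    optimizer.apply_gradients([(grads, combination_image)])
    return loss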
Example No. 7
    def __init__(self, settings: Settings, view_id, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.settings = settings
        self.view_id = view_id

        # region Use VGG16
        if self.settings.svcnn_model == 'vgg16':
            vgg16_model = vgg16.VGG16(include_top=False,
                                      input_shape=settings.input_shape,
                                      pooling=None,
                                      weights=None)
            x = vgg16_model.layers[-1].output
            x = Flatten(name='flatten')(x)
            x = Dense(4096, activation='relu', name='fc1')(x)
            x = Dense(4096, activation='relu', name='fc2')(x)
            x = Dense(settings.num_classes,
                      activation='softmax',
                      name='predictions')(x)
            self.model_input = vgg16_model.input
            self.model_output = x
            self.model = Model(inputs=self.model_input,
                               outputs=self.model_output)
        # endregion

        # region Use VGG19
        elif self.settings.svcnn_model == 'vgg19':
            vgg19_model = vgg19.VGG19(include_top=False,
                                      input_shape=settings.input_shape,
                                      pooling=None,
                                      weights=None)
            x = vgg19_model.layers[-1].output
            x = Flatten(name='flatten')(x)
            x = Dense(4096, activation='relu', name='fc1')(x)
            x = Dense(4096, activation='relu', name='fc2')(x)
            x = Dense(settings.num_classes,
                      activation='softmax',
                      name='predictions')(x)
            self.model_input = vgg19_model.input
            self.model_output = x
            self.model = Model(inputs=self.model_input,
                               outputs=self.model_output)
        # endregion

        # region Use ResNet50
        elif self.settings.svcnn_model == 'resnet50':
            resnet50_model = resnet50.ResNet50(
                include_top=False,
                input_shape=settings.input_shape,
                pooling=None,
                weights=None)
            x = resnet50_model.layers[-1].output
            x = GlobalAveragePooling2D(name='avg_pool')(x)
            x = Dense(settings.num_classes, activation='softmax',
                      name='probs')(x)
            self.model_input = resnet50_model.input
            self.model_output = x
            self.model = Model(inputs=self.model_input,
                               outputs=self.model_output)
        # endregion
        else:
            raise ValueError("Unsupported svcnn_model: %s" %
                             self.settings.svcnn_model)
Example No. 8
def create_model(content_layers, style_layers):
    vgg = vgg19.VGG19(include_top=False, weights="imagenet")
    vgg.trainable = False

    style_outputs = [vgg.get_layer(name).output for name in style_layers]
    content_outputs = [vgg.get_layer(name).output for name in content_layers]
    model_outputs = style_outputs + content_outputs

    return Model(vgg.input, model_outputs)
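The style outputs of such an extractor are normally compared through their Gram matrices. A minimal sketch of that helper (an assumption; it is not part of this example):

import tensorflow as tf

def gram_matrix(feature_maps):
    # Channel-by-channel correlations, averaged over spatial locations.
    result = tf.linalg.einsum("bijc,bijd->bcd", feature_maps, feature_maps)
    shape = tf.shape(feature_maps)
    num_locations = tf.cast(shape[1] * shape[2], tf.float32)
    return result / num_locations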
Example No. 9
def make_model():
    base_model = vgg19.VGG19(include_top=False, weights='imagenet')
    base_model.trainable = False
    content_layers = CONTENT_LAYERS
    style_layers = STYLE_LAYERS
    output_layers = [
        base_model.get_layer(layer).output
        for layer in (content_layers + style_layers)
    ]
    return tf.keras.models.Model(base_model.input, output_layers)
Example No. 10
    def make_model(self, include_full=False, input_shape=None):
        if include_full:
            base_model = vgg19.VGG19(include_top=True, weights="imagenet")
            return base_model
        elif input_shape is not None:
            base_model = vgg19.VGG19(include_top=False,
                                     input_shape=input_shape,
                                     weights="imagenet")
        else:
            base_model = vgg19.VGG19(include_top=False, weights="imagenet")

        base_model.trainable = False
        content_layers = self.CONTENT_LAYERS
        style_layers = self.OUTPUT_LAYERS
        output_layers = [
            base_model.get_layer(layer).output
            for layer in (content_layers + style_layers)
        ]
        return tf.keras.models.Model(base_model.input, output_layers)
Example No. 11
 def load_model(self, input_tensor):
     """Load a VGG19 model with the given input tensor.

     :param input_tensor: input tensor fed into VGG19
         :type input_tensor: TensorFlow Tensor

     :return: the VGG19 model
     """
     model = vgg19.VGG19(input_tensor=input_tensor, weights='imagenet',
                         include_top=False)
     return model
Example No. 12
def vgg_layers(layer_names, shape):
    vgg = vgg19.VGG19(include_top=False,
                      weights='imagenet',
                      input_shape=shape,
                      pooling='max')
    vgg.trainable = False

    outputs = [vgg.get_layer(name).output for name in layer_names]
    model = tf.keras.Model(inputs=vgg.input, outputs=outputs)

    return model
Example No. 13
def createVGGNetwork():
    model = vgg19.VGG19(weights='imagenet')
    # Prefix layer names to avoid clashes when combining with other models.
    for layer in model.layers:
        layer._name = 'vgg_' + layer._name

    x = Flatten()(model.get_layer('vgg_block5_pool').output)
    x = Dropout(0.3)(x)
    x = Dense(256, name='vgg_weights', activation=LeakyReLU(alpha=0.3))(x)
    x = Dense(4, activation='softmax')(x)

    model = Model(model.input, x)
    return model
Example No. 14
def run(job_dir, style_targets, style_targets_root, dataset, max_steps,
        save_summary_steps, batch_size, depthwise_separable_conv,
        learning_rate, style_layers, content_layers, style_weight,
        content_weight):
    with tf.summary.create_file_writer(job_dir).as_default():
        transform_net = build_transformation_network(
            n_styles=len(style_targets),
            depthwise_separable_conv=depthwise_separable_conv)
        transform_net.summary()

        vgg = vgg19.VGG19(include_top=False, weights="imagenet")
        vgg.trainable = False
        for layer in vgg.layers:
            layer.trainable = False

        content_outputs = [vgg.get_layer(l).output for l in content_layers]
        style_outputs = [vgg.get_layer(l).output for l in style_layers]

        loss_net = Model(vgg.input, style_outputs + content_outputs)

        style_images_processed = [
            load_and_preprocess_image("%s/%s" %
                                      (style_targets_root, style_target))
            for style_target in style_targets
        ]

        style_images = tf.squeeze(tf.stack(style_images_processed))

        style_features = loss_net(style_images)[:len(style_layers)]

        style_features = [gram_matrix(a) for a in style_features]
        style_features = [
            tf.tile(style_feature, [batch_size, 1, 1])
            for style_feature in style_features
        ]

        optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate,
                                             beta_1=0.9,
                                             beta_2=0.999)

        train(dataset=dataset,
              max_steps=max_steps,
              save_summary_steps=save_summary_steps,
              optimizer=optimizer,
              n_styles=len(style_targets),
              style_layers=style_layers,
              style_features=style_features,
              style_weight=style_weight,
              content_weight=content_weight,
              transform_net=transform_net,
              loss_net=loss_net)

        return transform_net, style_images_processed
Example No. 15
def get_model(layer_names):
    """Define a feature-extraction model using pretrained VGG19."""
    vgg = vgg19.VGG19(weights='imagenet', include_top=False,
                      input_shape=Config.IMAGE_SHAPE)
    vgg.trainable = False

    outputs = [vgg.get_layer(name).output for name in layer_names]
    model = tf.keras.Model(vgg.input, outputs)

    return model
Example No. 16
def precomputed_vgg19(img_paths):
    logging.info('precomputing vgg19 inputs')
    vgg = vgg19.VGG19(weights="imagenet", include_top=True)

    embeddings = []
    for i, (img_path, image_name) in enumerate(img_paths):
        img = load_image(img_path) / 255
        embedding = calc_vgg_embedding(img, vgg)
        embeddings.append({"file_name": image_name, "embedding": embedding})
        if (i + 1) % 100 == 0:
            print(i + 1)

    df = pd.DataFrame(embeddings)
    df.to_csv("./precomputed_inputs/vgg19.csv")
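calc_vgg_embedding is not shown; a hypothetical version (the shape handling is an assumption) would batch the image and take the network's output as the embedding:

import numpy as np

def calc_vgg_embedding(img, vgg):
    # Hypothetical helper: add a batch dimension and use the final
    # activations of the full VGG19 as the embedding vector.
    batch = np.expand_dims(img, axis=0)
    return vgg.predict(batch)[0]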
Example No. 17
    def setup_feature_extractor(self):
        '''
        Method to setup the model that retrieves the intermediate activations of VGG19 (as a dict, by name).
        '''
        # Build a VGG19 model loaded with pre-trained ImageNet weights
        model = vgg19.VGG19(weights="imagenet", include_top=False)

        # Get the symbolic outputs of each "key" layer (we gave them unique names).
        self.outputs_dict = dict([(layer.name, layer.output)
                                  for layer in model.layers])

        # Set up a model that returns the activation values for every layer in
        # VGG19 (as a dict).
        self.feature_extractor = keras.Model(inputs=model.inputs,
                                             outputs=self.outputs_dict)
Example No. 18
def styleTransfer(cData, sData, tData):
    print("   Building transfer model.")
    contentTensor = K.variable(cData)
    styleTensor = K.variable(sData)
    genTensor = K.placeholder((1, CONTENT_IMG_H, CONTENT_IMG_W, 3))
    inputTensor = K.concatenate([contentTensor, styleTensor, genTensor],
                                axis=0)

    model = vgg19.VGG19(weights="imagenet", include_top=False)
    outputDict = dict([(layer.name, layer.output) for layer in model.layers])
    print("   VGG19 model loaded.")
    loss = 0.0
    styleLayerNames = [
        "block1_conv1", "block2_conv1", "block3_conv1", "block4_conv1",
        "block5_conv1"
    ]
    contentLayerName = "block5_conv2"

    feature_extractor = keras.Model(inputs=model.inputs, outputs=outputDict)
    features = feature_extractor(inputTensor)

    print("   Calculating content loss.")
    contentLayer = outputDict[contentLayerName]
    contentOutput = contentLayer[0, :, :, :]
    genOutput = contentLayer[2, :, :, :]
    loss += loss + CONTENT_WEIGHT * contentLoss(contentTensor, genTensor)
    print("   Calculating style loss.")

    for layerName in styleLayerNames:
        layer_features = features[layerName]
        style_feature = layer_features[1, :, :, :]
        gen_feature = layer_features[2, :, :, :]
        style_loss = styleLoss(style_feature, gen_feature)

        loss += (STYLE_WEIGHT / len(styleLayerNames)) * style_loss
        # TODO: Setup gradients or use K.gradients().
        print("   Beginning transfer.")
        for i in range(TRANSFER_ROUNDS):
            print("   Step %d." % i)
            # TODO: perform gradient descent using fmin_l_bfgs_b.
            x, min_val, info = scipy.optimize.fmin_l_bfgs_b(
                evaluator.loss, x, fprime=evaluator.grads, maxfun=20)
            print("      Loss: %f." % tLoss)
            img = deprocessImage(x)
            saveFile = None  # TODO: Implement.
            # imsave(saveFile, img)   #Uncomment when everything is working right.
            print("      Image saved to \"%s\"." % saveFile)
            print("   Transfer complete.")
Example No. 19
def get_basic_model(input_shape, first_layer=6):
    vgg = vgg19.VGG19(weights="imagenet",
                      include_top=False, input_shape=input_shape)
    # Freeze the first `first_layer` convolutional layers of the network.
    index = 1
    while index <= first_layer:
        layer = vgg.layers[index]
        if "conv" in layer.name:
            layer.trainable = False
            index += 1
        else:
            index += 1
            first_layer += 1
        print(layer.name, layer.trainable)

    return vgg
Example No. 20
 def __init__(self, content: np.ndarray, style: np.ndarray):
     tf.compat.v1.disable_eager_execution()
     K.set_floatx('float64')
     self.content = content
     self.style = style
     print("   Building transfer model.")
     self.contentTensor = K.variable(self.content)
     self.styleTensor = K.variable(self.style)
     self.genTensor = K.placeholder((1, CONTENT_IMG_H, CONTENT_IMG_W, 3))
     self.inputTensor = K.concatenate(
         [self.contentTensor, self.styleTensor, self.genTensor], axis=0)
     self.model = vgg19.VGG19(include_top=False,
                              input_tensor=self.inputTensor)
     self.totalLoss = self.constructTotalLoss()
     self.gradient = self.constructGradient()
     self.kerasFunction = self.constructKerasFunction()
     self.runOutput = None
Example No. 21
def init_perception_model():
    global perception_model
    start = time.time()
    with tf.name_scope('Perceptual'):
        vgg = vgg19.VGG19(weights='imagenet', include_top=False)
        perception_model = Model(inputs=vgg.input,
                                 outputs=[
                                     vgg.get_layer('block1_conv2').output,
                                     vgg.get_layer('block2_conv2').output,
                                     vgg.get_layer('block3_conv2').output,
                                     vgg.get_layer('block4_conv2').output,
                                     vgg.get_layer('block5_conv2').output
                                 ])
        for layer in perception_model.layers:
            layer.trainable = False

    print('Loaded perception model:', time.time() - start)
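A model like this is typically used for a perceptual (feature-space) loss. A minimal sketch, assuming init_perception_model() has run and y_true/y_pred are image batches:

import tensorflow as tf

def perceptual_loss(y_true, y_pred):
    # Mean squared error summed across the five VGG feature maps.
    true_feats = perception_model(y_true)
    pred_feats = perception_model(y_pred)
    return tf.add_n([tf.reduce_mean(tf.square(t - p))
                     for t, p in zip(true_feats, pred_feats)])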
Example No. 22
 def __init__(self, input_dim, base_image_path, style_reference_image_path,
              weights):
     self.input_dim = input_dim
     self.img_nrows = self.input_dim[0]
     self.img_ncols = self.input_dim[1]
     self.channels = self.input_dim[2]
     self.base_image_path = base_image_path
     self.style_reference_image_path = style_reference_image_path
     self.content_weight = weights[0]
     self.style_weight = weights[1]
     self.total_weight = weights[2]
     vgg_model = vgg19.VGG19(weights="imagenet", include_top=False)
     outputs_dict = dict([(layer.name, layer.output)
                          for layer in vgg_model.layers])
     self.feature_extractor = keras.Model(inputs=vgg_model.inputs,
                                          outputs=outputs_dict)
     self._build()
Example No. 23
def styleTransfer(cData, sData, tData):
    print("   Building transfer model.")
    contentTensor = K.variable(cData)
    styleTensor = K.variable(sData)
    genTensor = K.placeholder((1, CONTENT_IMG_H, CONTENT_IMG_W, 3))
    inputTensor = K.concatenate([contentTensor, styleTensor, genTensor], axis=0)
    model = vgg19.VGG19(include_top=False, weights="imagenet", input_tensor=inputTensor)
    outputDict = dict([(layer.name, layer.output) for layer in model.layers])
    print("   VGG19 model loaded.")
    loss = 0.0
    styleLayerNames = ["block1_conv1", "block2_conv1", "block3_conv1", "block4_conv1", "block5_conv1"]
    contentLayerName = "block5_conv2"
    print("   Calculating content loss.")
    contentLayer = outputDict[contentLayerName]
    contentOutput = contentLayer[0, :, :, :]
    genOutput = contentLayer[2, :, :, :]
    loss += CONTENT_WEIGHT * contentLoss(contentOutput, genOutput)
    print("   Calculating style loss.")
    for layer_name in styleLayerNames:
        layer_features = outputDict[layer_name]
        style_reference_features = layer_features[1, :, :, :]
        combination_features = layer_features[2, :, :, :]
        sl = styleLoss(style_reference_features, combination_features)
        loss = loss + (STYLE_WEIGHT / len(styleLayerNames)) * sl
    loss += TOTAL_WEIGHT * totalLoss(genTensor)
    gradient = K.gradients(loss, genTensor)
    outputs = [loss]
    if isinstance(gradient, (list, tuple)):
        outputs += gradient
    else:
        outputs.append(gradient)
    global f_outputs
    f_outputs = K.function([genTensor], outputs)
    print("   Beginning transfer.")
    evaluator = Evaluator()
    for i in range(TRANSFER_ROUNDS):
        index = i + 1
        print("   Step %d." % i)
        tData, tLoss, info = fmin_l_bfgs_b(evaluator.loss, tData.flatten(), fprime=evaluator.grads, maxfun=20)
        print("      Loss: %f." % tLoss)
        img = deprocessImage(tData.copy())
        saveFile = "image" + str(index) + ".jpg"
        imageio.imwrite(saveFile, img)
        print("      Image saved to \"%s\"." % saveFile)
    print("   Transfer complete.")
Example No. 24
def run(job_dir,
        generator_dir,
        discriminator_dir,
        content_layers,
        real_dataset,
        comics_dataset,
        comics_edge_blurred_dataset,
        learning_rate,
        content_weight,
        max_steps,
        save_summary_steps,
        discriminator_training_interval=1):
    with tf.summary.create_file_writer(job_dir).as_default():
        generator = tf.keras.experimental.load_from_saved_model(
            generator_dir,
            custom_objects={"InstanceNormalization": InstanceNormalization},
        )
        generator.summary()

        discriminator = tf.keras.experimental.load_from_saved_model(
            discriminator_dir,
            custom_objects={"InstanceNormalization": InstanceNormalization},
        ) if discriminator_dir is not None else build_discriminator()
        discriminator.summary()

        vgg = vgg19.VGG19(include_top=False, weights="imagenet")
        output_layers = [
            vgg.get_layer(layer).output for layer in content_layers
        ]
        loss_net = Model(vgg.input, output_layers, name="loss_net")

        train(real_dataset=real_dataset,
              comics_dataset=comics_dataset,
              comics_edge_blurred_dataset=comics_edge_blurred_dataset,
              generator=generator,
              discriminator=discriminator,
              loss_net=loss_net,
              learning_rate=learning_rate,
              content_weight=content_weight,
              max_steps=max_steps,
              save_summary_steps=save_summary_steps,
              discriminator_training_interval=discriminator_training_interval)

        return generator
Example No. 25
def get_model():
  """Creates a model with access to intermediate layers. 
  
  These layers will then be used to create a new model that will take the
  content image and return the outputs from these intermediate layers from the
  VGG model. 
  
  Returns:
    A keras model that takes image inputs and outputs the style and content
    intermediate layers.
  """

  vgg = vgg19.VGG19(include_top=False, weights='imagenet')
  vgg.trainable = False
 
  style_outputs = [vgg.get_layer(name).output for name in style_layers]
  content_outputs = [vgg.get_layer(name).output for name in content_layers]
  model_outputs = style_outputs + content_outputs

  return models.Model(vgg.input, model_outputs)
Example No. 26
def run(job_dir, dataset, content_layers, num_residual_blocks, use_upsampling,
        learning_rate, max_steps, save_summary_steps):
    with tf.summary.create_file_writer(job_dir).as_default():
        vgg = vgg19.VGG19(include_top=False, weights="imagenet")
        content_outputs = [vgg.get_layer(l).output for l in content_layers]
        loss_net = Model(vgg.input, content_outputs, name="loss_net")

        transform_net = build_generator(
            num_residual_blocks=num_residual_blocks,
            use_upsampling=use_upsampling)
        transform_net.summary()

        train(dataset=dataset,
              transform_net=transform_net,
              loss_net=loss_net,
              learning_rate=learning_rate,
              max_steps=max_steps,
              save_summary_steps=save_summary_steps)

        return transform_net
Example No. 27
def _get_model(input_tensor, model_name="VGG19"):
    if model_name == "VGG19":
        model = vgg19.VGG19(input_tensor=input_tensor,
                            include_top=False,
                            weights="imagenet")
    else:
        raise ValueError("Unsupported model_name: %s" % model_name)
    """
    elif model_name == "Xception":
        model = Xception(
                input_shape=input_shape,
                include_top=False,
                weights="imagenet"
            )

    elif model_name == "InceptionV3":
        model = InceptionV3(
                input_shape=input_shape,
                include_top=False,
                weights="imagenet"
            )
    """
    return model
Example No. 28
def vgg19Segmentation(n_classes, input_height=224, input_width=224):

    img_input = Input(shape=(input_height, input_width, 3))

    vgg19_model = vgg19.VGG19(input_tensor=img_input, weights="imagenet")

    o = vgg19_model.get_layer('block4_pool').output

    o = ZeroPadding2D((1, 1), data_format='channels_last')(o)
    o = Conv2D(512, (3, 3), padding='valid', data_format='channels_last')(o)
    o = BatchNormalization()(o)

    o = UpSampling2D((2, 2), data_format='channels_last')(o)
    o = ZeroPadding2D((1, 1), data_format='channels_last')(o)
    o = Conv2D(512, (3, 3), padding='valid', data_format='channels_last')(o)
    o = BatchNormalization()(o)

    o = UpSampling2D((2, 2), data_format='channels_last')(o)
    o = ZeroPadding2D((1, 1), data_format='channels_last')(o)
    o = Conv2D(256, (3, 3), padding='valid', data_format='channels_last')(o)
    o = BatchNormalization()(o)

    o = UpSampling2D((2, 2), data_format='channels_last')(o)
    o = ZeroPadding2D((1, 1), data_format='channels_last')(o)
    o = Conv2D(128, (3, 3), padding='valid', data_format='channels_last')(o)
    o = BatchNormalization()(o)

    o = UpSampling2D((2, 2), data_format='channels_last')(o)
    o = ZeroPadding2D((1, 1), data_format='channels_last')(o)
    o = Conv2D(64, (3, 3), padding='valid', data_format='channels_last')(o)
    o = BatchNormalization()(o)

    o = Conv2D(n_classes, (3, 3), padding='same',
               data_format='channels_last')(o)
    o = Activation('softmax')(o)

    model = Model(img_input, o)

    return model
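A hypothetical training setup for this segmentation network (optimizer, loss, and data are assumptions; masks must be one-hot with shape (224, 224, n_classes)):

model = vgg19Segmentation(n_classes=21)
model.compile(optimizer="adam",
              loss="categorical_crossentropy",
              metrics=["accuracy"])
# model.fit(train_images, train_masks, batch_size=8, epochs=10)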
Example No. 29
def pretrained_VGG(weight_layer_num=16,
                   side=64,
                   labels=2,
                   frozen_layer_num=None,
                   frozen_block_num=None):
    if weight_layer_num == 19:
        vgg_ = vgg19.VGG19(weights='imagenet',
                           input_shape=(side, side, 3),
                           include_top=False)
        block_layer_table = [0, 4, 7, 12, 17, 21]
    else:
        vgg_ = vgg16.VGG16(weights='imagenet',
                           input_shape=(side, side, 3),
                           include_top=False)
        block_layer_table = [0, 4, 7, 11, 15, 19]

    top_ = Sequential()
    top_.add(Flatten(input_shape=vgg_.output_shape[1:]))
    top_.add(Dense(1024, activation='relu', kernel_initializer='he_normal'))
    top_.add(Dense(1024, activation='relu', kernel_initializer='he_normal'))
    top_.add(Dense(labels, activation='softmax'))

    model = Model(inputs=vgg_.input, outputs=top_(vgg_.output))

    if frozen_layer_num is None and frozen_block_num is None:
        print("No frozen layers: fine-tuning")
        return model

    if frozen_block_num is not None:
        frozen_layer_num = block_layer_table[frozen_block_num]

    for layer in vgg_.layers[:frozen_layer_num]:
        layer.trainable = False

    for i, layer in enumerate(model.layers):
        print("layer " + str(i) + " trainable: " + str(layer.trainable))

    return model
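A hypothetical call that freezes everything up to the end of block 2 of VGG19 via the lookup table above:

model = pretrained_VGG(weight_layer_num=19, side=64, labels=2,
                       frozen_block_num=2)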
Example No. 30
def create_model(n_out_classes=32, dropout_enable=False):

    # # model = mobilenet.MobileNet(input_shape = (256,256,3),
    # #                             classes= n_out_classes,
    # #                             weights=None
    # #                             )
    # model = Sequential()
    # ##Conv2d Layer
    # model.add(Conv2D(64, (3, 3), activation='relu', input_shape=(256, 256, 3),padding="valid"))
    # model.add(Activation('relu'))
    # ##3x3 pooling
    # model.add(BatchNormalization())
    # model.add(MaxPooling2D(pool_size=(3, 3)))
    # model.add(Activation('relu'))
    # model.add(BatchNormalization())
    # model.add(Conv2D(32, (3, 3), activation='relu',padding="valid"))
    # model.add(Activation('relu'))
    # model.add(BatchNormalization())
    # model.add(MaxPooling2D(pool_size=(3, 3)))
    # model.add(Activation('relu'))
    # model.add(BatchNormalization())
    #
    #
    # if dropout_enable:
    #     model.add(Dropout(0.20))
    #
    # model.add(Flatten())
    #
    # model.add(Dense(n_out_classes, activation='softmax'))

    model = vgg19.VGG19(
        weights="./vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5",
        include_top=False,
        classes=n_out_classes)  # note: `classes` is ignored when include_top=False

    print(model.summary())

    return model