Example #1
def create_uncertainty_model(learning_rate=1e-3, num_hidden_units=20, model_type='mobilenet_v2'):
    mu_input = Input(shape=(num_classes,))
    if model_type == 'mobilenet_v2':
        base_model = mobilenet_v2.MobileNetV2(include_top=False, weights='imagenet', input_tensor=None,
                                              input_shape=(224, 224, 3), pooling='avg', classes=num_classes)
    elif model_type == 'vgg16':
        base_model = vgg16.VGG16(include_top=False, weights='imagenet', input_tensor=None,
                                 input_shape=(224, 224, 3), pooling='avg', classes=num_classes)
    elif model_type == 'resnet50':
        base_model = resnet50.ResNet50(include_top=False, weights='imagenet', input_tensor=None,
                                       input_shape=(224, 224, 3), pooling='avg', classes=num_classes)
    elif model_type == 'vgg19':
        base_model = vgg19.VGG19(include_top=False, weights='imagenet', input_tensor=None,
                                 input_shape=(224, 224, 3), pooling='avg', classes=num_classes)
    elif model_type == 'inception_v3':
        base_model = inception_v3.InceptionV3(include_top=False, weights='imagenet', input_tensor=None,
                                              input_shape=(224, 224, 3), pooling='avg', classes=num_classes)
    else:
        base_model = mobilenet_v2.MobileNetV2(include_top=False, weights='imagenet', input_tensor=None,
                                              input_shape=(224, 224, 3), pooling='avg', classes=num_classes)
    base_model.trainable = False
    beta = base_model.output
    beta = Dense(num_hidden_units, activation='relu')(beta)
    beta = Dense(num_hidden_units, activation='relu')(beta)
    beta = Dense(num_hidden_units, activation='relu')(beta)
    # beta = Dense(num_hidden_units,activation='relu')(beta)
    beta = Dense(1, activation='sigmoid')(beta)
    output = concatenate([mu_input, beta])

    model = Model(inputs=[mu_input, base_model.input], outputs=output)
    model.compile(loss=dirichlet_aleatoric_cross_entropy,
                  optimizer=Adam(lr=learning_rate),
                  metrics=[max_beta, min_beta]
                  )
    return model
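A hedged usage sketch: mu_batch, images, and labels below are hypothetical arrays, and num_classes, dirichlet_aleatoric_cross_entropy, max_beta, and min_beta must already be defined, as assumed by the function above.

# Hypothetical usage (mu_batch, images, labels are placeholder arrays)
model = create_uncertainty_model(learning_rate=1e-3, model_type='vgg16')
# mu_batch: per-sample class probabilities, shape (N, num_classes)
# images:   preprocessed inputs, shape (N, 224, 224, 3)
# labels:   targets in whatever form dirichlet_aleatoric_cross_entropy expects
model.fit([mu_batch, images], labels, batch_size=32, epochs=5)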
Example #2
    def __init__(self, cls_num, input_size):
        super().__init__()
        self.model_state = "Reference"

        # self.ref_model = Sequential(name="reference")
        # self.tar_model = Sequential(name="secondary")
        #
        # self.build_network(cls_num, input_size)

        self.vgg_model = vgg16.VGG16(weights="imagenet")

        # self.ref_model = self.get_dropout_model( self.vgg_model, 2)
        # self.tar_model = self.get_dropout_model(self.vgg_model, 1)

        self.ref_model = self.vgg_model
        self.tar_model = self.vgg_model

        self.features_net = self.get_features_network(self.vgg_model)

        for layer in self.vgg_model.layers[:19]:
            layer.trainable = False

        self.ref_model.summary()
        self.tar_model.summary()
        self.features_net.summary()

        self.ready_for_train = False
        self.trainer = None
        self.validator = None
def cover():
    global root2, frame2, time
    root2 = tk.Toplevel()
    root2.title('Portable Image Classifier')
    root2.resizable(False, False)
    tit = tk.Label(root2,
                   text="Age Cover Gender Classification",
                   padx=25,
                   pady=6,
                   font=("", 12)).pack()
    canvas = tk.Canvas(root2, height=500, width=500, bg='grey')
    canvas.pack()
    frame2 = tk.Frame(root2, bg='white')
    for img_display in frame2.winfo_children():
        img_display.destroy()

    image_data = r"C:\Users\User\Desktop\evaluation\cover.jpg"
    basewidth = 370  # resize the image for display
    img = Image.open(image_data)
    wpercent = (basewidth / float(img.size[0]))
    hsize = int((float(img.size[1]) * float(wpercent)))
    img = img.resize((basewidth, hsize), Image.ANTIALIAS)
    img = ImageTk.PhotoImage(img)
    file_name = image_data.split('/')
    panel = tk.Label(frame2, text=str('          ').upper()).pack()
    panel_image = tk.Label(frame2, image=img).pack()

    frame2.place(relwidth=0.8, relheight=0.8, relx=0.1, rely=0.1)

    chose_image = tk.Button(root2,
                            text='Low Light Image',
                            padx=45,
                            pady=10,
                            fg="white",
                            bg="grey",
                            command=low)
    chose_image.pack(side=tk.LEFT)

    enhance_image = tk.Button(root2,
                              text='Blurred Image',
                              padx=35,
                              pady=10,
                              fg="white",
                              bg="grey",
                              command=blur)
    enhance_image.pack(side=tk.LEFT)
    class_image = tk.Button(root2,
                            text='Pixelated Image',
                            padx=35,
                            pady=10,
                            fg="white",
                            bg="grey",
                            command=pixelate)
    class_image.pack(side=tk.RIGHT)
    vgg_model = vgg16.VGG16(weights='imagenet')
    if time == 'second':
        root.destroy()
    root2.mainloop()
    def loadModel(self):
        vgg16Model = vgg16.VGG16(include_top=False, weights='imagenet', input_shape=imageDataList[0].shape)
        model = Sequential()

        for layer in vgg16Model.layers:
            model.add(layer)
        model.add(Flatten(name='flatten'))
        model.add(Dense(1000, activation='relu'))
        model.add(Dense(1, activation='sigmoid'))
        model.load_weights(self.weightFilePath)
        return model
def main():
    global frame, img, root
    root = tk.Toplevel()
    root.title('Portable Image Classifier')
    root.resizable(False, False)
    tit = tk.Label(root,
                   text="Age defult Gender Classification",
                   padx=25,
                   pady=6,
                   font=("", 12)).pack()
    canvas = tk.Canvas(root, height=550, width=550, bg='grey')
    canvas.pack()
    frame = tk.Frame(root, bg='white')
    frame.place(relwidth=0.8, relheight=0.8, relx=0.1, rely=0.1)

    chose_image = tk.Button(root,
                            text='Load Image',
                            padx=45,
                            pady=10,
                            fg="white",
                            bg="grey",
                            command=load_img)
    chose_image.pack(side=tk.LEFT)

    enhance_image = tk.Button(root,
                              text='Enhance Image',
                              padx=35,
                              pady=10,
                              fg="white",
                              bg="grey",
                              command=enhance_img)
    enhance_image.pack(side=tk.LEFT)

    goback = tk.Button(root,
                       text='Back',
                       padx=35,
                       pady=10,
                       fg="white",
                       bg="grey",
                       command=back)
    goback.pack(side=tk.RIGHT)

    class_image = tk.Button(root,
                            text='Classify Image',
                            padx=35,
                            pady=10,
                            fg="white",
                            bg="grey",
                            command=classify)
    class_image.pack(side=tk.LEFT)
    vgg_model = vgg16.VGG16(weights='imagenet')
    root2.destroy()
    root.mainloop()
Example #6
    def __init__(self):
        super().__init__()
        self.model = vgg16.VGG16(weights='imagenet')

        self.is_compiled = False
        self.loss = None
        self.optimizer = None
        self.train_loss_tracker = tf.keras.metrics.Mean(name="train_loss")
        self.val_loss_tracker = tf.keras.metrics.Mean(name="val_loss")

        self.train_accuracy_tracker = tf.keras.metrics.CategoricalAccuracy()
        self.val_accuracy_tracker = tf.keras.metrics.CategoricalAccuracy()
    def getModel(self, input_shape, classes=1):
        vgg16_model = vgg16.VGG16(include_top=False, weights='imagenet', input_shape=input_shape)

        model = Sequential()
        for layer in vgg16_model.layers:
            model.add(layer)
        for layer in model.layers:
            layer.trainable = False

        model.add(Flatten(name='flatten'))
        model.add(Dense(1000, activation='relu'))
        model.add(Dense(classes, activation='sigmoid'))

        return model
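A short, hedged sketch of how such a model is typically compiled for binary classification; the instance name, optimizer, and loss below are assumptions, not taken from the original.

# Hypothetical usage ('network' stands in for an instance of the enclosing class)
model = network.getModel(input_shape=(224, 224, 3), classes=1)
model.compile(optimizer=Adam(lr=1e-4),
              loss='binary_crossentropy',
              metrics=['accuracy'])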
def init():
    global model
    global model_extractfeatures
    global graph
    global collection
    model = vgg16.VGG16(weights='imagenet', include_top=True)
    model_extractfeatures = Model(model.input,
                                  outputs=model.get_layer('fc2').output)
    graph = tf.get_default_graph()
    global fin_centroids
    fin_centroids = Results.centroid_to_dict(
        Results.read_centroids("data/centroids.pkl"))
    client = MongoClient()
    db = client.flickrurls
    collection = db["final_data"]
    def __init__(self):
        super().__init__()
        # self.__model = vgg16.VGG16(weights='imagenet', include_top=False)
        vgg_conv = vgg16.VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
        for layer in vgg_conv.layers[:]:
            layer.trainable = False

        self.__model = tf.keras.Sequential()
        self.__model.add(vgg_conv)
        self.__model.add(tf.keras.layers.Flatten())
        self.__model.add(tf.keras.layers.Dense(4096, activation='relu'))
        self.__model.add(tf.keras.layers.Dropout(0.5))
        self.__model.add(tf.keras.layers.Dense(4096, activation='relu'))
        self.__model.add(tf.keras.layers.Dropout(0.5))
        self.__model.add(tf.keras.layers.Dense(76, activation='softmax'))

        self.__model.summary()
Example #10
def get_loss_net(pastiche_net_output, input_tensor=None):
    '''
    Instantiates a VGG net and applies its layers on top of the pastiche net's
    output.
    '''
    loss_net = vgg16.VGG16(weights='imagenet',
                           include_top=False,
                           input_tensor=input_tensor)
    targets_dict = dict([(layer.name, layer.output)
                         for layer in loss_net.layers])
    i = pastiche_net_output
    # We need to apply all layers to the output of the style net
    outputs_dict = {}
    for l in loss_net.layers[1:]:  # Ignore the input layer
        i = l(i)
        outputs_dict[l.name] = i

    return loss_net, outputs_dict, targets_dict
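A minimal sketch of how the returned dictionaries might feed a Gram-matrix style loss; the helper and layer choice below are illustrative, not from the original.

from tensorflow.python.keras import backend as K

def gram_matrix(x):
    # flatten a (h, w, c) feature map to (c, h*w) and take its outer product
    features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    return K.dot(features, K.transpose(features))

style_layers = ['block1_conv2', 'block2_conv2']  # hypothetical layer choice
style_loss = sum(
    K.sum(K.square(gram_matrix(outputs_dict[name][0]) - gram_matrix(targets_dict[name][0])))
    for name in style_layers)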
Example #11
def FCN32(nClasses, input_height, input_width):

    img_input = Input(shape=(input_height, input_width, 3))
    model = vgg16.VGG16(include_top=False,
                        weights='imagenet',
                        input_tensor=img_input)
    # VGG16 without its fully connected layers outputs a 7x7x512 feature map
    # VGG16: 5 conv blocks; block 1 has 64 filters with 3x3 kernels, then 128, 256, 512
    o = Conv2D(filters=1024,
               kernel_size=(7, 7),
               padding='same',
               activation='relu',
               name='fc6')(model.output)
    o = Dropout(0.5)(o)
    o = Conv2D(filters=1024,
               kernel_size=(1, 1),
               padding='same',
               activation='relu',
               name='fc7')(o)
    o = Dropout(0.5)(o)

    o = Conv2D(filters=nClasses,
               kernel_size=(1, 1),
               padding='same',
               activation='relu',
               name='score_fr')(o)
    o = Conv2DTranspose(filters=nClasses,
                        kernel_size=(32, 32),
                        strides=(32, 32),
                        padding='valid',
                        activation=None,
                        name='score2')(o)
    o = Reshape((-1, nClasses))(o)
    o = Activation("softmax")(o)
    fcn32 = Model(img_input, o)
    return fcn32
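A hedged usage sketch; the class count, input size, and compile settings are illustrative. The input sides should be multiples of 32 so the stride-32 transposed convolution restores the full resolution.

# Hypothetical usage: 21 classes (e.g. PASCAL VOC), 320x320 inputs
fcn32 = FCN32(nClasses=21, input_height=320, input_width=320)
fcn32.compile(optimizer='adam', loss='categorical_crossentropy')
# labels must be flattened to (batch, 320 * 320, 21) to match the Reshape layer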
Example #12
    def __init__(self, classes_num, input_size):
        super().__init__(classes_num, input_size)
        self.output_path = None
        vgg_conv = vgg16.VGG16(weights=None,
                               include_top=False,
                               classes=classes_num,
                               input_shape=(input_size[0], input_size[0], 3))
        vgg_conv.summary()

        # for layer in vgg_conv.layers[:]:
        #     layer.trainable = False

        self.__model = tf.keras.Sequential()
        self.__model.add(vgg_conv)
        self.__model.add(tf.keras.layers.Flatten())
        self.__model.add(tf.keras.layers.Dense(4096, activation='relu'))
        self.__model.add(tf.keras.layers.Dropout(0.5))
        self.__model.add(tf.keras.layers.Dense(4096, activation='relu'))
        self.__model.add(tf.keras.layers.Dropout(0.5))

        self.__model.add(
            tf.keras.layers.Dense(classes_num, activation='softmax'))

        self.__model.summary()
Example #13
    def __init__(self, classes_num, input_size):
        super().__init__(classes_num, input_size)
        self.output_path = None
        vgg_conv = vgg16.VGG16(weights=None,
                               include_top=False,
                               input_shape=(input_size[0], input_size[0], 3))
        vgg_conv.summary()

        # for layer in vgg_conv.layers[:]:
        #     layer.trainable = False

        self.__model = tf.keras.Sequential()
        self.__model.add(vgg_conv)
        self.__model.add(tf.keras.layers.Flatten())
        self.__model.add(tf.keras.layers.Dense(4096, activation='relu'))
        self.__model.add(tf.keras.layers.Dense(4096, activation='relu'))
        self.__model.add(
            tf.keras.layers.Dense(classes_num, activation='softmax'))

        self.__model.summary()

        if os.path.exists(self.get_last_ckpt_path()):
            self.load_model(self.get_last_ckpt_path())
            print("loads last weights")
    def __init__(self):
        super().__init__()
        self.__model = vgg16.VGG16(weights='imagenet')
        self.__model.summary()
Example #15
    parser.add_argument('--style_imgs',
                        default=None,
                        nargs='+',
                        help='Style image file names.')
    parser.add_argument('--style_img_size',
                        type=int,
                        default=[None],
                        nargs='+',
                        help='Largest size of the style images')
    parser.add_argument('--style_layers', type=str, nargs='+', default=def_sl)
    parser.add_argument('--gpu', type=str, default='')
    parser.add_argument('--allow_growth', default=False, action='store_true')
    args = parser.parse_args()

    config_gpu(args.gpu, args.allow_growth)

    loss_net = vgg16.VGG16(weights='imagenet', include_top=False)

    targets_dict = dict([(layer.name, layer.output)
                         for layer in loss_net.layers])

    s_targets = get_style_features(targets_dict, args.style_layers)

    get_style_target = K.function([loss_net.input], s_targets)
    gm_lists = [[] for l in args.style_layers]

    img_list = []
    img_size_list = []
    # Get style image names or get all images in the directory
    if args.style_imgs is None:
        args.style_imgs = os.listdir(args.style_dir)
Example #16
from tensorflow.python.keras.applications import vgg16

weights_path = r"F:\5-model data\vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5"
model = vgg16.VGG16(weights=weights_path, include_top=False)
model.summary()

# DeepDream configuration
# NOTE: the layer names below ('mixed2'...'mixed5') come from InceptionV3;
# the VGG16 model loaded above names its layers 'block1_conv1' ... 'block5_conv3',
# so these keys would need remapping before use with this model.
layer_contributions = {
    'mixed2': 0.2,
    'mixed3': 3.,
    'mixed4': 2.,
    'mixed5': 1.5,
}
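A hedged sketch (not in the original snippet) of how layer_contributions is typically turned into the DeepDream loss, assuming the keys have been remapped to layers that exist in the model:

from tensorflow.python.keras import backend as K

layer_dict = {layer.name: layer for layer in model.layers}
loss = K.variable(0.)
for layer_name, coeff in layer_contributions.items():
    activation = layer_dict[layer_name].output
    # add each layer's squared activations (borders trimmed), weighted by its
    # coefficient and normalized by the activation tensor's size
    scaling = K.prod(K.cast(K.shape(activation), 'float32'))
    loss = loss + coeff * K.sum(K.square(activation[:, 2:-2, 2:-2, :])) / scaling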
content_image = K.zeros(shape=shape)

images = K.concatenate([style_image, target_image, content_image], axis=0)

# Create tensor variables for masks
raw_style_mask, raw_target_mask = load_mask_labels()
style_mask = K.variable(raw_style_mask.astype("float32"))
target_mask = K.variable(raw_target_mask.astype("float32"))
masks = K.concatenate([style_mask, target_mask], axis=0)

# index constants for images and tasks variables
STYLE, TARGET, CONTENT = 0, 1, 2

# Build image model, mask model and use layer outputs as features
# image model as VGG16
image_model = vgg16.VGG16(include_top=False, input_tensor=images)

# mask model as a series of pooling
mask_input = Input(tensor=masks, shape=(None, None, None), name="mask_input")
x = mask_input
for layer in image_model.layers[1:]:
    name = 'mask_%s' % layer.name
    if 'conv' in layer.name:
        x = AveragePooling2D((3, 3),
                             strides=(1, 1),
                             name=name,
                             border_mode="same")(x)
    elif 'pool' in layer.name:
        x = AveragePooling2D((2, 2), name=name)(x)
mask_model = Model(mask_input, x)
import os
import numpy as np
import matplotlib.pyplot as plt
import pprint
import pandas as pd
import pickle
import time
import subprocess

from tensorflow.python.keras.preprocessing import image
from tensorflow.python.keras.applications import vgg16
from tensorflow.python.keras.applications.vgg16 import preprocess_input
from tensorflow.python.keras.models import Model

# pp = pprint.PrettyPrinter(indent=2)

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

model = vgg16.VGG16(weights='imagenet', include_top=True)
model_extractfeatures = Model(model.input,
                              outputs=model.get_layer('fc2').output)


def read_centroids(path):
    f = open(path, "rb")
    centroids = pickle.load(f)
    f.close()
    return centroids


def to_float_arr(arr):
    return np.array(list(map(float, arr)))

Example #19
def load_trained_model():
    vgg_model = vgg16.VGG16(weights='imagenet')
    feature_extractor = Model(inputs=vgg_model.input,
                              outputs=vgg_model.get_layer('fc2').output)
    feature_extractor.summary()
    return feature_extractor
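A hedged usage sketch; 'query.jpg' is a placeholder file name, and the preprocessing imports mirror those used elsewhere in these examples.

from tensorflow.python.keras.preprocessing import image
from tensorflow.python.keras.applications.vgg16 import preprocess_input
import numpy as np

extractor = load_trained_model()
img = image.load_img('query.jpg', target_size=(224, 224))  # placeholder path
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
features = extractor.predict(x)  # shape (1, 4096): the fc2 activations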
import cv2
import numpy as np
from tensorflow.python.keras.applications import vgg16
from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.optimizers import Adam
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator

vgg16_model = vgg16.VGG16()  # load the pretrained VGG16 model
# Create folders named Dog and Cat under the Train, Test and Valid directories and put the photos into them separately
train_path = 'C:\\Users\\bagat\\OneDrive\\Masaüstü\\DCimages\\Train'
valid_path = 'C:\\Users\\bagat\\OneDrive\\Masaüstü\\DCimages\\Valid'
test_path = 'C:\\Users\\bagat\\OneDrive\\Masaüstü\\DCimages\\Test'

# reads images in batches from the given path
train_batches = ImageDataGenerator().flow_from_directory(
    train_path, target_size=(224, 224), classes=['Cat', 'Dog'], batch_size=10)
test_batches = ImageDataGenerator().flow_from_directory(test_path,
                                                        target_size=(224, 224),
                                                        classes=['Cat', 'Dog'],
                                                        batch_size=10)
valid_batches = ImageDataGenerator().flow_from_directory(
    valid_path, target_size=(224, 224), classes=['Cat', 'Dog'], batch_size=4)

imgs, labels = next(train_batches)

model = Sequential()

# the model's last layer has 1000 classes; we want 2 instead, so we take the model without its final layer
for layer in vgg16_model.layers[:-1]:
    model.add(layer)  # transfer the VGG16 layers into our own model
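A hedged continuation sketch (not from the original): the copied layers are typically frozen and a 2-class softmax head added; the hyperparameters below are assumptions.

# Hypothetical continuation: freeze the copied layers and add a 2-class head
for layer in model.layers:
    layer.trainable = False
model.add(Dense(2, activation='softmax'))
model.compile(optimizer=Adam(lr=1e-4),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit_generator(train_batches, validation_data=valid_batches, epochs=5)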
Example #21
# ref_val_datagen = ref_gen.flow_from_directory(ref_path, subset="validation",
#                                                   class_mode="categorical",
#                                                   target_size=input_size,
#                                                   batch_size=batch_size, classes=ref_classes)

ref_train_datagen, ref_val_datagen = get_iterators_by_root_dir(ref_path,
                                                               batch_size,
                                                               input_size,
                                                               0.2,
                                                               1000,
                                                               shuffle=True)

# ref_train_datagen = get_iterator("C:/Users/lotan/Documents/studies/Affordances/datasets/imagenet_files/train.txt")
# ref_val_datagen = get_iterator("C:/Users/lotan/Documents/studies/Affordances/datasets/imagenet_files/val.txt")

model = vgg16.VGG16(weights="imagenet")
loss_fn = tf.keras.losses.CategoricalCrossentropy()
accuracy_func_train = tf.keras.metrics.CategoricalAccuracy()
loss_tracker_train = tf.keras.metrics.Mean(name="loss")

accuracy_func_val = tf.keras.metrics.CategoricalAccuracy()
loss_tracker_val = tf.keras.metrics.Mean(name="loss")

for layer in model.layers[:19]:
    layer.trainable = False
    print(layer.name)

model.summary()

base_learning_rate = 0.00001
optimizer = tf.keras.optimizers.Adam(lr=base_learning_rate)
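A minimal training-step sketch (not from the original) showing how the loss, optimizer, and trackers defined above are typically wired together with tf.GradientTape; ref_train_datagen is assumed to yield (images, labels) batches.

@tf.function
def train_step(images, labels):
    # forward pass, loss, and gradient update for one batch
    with tf.GradientTape() as tape:
        preds = model(images, training=True)
        loss = loss_fn(labels, preds)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    loss_tracker_train.update_state(loss)
    accuracy_func_train.update_state(labels, preds)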
Example #22
def get_vgg16_model(weights='imagenet', input_tensor=None):
    return vgg16.VGG16(weights=weights, include_top=False, input_tensor=input_tensor)
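A brief hedged usage note: the function returns the convolutional base, which is typically frozen for feature extraction.

# Hypothetical usage: a frozen convolutional feature extractor
backbone = get_vgg16_model()
backbone.trainable = False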
Example #23
    def __init__(self):
        super().__init__()
        self.__model = vgg16.VGG16(weights='imagenet')
        self.ref_model = self.get_dropout_model(0)
        self.tar_model = self.get_dropout_model(0)
        self.tar_model.summary()