Example #1
def optA():
    mycoco.setmode('test')
    if args.maxinstances is not None:
        idsstart = mycoco.query(args.categories, exclusive=True)
        ids = []
        for id in idsstart:
            ids.append(list(id[:args.maxinstances]))
    else:
        ids = mycoco.query(args.categories, exclusive=True)

    # Gets image data (without labels).
    imgs = mycoco.iter_images_nocat(ids, args.categories, batch=32)
    # Gets image data (with labels).
    imgslabeled = mycoco.iter_images(ids, args.categories, batch=32)

    # Loading the model architecture from file.
    # Note: model_from_json restores only the architecture; the trained weights
    # would have to be loaded separately (e.g. with model.load_weights) before
    # they can be copied into the second model below.
    with open(args.modelfile, "r") as json_file:
        loaded_model = json_file.read()
    model = model_from_json(loaded_model)

    # Compiling model.
    model.compile(optimizer='adam',
                  loss='mean_squared_error',
                  metrics=['accuracy'])
    #score = model.evaluate(imgs, imgs, verbose=0)
    #print(score)

    # Building second model for extracting embeddings.
    inputlayer = Input(shape=(200, 200, 3))
    # encoder
    conv2dlayer = Conv2D(8, (3, 3), padding='same')(inputlayer)
    relulayer = Activation('relu')(conv2dlayer)
    maxpool2dlayer = MaxPooling2D(pool_size=(2, 2))(relulayer)
    conv2dlayer2 = Conv2D(16, (3, 3), padding='same')(maxpool2dlayer)
    relulayer2 = Activation('relu')(conv2dlayer2)
    maxpool2dlayer2 = MaxPooling2D(pool_size=(2, 2))(relulayer2)
    encoded = maxpool2dlayer2

    # Creating second model.
    model2 = Model(inputlayer, maxpool2dlayer2)
    model2.summary()

    # Setting steps for predictions, as number of images divided by batch size.
    num_imgs = 0
    for id in ids:
        num_imgs += len(id)
    steps = round(num_imgs / 32)

    # Copying the encoder weights into the new model and predicting to get embeddings.
    # The two Conv2D layers hold four weight arrays in total (kernel + bias each).
    model2.set_weights(model.get_weights()[0:4])
    predictions = model2.predict_generator(imgslabeled, steps=steps)

    # Printing an example (since I am stopping here).
    print("An example prediction:")
    print(predictions[0])
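
An alternative to rebuilding the encoder layer by layer and copying weights by hand is to take the intermediate output directly from the loaded model, as the Keras FAQ linked in Example #7 suggests. A minimal sketch, continuing from the variables above; the layer index of the bottleneck is an assumption about the saved architecture:

# Sketch: build the encoder from the loaded model's own layers.
# layers[6] is assumed to be the second MaxPooling2D of the saved encoder.
encoder = Model(inputs=model.input, outputs=model.layers[6].output)
embeddings = encoder.predict_generator(imgs, steps=steps)
print(embeddings.shape)
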
Example #2
def opt_a():
    """
    Option A - Convolutional image autoencoder
    """
    mycoco.setmode('train')
    image_id_lists = mycoco.query(args.categories)
    image_count = sum(len(ids) for ids in image_id_lists)
    image_iter = mycoco.iter_images(image_id_lists, args.categories)
    encoder, autoencoder = train_autoencoder(image_iter, image_count)
    autoencoder.save(args.modelfile)
Example #3
def optA():
    mycoco.setmode('test')
    # Loading images.
    # I've modified iter_images in mycoco.py to only return the images without the labels.
    cat_list = []
    for cat in args.categories:
        cat_list.append([cat])
    n_classes = len(cat_list)
    allids = mycoco.query(cat_list)
    if args.maxinstances:
        imgs = mycoco.iter_images([x[:int(args.maxinstances)] for x in allids],
                                  list(range(n_classes)),
                                  batch=16)
        n_imgs = int(args.maxinstances) * len(cat_list)
    else:
        imgs = mycoco.iter_images(list(allids),
                                  list(range(n_classes)),
                                  batch=16)
        n_imgs = sum(len(x) for x in allids)

    # print(n_imgs)

    # loading the autoencoder model
    autoencoder = load_model(args.modelfile)

    # encoder
    input_img = Input(shape=(200, 200, 3))

    x = Conv2D(8, (3, 3), activation='relu', padding='same')(input_img)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
    encoded = MaxPooling2D((2, 2), padding='same')(x)

    encoder = Model(input_img, encoded)
    # The encoder has three Conv2D layers, i.e. six weight arrays (kernel + bias each).
    encoder.set_weights(autoencoder.get_weights()[0:6])
    # One pass over the data: n_imgs images at a batch size of 16.
    preds = encoder.predict_generator(imgs, steps=n_imgs // 16)

    print("Predictions shape: ", preds.shape)
Example #4
def optA():
    mycoco.setmode('test')
    model = load_model(args.modelfile)
    ids = mycoco.query(args.categories, exclusive=False)
    if args.maxinstances:
        x = args.maxinstances
    else:
        x = len(min(ids, key=len))
    list1 = []
    for i in range(len(ids)):
        list1.append(ids[i][:x])
    print("Maximum number of instances are :" , str(x))
    imgiter = mycoco.iter_images(list1, [0,1], size=(200,200), batch=50)
    img_sample = next(imgiter)
    predictions = model.predict(img_sample[0])
    classes = [1 if p >= 0.5 else 0 for p in predictions]
    correct = [c == y for c, y in zip(classes, img_sample[1])]
    correct_count = sum(correct)
    print("Correct predictions in this batch:", correct_count, "out of", len(correct))
    print("Option A is implemented!")
Example #5
def optA():
    mycoco.setmode('train')
    ids = mycoco.query(args.categories, exclusive=False)
    if args.maxinstances:
        x = args.maxinstances
    else:
        x = len(min(ids, key=len))
    list1 = []
    for i in range(len(ids)):
        list1.append(ids[i][:x])
    print("Maximum number of instances are :" , str(x))
    imgiter = mycoco.iter_images(list1, [0,1], batch=100)
    input_img = Input(shape=(200,200,3))
    # Encoder Layers
    x = Conv2D(8, (3, 3), activation='relu')(input_img)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Dropout(0.5)(x)
    x = Conv2D(8, (3, 3), activation='relu')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(16, (3, 3), activation='relu')(x)

    # Decoder Layers
    x = Conv2D(16, (3, 3), activation='relu')(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(8, (3, 3), activation='relu')(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(1, (3, 3), activation='relu')(x)
    x = Flatten()(x)
    x = Dense(10)(x)
    decode = Dense(1, activation="sigmoid")(x)
    
    model = Model(input_img, decode)
    model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
    filepath="/scratch/gusmohyo/checkfile.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='acc', verbose=1, save_best_only=True, mode='max')
    callbacks_list = [checkpoint]
    model.fit_generator(imgiter, steps_per_epoch=10, epochs=30, callbacks=callbacks_list, verbose=0)
    model.save(args.modelfile)
    print("Option A is implemented!")
Example #6
def opt_a():
    """
    Option A - Convolutional image autoencoder
    """
    mycoco.setmode('test')

    # Load model
    model: Model = load_model(args.modelfile)
    encoder = Model(inputs=model.input, outputs=model.get_layer("encoder").output)

    # Load image iterator, limit to args.maxinstances per category list
    image_id_lists = mycoco.query(args.categories)
    if args.maxinstances is not None:
        image_id_lists = [ids[:args.maxinstances] for ids in image_id_lists]

    pyplot.figure(figsize=[6, 6])

    for image_id_list in image_id_lists:
        image_count = len(image_id_list)
        image_iter = mycoco.iter_images([image_id_list], args.categories)

        # Create predictions for images
        batch_size = 1
        generator = autoencoder_generator(image_iter, batch_size)
        encoder_prediction = encoder.predict_generator(generator, steps=image_count // batch_size)

        # Reduce dimensionality with PCA
        reshaped_predictions = encoder_prediction.reshape((encoder_prediction.shape[0], -1))
        pca = PCA(n_components=2)
        pca_predictions = pca.fit_transform(reshaped_predictions)

        # Plot values
        pyplot.scatter(pca_predictions[:, 0], pca_predictions[:, 1])

    pyplot.title('Clustering')
    pyplot.legend(args.categories)
    pyplot.savefig('cluster.svg', format='svg')
Example #7
# https://keras.io/getting-started/faq/#how-can-i-obtain-the-output-of-an-intermediate-layer
## Use the same network but stop at the flattened encoded layer ('encoded_flat') to extract embeddings
encoder = Model(inputs=autoencoder.input,
                outputs=autoencoder.get_layer('encoded_flat').output)

##


def only_img_iter(img_iter):
    for i in img_iter:
        yield (i[0], i[0])


# Pretty hacky to train on all the images
all_ids = mycoco.query([['']])
all_img = mycoco.iter_images(all_ids, [None])

all_only_img = only_img_iter(all_img)

csv_logger = CSVLogger('./autoencoder_2.csv', append=True, separator=',')
filepath = "/scratch/gussteen/autoencoder/autoencoder_2.best.hdf5"
checkpoint = ModelCheckpoint(filepath,
                             monitor='acc',
                             verbose=1,
                             save_best_only=True,
                             mode='max')
autoencoder.fit_generator(all_only_img,
                          steps_per_epoch=10000,
                          epochs=50,
                          verbose=2,
                          callbacks=[checkpoint, csv_logger])
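
The extracted encoder is not used further in this snippet. Once the autoencoder has been trained, it could be applied to embed images, for example (the number of steps here is arbitrary):

# Sketch: use the extracted encoder to embed a number of image batches.
embeddings = encoder.predict_generator(all_only_img, steps=100)  # 100 batches, arbitrary
print(embeddings.shape)
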
Example #8
def optA():
    mycoco.setmode('train')
    # Loading images.
    # I've modified iter_images in mycoco.py to only return the images without the labels.
    cat_list = []
    for cat in args.categories:
        cat_list.append([cat])
    n_classes = len(cat_list)
    allids = mycoco.query(cat_list)
    if args.maxinstances:
        imgs = mycoco.iter_images([x[:int(args.maxinstances)] for x in allids],
                                  list(range(n_classes)),
                                  batch=16)
        n_imgs = int(args.maxinstances) * len(cat_list)
    else:
        imgs = mycoco.iter_images(list(allids),
                                  list(range(n_classes)),
                                  batch=16)
        n_imgs = sum(len(x) for x in allids)

    # print(n_imgs)

    # model layers:

    # encoder
    input_img = Input(shape=(200, 200, 3))

    x = Conv2D(8, (3, 3), activation='relu', padding='same')(input_img)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
    encoded = MaxPooling2D((2, 2), padding='same')(x)

    # I'm not sure here if I should extract the last MaxPooling2D layer for the second model
    # or if there are also supposed to be Flatten and Dense layers before the extraction.
    # I've also tried making Flatten and Dense layers the connecting step
    # between both parts of the autoencoder model, to make the extracted bottleneck layer
    # 2-dimensional. I've since discarded those as I wasn't sure they were needed.
    # (A sketch of that Flatten/Dense variant follows after this example.)

    # flat_encoded = Flatten()(encoded)
    # denselayer = Dense(n_classes, activation='softmax')(flat_encoded)

    # decoder
    x = Conv2DTranspose(32, (3, 3), activation='relu', padding='same')(encoded)
    x = UpSampling2D((2, 2))(x)
    x = Conv2DTranspose(16, (3, 3), activation='relu', padding='same')(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2DTranspose(8, (3, 3), activation='relu', padding='same')(x)
    x = UpSampling2D((2, 2))(x)
    # Sigmoid keeps each output channel in [0, 1] independently; softmax would
    # force the three colour channels of every pixel to sum to 1.
    decoded = Conv2DTranspose(3, (3, 3), activation='sigmoid',
                              padding='same')(x)

    # autoencoder model
    autoencoder = Model(input_img, decoded)
    autoencoder.summary()
    # encoder model
    encoder = Model(input_img, encoded)
    encoder.summary()
    # plot_model(autoencoder, to_file='autoencoder.png', show_shapes=True, show_layer_names=True)
    # plot_model(encoder, to_file='encoder.png', show_shapes=True, show_layer_names=True)
    autoencoder.compile(loss='mean_squared_error', optimizer='adam')

    batch = 16
    autoencoder.fit_generator(imgs,
                              steps_per_epoch=n_imgs // batch,
                              epochs=20)

    # The encoder has three Conv2D layers, i.e. six weight arrays (kernel + bias each).
    encoder.set_weights(autoencoder.get_weights()[0:6])
    preds = encoder.predict_generator(imgs, steps=n_imgs // batch)

    print("Predictions shape: ", preds.shape)

    # saving the autoencoder model
    autoencoder.save(args.modelfile)
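
On the commented-out Flatten/Dense bottleneck discussed in this example: it is not required for extracting the convolutional feature map, but if a flat 2-D embedding per image is wanted, one way to wire it is Flatten, a small Dense bottleneck, then a Dense plus Reshape back to a feature map for the decoder. A sketch under those assumptions (the 128-unit bottleneck is an arbitrary choice, and Reshape would need to be imported from keras.layers):

# Hypothetical Flatten/Dense bottleneck between the encoder and decoder above.
# Shapes assume the encoder in this example: 200x200x3 -> (25, 25, 32).
flat = Flatten()(encoded)                          # 25 * 25 * 32 = 20000 values
bottleneck = Dense(128, activation='relu')(flat)   # flat 128-d embedding per image
x = Dense(25 * 25 * 32, activation='relu')(bottleneck)
x = Reshape((25, 25, 32))(x)                       # back to a feature map
# ...the Conv2DTranspose / UpSampling2D decoder then continues from x as before...
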
Example #9
# NOTE: the start of this snippet is missing; a plausible prefix, inferred (i.e.
# assumed) from the decoder below and the "(10, 10, 8) encoded" comment, is:
input_img = Input(shape=(200, 200, 3))
x = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
x = MaxPooling2D((5, 5), padding='same')(x)

x = SpatialDropout2D(0, data_format='channels_last')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = SpatialDropout2D(0, data_format='channels_last')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)

# (10, 10, 8) encoded
encoded = MaxPooling2D((2, 2), padding='same')(x)

x = SpatialDropout2D(0, data_format='channels_last')(encoded)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
x = SpatialDropout2D(0, data_format='channels_last')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
x = SpatialDropout2D(0, data_format='channels_last')(x)
x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((5, 5))(x)
x = SpatialDropout2D(0, data_format='channels_last')(x)
decoded = Conv2D(3, (3, 3), activation='sigmoid', padding='same')(x)

autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adam', loss='mean_absolute_error')

mycoco.setmode('train')
zebraids, horseids = mycoco.query([['zebra'], ['horse']])
imgs = mycoco.iter_images([zebraids, horseids], [0, 1],
                          batch=10,
                          size=(200, 200, 3))
autoencoder.fit_generator(test_func(imgs), steps_per_epoch=4, epochs=4)
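
test_func is not defined in this snippet; judging by only_img_iter in Example #7, it presumably maps the (image, label) batches from mycoco.iter_images to (image, image) pairs so the autoencoder learns to reconstruct its input. A hypothetical reconstruction:

# Hypothetical reconstruction of test_func (not part of the original snippet):
# turn (images, labels) batches into (images, images) pairs for autoencoder training.
def test_func(img_iter):
    for batch in img_iter:
        yield (batch[0], batch[0])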