Example #1
def main(saved_weights_path):
    prepare_log_dir()
    train_data, train_labels = load_svhn_data("train", "full")
    valid_data, valid_labels = load_svhn_data("valid", "full")
    test_data, test_labels = load_svhn_data("test", "full")

    print("TrainData", train_data.shape)
    print("Valid Data", valid_data.shape)
    print("Test Data", test_data.shape)

    train_size = len(train_labels)
    train_regressor(train_data, train_labels, valid_data, valid_labels,
                    test_data, test_labels, train_size, saved_weights_path)
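
# A minimal entry point for this example might look like the sketch below; the
# command-line handling is an assumption, since the original caller is not shown.
if __name__ == "__main__":
    import sys
    weights_path = sys.argv[1] if len(sys.argv) > 1 else None
    main(weights_path)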
Example #2
def main(saved_weights_path):
    prepare_log_dir()
    train_data, train_labels = load_svhn_data("train", "cropped")
    valid_data, valid_labels = load_svhn_data("valid", "cropped")
    test_data, test_labels = load_svhn_data("test", "cropped")

    print("Training", train_data.shape)
    print("Valid", valid_data.shape)
    print("Test", test_data.shape)

    train_size = train_labels.shape[0]
    #saved_weights_path = None
    train_classification(train_data, train_labels, valid_data, valid_labels,
                         test_data, test_labels, train_size,
                         saved_weights_path)
Example #3
def main(saved_weights_path):
    prepare_log_dir()
    train_data, train_labels = load_svhn_data("train", "cropped")
    valid_data, valid_labels = load_svhn_data("valid", "cropped")
    test_data, test_labels = load_svhn_data("test", "cropped")

    print("Training", train_data.shape)
    print("Valid", valid_data.shape)
    print("Test", test_data.shape)

    train_size = train_labels.shape[0]
    saved_weights_path = None  # train from scratch; ignore any checkpoint path passed in
    train_classification(train_data, train_labels,
                         valid_data, valid_labels,
                         test_data, test_labels, train_size,
                         saved_weights_path)
Example #4
import sys
import os
import numpy as np
import tensorflow as tf
import PIL.Image as Image
import matplotlib.pyplot as plt
import cv2 as cv

from svhn_model import regression_head
from svhn_data import load_svhn_data
import time

test_dataset, test_labels = load_svhn_data("test", "full")
WEIGHTS_FILE = "regression.ckpt"

# tf.reset_default_graph()


def optimistic_restore(session, save_file):
    reader = tf.train.NewCheckpointReader(save_file)
    saved_shapes = reader.get_variable_to_shape_map()
    var_names = sorted([(var.name, var.name.split(':')[0])
                        for var in tf.global_variables()
                        if var.name.split(':')[0] in saved_shapes])
    restore_vars = []
    name2var = dict(
        zip(map(lambda x: x.name.split(':')[0], tf.global_variables()),
            tf.global_variables()))

    with tf.variable_scope('', reuse=True):
        for var_name, saved_var_name in var_names:
            # Loop body reconstructed per the standard "optimistic restore" pattern:
            # restore only variables whose shapes match those in the checkpoint.
            curr_var = name2var[saved_var_name]
            var_shape = curr_var.get_shape().as_list()
            if var_shape == saved_shapes[saved_var_name]:
                restore_vars.append(curr_var)
    saver = tf.train.Saver(restore_vars)
    saver.restore(session, save_file)
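
# Hypothetical usage sketch (assumption: the regression graph, e.g. from
# regression_head, has already been built so tf.global_variables() is non-empty):
#
#     with tf.Session() as sess:
#         sess.run(tf.global_variables_initializer())
#         optimistic_restore(sess, WEIGHTS_FILE)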
Example #5
def main(saved_weights_path):
    prepare_log_dir()
    train_data, train_labels = load_svhn_data("train", "full")
    valid_data, valid_labels = load_svhn_data("valid", "full")
    test_data, test_labels = load_svhn_data("test", "full")

    print("\nTrain data", train_data.shape)
    print("Valid data", valid_data.shape)
    print("Test data", test_data.shape)
    print("Train labels", train_labels.shape)
    print("Valid labels", valid_labels.shape)
    print("Test labels", test_labels.shape)

    train_size = len(train_labels)
    valid_size = len(valid_labels)
    test_size = len(test_labels)
    print('train_size', train_size)
    print('valid_size', valid_size)
    print('test_size', test_size, '\n')
    data_size = [train_size, valid_size, test_size]


    print('1. starting the grouping data into subsets process.')
    # Group the data into subsets, where each subset contains only the images with a
    # given number of digits (0 to 5; the zero-digit subset is skipped during training).
    # Data groups (k) are: k=0 -> train, k=1 -> valid, k=2 -> test.
    # Create the list templates.
    data_list_of_lists_tvt = [[[], [], [], [], [], []],
                              [[], [], [], [], [], []],
                              [[], [], [], [], [], []]]
    num_digits_list = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
    print('2. created list templates')

    data_set = [train_data, valid_data, test_data]
    labels_set = [train_labels, valid_labels, test_labels]

    # Build the index lists and count how many images have x digits, for each data
    # group (k=0 -> train, k=1 -> valid, k=2 -> test).
    for data_group in range(3):
        labels = labels_set[data_group]
        for i in range(data_size[data_group]):
            for num_digits_in_data_subset in range(6):
                data_num_digits = labels[i, 0]
                if data_num_digits == (num_digits_in_data_subset):
                    data_list_of_lists_tvt[data_group][num_digits_in_data_subset].append(i)
                    num_digits_list[data_group][num_digits_in_data_subset] += 1
    labels = None

    if TEST_FLAG:
        print('np.array(num_digits_list).shape',np.array(num_digits_list).shape)
        print('np.array(num_digits_list[0]).shape',np.array(num_digits_list[0]).shape)
        print('np.array(num_digits_list[0][1]).shape',np.array(num_digits_list[0][1]).shape,'If this shape is (), this reflects that this object is a scalar value.')

    print('\n3. created populated data/labels lists')

    # Print the image count for each group / subset (each subset corresponds to a digit count).
    print('\nCounts of images for each group and (num_digits) subset. '
          'Groups: k=0 -> train   k=1 -> valid   k=2 -> test')
    for data_group in range(3):
        for num_digits_in_data_subset in range(6):
            print('Data group:', data_group,
                  '\tNum of digits_', num_digits_in_data_subset, ' = \t',
                  num_digits_list[data_group][num_digits_in_data_subset])

    # create lists of zero arrays, for each group and each subset (for both data and labels)
    data_group_with_digits = [[0,0,0,0,0,0] , [0,0,0,0,0,0] , [0,0,0,0,0,0]]
    label_group_with_digits = [[0,0,0,0,0,0] , [0,0,0,0,0,0] , [0,0,0,0,0,0]]
    for data_group in range(3):
        for num_digits_in_data_subset in range(6):
            # shape_data = [train_data.shape, valid_data.shape, test_data.shape]
            data_group_with_digits[data_group][num_digits_in_data_subset] = np.zeros(
                shape=(num_digits_list[data_group][num_digits_in_data_subset],
                       IMG_HEIGHT, IMG_WIDTH, NUM_CHANNELS),
                dtype=np.float32)
            label_group_with_digits[data_group][num_digits_in_data_subset] = np.zeros(
                shape=(num_digits_list[data_group][num_digits_in_data_subset], 6),
                dtype=np.int32)
            if data_group == 0:
                print('num_digits_list[data_group][num_digits_in_data_subset]',
                      num_digits_list[data_group][num_digits_in_data_subset])
    print('\n4. created lists of zero arrays\n')

    if TEST_FLAG:
        print('data_group_with_digits.shape',np.array(data_group_with_digits).shape)
        print('data_group_with_digits[0].shape',np.array(data_group_with_digits[0]).shape)
        print('data_group_with_digits[0][1].shape',np.array(data_group_with_digits[0][1]).shape)
        print('np.array(num_digits_list).shape',np.array(num_digits_list).shape)
        print('np.array(num_digits_list[0]).shape',np.array(num_digits_list[0]).shape)
        print('np.array(num_digits_list[0][1]).shape',np.array(num_digits_list[0][1]).shape)
        print('np.array(label_group_with_digits).shape',np.array(label_group_with_digits).shape)
        print('np.array(label_group_with_digits[0])',np.array(label_group_with_digits[0]).shape)
        print('np.array(label_group_with_digits[0][1])',np.array(label_group_with_digits[0][1] ).shape)


    # Populate the arrays with data/labels.
    for data_group in range(3):
        labels = labels_set[data_group]
        data = data_set[data_group]
        for num_digits_in_data_subset in range(6):
            if TEST_FLAG:
                print('data_group', data_group,
                      'num_digits_in_data_subset', num_digits_in_data_subset)
            data_group_with_digits[data_group][num_digits_in_data_subset] = [
                data[i, :, :, :]
                for i in data_list_of_lists_tvt[data_group][num_digits_in_data_subset]
                if num_digits_list[data_group][num_digits_in_data_subset] != 0]
            label_group_with_digits[data_group][num_digits_in_data_subset] = [
                labels[i, :]
                for i in data_list_of_lists_tvt[data_group][num_digits_in_data_subset]
                if num_digits_list[data_group][num_digits_in_data_subset] != 0]

    print('\n5. populated the arrays with data/labels\n')

    if TEST_FLAG:
        print('data_group_with_digits.shape',np.array(data_group_with_digits).shape)
        print('data_group_with_digits[0].shape',np.array(data_group_with_digits[0]).shape)
        print('data_group_with_digits[0][0].shape',np.array(data_group_with_digits[0][0]).shape)
        print('data_group_with_digits[0][1].shape',np.array(data_group_with_digits[0][1]).shape)
        print('np.array(label_group_with_digits).shape',np.array(label_group_with_digits).shape)
        print('np.array(label_group_with_digits[0]).shape',np.array(label_group_with_digits[0]).shape)
        print('np.array(label_group_with_digits[0][0]).shape',np.array(label_group_with_digits[0][0]).shape)
        print('np.array(label_group_with_digits[0][1]).shape',np.array(label_group_with_digits[0][1]).shape)


    """ TRAIN THE REGRESSOR"""
    # define num_digits_in_data_subset , such that we can train the regressor , for a given num_digits_in_data_subset
    # num_digits_in_data_subset is input into train_regressor function

    # training => data_group == 0
    for num_digits_in_data_subset in range(6):
        data_train      = np.array( data_group_with_digits[0][num_digits_in_data_subset] )
        labels_train    = np.array( label_group_with_digits[0][num_digits_in_data_subset] )
        data_valid      = np.array( data_group_with_digits[1][num_digits_in_data_subset] )
        labels_valid    = np.array( label_group_with_digits[1][num_digits_in_data_subset] )
        data_test       = np.array( data_group_with_digits[2][num_digits_in_data_subset] )
        labels_test     = np.array( label_group_with_digits[2][num_digits_in_data_subset] )

        data_shape = data_train.shape  # data_shape[0] is the training-subset length
        # Train only when the subset has data, and skip the zero-digit subset: in this
        # use-case the model never needs to predict that an image contains no digits.
        if data_shape[0] > 0 and num_digits_in_data_subset > 0:
            print('num_digits_in_data_subset', num_digits_in_data_subset,
                  'data_shape[0]', data_shape[0])
            train_regressor(data_train, labels_train, data_valid, labels_valid,
                            data_test, labels_test, train_size=data_shape[0],
                            saved_weights_path=saved_weights_path,
                            num_digits_in_data_subset=num_digits_in_data_subset)
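
# Toy illustration (not part of the original) of the grouping logic in main() above,
# assuming labels[:, 0] holds the digit count, as the loop over labels[i, 0] implies.
def _toy_grouping_demo():
    import numpy as np
    toy_labels = np.array([[1, 3, 10, 10, 10, 10],
                           [2, 4, 7, 10, 10, 10]])
    buckets = [[] for _ in range(6)]
    for i, row in enumerate(toy_labels):
        buckets[row[0]].append(i)  # image i goes into the bucket for its digit count
    print(buckets)                 # -> [[], [0], [1], [], [], []]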
Example #6
import sys
import os
import numpy as np
import tensorflow as tf
import PIL.Image as Image
import matplotlib.pyplot as plt

from svhn_model import regression_head
from svhn_data import load_svhn_data
import time

test_dataset, test_labels = load_svhn_data("test", "full")
WEIGHTS_FILE = "regression.ckpt"


def prediction_to_string(pred_array):
    pred_str = ""
    for i in range(len(pred_array)):
        if pred_array[i] != 10:
            pred_str += str(pred_array[i])
        else:
            return pred_str
    return pred_str
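
# Illustrative check (assumption: class 10 encodes "no digit" and therefore
# terminates the string, as the early return above implies).
assert prediction_to_string([1, 2, 10, 0, 0]) == "12"
assert prediction_to_string([3, 4, 5, 6, 7]) == "34567"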


def detect(img_path, saved_model_weights):
    sample_img = Image.open(img_path)
    plt.imshow(sample_img)
    plt.show()

    pix = np.array(sample_img)
Example #7
    # start = cv2.getTickCount()
    score = anogan.compute_anomaly_score(model,
                                         test_imgs[i].reshape(1, 32, 32, 3),
                                         iterations=500,
                                         d=None)
    # time = (cv2.getTickCount() - start) / cv2.getTickFrequency() * 1000
    print('%d label, %d : done' % (1, i), '%.2f' % score)
    score_for_2.append(score)
score_for_2 = np.sort(score_for_2)[::-1]
print(score_for_2)
import pickle
pickle.dump(score_for_2, open("cifar100_score.pickle", "wb"))
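
# Hypothetical follow-up (not in the original): reload the saved scores and flag the
# highest-scoring images as outlier candidates; the 95th-percentile cutoff is an
# arbitrary assumption.
reloaded_scores = np.array(pickle.load(open("cifar100_score.pickle", "rb")))
cutoff = np.percentile(reloaded_scores, 95)
print("outlier candidates above cutoff:", int(np.sum(reloaded_scores > cutoff)))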

### compute outlier scores on SVHN
import svhn_data
(X_train, y_train), (X_test, y_test) = svhn_data.load_svhn_data()
X_train = (X_train.astype(np.float32) - 127.5) / 127.5
X_test = (X_test.astype(np.float32) - 127.5) / 127.5
# X_train = X_train[:,:,:,None]
# X_test = X_test[:,:,:,None]
perm_test = np.random.permutation(len(X_train))
X_train = X_train[perm_test]

test_imgs = X_train[0:200]
score_for_2 = []
# test_img = np.random.uniform(-1,1, (28,28,1))
model = anogan.anomaly_detector(g=None, d=None)
for i in range(np.shape(test_imgs)[0]):
    # start = cv2.getTickCount()
    score = anogan.compute_anomaly_score(model,
                                         test_imgs[i].reshape(1, 32, 32, 3),