import argparse
import os
import sys

import numpy as np
import tensorflow as tf  # TensorFlow 1.x graph-mode API (tf.placeholder, tf.InteractiveSession)
import yaml

# The project-specific helpers used below (read_data_file, pretrained_classifier,
# load_images_and_labels) are assumed to be importable from the repository's own modules.


def test():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config',
                        '-c',
                        default='configs/celebA_YSBBB_Classifier.yaml')
    args = parser.parse_args()
    # ============= Load config =============
    config_path = args.config
    with open(config_path) as f:
        config = yaml.safe_load(f)
    print(config)
    # ============= Experiment Folder=============
    output_dir = os.path.join(config['log_dir'], config['name'])
    classifier_output_path = os.path.join(output_dir, 'classifier_output')
    os.makedirs(classifier_output_path, exist_ok=True)
    past_checkpoint = output_dir
    # ============= Experiment Parameters =============
    BATCH_SIZE = config['batch_size']
    channels = config['num_channel']
    input_size = config['input_size']
    N_CLASSES = config['num_class']
    # ============= Data =============
    try:
        categories, file_names_dict = read_data_file(
            config['image_label_dict'])
    except Exception:
        print("Problem reading the input data file: ",
              config['image_label_dict'])
        sys.exit()
    data_train = np.load(config['train'])
    data_test = np.load(config['test'])
    print("The classification categories are: ")
    print(categories)
    print('The size of the training set: ', data_train.shape[0])
    print('The size of the testing set: ', data_test.shape[0])

    # ============= placeholder =============
    with tf.name_scope('input'):
        x_ = tf.placeholder(tf.float32,
                            [None, input_size, input_size, channels],
                            name='x-input')
        y_ = tf.placeholder(tf.int64, [None, N_CLASSES], name='y-input')
        isTrain = tf.placeholder(tf.bool)
    # ============= Model =============

    if N_CLASSES == 1:
        y = tf.reshape(y_, [-1])
        y = tf.one_hot(y, 2, on_value=1.0, off_value=0.0, axis=-1)
        logit, prediction = pretrained_classifier(x_,
                                                  n_label=2,
                                                  reuse=False,
                                                  name='classifier',
                                                  isTrain=isTrain)
    else:
        logit, prediction = pretrained_classifier(x_,
                                                  n_label=N_CLASSES,
                                                  reuse=False,
                                                  name='classifier',
                                                  isTrain=isTrain)
        y = y_
    classif_loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=y,
                                                   logits=logit)
    loss = tf.losses.get_total_loss()
    # ============= Variables =============
    # Note that this list of variables only includes the weights and biases in the model.
    lst_vars = list(tf.global_variables())
    # ============= Session =============
    sess = tf.InteractiveSession()
    saver = tf.train.Saver(var_list=lst_vars)
    tf.global_variables_initializer().run()
    # ============= Load Checkpoint =============
    if past_checkpoint is not None:
        ckpt = tf.train.get_checkpoint_state(past_checkpoint + '/')
        if ckpt and ckpt.model_checkpoint_path:
            print("HERE...................lod checkpoint.........")
            print(str(ckpt.model_checkpoint_path))
            saver.restore(sess,
                          tf.train.latest_checkpoint(past_checkpoint + '/'))
        else:
            sys.exit()
    else:
        sys.exit()
    # ============= Testing Save the Output =============
    names = np.empty([0])
    prediction_y = np.empty([0])
    true_y = np.empty([0])
    for epoch in range(1):
        num_batch = int(data_train.shape[0] / BATCH_SIZE)
        for i in range(0, num_batch):
            start = i * BATCH_SIZE
            ns = data_train[start:start + BATCH_SIZE]
            xs, ys = load_images_and_labels(ns,
                                            config['image_dir'],
                                            N_CLASSES,
                                            file_names_dict,
                                            input_size,
                                            channels,
                                            do_center_crop=True)
            [_pred] = sess.run([prediction],
                               feed_dict={
                                   x_: xs,
                                   isTrain: False,
                                   y_: ys
                               })
            if i == 0:
                names = np.asarray(ns)
                prediction_y = np.asarray(_pred)
                true_y = np.asarray(ys)
            else:
                names = np.append(names, np.asarray(ns), axis=0)
                prediction_y = np.append(prediction_y,
                                         np.asarray(_pred),
                                         axis=0)
                true_y = np.append(true_y, np.asarray(ys), axis=0)
        np.save(classifier_output_path + '/name_train1.npy', names)
        np.save(classifier_output_path + '/prediction_y_train1.npy',
                prediction_y)
        np.save(classifier_output_path + '/true_y_train1.npy', true_y)

        names = np.empty([0])
        prediction_y = np.empty([0])
        true_y = np.empty([0])
        num_batch = int(data_test.shape[0] / BATCH_SIZE)
        for i in range(0, num_batch):
            start = i * BATCH_SIZE
            ns = data_test[start:start + BATCH_SIZE]
            xs, ys = load_images_and_labels(ns,
                                            config['image_dir'],
                                            N_CLASSES,
                                            file_names_dict,
                                            input_size,
                                            channels,
                                            do_center_crop=True)
            [_pred] = sess.run([prediction],
                               feed_dict={
                                   x_: xs,
                                   isTrain: False,
                                   y_: ys
                               })
            if i == 0:
                names = np.asarray(ns)
                prediction_y = np.asarray(_pred)
                true_y = np.asarray(ys)
            else:
                names = np.append(names, np.asarray(ns), axis=0)
                prediction_y = np.append(prediction_y,
                                         np.asarray(_pred),
                                         axis=0)
                true_y = np.append(true_y, np.asarray(ys), axis=0)
        np.save(classifier_output_path + '/name_test1.npy', names)
        np.save(classifier_output_path + '/prediction_y_test1.npy',
                prediction_y)
        np.save(classifier_output_path + '/true_y_test1.npy', true_y)
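
For reference, a minimal sketch of the YAML configuration that test() consumes. Every key below is one the function actually reads; the file name and all values shown are placeholders, not the repository's real settings.

# Hypothetical config sketch (placeholder values only); printing it shows the
# expected YAML layout.
import yaml

example_config = {
    'name': 'my_classifier_run',              # experiment name, becomes the output folder
    'log_dir': './output',                    # parent directory for experiment folders
    'batch_size': 64,
    'num_channel': 3,
    'input_size': 128,
    'num_class': 1,
    'image_label_dict': './data/labels.txt',  # parsed by read_data_file
    'train': './data/train_ids.npy',
    'test': './data/test_ids.npy',
    'image_dir': './data/images',
}
print(yaml.safe_dump(example_config, default_flow_style=False))
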
Example #2
                                      n_neighbors=1,
                                      return_distance=True)
    class2 = classes[ind2[0][0]]
    return [class1, round(dist1[0][0]), class2, round(dist2[0][0], 3)]


# =============================================================================
# LOAD IMAGES
# =============================================================================
path = 'C:/Users/Pablo/Google Drive/TFM/'

# Load images normal size
class_names = os.listdir(path + 'Images/')
class_numbers = list(map(lambda x: int(x[0]), class_names))
images, labels = utils.load_images_and_labels(path + 'Images/')
num_samples, height, width = images.shape
images_mean = []
images_closest = []
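# Each run of 61 consecutive images appears to correspond to one class: for every
# such group, the loop below computes the mean image and keeps the sample with the
# smallest summed absolute difference from that mean (the "closest" image).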
for i in np.arange(0, 61 * 6 + 1, 61):
    images_mean.append(np.mean(images[i:i + 61], axis=0))
    index = i + np.argmin(
        np.sum(np.abs(images[i:i + 61] - images_mean[-1]), axis=(1, 2)))
    images_closest.append(images[index])
if save_comp:
    ims_closest = []
    for im in images_closest:
        ims_closest.append(cv2.cvtColor(im, cv2.COLOR_GRAY2BGR))
    ims_closest.append(cv2.cvtColor(img_blank, cv2.COLOR_GRAY2BGR))

# Load images reduced
Example #3
def extract_features_ORB(images, orb):
    descriptors = []
    for img in images:
        descriptors.append(orb.detectAndCompute(img, None)[1])
    return descriptors
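

# Hypothetical usage sketch for extract_features_ORB (not part of the original
# snippet; assumes `images` has already been loaded, e.g. as in the section below):
#     orb = cv2.ORB_create(500)
#     descriptors = extract_features_ORB(images, orb)
#     matcher = cv2.DescriptorMatcher_create(
#         cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
#     matches = matcher.match(descriptors[0], descriptors[1])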


# =============================================================================
# LOAD IMAGES
# =============================================================================
path = 'C:/Users/Pablo/Google Drive/TFM/'

# Load images
class_names = os.listdir(path + 'Images/')
class_numbers = list(map(lambda x: int(x[0]), class_names))
images, labels = utils.load_images_and_labels(path + 'Images/')
num_samples, height, width = images.shape

MAX_MATCHES_array = np.arange(150, 601, 50)
accuracies = np.zeros((len(video_names), len(MAX_MATCHES_array)))

for index, MAX_MATCHES in enumerate(MAX_MATCHES_array):

    # =============================================================================
    # CREATE / LOAD ORB FEATURES
    # =============================================================================
    path = 'C:/Users/Pablo/Google Drive/TFM/'
    orb = cv2.ORB_create(MAX_MATCHES)
    matcher = cv2.DescriptorMatcher_create(
        cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    if not os.path.isfile(path + 'Models/orb_' + str(MAX_MATCHES) + '.pkl'):
Example #4
def train():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config',
                        '-c',
                        default='configs/celebA_YSBBB_Classifier.yaml')
    args = parser.parse_args()
    # ============= Load config =============
    config_path = args.config
    with open(config_path) as f:
        config = yaml.safe_load(f)
    print(config)
    # ============= Experiment Folder=============
    output_dir = os.path.join(config['log_dir'], config['name'])
    os.makedirs(output_dir, exist_ok=True)
    os.makedirs(os.path.join(output_dir, 'logs'), exist_ok=True)
    # ============= Experiment Parameters =============
    BATCH_SIZE = config['batch_size']
    EPOCHS = config['epochs']
    channels = config['num_channel']
    input_size = config['input_size']
    N_CLASSES = config['num_class']
    ckpt_dir_continue = config['ckpt_dir_continue']
    if ckpt_dir_continue == '':
        continue_train = False
    else:
        continue_train = True
    # ============= Data =============
    try:
        categories, file_names_dict = read_data_file(
            config['image_label_dict'])
    except Exception:
        print("Problem reading the input data file: ",
              config['image_label_dict'])
        sys.exit()
    data_train = np.load(config['train'])
    data_test = np.load(config['test'])
    print("The classification categories are: ")
    print(categories)
    print('The size of the training set: ', data_train.shape[0])
    print('The size of the testing set: ', data_test.shape[0])
    with open(os.path.join(output_dir, 'setting.txt'), 'w') as fp:
        fp.write('config_file:' + str(config_path) + '\n')
    # ============= placeholder =============
    with tf.name_scope('input'):
        x_ = tf.placeholder(tf.float32,
                            [None, input_size, input_size, channels],
                            name='x-input')
        y_ = tf.placeholder(tf.int64, [None, N_CLASSES], name='y-input')
        isTrain = tf.placeholder(tf.bool)
    # ============= Model =============
    if N_CLASSES == 1:
        y = tf.reshape(y_, [-1])
        y = tf.one_hot(y, 2, on_value=1.0, off_value=0.0, axis=-1)
        logit, prediction = pretrained_classifier(x_,
                                                  n_label=2,
                                                  reuse=False,
                                                  name='classifier',
                                                  isTrain=isTrain)
    else:
        logit, prediction = pretrained_classifier(x_,
                                                  n_label=N_CLASSES,
                                                  reuse=False,
                                                  name='classifier',
                                                  isTrain=isTrain)
        y = y_
    classif_loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=y,
                                                   logits=logit)
    loss = tf.losses.get_total_loss()
    # ============= Optimization functions =============
    train_step = tf.train.AdamOptimizer(0.0001).minimize(loss)
    # ============= summary =============
    cls_loss_sum = tf.summary.scalar('classif_loss', classif_loss)
    total_loss_sum = tf.summary.scalar('total_loss', loss)
    sum_train = tf.summary.merge([cls_loss_sum, total_loss_sum])
    # ============= Variables =============
    # Note that this list of variables only includes the weights and biases in the model.
    lst_vars = list(tf.global_variables())
    # ============= Session =============
    sess = tf.InteractiveSession()
    saver = tf.train.Saver(var_list=lst_vars)
    tf.global_variables_initializer().run()
    writer = tf.summary.FileWriter(output_dir + '/train', sess.graph)
    writer_test = tf.summary.FileWriter(output_dir + '/test', sess.graph)
    # ============= Checkpoints =============
    if continue_train:
        print("Before training, Load checkpoint ")
        print("Reading checkpoint...")
        ckpt = tf.train.get_checkpoint_state(ckpt_dir_continue)
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            saver.restore(sess, os.path.join(ckpt_dir_continue, ckpt_name))
            print(ckpt_name)
            print("Successful checkpoint upload")
        else:
            print("Failed checkpoint load")
            sys.exit()
    # ============= Training =============
    train_loss = []
    test_loss = []
    itr_train = 0
    itr_test = 0
    for epoch in range(EPOCHS):
        total_loss = 0.0
        perm = np.arange(data_train.shape[0])
        np.random.shuffle(perm)
        data_train = data_train[perm]
        num_batch = int(data_train.shape[0] / BATCH_SIZE)
        for i in range(0, num_batch):
            start = i * BATCH_SIZE
            ns = data_train[start:start + BATCH_SIZE]
            xs, ys = load_images_and_labels(ns,
                                            config['image_dir'],
                                            N_CLASSES,
                                            file_names_dict,
                                            input_size,
                                            channels,
                                            do_center_crop=True)
            [_, _loss, summary_str] = sess.run([train_step, loss, sum_train],
                                               feed_dict={
                                                   x_: xs,
                                                   isTrain: True,
                                                   y_: ys
                                               })
            writer.add_summary(summary_str, itr_train)
            itr_train += 1
            total_loss += _loss
        total_loss /= num_batch
        print("Epoch: " + str(epoch) + " loss: " + str(total_loss) + '\n')
        train_loss.append(total_loss)

        total_loss = 0.0
        perm = np.arange(data_test.shape[0])
        np.random.shuffle(perm)
        data_test = data_test[perm]
        num_batch = int(data_test.shape[0] / BATCH_SIZE)
        for i in range(0, num_batch):
            start = i * BATCH_SIZE
            ns = data_test[start:start + BATCH_SIZE]
            xs, ys = load_images_and_labels(ns,
                                            config['image_dir'],
                                            N_CLASSES,
                                            file_names_dict,
                                            input_size,
                                            channels,
                                            do_center_crop=True)
            [_loss, summary_str] = sess.run([loss, sum_train],
                                            feed_dict={
                                                x_: xs,
                                                isTrain: False,
                                                y_: ys
                                            })
            writer_test.add_summary(summary_str, itr_test)
            itr_test += 1
            total_loss += _loss
        total_loss /= num_batch
        print("Epoch: " + str(epoch) + " Test loss: " + str(total_loss) + '\n')
        test_loss.append(total_loss)

        if epoch % 2 == 0:
            checkpoint_name = os.path.join(output_dir,
                                           'cp1_epoch' + str(epoch) + '.ckpt')
            save_path = saver.save(sess, checkpoint_name)
            np.save(os.path.join(output_dir, 'logs', 'train_loss.npy'),
                    np.asarray(train_loss))
            np.save(os.path.join(output_dir, 'logs', 'test_loss.npy'),
                    np.asarray(test_loss))
    checkpoint_name = os.path.join(output_dir,
                                   'cp1_epoch' + str(epoch) + '.ckpt')
    save_path = saver.save(sess, checkpoint_name)
    np.save(os.path.join(output_dir, 'logs', 'train_loss.npy'),
            np.asarray(train_loss))
    np.save(os.path.join(output_dir, 'logs', 'test_loss.npy'),
            np.asarray(test_loss))
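
An optional follow-up sketch, not part of the original script: train() stores the per-epoch losses as .npy files under the experiment's logs folder, and they can be inspected afterwards with matplotlib (the log_dir path below is a placeholder).

import os

import matplotlib.pyplot as plt
import numpy as np

# Placeholder path: <log_dir>/<name>/logs from the config used for training.
log_dir = os.path.join('output', 'my_classifier_run', 'logs')
train_loss = np.load(os.path.join(log_dir, 'train_loss.npy'))
test_loss = np.load(os.path.join(log_dir, 'test_loss.npy'))

plt.plot(train_loss, label='train')
plt.plot(test_loss, label='test')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()
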
Example #5
        for img_name in os.listdir(path + pathnew):
            im = plt.imread(path + pathnew + '/' + img_name)
            imNew = generic_filter(im, spatial_kernel_filter,
                                   size=(neighborhood, neighborhood))
            imNew = np.stack((imNew,) * 3, axis=2)
            plt.imsave(path + '../Modified Images ' + technique + '/' +
                       pathnew + '/' + img_name, imNew)

# =============================================================================
# LOAD IMAGES AND CREATE / LOAD MODEL
# =============================================================================

path = 'C:/Users/Pablo/Google Drive/TFM/'

# Load images
class_names = os.listdir(path + 'Modified Images ' + technique + '/')
class_numbers = list(map(lambda x: int(x[0]), class_names))
images, labels = utils.load_images_and_labels(path + 'Modified Images ' +
                                              technique + '/')
num_samples, height, width = images.shape

for model_name in model_names:

    if model_name == 'eigen':
        num_components = 40
    elif model_name == 'fisher':
        num_components = 6
    else:
        print('ERROR: Wrong model_name')

    # Generate and save model or Load model if exists
    if not os.path.isfile(path + 'Models/' + model_name + '_' + technique +
                          '_' + str(num_components) + '_' + str(threshold) +
                          '.yml'):
        if model_name == 'eigen':
            model = cv2.face.createEigenFaceRecognizer(num_components)