Example #1
def dump_errors(name,
                ckpt,
                fold,
                output_filename,
                method='full',
                samples=0,
                pooling='median'):
    samples = int(samples)
    with get_session() as sess:
        kwargs = {'dataset_name': 'gehler', 'subset': 0, 'fold': fold}
        fcn = FCN(sess=sess, name=name, kwargs=kwargs)
        fcn.load(ckpt)
        for i in range(4):
            if method == 'full':
                errors, t, _, _, _ = fcn.test(scales=[0.5])
            elif method == 'resize':
                errors, t = fcn.test_resize()
            elif method == 'patches':
                errors, t = fcn.test_patch_based(scale=0.5,
                                                 patches=samples,
                                                 pooling=pooling)
            else:
                assert False
    utils.print_angular_errors(errors)
    # pickle requires a binary-mode file handle
    with open(output_filename, 'wb') as f:
        pickle.dump({'e': errors, 't': t}, f)
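A minimal usage sketch for dump_errors (the model name, checkpoint path, and output file below are placeholders, not values from the project; it assumes the same imports the function itself relies on, e.g. pickle):

dump_errors('fc4_gehler_fold0',               # placeholder model name
            'models/fc4_gehler_fold0.ckpt',   # placeholder checkpoint path
            fold=0,
            output_filename='errors_fold0.pkl',
            method='full')

# Reload the pickled results; the keys match what dump_errors wrote above.
with open('errors_fold0.pkl', 'rb') as f:
    result = pickle.load(f)
print(len(result['e']), 'per-image angular errors; timing value:', result['t'])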
Example #2
    def test_naive():
        t = time.time()

        import scipy.io
        std = scipy.io.loadmat('/home/yuanming/colorchecker_shi_greyworld.mat')
        # Materialize as a list so names.index() below also works under Python 3
        names = [x[0].encode('utf8') for x in std['all_image_names'][0]]
        #print(names)
        records = load_data(TEST_FOLDS)

        errors = []
        for r in records:
            # Gray-world estimate: per-channel mean, with the channel order
            # reversed (presumably BGR -> RGB) and normalized to unit length
            est = np.mean(r.img, axis=(0, 1))[::-1]
            est /= np.linalg.norm(est)
            #print(r.fn, est)
            #est=np.array((1, 1, 1))
            #est2= std['estimated_illuminants'][names.index(r.fn[:-4])]
            gt2 = std['groundtruth_illuminants'][names.index(r.fn[:-4])]
            #print(est2)
            error = math.degrees(angular_error(est, gt2))
            errors.append(error)

        print("Full Image:")
        ret = print_angular_errors(errors)
        print('Test time:',
              time.time() - t, 'per image:', (time.time() - t) / len(records))

        return errors
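Every example wraps angular_error(est, gt) in math.degrees, so the helper presumably returns the angle between the estimated and the ground-truth illuminant in radians. A plausible, self-contained sketch (not necessarily the project's actual implementation):

import math

import numpy as np


def angular_error(estimate, ground_truth):
    # Angle in radians between two illuminant vectors, independent of their norms.
    cos = np.dot(estimate, ground_truth) / (
        np.linalg.norm(estimate) * np.linalg.norm(ground_truth) + 1e-9)
    return math.acos(np.clip(cos, -1.0, 1.0))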
Example #3
    def test_patch_based(self, scale, patches, pooling='median'):
        records = load_data(TEST_FOLDS)
        avg_errors = []
        median_errors = []
        t = time.time()

        def sample_patch(img):
            s = FCN_INPUT_SIZE
            x = random.randrange(0, img.shape[0] - s + 1)
            y = random.randrange(0, img.shape[1] - s + 1)
            return img[x:x + s, y:y + s]

        for r in records:
            img = cv2.resize(r.img, (0, 0), fx=scale, fy=scale)
            img = [sample_patch(img) for i in range(patches)]
            illum_est = []
            batch_size = 4
            for j in range((len(img) + batch_size - 1) // batch_size):
                illum_est.append(
                    self.sess.run(self.illum_normalized,
                                  feed_dict={
                                      self.images:
                                      img[j * batch_size:(j + 1) * batch_size],
                                      self.dropout:
                                      1.0
                                  }))
            illum_est = np.vstack(illum_est)
            # Channel-wise median over all patch-level estimates
            med = len(illum_est) // 2
            illum_est_median = np.array(
                [sorted(list(illum_est[:, i]))[med] for i in range(3)])
            illum_est_avg = np.mean(illum_est, axis=0)
            avg_error = math.degrees(angular_error(illum_est_avg, r.illum))
            median_error = math.degrees(
                angular_error(illum_est_median, r.illum))
            avg_errors.append(avg_error)
            median_errors.append(median_error)
        print("Avg pooling:")
        print_angular_errors(avg_errors)
        print("Median pooling:")
        print_angular_errors(median_errors)
        ppt = (time.time() - t) / len(records)
        print('Test time:', time.time() - t, 'per image:', ppt)
        if pooling == 'median':
            errors = median_errors
        else:
            errors = avg_errors
        return errors, ppt
Example #4
File: fcn.py Project: zjudzl/fc4
  def test_resize(self):
    records = load_data(TEST_FOLDS)
    t = time.time()

    errors = []
    for r in records:
      img = cv2.resize(r.img, (FCN_INPUT_SIZE, FCN_INPUT_SIZE))
      illum_est = self.sess.run(
          self.illum_normalized,
          feed_dict={self.images: [img],
                     self.dropout: 1.0})
      avg_error = math.degrees(angular_error(illum_est, r.illum))
      errors.append(avg_error)
    print_angular_errors(errors)
    ppt = (time.time() - t) / len(records)
    print('Test time:', time.time() - t, 'per image:', ppt)
    return errors, ppt
Example #5
def main():
    x = tf.placeholder(tf.float32, [batch_size, 512, 512, 3])
    y = tf.placeholder(tf.float32, [None, 3])
    keep_prob = tf.placeholder(tf.float32)
    out = M.fc4_architecture(x, keep_prob)
    angular_loss = N.get_angular_error(out, y)
    # Probably use no augmentation in testing?
    dp = DataProvider(True, ['g0'])
    dp.set_batch_size(batch_size)
    nr_step = 100
    saver = tf.train.Saver()
    errors = []
    with tf.Session() as sess:
        saver.restore(sess, "tf_log/model.ckpt")
        for epoch in range(0, nr_epochs):
            for step in range(0, nr_step):
                batch = dp.get_batch()
                feed_x = batch[0]
                feed_y = batch[2]
                ans, angular_error = sess.run([out, angular_loss],
                                              feed_dict={
                                                  x: feed_x,
                                                  y: feed_y,
                                                  keep_prob: 1.0
                                              })
                print(str(step) + " Angular_error: " + str(angular_error))
                errors.append(angular_error)
                print(ans[0])
                print(feed_y[0])
                img = feed_x[0] / feed_x[0].max()
                #cv2.imshow("Input", np.power(img, 1 / 2.2))
                #cv2.waitKey(0)
                cv2.imwrite("data/inference/" + str(step) + "_img_input.png",
                            255 * np.power(img, 1 / 2.2))
                img_gt = sp.apply_gt(img, feed_y[0])
                cv2.imwrite("data/inference/" + str(step) + "_img_gt.png",
                            255 * np.power(img_gt, 1 / 2.2))
                img_pred = sp.apply_gt(img, ans[0])
                cv2.imwrite("data/inference/" + str(step) + "_img_pred.png",
                            255 * np.power(img_pred, 1 / 2.2))
        dp.stop()
        # Print the stats
        ut.print_angular_errors(errors)
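The calls to sp.apply_gt above white-balance an image with a given illuminant before it is written to disk. A hedged sketch of what such a helper typically does in color-constancy code (the normalization details are assumptions; the actual sp.apply_gt may differ):

import numpy as np


def apply_gt(img, illum):
    # Divide each channel by the normalized illuminant, then rescale for display.
    illum = np.asarray(illum, dtype=np.float32)
    illum = illum / (np.linalg.norm(illum) + 1e-9)
    corrected = img / (illum[None, None, :] + 1e-9)
    return corrected / max(float(corrected.max()), 1e-9)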
Example #6
def test(FLAGS):
    batch_size = FLAGS.batch_size
    height = width = FLAGS.patch_size
    final_W = FLAGS.final_W
    final_K = FLAGS.final_K
    dataset_dir = os.path.join(FLAGS.dataset_dir)
    dataset_file_name = FLAGS.dataset_file_name
    if FLAGS.use_ms:
        input_image, gt_image = data_provider.load_batch(
            dataset_dir, dataset_file_name, batch_size, height, width,
            channel=final_W, shuffle=False, use_ms=True, is_train=False)
    else:
        input_image, gt_image, label, file_name = data_provider.load_batch(
            dataset_dir, dataset_file_name, batch_size, height, width,
            channel=final_W, shuffle=False, use_ms=False,
            with_file_name_gain=True, is_train=False)

    with tf.variable_scope('generator'):
        if FLAGS.patch_size == 128:
            N_size = 3
        else:
            N_size = 2
        filters = net.convolve_net(input_image, final_K, final_W, ch0=64,
                                   N=N_size, D=3, scope='get_filted',
                                   separable=False, bonus=False)
    predict_image = net.convolve(input_image, filters, final_K, final_W)

    # summaries
    # filters_sum = tf.summary.image('filters', filters)
    # input_image_sum = tf.summary.image('input_image', input_image)
    # gt_image_sum = tf.summary.image('gt_image', gt_image)
    # predict_image_sum = tf.summary.image('predict_image', predict_image)

    sum_total = tf.summary.merge_all()

    config = tf.ConfigProto()
    with tf.Session(config=config) as sess:

        print('Initializing variables')
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        if FLAGS.write_sum:
            writer = tf.summary.FileWriter(FLAGS.save_dir, sess.graph)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        restorer = tf.train.Saver(max_to_keep=None)

        ckpt_path = tf.train.latest_checkpoint(FLAGS.ckpt_path)
        if ckpt_path is not None:
            print ('Restoring from', ckpt_path)
            restorer.restore(sess, ckpt_path)

        errors = []

        max_steps = FLAGS.total_test_num // batch_size
        for i_step in range(max_steps):
            if FLAGS.use_ms:
                input_image_, gt_image_, predict_image_, filters_, sum_total_ = \
                    sess.run([input_image, gt_image, predict_image, filters, sum_total])
            else:
                input_image_, gt_image_, predict_image_, filters_, label_, file_name_ , sum_total_ = \
                    sess.run([input_image, gt_image, predict_image, filters, label, file_name, sum_total])

            batch_confidence_r = utils.compute_rate_confidence(filters_, input_image_, final_K, final_W, sel_ch = 0, ref_ch = [2])
            batch_confidence_b = utils.compute_rate_confidence(filters_, input_image_, final_K, final_W, sel_ch = 2, ref_ch = [0])

            concat = utils.get_concat(input_image_, gt_image_, predict_image_)
            for batch_i in range(batch_size):
                est = utils.solve_gain(input_image_[batch_i], np.clip(predict_image_[batch_i], 0, 500))
                print ('confidence_r: ', batch_confidence_r[batch_i])
                print ('confidence_b: ', batch_confidence_b[batch_i])

                if FLAGS.use_ms:
                    save_file_name = '%03d_%02d.png'%(i_step,batch_i)
                else:
                    current_file_name = file_name_[batch_i][0].decode('utf-8').split('/')[-1]
                    print (' {} saved once'.format(current_file_name))
                    gt = label_[batch_i]
                    error = utils.angular_error(est, gt)
                    print('est:', est)
                    print('gt:', gt)
                    print('error:', error)
                    errors.append(error)
                    save_file_name = current_file_name

                est_img_ = np.clip(input_image_[batch_i] * est, 0, 255.0) / 255.0
                all_concat = np.concatenate([concat[batch_i], est_img_], axis = 1)
                if FLAGS.save_dir is not None:
                    imsave(os.path.join(FLAGS.save_dir, save_file_name), all_concat*255.0 )

                # np.save(os.path.join(FLAGS.save_dir,'%03d_%02d.npy'%(i_step,batch_i)), predict_image_[batch_i])

            if FLAGS.write_sum and i_step % 20 == 0:
                writer.add_summary(sum_total_, i_step)
                print('summary saved')

        coord.request_stop()
        coord.join(threads)
    if errors:
        utils.print_angular_errors(errors)
Example #7
import sys
import os
import pickle

from utils import *


def load_errors(model_name):
    model_path = 'models/fc4/' + model_name + '/'
    if model_name.endswith('.pkl'):
        pkl = model_name
    else:
        # Find the last one
        fn = list(
            sorted(
                filter(lambda x: x.startswith('error'),
                       os.listdir(model_path))))[-1]
        pkl = os.path.join(model_path, fn)
    # pickle files must be opened in binary mode
    with open(pkl, 'rb') as f:
        return pickle.load(f)


def combine(models):
    combined = []
    for model in models:
        combined += load_errors(model)
    return combined


if __name__ == '__main__':
    models = sys.argv[1:]
    print_angular_errors(combine(models))
Example #8
def test(FLAGS):
    batch_size = FLAGS.batch_size
    height = width = FLAGS.patch_size
    final_W = FLAGS.final_W
    final_K = FLAGS.final_K
    dataset_dir = os.path.join(FLAGS.dataset_dir)
    dataset_file_name = FLAGS.dataset_file_name
    shuffle = FLAGS.shuffle
    input_image = tf.placeholder(tf.float32, shape=(None, height, width, 3))

    with tf.variable_scope('generator'):
        if FLAGS.patch_size == 128:
            N_size = 3
        else:
            N_size = 2
        filters = net.convolve_net(input_image,
                                   final_K,
                                   final_W,
                                   ch0=64,
                                   N=N_size,
                                   D=3,
                                   scope='get_filted',
                                   separable=False,
                                   bonus=False)
    predict_image = convolve(input_image, filters, final_K, final_W)

    config = tf.ConfigProto()
    with tf.Session(config=config) as sess:

        print('Initializing variables')
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        restorer = tf.train.Saver(max_to_keep=None)

        ckpt_path = tf.train.latest_checkpoint(FLAGS.ckpt_path)
        if ckpt_path is not None:
            print('Restoring from', ckpt_path)
            restorer.restore(sess, ckpt_path)

        errors = []

        max_steps = FLAGS.total_test_num // batch_size
        for i_step in range(max_steps):
            if FLAGS.use_ms:
                imgs, imgs_gt, labels, file_names, configs = utils.data_loader_np(
                    data_folder=dataset_dir,
                    data_txt=dataset_file_name,
                    patch_size=FLAGS.patch_size,
                    start_index=i_step * batch_size,
                    batch_size=batch_size,
                    use_ms=True)
            else:
                imgs, imgs_gt, labels, file_names = utils.data_loader_np(
                    data_folder=dataset_dir,
                    data_txt=dataset_file_name,
                    patch_size=FLAGS.patch_size,
                    start_index=i_step * batch_size,
                    batch_size=batch_size,
                    use_ms=False)
            input_image_ = utils.batch_stable_process(
                imgs,
                use_crop=FLAGS.use_crop,
                use_clip=FLAGS.use_clip,
                use_flip=FLAGS.use_flip,
                use_rotate=FLAGS.use_rotate,
                use_noise=FLAGS.use_noise)
            gt_image_ = imgs_gt
            predict_image_, filters_ = sess.run(
                [predict_image, filters],
                feed_dict={input_image: input_image_})
            # confidence maps of shape [batch, h, w]
            batch_confidence_r = utils.compute_rate_confidence(filters_,
                                                               input_image_,
                                                               final_K,
                                                               final_W,
                                                               sel_ch=0,
                                                               ref_ch=[2],
                                                               is_spatial=True)
            batch_confidence_b = utils.compute_rate_confidence(filters_,
                                                               input_image_,
                                                               final_K,
                                                               final_W,
                                                               sel_ch=2,
                                                               ref_ch=[0],
                                                               is_spatial=True)

            concat = utils.get_concat(input_image_, gt_image_, predict_image_)
            num_filt = (FLAGS.final_K**2) * (FLAGS.final_W**2)
            for batch_i in range(batch_size):
                est_global = utils.solve_gain(
                    input_image_[batch_i],
                    np.clip(predict_image_[batch_i], 0, 500))

                if FLAGS.use_ms:
                    save_file_name = '%s_%s.png' % (
                        file_names[batch_i][0][:-4],
                        file_names[batch_i][1][:-4])
                else:
                    print('confidence_r: ',
                          np.mean(batch_confidence_r[batch_i]))
                    print('confidence_b: ',
                          np.mean(batch_confidence_b[batch_i]))
                    current_file_name = file_names[batch_i]
                    print(' {} saved once'.format(current_file_name))
                    gt = labels[batch_i]
                    error = utils.angular_error(est_global, gt)
                    print('est:', est_global)
                    print('gt:', gt)
                    print('error:', error)
                    errors.append(error)
                    save_file_name = current_file_name

                est_global_img_ = np.clip(input_image_[batch_i] * est_global,
                                          0, 255.0) / 255.0
                all_concat = np.concatenate([concat[batch_i], est_global_img_],
                                            axis=1)
                if FLAGS.save_dir is not None:
                    imsave(os.path.join(FLAGS.save_dir, save_file_name),
                           all_concat * 255.0)
                    np_concat = np.concatenate(
                        [input_image_[batch_i], predict_image_[batch_i]],
                        axis=0)
                    file_name_np = os.path.join(FLAGS.save_dir,
                                                save_file_name[:-3] + 'npy')
                    np.save(file_name_np, np_concat)
                    if FLAGS.use_ms:
                        if FLAGS.save_clus:
                            print('local gain fitting', save_file_name)
                            gain_box, clus_img, clus_labels = utils.gain_fitting(
                                input_image_[batch_i],
                                predict_image_[batch_i],
                                is_local=True,
                                n_clusters=2,
                                gamma=4.0,
                                with_clus=True)
                            num_multi = len(set(clus_labels))
                            for index_ill in range(num_multi):
                                confi_multi_r = utils.get_confi_multi(
                                    clus_labels,
                                    batch_confidence_r[batch_i],
                                    label=index_ill)
                                confi_multi_b = utils.get_confi_multi(
                                    clus_labels,
                                    batch_confidence_b[batch_i],
                                    label=index_ill)
                                print('confidence_r for ill %d' % index_ill,
                                      confi_multi_r)
                                print('confidence_b for ill %d' % index_ill,
                                      confi_multi_b)
                            imsave(
                                os.path.join(
                                    FLAGS.save_dir,
                                    '%s_clus.png' % (save_file_name[:-4])),
                                clus_img)
                        if FLAGS.save_filt:
                            cur_filt = filters_[batch_i]
                            for filt_index in range(num_filt):
                                cur_ = cur_filt[..., filt_index]
                                imsave(
                                    os.path.join(
                                        FLAGS.save_dir, '%s_filt_%d.png' %
                                        (save_file_name[:-4], filt_index)),
                                    cur_)
                        file_name_json = os.path.join(
                            FLAGS.save_dir, save_file_name[:-3] + 'json')
                        save_dict = configs[batch_i]
                        with open(file_name_json, 'w') as fp:
                            json.dump(save_dict, fp, ensure_ascii=False)
                # np.save(os.path.join(FLAGS.save_dir,'%03d_%02d.npy'%(i_step,batch_i)), predict_image_[batch_i])
    if errors:
        utils.print_angular_errors(errors)
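Both test() variants use utils.solve_gain to recover a per-channel gain est_global such that input_image_ * est_global approximates the network's predicted, white-balanced image. A minimal least-squares sketch under that assumption (the project's own routine may be more elaborate):

import numpy as np


def solve_gain(input_img, predict_img):
    # Per-channel scalar gain g_c minimizing ||g_c * input_c - predict_c||^2.
    gains = []
    for c in range(input_img.shape[-1]):
        x = input_img[..., c].ravel().astype(np.float64)
        y = predict_img[..., c].ravel().astype(np.float64)
        gains.append(np.dot(x, y) / (np.dot(x, x) + 1e-9))
    return np.array(gains)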
Example #9
    def test(self,
             summary=False,
             scales=[1.0],
             weights=[],
             summary_key=0,
             data=None,
             eval_speed=False,
             visualize=False):
        if not TEST_FOLDS:
            return [0]
        if data is None:
            records = load_data(TEST_FOLDS)
        else:
            records = data
        avg_errors = []
        median_errors = []
        t = time.time()

        summaries = []
        if weights == []:
            weights = [1.0] * len(scales)

        outputs = []
        ground_truth = []
        avg_confidence = []

        errors = []
        for r in records:
            all_pixels = []
            for scale, weight in zip(scales, weights):
                img = r.img
                if scale != 1.0:
                    img = cv2.resize(img, (0, 0), fx=scale, fy=scale)
                shape = img.shape[:2]
                if shape not in self.test_nets:
                    aspect_ratio = 1.0 * shape[1] / shape[0]
                    if aspect_ratio < 1:
                        target_shape = (MERGED_IMAGE_SIZE,
                                        MERGED_IMAGE_SIZE * aspect_ratio)
                    else:
                        target_shape = (MERGED_IMAGE_SIZE / aspect_ratio,
                                        MERGED_IMAGE_SIZE)
                    target_shape = tuple(map(int, target_shape))

                    test_net = {}
                    test_net['illums'] = tf.placeholder(tf.float32,
                                                        shape=(None, 3),
                                                        name='test_illums')
                    test_net['images'] = tf.placeholder(tf.float32,
                                                        shape=(None, shape[0],
                                                               shape[1], 3),
                                                        name='test_images')
                    with tf.variable_scope("FCN", reuse=True):
                        test_net['pixels'] = FCN.build_branches(
                            test_net['images'], 1.0)
                        test_net['est'] = tf.reduce_sum(test_net['pixels'],
                                                        axis=(1, 2))
                    test_net['merged'] = get_visualization(
                        test_net['images'], test_net['pixels'],
                        test_net['est'], test_net['illums'], target_shape)
                    self.test_nets[shape] = test_net
                test_net = self.test_nets[shape]

                pixels, est, merged = self.sess.run(
                    [test_net['pixels'], test_net['est'], test_net['merged']],
                    feed_dict={
                        test_net['images']: img[None, :, :, :],
                        test_net['illums']: r.illum[None, :]
                    })

                if eval_speed:
                    eval_batch_size = 1
                    eval_packed_input = img[None, :, :, :].copy()
                    eval_packed_input = np.concatenate(
                        [eval_packed_input for i in range(eval_batch_size)],
                        axis=0)
                    eval_packed_input = np.ascontiguousarray(eval_packed_input)
                    eval_start_t = time.time()
                    print(eval_packed_input.shape)
                    eval_rounds = 100
                    images_variable = tf.Variable(
                        tf.random_normal(eval_packed_input.shape,
                                         dtype=tf.float32,
                                         stddev=1e-1))
                    print(images_variable)
                    for eval_t in range(eval_rounds):
                        print(eval_t)
                        pixels, est = self.sess.run(
                            [test_net['pixels'], test_net['est']],
                            feed_dict={
                                test_net['images']:  #images_variable,
                                eval_packed_input,
                            })
                    eval_elapsed_t = time.time() - eval_start_t
                    print('per image evaluation time',
                          eval_elapsed_t / (eval_rounds * eval_batch_size))

                pixels = pixels[0]
                #est = est[0]
                merged = merged[0]

                all_pixels.append(weight * pixels.reshape(-1, 3))

            # Sum the weighted per-pixel estimates across scales, then L2-normalize
            all_pixels = np.sum(np.concatenate(all_pixels, axis=0), axis=0)
            est = all_pixels / (np.linalg.norm(all_pixels) + 1e-7)
            outputs.append(est)
            ground_truth.append(r.illum)
            error = math.degrees(angular_error(est, r.illum))
            errors.append(error)
            avg_confidence.append(np.mean(np.linalg.norm(all_pixels)))

            summaries.append((r.fn, error, merged))
        print("Full Image:")
        ret = print_angular_errors(errors)
        ppt = (time.time() - t) / len(records)
        print('Test time:', time.time() - t, 'per image:', ppt)

        if summary:
            for fn, error, merged in summaries:
                folder = self.get_ckpt_folder() + '/test%04dsummaries_%4f/' % (
                    summary_key, scale)
                try:
                    os.mkdir(folder)
                except OSError:
                    # directory already exists
                    pass
                summary_fn = '%s/%5.3f-%s.png' % (folder, error, fn)
                cv2.imwrite(summary_fn, merged[:, :, ::-1] * 255)

        if visualize:
            for fn, error, merged in summaries:
                cv2.imshow('Testing', merged[:, :, ::-1])
                cv2.waitKey(0)

        return errors, ppt, outputs, ground_truth, ret, avg_confidence
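All nine examples funnel their per-image angular errors (in degrees) into print_angular_errors. A sketch of such a reporting helper, assuming it prints and returns the summary statistics customary in color-constancy evaluation (mean, median, trimean, best/worst 25%); the actual utils implementation may report a different set:

import numpy as np


def print_angular_errors(errors):
    errors = np.sort(np.asarray(errors, dtype=np.float64))
    n = len(errors)
    q1, q2, q3 = np.percentile(errors, [25, 50, 75])
    stats = {
        'mean': float(np.mean(errors)),
        'median': float(q2),
        'trimean': float(0.25 * (q1 + 2 * q2 + q3)),
        'best 25%': float(np.mean(errors[:max(n // 4, 1)])),
        'worst 25%': float(np.mean(errors[-max(n // 4, 1):])),
    }
    for name, value in stats.items():
        print('%-10s %.3f' % (name, value))
    return stats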