def save_data(self,
              x,
              name,
              save_dir,
              x_is_real_x=False,
              batch_image_fn=None,
              emphasize=None):
    """Save dataspace instances as a batched image grid.

    Args:
      x: A numpy array of dataspace points, or of latent codes when
          `x_is_real_x` is False.
      name: A string indicating the name used in the saved file.
      save_dir: A string indicating the directory to put the saved file.
      x_is_real_x: A boolean indicating whether `x` is already in dataspace.
          If not, `x` is decoded to dataspace before saving.
      batch_image_fn: A function that batches images. If not specified,
          the default function `common.batch_image` is used.
      emphasize: An optional list of indices of `x` to emphasize. If not
          specified, nothing is emphasized.
    """
    batch_image_fn = batch_image_fn or common.batch_image

    # When `x` holds latent codes, also dump them as text for inspection.
    if not x_is_real_x:
      np.savetxt(join(save_dir, '%s.array.txt' % name), x)
    real_x = x if x_is_real_x else self.decode(x)
    real_x = common.post_proc(real_x, self.config, emphasize=emphasize)
    batched_real_x = batch_image_fn(real_x)
    sample_file = join(save_dir, '%s.png' % name)
    common.save_image(batched_real_x, sample_file)
  def save_data(self,
                x,
                name,
                save_dir,
                x_is_real_x=False,
                batch_image_fn=None,
                emphasize=None):
    """Save dataspace instances as audio plus a spectrum image.

    Args:
      x: A numpy array of dataspace points, or of latent codes when
          `x_is_real_x` is False.
      name: A string indicating the name used in the saved files.
      save_dir: A string indicating the directory to put the saved files.
      x_is_real_x: A boolean indicating whether `x` is already in dataspace.
          If not, `x` is decoded to dataspace before saving.
      batch_image_fn: A function that batches spectrum images. If not
          specified, the default function `common.batch_image` is used.
      emphasize: An optional list of indices of `x` to emphasize in the
          spectrum image. If not specified, nothing is emphasized.
    """
    if not x_is_real_x:
      np.savetxt(join(save_dir, '%s.x_array.txt' % name), x)
    real_x = x if x_is_real_x else self.decode(x)

    # Save the raw waveform as a 16 kHz WAV file.
    batched_real_x = common.batch_audio(real_x)
    sample_file = join(save_dir, '%s.wav' % name)
    wavfile.write(sample_file, rate=16000, data=batched_real_x)

    # Additionally save a spectrum visualization of the same audio.
    spectrum = common.audio_to_spectrum(real_x)
    spectrum = common.post_proc(spectrum, self.config, emphasize=emphasize)
    batched_spectrum = (batch_image_fn or common.batch_image)(spectrum)
    spectrum_sample_file = join(save_dir, '%s.spectrum.png' % name)
    common.save_image(batched_spectrum, spectrum_sample_file)
 def effect(self):
     """Apply a pattern-based filter (via `pattern`) to the selected SVG image node."""
     image_node = None
     for node in self.svg.selected.values():
         if (common.is_image(node)):
             image_node = node
         # NOTE(review): this body runs for every node visited after an image
         # has been found, re-processing the same node — confirm intended.
         if image_node is not None:
             # Grayscale -> pattern array -> back to RGB for saving as PNG.
             image = common.prep_image(image_node)
             image = image.convert('L')
             data = pattern(image)
             image = Image.fromarray(data)
             image = image.convert('RGB')
             common.save_image(image_node, image, img_format='PNG')
 def effect(self):
     """Apply a CMYK halftone effect (via `gcr` and `halftone`) to the selected SVG image node."""
     image_node = None
     for node in self.svg.selected.values():
         if (common.is_image(node)):
             image_node = node
         # NOTE(review): this body runs for every node visited after an image
         # has been found, re-processing the same node — confirm intended.
         if image_node is not None:
             image = common.prep_image(image_node)
             # Gray-component replacement, then per-channel halftone dots.
             cmyk = gcr(image, 0)
             dots = halftone(image, cmyk, 10, 1)
             image = Image.merge('CMYK', dots)
             image = image.convert('RGB')
             common.save_image(image_node, image, img_format='PNG')
	def effect(self):
		"""Apply per-channel error-diffusion dithering to the selected SVG image node."""
		image_node = None
		for node in self.selected.values():
			if(common.is_image(node)):
				image_node = node
			# NOTE(review): this body runs for every node visited after an image
			# has been found, re-processing the same node — confirm intended.
			if image_node is not None:
				image = common.prep_image(image_node)
				image = image.convert('CMYK')
				image = image.split()
				# Dither each CMYK channel in place via `error_dispersion`.
				for channel in image:
					error_dispersion(channel.load(), channel.size)
				image = Image.merge("CMYK", image).convert("RGB")
				common.save_image(image_node, image, img_format='PNG')
Esempio n. 6
0
    def save_data(self, x, name, save_dir, x_is_real_x=False):
        """Save dataspace instances as a batched image grid.

        Args:
          x: A numpy array of dataspace points, or of latent codes when
              `x_is_real_x` is False.
          name: A string indicating the name used in the saved file.
          save_dir: A string indicating the directory to put the saved file.
          x_is_real_x: A boolean indicating whether `x` is already in
              dataspace. If not, `x` is decoded to dataspace before saving.
        """
        real_x = x if x_is_real_x else self.decode(x)
        real_x = common.post_proc(real_x, self.config)
        batched_real_x = common.batch_image(real_x)
        sample_file = join(save_dir, '%s.png' % name)
        common.save_image(batched_real_x, sample_file)
Esempio n. 7
0
            break
        if diff[depth] < diff[data_min]:
            data_min = depth
    return data_min


(left_min, left_max) = calculate_min_max(left)
(right_min, right_max) = calculate_min_max(right)

# Scan pixels: compute a per-pixel disparity via `calculate_diff_bt`.
for row_pos in range(len(left)):
    # Read the current rows of the left/right images.
    row_left = left[row_pos]
    row_right = right[row_pos]
    # Read the matching per-row min/max envelopes.
    row_left_min = left_min[row_pos]
    row_left_max = left_max[row_pos]
    row_right_min = right_min[row_pos]
    row_right_max = right_max[row_pos]
    for pixel_pos in range(len(row_left)):
        depth = calculate_diff_bt((row_left, row_left_min, row_left_max), (row_right, row_right_min, row_right_max),
                                  pixel_pos)
        # Scale into 0-255 for display (disparity range presumably 0-10 — confirm).
        my_result[row_pos][pixel_pos] = depth * 255 / 10

data_set['my_result_4'] = my_result
save_image(my_result, 'pixel bt method')
show_image(data_set)
if __name__ == '__main__':
    # calculate_min_max(left)
    pass
Esempio n. 8
0
    # Load the stereo pair and reference disparity from the dataset.
    left = data_set['left']
    right = data_set['right']
    result = data_set['result']
    import time

    window_size = 5
    d_max = 32
    tt = time.time()
    # SGM pipeline: texture detection, cost computation, aggregation, result.
    stereo = StereoVisionBM_SGM(left, right, window_size, d_max)
    stereo.low_texture_detection()
    stereo.compute_cost()
    stereo.aggregate_cost()
    my_result = stereo.get_result()

    # Left-right consistency check; scale into 0-255 for display
    # (the extra /16 presumably undoes 1/16-pixel units — confirm).
    diff_result = stereo.left_right_check()
    diff_result = diff_result * (255.0 / d_max / 16)
    data_set['diff_result'] = diff_result

    post_result = stereo.post_processing()
    post_result = post_result * (255.0 / d_max / 16)
    data_set['post_result'] = post_result

    # Elapsed time for the whole pipeline (Python 2 print statement).
    print time.time() - tt

    my_result = my_result * (255.0 / d_max / 16)
    data_set['my_result_sgm'] = my_result

    show_image(data_set)
    save_image(my_result, 'window method sgm')
    save_image(post_result, 'window method sgm post')
Esempio n. 9
0
            # 统计结果 最后一个最小的视差
            min_d_last = np.argmin(cost[-1])
            self.my_result[row][-1] = min_d_last
            min_d = min_d_last
            for column in range(self.column_length - 1, 0, -1):
                self.my_result[row][column - 1] = result_array[column][min_d]
                min_d = result_array[column][min_d]
        return self.my_result.copy()


if __name__ == '__main__':
    data_set = get_data_set(0)
    # get data
    left = data_set['left']
    right = data_set['right']
    result = data_set['result']
    import time

    window_size = 5
    d_max = 20
    tt = time.time()
    # Dynamic-programming stereo matcher (BT cost, variant 2).
    stereo = StereoVisionBM_DP_BT2(left, right, window_size, d_max)
    stereo.compute_cost()
    stereo.aggregate_cost()
    my_result = stereo.get_result()
    # Scale disparities into 0-255 for display.
    my_result = my_result * 255 / d_max
    data_set['my_result_bt_dp2'] = my_result
    # Elapsed time (Python 2 print statement).
    print time.time() - tt
    show_image(data_set)
    save_image(my_result, 'window method BT DP 2')
Esempio n. 10
0
    data_set = get_data_set(0)
    # get data
    left = data_set['left']
    right = data_set['right']
    result = data_set['result']
    import time

    window_size = 5
    d_max = 15
    tt = time.time()
    stereo = StereoVisionBM1(left, right, window_size, d_max)
    # The triple-quoted block below is a disabled grayscale run, kept for
    # reference; only the color run that follows is executed.
    '''
    stereo.get_sad_all()
    my_result = stereo.get_result()
    my_result = my_result * 255 / d_max
    data_set['my_result_6'] = my_result
    print time.time() - tt
    save_image(my_result, 'window method 6')
    show_image(data_set)'''
    # Re-load the dataset in color and run the block matcher (`get_sad_all`).
    data_set = get_data_set(0, is_color=True)
    left = data_set['left']
    right = data_set['right']
    result = data_set['result']
    stereo = StereoVisionBM1(left, right, window_size, d_max, is_color=True)
    stereo.get_sad_all()
    my_result = stereo.get_result()
    # Scale disparities into 0-255 for display.
    my_result = my_result * 255 / d_max
    data_set['my_result_6'] = my_result
    show_image(data_set, is_color=True)
    save_image(my_result, 'window method 6')
def main(unused_argv):
    """Restore a trained joint model and save label-interpolation samples.

    Loads the two dataspace sides (A and B) and the joint model from
    checkpoints, builds latent-space interpolation paths through the label
    sequence in FLAGS.interpolate_labels, transfers the A-side codes to the
    B side through the joint model, decodes everything, and writes the
    batched images into the interpolation sample directory.
    """
    # pylint:disable=unused-variable
    # Reason:
    #   This script relies on many programmatic calls to functions and
    #   accesses to variables. Pylint cannot infer this case so it emits
    #   false alarms of unused-variable if we do not disable this warning.

    # pylint:disable=invalid-name
    # Reason:
    #   The following variables have names considered invalid by pylint so
    #   we disable the warning.
    #   - Variables whose name contains A or B, indicating which side of
    #     the data they belong to.
    del unused_argv

    # Load main config
    config_name = FLAGS.config
    config = load_config(config_name)

    config_name_A = config['config_A']
    config_name_B = config['config_B']
    config_name_classifier_A = config['config_classifier_A']
    config_name_classifier_B = config['config_classifier_B']

    # Load dataset
    dataset_A = common_joint.load_dataset(config_name_A, FLAGS.exp_uid_A)
    (dataset_blob_A, train_data_A, train_label_A, train_mu_A, train_sigma_A,
     index_grouped_by_label_A) = dataset_A
    dataset_B = common_joint.load_dataset(config_name_B, FLAGS.exp_uid_B)
    (dataset_blob_B, train_data_B, train_label_B, train_mu_B, train_sigma_B,
     index_grouped_by_label_B) = dataset_B

    # Prepare directories
    dirs = common_joint.prepare_dirs('joint', config_name, FLAGS.exp_uid)
    save_dir, sample_dir = dirs

    # Set random seed
    np.random.seed(FLAGS.random_seed)
    tf.set_random_seed(FLAGS.random_seed)

    # Real Training.
    tf.reset_default_graph()
    sess = tf.Session()

    # Load model's architecture (= build)
    one_side_helper_A = common_joint.OneSideHelper(config_name_A,
                                                   FLAGS.exp_uid_A,
                                                   config_name_classifier_A,
                                                   FLAGS.exp_uid_classifier)
    one_side_helper_B = common_joint.OneSideHelper(config_name_B,
                                                   FLAGS.exp_uid_B,
                                                   config_name_classifier_B,
                                                   FLAGS.exp_uid_classifier)
    m = common_joint.load_model(model_joint.Model, config_name, FLAGS.exp_uid)

    # Initialize and restore
    sess.run(tf.global_variables_initializer())

    one_side_helper_A.restore(dataset_blob_A)
    one_side_helper_B.restore(dataset_blob_B)

    # Restore the joint model from the checkpoint at FLAGS.load_ckpt_iter.
    config_name = FLAGS.config
    model_uid = common.get_model_uid(config_name, FLAGS.exp_uid)
    save_name = join(save_dir,
                     'transfer_%s_%d.ckpt' % (model_uid, FLAGS.load_ckpt_iter))
    m.vae_saver.restore(sess, save_name)

    # Prepare the interpolation output dir (note: 'intepolate' is a
    # pre-existing misspelling kept for path compatibility).
    intepolate_dir = join(sample_dir, 'interpolate_sample',
                          '%010d' % FLAGS.load_ckpt_iter)
    tf.gfile.MakeDirs(intepolate_dir)

    # Parse the label sequence to interpolate through.
    interpolate_labels = [int(_) for _ in FLAGS.interpolate_labels.split(',')]
    nb_images_between_labels = FLAGS.nb_images_between_labels

    # A side: pick an example for each label from the FRONT of its group,
    # advancing the cursor on repeated labels.
    index_list_A = []
    last_pos = [0] * 10
    for label in interpolate_labels:
        index_list_A.append(index_grouped_by_label_A[label][last_pos[label]])
        last_pos[label] += 1

    # B side: pick an example for each label from the BACK of its group.
    index_list_B = []
    last_pos = [-1] * 10
    for label in interpolate_labels:
        index_list_B.append(index_grouped_by_label_B[label][last_pos[label]])
        last_pos[label] -= 1

    # Linearly interpolate the latent means between consecutive labels,
    # inserting `nb_images_between_labels` steps per pair.
    z_A = []
    z_A.append(train_mu_A[index_list_A[0]])
    for i_label in range(1, len(interpolate_labels)):
        last_z_A = z_A[-1]
        this_z_A = train_mu_A[index_list_A[i_label]]
        for j in range(1, nb_images_between_labels + 1):
            z_A.append(last_z_A + (this_z_A - last_z_A) *
                       (float(j) / nb_images_between_labels))
    z_B = []
    z_B.append(train_mu_B[index_list_B[0]])
    for i_label in range(1, len(interpolate_labels)):
        last_z_B = z_B[-1]
        this_z_B = train_mu_B[index_list_B[i_label]]
        for j in range(1, nb_images_between_labels + 1):
            z_B.append(last_z_B + (this_z_B - last_z_B) *
                       (float(j) / nb_images_between_labels))
    # Transfer each A-side code to the B side through the joint model.
    z_B_tr = []
    for this_z_A in z_A:
        this_z_B_tr = sess.run(m.x_A_to_B_direct,
                               {m.x_A: np.array([this_z_A])})
        z_B_tr.append(this_z_B_tr[0])

    # Generate data domain instances and save. Each strip is one column of
    # len(x) rows.
    z_A = np.array(z_A)
    x_A = one_side_helper_A.m_helper.decode(z_A)
    x_A = common.post_proc(x_A, one_side_helper_A.m_helper.config)
    batched_x_A = common.batch_image(
        x_A,
        max_images=len(x_A),
        rows=len(x_A),
        cols=1,
    )
    common.save_image(batched_x_A, join(intepolate_dir, 'x_A.png'))

    z_B = np.array(z_B)
    x_B = one_side_helper_B.m_helper.decode(z_B)
    x_B = common.post_proc(x_B, one_side_helper_B.m_helper.config)
    batched_x_B = common.batch_image(
        x_B,
        max_images=len(x_B),
        rows=len(x_B),
        cols=1,
    )
    common.save_image(batched_x_B, join(intepolate_dir, 'x_B.png'))

    z_B_tr = np.array(z_B_tr)
    x_B_tr = one_side_helper_B.m_helper.decode(z_B_tr)
    x_B_tr = common.post_proc(x_B_tr, one_side_helper_B.m_helper.config)
    batched_x_B_tr = common.batch_image(
        x_B_tr,
        max_images=len(x_B_tr),
        rows=len(x_B_tr),
        cols=1,
    )
    common.save_image(batched_x_B_tr, join(intepolate_dir, 'x_B_tr.png'))
Esempio n. 12
0
        if diff[depth] < diff[data_min]:
            data_min = depth
    return data_min


(left_min, left_max) = calculate_min_max(left)
(right_min, right_max) = calculate_min_max(right)

# Scan pixels: compute a per-pixel disparity via `calculate_diff_bt`.
for row_pos in range(len(left)):
    # Read the current rows of the left/right images.
    row_left = left[row_pos]
    row_right = right[row_pos]
    # Read the matching per-row min/max envelopes.
    row_left_min = left_min[row_pos]
    row_left_max = left_max[row_pos]
    row_right_min = right_min[row_pos]
    row_right_max = right_max[row_pos]
    for pixel_pos in range(len(row_left)):
        depth = calculate_diff_bt((row_left, row_left_min, row_left_max),
                                  (row_right, row_right_min, row_right_max),
                                  pixel_pos)
        # Scale into 0-255 for display (disparity range presumably 0-10 — confirm).
        my_result[row_pos][pixel_pos] = depth * 255 / 10

data_set['my_result_4'] = my_result
save_image(my_result, 'pixel bt method')
show_image(data_set)
if __name__ == '__main__':
    # calculate_min_max(left)
    pass
Esempio n. 13
0
    data_set['low_texture_row'] = low_texture_row
    data_set['low_texture_column'] = low_texture_column

    # Full matching pipeline, then scale every map into 0-255 for display
    # (the extra /16 presumably undoes 1/16-pixel units — confirm).
    stereo.compute_cost()
    t_diff = stereo.aggregate_cost()
    my_result = stereo.get_result()
    my_result = my_result * (255.0 / d_max / 16)
    data_set['my_result_7'] = my_result

    # Left-right consistency check.
    diff_result = stereo.left_right_check()
    diff_result = diff_result * (255.0 / d_max / 16)
    data_set['diff_result'] = diff_result

    post_result = stereo.post_processing()
    post_result = post_result * (255.0 / d_max / 16)
    data_set['post_result'] = post_result

    # Fix low-texture regions, then median-filter to remove speckle.
    post_result2 = stereo.fix_low_texture()
    post_result2 = post_result2 * (255.0 / d_max / 16)
    post_result2 = filters.median_filter(post_result2, 5)
    data_set['post_result2'] = post_result2
    # Elapsed time (Python 2 print statement).
    print time.time() - tt
    show_image(data_set)

    save_image(diff_result, 'diff_result')
    save_image(post_result, 'post_result')
    save_image(post_result2, 'post_result_2')
    save_image(low_texture_column, 'low_texture_column')
    save_image(low_texture_row, 'low_texture_row')
    save_image(my_result, 'window method 7')
    # NOTE(review): this entire section is duplicated verbatim below —
    # likely a paste error; confirm whether both runs are intended.
    stereo.compute_cost()
    stereo.aggregate_cost()
    my_result = stereo.get_result()

    diff_result = stereo.left_right_check()

    post_result = stereo.post_processing()
    # post_result2 = stereo.fix_low_texture()

    # Elapsed time (Python 2 print statement).
    print "use time:", time.time() - tt
    data_set['low_texture'] = low_texture

    # Scale into 0-255 for display (the extra /16 presumably undoes
    # 1/16-pixel units — confirm).
    my_result = my_result * (255.0 / d_max / 16)

    data_set['my_result_ensus_bm'] = my_result

    diff_result = diff_result * (255.0 / d_max / 16)
    data_set['diff_result'] = diff_result

    post_result = post_result * (255.0 / d_max / 16)
    data_set['post_result'] = post_result

    # post_result2 = post_result2 * (255.0 / d_max / 16)
    # data_set['post_result2'] = post_result2

    show_image(data_set)
    save_image(my_result, 'window method Census BM')
    save_image(diff_result, 'window method Census BM diff_result')
    save_image(post_result, 'window method Census BM post_result')
    # save_image(post_result2, 'window method Census BM post_result2')
    # Duplicate of the section above (see the NOTE at the top).
    stereo.compute_cost()
    stereo.aggregate_cost()
    my_result = stereo.get_result()

    diff_result = stereo.left_right_check()

    post_result = stereo.post_processing()
    # post_result2 = stereo.fix_low_texture()

    print "use time:", time.time() - tt
    data_set['low_texture'] = low_texture

    my_result = my_result * (255.0 / d_max / 16)

    data_set['my_result_ensus_bm'] = my_result

    diff_result = diff_result * (255.0 / d_max / 16)
    data_set['diff_result'] = diff_result

    post_result = post_result * (255.0 / d_max / 16)
    data_set['post_result'] = post_result

    # post_result2 = post_result2 * (255.0 / d_max / 16)
    # data_set['post_result2'] = post_result2

    show_image(data_set)
    save_image(my_result, 'window method Census BM')
    save_image(diff_result, 'window method Census BM diff_result')
    save_image(post_result, 'window method Census BM post_result')
    # save_image(post_result2, 'window method Census BM post_result2')
Esempio n. 16
0
    # Load the stereo pair and reference disparity from the dataset.
    left = data_set['left']
    right = data_set['right']
    result = data_set['result']
    import time

    window_size = 5
    d_max = 32
    tt = time.time()
    # SGM pipeline: texture detection, cost computation, aggregation, result.
    stereo = StereoVisionBM_SGM(left, right, window_size, d_max)
    stereo.low_texture_detection()
    stereo.compute_cost()
    stereo.aggregate_cost()
    my_result = stereo.get_result()

    # Left-right consistency check; scale into 0-255 for display
    # (the extra /16 presumably undoes 1/16-pixel units — confirm).
    diff_result = stereo.left_right_check()
    diff_result = diff_result * (255.0 / d_max / 16)
    data_set['diff_result'] = diff_result

    post_result = stereo.post_processing()
    post_result = post_result * (255.0 / d_max / 16)
    data_set['post_result'] = post_result

    # Elapsed time (Python 2 print statement).
    print time.time() - tt

    my_result = my_result * (255.0 / d_max / 16)
    data_set['my_result_sgm'] = my_result

    show_image(data_set)
    save_image(my_result, 'window method sgm')
    save_image(post_result, 'window method sgm post')
Esempio n. 17
0
        '''reverse = self.get_result_reverse()
        up = self.get_result_up()
        down = self.get_result_down()
        for row in np.arange(self.row_length):
            for column in np.arange(self.column_length):
                self.my_result[row][column] = np.median(
                    (reverse[row][column], forward[row][column], down[row][column], up[row][column]))
        return self.my_result.copy()'''


if __name__ == '__main__':
    data_set = get_data_set(0)
    # get data
    left = data_set['left']
    right = data_set['right']
    result = data_set['result']
    import time

    window_size = 5
    d_max = 20
    tt = time.time()
    # Dynamic-programming stereo matcher.
    stereo = StereoVisionBM_DP(left, right, window_size, d_max)
    stereo.compute_cost()
    stereo.aggregate_cost()
    my_result = stereo.get_result()
    # Scale disparities into 0-255 for display.
    my_result = my_result * 255 / d_max
    data_set['my_result_dp'] = my_result
    show_image(data_set)
    # Elapsed time (Python 2 print statement).
    print time.time() - tt
    save_image(my_result, 'window method DP')
    diff_result = stereo.left_right_check()

    post_result = stereo.post_processing()
    # post_result2 = stereo.fix_low_texture()

    print "use time:", time.time() - tt
    # NOTE(review): `low_texture`, `left_c` and `right_c` are not defined
    # anywhere in this snippet — this section appears pasted from a
    # different driver script and would raise NameError as-is.
    data_set['low_texture'] = low_texture
    data_set['left_c'] = left_c
    data_set['right_c'] = right_c
    # Re-scale (the extra /16 presumably undoes 1/16-pixel units — confirm).
    my_result = my_result * (255.0 / d_max / 16)

    data_set['my_result_ensus_bm'] = my_result

    diff_result = diff_result * (255.0 / d_max / 16)
    data_set['diff_result'] = diff_result

    post_result = post_result * (255.0 / d_max / 16)
    data_set['post_result'] = post_result

    # post_result2 = post_result2 * (255.0 / d_max / 16)
    # data_set['post_result2'] = post_result2

    show_image(data_set)
    save_image(left_c,'window method low texture BM left_c')
    save_image(right_c,'window method low texture BM right_c')
    save_image(my_result, 'window method low texture BM')
    save_image(diff_result, 'window method low texture BM diff_result')
    save_image(post_result, 'window method low texture BM post_result')
    # save_image(post_result2, 'window method Census BM post_result2')
Esempio n. 19
0
    :param d_max:最大深度
    :return:视差值
    """
    start_pos = (pixel_pos - d_max) if (pixel_pos - d_max) > 0 else 0
    row_right = row_right[start_pos:pixel_pos]
    diff = map(lambda value: abs(value - pixel_value), row_right)
    diff = diff[::-1]  # 逆序
    data_min = 0
    for depth in range(len(diff)):
        if diff[data_min] == 0:
            break
        if diff[depth] < diff[data_min]:
            data_min = depth
    return data_min


# Scan pixels: naive per-pixel disparity search along each row.
for row_pos in range(len(left)):
    row_left = left[row_pos]
    row_right = right[row_pos]
    for pixel_pos in range(len(row_left)):
        pixel = row_left[pixel_pos]
        depth = calculate_diff_naive(pixel, row_right, pixel_pos)
        # Scale into 0-255 for display (disparity range presumably 0-10 — confirm).
        my_result[row_pos][pixel_pos] = depth * 255 / 10

data_set['my_result'] = my_result
show_image(data_set)
save_image(my_result, 'pixel naive method')
if __name__ == '__main__':
    pass
def main(unused_argv):
    """Restore a trained dataspace VAE and save sample/reconstruction images.

    Samples from the prior (both randomly and on a latent grid), reconstructs
    a batch of training examples, and writes all images under the model's
    sample directory.
    """
    del unused_argv

    # Load Config
    config_name = FLAGS.config
    config_module = importlib.import_module('configs.%s' % config_name)
    config = config_module.config
    model_uid = common.get_model_uid(config_name, FLAGS.exp_uid)
    n_latent = config['n_latent']

    # Load dataset
    dataset = common.load_dataset(config)
    basepath = dataset.basepath
    save_path = dataset.save_path
    train_data = dataset.train_data

    # Make the directory
    save_dir = os.path.join(save_path, model_uid)
    best_dir = os.path.join(save_dir, 'best')
    tf.gfile.MakeDirs(save_dir)
    tf.gfile.MakeDirs(best_dir)
    tf.logging.info('Save Dir: %s', save_dir)

    # Set random seed
    np.random.seed(FLAGS.random_seed)
    tf.set_random_seed(FLAGS.random_seed)

    # Load Model
    tf.reset_default_graph()
    sess = tf.Session()
    with tf.device(tf.train.replica_device_setter(ps_tasks=0)):
        m = model_dataspace.Model(config, name=model_uid)
        _ = m()  # noqa

        # Initialize
        sess.run(tf.global_variables_initializer())

        # Load the best checkpoint of the VAE.
        m.vae_saver.restore(
            sess, os.path.join(best_dir, 'vae_best_%s.ckpt' % model_uid))

        # Sample from prior
        sample_count = 64

        image_path = os.path.join(basepath, 'sample', model_uid)
        tf.gfile.MakeDirs(image_path)

        # Random draws from the standard-normal prior, decoded to images.
        z_p = np.random.randn(sample_count, m.n_latent)
        x_p = sess.run(m.x_mean, {m.z: z_p})
        x_p = common.post_proc(x_p, config)
        common.save_image(common.batch_image(x_p),
                          os.path.join(image_path, 'sample_prior.png'))

        # Sample from prior, as a regular grid over the latent space.
        boundary = 2.0
        number_grid = 50
        blob = common.make_grid(boundary=boundary,
                                number_grid=number_grid,
                                dim_latent=n_latent)
        z_grid, dim_grid = blob.z_grid, blob.dim_grid
        x_grid = sess.run(m.x_mean, {m.z: z_grid})
        x_grid = common.post_proc(x_grid, config)
        batch_image_grid = common.make_batch_image_grid(dim_grid, number_grid)
        common.save_image(batch_image_grid(x_grid),
                          os.path.join(image_path, 'sample_grid.png'))

        # Reconstruction: encode real training examples, then decode.
        sample_count = 64
        x_real = train_data[:sample_count]
        mu, sigma = sess.run([m.mu, m.sigma], {m.x: x_real})
        x_rec = sess.run(m.x_mean, {m.mu: mu, m.sigma: sigma})
        x_rec = common.post_proc(x_rec, config)

        x_real = common.post_proc(x_real, config)
        common.save_image(common.batch_image(x_real),
                          os.path.join(image_path, 'image_real.png'))
        common.save_image(common.batch_image(x_rec),
                          os.path.join(image_path, 'image_rec.png'))
Esempio n. 21
0
def scrape_parks():
    """Scrape imagery for theme parks.

    With exactly one CLI argument (mk/epcot/dhs/ak), downloads Street View
    images within that park's hard-coded lat/long bounding box. Otherwise,
    derives a bounding box from each park's CSV of coordinates and saves
    recent Flickr photos found inside it.
    """
    if (len(sys.argv) == 2):
        if sys.argv[1] == 'mk':
            print("getting mk")
            get_streetview_in_bounding_box(28.42, -81.586220, 28.422,
                                           -81.577347)

        if sys.argv[1] == 'epcot':
            print("getting epcot")
            get_streetview_in_bounding_box(28.371, -81.553664, 28.377229,
                                           -81.545482)

        if sys.argv[1] == 'dhs':
            print("getting dhs")
            get_streetview_in_bounding_box(28.358, -81.563629, 28.361727,
                                           -81.556257)

        if sys.argv[1] == 'ak':
            print("getting ak")
            get_streetview_in_bounding_box(28.358, -81.594788, 28.3639,
                                           -81.586140)
    else:

        parks = [
            'regions/epcot/park.csv', 'regions/hollywood_studios/park.csv',
            'regions/animal_kingdom/park.csv'
        ]
        #parks = ['regions/animal_kingdom/park.csv']
        num = 0
        for csv_path in parks:
            print(csv_path)
            # Each CSV row is a comma-separated list of floats (lat, long, ...).
            coords = []
            with open(csv_path, "r") as f:
                for line in f.readlines():
                    coords.append([
                        float(line.split(",")[i])
                        for i in range(len(line.split(",")))
                    ])
            coords = np.array(coords)

            # Flickr bboxes are "min_long,min_lat,max_long,max_lat" strings.
            max_lat = "{:.3f}".format(max(coords[:, 0]))
            max_long = "{:.3f}".format(max(coords[:, 1]))
            min_lat = "{:.3f}".format(min(coords[:, 0]))
            min_long = "{:.3f}".format(min(coords[:, 1]))
            # api_key/api_secret are expected to be defined at module level.
            flickr = flickrapi.FlickrAPI(api_key,
                                         api_secret,
                                         format='parsed-json')
            bbox_coords = [min_long, min_lat, max_long, max_lat]
            print(bbox_coords)
            # Only pages 8 and 9 are fetched (range end is exclusive).
            for page in range(8, 10):
                print("page " + str(page))
                photos_search = flickr.photos.search(
                    bbox=",".join(bbox_coords),
                    min_upload_date=1524009600,
                    page=page)
                # NOTE(review): assert on external API data is stripped
                # under -O; consider raising instead.
                assert (photos_search['stat'] == 'ok')
                photos = photos_search['photos']['photo']

                for im in photos:
                    common.save_image(im['id'], "Medium", "data")
                    print(num)
                    num += 1
Esempio n. 22
0
    data_set['low_texture_row'] = low_texture_row
    data_set['low_texture_column'] = low_texture_column

    # Full matching pipeline, then scale every map into 0-255 for display
    # (the extra /16 presumably undoes 1/16-pixel units — confirm).
    stereo.compute_cost()
    t_diff = stereo.aggregate_cost()
    my_result = stereo.get_result()
    my_result = my_result * (255.0 / d_max / 16)
    data_set['my_result_7'] = my_result

    # Left-right consistency check.
    diff_result = stereo.left_right_check()
    diff_result = diff_result * (255.0 / d_max / 16)
    data_set['diff_result'] = diff_result

    post_result = stereo.post_processing()
    post_result = post_result * (255.0 / d_max / 16)
    data_set['post_result'] = post_result

    # Fix low-texture regions, then median-filter to remove speckle.
    post_result2 = stereo.fix_low_texture()
    post_result2 = post_result2 * (255.0 / d_max / 16)
    post_result2 = filters.median_filter(post_result2,5)
    data_set['post_result2'] = post_result2
    # Elapsed time (Python 2 print statement).
    print time.time() - tt
    show_image(data_set)

    save_image(diff_result, 'diff_result')
    save_image(post_result, 'post_result')
    save_image(post_result2, 'post_result_2')
    save_image(low_texture_column, 'low_texture_column')
    save_image(low_texture_row, 'low_texture_row')
    save_image(my_result, 'window method 7')