Example #1
0
def test_osm_merger():
    """Merging trail and terrain OSM data should reproduce the expected XML."""
    trail_data = OSMData.load(get_resource_path('test_osm_trails_input.xml'))
    terrain_data = OSMData.load(get_resource_path('test_osm_terrains_input.xml'))
    merged = merge([trail_data, terrain_data])
    out_path = path.join(TEMP_DIR, 'test_osm_combined_result.xml')
    merged.tree.write(out_path, encoding='utf-8', xml_declaration=True)
    assert_xml_equal(get_resource_path('test_osm_combined_result.xml'), out_path)
Example #2
0
def test_osmdata():
    """Node and way filters applied together should match the expected output.

    Filters passed in one add_*_filter call are ANDed; separate calls OR.
    """

    def node_filter_a(element, data):
        # Keep nodes authored by user 'zmeu'.
        return element.get('user') == 'zmeu'

    def node_filter_b(element, data):
        # Keep nodes carrying a 'zmeuicon' tag (direct children only).
        return any(tag.get('k') == 'zmeuicon' for tag in element)

    def way_filter_a(element, data):
        # Ways tagged zmeucolor=yellow.
        return any(
            tag.get('k') == 'zmeucolor' and tag.get('v') == 'yellow'
            for tag in element.iter('tag'))

    def way_filter_b(element, data):
        # Ways tagged highway=footway.
        return any(
            tag.get('k') == 'highway' and tag.get('v') == 'footway'
            for tag in element.iter('tag'))

    def way_filter_c(element, data):
        # Ways explicitly marked for export.
        return any(
            tag.get('k') == 'EXPORT_ROUTE' and tag.get('v') == 'true'
            for tag in element.iter('tag'))

    data = OSMData.load(get_resource_path('test_osm_input.xml'))
    data.add_node_filter(node_filter_a)  # node filter a
    data.add_node_filter(node_filter_b)  # or node filter b
    data.add_way_filter(way_filter_a, way_filter_b)  # way filter a AND b
    data.add_way_filter(way_filter_c)  # or way filter c
    result_path = path.join(TEMP_DIR, 'test_osm_result.xml')
    data.do_filter()
    data.prepare_for_save()
    data.save(result_path)
    assert_xml_equal(get_resource_path('test_osm_expected.xml'), result_path)
Example #3
0
def test_osm_trailfilter():
    """Applying trailFilter to the trail input should match the expected XML."""
    osm_data = OSMData.load(get_resource_path('test_osm_trails_input.xml'))
    osm_data.add_way_filter(trailFilter)

    out_path = path.join(TEMP_DIR, 'test_osm_trails_result.xml')
    osm_data.do_filter()
    osm_data.prepare_for_save()
    osm_data.save(out_path)
    assert_xml_equal(get_resource_path('test_osm_trails_expected.xml'), out_path)
Example #4
0
def test_way_coordinate_filter():
    """Ways outside the bounding box should be filtered out."""
    # Arguments are presumably (min_lon, max_lon, min_lat, max_lat) —
    # verify against WayCoordinateFilter's signature.
    bbox_filter = WayCoordinateFilter(-112.060, -112.000, 36.050, 36.109)
    osm_data = OSMData.load(get_resource_path('test_osm_input.xml'))
    osm_data.add_way_filter(bbox_filter.filter)
    out_path = path.join(TEMP_DIR, 'test_osm_result.xml')
    osm_data.do_filter()
    osm_data.prepare_for_save()
    osm_data.save(out_path)
    assert_xml_equal(get_resource_path('test_osm_expected_way_coord_filter.xml'), out_path)
Example #5
0
def test_osm_trailfilter_with_coordinates():
    """Trail filter combined (ANDed) with a coordinate filter in one call."""
    bbox_filter = WayCoordinateFilter(-113.0, -112.0, 36.0, 37.0)
    osm_data = OSMData.load(get_resource_path('test_osm_trails_input.xml'))
    osm_data.add_way_filter(trailFilter, bbox_filter.filter)

    out_path = path.join(TEMP_DIR, 'test_osm_trails_and_coordinates_result.xml')
    osm_data.do_filter()
    osm_data.prepare_for_save()
    osm_data.save(out_path)
    assert_xml_equal(get_resource_path('test_osm_trails_and_coordinates_expected.xml'), out_path)
Example #6
0
def test_gdal_coordinates(mock_popen):
    """Gdalinfo.for_file should parse the bounding-box corners from gdalinfo output.

    The gdalinfo subprocess is mocked: its stdout is replayed from a captured
    fixture file, so no GDAL installation is needed.
    """
    # Use a context manager so the fixture file handle is closed
    # (the original opened it and never closed it).
    with open(get_resource_path('test_satelliteimg_gdalinfo.txt'), 'rb') as f:
        mock_popen.return_value.__enter__.return_value.communicate.return_value = (
            f.read(), b'')
    gdal_info = gdal_util.Gdalinfo.for_file(
        get_resource_path('test_satelliteimage.tif'))
    # West / north edge of the image footprint.
    assert abs(gdal_info.minX - (-112.5047533)) < PRECISION
    assert abs(gdal_info.maxY - (36.0036116)) < PRECISION
    # East / south edge.
    assert abs(gdal_info.maxX - (-112.4328512)) < PRECISION
    assert abs(gdal_info.minY - (35.9338875)) < PRECISION
Example #7
0
def test_insert_colors():
    """insert_colors should paint 'meadow' areas with the configured color."""
    test_init_build()  # prepare the build directory first
    terrain_data = OSMData.load(get_resource_path('test_osm_terrains_input.xml'))
    state = State()
    state.add_area_color('meadow', 0, 255, 0)  # pure green
    status = OSMStatus(0, ['asd'], state)
    status.osmdata = [terrain_data]
    building.insert_colors(status)
    out_path = path.join(building.BUILD_DIR, 'result.xml')
    terrain_data.save(out_path)
    assert_xml_equal(get_resource_path('test_osm_terrain_colors_expected.xml'),
                     out_path)
Example #8
0
class ABGamut:
    """Discretized ab gamut: 313 bin centers plus their prior distribution,
    loaded from bundled .npy resources."""

    RESOURCE_POINTS = get_resource_path('ab-gamut.npy')
    RESOURCE_PRIOR = get_resource_path('q-prior.npy')

    DTYPE = np.float32
    EXPECTED_SIZE = 313

    def __init__(self):
        """Load gamut points and prior and sanity-check their shapes."""
        def _load(resource):
            return np.load(resource).astype(self.DTYPE)

        self.points = _load(self.RESOURCE_POINTS)
        self.prior = _load(self.RESOURCE_PRIOR)

        # Resources must match the expected bin count.
        assert self.points.shape == (self.EXPECTED_SIZE, 2)
        assert self.prior.shape == (self.EXPECTED_SIZE, )
Example #9
0
def ks():
    """Return kickstart script (text), customized based upon connecting IP address.

    Side effects: marks the requesting host as IMAGING_STARTED in the DB,
    removes its PXE config links, disables its DHCP entry, and restarts
    the DHCP service.
    """

    pxe_config = util._db(constants.PXE_CONFIG)
    ip_address = flask.request.remote_addr
    ip_address = util.get_normalized_ipv4_address(ip_address)
    host_info = pxe_config[ip_address]
    mac_address = host_info['mac_address']

    # Read the kickstart template via a context manager so the file handle
    # is closed (the original leaked it).
    template_path = util.get_resource_path('template.ks')
    with open(template_path, 'r') as template_file:
        template = template_file.read()
    # Render with the host's info plus server-side connection parameters.
    kwargs = dict(host_info)
    kwargs.update(pxe_ip_address=config.pxe_ip_address,
                  www_port=config.www_port,
                  ip_address=ip_address)
    result = flask.render_template_string(template, **kwargs)
    logger.debug('%s asked for kickstart file. Marking as "%s"' %
                 (mac_address, constants.IMAGING_STARTED))
    util._db(constants.HOST_STATE, {mac_address: constants.IMAGING_STARTED})
    util.delete_pxe_config_links(mac_address)
    util.disable_dhcp(mac_address)
    util.dhcp_service_do('restart')
    logger.debug('Unconfigured DHCP for %s. Serving kickstart file...' %
                 mac_address)
    return flask.Response(result, mimetype='text/plain')
Example #10
0
def _run_inference():
    """Runs all images through depth model and saves depth maps."""
    # Output directory name encodes the video and checkpoint so runs with
    # different models/videos do not collide.
    ckpt_basename = os.path.basename(FLAGS.model_ckpt)
    ckpt_modelname = os.path.basename(os.path.dirname(FLAGS.model_ckpt))
    output_dir = os.path.join(
        FLAGS.output_dir,
        FLAGS.kitti_video.replace('/', '_') + '_' + ckpt_modelname + '_' +
        ckpt_basename)
    if not gfile.exists(output_dir):
        gfile.makedirs(output_dir)
    inference_model = model.Model(is_training=False,
                                  seq_length=FLAGS.seq_length,
                                  batch_size=FLAGS.batch_size,
                                  img_height=FLAGS.img_height,
                                  img_width=FLAGS.img_width)
    vars_to_restore = util.get_vars_to_restore(FLAGS.model_ckpt)
    saver = tf.compat.v1.train.Saver(vars_to_restore)
    # saver=None: the Supervisor only manages the session; the weights are
    # restored manually from the flagged checkpoint below.
    sv = tf.compat.v1.train.Supervisor(logdir='/tmp/', saver=None)
    with sv.managed_session() as sess:
        saver.restore(sess, FLAGS.model_ckpt)
        if FLAGS.kitti_video == 'test_files_eigen':
            # Eigen test split: the image list comes from a bundled text file.
            im_files = util.read_text_lines(
                util.get_resource_path('dataset/kitti/test_files_eigen.txt'))
            im_files = [os.path.join(FLAGS.kitti_dir, f) for f in im_files]
        else:
            # Otherwise glob the video's image_02 PNG frames, skipping any
            # previously generated disparity images.
            video_path = os.path.join(FLAGS.kitti_dir, FLAGS.kitti_video)
            im_files = gfile.glob(
                os.path.join(video_path, 'image_02/data', '*.png'))
            im_files = [f for f in im_files if 'disp' not in f]
            im_files = sorted(im_files)
        # Run inference in fixed-size batches.
        for i in range(0, len(im_files), FLAGS.batch_size):
            if i % 100 == 0:
                logging.info('Generating from %s: %d/%d', ckpt_basename, i,
                             len(im_files))
            inputs = np.zeros(
                (FLAGS.batch_size, FLAGS.img_height, FLAGS.img_width, 3),
                dtype=np.uint8)
            for b in range(FLAGS.batch_size):
                idx = i + b
                if idx >= len(im_files):
                    break  # last batch may be partial; unused slots stay zero
                im = imageio.imread(im_files[idx])
                # Resize to the model's input resolution (PIL takes (w, h)).
                inputs[b] = np.array(
                    Image.fromarray(im).resize(
                        (FLAGS.img_width, FLAGS.img_height)))
            results = inference_model.inference(inputs, sess, mode='depth')
            for b in range(FLAGS.batch_size):
                idx = i + b
                if idx >= len(im_files):
                    break
                # Eigen split uses 3-digit file names, videos 4-digit.
                if FLAGS.kitti_video == 'test_files_eigen':
                    depth_path = os.path.join(output_dir, '%03d.png' % idx)
                else:
                    depth_path = os.path.join(output_dir, '%04d.png' % idx)

                depth_map = results['depth'][b]
                depth_map = np.squeeze(depth_map)
                # Colorize the raw depth values for visual inspection.
                colored_map = _normalize_depth_for_display(depth_map,
                                                           cmap=CMAP)
                imageio.imsave(depth_path, colored_map)
Example #11
0
def _run_inference():
  """Runs all images through depth model and saves depth maps."""
  # Output directory name encodes the video and checkpoint so runs with
  # different models/videos do not collide.
  ckpt_basename = os.path.basename(FLAGS.model_ckpt)
  ckpt_modelname = os.path.basename(os.path.dirname(FLAGS.model_ckpt))
  output_dir = os.path.join(FLAGS.output_dir,
                            FLAGS.kitti_video.replace('/', '_') + '_' +
                            ckpt_modelname + '_' + ckpt_basename)
  if not gfile.Exists(output_dir):
    gfile.MakeDirs(output_dir)
  inference_model = model.Model(is_training=False,
                                seq_length=FLAGS.seq_length,
                                batch_size=FLAGS.batch_size,
                                img_height=FLAGS.img_height,
                                img_width=FLAGS.img_width)
  vars_to_restore = util.get_vars_to_restore(FLAGS.model_ckpt)
  saver = tf.train.Saver(vars_to_restore)
  # saver=None: the Supervisor only manages the session; the weights are
  # restored manually from the flagged checkpoint below.
  sv = tf.train.Supervisor(logdir='/tmp/', saver=None)
  with sv.managed_session() as sess:
    saver.restore(sess, FLAGS.model_ckpt)
    if FLAGS.kitti_video == 'test_files_eigen':
      # Eigen test split: the image list comes from a bundled text file.
      im_files = util.read_text_lines(
          util.get_resource_path('dataset/kitti/test_files_eigen.txt'))
      im_files = [os.path.join(FLAGS.kitti_dir, f) for f in im_files]
    else:
      # Otherwise glob the video's image_02 PNG frames, skipping any
      # previously generated disparity images.
      video_path = os.path.join(FLAGS.kitti_dir, FLAGS.kitti_video)
      im_files = gfile.Glob(os.path.join(video_path, 'image_02/data', '*.png'))
      im_files = [f for f in im_files if 'disp' not in f]
      im_files = sorted(im_files)
    # Run inference in fixed-size batches.
    for i in range(0, len(im_files), FLAGS.batch_size):
      if i % 100 == 0:
        logging.info('Generating from %s: %d/%d', ckpt_basename, i,
                     len(im_files))
      inputs = np.zeros(
          (FLAGS.batch_size, FLAGS.img_height, FLAGS.img_width, 3),
          dtype=np.uint8)
      for b in range(FLAGS.batch_size):
        idx = i + b
        if idx >= len(im_files):
          break  # last batch may be partial; unused slots stay zero
        # NOTE(review): scipy.misc.imread/imresize/imsave were removed in
        # SciPy >= 1.3; this code requires an old SciPy with Pillow.
        im = scipy.misc.imread(im_files[idx])
        inputs[b] = scipy.misc.imresize(im, (FLAGS.img_height, FLAGS.img_width))
      results = inference_model.inference(inputs, sess, mode='depth')
      for b in range(FLAGS.batch_size):
        idx = i + b
        if idx >= len(im_files):
          break
        # Eigen split uses 3-digit file names, videos 4-digit.
        if FLAGS.kitti_video == 'test_files_eigen':
          depth_path = os.path.join(output_dir, '%03d.png' % idx)
        else:
          depth_path = os.path.join(output_dir, '%04d.png' % idx)
        depth_map = results['depth'][b]
        depth_map = np.squeeze(depth_map)
        colored_map = _normalize_depth_for_display(depth_map, cmap=CMAP)
        # Stack the input frame above its colorized depth map and save the
        # pair as one image.
        input_float = inputs[b].astype(np.float32) / 255.0
        vertical_stack = np.concatenate((input_float, colored_map), axis=0)
        scipy.misc.imsave(depth_path, vertical_stack)
Example #12
0
def _run_inference():
    """Runs all images through depth model and saves depth maps."""
    # Output directory name encodes the video and checkpoint so runs with
    # different models/videos do not collide.
    ckpt_basename = os.path.basename(FLAGS.model_ckpt)
    ckpt_modelname = os.path.basename(os.path.dirname(FLAGS.model_ckpt))
    # Keep the original output root for the extra 'reg_pics' outputs below.
    og_dir = FLAGS.output_dir
    output_dir = os.path.join(
        FLAGS.output_dir,
        FLAGS.kitti_video.replace('/', '_') + '_' + ckpt_modelname + '_' +
        ckpt_basename)
    if not gfile.Exists(output_dir):
        gfile.MakeDirs(output_dir)
    inference_model = model.Model(is_training=False,
                                  seq_length=FLAGS.seq_length,
                                  batch_size=FLAGS.batch_size,
                                  img_height=FLAGS.img_height,
                                  img_width=FLAGS.img_width)
    vars_to_restore = util.get_vars_to_restore(FLAGS.model_ckpt)
    saver = tf.train.Saver(vars_to_restore)
    # saver=None: the Supervisor only manages the session; the weights are
    # restored manually from the flagged checkpoint below.
    sv = tf.train.Supervisor(logdir='/tmp/', saver=None)
    with sv.managed_session() as sess:
        saver.restore(sess, FLAGS.model_ckpt)
        if FLAGS.kitti_video == 'test_files_eigen':
            # Eigen test split: the image list comes from a bundled text file.
            im_files = util.read_text_lines(
                util.get_resource_path('dataset/kitti/test_files_eigen.txt'))
            im_files = [os.path.join(FLAGS.kitti_dir, f) for f in im_files]
        else:
            video_path = os.path.join(FLAGS.kitti_dir, FLAGS.kitti_video)
            #edit
            #im_files = gfile.Glob(os.path.join(video_path, 'image_02/data', '*.png'))
            # Local modification: glob PNGs directly from the video dir
            # instead of the KITTI image_02/data layout.
            im_files = gfile.Glob(os.path.join(video_path, '*.png'))  #delete
            #end edit
            im_files = [f for f in im_files if 'disp' not in f]
            im_files = sorted(im_files)
        # regularPictures(im_files, output_dir, inference_model)
        # Run inference in fixed-size batches.
        for i in range(0, len(im_files), FLAGS.batch_size):
            if i % 100 == 0:
                logging.info('Generating from %s: %d/%d', ckpt_basename, i,
                             len(im_files))
            inputs = np.zeros(
                (FLAGS.batch_size, FLAGS.img_height, FLAGS.img_width, 3),
                dtype=np.uint8)
            for b in range(FLAGS.batch_size):
                idx = i + b
                if idx >= len(im_files):
                    break  # last batch may be partial; unused slots stay zero
                # NOTE(review): scipy.misc.imread/imresize/imsave were removed
                # in SciPy >= 1.3; this code requires an old SciPy with Pillow.
                im = scipy.misc.imread(im_files[idx],
                                       mode='RGB')  #added 2nd arg
                inputs[b] = scipy.misc.imresize(
                    im, (FLAGS.img_height, FLAGS.img_width))
            results = inference_model.inference(inputs, sess, mode='depth')
            for b in range(FLAGS.batch_size):
                idx = i + b
                if idx >= len(im_files):
                    break
                # Eigen split uses 3-digit file names, videos 4-digit.
                if FLAGS.kitti_video == 'test_files_eigen':
                    depth_path = os.path.join(output_dir, '%03d.png' % idx)
                else:
                    depth_path = os.path.join(output_dir, '%04d.png' % idx)
                depth_map = results['depth'][b]
                depth_map = np.squeeze(depth_map)
                colored_map = _normalize_depth_for_display(depth_map,
                                                           cmap=CMAP)
                # Stack the input frame above its colorized depth map.
                input_float = inputs[b].astype(np.float32) / 255.0
                vertical_stack = np.concatenate((input_float, colored_map),
                                                axis=0)
                scipy.misc.imsave(depth_path, vertical_stack)
                #edit
                # Also write the depth map alone, resized back to the source
                # image resolution, under <output root>/reg_pics.
                if FLAGS.kitti_video == 'test_files_eigen':
                    outPath = os.path.join(og_dir, 'reg_pics',
                                           '%03d.png' % idx)
                else:
                    outPath = os.path.join(og_dir, 'reg_pics',
                                           '%04d.png' % idx)
                # NOTE(review): this re-reads im_files[0] (the FIRST image)
                # on every iteration just to get target dimensions — it is
                # loop-invariant and could be hoisted; also verify the first
                # image's size is representative of all frames.
                tempImg = scipy.misc.imread(im_files[0], mode='RGB')
                resized_colored_map = scipy.misc.imresize(
                    colored_map, (len(tempImg), len(tempImg[0])))
                # NOTE(review): assumes og_dir/reg_pics already exists — no
                # MakeDirs call is made for it; confirm with the caller.
                scipy.misc.imsave(outPath, resized_colored_map)