Example No. 1
def make_job(args):
    job = {}
    job['frame_range'] = args.frame_range
    if args.timeout:
        job['timeout'] = args.timeout
    job['save_blender_files'] = args.save_blender_files
    job['no_annotations'] = args.no_annotations
    job['traffic_file'] = args.traffic_file
    job['video_dir'] = args.video_info_dir
    setParamUnlessThere(job, 'out_video_dir', op.dirname(args.traffic_file))
    return job
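This and the following examples rely on two small helpers, setParamUnlessThere and assertParamIsThere, whose definitions are not shown on this page. A minimal sketch of what they might look like, inferred from how they are called (the bodies are an assumption, not the repository's code):

def setParamUnlessThere(params, name, default_value):
    # Fill in a default, but never overwrite a key the caller already set.
    if name not in params:
        params[name] = default_value

def assertParamIsThere(params, name):
    # Fail fast with a readable message when a required key is missing.
    assert name in params, 'job is missing required parameter "%s"' % name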
Example No. 2
def generate_video_traffic(job):
    ''' Generate the traffic file for the whole video.
    Args:
      job - a dict of parameters, the same as for process_video; its
            'in_db_file' should name a database with all the images for
            which traffic is generated
    '''
    assertParamIsThere(job, 'in_db_file')
    assertParamIsThere(job, 'out_video_dir')
    setParamUnlessThere(job, 'frame_range', '[::]')
    assertParamIsThere(job, 'video_dir')

    video = Video(video_dir=job['video_dir'])
    camera = video.build_camera()

    assert op.exists(atcity(job['in_db_file'])), \
        'in db %s does not exist' % atcity(job['in_db_file'])
    conn_in = sqlite3.connect(atcity(job['in_db_file']))
    c_in = conn_in.cursor()
    c_in.execute('SELECT time FROM images')
    timestamps = c_in.fetchall()
    conn_in.close()

    cad = Cad()

    if 'speed_kph' in job:
        model = TrafficModel(camera,
                             video,
                             cad=cad,
                             speed_kph=job['speed_kph'])
    elif 'num_cars' in job:
        model = TrafficModelRandom(camera,
                                   video,
                                   cad=cad,
                                   num_cars_mean=job['num_cars'])
    else:
        assert False, 'job must have either "speed_kph" or "num_cars"'

    diapason = Diapason(len(timestamps), job['frame_range'])

    traffic = {'in_db_file': job['in_db_file']}
    traffic['frames'] = []

    for frame_id in diapason.frame_range:
        logging.info('generating traffic for frame %d' % frame_id)
        timestamp = timestamps[frame_id][0]
        time = parseTimeString(timestamp)
        traffic_frame = model.get_next_frame(time)
        traffic_frame['frame_id'] = frame_id  # for validating
        traffic['frames'].append(traffic_frame)

    return traffic
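Diapason is also not defined on this page. From its usage here and in Example No. 6 (frame_range_as_chunks), it appears to interpret a slice-like string such as '[::]' over the number of frames. A minimal sketch under that assumption; the real class may differ:

class Diapason:
    def __init__(self, num_frames, frame_range_str):
        # Strip the brackets and parse the 'start:stop:step' fields.
        parts = (frame_range_str.strip('[]').split(':') + ['', ''])[:3]
        start, stop, step = [int(p) if p else None for p in parts]
        self.frame_range = list(range(num_frames))[slice(start, stop, step)]

    def frame_range_as_chunks(self, chunk_size):
        # Split the frame range into consecutive chunks, e.g. one per pool worker.
        frames = list(self.frame_range)
        for i in range(0, len(frames), chunk_size):
            yield frames[i:i + chunk_size]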
Example No. 3
def make_snapshot(render_dir, car_names, params):
    '''Set up the weather, and render vehicles into files
    Args:
      render_dir:  path to directory where to put all rendered images
      car_names:   names of car objects in the scene
      params:      dictionary with frame information
    Returns:
      nothing
    '''

    logging.info('make_snapshot: started')

    setParamUnlessThere(params, 'scale', 1)
    setParamUnlessThere(params, 'render_individual_cars', True)
    # debug options
    setParamUnlessThere(params, 'save_blender_files', False)
    setParamUnlessThere(params, 'render_satellite', False)
    setParamUnlessThere(params, 'render_cars_as_cubes', False)

    bpy.data.worlds['World'].light_settings.environment_energy = 0.0
    bpy.data.worlds['World'].light_settings.ao_factor = 0.5
    bpy.data.objects['-Sky-sunset'].data.energy = np.random.normal(1, 0.5)

    params['weather'] = np.random.choice(['Sunny', 'Cloudy', 'Rainy', 'Wet'])
    set_weather(params)

    # render the image from the satellite, when debugging
    if '-Satellite' in bpy.data.objects:
        bpy.data.objects[
            '-Satellite'].hide_render = not params['render_satellite']

    # make all cars receive shadows
    logging.info('materials: %s' % len(bpy.data.materials))
    for m in bpy.data.materials:
        m.use_transparent_shadows = True

    # create render dir
    if not op.exists(render_dir):
        os.makedirs(render_dir)

    # # render all cars and shadows
    # bpy.context.scene.render.layers['RenderLayer'].use_pass_combined = True
    # bpy.context.scene.render.layers['RenderLayer'].use_pass_z = False
    # #bpy.data.objects['-Ground'].hide_render = False
    # render_scene(op.join(render_dir, 'render'))

    # # render cars depth map
    # #bpy.context.scene.render.alpha_mode = 'TRANSPARENT'
    # bpy.context.scene.render.layers['RenderLayer'].use_pass_combined = False
    # bpy.context.scene.render.layers['RenderLayer'].use_pass_z = True
    # #bpy.data.objects['-Ground'].hide_render = True
    # render_scene(op.join(render_dir, 'depth-all'))

    # # render just the car for each car (to extract bbox)
    # if params['render_individual_cars'] and not params['render_cars_as_cubes']:
    #     # hide all cars
    #     for car_name in car_names:
    #         hide_car (car_name)
    #     # show, render, and hide each car one by one
    #     for i,car_name in enumerate(car_names):
    #         show_car (car_name)
    #         render_scene( op.join(render_dir, 'depth-car-%03d.png' % i) )
    #         hide_car (car_name)

    # # clean up
    # bpy.data.objects['-Ground'].hide_render = False
    # if not params['render_cars_as_cubes']:
    #     for car_name in car_names:
    #         show_car (car_name)

    def _rename(render_dir, from_name, to_name):
        os.rename(atcity(op.join(render_dir, from_name)),
                  atcity(op.join(render_dir, to_name)))

    # set directional blur amount if given
    #bpy.data.node_groups['Compositing Nodetree'].nodes['Camera-Blur'].zoom

    # There are two compositor nodes -- "render" and "depth".
    # They save images in BW16 or RGB8 and render the layers "Render" and
    # "Depth" with the "Combined" and "Z" passes, respectively.
    bpy.context.scene.node_tree.nodes['depth'].base_path = atcity(render_dir)
    bpy.context.scene.node_tree.nodes['render'].base_path = atcity(render_dir)

    # leave only shadows
    for m in bpy.data.materials:
        if m != bpy.data.materials['Material-dry-asphalt'] and \
           m != bpy.data.materials['Material-wet-asphalt']:
            m.use_only_shadow = True

    # render shadows only
    bpy.data.objects['-Ground'].hide_render = False
    bpy.ops.render.render(write_still=True, layer='Render')
    _rename(render_dir, 'render0001', 'render.png')

    # materials back to normal
    for m in bpy.data.materials:
        if m != bpy.data.materials['Material-dry-asphalt'] and \
           m != bpy.data.materials['Material-wet-asphalt']:
            m.use_only_shadow = False

    # render without ground
    bpy.data.objects['-Ground'].hide_render = True
    bpy.ops.render.render(write_still=True, layer='Render')
    _rename(render_dir, 'render0001', 'cars-only.png')

    # render depth of all cars
    bpy.ops.render.render(write_still=True, layer='Depth')
    _rename(render_dir, 'depth0001', 'depth-all.png')

    if params['render_individual_cars'] and not params['render_cars_as_cubes']:
        for car_i0, car_name0 in enumerate(car_names):

            # hide all cars from rendering, then show car_name0 alone
            for car_name in car_names:
                bpy.data.objects[car_name].hide_render = True
            bpy.data.objects[car_name0].hide_render = False

            # render scene
            bpy.ops.render.render(write_still=True, layer='Depth')
            _rename(render_dir, 'depth0001', 'depth-%03d.png' % car_i0)

    if params['save_blender_files']:
        bpy.ops.wm.save_as_mainfile(
            filepath=atcity(op.join(render_dir, 'render.blend')))

    # logging.info ('objects in the end of frame: %d' % len(bpy.data.objects))
    logging.info('make_snapshot: successfully finished a frame')
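For reference, a hypothetical call showing the parameters make_snapshot actually reads; the render directory is illustrative, and the car names follow the 'car_%i' pattern used in Example No. 4:

params = {'render_individual_cars': True,
          'save_blender_files': False}
make_snapshot('augmentation/render/frame-000042', ['car_0', 'car_1'], params)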
Example No. 4
setupLogging('log/augmentation/renderScene.log', logging.INFO, 'a')

traffic_path = op.join(WORK_DIR, TRAFFIC_FILENAME)
logging.info('traffic_path: %s' % traffic_path)
frame_info = json.load(open(traffic_path))
setParamUnlessThere(frame_info, 'render_cars_as_cubes', False)

# place all cars
car_names = []
for i, vehicle in enumerate(frame_info['vehicles']):
    if frame_info['render_cars_as_cubes']:
        location = (vehicle['x'], vehicle['y'], 0.1)
        bpy.ops.mesh.primitive_cube_add(location=location, radius=0.3)
    else:
        collection_id = vehicle['collection_id']
        model_id = vehicle['model_id']
        blend_path = atcity(
            op.join('data/augmentation/CAD', collection_id, 'blend',
                    '%s.blend' % model_id))
        car_name = 'car_%i' % i
        car_names.append(car_name)
Example No. 5
                        'Can be "traffic.json" in video output dir.')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--speed_kph', type=int)
    group.add_argument('--num_cars', type=int)
    args = parser.parse_args()

    setupLogging('log/augmentation/GenerateTraffic.log', args.logging_level,
                 'w')

    if not op.exists(atcity(op.dirname(args.traffic_file))):
        os.makedirs(atcity(op.dirname(args.traffic_file)))

    job = {
        'frame_range': args.frame_range,
        'in_db_file': args.in_db_file,
        'video_dir': args.video_dir,
        'out_video_dir': op.dirname(args.in_db_file)
    }
    if args.speed_kph is not None:
        setParamUnlessThere(job, 'speed_kph', args.speed_kph)
    elif args.num_cars is not None:
        setParamUnlessThere(job, 'num_cars', args.num_cars)
    else:
        assert False, 'either --speed_kph or --num_cars must be given'

    pprint(job)
    traffic = generate_video_traffic(job)
    print('generated traffic for %d frames' % len(traffic['frames']))
    with open(atcity(args.traffic_file), 'w') as f:
        f.write(json.dumps(traffic, indent=2))
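The traffic file written here has the shape assembled in Example No. 2 and consumed by Examples No. 4 and 6. Roughly, with illustrative paths and values only (the per-vehicle fields follow the reads in Example No. 4; the traffic model may emit more):

traffic = {
    'in_db_file': 'data/databases/sample.db',  # hypothetical path
    'frames': [
        {'frame_id': 0,
         'vehicles': [{'collection_id': 'some_collection',  # hypothetical ids
                       'model_id': 'some_model',
                       'x': 12.3, 'y': 4.5}]},
    ],
}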
Example No. 6
def process_video(job):

    assertParamIsThere(job, 'video_dir')
    video = Video(video_dir=job['video_dir'])
    camera = video.build_camera()

    # some parameters
    assertParamIsThere(job, 'traffic_file')
    setParamUnlessThere(job, 'save_blender_files', False)
    setParamUnlessThere(
        job, 'out_video_dir',
        op.join('augmentation/video', 'cam%s' % camera.info['cam_id'],
                video.info['video_name']))
    setParamUnlessThere(job, 'no_annotations', False)
    setParamUnlessThere(job, 'timeout', 1000000000)
    setParamUnlessThere(job, 'frame_range', '[::]')
    job['render_individual_cars'] = not job['no_annotations']

    # load camera dimensions (compare it to everything for extra safety)
    width0 = camera.info['camera_dims']['width']
    height0 = camera.info['camera_dims']['height']

    # for checking timeout
    start_time = datetime.now()

    cad = Cad()

    # upload info on parsed vehicles to the monitor server
    monitor = None  # MonitorDatasetClient (cam_id=camera.info['cam_id'])

    # load traffic info
    traffic_video = json.load(open(atcadillac(job['traffic_file'])))

    # reader and writer
    video_reader = ReaderVideo()
    image_vfile = op.join(job['out_video_dir'], 'image.avi')
    mask_vfile = op.join(job['out_video_dir'], 'mask.avi')
    video_writer = SimpleWriter(image_vfile, mask_vfile, {'unsafe': True})

    (conn, c) = dbInit(traffic_video['in_db_file'],
                       op.join(job['out_video_dir'], 'traffic.db'))
    c.execute('SELECT imagefile,maskfile,width,height,time FROM images')
    image_entries = c.fetchall()
    c.execute('DELETE FROM images')

    #assert len(traffic_video['frames']) >= len(image_entries), \
    #  'traffic json is too small %d < %d' % (len(traffic_video['frames']), len(image_entries))

    diapason = Diapason(len(image_entries), job['frame_range'])

    num_processes = int(multiprocessing.cpu_count() / 2 + 1)
    pool = multiprocessing.Pool(processes=num_processes)

    # each frame_range chunk is processed in parallel
    for frame_range in diapason.frame_range_as_chunks(pool._processes):
        logging.info('chunk of frames %d to %d' %
                     (frame_range[0], frame_range[-1]))

        # quit, if reached the timeout
        time_passed = datetime.now() - start_time
        logging.info('passed: %s' % time_passed)
        if (time_passed.total_seconds() > job['timeout'] * 60):
            logging.warning('reached timeout %d. Passed %s' %
                            (job['timeout'], time_passed))
            break

        # collect frame jobs
        frame_jobs = []
        for frame_id in frame_range:

            (in_backfile, in_maskfile, width, height,
             _) = image_entries[frame_id]
            assert (width0 == width
                    and height0 == height), (width0, width, height0, height)
            logging.info('collect job for frame number %d' % frame_id)

            back = video_reader.imread(in_backfile)

            traffic = traffic_video['frames'][frame_id]
            assert traffic['frame_id'] == frame_id, '%d vs %d' % (
                traffic['frame_id'], frame_id)
            traffic['save_blender_files'] = job['save_blender_files']

            frame_jobs.append((video, camera, traffic, back, job))

        #for i, (out_image, out_mask, work_dir) in enumerate(sequentialworker(frame_jobs)):
        for i, (out_image, out_mask,
                work_dir) in enumerate(pool.imap(worker, frame_jobs)):
            frame_id = frame_range[i]
            logging.info('processed frame number %d' % frame_id)

            assert out_image is not None and out_mask is not None
            out_imagefile = video_writer.imwrite(out_image)
            out_maskfile = video_writer.maskwrite(out_mask)
            logging.info('out_imagefile: %s, out_maskfile: %s' %
                         (out_imagefile, out_maskfile))

            # update out database
            (_, _, width, height, time) = image_entries[frame_id]
            c.execute(
                'INSERT INTO images(imagefile,maskfile,width,height,time) VALUES (?,?,?,?,?)',
                (out_imagefile, out_maskfile, width, height, time))
            logging.info('wrote frame %d' % c.lastrowid)

            if not job['no_annotations']:
                extract_annotations(work_dir, c, cad, camera, out_imagefile,
                                    monitor)

            if not job['save_blender_files']:
                shutil.rmtree(work_dir)

            conn.commit()
    conn.close()

    pool.close()
    pool.join()
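The worker function mapped over the pool is not shown. From the tuples packed into frame_jobs and the unpacking of the pool.imap results, its contract appears to be: take (video, camera, traffic, back, job), render the frame, composite it over the background, and return (out_image, out_mask, work_dir). A sketch of that contract only; combine_with_background is a hypothetical helper, and the real worker may differ:

def worker(args):
    video, camera, traffic, back, job = args
    # Presumably delegates to render_frame (Example No. 7).
    image, mask = render_frame(video, camera, traffic)
    # The per-process work dir matches the naming in Example No. 7.
    work_dir = '%s-%d' % (WORK_RENDER_DIR, os.getpid())
    out_image = combine_with_background(image, back)  # hypothetical helper
    return out_image, mask, work_dir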
Example No. 7
def render_frame(video, camera, traffic):
    ''' Write out the traffic file for Blender and run Blender with renderScene.py.
    All work happens in the current-frame dir.
    '''
    WORK_DIR = '%s-%d' % (WORK_RENDER_DIR, os.getpid())
    setParamUnlessThere(traffic, 'save_blender_files', False)
    setParamUnlessThere(traffic, 'render_individual_cars', True)
    unsharp_mask_params = {'radius': 4.7, 'threshold': 23, 'amount': 1}

    # load camera dimensions (compare it to everything for extra safety)
    width0 = camera.info['camera_dims']['width']
    height0 = camera.info['camera_dims']['height']
    logging.debug('camera width,height: %d,%d' % (width0, height0))

    image = None
    mask = None

    # pass traffic info to blender
    traffic['scale'] = camera.info['scale']
    traffic_path = op.join(WORK_DIR, TRAFFIC_FILENAME)
    if not op.exists(op.dirname(traffic_path)):
        os.makedirs(op.dirname(traffic_path))
    with open(traffic_path, 'w') as f:
        f.write(json.dumps(traffic, indent=4))

    # remove so that they do not exist if blender fails
    if op.exists(op.join(WORK_DIR, RENDERED_FILENAME)):
        os.remove(op.join(WORK_DIR, RENDERED_FILENAME))
    if op.exists(op.join(WORK_DIR, 'depth-all.png')):
        os.remove(op.join(WORK_DIR, 'depth-all.png'))
    # render
    assert video.render_blend_file is not None
    render_blend_path = atcadillac(video.render_blend_file)
    command = [
        '%s/blender' % os.getenv('BLENDER_ROOT'), render_blend_path,
        '--background', '--python',
        '%s/src/augmentation/renderScene.py' % os.getenv('CITY_PATH')
    ]
    logging.debug('WORK_DIR: %s' % WORK_DIR)
    logging.debug(' '.join(command))
    returncode = subprocess.call(command,
                                 shell=False,
                                 stdout=FNULL,
                                 stderr=FNULL)
    logging.info('rendering: blender returned code %s' % str(returncode))

    # check and sharpen rendered
    rendered_filepath = op.join(WORK_DIR, RENDERED_FILENAME)
    image = cv2.imread(rendered_filepath, -1)
    assert image is not None
    assert image.shape == (height0, width0, 4), image.shape
    image = unsharp_mask(image, unsharp_mask_params)
    cv2.imwrite(rendered_filepath, image)

    # check and sharpen cars-only
    carsonly_filepath = op.join(WORK_DIR, CARSONLY_FILENAME)
    image = cv2.imread(carsonly_filepath, -1)
    assert image is not None
    assert image.shape == (height0, width0, 4), image.shape
    image = unsharp_mask(image, unsharp_mask_params)
    shutil.move(carsonly_filepath, op.join(WORK_DIR, 'unsharpened.png'))
    cv2.imwrite(carsonly_filepath, image)

    # create mask
    if traffic['render_individual_cars']:
        mask = _get_masks(WORK_DIR, traffic)
        # TODO: visibility is returned via traffic file, NOT straightforward
        with open(traffic_path, 'w') as f:
            f.write(json.dumps(traffic, indent=4))

    # correction_path = op.join(WORK_DIR, CORRECTION_FILENAME)
    # if op.exists(correction_path): os.remove(correction_path)
    # if not params['no_correction']:
    #     correction_info = color_correction (video.example_background, background)
    #     with open(correction_path, 'w') as f:
    #         f.write(json.dumps(correction_info, indent=4))

    return image, mask
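unsharp_mask is not defined on this page. Given the parameters used above (radius, threshold, amount), a plausible implementation is classic Gaussian unsharp masking; this sketch is an assumption, not the repository's code:

import cv2
import numpy as np

def unsharp_mask(image, params):
    # Sharpen by adding back the difference between the image and its blur.
    blurred = cv2.GaussianBlur(image, (0, 0), params['radius'])
    diff = image.astype(np.float32) - blurred.astype(np.float32)
    sharpened = image.astype(np.float32) + params['amount'] * diff
    # Leave low-contrast pixels untouched, as controlled by 'threshold'.
    low_contrast = np.abs(diff) < params['threshold']
    sharpened[low_contrast] = image.astype(np.float32)[low_contrast]
    return np.clip(sharpened, 0, 255).astype(image.dtype)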