Example #1
    def __init__(self, camera, video, cad, num_cars_mean):

        self.sun = Sun()

        self.camera = camera
        self.video = video

        # get the map of azimuths:
        # gray values (r == g == b) plus alpha, saved as 4 channels
        azimuth_path = atcity(
            op.join(camera['camera_dir'], camera['azimuth_name']))
        azimuth_map = cv2.imread(azimuth_path, cv2.IMREAD_UNCHANGED)
        assert azimuth_map is not None and azimuth_map.shape[2] == 4

        # black out the invisible azimuth_map regions
        if 'mask' in camera and camera['mask']:
            mask_path = atcity(op.join(camera['camera_dir'], camera['mask']))
            mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
            assert mask is not None, mask_path
            azimuth_map[mask > 0] = 0  # boolean mask, not row indexing

        self.num_cars_mean = num_cars_mean
        self.num_cars_std = num_cars_mean * 1.0
        self.azimuth_map = azimuth_map
        self.cad = cad
        self.pxls_in_meter = camera['pxls_in_meter']
Example #2
    def __init__(self, camera, video, cad, speed_kph, burn_in=True):

        self.sun = Sun()  # TODO: only load when it's sunny

        self.camera = camera
        self.video = video

        # load mask
        if 'mask' in camera:
            mask_path = atcity(op.join(camera['camera_dir'], camera['mask']))
            self.mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
            assert self.mask is not None, mask_path
            logging.info('TrafficModel: loaded a mask')
        else:
            self.mask = None

        # create lanes
        lanes_path = atcity(op.join(camera['camera_dir'],
                                    camera['lanes_name']))
        lanes_dicts = json.load(open(lanes_path))
        self.lanes = [
            Lane(('%d' % i), l, cad, speed_kph, camera['pxls_in_meter'])
            for i, l in enumerate(lanes_dicts)
        ]
        logging.info('TrafficModel: loaded %d lanes' % len(self.lanes))

        if burn_in:
            for i in range(100):
                self.get_next_frame(time=datetime.now())
Example #3
def generate_video_traffic(job):
    '''Generate the traffic file for the whole video.
    Args:
      job - same as for process_video; its 'in_db_file' must list all
            the images for which traffic is generated
    '''
    assertParamIsThere(job, 'in_db_file')
    assertParamIsThere(job, 'out_video_dir')
    setParamUnlessThere(job, 'frame_range', '[::]')
    assertParamIsThere(job, 'video_dir')

    video = Video(video_dir=job['video_dir'])
    camera = video.build_camera()

    assert op.exists(atcity(job['in_db_file'])), \
        'in db %s does not exist' % atcity(job['in_db_file'])
    conn_in = sqlite3.connect(atcity(job['in_db_file']))
    c_in = conn_in.cursor()
    c_in.execute('SELECT time FROM images')
    timestamps = c_in.fetchall()
    conn_in.close()

    cad = Cad()

    if 'speed_kph' in job:
        model = TrafficModel(camera,
                             video,
                             cad=cad,
                             speed_kph=job['speed_kph'])
    elif 'num_cars' in job:
        model = TrafficModelRandom(camera,
                                   video,
                                   cad=cad,
                                   num_cars_mean=job['num_cars'])
    else:
        assert False, 'job must have either speed_kph or num_cars'

    diapason = Diapason(len(timestamps), job['frame_range'])

    traffic = {'in_db_file': job['in_db_file']}
    traffic['frames'] = []

    for frame_id in diapason.frame_range:
        logging.info('generating traffic for frame %d' % frame_id)
        timestamp = timestamps[frame_id][0]
        time = parseTimeString(timestamp)
        traffic_frame = model.get_next_frame(time)
        traffic_frame['frame_id'] = frame_id  # for validating
        traffic['frames'].append(traffic_frame)

    return traffic
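
A minimal usage sketch for generate_video_traffic; the paths are hypothetical, and it assumes the imports and helpers above are available. Passing 'speed_kph' selects TrafficModel; 'num_cars' would select TrafficModelRandom instead.

job = {
    'in_db_file': 'data/databases/cam101/video1.db',  # hypothetical path
    'video_dir': 'data/camdata/cam101/video1',        # hypothetical path
    'out_video_dir': 'augmentation/video/cam101/video1',
    'speed_kph': 30,
}
traffic = generate_video_traffic(job)
with open(atcity('data/augmentation/traffic.json'), 'w') as f:
    f.write(json.dumps(traffic, indent=4))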
Example #4
def combine_frame(background, video, camera):
    '''Overlay the rendered image onto the background.'''
    jpg_qual = 40

    WORK_DIR = '%s-%d' % (WORK_RENDER_DIR, os.getpid())

    # load camera dimensions (compare it to everything for extra safety)
    width0 = camera.info['camera_dims']['width']
    height0 = camera.info['camera_dims']['height']

    # get background file
    assert background is not None
    assert background.shape == (height0, width0, 3), background.shape
    # debug option: make a completely gray background frame
    #background.fill(128)
    cv2.imwrite(op.join(WORK_DIR, BACKGROUND_FILENAME), background)

    # get shadows file
    #shadow_path = op.join(WORK_DIR, 'render.png')
    #shadow = scipy.misc.imread(shadow_path)
    #shadow[:,:,3] = 0  # assign full transparency
    #scipy.misc.imsave(shadow_path, shadow)

    # remove previous result so that there is an error if blender fails
    if op.exists(op.join(WORK_DIR, COMBINED_FILENAME)):
        os.remove(op.join(WORK_DIR, COMBINED_FILENAME))

    # overlay
    assert video.combine_blend_file is not None
    combine_scene_path = atcity(video.combine_blend_file)
    command = [
        '%s/blender' % os.getenv('BLENDER_ROOT'), combine_scene_path,
        '--background', '--python',
        '%s/src/augmentation/combineScene.py' % os.getenv('CITY_PATH')
    ]
    returncode = subprocess.call(command,
                                 shell=False,
                                 stdout=FNULL,
                                 stderr=FNULL)
    logging.info('combine: blender returned code %s' % str(returncode))
    combined_filepath = op.join(WORK_DIR, COMBINED_FILENAME)
    assert op.exists(combined_filepath), combined_filepath
    image = cv2.imread(combined_filepath)
    assert image.shape == (height0, width0, 3), \
        '%s vs %s' % (image.shape, (height0, width0, 3))

    # reencode to match jpeg quality
    shutil.move(combined_filepath, op.join(WORK_DIR, 'uncompressed.png'))
    _, ajpg = cv2.imencode(".jpg", image, [cv2.IMWRITE_JPEG_QUALITY, jpg_qual])
    image = cv2.imdecode(ajpg, cv2.IMREAD_COLOR)
    cv2.imwrite(combined_filepath, image)

    return image
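
A hedged usage sketch for this version of combine_frame; the directory is hypothetical, and it assumes the render step has already written the cars-only and shadow images into WORK_DIR.

video = Video(video_dir='data/camdata/cam101/video1')  # hypothetical dir
camera = video.build_camera()
background = video.example_background  # any frame of the camera size works
assert background is not None
image = combine_frame(background, video, camera)
cv2.imwrite(atcity('data/augmentation/preview.jpg'), image)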
Example #5
import sys, os, os.path as op
sys.path.insert(0, op.join(os.getenv('CITY_PATH'), 'src'))
import json
import logging
import bpy
import numpy as np
from learning.helperSetup import atcity, setupLogging, setParamUnlessThere
''' Do all frame postprocessing and combination in WORK_RENDER_DIR. '''

WORK_RENDER_DIR = atcity('data/augmentation/blender/current-frame')
BACKGROUND_FILENAME = 'background.png'
NORMAL_FILENAME = 'render.png'
CARSONLY_FILENAME = 'cars-only.png'
COMBINED_FILENAME = 'out.png'
#CORRECTION_FILENAME = 'color-correction.json'

# this script runs inside blender, so the parent pid identifies the work dir
# of the process that launched blender
WORK_DIR = '%s-%d' % (WORK_RENDER_DIR, os.getppid())
WORK_DIR_SUFFIX = '-%d' % os.getppid()

#correction_path = op.join(WORK_DIR, CORRECTION_FILENAME)

image_node = bpy.context.scene.node_tree.nodes['Image-Background'].image
image_node.filepath = op.join(WORK_DIR, BACKGROUND_FILENAME)

image_node = bpy.context.scene.node_tree.nodes['Image-Cars-Only'].image
image_node.filepath = op.join(WORK_DIR, CARSONLY_FILENAME)

image_node = bpy.context.scene.node_tree.nodes['Image-Normal'].image
image_node.filepath = op.join(WORK_DIR, NORMAL_FILENAME)

bpy.context.scene.node_tree.nodes[
Example #6
    def __init__(self, video_dir=None, video_info=None):

        if video_dir:
            video_name = op.basename(video_dir)
            video_path = atcity(op.join(video_dir, '%s.json' % video_name))
            assert op.exists(video_path), video_path
            logging.info('Video: loading info from: %s' % video_path)
            video_info = json.load(open(video_path))
        elif video_info:
            assert 'video_dir' in video_info
            video_dir = video_info['video_dir']
            assert op.exists(atcity(video_dir)), video_dir
            video_name = op.basename(video_dir)
        else:
            raise Exception('pass video_info or video_dir')
        logging.info('Video: parse info for: %s' % video_dir)

        self.info = video_info
        self.info['video_name'] = video_name

        if 'example_frame_name' in video_info:
            example_frame_name = video_info['example_frame_name']
            logging.info('- found example_frame_name: %s' % example_frame_name)
            self.example_frame = cv2.imread(
                atcity(op.join(video_dir, example_frame_name)))
            assert self.example_frame is not None
        else:
            # try possible paths and take the first match
            example_frame_paths = glob(atcity(op.join(video_dir,
                                                      'frame*.png')))
            if len(example_frame_paths) > 0:
                logging.info('- deduced example_frame: %s' %
                             example_frame_paths[0])
                self.example_frame = cv2.imread(example_frame_paths[0])
                self.info['example_frame_name'] = op.basename(
                    example_frame_paths[0])
                assert self.example_frame is not None
            else:
                logging.warning('- no example_frame for %s' % video_dir)
                self.example_frame = None
                self.info['example_frame_name'] = None

        if 'example_background_name' in video_info:
            example_background_name = video_info['example_background_name']
            logging.info('- found example_background_name: %s' %
                         example_background_name)
            example_background_path = atcity(
                op.join(video_dir, example_background_name))
            self.example_background = cv2.imread(example_background_path)
            assert self.example_background is not None
        else:
            # try possible paths and take the first match
            example_back_paths = glob(
                atcity(op.join(video_dir, 'background*.png')))
            if len(example_back_paths) > 0:
                logging.info('- deduced example_background: %s' %
                             example_back_paths[0])
                self.example_background = cv2.imread(example_back_paths[0])
                assert self.example_background is not None
            else:
                logging.warning('- no example_background for %s' % video_dir)
                self.example_background = None

        if 'start_timestamp' in video_info:
            start_timestamp = video_info['start_timestamp']
            logging.info('- found start_timestamp: %s' % start_timestamp)
            self.start_time = datetime.strptime(start_timestamp, TIME_FORMAT)
        else:
            # deduce from the name of the video directory
            self.start_time = datetime.strptime(video_name, VIDEO_DIR_STRPTIME)
            logging.info('- deduced start_time: %s' %
                         self.start_time.strftime(TIME_FORMAT))

        if 'frame_range' in video_info:
            self.info['frame_range'] = video_info['frame_range']
            logging.info('- found frame_range: %s' % video_info['frame_range'])
        else:
            self.info['frame_range'] = ':'

        if 'render_blend_file' in video_info:
            self.render_blend_file = video_info['render_blend_file']
            logging.info('- found render_blend_file: %s' %
                         self.render_blend_file)
            assert op.exists(atcity(self.render_blend_file))
        elif op.exists(atcity(op.join(video_dir, 'render.blend'))):
            # the default name was found in the video folder
            self.render_blend_file = op.join(video_dir, 'render.blend')
            logging.info('- found render_blend_file in video dir: %s' %
                         self.render_blend_file)
        elif op.exists(atcity(op.join(video_dir, 'render-generated.blend'))):
            # fall back to the generated file in the video folder
            self.render_blend_file = op.join(video_dir,
                                             'render-generated.blend')
            logging.warning('- using generated render_blend_file: %s' %
                            self.render_blend_file)
        else:
            self.render_blend_file = None
            logging.warning('- could not figure out render_blend_file')

        if 'combine_blend_file' in video_info:
            self.combine_blend_file = video_info['combine_blend_file']
            logging.info('- found combine_blend_file: %s' %
                         self.combine_blend_file)
            assert op.exists(atcity(self.combine_blend_file))
        elif op.exists(atcity(op.join(video_dir, 'combine.blend'))):
            # the default name was found in the video folder
            self.combine_blend_file = op.join(video_dir, 'combine.blend')
            logging.info('- found combine_blend_file in video dir: %s' %
                         self.combine_blend_file)
        elif op.exists(atcity(op.join(video_dir, 'combine-generated.blend'))):
            # fall back to the generated file in the video folder
            self.combine_blend_file = op.join(video_dir,
                                              'combine-generated.blend')
            logging.warning('- using generated combine_blend_file: %s' %
                            self.combine_blend_file)
        else:
            self.combine_blend_file = None
            logging.warning('- could not figure out combine_blend_file')

        if 'camera_dir' in video_info:
            self.camera_dir = video_info['camera_dir']
            logging.info('- found camera_dir: %s' % self.camera_dir)
        else:
            # deduce as the parent directory of video_dir
            self.camera_dir = op.dirname(video_dir)
            logging.info('- deduced camera_dir: %s' % self.camera_dir)
        assert op.exists(atcity(self.camera_dir)), atcity(self.camera_dir)

        if 'pose_id' in video_info:
            self.pose_id = int(video_info['pose_id'])
            logging.info('- found pose_id: %d' % self.pose_id)
        else:
            self.pose_id = 0
            logging.info('- take default pose_id = 0')
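
A short usage sketch; the directory name is hypothetical, and the info json (or the directory name itself, via VIDEO_DIR_STRPTIME) must provide the start time.

video = Video(video_dir='data/camdata/cam101/video1')  # hypothetical dir
logging.info(video.info['video_name'])
logging.info(video.start_time)         # found in the json or deduced
logging.info(video.render_blend_file)  # None if nothing was found
camera = video.build_camera()          # build_camera is used in the other examples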
Example #7
    def _rename(render_dir, from_name, to_name):
        os.rename(atcity(op.join(render_dir, from_name)),
                  atcity(op.join(render_dir, to_name)))
Example #8
def make_snapshot(render_dir, car_names, params):
    '''Set up the weather, and render vehicles into files
    Args:
      render_dir:  path to directory where to put all rendered images
      car_names:   names of car objects in the scene
      params:      dictionary with frame information
    Returns:
      nothing
    '''

    logging.info('make_snapshot: started')

    setParamUnlessThere(params, 'scale', 1)
    setParamUnlessThere(params, 'render_individual_cars', True)
    # debug options
    setParamUnlessThere(params, 'save_blender_files', False)
    setParamUnlessThere(params, 'render_satellite', False)
    setParamUnlessThere(params, 'render_cars_as_cubes', False)

    bpy.data.worlds['World'].light_settings.environment_energy = 0.0
    bpy.data.worlds['World'].light_settings.ao_factor = 0.5
    bpy.data.objects['-Sky-sunset'].data.energy = np.random.normal(1, 0.5)  #2

    params['weather'] = np.random.choice(['Sunny', 'Cloudy', 'Rainy', 'Wet'])
    set_weather(params)

    # render the image from the satellite when debugging
    if '-Satellite' in bpy.data.objects:
        bpy.data.objects[
            '-Satellite'].hide_render = not params['render_satellite']

    # make all cars receive shadows
    logging.info('materials: %s' % len(bpy.data.materials))
    for m in bpy.data.materials:
        m.use_transparent_shadows = True

    # create render dir
    if not op.exists(render_dir):
        os.makedirs(render_dir)

    # # render all cars and shadows
    # bpy.context.scene.render.layers['RenderLayer'].use_pass_combined = True
    # bpy.context.scene.render.layers['RenderLayer'].use_pass_z = False
    # #bpy.data.objects['-Ground'].hide_render = False
    # render_scene(op.join(render_dir, 'render'))

    # # render cars depth map
    # #bpy.context.scene.render.alpha_mode = 'TRANSPARENT'
    # bpy.context.scene.render.layers['RenderLayer'].use_pass_combined = False
    # bpy.context.scene.render.layers['RenderLayer'].use_pass_z = True
    # #bpy.data.objects['-Ground'].hide_render = True
    # render_scene(op.join(render_dir, 'depth-all'))

    # # render just the car for each car (to extract bbox)
    # if params['render_individual_cars'] and not params['render_cars_as_cubes']:
    #     # hide all cars
    #     for car_name in car_names:
    #         hide_car (car_name)
    #     # show, render, and hide each car one by one
    #     for i,car_name in enumerate(car_names):
    #         show_car (car_name)
    #         render_scene( op.join(render_dir, 'depth-car-%03d.png' % i) )
    #         hide_car (car_name)

    # # clean up
    # bpy.data.objects['-Ground'].hide_render = False
    # if not params['render_cars_as_cubes']:
    #     for car_name in car_names:
    #         show_car (car_name)

    def _rename(render_dir, from_name, to_name):
        os.rename(atcity(op.join(render_dir, from_name)),
                  atcity(op.join(render_dir, to_name)))

    # set the directional blur amount, if given
    #bpy.data.node_groups['Compositing Nodetree'].nodes['Camera-Blur'].zoom

    # there are two nodes -- "render" and "depth"
    # they save images in BW16 or RGB8
    # they render layers "Render" and "Depth" with "Combined" and "Z" passes.
    bpy.context.scene.node_tree.nodes['depth'].base_path = atcity(render_dir)
    bpy.context.scene.node_tree.nodes['render'].base_path = atcity(render_dir)

    # leave only shadows
    for m in bpy.data.materials:
        if m != bpy.data.materials['Material-dry-asphalt'] and \
           m != bpy.data.materials['Material-wet-asphalt']:
            m.use_only_shadow = True

    # render shadows only
    bpy.data.objects['-Ground'].hide_render = False
    bpy.ops.render.render(write_still=True, layer='Render')
    _rename(render_dir, 'render0001', 'render.png')

    # materials back to normal
    for m in bpy.data.materials:
        if m != bpy.data.materials['Material-dry-asphalt'] and \
           m != bpy.data.materials['Material-wet-asphalt']:
            m.use_only_shadow = False

    # render without ground
    bpy.data.objects['-Ground'].hide_render = True
    bpy.ops.render.render(write_still=True, layer='Render')
    _rename(render_dir, 'render0001', 'cars-only.png')

    # render depth of all cars
    bpy.ops.render.render(write_still=True, layer='Depth')
    _rename(render_dir, 'depth0001', 'depth-all.png')

    if params['render_individual_cars'] and not params['render_cars_as_cubes']:
        for car_i0, car_name0 in enumerate(car_names):

            # hide all cars from rendering, then show only car_name0
            for car_name in car_names:
                bpy.data.objects[car_name].hide_render = True
            bpy.data.objects[car_name0].hide_render = False

            # render scene
            bpy.ops.render.render(write_still=True, layer='Depth')
            _rename(render_dir, 'depth0001', 'depth-%03d.png' % car_i0)

    if params['save_blender_files']:
        bpy.ops.wm.save_as_mainfile(
            filepath=atcity(op.join(render_dir, 'render.blend')))

    # logging.info ('objects in the end of frame: %d' % len(bpy.data.objects))
    logging.info('make_snapshot: successfully finished a frame')
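
make_snapshot runs inside Blender. A sketch of how a host process would drive it, mirroring the subprocess pattern of combine_frame and render_frame in the other examples; the Video import path and the video directory are assumptions.

import os, subprocess
from augmentation.Video import Video  # hypothetical module path

video = Video(video_dir='data/camdata/cam101/video1')  # hypothetical dir
# renderScene.py is assumed to read traffic.json from WORK_DIR and to call
# make_snapshot with the car objects it placed in the scene
command = [
    '%s/blender' % os.getenv('BLENDER_ROOT'),
    atcity(video.render_blend_file),
    '--background', '--python',
    '%s/src/augmentation/renderScene.py' % os.getenv('CITY_PATH'),
]
subprocess.call(command, shell=False)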
Example #9
import bpy
import sys, os, os.path as op
sys.path.insert(0, op.join(os.getenv('CITY_PATH'), 'src'))
import json
import logging
import numpy as np
from augmentation.common import *
from learning.helperSetup import atcity, setupLogging, setParamUnlessThere

WORK_RENDER_DIR = atcity('data/augmentation/blender/current-frame')
TRAFFIC_FILENAME = 'traffic.json'

WORK_DIR = '%s-%d' % (WORK_RENDER_DIR, os.getppid())


def make_snapshot(render_dir, car_names, params):
    '''Set up the weather, and render vehicles into files
    Args:
      render_dir:  path to directory where to put all rendered images
      car_names:   names of car objects in the scene
      params:      dictionary with frame information
    Returns:
      nothing
    '''

    logging.info('make_snapshot: started')

    setParamUnlessThere(params, 'scale', 1)
    setParamUnlessThere(params, 'render_individual_cars', True)
    # debug options
    setParamUnlessThere(params, 'save_blender_files', False)
Example #10
    parser = argparse.ArgumentParser()
    parser.add_argument('--logging_level', default=20, type=int)
    parser.add_argument('--frame_range', default='[::]',
                        help='python style ranges, e.g. "[5::2]"')
    parser.add_argument('--in_db_file', required=True)
    parser.add_argument('--video_dir', required=True)
    parser.add_argument('--traffic_file',
                        required=True,
                        help='output .json file where to write traffic info. '
                        'Can be "traffic.json" in video output dir.')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--speed_kph', type=int)
    group.add_argument('--num_cars', type=int)
    args = parser.parse_args()

    setupLogging('log/augmentation/GenerateTraffic.log', args.logging_level,
                 'w')

    if not op.exists(atcity(op.dirname(args.traffic_file))):
        os.makedirs(atcity(op.dirname(args.traffic_file)))

    job = {
        'frame_range': args.frame_range,
        'in_db_file': args.in_db_file,
        'video_dir': args.video_dir,
        'out_video_dir': op.dirname(args.in_db_file)
    }
    if args.speed_kph is not None:
        setParamUnlessThere(job, 'speed_kph', args.speed_kph)
    elif args.num_cars is not None:
        setParamUnlessThere(job, 'num_cars', args.num_cars)
    else:
        assert False, 'either --speed_kph or --num_cars must be given'
Example #11
import sys, os, os.path as op
sys.path.insert(0, op.join(os.getenv('CITY_PATH'), 'src'))
import datetime
import cPickle
from learning.helperSetup import atcity

txt_file = 'data/augmentation/resources/sun_position.txt'
bin_file = 'data/augmentation/resources/sun_position.pkl'


with open(atcity(txt_file)) as f:
  lines = f.readlines()
  lines = lines[9:]  # skip the file header

positions = {}

for line in lines:
  (daystr, clockstr, altitude, azimuth) = line.split()
  (month, day) = daystr.split('/')
  (hour, minute) = clockstr.split(':')

  date = datetime.datetime(year=2015, month=int(month), day=int(day),
                           hour=int(hour), minute=int(minute))

  positions[date] = (float(altitude), float(azimuth))


with open(atcity(bin_file), 'wb') as f:
  cPickle.dump(positions, f, cPickle.HIGHEST_PROTOCOL)
Example #12
  '''Render a frame using the video's default background.
     Leave the frame at the default blender location.'''

  parser = argparse.ArgumentParser()
  parser.add_argument('--work_dir', required=True)
  parser.add_argument('--video_dir', required=True)
  parser.add_argument('--save_blender_files', action='store_true')
  parser.add_argument('--no_combine', action='store_true')
  parser.add_argument('--no_render', action='store_true')
  parser.add_argument('--no_annotations', action='store_true')
  parser.add_argument('--logging_level', default=20, type=int)
  parser.add_argument('--background_file',
                      help='if not given, take the default from video_dir')
  args = parser.parse_args()

  setupLogging('log/augmentation/ProcessFrame.log', args.logging_level, 'w')

  traffic = json.load(open(atcity(op.join(args.work_dir, 'traffic.json'))))
  traffic['save_blender_files'] = args.save_blender_files
  traffic['render_individual_cars'] = not args.no_annotations

  video = Video(video_dir=args.video_dir)
  camera = video.build_camera()

  if not args.no_render:
    render_frame (video, camera, traffic, work_dir=args.work_dir)
  if not args.no_combine:
    back_file = op.join(args.work_dir, 'background.png')
    background = cv2.imread(atcity(back_file))
    combine_frame (background, video, camera, work_dir=args.work_dir)
Example #13
    def __init__(self):
        sun_pose_file = 'data/augmentation/resources/sun_position.pkl'

        with open(atcity(sun_pose_file), 'rb') as f:
            self.data = cPickle.load(f)
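
The pickle maps datetime objects (year fixed to 2015) to (altitude, azimuth) tuples, as written by the conversion script above. A hedged sketch of a lookup helper; the method name and the sampling grid are assumptions, not part of the original class.

    def get_sun_position(self, time):
        # hypothetical helper: snap the query time to the table's year and
        # sampling grid, then look the entry up
        time = time.replace(year=2015, second=0, microsecond=0)
        minute = time.minute - time.minute % 10  # assumes a 10-minute grid
        return self.data[time.replace(minute=minute)]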
Example #14
def process_video(job):

    assertParamIsThere(job, 'video_dir')
    video = Video(video_dir=job['video_dir'])
    camera = video.build_camera()

    # some parameters
    assertParamIsThere(job, 'traffic_file')
    setParamUnlessThere(job, 'save_blender_files', False)
    setParamUnlessThere(
        job, 'out_video_dir',
        op.join('augmentation/video', 'cam%s' % camera.info['cam_id'],
                video.info['video_name']))
    setParamUnlessThere(job, 'no_annotations', False)
    setParamUnlessThere(job, 'timeout', 1000000000)
    setParamUnlessThere(job, 'frame_range', '[::]')
    job['render_individual_cars'] = not job['no_annotations']

    # load camera dimensions (compare it to everything for extra safety)
    width0 = camera.info['camera_dims']['width']
    height0 = camera.info['camera_dims']['height']

    # for checking timeout
    start_time = datetime.now()

    cad = Cad()

    # upload info on parsed vehicles to the monitor server
    monitor = None  # MonitorDatasetClient (cam_id=camera.info['cam_id'])

    # load traffic info
    traffic_video = json.load(open(atcity(job['traffic_file'])))

    # reader and writer
    video_reader = ReaderVideo()
    image_vfile = op.join(job['out_video_dir'], 'image.avi')
    mask_vfile = op.join(job['out_video_dir'], 'mask.avi')
    video_writer = SimpleWriter(image_vfile, mask_vfile, {'unsafe': True})

    (conn, c) = dbInit(traffic_video['in_db_file'],
                       op.join(job['out_video_dir'], 'traffic.db'))
    c.execute('SELECT imagefile,maskfile,width,height,time FROM images')
    image_entries = c.fetchall()
    c.execute('DELETE FROM images')

    #assert len(traffic_video['frames']) >= len(image_entries), \
    #  'traffic json is too small %d < %d' % (len(traffic_video['frames']), len(image_entries))

    diapason = Diapason(len(image_entries), job['frame_range'])

    num_processes = int(multiprocessing.cpu_count() / 2 + 1)
    pool = multiprocessing.Pool(processes=num_processes)

    # each frame_range chunk is processed in parallel
    for frame_range in diapason.frame_range_as_chunks(pool._processes):
        logging.info('chunk of frames %d to %d' %
                     (frame_range[0], frame_range[-1]))

        # quit, if reached the timeout
        time_passed = datetime.now() - start_time
        logging.info('passed: %s' % time_passed)
        if (time_passed.total_seconds() > job['timeout'] * 60):
            logging.warning('reached timeout %d. Passed %s' %
                            (job['timeout'], time_passed))
            break

        # collect frame jobs
        frame_jobs = []
        for frame_id in frame_range:

            (in_backfile, in_maskfile, width, height,
             _) = image_entries[frame_id]
            assert (width0 == width
                    and height0 == height), (width0, width, height0, height)
            logging.info('collect job for frame number %d' % frame_id)

            back = video_reader.imread(in_backfile)

            traffic = traffic_video['frames'][frame_id]
            assert traffic['frame_id'] == frame_id, '%d vs %d' % (
                traffic['frame_id'], frame_id)
            traffic['save_blender_files'] = job['save_blender_files']

            frame_jobs.append((video, camera, traffic, back, job))

        #for i, (out_image, out_mask, work_dir) in enumerate(sequentialworker(frame_jobs)):
        for i, (out_image, out_mask,
                work_dir) in enumerate(pool.imap(worker, frame_jobs)):
            frame_id = frame_range[i]
            logging.info('processed frame number %d' % frame_id)

            assert out_image is not None and out_mask is not None
            out_imagefile = video_writer.imwrite(out_image)
            out_maskfile = video_writer.maskwrite(out_mask)
            logging.info('out_imagefile: %s, out_maskfile: %s' %
                         (out_imagefile, out_maskfile))

            # update out database
            (_, _, width, height, time) = image_entries[frame_id]
            c.execute(
                'INSERT INTO images(imagefile,maskfile,width,height,time) VALUES (?,?,?,?,?)',
                (out_imagefile, out_maskfile, width, height, time))
            logging.info('wrote frame %d' % c.lastrowid)

            if not job['no_annotations']:
                extract_annotations(work_dir, c, cad, camera, out_imagefile,
                                    monitor)

            if not job['save_blender_files']:
                shutil.rmtree(work_dir)

            conn.commit()
    conn.close()

    pool.close()
    pool.join()
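
A minimal job for process_video; the paths are hypothetical. Only 'video_dir' and 'traffic_file' are required, everything else falls back to the defaults set above.

job = {
    'video_dir': 'data/camdata/cam101/video1',                # hypothetical
    'traffic_file': 'augmentation/video/cam101/traffic.json', # hypothetical
    'frame_range': '[:100]',
}
process_video(job)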
Example #15
def render_frame(video, camera, traffic):
    '''Write the traffic file for blender, and run blender with renderScene.py.
    All work happens in the current-frame dir.
    '''
    WORK_DIR = '%s-%d' % (WORK_RENDER_DIR, os.getpid())
    setParamUnlessThere(traffic, 'save_blender_files', False)
    setParamUnlessThere(traffic, 'render_individual_cars', True)
    unsharp_mask_params = {'radius': 4.7, 'threshold': 23, 'amount': 1}

    # load camera dimensions (compare it to everything for extra safety)
    width0 = camera.info['camera_dims']['width']
    height0 = camera.info['camera_dims']['height']
    logging.debug('camera width,height: %d,%d' % (width0, height0))

    image = None
    mask = None

    # pass traffic info to blender
    traffic['scale'] = camera.info['scale']
    traffic_path = op.join(WORK_DIR, TRAFFIC_FILENAME)
    if not op.exists(op.dirname(traffic_path)):
        os.makedirs(op.dirname(traffic_path))
    with open(traffic_path, 'w') as f:
        f.write(json.dumps(traffic, indent=4))

    # remove so that they do not exist if blender fails
    if op.exists(op.join(WORK_DIR, RENDERED_FILENAME)):
        os.remove(op.join(WORK_DIR, RENDERED_FILENAME))
    if op.exists(op.join(WORK_DIR, 'depth-all.png')):
        os.remove(op.join(WORK_DIR, 'depth-all.png'))
    # render
    assert video.render_blend_file is not None
    render_blend_path = atcity(video.render_blend_file)
    command = [
        '%s/blender' % os.getenv('BLENDER_ROOT'), render_blend_path,
        '--background', '--python',
        '%s/src/augmentation/renderScene.py' % os.getenv('CITY_PATH')
    ]
    logging.debug('WORK_DIR: %s' % WORK_DIR)
    logging.debug(' '.join(command))
    returncode = subprocess.call(command,
                                 shell=False,
                                 stdout=FNULL,
                                 stderr=FNULL)
    logging.info('rendering: blender returned code %s' % str(returncode))

    # check and sharpen rendered
    rendered_filepath = op.join(WORK_DIR, RENDERED_FILENAME)
    image = cv2.imread(rendered_filepath, -1)
    assert image is not None
    assert image.shape == (height0, width0, 4), image.shape
    image = unsharp_mask(image, unsharp_mask_params)
    cv2.imwrite(rendered_filepath, image)

    # check and sharpen cars-only
    carsonly_filepath = op.join(WORK_DIR, CARSONLY_FILENAME)
    image = cv2.imread(carsonly_filepath, -1)
    assert image is not None
    assert image.shape == (height0, width0, 4), image.shape
    image = unsharp_mask(image, unsharp_mask_params)
    shutil.move(carsonly_filepath, op.join(WORK_DIR, 'unsharpened.png'))
    cv2.imwrite(carsonly_filepath, image)

    # create mask
    if traffic['render_individual_cars']:
        mask = _get_masks(WORK_DIR, traffic)
        # TODO: visibility is returned via traffic file, NOT straightforward
        with open(traffic_path, 'w') as f:
            f.write(json.dumps(traffic, indent=4))

    # correction_path = op.join(WORK_DIR, CORRECTION_FILENAME)
    # if op.exists(correction_path): os.remove(correction_path)
    # if not params['no_correction']:
    #     correction_info = color_correction (video.example_background, background)
    #     with open(correction_path, 'w') as f:
    #         f.write(json.dumps(correction_info, indent=4))

    return image, mask