Example #1
    def __init__(self):
        self.current_profile_dxf = []

        Cad.__init__(self)

        # Parse command-line options; only '-h'/'--help' are registered.
        try:
            opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])
        except getopt.error as msg:
            print(msg)
            print("for help use --help")
            sys.exit(2)
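The snippet stops right after parsing. A minimal sketch of how the parsed options are usually handled in this getopt pattern (not part of the original snippet) could look like this:

        # Hypothetical continuation: act on the parsed options;
        # '-h'/'--help' are the only flags registered above.
        for opt, _ in opts:
            if opt in ('-h', '--help'):
                print(__doc__)  # module docstring as help text, if present
                sys.exit(0)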
Example #2
    def __init__(self):
        # wx.App.__init__ may redirect sys.stdout/sys.stderr to its own
        # window, so save the current streams and restore them afterwards.
        save_out = sys.stdout
        save_err = sys.stderr

        wx.App.__init__(self)

        sys.stdout = save_out
        sys.stderr = save_err

        Cad.__init__(self)

        heekscad.init()

        try:
            opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])
        except getopt.error as msg:
            print(msg)
            print("for help use --help")
            sys.exit(2)
def generate_video_traffic(job):
    '''Generate a traffic file for the whole video.

    Args:
      job - same dict as for process_video; job['in_db_file'] must list all
            the images for which traffic is generated.
    '''
    assertParamIsThere(job, 'in_db_file')
    assertParamIsThere(job, 'out_video_dir')
    setParamUnlessThere(job, 'frame_range', '[::]')
    assertParamIsThere(job, 'video_dir')

    video = Video(video_dir=job['video_dir'])
    camera = video.build_camera()

    assert op.exists(atcity(job['in_db_file'])), \
        'in db %s does not exist' % atcity(job['in_db_file'])
    conn_in = sqlite3.connect(atcity(job['in_db_file']))
    c_in = conn_in.cursor()
    c_in.execute('SELECT time FROM images')
    timestamps = c_in.fetchall()
    conn_in.close()

    cad = Cad()

    if 'speed_kph' in job:
        model = TrafficModel(camera,
                             video,
                             cad=cad,
                             speed_kph=job['speed_kph'])
    elif 'num_cars' in job:
        model = TrafficModelRandom(camera,
                                   video,
                                   cad=cad,
                                   num_cars_mean=job['num_cars'])
    else:
        assert False, "job must specify either 'speed_kph' or 'num_cars'"

    diapason = Diapason(len(timestamps), job['frame_range'])

    traffic = {'in_db_file': job['in_db_file']}
    traffic['frames'] = []

    for frame_id in diapason.frame_range:
        logging.info('generating traffic for frame %d' % frame_id)
        timestamp = timestamps[frame_id][0]
        time = parseTimeString(timestamp)
        traffic_frame = model.get_next_frame(time)
        traffic_frame['frame_id'] = frame_id  # for validating
        traffic['frames'].append(traffic_frame)

    return traffic
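A minimal sketch of how generate_video_traffic might be driven, with placeholder paths. The required job keys follow the asserts above ('in_db_file', 'video_dir', 'out_video_dir' plus either 'speed_kph' or 'num_cars'), and the result is written as JSON since process_video loads its 'traffic_file' with json.load:

import json

# Hypothetical job; every path is a placeholder.
job = {
    'in_db_file': 'augmentation/video/cam166/Feb23-09h/source.db',
    'video_dir': 'augmentation/scenes/cam166/Feb23-09h',
    'out_video_dir': 'augmentation/video/cam166/Feb23-09h',
    'frame_range': '[:100]',
    'speed_kph': 10,  # or pass 'num_cars' to use TrafficModelRandom instead
}

traffic = generate_video_traffic(job)

with open('traffic.json', 'w') as f:
    json.dump(traffic, f, indent=2)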
Example #5

if __name__ == "__main__":

    setupLogging('log/augmentation/traffic.log', logging.DEBUG, 'w')

    video_dir = 'augmentation/scenes/cam166/Feb23-09h'
    collection_names = [
        '7c7c2b02ad5108fe5f9082491d52810',
        'uecadcbca-a400-428d-9240-a331ac5014f6'
    ]
    timestamp = datetime.now()
    video = Video(video_dir)
    camera = video.build_camera()

    cad = Cad(collection_names)

    #model = TrafficModel (camera, video, cad=cad, speed_kph=10, burn_in=True)
    model = TrafficModelRandom(camera, video, cad, num_cars_mean=10)

    # cv2.imshow('lanesmap', model.generate_map())
    # cv2.waitKey(-1)
    while True:
        model.get_next_frame(timestamp)
        display = fit_image_to_screen(model.generate_map())
        cv2.imshow('lanesmap', display)
        key = cv2.waitKey(-1)
        if key == 27:  # Esc exits the preview loop
            break

    # traffic_path = op.join(WORK_RENDER_DIR, TRAFFIC_FILENAME)
    # with open(traffic_path, 'w') as f:
Example #6
def process_video(job):

    assertParamIsThere(job, 'video_dir')
    video = Video(video_dir=job['video_dir'])
    camera = video.build_camera()

    # some parameters
    assertParamIsThere(job, 'traffic_file')
    setParamUnlessThere(job, 'save_blender_files', False)
    setParamUnlessThere(
        job, 'out_video_dir',
        op.join('augmentation/video', 'cam%s' % camera.info['cam_id'],
                video.info['video_name']))
    setParamUnlessThere(job, 'no_annotations', False)
    setParamUnlessThere(job, 'timeout', 1000000000)
    setParamUnlessThere(job, 'frame_range', '[::]')
    job['render_individual_cars'] = not job['no_annotations']

    # load camera dimensions (compare it to everything for extra safety)
    width0 = camera.info['camera_dims']['width']
    height0 = camera.info['camera_dims']['height']

    # for checking timeout
    start_time = datetime.now()

    cad = Cad()

    # upload info on parsed vehicles to the monitor server
    monitor = None  # MonitorDatasetClient (cam_id=camera.info['cam_id'])

    # load traffic info
    with open(atcadillac(job['traffic_file'])) as f:
        traffic_video = json.load(f)

    # reader and writer
    video_reader = ReaderVideo()
    image_vfile = op.join(job['out_video_dir'], 'image.avi')
    mask_vfile = op.join(job['out_video_dir'], 'mask.avi')
    video_writer = SimpleWriter(image_vfile, mask_vfile, {'unsafe': True})

    (conn, c) = dbInit(traffic_video['in_db_file'],
                       op.join(job['out_video_dir'], 'traffic.db'))
    c.execute('SELECT imagefile,maskfile,width,height,time FROM images')
    image_entries = c.fetchall()
    c.execute('DELETE FROM images')

    #assert len(traffic_video['frames']) >= len(image_entries), \
    #  'traffic json is too small %d < %d' % (len(traffic_video['frames']), len(image_entries))

    diapason = Diapason(len(image_entries), job['frame_range'])

    num_processes = int(multiprocessing.cpu_count() / 2 + 1)
    pool = multiprocessing.Pool(processes=num_processes)

    # each frame_range chunk is processed in parallel
    for frame_range in diapason.frame_range_as_chunks(pool._processes):
        logging.info('chunk of frames %d to %d' %
                     (frame_range[0], frame_range[-1]))

        # quit if the timeout has been reached
        time_passed = datetime.now() - start_time
        logging.info('passed: %s' % time_passed)
        if (time_passed.total_seconds() > job['timeout'] * 60):
            logging.warning('reached timeout %d. Passed %s' %
                            (job['timeout'], time_passed))
            break

        # collect frame jobs
        frame_jobs = []
        for frame_id in frame_range:

            (in_backfile, in_maskfile, width, height,
             _) = image_entries[frame_id]
            assert (width0 == width
                    and height0 == height), (width0, width, height0, height)
            logging.info('collect job for frame number %d' % frame_id)

            back = video_reader.imread(in_backfile)

            traffic = traffic_video['frames'][frame_id]
            assert traffic['frame_id'] == frame_id, '%d vs %d' % (
                traffic['frame_id'], frame_id)
            traffic['save_blender_files'] = job['save_blender_files']

            frame_jobs.append((video, camera, traffic, back, job))

        #for i, (out_image, out_mask, work_dir) in enumerate(sequentialworker(frame_jobs)):
        for i, (out_image, out_mask,
                work_dir) in enumerate(pool.imap(worker, frame_jobs)):
            frame_id = frame_range[i]
            logging.info('processed frame number %d' % frame_id)

            assert out_image is not None and out_mask is not None
            out_imagefile = video_writer.imwrite(out_image)
            out_maskfile = video_writer.maskwrite(out_mask)
            logging.info('out_imagefile: %s, out_maskfile: %s' %
                         (out_imagefile, out_maskfile))

            # update out database
            (_, _, width, height, time) = image_entries[frame_id]
            c.execute(
                'INSERT INTO images(imagefile,maskfile,width,height,time) VALUES (?,?,?,?,?)',
                (out_imagefile, out_maskfile, width, height, time))
            logging.info('wrote frame %d' % c.lastrowid)

            if not job['no_annotations']:
                extract_annotations(work_dir, c, cad, camera, out_imagefile,
                                    monitor)

            if not job['save_blender_files']:
                shutil.rmtree(work_dir)

            conn.commit()
    conn.close()

    pool.close()
    pool.join()
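A minimal sketch of a driver for process_video, with placeholder paths. Only 'video_dir' and 'traffic_file' are mandatory; every other key falls back to the defaults installed by setParamUnlessThere above (the timeout is interpreted in minutes):

if __name__ == '__main__':
    # Placeholder log path, following the pattern of Example #5.
    setupLogging('log/augmentation/video.log', logging.INFO, 'w')

    # Hypothetical job; paths are placeholders.
    job = {
        'video_dir': 'augmentation/scenes/cam166/Feb23-09h',
        'traffic_file': 'augmentation/video/cam166/Feb23-09h/traffic.json',
        'frame_range': '[:100]',
        'no_annotations': False,
        'timeout': 120,  # minutes
    }
    process_video(job)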