Example #1
def main(cmd, dataset, imsize, visualize):
    imsize = parse_resolution(imsize)

    mask = Masker(dataset)

    if cmd == "findvids" or cmd == "continue":
        vidfolder = datasets_path / dataset / "videos"
        kltfolder = datasets_path / dataset / "klt"
        mkdir(kltfolder)

        allvids = list(vidfolder.glob('*.mkv'))
        allvids.sort()

        if cmd == "continue":
            existing = list(kltfolder.glob('*.pklz'))
            existing.sort()
            existing = [x.stem for x in existing]
            allvids = [x for x in allvids if x.stem not in existing]

        for vidpath in allvids:
            datpath = kltfolder / (vidpath.stem + '.pklz')
            if visualize:
                outvidpath = datpath.with_name(datpath.stem + '_klt.mp4')
                print_flush("{}   ->   {} & {}".format(vidpath, datpath,
                                                       outvidpath))
            else:
                outvidpath = None
                print_flush("{}   ->   {}".format(vidpath, datpath))

            klt_save(vidpath, datpath, imsize, mask, outvidpath)

        print_flush("Done!")
    else:
        raise ValueError("Unknown command: {}".format(cmd))
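
Example #1 calls parse_resolution, which is not shown in these examples. A minimal sketch, assuming the argument is a comma-separated string such as '(640,480,3)' (the exact input format is an assumption):

# Hedged sketch; the real parse_resolution may accept a different format.
def parse_resolution(s):
    """Turn a string like '(640,480,3)' or '640,480' into a tuple of ints."""
    return tuple(int(x) for x in s.strip('()').split(','))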
Example #2
def detect(dataset, run, res, conf, bs, clean):
    vids = list((datasets_path / dataset / "videos").glob('*.mkv'))
    vids.sort()

    outfolder = runs_path / "{}_{}".format(dataset, run) / "csv"
    mkdir(outfolder)

    nvids = len(vids)

    for i, vid in enumerate(vids):
        vname = vid.stem
        outname = outfolder / (vname + '.csv')

        if not clean:
            if outname.is_file():
                print_flush("Skipping {}".format(outname))
                continue

        before = time()

        print_flush(vname)
        run_detector(dataset, run, vid, outname, res, conf, bs)

        done_percent = round(100 * (i + 1) / nvids)
        now = time()
        mins = floor((now - before) / 60)
        secs = round(now - before - 60 * mins)
        print_flush("{}  {}% done, time: {} min {} seconds".format(
            vid, done_percent, mins, secs))

    print_flush("Done!")
Example #3
def detect(dataset, run, res, conf, bs, clean):

    vids = sorted(glob("{}{}/videos/*.mkv".format(datasets_path, dataset)))

    outfolder = "{}{}_{}/csv/".format(runs_path, dataset, run)
    mkdir(outfolder)

    nvids = len(vids)

    for i, vid in enumerate(vids):
        vname = vid.split('/')[-1]
        vsplit = vname.split('.')
        outname = outfolder + vsplit[0] + '.csv'

        if not clean:
            if os.path.isfile(outname):
                print_flush("Skipping {}".format(outname))
                continue

        before = time()

        print_flush(vname)
        run_detector(dataset, run, vid, outname, res, conf, bs)

        done_percent = round(100 * (i + 1) / nvids)
        now = time()
        mins = floor((now - before) / 60)
        secs = round(now - before - 60 * mins)
        print_flush("{}  {}% done, time: {} min {} seconds".format(
            vid, done_percent, mins, secs))

    print_flush("Done!")
Example #4
def parse(basepath, dataset, resolution):
    """ Parses a dataset for data frames CSV files and draws the detections.
        This is used for showing object detector results on the validation set used 
        during training. 
        Arguments:
        basepath -- path to folder with CSV files
        dataset  -- name of the dataset used, used for finding the correct mask
    """
    colors = class_colors()
    masker = Masker(dataset)

    csvpath = basepath / 'detections_0.csv'
    res = pd.read_csv(csvpath)

    outpath = basepath / 'visualize'
    if not outpath.is_dir():
        mkdir(outpath)
    else:
        old_files = list(outpath.glob('*'))
        for old in old_files:
            old.unlink()

    files = res['filename'].unique()
    for i, filename in enumerate(files):
        df = res.loc[res['filename'] == filename]
        impath = df['filename'].iloc[0]
        im = cv2.imread(impath)
        im = cv2.resize(im, (resolution[0], resolution[1]))
        im = masker.mask(im)
        im = draw(im, df, colors)
        outfilepath = outpath / '{}_{}'.format(1 + i, Path(filename).name)
        cv2.imwrite(str(outfilepath), im)
        print(outfilepath)
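
class_colors and draw are project helpers not shown here. A rough sketch of draw, assuming the data frame carries the pixel box columns used elsewhere in these examples ('xmin', 'xmax', 'ymin', 'ymax') plus a class column; the 'class_name' column name is an assumption:

import cv2

def draw(im, df, colors):
    # Draw one labelled rectangle per detection row (sketch only).
    for _, row in df.iterrows():
        color = colors.get(row['class_name'], (255, 255, 255))  # 'class_name' is assumed
        p1 = (int(row['xmin']), int(row['ymin']))
        p2 = (int(row['xmax']), int(row['ymax']))
        cv2.rectangle(im, p1, p2, color, 2)
        cv2.putText(im, str(row['class_name']), (p1[0], p1[1] - 4),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)
    return im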
Example #5
def main(cmd, dataset, run, conf, make_videos):   
    if make_videos:
        from visualize_tracking import render_video
        from config import DatasetConfig
        from apply_mask import Masker
        
        mask = Masker(dataset)
        dc = DatasetConfig(dataset)
        
    config_path = "{rp}{ds}_{rn}/world_tracking_optimization.pklz".format(rp=runs_path, ds=dataset, rn=run)
    if isfile(config_path):
        config = load(config_path)
    else:
        #raise(ValueError("No world tracking optimized configuration exists at {}".format(config_path)))
        config = WorldTrackingConfig(default_config)
    
    calib = Calibration(dataset)    
    munkres = Munkres()
    ts = Timestamps(dataset)
    
    start_stop = None
    
    if cmd == "findvids":
        from glob import glob
        vidnames = glob('{dsp}{ds}/videos/*.mkv'.format(dsp=datasets_path, ds=dataset))
        vidnames = [right_remove(x.split('/')[-1], '.mkv') for x in vidnames]
        vidnames.sort()
        
        outfolder = '{}{}_{}/tracks_world/'.format(runs_path, dataset, run)
        mkdir(outfolder)
    else:
        vidnames = [cmd]
        outfolder = './'
        start_stop = (0,500)
            
    for v in vidnames:
        print_flush(v)    
        out_path = "{of}{v}_tracks.pklz".format(of=outfolder, v=v)
        
        print_flush("Loading data...")
        det_path = "{rp}{ds}_{rn}/detections_world/{v}_world.csv".format(rp=runs_path, ds=dataset, rn=run, v=v)
        detections3D = pd.read_csv(det_path)
        
        klt_path = det_path.replace('.csv', '_klt.pklz')
        klts = load(klt_path)
        
        print_flush("Tracking...")
        tracks = make_tracks(dataset, v, detections3D, klts, munkres, ts, calib, config, start_stop=start_stop)
        
        print_flush("Saving tracks...")
        save(tracks, out_path)
        
        if make_videos:

            vidpath = "{dsp}{ds}/videos/{v}.mkv".format(dsp=datasets_path, ds=dataset, v=v)
            print_flush("Rendering video...")
            render_video(tracks, vidpath, out_path.replace('.pklz','.mp4'), calib=calib, mask=mask, fps=dc.get('video_fps'))

    print_flush("Done!")
Example #6
def main(cmd, dataset, run, conf, make_videos):   
    from pathlib import Path
    
    if make_videos:
        from visualize_tracking import render_video
        from config import DatasetConfig
        from apply_mask import Masker
        
        mask = Masker(dataset)
        dc = DatasetConfig(dataset)
        
    config_path = runs_path / "{}_{}".format(dataset,run) / "world_tracking_optimization.pklz"
    if config_path.is_file():
        config = load(config_path)
    else:
        #raise(ValueError("No world tracking optimized configuration exists at {}".format(config_path)))
        config = WorldTrackingConfig(default_config)
    
    calib = Calibration(dataset)    
    munkres = Munkres()
    ts = Timestamps(dataset)
    
    start_stop = None
    
    if cmd == "findvids":
        vidnames = (datasets_path / dataset / "videos").glob('*.mkv')
        vidnames = [x.stem for x in vidnames]
        vidnames.sort()
        
        outfolder = runs_path / "{}_{}".format(dataset,run) / "tracks_world"
        mkdir(outfolder)
    else:
        vidnames = [cmd]
        outfolder = Path('./')
        start_stop = (0,500)
            
    for v in vidnames:
        print_flush(v) 
        out_path = outfolder / (v+'_tracks.pklz')   
        
        print_flush("Loading data...")
        det_path = runs_path / "{}_{}".format(dataset,run) / "detections_world" / (v+'_world.csv')
        detections3D = pd.read_csv(det_path)
        
        klt_path = det_path.with_name(det_path.stem + '_klt.pklz')
        klts = load(klt_path)
        
        print_flush("Tracking...")
        tracks = make_tracks(dataset, v, detections3D, klts, munkres, ts, calib, config, start_stop=start_stop)
        
        print_flush("Saving tracks...")
        save(tracks, out_path)
        
        if make_videos:            
            vidpath = datasets_path / dataset / "videos" / (v+'.mkv')
            print_flush("Rendering video...")
            render_video(tracks, vidpath, out_path.with_suffix('.mp4'), calib=calib, mask=mask, fps=dc.get('video_fps'))

    print_flush("Done!")
Example #7
def get_pretrained_weights():
    path = ssd_path / 'weights_SSD300.hdf5'
    if path.is_file():
        return ("Already present", 200)
    else:
        url = 'https://github.com/hakanardo/weights/raw/d2243707493e2e5f94c465b6248558ee16c90be6/weights_SSD300.hdf5'
        mkdir(ssd_path)
        os.system("wget -O %s '%s'" % (path, url))
    if not path.is_file():
        return ("Download failed", 500)
    if validate_pretrained_md5(path):
        return ("Downloaded", 200)
    else:
        path.unlink()
        return ("File rejected", 500)
Example #8
def post_dataset(dataset_name, class_names, class_heights):
    if ' ' in dataset_name:
        return ("Spaces are not allowed in dataset names!", 500)
        
    dataset_name = quote(dataset_name)
    path = "{}{}/".format(datasets_path, dataset_name)
    mkdir(path)
    mkdir(path + 'videos')

    class_names = [quote(x.lower()) for x in class_names.split(',')]
    class_heights = map(float, class_heights.split(','))
    class_data = [{'name': n, 'height': h} for n, h in zip(class_names, class_heights)]
    set_class_data(dataset_name, class_data)

    return (NoContent, 200)
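
set_class_data persists the per-class metadata. A sketch assuming it writes JSON inside the dataset folder; the classes.json filename is an assumption, and datasets_path is the same module-level path the other examples import from folder:

import json
from folder import datasets_path  # same helper module used by the examples

def set_class_data(dataset_name, class_data):
    # Store e.g. [{'name': 'car', 'height': 1.5}, ...] in the dataset folder.
    with open("{}{}/classes.json".format(datasets_path, dataset_name), 'w') as f:
        json.dump(class_data, f, indent=2)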
Example #9
def main(cmd, res, dataset, run, conf, fps, coords):
    res = parse_resolution(res)
    classnames = get_classnames(dataset)
    
    local_output = False
    csvs = []
    if cmd == "findvids":
        if coords == "pixels":
            found = (runs_path / "{}_{}".format(dataset,run) / "csv").glob('*.csv')
        elif coords == "world":
            found = (runs_path / "{}_{}".format(dataset,run) / "detections_world").glob('*.csv')
            
        found = list(found)
        found.sort()
        csvs.extend(found)
    else:
        csvs.append(cmd)
        local_output = True
    
    if coords == "pixels":
        out_folder = runs_path / "{}_{}".format(dataset,run) / "detections"
    elif coords == "world":
        out_folder = runs_path / "{}_{}".format(dataset,run) / "detections_world"
        
    mkdir(out_folder)
    
    for csv_path in csvs:
        vidname = csv_path.stem
        if coords == "world":
            vidname = right_remove(vidname, '_world')
        
        vid_path = datasets_path / dataset / "videos" / (vidname+'.mkv')    

        if local_output:
            outvid_path = Path('.') / '{}.mp4'.format(vidname)
        else:
            outvid_path = out_folder / '{}.mp4'.format(vidname)        
        
        detections = pd.read_csv(csv_path)
        detections_video(detections, vid_path, outvid_path, classnames, dataset, res, fps=fps, conf_thresh=conf, coords=coords)
        print_flush(outvid_path)
    
    print_flush("Done!")
Example #10
def main(cmd, res, dataset, run, conf, fps, coords):
    res = parse_resolution(res)
    classnames = get_classnames(dataset)
    
    local_output = False
    csvs = []
    if cmd == "findvids":
        if coords == "pixels":
            query = "{rp}{ds}_{r}/csv/*.csv".format(rp=runs_path, ds=dataset, r=run)
        elif coords == "world":
            query = "{rp}{ds}_{r}/detections_world/*.csv".format(rp=runs_path, ds=dataset, r=run)
            
        found = glob(query)
        found.sort()
        csvs.extend(found)
    else:
        csvs.append(cmd)
        local_output = True
    
    if coords == "pixels":
        out_folder = '{rp}{ds}_{r}/detections/'.format(rp=runs_path, ds=dataset, r=run)
    elif coords == "world":
        out_folder = '{rp}{ds}_{r}/detections_world/'.format(rp=runs_path, ds=dataset, r=run)
        
    mkdir(out_folder)
    
    for csv_path in csvs:
        vidname = right_remove(csv_path.split('/')[-1], '.csv')
        if coords == "world":
            vidname = right_remove(vidname, '_world')
            
        vid_path = "{dsp}{ds}/videos/{v}.mkv".format(dsp=datasets_path, ds=dataset, v=vidname)

        if local_output:
            outvid_path = '{}.mp4'.format(vidname)
        else:
            outvid_path = '{}{}.mp4'.format(out_folder, vidname)        
        
        detections = pd.read_csv(csv_path)
        detections_video(detections, vid_path, outvid_path, classnames, dataset, res, fps=fps, conf_thresh=conf, coords=coords)
        print_flush(outvid_path)
    
    print_flush("Done!")
Example #11
def generate_tracks_in_zip(dataset, run, tf, coords):
    assert tf in all_track_formats

    tracks_format = tf
    if coords == 'pixels':
        tracks = glob("{rp}{dn}_{rn}/tracks/*.pklz".format(rp=runs_path,
                                                           dn=dataset,
                                                           rn=run))
    elif coords == 'world':
        tracks = glob("{rp}{dn}_{rn}/tracks_world/*.pklz".format(rp=runs_path,
                                                                 dn=dataset,
                                                                 rn=run))
    else:
        raise (ValueError("Incorrect coordinate system: {}".format(coords)))

    tracks.sort()

    zips_folder = "{rp}{dn}_{rn}/track_zips/".format(rp=runs_path,
                                                     dn=dataset,
                                                     rn=run)
    mkdir(zips_folder)

    zip_path = "{zf}{tf}.zip".format(zf=zips_folder, tf=tracks_format)
    if coords == 'world':
        zip_path = zip_path.replace('.zip', '_world.zip')

    with ZipFile(zip_path, mode='w', compression=ZIP_DEFLATED) as z:
        for t in tracks:
            tname = t.split('/')[-1]
            print_flush(tname)

            text = format_tracks_from_file(t, tracks_format, coords)

            suffix = '.txt'
            if tracks_format == 'csv':
                suffix = '.csv'
            z.writestr(tname.replace('.pklz', suffix), text)

    print_flush("Done!")
    return zip_path
Example #12
def main(cmd, dataset, imsize, visualize):
    imsize = parse_resolution(imsize)

    mask = Masker(dataset)

    if cmd == "findvids" or cmd == "continue":
        vidfolder = "{}{}/videos/".format(datasets_path, dataset)
        kltfolder = "{}{}/klt/".format(datasets_path, dataset)
        mkdir(kltfolder)

        allvids = sorted(glob(vidfolder + "*.mkv"))

        if cmd == "continue":
            existing = sorted(glob(kltfolder + "*.pklz"))
            existing = [
                right_remove(x.split('/')[-1], '.pklz') for x in existing
            ]
        allvids = [
            x for x in allvids
            if right_remove(x.split('/')[-1], '.mkv') not in existing
        ]

        for vidpath in allvids:
            datpath = kltfolder + vidpath.split('/')[-1].replace(
                '.mkv', '.pklz')
            if visualize:
                outvidpath = datpath.replace('.pklz', '_klt.mp4')
                print_flush("{}   ->   {} & {}".format(vidpath, datpath,
                                                       outvidpath))
            else:
                outvidpath = None
                print_flush("{}   ->   {}".format(vidpath, datpath))

            klt_save(vidpath, datpath, imsize, mask, outvidpath)

        print_flush("Done!")
    else:
        raise ValueError("Unknown command: {}".format(cmd))
Example #13
def gen_images(outbasepath, vidpath, n):
    """ Pick n images evenly spread out over the video """

    folder = outbasepath / vidpath.stem
    mkdir(folder)

    with io.get_reader(vidpath) as vid:
        nframes = vid.get_length()

        # Avoid the edges of the video
        fnums = np.linspace(0, nframes, n + 2)
        fnums = [int(x) for x in fnums[1:n + 1]]

        # Log files allow these to be recreated, if necessary.
        # These logs are used when mining rare classes, to avoid annotating too close to existing annotations
        with (folder / "frames.log").open('w') as f:
            f.write(vidpath.stem + "\n")
            for fn in fnums:
                f.write("{} ".format(fn))

        for i, fn in enumerate(fnums):
            frame = vid.get_data(fn)
            imsave(folder / "{}.jpg".format(i + 1), frame)
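
A usage sketch, assuming outbasepath and vidpath are pathlib.Path objects (which the / operator and .stem calls imply); the video path below is hypothetical:

from pathlib import Path

# Extract 10 evenly spaced frames from one video into images/<video stem>/
gen_images(Path('images'), Path('videos/example.mkv'), 10)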
Example #14
def generate_tracks_in_zip(dataset, run, tf, coords):
    assert tf in all_track_formats

    tracks_format = tf
    if coords == 'pixels':
        tracks = (runs_path / "{}_{}".format(dataset, run) /
                  "tracks").glob('*.pklz')
    elif coords == 'world':
        tracks = (runs_path / "{}_{}".format(dataset, run) /
                  "tracks_world").glob('*.pklz')
    else:
        raise (ValueError("Incorrect coordinate system: {}".format(coords)))

    tracks = list(tracks)
    tracks.sort()

    zips_folder = runs_path / "{}_{}".format(dataset, run) / "track_zips"
    mkdir(zips_folder)

    zip_path = zips_folder / (tracks_format + '.zip')
    if coords == 'world':
        zip_path = zip_path.with_name(zip_path.stem + '_world.zip')

    with ZipFile(str(zip_path), mode='w', compression=ZIP_DEFLATED) as z:
        for t in tracks:
            tname = t.name
            print_flush(tname)

            text = format_tracks_from_file(t, tracks_format, coords)

            suffix = '.txt'
            if tracks_format == 'csv':
                suffix = '.csv'
            z.writestr(tname.replace('.pklz', suffix), text)

    print_flush("Done!")
    return zip_path
Example #15
def main(cmd, dataset, run, vidres, ssdres, kltres, make_videos):
    vidres = parse_resolution(vidres)
    ssdres = parse_resolution(ssdres)
    kltres = parse_resolution(kltres)

    x_factor = float(vidres[0]) / ssdres[0]
    y_factor = float(vidres[1]) / ssdres[1]
    det_dims = ('xmin', 'xmax', 'ymin', 'ymax')
    det_factors = (x_factor, x_factor, y_factor, y_factor)

    calib = Calibration(dataset)
    ts = Timestamps(dataset)
    class_data = get_class_data(dataset)
    class_heights = {d['name']: d['height'] for d in class_data}

    class KLTConfig(object):
        klt_x_factor = 0
        klt_y_factor = 0

    klt_config = KLTConfig()
    klt_config.klt_x_factor = vidres[0] / kltres[0]
    klt_config.klt_y_factor = vidres[1] / kltres[1]

    if cmd == "findvids":
        from glob import glob
        vidnames = glob('{dsp}{ds}/videos/*.mkv'.format(dsp=datasets_path,
                                                        ds=dataset))
        vidnames = [right_remove(x.split('/')[-1], '.mkv') for x in vidnames]
        vidnames.sort()

        outfolder = '{}{}_{}/detections_world/'.format(runs_path, dataset, run)
        mkdir(outfolder)
    else:
        vidnames = [cmd]
        outfolder = './'

    mkdir(outfolder)

    if make_videos:
        classnames = get_classnames(dataset)
        dc = DatasetConfig(dataset)
        fps = dc.get('video_fps')

    for v in vidnames:
        print_flush(v)
        detections = pd.read_csv('{}{}_{}/csv/{}.csv'.format(
            runs_path, dataset, run, v))

        # Convert pixel coordinate positions from SSD resolution to video resolution
        # because Calibration assumes video resolution coordinates
        for dim, factor in zip(det_dims, det_factors):
            detections[dim] = round(detections[dim] * factor).astype(int)

        print_flush("Converting point tracks...")
        klt = load('{}{}/klt/{}.pklz'.format(datasets_path, dataset, v))
        klt, klt_frames = convert_klt(klt, klt_config)
        pts = PointTrackStructure(klt, klt_frames, vidres[0], vidres[1])

        outpath = '{of}{v}_world.csv'.format(of=outfolder, v=v)

        print_flush("Converting to world coordinates...")
        detections3D = detections_to_3D(detections,
                                        pts,
                                        calib,
                                        ts,
                                        v,
                                        klt_save_path=outpath.replace(
                                            '.csv', '_klt.pklz'),
                                        class_heights=class_heights)

        detections3D.to_csv(outpath, float_format='%.4f')

        if make_videos:
            from visualize_detections import detections_video
            vidpath = "{dsp}{ds}/videos/{v}.mkv".format(dsp=datasets_path,
                                                        ds=dataset,
                                                        v=v)

            print_flush("Rendering video...")
            detections_video(detections3D,
                             vidpath,
                             outpath.replace('.csv', '.mp4'),
                             classnames,
                             dataset,
                             vidres,
                             fps=fps,
                             conf_thresh=0.0,
                             coords='world')

    print_flush("Done!")
Example #16
def main(cmd, dataset, run, vidres, ssdres, kltres, make_videos):
    vidres = parse_resolution(vidres)
    ssdres = parse_resolution(ssdres)
    kltres = parse_resolution(kltres)

    x_factor = float(vidres[0]) / ssdres[0]
    y_factor = float(vidres[1]) / ssdres[1]
    det_dims = ('xmin', 'xmax', 'ymin', 'ymax')
    det_factors = (x_factor, x_factor, y_factor, y_factor)

    calib = Calibration(dataset)
    ts = Timestamps(dataset)
    class_data = get_class_data(dataset)
    class_heights = {d['name']: d['height'] for d in class_data}

    class KLTConfig(object):
        klt_x_factor = 0
        klt_y_factor = 0

    klt_config = KLTConfig()
    klt_config.klt_x_factor = vidres[0] / kltres[0]
    klt_config.klt_y_factor = vidres[1] / kltres[1]

    if cmd == "findvids":
        vidnames = list((datasets_path / dataset / "videos").glob('*.mkv'))
        vidnames = [x.stem for x in vidnames]
        vidnames.sort()

        outfolder = runs_path / '{}_{}'.format(dataset,
                                               run) / 'detections_world'
        mkdir(outfolder)
    else:
        vidnames = [cmd]
        outfolder = Path('.')

    mkdir(outfolder)

    if make_videos:
        classnames = get_classnames(dataset)
        dc = DatasetConfig(dataset)
        fps = dc.get('video_fps')

    for v in vidnames:
        print_flush(v)
        detections = pd.read_csv(runs_path / '{}_{}'.format(dataset, run) /
                                 'csv' / (v + '.csv'))

        # Convert pixel coordinate positions from SSD resolution to video resolution
        # because Calibration assumes video resolution coordinates
        for dim, factor in zip(det_dims, det_factors):
            detections[dim] = round(detections[dim] * factor).astype(int)

        print_flush("Converting point tracks...")
        klt = load(datasets_path / dataset / 'klt' / (v + '.pklz'))
        klt, klt_frames = convert_klt(klt, klt_config)
        pts = PointTrackStructure(klt, klt_frames, vidres[0], vidres[1])

        outpath = outfolder / '{v}_world.csv'.format(v=v)

        print_flush("Converting to world coordinates...")
        detections3D = detections_to_3D(
            detections,
            pts,
            calib,
            ts,
            v,
            klt_save_path=outpath.with_name(outpath.stem + '_klt.pklz'),
            class_heights=class_heights)

        detections3D.to_csv(outpath, float_format='%.4f')

        if make_videos:
            from visualize_detections import detections_video
            vidpath = datasets_path / dataset / "videos" / "{}.mkv".format(v)

            print_flush("Rendering video...")
            detections_video(detections3D,
                             vidpath,
                             outpath.with_suffix('.mp4'),
                             classnames,
                             dataset,
                             vidres,
                             fps=fps,
                             conf_thresh=0.0,
                             coords='world')

    print_flush("Done!")
Example #17
def import_videos(query, dataset, resolution, fps, suffix, method, logs, minutes):   
    
    assert suffix == '.mkv'

    logs = Path(logs)
    assert logs.is_dir()
    
    if method == "imageio":
        encode = encode_imageio
    elif method == "handbrake":
        encode = encode_handbrake
    else:
        raise(ValueError("Incorrect method {}".format(method)))

    resolution = parse_resolution(resolution)
    width, height = resolution[0:2]
    
    target = datasets_path / dataset / "videos"
    mkdir(target)
    
    logs_target = datasets_path / dataset / "logs"
    mkdir(logs_target)
    
    files = glob(query)
    files.sort()
    files = [Path(x) for x in files]

    if minutes == 0:
        for path in files:
            video_name = path.stem
            
            src_log_path = logs / (video_name + '.log')
            with src_log_path.open('r') as f:
                first = f.readline().rstrip()
            first_time, _ = line_to_datetime(first)
            target_path, target_log_path = generate_paths(first_time, target, logs_target, suffix)
            
            print_flush(target_path)
            
            encode(path, target_path, width, height, fps)
            
            if validate_logfile(src_log_path):
                copy(str(src_log_path), str(target_log_path)) # python 3.5 and earlier compatibility
                print_flush("Log file OK! {}".format(src_log_path))
            else:
                raise(ValueError("Incorrect log file {}".format(src_log_path)))
    else:
        if method == "handbrake":
            # Recoding videos using handbrake into new clips of different lengths, based on log files,
            # would be cumbersome to implement. Therefore, we instead first recode every video with
            # handbrake and then use imageio to recode the videos again into the desired length. This
            # should still provide handbrake's robustness to strange videos, even though this solution is slow.
            tmp_folder = Path("/data/tmp_import/")
            if tmp_folder.is_dir():
                rmtree(str(tmp_folder))
            mkdir(tmp_folder)
            
            for i,path in enumerate(files):
                print_flush("Handbraking {} ...".format(path))
                video_name = path.stem
                src_log_path = logs / (video_name + '.log')
                
                target_path = tmp_folder / (str(i) + suffix)
                target_log_path = tmp_folder / (str(i) + '.log')
                
                if validate_logfile(src_log_path):
                    copy(str(src_log_path), str(target_log_path))
                else:
                    raise(ValueError("Incorrect log file {}".format(src_log_path)))
                
                encode(path, target_path, width, height, fps)
            
            files = list(tmp_folder.glob('*' + suffix))
            files.sort()
            logs = tmp_folder
            print_flush("Handbrake section complete")

        recode_minutes_imageio(files, logs, minutes, width, height, fps, target, logs_target, suffix)
        
        if method == "handbrake":
            rmtree(str(tmp_folder))
                    
    print_flush("Done!")       
Example #18
def __init__(self, dataset=None, run=None):
    run_path = runs_path / "{}_{}".format(dataset, run)
    mkdir(run_path)
    self.filepath = run_path / "config.txt"
    super().__init__(self.filepath)
Example #19
def __init__(self):
    mkdir(jobs_path)
Example #20
def main(batch_size, max_images, epochs, name, import_datasets, frozen_layers,
         experiment, train_data_dir, input_shape, image_shape, memory_fraction,
         do_crop):
    from keras.backend.tensorflow_backend import set_session
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = memory_fraction
    set_session(tf.Session(config=config))

    run_name = "{}_{}".format(name, experiment)

    input_shape = parse_resolution(input_shape)
    image_shape = parse_resolution(image_shape)

    load_detections = LoadDetections()
    session = tf.Session()
    K.set_session(session)
    log('Started TensorFlow session')
    log('Chosen input_shape is {}'.format(input_shape))
    detections_file = runs_path / run_name / "detections.pickle"
    mkdir(runs_path / run_name)

    logging.basicConfig(filename=str(runs_path / run_name / "trainlog.log"),
                        level=logging.INFO)

    try:
        githash = subprocess.check_output(['git', 'rev-parse', 'HEAD'
                                           ]).strip()[0:6].decode('utf-8')
        log("Git hash: {}".format(githash))
    except subprocess.CalledProcessError:
        pass

    log('Loading detections')

    datasets = [name]
    if import_datasets:
        datasets.extend(import_datasets.split(','))
        log('Using these datasets: ' + str(datasets))

    detections = load_detections.custom(datasets)

    log('Detections loaded')
    log('Calculating image properties')
    detections = detections.reset_index(drop=True)
    image_props = get_image_props(detections)
    log('Image properties created')

    log('Adding y_true to detections')
    detections = detections_add_ytrue(detections, image_props, name)

    detections.index = detections.image_file
    print(' ')
    print('Detection frequencies:')
    print(detections.type.value_counts())
    print(' ')
    classes = get_classnames(name)  #sorted(detections.type.unique())
    num_classes = len(classes) + 1

    log('Loading priors')

    keys = sorted(detections.image_file.unique())
    random.shuffle(keys)
    if max_images > 0:
        keys = keys[:max_images]
    shuffle(keys)
    num_train = int(round(0.9 * len(keys)))
    if num_train == len(keys):
        num_train -= 1
    train_keys = keys[:num_train]
    val_keys = keys[num_train:]
    train_keys_file = runs_path / run_name / "train_keys.pickle"
    log('Saving training keys to: {}'.format(train_keys_file))
    pickle.dump(train_keys, train_keys_file.open('wb'))
    val_keys_file = runs_path / run_name / "val_keys.pickle"
    log('Saving validation keys to: {}'.format(val_keys_file))
    pickle.dump(val_keys, val_keys_file.open('wb'))

    log('Loading model')
    model = SSD300((input_shape[1], input_shape[0], input_shape[2]),
                   num_classes=num_classes)
    model.load_weights(ssd_path / "weights_SSD300.hdf5", by_name=True)

    log('Generating priors')
    im_in = np.random.random(
        (1, input_shape[1], input_shape[0], input_shape[2]))
    priors = model.predict(im_in, batch_size=1)[0, :, -8:]
    bbox_util = BBoxUtility(num_classes, priors)

    generator_kwargs = {
        'saturation_var': 0.5,
        'brightness_var': 0.5,
        'contrast_var': 0.5,
        'lighting_std': 0.5,
        'hflip_prob': 0.5,
        'vflip_prob': 0,
        'do_crop': do_crop,
        'crop_area_range': [0.1, 1.0],
        'aspect_ratio_range': [0.5, 2]
    }

    path_prefix = ''
    gen = Generator(detections, bbox_util, batch_size, path_prefix, train_keys,
                    val_keys, (input_shape[1], input_shape[0]),
                    **generator_kwargs)

    # freeze several layers
    # freeze = []
    freeze = [
        ['input_1', 'conv1_1', 'conv1_2', 'pool1'],
        ['conv2_1', 'conv2_2', 'pool2'],
        ['conv3_1', 'conv3_2', 'conv3_3', 'pool3'],
        ['conv4_1', 'conv4_2', 'conv4_3', 'pool4'],
        ['conv5_1', 'conv5_2', 'conv5_3', 'pool5'],
    ][:min(frozen_layers, 5)]
    # Flatten the groups so layer names can be matched directly below;
    # checking a string against the nested list would never match.
    freeze = [name for group in freeze for name in group]

    for L in model.layers:
        if L.name in freeze:
            L.trainable = False
    mkdir(runs_path / run_name / "checkpoints")
    shutil.rmtree(str(runs_path / run_name / "logs"), ignore_errors=True)
    mkdir(runs_path / run_name / "logs")

    callbacks = [
        ModelCheckpoint(str(runs_path / run_name / 'checkpoints') +
                        '/weights.{epoch:02d}-{val_loss:.2f}.hdf5',
                        verbose=2,
                        save_weights_only=True),
        TensorBoard(log_dir=str(runs_path / run_name / "logs"),
                    write_graph=False),
        LearningRateScheduler(schedule)
    ]

    optim = keras.optimizers.Adam(lr=BASE_LR / 10)
    # optim = keras.optimizers.RMSprop(lr=BASE_LR / 10)
    model.compile(optimizer=optim,
                  loss=MultiboxLoss(num_classes,
                                    neg_pos_ratio=2.0).compute_loss)

    log('Running model')
    history = model.fit_generator(gen.generate(True),
                                  steps_per_epoch=gen.train_batches,
                                  epochs=epochs,
                                  verbose=2,
                                  callbacks=callbacks,
                                  validation_data=gen.generate(False),
                                  validation_steps=gen.val_batches,
                                  workers=1)
    log('Done training model')
    session.close()
    log('Session closed, starting with writing results')
    results = pd.DataFrame(history.history).unstack().reset_index(0)
    results = results.rename(columns={'level_0': 'type', 0: 'value'})

    x1 = []
    y1 = []
    x2 = []
    y2 = []
    for row in pandas_loop(results):
        if row['type'] == 'loss':
            x1.append(row['_'])
            y1.append(row['value'])
        elif row['type'] == 'val_loss':
            x2.append(row['_'])
            y2.append(row['value'])

    plot_path = runs_path / run_name / "training.png"
    multi_plot([x1, x2], [y1, y2],
               plot_path,
               xlabel='epochs',
               ylabel='loss',
               title='Training',
               legend=['loss', 'validation loss'])

    results.to_csv(runs_path / run_name / "results.csv")

    log('Cleaning up non-optimal weights...')
    cleanup(name, experiment)

    log('Finished TensorFlow session')
    print_flush('Done!')
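
The schedule function handed to LearningRateScheduler is not included here. A typical step-decay sketch, assuming BASE_LR is the same module-level constant used when building the optimizer (the decay factor and interval are assumptions):

def schedule(epoch):
    # Hypothetical step decay: start at BASE_LR / 10 and halve every 20 epochs.
    return (BASE_LR / 10) * (0.5 ** (epoch // 20))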
Example #21
def main(cmd, dataset, run, vidres, ssdres, kltres, conf, make_videos):
    from storage import load, save
    from folder import datasets_path, runs_path

    mask = Masker(dataset)
    #v = '20170516_163607_4C86'
    #v = '20170516_121024_A586'

    if cmd == "findvids":
        from glob import glob
        vidnames = glob('{}{}/videos/*.mkv'.format(datasets_path, dataset))
        vidnames = [right_remove(x.split('/')[-1], '.mkv') for x in vidnames]
        vidnames.sort()

        outfolder = '{}{}_{}/tracks/'.format(runs_path, dataset, run)
    else:
        vidnames = [cmd]
        outfolder = './'

    vidres = parse_resolution(vidres)
    ssdres = parse_resolution(ssdres)
    kltres = parse_resolution(kltres)

    x_factor = float(vidres[0]) / ssdres[0]
    y_factor = float(vidres[1]) / ssdres[1]
    det_dims = ('xmin', 'xmax', 'ymin', 'ymax')
    det_factors = (x_factor, x_factor, y_factor, y_factor)

    c = Config(vidres, kltres, conf)

    from folder import mkdir
    mkdir(outfolder)

    for v in vidnames:
        detections = pd.read_csv('{}{}_{}/csv/{}.csv'.format(
            runs_path, dataset, run, v))
        for dim, factor in zip(det_dims, det_factors):
            detections[dim] = round(detections[dim] * factor).astype(int)

        klt = load('{}{}/klt/{}.pklz'.format(datasets_path, dataset, v))
        klt, klt_frames = convert_klt(klt, c)

        tracks = []
        if len(detections) > 0:
            tracks = build_tracks(detections, klt, klt_frames, c)
            print_flush("{}  tracks done".format(v))
            save(tracks, '{}{}_tracks.pklz'.format(outfolder, v))
        else:
            print_flush(
                "{}  skipping tracking, because there were no detections".
                format(v))

        if make_videos:
            if tracks:
                from visualize_tracking import render_video
                vidpath = "{}{}/videos/{}.mkv".format(datasets_path, dataset,
                                                      v)
                render_video(tracks,
                             vidpath,
                             "{}{}_tracks.mp4".format(outfolder, v),
                             mask=mask)
                print_flush("{}  video done".format(v))
            else:
                print_flush(
                    "{}  skipping video rendering, because there were no tracks"
                    .format(v))

    print_flush("Done!")
Example #22
def __init__(self, dataset=None, run=None):
    run_path = "{}{}_{}/".format(runs_path, dataset, run)
    mkdir(run_path)
    self.filepath = "{}config.txt".format(run_path)
    super().__init__(self.filepath)
Example #23
def main(cmd, dataset, run, vidres, ssdres, kltres, conf, make_videos):
    from storage import load, save
    from folder import datasets_path, runs_path
    from pathlib import Path
    from folder import mkdir

    mask = Masker(dataset)

    if cmd == "findvids":
        vidnames = (datasets_path / dataset / "videos").glob('*.mkv')
        vidnames = [x.stem for x in vidnames]
        vidnames.sort()

        outfolder = runs_path / '{}_{}'.format(dataset, run) / 'tracks'
    else:
        vidnames = [cmd]
        outfolder = Path('./')

    vidres = parse_resolution(vidres)
    ssdres = parse_resolution(ssdres)
    kltres = parse_resolution(kltres)

    x_factor = float(vidres[0]) / ssdres[0]
    y_factor = float(vidres[1]) / ssdres[1]
    det_dims = ('xmin', 'xmax', 'ymin', 'ymax')
    det_factors = (x_factor, x_factor, y_factor, y_factor)

    c = Config(vidres, kltres, conf)

    mkdir(outfolder)

    for v in vidnames:
        det_path = runs_path / "{}_{}".format(dataset,
                                              run) / "csv" / (v + '.csv')
        detections = pd.read_csv(det_path)
        for dim, factor in zip(det_dims, det_factors):
            detections[dim] = round(detections[dim] * factor).astype(int)

        klt = load(datasets_path / dataset / "klt" / (v + '.pklz'))
        klt, klt_frames = convert_klt(klt, c)

        tracks = []
        if len(detections) > 0:
            tracks = build_tracks(detections, klt, klt_frames, c)
            print_flush("{}  tracks done".format(v))
            save(tracks, outfolder / '{}_tracks.pklz'.format(v))
        else:
            print_flush(
                "{}  skipping tracking, because there were no detections".
                format(v))

        if make_videos:
            if tracks:
                from visualize_tracking import render_video
                vidpath = datasets_path / dataset / "videos" / (v + '.mkv')
                render_video(tracks,
                             vidpath,
                             outfolder / (v + "_tracks.mp4"),
                             mask=mask)
                print_flush("{}  video done".format(v))
            else:
                print_flush(
                    "{}  skipping video rendering, because there were no tracks"
                    .format(v))

    print_flush("Done!")
Example #24
assert (tt == "train" or tt == "test")

if comp == "peano":
    basepath = '/media/ma/48026b8d-78d7-48d8-90ec-0ab2252ab34d/ma/miotcd/MIO-TCD-Localization/{}/'.format(
        tt)
elif comp == "browkin":
    basepath = '/home2/ahrnbom/MIO-TCD-Localization/{}/'.format(tt)
else:
    print("What is this I don't even")
    sys.exit()

fold = sys.argv[1]
print("Using fold {}".format(fold))

foldername = "csv_results"
mkdir(foldername)

outname = "{}/{}_{}.csv".format(foldername, fold, tt)
try:
    os.remove(outname)
    print("Removed old file")
except OSError:
    print("Did not remove file")

orig = glob("results/miotcd*.txt")
miotcd_classes = [
    "articulatedtruck", "bicycle", "bus", "car", "motorcycle",
    "motorizedvehicle", "nonmotorizedvehicle", "pedestrian", "pickuptruck",
    "singleunittruck", "workvan"
]