Code example #1
File: consens_se.py Project: andypohl/ipyrad
def make_chunks(data, samples, lbview):
    """
    calls chunk_clusters and tracks progress.
    """
    ## first progress bar
    start = time.time()
    printstr = " chunking clusters     | {} | s5 |"
    elapsed = datetime.timedelta(seconds=int(time.time()-start))
    progressbar(10, 0, printstr.format(elapsed), spacer=data._spacer)

    ## send off samples to be chunked
    lasyncs = {}
    for sample in samples:
        lasyncs[sample.name] = lbview.apply(chunk_clusters, *(data, sample))

    ## block until finished
    while 1:
        ready = [i.ready() for i in lasyncs.values()]
        elapsed = datetime.timedelta(seconds=int(time.time()-start))
        progressbar(len(ready), sum(ready), printstr.format(elapsed), spacer=data._spacer)
        time.sleep(0.1)
        if len(ready) == sum(ready):
            print("")
            break

    ## check for failures
    for sample in samples:
        if not lasyncs[sample.name].successful():
            LOGGER.error("  sample %s failed: %s", sample.name, 
                        lasyncs[sample.name].exception())

    return lasyncs
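This and the other ipyrad snippets below drive the bar with a module-level helper of the form progressbar(total, finished, message, spacer=...). A minimal stand-in with that signature, assuming a fixed 20-character bar (a sketch for orientation, not ipyrad's actual implementation):

import sys

def progressbar(total, finished, msg="", spacer=""):
    # A sketch assuming the ipyrad-style call signature; not the real helper.
    frac = finished / float(total) if total else 0.0
    filled = int(frac * 20)
    bar = "#" * filled + " " * (20 - filled)
    # \r moves the cursor back to the line start so the bar redraws in place
    sys.stderr.write("\r{}[{}] {:3d}% {}".format(spacer, bar, int(frac * 100), msg))
    sys.stderr.flush()

Because it writes a carriage return instead of a newline, repeated calls overwrite the same terminal line, which is why the polling loops above can safely call it every 0.1 s.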
Code example #2
def generatePFamInfoByProtein():
    with open(conf.FamToArrDictLoc, "rb") as f:
        famToArrDict = load(f)
    PFamInfoByProtein = {}

    open(conf.pFamToLabelFile, "w").close()  # truncate the label file before appending below
    length = len(famToArrDict)
    util.progressbarGuide(20)
    for i, family in enumerate(famToArrDict.keys()):
        util.progressbar(i, length, 20)
        arrs = famToArrDict[family]
        for arr in arrs:
            # format:
            # 0      1           2               3               4           5           6           7
            # PDB_ID CHAIN_ID    PdbResNumStart  PdbResNumEnd    PFAM_ACC    PFAM_Name   PFAM_desc   eValue
            pid = arr[0]
            start = arr[2]
            end = arr[3]
            if pid in PFamInfoByProtein:
                PFamInfoByProtein[pid].append([i, start, end])
            else:
                PFamInfoByProtein[pid] = [[i, start, end]]
        with open(conf.pFamToLabelFile, "a") as f:
            f.write(str(i) + "\t" + family + "\n")
    with open(conf.PFamInfoByProteinFile, "wb") as f:
        dump(PFamInfoByProtein, f)
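Here util.progressbarGuide(width) appears to print a fixed-width ruler once, and util.progressbar(i, total, width) fills it in as the loop advances; the same pair is used in CreateFamilyToArrDict and build_graph below. A stand-in pair under that reading (an assumption, not the project's real helpers):

import sys

def progressbarGuide(width):
    # Print a fixed ruler once so the filling bar below has a visible endpoint
    print('|' + '-' * width + '|')

def progressbar(i, total, width):
    # Redraw the bar in place, filled proportionally to (i + 1) / total
    filled = int((i + 1) * width / float(total)) if total else width
    sys.stdout.write('\r|' + '#' * filled + ' ' * (width - filled) + '|')
    sys.stdout.flush()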
Code example #3
File: improc.py Project: isarandi/metrabs
def transform_video(inp_path, out_path, process_frame_fn, **kwargs):
    util.ensure_path_exists(out_path)
    with imageio.get_reader(inp_path) as reader:
        fps = reader.get_meta_data()['fps']
        with imageio.get_writer(out_path, fps=fps, codec='h264',
                                **kwargs) as writer:
            for frame in util.progressbar(reader):
                writer.append_data(process_frame_fn(frame))
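A hypothetical usage sketch for transform_video; the file names and the desaturate frame function are invented for illustration, and the quality keyword is simply forwarded to imageio.get_writer:

import numpy as np

def desaturate(frame):
    # Average the color channels, then broadcast back to three channels
    gray = frame.mean(axis=-1, keepdims=True).astype(np.uint8)
    return np.repeat(gray, 3, axis=-1)

transform_video('input.mp4', 'desaturated.mp4', desaturate, quality=8)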
Code example #4
File: pile.py Project: lucasc15/mt3py
        def __init__(self, label, n):
            self._label = label
            self._n = n
            self._bar = None
            if show_progress:
                self._bar = util.progressbar(label, self._n)

            if update_progress:
                update_progress(label, 0, self._n)
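The __init__ above assumes a pyrocko-style bar object constructed as util.progressbar(label, n) and driven with update(i) and finish(), as in the chopper_grouped examples further down. A minimal object with that interface (an assumption-based sketch, not pyrocko's actual code):

import sys

class SimpleProgressBar:
    """Stand-in for util.progressbar(label, n) with update/finish."""

    def __init__(self, label, n):
        self._label = label
        self._n = max(n, 1)

    def update(self, i):
        # Redraw "label:  42%" in place on stderr
        sys.stderr.write('\r%s: %3d%%' % (self._label, int(100.0 * i / self._n)))
        sys.stderr.flush()

    def finish(self):
        self.update(self._n)
        sys.stderr.write('\n')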
Code example #5
File: consens_se.py Project: ewarschefsky/ipyrad
def calculate_depths(data, samples, lbview):
    """
    check whether mindepth has changed, and thus whether clusters_hidepth
    needs to be recalculated, and get new maxlen for new highdepth clusts.
    if mindepth not changed then nothing changes.
    """

    ## send jobs to be processed on engines
    start = time.time()
    printstr = " calculating depths    | {} | s5 |"
    recaljobs = {}
    maxlens = []
    for sample in samples:
        recaljobs[sample.name] = lbview.apply(recal_hidepth, *(data, sample))

    ## block until finished
    while 1:
        ready = [i.ready() for i in recaljobs.values()]
        elapsed = datetime.timedelta(seconds=int(time.time() - start))
        progressbar(len(ready),
                    sum(ready),
                    printstr.format(elapsed),
                    spacer=data._spacer)
        time.sleep(0.1)
        if len(ready) == sum(ready):
            print("")
            break

    ## check for failures and collect results
    modsamples = []
    for sample in samples:
        if not recaljobs[sample.name].successful():
            LOGGER.error("  sample %s failed: %s", sample.name,
                         recaljobs[sample.name].exception())
        else:
            modsample, _, maxlen, _, _ = recaljobs[sample.name].result()
            modsamples.append(modsample)
            maxlens.append(maxlen)

    ## reset global maxlen if something changed
    if maxlens:
        data._hackersonly["max_fragment_length"] = int(max(maxlens)) + 4

    return samples
Code example #6
File: datasets2d.py Project: isarandi/metrabs
def make_mpii():
    joint_names = 'rank,rkne,rhip,lhip,lkne,lank,pelv,thor,neck,head,rwri,relb,rsho,lsho,lelb,lwri'
    edges = 'lsho-lelb-lwri,rsho-relb-rwri,lhip-lkne-lank,rhip-rkne-rank,neck-head,pelv-thor'
    joint_info_full = JointInfo(joint_names, edges)

    joint_names_used = 'rank,rkne,rhip,lhip,lkne,lank,rwri,relb,rsho,lsho,lelb,lwri'
    joint_info_used = JointInfo(joint_names_used, edges)
    dataset = Pose2DDataset(joint_info_used)
    selected_joints = [joint_info_full.ids[name] for name in joint_info_used.names]

    mat_path = f'{paths.DATA_ROOT}/mpii/mpii_human_pose_v1_u12_1.mat'
    s = matlabfile.load(mat_path).RELEASE
    annolist = np.atleast_1d(s.annolist)
    pool = util.BoundedPool(None, 120)

    for anno, is_train, rect_ids in zip(annolist, util.progressbar(s.img_train), s.single_person):
        if not is_train:
            continue

        image_path = f'mpii/images/{anno.image.name}'
        annorect = np.atleast_1d(anno.annorect)
        rect_ids = np.atleast_1d(rect_ids) - 1

        for rect_id in rect_ids:
            rect = annorect[rect_id]
            if 'annopoints' not in rect or len(rect.annopoints) == 0:
                continue

            coords = np.full(
                shape=[joint_info_full.n_joints, 2], fill_value=np.nan, dtype=np.float32)
            for joint in np.atleast_1d(rect.annopoints.point):
                coords[joint.id] = [joint.x, joint.y]

            coords = coords[selected_joints]
            rough_person_center = np.float32([rect.objpos.x, rect.objpos.y])
            rough_person_size = rect.scale * 200

            # Shift person center down like [Sun et al. 2018], who say this is common on MPII
            rough_person_center[1] += 0.075 * rough_person_size

            topleft = np.array(rough_person_center) - np.array(rough_person_size) / 2
            bbox = np.array([topleft[0], topleft[1], rough_person_size, rough_person_size])
            ex = Pose2DExample(image_path, coords, bbox=bbox)
            new_im_path = image_path.replace('mpii', 'mpii_downscaled')
            without_ext, ext = os.path.splitext(new_im_path)
            new_im_path = f'{without_ext}_{rect_id:02d}{ext}'
            pool.apply_async(
                make_efficient_example, (ex, new_im_path), callback=dataset.examples[TRAIN].append)

    print('Waiting for tasks...')
    pool.close()
    pool.join()
    print('Done...')
    dataset.examples[TRAIN].sort(key=lambda x: x.image_path)
    return dataset
Code example #7
File: collect.py Project: m00tiny/Dorkscan-Project
async def collect_urls(stdscr, alldorks, domain, page_amount):
    urls_collected = []

    stdscr.border()
    stdscr.addstr(1, 1, "Collecting dorks...")

    quantity = 0
    for dork in alldorks:
        quantity += 1
        page = 0
        futures = []  # collect futures across all result pages for this dork
        while page < int(page_amount):
            search_query = dork + "+site:" + domain
            loop = asyncio.get_event_loop()
            for i in range(25):
                complete_url = "http://www.bing.com/search?q=" + search_query + \
                    "&go=Submit&first=" + \
                        str((page + i) * 50 + 1) + "&count=50"
                futures.append(
                    loop.run_in_executor(None, fire_and_forget, complete_url))
            page += 25
        string_regex = re.compile('(?<=href=")(.*?)(?=")')
        names = []
        for future in futures:
            result = await future
            names.extend(string_regex.findall(result))
        domains = set()
        for name in names:
            basename = re.search(r"(?<=(://))[^/]*(?=/)", name)
            if basename is None:
                basename = re.search(r"(?<=://).*", name)
            if basename is not None:
                basename = basename.group(0)
            if basename not in domains and basename is not None and name.startswith(
                    "http://"):
                domains.add(basename)
                urls_collected.append(unquote(html.unescape(name)))
        percent = int(quantity / len(alldorks) * 100)

        stdscr.addstr(
            2, 1, "Processed dorks: ({}/{}) for domain {}".format(
                quantity, len(alldorks), domain))
        stdscr.addstr(3, 1, "Current dork: {}".format(dork))
        stdscr.addstr(4, 1, "Collected URLs: {}".format(len(urls_collected)))
        stdscr.addstr(
            5, 1,
            "Progress: {} ({}%)".format(progressbar(quantity, len(alldorks)),
                                        percent))
        stdscr.addstr(
            7, 1, "Collecting mode: {}".format(
                "random" if settings.RANDOM_ORDER else "sequential"))
        stdscr.refresh()
        stdscr.timeout(30)
    return urls_collected
Code example #8
File: data_handler.py Project: salonimishr/Mapreduce
def download_dataset():
    """
    download all the years of flight data into data folder
    """
    if check_dataset():

        if not os.path.exists(DATA_DIR):
            os.mkdir(DATA_DIR)

        year_range = range(1987, 2009)
        for ind, year in enumerate(year_range):
            # vars
            data_url = '{}/{}.csv.bz2'.format(DATA_SOURCE, year)
            file_path = '{}/raw_{}.csv.bz2'.format(DATA_DIR, year)

            # download
            download_handler(file_path, data_url)
            
            # progress
            util.progressbar(len(year_range), ind + 1, 'download status: ')
    else:
        print('Data already downloaded. Skip this step, or delete the data folder to download again.')
Code example #9
def CreateFamilyToArrDict():
    #create the generated folder
    util.generateDirectoriesMult([conf.GeneratedFolder])

    #import all the mapping lines excluding the header
    mappingLines = open(conf.mappingFile, "r").read().split("\n")[1:]

    #dictionary of lines from the mapping file
    #key: PFAM_ACC
    #val: arrays of arrays of values in the lines
    FamToArrDict = {}
    print "Adding information to dictionary..."
    util.progressbarGuide(20)
    for i, line in enumerate(mappingLines):
        util.progressbar(i, len(mappingLines), 20)

        if len(line) > 0:
            #format:
            #0      1           2               3               4           5           6           7
            #PDB_ID	CHAIN_ID	PdbResNumStart	PdbResNumEnd	PFAM_ACC	PFAM_Name	PFAM_desc	eValue
            arr = line.split("\t")
            fName = arr[5]
            if fName in FamToArrDict:
                FamToArrDict[fName].append(arr)
            else:
                FamToArrDict[fName] = [arr]

    print "Sorting infomation inside dictionary..."
    util.progressbarGuide(10)
    famNames = FamToArrDict.keys()
    for i, famName in enumerate(famNames):
        util.progressbar(i, len(famNames), 10)
        FamToArrDict[famName].sort(key=lambda x: x[0])

    #dump the dictionary to disc
    with open(conf.FamToArrDictLoc, "wb") as f:
        dump(FamToArrDict, f)
Code example #10
File: consens_se.py Project: ewarschefsky/ipyrad
def process_chunks(data, samples, lasyncs, lbview):
    """
    submit chunks to consens func and ...
    """

    ## send chunks to be processed
    start = time.time()
    asyncs = {sample.name: [] for sample in samples}
    printstr = " consens calling       | {} | s5 |"

    ## get chunklist from results
    for sample in samples:
        clist = lasyncs[sample.name].result()
        for optim, chunkhandle in clist:
            args = (data, sample, chunkhandle, optim)
            #asyncs[sample.name].append(lbview.apply_async(consensus, *args))
            asyncs[sample.name].append(lbview.apply_async(newconsensus, *args))
            elapsed = datetime.timedelta(seconds=int(time.time() - start))
            progressbar(10, 0, printstr.format(elapsed), spacer=data._spacer)

    ## track progress
    allsyncs = list(itertools.chain(*[asyncs[i.name] for i in samples]))
    while 1:
        ready = [i.ready() for i in allsyncs]
        elapsed = datetime.timedelta(seconds=int(time.time() - start))
        progressbar(len(ready),
                    sum(ready),
                    printstr.format(elapsed),
                    spacer=data._spacer)
        time.sleep(0.1)
        if len(ready) == sum(ready):
            break

    ## get clean samples
    casyncs = {}
    for sample in samples:
        rlist = asyncs[sample.name]
        statsdicts = [i.result() for i in rlist]
        casyncs[sample.name] = lbview.apply(cleanup,
                                            *(data, sample, statsdicts))
    while 1:
        ready = [i.ready() for i in casyncs.values()]
        elapsed = datetime.timedelta(seconds=int(time.time() - start))
        progressbar(10, 10, printstr.format(elapsed), spacer=data._spacer)
        time.sleep(0.1)
        if len(ready) == sum(ready):
            print("")
            break

    ## check for failures:
    for key in asyncs:
        asynclist = asyncs[key]
        for job in asynclist:
            if not job.successful():
                LOGGER.error("  async error: %s \n%s", key, job.exception())
Code example #11
def generate_all_visible_boxes(composites, i_valids):
    """Saves the visible bounding box for each person instance.
     Visible bounding box means the box around the unoccluded foreground mask.
     A part of the foreground mask may be occluded by a person or chair pasted in front.
     """
    result = np.zeros((composites.shape[0], 4, 4), np.float32)

    def setter(i):
        def fn(val):
            result[i] = val

        return fn

    with util.BoundedPool(None, 120) as pool:
        for i, sample in zip(i_valids, util.progressbar(composites[i_valids])):
            pool.apply_async(get_visible_boxes, (sample, ), callback=setter(i))
    np.save(f'{paths.DATA_ROOT}/muco/visible_boxes.npy', result)
Code example #12
    def chopper_grouped(self, gather, progress=None, *args, **kwargs):
        keys = self.gather_keys(gather)
        if len(keys) == 0:
            return
        outer_group_selector = None
        if 'group_selector' in kwargs:
            outer_group_selector = kwargs['group_selector']

        outer_trace_selector = None
        if 'trace_selector' in kwargs:
            outer_trace_selector = kwargs['trace_selector']

        # the use of this gather-cache makes it impossible to modify the pile
        # during chopping
        gather_cache = {}
        pbar = None
        if progress is not None:
            pbar = util.progressbar(progress, len(keys))

        for ikey, key in enumerate(keys):

            def tsel(tr):
                return gather(tr) == key and (outer_trace_selector is None
                                              or outer_trace_selector(tr))

            def gsel(gr):
                if gr not in gather_cache:
                    gather_cache[gr] = gr.gather_keys(gather)

                return key in gather_cache[gr] and (
                    outer_group_selector is None or outer_group_selector(gr))

            kwargs['trace_selector'] = tsel
            kwargs['group_selector'] = gsel

            for traces in self.chopper(*args, **kwargs):
                yield traces

            if pbar:
                pbar.update(ikey + 1)

        if pbar:
            pbar.finish()
Code example #13
File: predict_mupots.py Project: isarandi/metrabs
def predict_sequence(predict_fn, frame_gpu, frames_cpu, n_frames, camera, viz):
    predict_fn = functools.partial(
        predict_fn,
        intrinsic_matrix=camera.intrinsic_matrix[np.newaxis],
        distortion_coeffs=camera.get_distortion_coeffs()[np.newaxis])
    progbar = util.progressbar(total=n_frames)
    poses_per_frame = []

    for frames_b_gpu, frames_b_cpu in zip(frame_gpu, frames_cpu):
        pred = predict_fn(frames_b_gpu)
        poses_per_frame.extend(pred['poses3d'].numpy())
        progbar.update(frames_b_gpu.shape[0])

        if viz is not None:
            for frame, boxes, poses3d in zip(frames_b_cpu,
                                             pred['boxes'].numpy(),
                                             pred['poses3d'].numpy()):
                viz.update(frame, boxes, poses3d, camera)

    return poses_per_frame
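Here and in the other metrabs snippets, util.progressbar wraps an iterable (for frame in util.progressbar(reader)), accepts a total= keyword, and is advanced by update(k) with a batch size. That is exactly tqdm's dual interface, so a plausible sketch is a thin wrapper around tqdm (an assumption; the project's real helper may add formatting):

import tqdm

def progressbar(iterable=None, total=None, **kwargs):
    # With an iterable, yields its items while drawing the bar;
    # without one, returns a manual bar advanced via update(n).
    return tqdm.tqdm(iterable, total=total, **kwargs)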
Code example #14
File: pile.py Project: trokia/pyrocko
    def chopper_grouped(self, gather, progress=None, *args, **kwargs):
        keys = self.gather_keys(gather)
        if len(keys) == 0:
            return
        outer_group_selector = None
        if "group_selector" in kwargs:
            outer_group_selector = kwargs["group_selector"]

        outer_trace_selector = None
        if "trace_selector" in kwargs:
            outer_trace_selector = kwargs["trace_selector"]

        # the use of this gather-cache makes it impossible to modify the pile
        # during chopping
        gather_cache = {}
        pbar = None
        if progress is not None:
            pbar = util.progressbar(progress, len(keys))

        for ikey, key in enumerate(keys):

            def tsel(tr):
                return gather(tr) == key and (outer_trace_selector is None or outer_trace_selector(tr))

            def gsel(gr):
                if gr not in gather_cache:
                    gather_cache[gr] = gr.gather_keys(gather)

                return key in gather_cache[gr] and (outer_group_selector is None or outer_group_selector(gr))

            kwargs["trace_selector"] = tsel
            kwargs["group_selector"] = gsel

            for traces in self.chopper(*args, **kwargs):
                yield traces

            if pbar:
                pbar.update(ikey + 1)

        if pbar:
            pbar.finish()
Code example #15
File: predict_3dhp.py Project: isarandi/metrabs
def predict_sequence(predict_fn, frames_gpu, frames_cpu, n_frames, camera, viz):
    predict_fn = functools.partial(
        predict_fn,
        intrinsic_matrix=camera.intrinsic_matrix[np.newaxis],
        distortion_coeffs=camera.get_distortion_coeffs()[np.newaxis],
        extrinsic_matrix=camera.get_extrinsic_matrix()[np.newaxis],
        world_up_vector=camera.world_up)
    progbar = util.progressbar(total=n_frames)
    pose_batches = []

    for frames_b, frames_b_cpu in zip(frames_gpu, frames_cpu):
        pred = predict_fn(frames_b)
        pred = tf.nest.map_structure(lambda x: tf.squeeze(x, 1).numpy(), pred)
        pose_batches.append(pred['poses3d'])
        progbar.update(frames_b.shape[0])

        if FLAGS.viz:
            for frame, box, pose3d in zip(frames_b_cpu, pred['boxes'], pred['poses3d']):
                viz.update(frame, box[np.newaxis], pose3d[np.newaxis], camera)

    return np.concatenate(pose_batches, axis=0)
Code example #16
def create_image(time: str):
    """
    Создает картинку с временем из строки полученной из параметра time.
    """
    image = Image.new('RGB', (500, 500), color='white')
    W, H = image.size  # width and height of the image
    draw = ImageDraw.Draw(image)

    clock_font = ImageFont.truetype(font='fonts/digital_num.ttf', size=215)
    progressbar_font = ImageFont.truetype(font='fonts/progressbar.ttf',
                                          size=22)
    percent_font = ImageFont.truetype(font='fonts/num.ttf', size=25)

    wt, ht = draw.textsize(time, font=clock_font)
    draw.text(((W - wt) / 2, ((H - ht) / 2) - 60),
              time,
              font=clock_font,
              fill='black')  # write main clock in photo
    draw.text((6, 355), progressbar(time), font=progressbar_font,
              fill='black')  # write progressbar in photo
    draw.text((410, 350), percent(time), font=percent_font,
              fill='black')  # write percent in photo

    image.save("clock.png")
Code example #17
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model-path', type=str, required=True)
    parser.add_argument('--output-dir', type=str, required=True)
    parser.add_argument('--video-path', type=str)
    parser.add_argument('--dataset', type=str)
    parser.add_argument('--darknet-dir', type=str)
    parser.add_argument('--gt-assoc', action=options.YesNoAction)
    parser.add_argument('--precomputed-detections', action=options.YesNoAction)
    parser.add_argument('--batched', action=options.YesNoAction)
    parser.add_argument('--crops', type=int, default=5)
    parser.add_argument('--detector-flip-aug', action=options.YesNoAction)
    parser.add_argument('--detector-path', type=str)
    parser.add_argument('--antialias', action=options.YesNoAction)
    parser.add_argument('--real-intrinsics', action=options.YesNoAction)
    parser.add_argument('--causal-smoothing', action=options.YesNoAction)
    parser.add_argument('--gui', action=options.YesNoAction)
    options.initialize(parser)
    for gpu in tf.config.experimental.list_physical_devices('GPU'):
        tf.config.experimental.set_memory_growth(gpu, True)

    detector = tf.saved_model.load(FLAGS.detector_path)
    pose_estimator = tf.saved_model.load(FLAGS.model_path)

    joint_names = [
        b.decode('utf8')
        for b in pose_estimator.crop_model.joint_names.numpy()
    ]
    edges = pose_estimator.crop_model.joint_edges.numpy()
    ji3d = data.datasets3d.JointInfo(joint_names, edges)

    if FLAGS.gui:
        q = queue.Queue(30)
        visualizer_thread = threading.Thread(target=main_visualize,
                                             args=(q, ji3d))
        visualizer_thread.start()
    else:
        q = None

    seq_filepaths = sorted(
        glob.glob(f'{paths.DATA_ROOT}/3dpw/sequenceFiles/*/*.pkl'))
    seq_filepaths = [x for x in seq_filepaths if 'capoeira' in x]
    seq_names = [os.path.basename(p).split('.')[0] for p in seq_filepaths]
    subdir = 'gtassoc' if FLAGS.gt_assoc else 'nogtassoc'
    subdirpath = f'{FLAGS.output_dir}/{subdir}'

    for seq_name, seq_filepath in util.progressbar(
            zip(seq_names, seq_filepaths)):
        already_done_files = glob.glob(f'{subdirpath}/*/*.pkl')
        if any(seq_name in p for p in already_done_files):
            continue
        print(seq_name)
        frame_paths = sorted(
            glob.glob(
                f'{paths.DATA_ROOT}/3dpw/imageFiles/{seq_name}/image_*.jpg'))
        poses2d_true = get_poses2d_3dpw(seq_name)
        camera = get_3dpw_camera(
            seq_filepath) if FLAGS.real_intrinsics else None
        tracks = track_them(detector,
                            pose_estimator,
                            frame_paths,
                            poses2d_true,
                            ji2d,
                            ji3d,
                            q,
                            camera=camera)
        save_result_file(seq_name, subdirpath, tracks)
Code example #18
File: datasets2d.py Project: isarandi/metrabs
def make_mpii_yolo():
    joint_info_full = JointInfo(
        'rank,rkne,rhip,lhip,lkne,lank,pelv,thor,neck,head,rwri,relb,rsho,lsho,lelb,lwri',
        'lsho-lelb-lwri,rsho-relb-rwri,lhip-lkne-lank,rhip-rkne-rank,neck-head,pelv-thor')
    joint_info_used = JointInfo(
        'rank,rkne,rhip,lhip,lkne,lank,rwri,relb,lelb,lwri',
        'lelb-lwri,relb-rwri,lhip-lkne-lank,rhip-rkne-rank')
    selected_joints = [joint_info_full.ids[name] for name in joint_info_used.names]

    mat_path = f'{paths.DATA_ROOT}/mpii/mpii_human_pose_v1_u12_1.mat'
    s = matlabfile.load(mat_path).RELEASE
    annolist = np.atleast_1d(s.annolist)
    all_boxes = util.load_pickle(f'{paths.DATA_ROOT}/mpii/yolov3_detections.pkl')

    examples = []
    with util.BoundedPool(None, 120) as pool:
        for anno_id, (anno, is_train) in enumerate(
                zip(annolist, util.progressbar(s.img_train))):
            if not is_train:
                continue

            image_path = f'{paths.DATA_ROOT}/mpii/images/{anno.image.name}'

            annorect = np.atleast_1d(anno.annorect)
            gt_people = []
            for rect_id, rect in enumerate(annorect):
                if 'annopoints' not in rect or len(rect.annopoints) == 0:
                    continue

                coords = np.full(
                    shape=[joint_info_full.n_joints, 2], fill_value=np.nan, dtype=np.float32)
                for joint in np.atleast_1d(rect.annopoints.point):
                    coords[joint.id] = [joint.x, joint.y]

                bbox = boxlib.expand(boxlib.bb_of_points(coords), 1.25)
                coords = coords[selected_joints]
                ex = Pose2DExample(image_path, coords, bbox=bbox)
                gt_people.append(ex)

            if not gt_people:
                continue

            image_relpath = os.path.relpath(f'images/{anno.image.name}')
            boxes = [box for box in all_boxes[image_relpath] if box[-1] > 0.5]
            if not boxes:
                continue

            iou_matrix = np.array([[boxlib.iou(gt_person.bbox, box[:4])
                                    for box in boxes]
                                   for gt_person in gt_people])
            gt_indices, box_indices = scipy.optimize.linear_sum_assignment(-iou_matrix)

            for i_gt, i_det in zip(gt_indices, box_indices):
                if iou_matrix[i_gt, i_det] > 0.1:
                    ex = gt_people[i_gt]
                    ex.bbox = np.array(boxes[i_det][:4])
                    new_im_path = image_path.replace('mpii', 'mpii_downscaled_yolo')
                    without_ext, ext = os.path.splitext(new_im_path)
                    new_im_path = f'{without_ext}_{i_gt:02d}{ext}'
                    pool.apply_async(make_efficient_example, (ex, new_im_path),
                                     callback=examples.append)

    examples.sort(key=lambda ex: ex.image_path)

    def n_valid_joints(example):
        return np.count_nonzero(np.all(~np.isnan(example.coords), axis=-1))

    examples = [ex for ex in examples if n_valid_joints(ex) > 6]

    return Pose2DDataset(joint_info_used, examples)
Code example #19
File: mpi_inf_3dhp.py Project: matiasmolinas/metrabs
def make_mpi_inf_3dhp(camera_ids=(0, 1, 2, 4, 5, 6, 7, 8)):
    all_short_names = (
        'spi3,spi4,spi2,spin,pelv,neck,head,htop,lcla,lsho,lelb,lwri,lhan,rcla,rsho,relb,rwri,'
        'rhan,lhip,lkne,lank,lfoo,ltoe,rhip,rkne,rank,rfoo,rtoe'.split(','))

    test_set_selected_joints = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15, 16, 14]
    selected_joints = [7, 5, 14, 15, 16, 9, 10, 11, 23, 24, 25, 18, 19, 20, 3, 6, 4]
    joint_names = [all_short_names[j] for j in selected_joints]

    edges = ('htop-head-neck-lsho-lelb-lwri,neck-rsho-relb-rwri,neck-spin-pelv-lhip-lkne-lank,'
             'pelv-rhip-rkne-rank')
    joint_info = p3ds.JointInfo(joint_names, edges)

    root_3dhp = f'{paths.DATA_ROOT}/3dhp'
    detections_all = util.load_pickle(f'{paths.DATA_ROOT}/3dhp/yolov3_person_detections.pkl')

    #################################
    # TRAINING AND VALIDATION SET
    #################################
    num_frames = np.asarray(
        [[6416, 12430], [6502, 6081], [12488, 12283], [6171, 6675], [12820, 12312], [6188, 6145],
         [6239, 6320], [6468, 6054]])

    train_subjects = [0, 1, 2, 3, 4, 5, 6]
    valid_subjects = [7]  # this is my own arbitrary split for validation (Istvan Sarandi)
    train_examples = []
    valid_examples = []

    pool = util.BoundedPool(None, 120)
    for i_subject, i_seq, i_cam in itertools.product(
            train_subjects + valid_subjects, range(2), camera_ids):
        seqpath = f'{root_3dhp}/S{i_subject + 1}/Seq{i_seq + 1}'
        print(f'Processing {seqpath} camera {i_cam}')

        cam3d_coords = [ann.reshape([ann.shape[0], -1, 3])[:, selected_joints]
                        for ann in matlabfile.load(f'{seqpath}/annot.mat')['annot3']]
        univ_cam3d_coords = [ann.reshape([ann.shape[0], -1, 3])[:, selected_joints]
                             for ann in matlabfile.load(f'{seqpath}/annot.mat')['univ_annot3']]
        cameras = load_cameras(f'{seqpath}/camera.calibration')

        examples_container = train_examples if i_subject in train_subjects else valid_examples
        frame_step = 5

        prev_coords = None
        camera = cameras[i_cam]
        n_frames = num_frames[i_subject, i_seq]

        if i_subject == 5 and i_seq == 1 and i_cam == 2:
            # This video is shorter for some reason
            n_frames = 3911

        for i_frame in util.progressbar(range(0, n_frames, frame_step)):
            image_relpath = (
                    f'3dhp/S{i_subject + 1}/Seq{i_seq + 1}/'
                    f'imageSequence/img_{i_cam}_{i_frame:06d}.jpg')

            cam_coords = cam3d_coords[i_cam][i_frame]
            world_coords = cameras[i_cam].camera_to_world(cam_coords)

            univ_camcoords = univ_cam3d_coords[i_cam][i_frame]
            univ_world_coords = cameras[i_cam].camera_to_world(univ_camcoords)

            # Check if the joints are within the image frame bounds
            if not np.all(camera.is_visible(world_coords, [2048, 2048])):
                continue

            im_coords = camera.camera_to_image(cam_coords)
            bbox = get_bbox(im_coords, image_relpath, detections_all)

            # Adaptive temporal sampling
            if (prev_coords is not None and
                    np.all(np.linalg.norm(world_coords - prev_coords, axis=1) < 100)):
                continue
            prev_coords = world_coords

            mask_path = image_relpath.replace('imageSequence', 'FGmasks')
            new_image_relpath = image_relpath.replace('3dhp', '3dhp_downscaled')
            ex = p3ds.Pose3DExample(
                image_relpath, world_coords, bbox, camera, mask=mask_path,
                univ_coords=univ_world_coords)

            pool.apply_async(make_efficient_example, (ex, new_image_relpath, 1, True),
                             callback=examples_container.append)

    print('Waiting for tasks...')
    pool.close()
    pool.join()
    print('Done...')
    #################################
    # TEST SET
    #################################
    test_examples = []

    cam1_4 = make_3dhp_test_camera(
        sensor_size=np.array([10, 10]), im_size=np.array([2048, 2048]), focal_length=7.32506,
        pixel_aspect=1.00044, center_offset=np.array([-0.0322884, 0.0929296]), distortion=None,
        origin=np.array([3427.28, 1387.86, 309.42]), up=np.array([-0.208215, 0.976233, 0.06014]),
        right=np.array([0.000575281, 0.0616098, -0.9981]))

    cam5_6 = make_3dhp_test_camera(
        sensor_size=np.array([10, 5.625]), im_size=np.array([1920, 1080]), focal_length=8.770747185,
        pixel_aspect=0.993236423, center_offset=np.array([-0.104908645, 0.104899704]),
        distortion=np.array([-0.276859611, 0.131125256, -0.000360494, -0.001149441, -0.049318332]),
        origin=np.array([-2104.3074, 1038.6707, -4596.6367]),
        up=np.array([0.025272345, 0.995038509, 0.096227370]),
        right=np.array([-0.939647257, -0.009210289, 0.342020929]))

    activity_names = [
        'Stand/Walk', 'Exercise', 'Sit on Chair', 'Reach/Crouch', 'On Floor', 'Sports', 'Misc.']
    for i_subject in range(1, 7):
        seqpath = f'{root_3dhp}/TS{i_subject}'
        annotation_path = f'{seqpath}/annot_data.mat'

        with h5py.File(annotation_path, 'r') as m:
            cam3d_coords = np.array(m['annot3'])[:, 0, test_set_selected_joints]
            univ_cam3d_coords = np.array(m['univ_annot3'])[:, 0, test_set_selected_joints]
            valid_frames = np.where(m['valid_frame'][:, 0])[0]
            activity_ids = m['activity_annotation'][:, 0].astype(int) - 1

        camera = cam1_4 if i_subject <= 4 else cam5_6
        scene = ['green-screen', 'no-green-screen', 'outdoor'][(i_subject - 1) // 2]

        for i_frame in valid_frames:
            image_relpath = f'3dhp/TS{i_subject}/imageSequence/img_{i_frame + 1:06d}.jpg'
            cam_coords = cam3d_coords[i_frame]
            univ_camcoords = univ_cam3d_coords[i_frame]
            activity = activity_names[activity_ids[i_frame]]
            world_coords = camera.camera_to_world(cam_coords)
            univ_world_coords = camera.camera_to_world(univ_camcoords)
            im_coords = camera.camera_to_image(cam_coords)
            bbox = get_bbox(im_coords, image_relpath, detections_all)

            ex = p3ds.Pose3DExample(
                image_relpath, world_coords, bbox, camera, activity_name=activity,
                scene_name=scene, univ_coords=univ_world_coords)
            test_examples.append(ex)

    train_examples.sort(key=lambda x: x.image_path)
    valid_examples.sort(key=lambda x: x.image_path)
    test_examples.sort(key=lambda x: x.image_path)
    return p3ds.Pose3DDataset(joint_info, train_examples, valid_examples, test_examples)
Code example #20
    def download(self, pkg_todownload):
        """
        Download apks from the pkg_todownload list

        pkg_todownload -- a list either of app names, or
        of tuples of (app name, filepath to write the apk to)

        Example: ['org.mozilla.focus','org.mozilla.firefox'] or
                 [('org.mozilla.focus', 'org.mozilla.focus.apk'),
                  ('org.mozilla.firefox', 'download/org.mozilla.firefox.apk')]
        """
        success_downloads = []
        failed_downloads  = []
        unavail_downloads = []

        # case where no filenames have been provided
        for index, pkg in enumerate(pkg_todownload):
            if isinstance(pkg, str):
                pkg_todownload[index] = [pkg, None]
            # remove whitespaces before and after package name
            pkg_todownload[index][0] = pkg_todownload[index][0].strip()

        # Check for download folder
        download_folder = self.download_folder
        if not os.path.isdir(download_folder):
            os.makedirs(download_folder, exist_ok=True)

        # BulkDetails requires only one HTTP request
        # Get APK info from store
        details = list()
        for pkg in pkg_todownload:
            try:
                detail = self.api.details(pkg[0])
                details.append(detail)
            except RequestError as request_error:
                failed_downloads.append((pkg, request_error))

        if any(d is None for d in details):
            logger.info("Token has expired while downloading. Retrieving a new one.")
            self.refresh_token()
            details = self.api.bulkDetails([pkg[0] for pkg in pkg_todownload])

        for position, (detail, item) in enumerate(zip(details, pkg_todownload)):
            packagename, filename = item

            if filename is None:
                if self.append_version:
                    filename = "%s-v.%s.apk" % (detail['docid'], detail['details']['appDetails']['versionString'])
                else:
                    filename = "%s.apk" % detail['docid']

            logger.info("%s / %s %s", 1+position, len(pkg_todownload), packagename)

            # Download
            try:
                if detail['offer'][0]['checkoutFlowRequired']:
                    method = self.api.delivery
                else:
                    method = self.api.download
                data_iter = method(packagename, expansion_files=self.addfiles_enable)
                success_downloads.append(packagename)
            except IndexError as exc:
                logger.error("Error while downloading %s : this package does not exist, "
                             "try to search it via --search before",
                             packagename)
                unavail_downloads.append((item, exc))
                continue
            except Exception as exc:
                logger.error("Error while downloading %s : %s", packagename, exc)
                failed_downloads.append((item, exc))
                continue

            filepath = os.path.join(download_folder, filename)

            #if file exists, continue
            if self.append_version and os.path.isfile(filepath):
                logger.info("File %s already exists, skipping.", filename)
                continue

            additional_data = data_iter['additionalData']
            splits = data_iter['splits']
            total_size = int(data_iter['file']['total_size'])
            chunk_size = int(data_iter['file']['chunk_size'])
            try:
                with open(filepath, "wb") as fbuffer:
                    bar = util.progressbar(expected_size=total_size, hide=not self.progress_bar)
                    for index, chunk in enumerate(data_iter['file']['data']):
                        fbuffer.write(chunk)
                        bar.show(index * chunk_size)
                    bar.done()
                if additional_data:
                    for obb_file in additional_data:
                        obb_filename = "%s.%s.%s.obb" % (obb_file["type"], obb_file["versionCode"], data_iter["docId"])
                        obb_filename = os.path.join(download_folder, obb_filename)
                        obb_total_size = int(obb_file['file']['total_size'])
                        obb_chunk_size = int(obb_file['file']['chunk_size'])
                        with open(obb_filename, "wb") as fbuffer:
                            bar = util.progressbar(expected_size=obb_total_size, hide=not self.progress_bar)
                            for index, chunk in enumerate(obb_file["file"]["data"]):
                                fbuffer.write(chunk)
                                bar.show(index * obb_chunk_size)
                            bar.done()
                if splits:
                    for split in splits:
                        split_total_size = int(split['file']['total_size'])
                        split_chunk_size = int(split['file']['chunk_size'])
                        with open(split['name'], "wb") as fbuffer:
                            bar = util.progressbar(expected_size=split_total_size, hide=not self.progress_bar)
                            for index, chunk in enumerate(split["file"]["data"]):
                                fbuffer.write(chunk)
                                bar.show(index * split_chunk_size)
                            bar.done()
            except IOError as exc:
                logger.error("Error while writing %s : %s", packagename, exc)
                failed_downloads.append((item, exc))

        success_items = set(success_downloads)
        failed_items = {item[0] for item, error in failed_downloads}
        unavail_items = {item[0] for item, error in unavail_downloads}
        to_download_items = {item[0] for item in pkg_todownload}

        self.write_logfiles(success_items, failed_items, unavail_items)
        self.print_failed(failed_downloads + unavail_downloads)
        return to_download_items - failed_items
Code example #21
File: h36m.py Project: isarandi/metrabs
def make_h36m(train_subjects=(1, 5, 6, 7, 8),
              valid_subjects=(),
              test_subjects=(9, 11),
              correct_S9=True,
              partial_visibility=False):
    joint_names = ('rhip,rkne,rank,lhip,lkne,lank,tors,neck,head,htop,'
                   'lsho,lelb,lwri,rsho,relb,rwri,pelv'.split(','))
    edges = ('htop-head-neck-lsho-lelb-lwri,neck-rsho-relb-rwri,'
             'neck-tors-pelv-lhip-lkne-lank,pelv-rhip-rkne-rank')
    joint_info = ps3d.JointInfo(joint_names, edges)

    if not util.all_disjoint(train_subjects, valid_subjects, test_subjects):
        raise Exception('The sets of train, validation and test subjects must be disjoint.')

    # use last subject of the non-test subjects for validation
    train_examples = []
    test_examples = []
    valid_examples = []
    pool = util.BoundedPool(None, 120)

    if partial_visibility:
        dir_suffix = '_partial'
        further_expansion_factor = 1.8
    else:
        dir_suffix = '' if correct_S9 else 'incorrect_S9'
        further_expansion_factor = 1

    for i_subject in [*test_subjects, *train_subjects, *valid_subjects]:
        if i_subject in train_subjects:
            examples_container = train_examples
        elif i_subject in valid_subjects:
            examples_container = valid_examples
        else:
            examples_container = test_examples

        frame_step = 5 if i_subject in train_subjects else 64

        for activity_name, camera_id in itertools.product(
                get_activity_names(i_subject), range(4)):
            print(f'Processing S{i_subject} {activity_name} {camera_id}')
            image_relpaths, world_coords_all, bboxes, camera = get_examples(
                i_subject,
                activity_name,
                camera_id,
                frame_step=frame_step,
                correct_S9=correct_S9)
            prev_coords = None
            for image_relpath, world_coords, bbox in zip(
                    util.progressbar(image_relpaths), world_coords_all,
                    bboxes):
                # Using very similar examples is wasteful when training. Therefore:
                # skip frame if all keypoints are within a distance compared to last stored frame.
                # This is not done when testing, as it would change the results.
                if (i_subject in train_subjects and prev_coords is not None
                        and np.all(
                            np.linalg.norm(world_coords -
                                           prev_coords, axis=1) < 100)):
                    continue
                prev_coords = world_coords
                activity_name = activity_name.split(' ')[0]
                ex = ps3d.Pose3DExample(image_relpath,
                                        world_coords,
                                        bbox,
                                        camera,
                                        activity_name=activity_name)
                new_image_relpath = image_relpath.replace(
                    'h36m', f'h36m_downscaled{dir_suffix}')
                pool.apply_async(
                    make_efficient_example,
                    (ex, new_image_relpath, further_expansion_factor),
                    callback=examples_container.append)

    print('Waiting for tasks...')
    pool.close()
    pool.join()
    print('Done...')
    train_examples.sort(key=lambda x: x.image_path)
    valid_examples.sort(key=lambda x: x.image_path)
    test_examples.sort(key=lambda x: x.image_path)
    return ps3d.Pose3DDataset(joint_info, train_examples, valid_examples,
                              test_examples)
Code example #22
File: datasets3d.py Project: matiasmolinas/metrabs
def convert_examples(src_examples, mapping):
    return [
        convert_example(e, mapping) for e in util.progressbar(src_examples)
    ]
Code example #23
def track_them(detector,
               pose_estimator,
               frame_paths,
               poses2d_true,
               joint_info2d,
               joint_info3d,
               q,
               n_tracks=None,
               camera=None):
    if poses2d_true is not None:
        n_tracks = poses2d_true.shape[1]
        prev_poses2d_pred_ordered = np.zeros(
            (n_tracks, joint_info3d.n_joints, 2))
        tracks = [[] for _ in range(n_tracks)]
    elif n_tracks is not None:
        prev_poses2d_pred_ordered = None
        tracks = [[(-1, np.full((joint_info3d.n_joints, 3),
                                fill_value=np.inf))] for _ in range(n_tracks)]
    else:
        prev_poses2d_pred_ordered = None
        tracks = []

    dataset = tf.data.Dataset.from_tensor_slices(frame_paths)
    dataset = dataset.map(load_image,
                          tf.data.experimental.AUTOTUNE,
                          deterministic=False)

    if FLAGS.batched:
        dataset = predict_in_batches(dataset, camera, detector, pose_estimator)

    for i_frame, item in enumerate(util.progressbar(dataset)):
        if FLAGS.batched:
            frame, detections, poses = item
            crop_boxes = detections
            if camera is None:
                camera = get_main_camera(frame.shape)
        else:
            frame = item[0].numpy()
            if camera is None:
                camera = get_main_camera(frame.shape)
            detections = detector(frame[np.newaxis], 0.5, 0.4)[0].numpy()

            # Inject new boxes based on the previous poses
            crop_boxes = get_crop_boxes(i_frame, camera, tracks, detections)
            poses = pose_estimator.predict_single_image(
                frame, camera.intrinsic_matrix, crop_boxes[..., :4], 65,
                FLAGS.crops).numpy()

        pose_sanity = [
            is_pose_sane(pose, mean_bone_lengths, ji) for pose in poses
        ]
        poses = poses[pose_sanity]
        confs = np.array(crop_boxes)[:, 4][pose_sanity]
        poses, confs = nms_pose(poses, confs)

        if FLAGS.gt_assoc or (i_frame == 0 and poses2d_true is not None):
            poses2d_pred = [camera.camera_to_image(pose) for pose in poses]
            poses_ordered, prev_poses2d_pred_ordered = associate_predictions(
                poses, poses2d_pred, poses2d_true[i_frame],
                prev_poses2d_pred_ordered, joint_info3d, joint_info2d)
            for pose, track in zip(poses_ordered, tracks):
                if not np.any(np.isnan(pose)):
                    track.append((i_frame, pose))
        else:
            update_tracks(i_frame, tracks, poses, confs)

        poses = np.array([t[-1][1] for t in tracks if t])
        if q is not None:
            for box in detections:
                improc.draw_box(frame, box, color=(255, 0, 0), thickness=5)
            q.put((frame, poses, camera))

    return tracks
Code example #24
def build_graph(blastInfoFilename, blastdir):

    cutoffRatio = conf.cutoffRatio
    evalueCutoff = conf.evalueCutoff

    g = nx.Graph()

    # a dictionary that stores node names by the protein names
    nodeNames = {}

    # load protein lengths (this assumes that the filename is the same as the input filename)
    protLenDict = load(open(os.path.join(conf.proteinLenFolder, blastInfoFilename), "rb"))

    numlines = len(open(os.path.join(blastdir, blastInfoFilename), "r").readlines())
    # add the HSP edges
    print "Proc 1"
    util.progressbarGuide(20)
    numBlastLines = 0
    with open(os.path.join(blastdir, blastInfoFilename), "r") as f:
        for i, line in enumerate(f):
            util.progressbar(i, numlines, 20)
            numBlastLines += 1
            if len(line) > 0:
                hsp = read_HSP(line)
                goodeval = hsp["EValue"] < evalueCutoff
                query = hsp["query_id"]
                subject = hsp["target_id"]
                qLen = hsp["query_len"]
                sLen = hsp["target_len"]

                # filter out similar proteins
                sameID = (query == subject)
                wholeProt1 = (abs(qLen)/float(protLenDict[query])) > conf.simularProteinRatio
                wholeProt2 = (abs(sLen)/float(protLenDict[subject])) > conf.simularProteinRatio

                notsameprotein = (not sameID) and (not (wholeProt1 and wholeProt2))
                # notsameprotein = (not sameID)
                if goodeval and notsameprotein:
                    # Add the nodes (p_1,s_1,e_1) and (p_2,s_2,e_2) and create an edge between them
                    g.add_node(nodeName(hsp, "query"))
                    g.add_node(nodeName(hsp, "target"))
                    g.add_edge(nodeName(hsp, "query"), nodeName(hsp, "target"), eValue=hsp["EValue"])

                    # add the two node names to the nodeNames dictionary and take away the duplicates
                    addToDict(nodeNames, nodeName(hsp, "query")[0], nodeName(hsp, "query"))
                    addToDict(nodeNames, nodeName(hsp, "target")[0], nodeName(hsp, "target"))

    # add the Interval edges
    proteins = nodeNames.keys()
    lenprot = len(proteins)
    numIntEdge = 0
    print "Proc 2"
    util.progressbarGuide(20)
    for j, protein in enumerate(proteins):
        util.progressbar(j, lenprot, 20)
        subNodeNames = nodeNames[protein]
        # print subNodeNames
        for i in range(len(subNodeNames) - 1):
            for k in range(i + 1, len(subNodeNames)):
                name1 = subNodeNames[i]
                name2 = subNodeNames[k]

                overlapPairs = findOverlapIntervalsMutual(name1, name2, cutoffRatio)
                # overlapPairs = findOverlapIntervalsMutualOld(name1, name2)
                # print "test"
                for overlapPair in overlapPairs:
                    g.add_edge(overlapPair[0], overlapPair[1])
                    numIntEdge += 1
    util.printL("Number of Blast Edges: "+str(numBlastLines)+"\n")
    util.printL("number of IntervalEdges added: "+str(numIntEdge)+"\n")
    # # save the HSPIntGraph
    # splitFilename = blastInfoFilename.split(".")
    # fileExt = "." + splitFilename[len(splitFilename) - 1]
    # outputFile = blastInfoFilename.replace(fileExt, "") + '_HSPIntGraph.gpickle'
    # outputPath = os.path.join(hspIntGraphdir, outputFile)
    # with open(outputPath, 'wb') as fout:
    #     dump(g, fout, HIGHEST_PROTOCOL)

    return g, numBlastLines, numIntEdge
Code example #25
File: muco.py Project: matiasmolinas/metrabs
def make_muco():
    joint_info, selected_joints = make_joint_info()

    root_3dhp = f'{paths.DATA_ROOT}/3dhp'
    root_muco = f'{paths.DATA_ROOT}/muco'
    sample_info = np.load(f'{root_muco}/composite_frame_origins.npy')
    n_all_joints = 28
    valid_indices = list(np.load(f'{root_muco}/valid_composite_frame_indices.npy'))
    all_detections = util.load_pickle(f'{root_muco}/yolov3_detections.pkl')
    all_detections = np.array([all_detections[k] for k in sorted(all_detections.keys())])
    all_visible_boxes = np.load(f'{root_muco}/visible_boxes.npy')
    matloader = functools.lru_cache(1024)(matlabfile.load)

    @functools.lru_cache(1024)
    def get_world_coords(i_subject, i_seq, i_cam, anno_name):
        seqpath = f'{root_3dhp}/S{i_subject}/Seq{i_seq}'
        anno_file = matloader(f'{seqpath}/annot.mat')
        camcoords = anno_file[anno_name][i_cam].reshape(
            [-1, n_all_joints, 3])[:, selected_joints]
        camera = load_cameras(f'{seqpath}/camera.calibration')[i_cam]
        world_coords = [camera.camera_to_world(c) for c in camcoords]
        return world_coords

    examples = []

    with util.BoundedPool(None, 120) as pool:
        for i_sample, people, detections, visible_boxes in zip(
                util.progressbar(valid_indices), sample_info[valid_indices],
                all_detections[valid_indices], all_visible_boxes[valid_indices]):

            detections = [box for box in detections if box[-1] > 0.1]
            if not detections:
                continue

            filename = f'{i_sample + 1:06d}.jpg'
            image_relpath = f'unaugmented_set_001/{filename[:2]}/{filename[:4]}/{filename}'

            gt_people = []
            for i_person, ((i_subject, i_seq, i_cam, i_frame), visible_box) in enumerate(
                    zip(people, visible_boxes)):
                seqpath = f'{root_3dhp}/S{i_subject}/Seq{i_seq}'
                world_coords = get_world_coords(i_subject, i_seq, i_cam, 'annot3')[i_frame]
                univ_world_coords = get_world_coords(
                    i_subject, i_seq, i_cam, 'univ_annot3')[i_frame]
                camera = load_cameras(f'{seqpath}/camera.calibration')[i_cam]

                im_coords = camera.world_to_image(world_coords)
                coord_bbox = boxlib.expand(boxlib.intersect(
                    boxlib.bb_of_points(im_coords),
                    boxlib.full_box([2048, 2048])), 1.05)
                bbox = boxlib.intersect_vertical(visible_box, coord_bbox)

                ex = p3ds.Pose3DExample(
                    image_relpath, world_coords, bbox, camera, mask=None,
                    univ_coords=univ_world_coords)
                gt_people.append(ex)

            if not gt_people:
                continue

            iou_matrix = np.array([[boxlib.iou(gt_person.bbox, box[:4])
                                    for box in detections]
                                   for gt_person in gt_people])
            gt_indices, det_indices = scipy.optimize.linear_sum_assignment(-iou_matrix)

            for i_gt, i_det in zip(gt_indices, det_indices):
                gt_box = gt_people[i_gt].bbox
                det_box = detections[i_det]
                if (iou_matrix[i_gt, i_det] > 0.1 and
                        boxlib.area(det_box) < 2 * boxlib.area(gt_box)):
                    ex = gt_people[i_gt]
                    ex.bbox = np.array(detections[i_det][:4])
                    pool.apply_async(make_efficient_example, (ex, root_muco, i_gt),
                                     callback=examples.append)

    examples.sort(key=lambda ex: ex.image_path)
    return p3ds.Pose3DDataset(joint_info, examples)
Code example #26
def generate_all_overall_masks(composites, i_valids):
    with util.BoundedPool(None, 120) as pool:
        for i, sample in zip(i_valids, util.progressbar(composites[i_valids])):
            pool.apply_async(save_overall_mask, (sample, i))
Code example #27
File: datasets2d.py Project: isarandi/metrabs
def make_coco(single_person=True):
    joint_info = JointInfo(
        'nose,leye,reye,lear,rear,lsho,rsho,lelb,relb,lwri,rwri,lhip,rhip,lkne,rkne,lank,rank',
        'lsho-lelb-lwri,rsho-relb-rwri,lhip-lkne-lank,rhip-rkne-rank,lear-leye-nose-reye-rear')
    n_joints = joint_info.n_joints
    learning_phase_shortnames = {TRAIN: 'train', VALID: 'val', TEST: 'test'}
    UNLABELED = 0
    OCCLUDED = 1
    VISIBLE = 2
    iou_threshold = 0.1 if single_person else 0.5

    suffix = '' if single_person else '_multi'
    examples_per_phase = {TRAIN: [], VALID: []}
    with util.BoundedPool(None, 120) as pool:
        for example_phase in (TRAIN, VALID):
            phase_shortname = learning_phase_shortnames[example_phase]
            coco_filepath = (
                f'{paths.DATA_ROOT}/coco/annotations/person_keypoints_{phase_shortname}2014.json')
            coco = pycocotools.coco.COCO(coco_filepath)

            impath_to_examples = {}
            for ann in coco.anns.values():
                filename = coco.imgs[ann['image_id']]['file_name']
                image_path = f'{paths.DATA_ROOT}/coco/{phase_shortname}2014/{filename}'

                joints = np.array(ann['keypoints']).reshape([-1, 3])
                visibilities = joints[:, 2]
                coords = joints[:, :2].astype(np.float32).copy()
                n_visible_joints = np.count_nonzero(visibilities == VISIBLE)
                n_occluded_joints = np.count_nonzero(visibilities == OCCLUDED)
                n_labeled_joints = n_occluded_joints + n_visible_joints

                if n_visible_joints >= n_joints / 3 and n_labeled_joints >= n_joints / 2:
                    coords[visibilities == UNLABELED] = np.nan
                    bbox_pt1 = np.array(ann['bbox'][0:2], np.float32)
                    bbox_wh = np.array(ann['bbox'][2:4], np.float32)
                    bbox = np.array([*bbox_pt1, *bbox_wh])
                    ex = Pose2DExample(image_path, coords, bbox=bbox)
                    impath_to_examples.setdefault(image_path, []).append(ex)

            n_images = len(impath_to_examples)
            for impath, examples in util.progressbar(impath_to_examples.items(), total=n_images):
                for i_example, example in enumerate(examples):
                    box = boxlib.expand(boxlib.bb_of_points(example.coords), 1.25)
                    if np.max(box[2:]) < 200:
                        continue

                    if single_person:
                        other_boxes = [boxlib.expand(boxlib.bb_of_points(e.coords), 1.25)
                                       for e in examples if e is not example]
                        ious = np.array([boxlib.iou(b, box) for b in other_boxes])
                        usable = np.all(ious < iou_threshold)
                    else:
                        usable = True

                    if usable:
                        new_im_path = impath.replace('coco', 'coco_downscaled' + suffix)
                        without_ext, ext = os.path.splitext(new_im_path)
                        new_im_path = f'{without_ext}_{i_example:02d}{ext}'
                        pool.apply_async(
                            make_efficient_example, (example, new_im_path),
                            callback=examples_per_phase[example_phase].append)

    examples_per_phase[TRAIN].sort(key=lambda ex: ex.image_path)
    examples_per_phase[VALID].sort(key=lambda ex: ex.image_path)
    return Pose2DDataset(joint_info, examples_per_phase[TRAIN], examples_per_phase[VALID])