Example #1
def process_video(video, cfg, video_number):
    video_folder = video["video_folder"]
    scene_file = glob.glob(os.path.join(video_folder, "*.yaml"))[0]

    meta_data = read_serialized(scene_file)
    camera = meta_data["camera"]
    camera["camera_theta"] *= -1
    camera["camera_phi"] += 90

    min_area = cfg.MIN_AREA

    # "map" hands out a new sequential id the first time an object key is seen
    prev_objects = {
        "initialized": False,
        "map": defaultdict(lambda: len(prev_objects["map"]))
    }
    frames_dicts = [
        process_frame(meta_data["scene"], camera, f, video_number, min_area,
                      video_folder, prev_objects, video["is_possible"])
        for f in range(4, len(meta_data["scene"]))
    ]

    # Leave only objects that never lost continuity:
    # for fr in frames_dicts:
    #     fr["annotations"] = [an for an in fr["annotations"] if an["object_id"] in prev_objects["map"]]

    if len(prev_objects["map"]) > 10:
        print("this video has more than 10 objects: " +
              video["video_folder"])
    return frames_dicts
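All of these examples lean on a read_serialized / write_serialized pair that the snippets never define. Below is a minimal sketch of plausible helpers, assuming they dispatch on the file extension and wrap mappings in EasyDict so that attribute access such as anns_file.scene (Example #2) works; the real implementations may differ.

import json
import os

import yaml  # assumed dependency for the .yaml files used throughout
from easydict import EasyDict  # assumed: several examples rely on attribute access


def read_serialized(path):
    # Hypothetical stand-in: load a JSON or YAML file based on its extension.
    with open(path) as f:
        if os.path.splitext(path)[1].lower() in (".yaml", ".yml"):
            data = yaml.safe_load(f)
        else:
            data = json.load(f)
    return EasyDict(data) if isinstance(data, dict) else data


def write_serialized(data, path):
    # Hypothetical counterpart: dump to JSON or YAML based on the extension.
    with open(path, "w") as f:
        if os.path.splitext(path)[1].lower() in (".yaml", ".yml"):
            yaml.safe_dump(data, f)
        else:
            json.dump(data, f)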
Example #2
def main(case_id, case_name, case_folder, segmentation, annotations):
    anns_file = read_serialized(
        os.path.join(case_folder, "{}_ann.yaml".format(case_name)))
    anns = []
    n_frames = len(anns_file.scene)
    assert n_frames < 500
    for image_index in range(n_frames):
        image = anns_file.scene[image_index]
        image_id = case_id * 500 + image_index  # unique because n_frames < 500

        image.image_filename = os.path.join(case_folder, "imgs",
                                            os.path.basename(image.image_path))
        image.image_index = image_id

        image.gt_objects = []
        for obj in image.objects:
            obj.area = int(mask_util.area(obj.mask))
            if obj.area < AREA_MIN_THRESHOLD:
                continue
            # Tell occluders from cubes
            if obj.scale[0] < .05:
                obj.type = "Occluder"
            else:
                obj.type = "Sphere"
                obj.rotation = [0, 0, 0]
            image.gt_objects.append(obj)

        image.objects = segmentation[image_id]
        anns.append(image)
    annotations[case_id] = anns

    print("{} generated".format(case_name))
Example #3
def process_video(video_folder, vid_number, min_area):
    status_file = os.path.join(video_folder, "status.json")
    if os.path.exists(status_file):
        status = read_serialized(status_file)
    else:
        status = {}

    try:
        camera = normalize_dimensions(status["header"]["camera"])
    except KeyError:
        camera = _DUMMY_CAMERA

    # flatten the camera pose into prefixed scalar terms
    camera_terms = {
        "cam_location_" + k: v for k, v in camera["location"].items()
    }
    camera_terms.update(
        {"cam_rotation_" + k: v for k, v in camera["rotation"].items()})

    frames_dicts = [
        process_frame(video_folder, vid_number, f, min_area, status,
                      camera_terms) for f in range(100)
    ]

    return frames_dicts
Example #4
def compute_scores(cfg, video_dict, n_filter, out_dir, tim_key, distributed):
    video_dict = deepcopy(video_dict)
    scene_dict = read_serialized(video_dict["scene_file"])
    video_dict.update(scene_dict["debug"])
    observations = scene_dict['scene_states']
    observations = list(map(encode_mask_all_objects, observations))

    initial_belief = observations[0]["objects"]
    camera = EasyDict(observations[0]["suggested_view"]["camera"])

    filter_updater = FilterUpdater(cfg.MODULE_CFG, initial_belief, camera, n_filter)
    filter_updater.run(observations[1:])
    score = filter_updater.get_score()
    video_dict["scores"] = score

    results_folder = os.path.join(out_dir, video_dict["perception"])
    out_file = scene_dict["debug"]["original_video"].replace("/", "--") + ".json"
    out_file = os.path.join(results_folder, out_file)
    os.makedirs(results_folder, exist_ok=True)

    write_serialized(video_dict, out_file)

    if distributed:
        send_results_to_tim(out_file, tim_key)

    print("done with {}".format(out_file))
    return video_dict
Example #5
def main(case_name, summary, output_path):
    anns = read_serialized(os.path.join(args.ann_folder, case_name, "{}_ann.yaml".format(case_name)))
    images_files = [ann["image_path"] for ann in anns["scene"]][1:]
    with ThreadPool(cpu_count() * 4) as p:
        images = p.map(Image.open, images_files)
    plot_case(images, summary["all"], summary["raw"], summary["location"], case_name, output_path)

    print("{} generated".format(case_name))
Example #6
def process_frame(frame_json, vid_number, min_area):
    frame_data = read_serialized(str(frame_json))
    frame_num = int(frame_json.parts[-1].split("_")[0])

    depth_file = str(frame_json.parent /
                     f'depth/{str(frame_num).zfill(4)}_depth.png')
    segm_file = str(frame_json.parent /
                    f'segmentation/{str(frame_num).zfill(4)}_seg.png')
    depth_array, segm_array = map(
        lambda x: np.array(Image.open(x), dtype=np.uint64),
        [depth_file, segm_file])

    # pack the RGB segmentation channels into a single integer id per pixel
    segm_array_id = (segm_array[:, :, 0] * 256**2 + segm_array[:, :, 1] * 256 +
                     segm_array[:, :, 2])

    height, width = depth_array.shape
    structural_anns = [
        process_object(*w)
        for w in zip(frame_data["structural"], repeat(segm_array_id),
                     repeat(min_area))
    ]

    nonstructural_anns = [
        process_object(*w)
        for w in zip(frame_data["nonstructural"], repeat(segm_array_id),
                     repeat(min_area))
    ]

    annotations = structural_anns + nonstructural_anns

    annotations = [an for an in annotations if an is not None]

    out_frame = {
        "file_name": depth_file,
        "image_id": vid_number * 500 + frame_num,
        'height': height,
        'width': width,
        'annotations': annotations,
        'original_video': frame_json.parts[-2]
    }

    return out_frame
Example #7
def main(case_id, case_name, case_folder, args):
    anns_file = read_serialized(os.path.join(case_folder, "{}_ann.yaml".format(case_name)))
    n_frames = len(anns_file.scene)
    assert n_frames < 500
    images = []
    annotations = []
    for image_index in range(n_frames):
        image = anns_file.scene[image_index]
        image.image_path = os.path.join(case_folder, "imgs", os.path.basename(image.image_path))
        image_id = case_id * 500 + image_index

        objects = []
        drop_image = False

        for obj in image.objects:
            obj.segmentation = mask2contour(mask_util.decode(obj.mask))
            
            # otherwise could be interpreted as a bbox
            if len(obj.segmentation) == 4:
                drop_image = True
                continue
            obj.area = int(mask_util.area(obj.mask))
            if obj.area < AREA_MIN_THRESHOLD:
                continue
            obj.bbox = list(mask_util.toBbox(obj.mask))
            obj.category_id = CATEGORY2ID[obj.type]
            del obj.mask
            del obj.type
            obj.image_id = image_id
            obj.id = image_id * 10 + len(objects)
            obj.iscrowd = 0
            objects.append(obj)

        if len(objects) > 10:
            raise ValueError("More than 10 objects")

        if len(objects) > 0 and not drop_image:
            for obj in objects:
                annotations.append(obj)
            images.append(dict(file_name=image.image_path, width=WIDTH, height=HEIGHT,
                               id=image_id))
    return images, annotations
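The mask helpers used here (mask_util.decode, mask_util.area, mask_util.toBbox) match pycocotools' RLE API. Assuming mask_util is pycocotools.mask, a quick round trip looks like this:

import numpy as np
from pycocotools import mask as mask_util

# a 4x4 square in an 8x8 frame; encode() expects a Fortran-ordered uint8 array
binary = np.zeros((8, 8), dtype=np.uint8)
binary[2:6, 3:7] = 1

rle = mask_util.encode(np.asfortranarray(binary))  # run-length encoding
print(int(mask_util.area(rle)))                    # 16 pixels
print(list(mask_util.toBbox(rle)))                 # [x, y, width, height]
assert (mask_util.decode(rle) == binary).all()     # back to the binary mask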
Example #8
def main(args):
    cfg = default_cfg.clone()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_folder = mkdir(cfg.OUTPUT_FOLDER)
    assert_proper_output_dir(args.config_file, output_folder)

    start_time = time.time()

    processes = []
    case_names = cfg.CASE_NAMES
    if len(case_names) == 0:
        case_names = sorted([dir_name for dir_name in os.listdir(cfg.ANNOTATION_FOLDER) if
                             os.path.isdir(os.path.join(cfg.ANNOTATION_FOLDER, dir_name)) and "." not in dir_name])
    if args.start_index is None:
        start_index = get_host_id() % args.stride
    else:
        start_index = args.start_index
    case_names = case_names[start_index::args.stride]

    manager = Manager()
    n_filter = manager.Semaphore(1)
    args.n_filter = n_filter

    for case_name in case_names:
        p = Process(target=run_updater, args=(cfg, args, case_name))
        processes.append(p)
        p.start()
    for p in processes:
        p.join()

    if not args.only_plot:
        for case_name in case_names:
            results = read_serialized(os.path.join(output_folder, "results", "{}.json".format(case_name)))
            print(case_name,
                  {key: result for key, result in results[case_name].items() if key in ["sum", "mean", "max"]})

    print('| finish with time ', time.time() - start_time)
Example #9
def process_frame(video_folder, vid_num, frame, frame_num, min_area):
    depth_map_file, segmentation_file, annotation_file = \
        map(lambda ending: os.path.join(video_folder, f'{frame}{ending}'),
            [".depth.npy", ".seg.npy", ".json"])

    segm = np.load(segmentation_file)
    anns = read_serialized(annotation_file)

    height, width = segm.shape
    annotations = [
        process_object(segm, an, oid, min_area)
        for oid, an in enumerate(anns["objects"])
    ]
    # annotations = [an for an in annotations if find_bbox_area(an["bbox"]) > min_area]

    return {
        "file_name": depth_map_file,
        "image_id": vid_num * 500 + frame_num,
        "height": height,
        "width": width,
        "annotations": annotations
    }
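process_object is not included in the snippet, and its signature varies between examples (compare Example #6). A minimal hypothetical sketch, assuming each object's pixels carry its index oid in the label map segm:

import numpy as np


def process_object(segm, ann, oid, min_area):
    # Hypothetical sketch: derive a bbox/area annotation from a label map.
    ys, xs = np.nonzero(segm == oid)
    area = int(len(xs))
    if area < min_area:
        return None  # Example #6 drops such annotations explicitly
    x0, y0 = int(xs.min()), int(ys.min())
    w, h = int(xs.max()) - x0 + 1, int(ys.max()) - y0 + 1
    return dict(ann, bbox=[x0, y0, w, h], area=area, object_id=oid)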
Example #10
    def __init__(self, dataset_cfg, args):
        self.root = dataset_cfg.ROOT
        self.split = dataset_cfg.SPLIT
        object_derender_file = read_serialized(
            dataset_cfg.OBJECT_DERENDER_FILE)

        self.img_tuple_files = []
        self.masks = []
        self.attributes = []
        self.basename2objects = defaultdict(list)

        self.case_names = sorted(os.listdir(self.root))
        if "case_name" in args:
            self.case_names = [args.case_name]
        elif self.split == "TRAIN":
            self.case_names = self.case_names[:int(self._split_point *
                                                   len(self.case_names))]
        elif self.split == "VAL":
            self.case_names = self.case_names[int(self._split_point *
                                                  len(self.case_names)):]

        with Pool(cpu_count()) as p:
            with tqdm(total=len(self.case_names)) as bar:
                results = [
                    p.apply_async(
                        ObjectProposalDataset._prepare_case,
                        (self.root, c, object_derender_file[c].scene),
                        callback=lambda *a: bar.update())
                    for c in self.case_names
                ]
                results = [r.get() for r in results]

        for img_tuple_files, masks, attributes in results:
            self.img_tuple_files.extend(img_tuple_files)
            self.masks.extend(masks)
            if self.split != "TEST":
                self.attributes.extend(to_torch(attributes))
Example #11

def main(case_name, summary, output_path):
    anns = read_serialized(os.path.join(args.ann_folder, case_name, "{}_ann.yaml".format(case_name)))
    images_files = [ann["image_path"] for ann in anns["scene"]][1:]
    with ThreadPool(cpu_count() * 4) as p:
        images = p.map(Image.open, images_files)
    plot_case(images, summary["all"], summary["raw"], summary["location"], case_name, output_path)

    print("{} generated".format(case_name))


if __name__ == '__main__':
    args = parse_args()
    category = os.path.basename(args.summary_path)
    data = read_serialized(os.path.join(args.summary_path, "result.json"))

    if args.output_path:
        output_path = args.output_path
    else:
        output_path = "output/{}".format(category)

    processes = []
    for case_name, summary in data.items():
        p = Process(target=main, args=(case_name, summary, output_path))
        processes.append(p)
        p.start()

    for p in processes:
        p.join()
Example #12
        material.user_clear()
        bpy.data.materials.remove(material)

    for ob in bpy.context.selected_editable_objects:
        ob.active_material_index = 0
        for i in range(len(ob.material_slots)):
            bpy.ops.object.material_slot_remove({'object': ob})

    bpy.ops.wm.save_as_mainfile(filepath=out_path)
    print("{} generated".format(name))


if __name__ == '__main__':
    args = parse_args()
    if os.path.exists(os.path.join(SIM_SHAPE_FOLDER, "all_dimensions.json")):
        all_dimensions = read_serialized(
            os.path.join(SIM_SHAPE_FOLDER, "all_dimensions.json"))
    else:
        all_dimensions = dict()
    for name in args.obj_name:
        while True:
            try:
                obj_to_blend(name, all_dimensions)
            except Exception:  # retry on failure without swallowing KeyboardInterrupt
                continue
            else:
                break

    write_serialized(all_dimensions,
                     os.path.join(SIM_SHAPE_FOLDER, "all_dimensions.json"))
Example #13
def run_updater(cfg, args, case_name):
    output_folder = cfg.OUTPUT_FOLDER
    result_folder = mkdir(os.path.join(output_folder, "results"))

    gt = read_serialized(os.path.join(cfg.ANNOTATION_FOLDER, case_name, case_name + "_ann.yaml"))
    camera = gt["camera"]

    observation_path = os.path.join(cfg.OBSERVATION_FOLDER, case_name + ".json")
    scenes = read_serialized(observation_path)['scene']
    for s in scenes:
        for o in s["objects"]:
            if "color" not in o:
                o["color"] = "green"

    if not args.only_plot:
        mkdir(os.path.join(output_folder, "logs"))
        logger = setup_logger("{}{}".format(cfg.LOG_PREFIX, case_name),
                              os.path.join(cfg.OUTPUT_FOLDER, "logs", "{}.txt".format(case_name)))
        logger.info('{} start running '.format(case_name))
        logger.info(args)
        logger.info("Running with config:\n{}".format(cfg))

        # run updater
        init_belief = scenes[0]['objects']
        filter_updater = FilterUpdater(cfg, init_belief, case_name, camera, args.n_filter)
        filter_updater.run(scenes[1:])

        score = filter_updater.get_score()
        write_serialized({case_name: score}, os.path.join(result_folder, "{}.json".format(case_name)))

        with open(os.path.join(output_folder, "{}.txt".format(case_name)), 'w') as fout:
            fout.write(
                '| negative log likelihood: ' + json.dumps(
                    {key: result for key, result in score.items() if
                     key in ["sum", "mean", "max", "sum_lower", "mean_lower", "max_lower"]}) + '\n')

        logger.info('{} completed running '.format(case_name))
    else:
        results = read_serialized(os.path.join(result_folder, "{}.json".format(case_name)))
        score = results[case_name]

    images_files = [ann["image_path"] for ann in gt["scene"]][6:]
    with ThreadPool(cpu_count() * 4) as p:
        images = p.map(Image.open, images_files)

    plot_case(images, score["all"], score["raw"], score["location"], scenes[1:], [None] * len(images),
              case_name, output_folder)

    os.system(
        'ffmpeg -nostdin -r %d -pattern_type glob -i \'%s/%s.png\' '
        '-pix_fmt yuv420p -vcodec libx264 -crf 0 %s.mp4 -y'
        % (15, "{}/imgs".format(output_folder), "{}_???".format(case_name),
           "{}/{}_summary".format(output_folder, case_name)))
Example #14
def run_adept(cfg, rank, num_machines, tim_key, distributed):
    if cfg.MODULE_CFG.ANALYZE_RESULTS_FOLDER == "None":
        all_scenes = []
        for attributes_key in cfg.MODULE_CFG.ATTRIBUTES_KEYS:
            for dataset_name in cfg.MODULE_CFG.DATASETS.TEST:
                dataset_jsons_dir = get_jsons_directory(
                    cfg.DATA_CFG, "adept", attributes_key, dataset_name)

                dataset_files = sorted(os.listdir(dataset_jsons_dir))
                all_scenes.extend([{
                    "scene_file": os.path.join(dataset_jsons_dir, d),
                    "dataset_split": dn,
                    "perception": attr
                } for d, dn, attr in zip(dataset_files, repeat(dataset_name),
                                         repeat(attributes_key))])
        manager = Manager()
        n_filter = manager.Semaphore(1)
        if cfg.MODULE_CFG.DEBUG:
            if len(cfg.MODULE_CFG.DEBUG_VIDEOS) > 0:
                all_scenes = [
                    s for s in all_scenes
                    if s["scene_file"] in cfg.MODULE_CFG.DEBUG_VIDEOS
                ]
            results = [
                compute_scores(*w) for w in zip(
                    repeat(cfg),
                    all_scenes,
                    repeat(n_filter),
                    repeat(cfg.MODULE_CFG.OUTPUT_DIR),
                    repeat(tim_key),
                    repeat(distributed))
            ]
        else:
            with Pool(int(cpu_count())) as p:
                results = p.starmap(
                    compute_scores,
                    zip(repeat(cfg), all_scenes[rank::num_machines],
                        repeat(n_filter), repeat(cfg.MODULE_CFG.OUTPUT_DIR),
                        repeat(tim_key), repeat(distributed)))

    else:
        cfg.MODULE_CFG.OUTPUT_DIR = cfg.MODULE_CFG.ANALYZE_RESULTS_FOLDER
    if not distributed:
        base_dir = cfg.MODULE_CFG.OUTPUT_DIR
        for attributes_key in cfg.MODULE_CFG.ATTRIBUTES_KEYS:
            # group by dataset
            attri_dir = os.path.join(base_dir, attributes_key)
            results = [
                read_serialized(os.path.join(attri_dir, v))
                for v in os.listdir(attri_dir)
            ]

            # group by matched surprise/control for relative scores
            group_by_control_surprise = CONTROL_SURPRISE_GROUPERS[
                cfg.DATA_CFG.BASE_NAME]
            grouped_dataset = group_by_control_surprise(results)
            scores_per_stimuli = defaultdict(list)
            for stimuli in grouped_dataset:
                for control_surprise_g in grouped_dataset[stimuli]:
                    g_score = relative_score(
                        grouped_dataset[stimuli][control_surprise_g])
                    scores_per_stimuli[stimuli].append(g_score)
                    scores_per_stimuli['total'].append(g_score)
            avg_relative_scores = {
                k: bs.bootstrap(np.array(v), stat_func=bs_stats.mean)
                for k, v in scores_per_stimuli.items()
            }

            write_serialized(
                avg_relative_scores,
                os.path.join(cfg.MODULE_CFG.OUTPUT_DIR,
                             str(attributes_key) + "_relative_scores.json"))

            print(scores_per_stimuli)
Example #15
            shapes = random.sample(big_shapes, k=2)
            for shape in shapes:
                case, index = pairs[0]
                origin = "human_{}_{}_{}".format(case, shape, index)
                destination = "{:03d}_surprise_human_{}_{}_{}".format(
                    experiment_id, case, shape, index)
                print(origin, destination)
                experiment_id += 1

                for i, (case, index) in enumerate(pairs[1:], 1):
                    origin = "human_{}_{}_{}".format(case, shape, index)
                    destination = "{:03d}_control_human_{}_{}_{}".format(
                        experiment_id, case, shape, index)
                    file_map["origin"].append(origin)
                    file_map["destination"].append(destination)
                    experiment_id += 1

    if args.overwrite:
        write_serialized(file_map, "dataset/human/pairs.json")
    else:
        file_map = read_serialized("dataset/human/pairs.json")

    for origin, destination in zip(file_map["origin"],
                                   file_map["destination"]):
        for suffix in _suffices:
            os.system("cp {} {}".format(
                os.path.join(HUMAN_VIDEO_OUTPUT_FOLDER,
                             "{}{}".format(origin, suffix)),
                os.path.join(new_human_folder,
                             "{}{}".format(destination, suffix))))
Example #16
import argparse
import json
import os
import csv
from collections import defaultdict

from dataset.human.result_storage import ResultStorage, CASE_PAIRS, SHAPE_CATS, get_shapes_from_cat
from utils.io import read_serialized
from utils.constants import CONTENT_FOLDER

_prefix = "| negative log likelihood: "

_human_pairs = read_serialized(
    os.path.join(CONTENT_FOLDER, "dataset", "human", "pairs.json"))["origin"]


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--summary_folder", type=str)
    parser.add_argument("--summary_folders", type=str, nargs="+")
    parser.add_argument("--summary_file", type=str)
    parser.add_argument("--violations", type=str)
    parser.add_argument("--shape_cats", type=str)
    parser.add_argument("--use_surprise_metric", type=int, default=True)
    parser.add_argument("--output_folder", type=str)
    return parser.parse_args()


if __name__ == '__main__':
    args = parse_args()
    max_scores = {}
Example #17
        with Pool(cpu_count(), initializer=maybe_fix_zip_file) as p:
            # TODO: this *really* should be using unordered_imap and/or a chunksize of 16
            # right now even if you have 10+ cores, only 1 will be used for the most part
            # since the task complexity is uneven and most of the workers finish early :(
            # TODO: actually switching it over from starmap causes the workers to hang :(
            # I suspect it's some cursed Blender interaction, but I can't reproduce it :(
            all_dimensions = p.starmap(obj_to_blend, worker_args, 16)
            print("starmap done")

        write_serialized(dict(all_dimensions),
                         os.path.join(SIM_SHAPE_NET_FOLDER, "all_dimensions_{:02d}.json".format(args_start_index)))
    else:
        all_dimensions = dict()
        for i in range(args_stride):
            all_dimensions.update(
                read_serialized(os.path.join(SIM_SHAPE_NET_FOLDER, "all_dimensions_{:02d}.json".format(i))))

        write_serialized(dict(all_dimensions),
                         os.path.join(SIM_SHAPE_NET_FOLDER, "all_dimensions.json"))

        to_rotate_index = defaultdict(int)
        for name, dimension in all_dimensions.items():
            # majority vote per 4-digit category prefix: x > y counts
            # toward rotating the whole category
            if dimension[0] > dimension[1]:
                to_rotate_index[name[:4]] += 1
            else:
                to_rotate_index[name[:4]] -= 1
        write_serialized(dict(to_rotate_index),
                         os.path.join(SIM_SHAPE_NET_FOLDER, "categories_to_rotate.json"))

Example #18
            save_path = os.path.join(sim.output_dir, 'imgs',
                                     '%s_%06.2fs.png' % (sim.img_name_prefix, i * sim.timestep))
            print('| saving to %s' % save_path)
            imageio.imsave(save_path, img)
        motion.append(dict(objects=object_motions, occluders=occluder_motions, desks=desk_motions))
        if om.has_collision():
            valid = False
        if step_pattern[i]:
            p.stepSimulation()

    save_path = os.path.join(sim.output_dir, "motion.json")
    output_file = {
        'timestep': sim.timestep,
        'motion': motion
    }
    print('| saving motion file to %s' % save_path)
    write_serialized(output_file, save_path)

    p.disconnect()
    print('| finish')
    if not valid:
        print("Collision detected!")
    return valid


if __name__ == '__main__':
    args = parser.parse_args()
    config = read_serialized(args.config_file)
    config.sim = EasyDict(vars(args))
    main(config)
Example #19
                    'bookshelf', 'bottle', 'bowl', 'bus', 'cabinet', 'camera', 'can', 'cap']

SIM_SHAPE_FOLDER = mkdir(os.path.join(CONTENT_FOLDER, "phys_sim", "data", "shapes"))
RENDER_SHAPE_FOLDER = mkdir(os.path.join(CONTENT_FOLDER, "render", "data", "shapes"))

SIM_SHAPE_NET_FOLDER = mkdir(os.path.join(CONTENT_FOLDER, "phys_sim", "data", "additional_shapes"))
RENDER_SHAPE_NET_FOLDER = mkdir(os.path.join(CONTENT_FOLDER, "render", "data", "additional_shapes"))

if len(os.listdir(SIM_SHAPE_NET_FOLDER)) > 0:
    SHAPE_NET_CATEGORY = {"{:04d}".format(i): name for i, name in enumerate(_shape_net_names_sample)}
    SHAPE_NET_NUMS = {"{:04d}".format(i): len(os.listdir(os.path.join(SIM_SHAPE_NET_FOLDER, "{:04d}".format(i))))
                      for i in range(len(_shape_net_names_sample))}
    SHAPE_NET_CATEGORY_INVERSE = {v: k for k, v in SHAPE_NET_CATEGORY.items()}

if os.path.exists(os.path.join(SIM_SHAPE_NET_FOLDER, "all_dimensions.json")):
    _shape_net_dimensions = read_serialized(os.path.join(SIM_SHAPE_NET_FOLDER, "all_dimensions.json"))
else:
    _shape_net_dimensions = dict()

if os.path.exists(os.path.join(SIM_SHAPE_NET_FOLDER, "categories_to_rotate.json")):
    _rotate_index = read_serialized(os.path.join(SIM_SHAPE_NET_FOLDER, "categories_to_rotate.json"))
    ROTATE_SHAPE_CATEGORY = {k: k for k, v in _rotate_index.items() if v > 0}
else:
    ROTATE_SHAPE_CATEGORY = dict()

if os.path.exists(os.path.join(SIM_SHAPE_FOLDER, "all_dimensions.json")):
    _shape_dimensions = read_serialized(os.path.join(SIM_SHAPE_FOLDER, "all_dimensions.json"))
else:
    _shape_dimensions = dict()
# defined for both branches so later lookups cannot hit a NameError
SHAPE_CATEGORY = {k: k for k in _shape_dimensions.keys()}
SHAPE_DIMENSIONS = {**_shape_dimensions, **_shape_net_dimensions}
Example #20
def plot_result(data_folder, video_file, output_folder):
    video_result = read_serialized(video_file)
    images = get_images(data_folder, video_result)
    raw_scores = video_result["scores"]["raw"]
    locations = video_result["scores"]["location"]
    all_scores = video_result["scores"]["all"]
    case_name = video_result["original_video"].replace("/", "--")

    derender_objects = read_serialized(
        video_result["scene_file"])["scene_states"]

    fig, (ax1, ax3, ax2) = plt.subplots(nrows=3, ncols=1, figsize=(4.5, 10))
    images_folder = "{}/.tmp_imgs/".format(output_folder)
    shutil.rmtree(images_folder, ignore_errors=True)
    mkdir(images_folder)

    for i, (image, raw_score, xs, ys, derender_object, gt_object) in enumerate(
            zip(images, raw_scores, locations[0], locations[1],
                derender_objects, repeat(None)), 1):
        ax1.imshow(image)
        ax1.axis('off')

        ax2.clear()
        ax2.plot(range(i), all_scores[:i])
        ax2.axvline(x=i, color="r", linestyle='--')
        plt.draw()

        # jitter scores slightly so identical values still render a visible box
        perturbed_score = [score + np.random.rand() * .001 for score in raw_score]
        bp = ax2.boxplot(perturbed_score,
                         positions=[i],
                         showfliers=False,
                         showcaps=False,
                         whis=[25, 75])
        for element in [
                'boxes', 'whiskers', 'fliers', 'means', 'medians', 'caps'
        ]:
            plt.setp(bp[element], color="#1f77b4")

        ax2.set_xlim(0, len(images))
        ax2.set_ylim(0, 12)
        ax2.get_xaxis().set_ticklabels([])
        ax2.axes.get_yaxis().set_ticklabels([])

        ax3.clear()
        ax3.scatter(ys, [-x for x in xs], 40, alpha=.2)

        derender_xs = [
            obj["location"][1] for obj in derender_object["objects"]
        ]
        derender_ys = [
            -obj["location"][0] for obj in derender_object["objects"]
        ]
        ax3.scatter(derender_xs, derender_ys, 10)

        if gt_object is not None:
            gt_xs = [obj["location"][1] for obj in gt_object["objects"]]
            gt_ys = [-obj["location"][0] for obj in gt_object["objects"]]
            ax3.scatter(gt_xs, gt_ys, 10)

        ax3.set_xlim(-4, 4)
        ax3.set_ylim(-1., 2.5)

        ax3.get_xaxis().set_ticklabels([])
        ax3.get_yaxis().set_ticklabels([])
        os.makedirs(output_folder, exist_ok=True)
        fig.savefig(os.path.join(images_folder, "{:05d}.png".format(i)))
        print("{}/.tmp_imgs/{:05d}.png generated".format(output_folder, i))
    fig.savefig("{}/{}_score.png".format(output_folder, case_name))
    render_video(images_folder, output_folder, case_name)
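render_video is not shown in this snippet. A plausible sketch, assuming it mirrors the ffmpeg invocation from Example #13 to stitch the saved frames into a clip:

import os


def render_video(images_folder, output_folder, case_name, fps=15):
    # hypothetical helper mirroring Example #13's ffmpeg call
    os.system(
        "ffmpeg -nostdin -r %d -pattern_type glob -i '%s/*.png' "
        "-pix_fmt yuv420p -vcodec libx264 -crf 0 %s.mp4 -y"
        % (fps, images_folder, os.path.join(output_folder, case_name)))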