Example #1
def parse_args():
    parser = argparse.ArgumentParser(description='3DMM Fitting')
    parser.add_argument('-j', '--workers', default=6, type=int)
    parser.add_argument('--epochs', default=40, type=int)
    parser.add_argument('--start-epoch', default=1, type=int)
    parser.add_argument('-b', '--batch-size', default=128, type=int)
    parser.add_argument('-vb', '--val-batch-size', default=32, type=int)
    parser.add_argument('--base-lr',
                        '--learning-rate',
                        default=0.001,
                        type=float)
    parser.add_argument('--momentum',
                        default=0.9,
                        type=float,
                        metavar='M',
                        help='momentum')
    parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float)
    parser.add_argument('--print-freq', '-p', default=20, type=int)
    parser.add_argument('--resume', default='', type=str, metavar='PATH')
    parser.add_argument('--devices-id', default='0,1', type=str)
    parser.add_argument('--filelists-train', default='', type=str)
    parser.add_argument('--filelists-val', default='', type=str)
    parser.add_argument('--root', default='')
    parser.add_argument('--snapshot', default='', type=str)
    parser.add_argument('--log-file', default='output.log', type=str)
    parser.add_argument('--log-mode', default='w', type=str)
    parser.add_argument('--size-average', default='true', type=str2bool)
    parser.add_argument('--num-classes', default=62, type=int)
    parser.add_argument('--arch',
                        default='mobilenet_1',
                        type=str,
                        choices=arch_choices)
    parser.add_argument('--frozen', default='false', type=str2bool)
    parser.add_argument('--milestones', default='15,25,30', type=str)
    parser.add_argument('--task', default='all', type=str)
    parser.add_argument('--test_initial', default='false', type=str2bool)
    parser.add_argument('--warmup', default=-1, type=int)
    parser.add_argument('--param-fp-train', default='', type=str)
    parser.add_argument('--param-fp-val', default='')
    parser.add_argument('--opt-style', default='resample',
                        type=str)  # resample
    parser.add_argument('--resample-num', default=132, type=int)
    parser.add_argument('--loss', default='vdc', type=str)

    global args
    args = parser.parse_args()

    # some other operations
    args.devices_id = [int(d) for d in args.devices_id.split(',')]
    args.milestones = [int(m) for m in args.milestones.split(',')]

    snapshot_dir = osp.split(args.snapshot)[0]
    mkdir(snapshot_dir)
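
Several of the flags above (--size-average, --frozen, --test_initial) pass type=str2bool, a converter that is not defined in this excerpt. A minimal sketch of such a helper, assuming it only needs to map the usual true/false spellings coming from the command line:

import argparse


def str2bool(v):
    # Hypothetical helper, not shown in the original excerpt: map common
    # true/false spellings to a bool and reject anything else.
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', '0'):
        return False
    raise argparse.ArgumentTypeError('boolean value expected, got {!r}'.format(v))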
Example #2
def plot_case(images, all_scores, raw_scores, locations, derender_objects, gt_objects, case_name,
              output_folder):
    fig, (ax1, ax3, ax2) = plt.subplots(nrows=3, ncols=1, figsize=(4.5, 10))
    line, = ax2.plot([], [], "k")
    mkdir(os.path.join(output_folder, "imgs"))
    for i, (image, raw_score, xs, ys, derender_object, gt_object) in enumerate(
            zip(images, raw_scores, locations[0], locations[1], derender_objects, gt_objects), 1):
        ax1.imshow(image)
        ax1.axis('off')

        ax2.clear()
        line.set_xdata(range(i))
        line.set_ydata(all_scores[:i])
        ax2.plot(range(i), all_scores[:i])
        ax2.axvline(x=i, color="r", linestyle='--')
        plt.draw()

        perturbed_score = []
        for score in raw_score:
            perturbed_score.append(score + np.random.rand() * .001)
        bp = ax2.boxplot(perturbed_score, positions=[i], showfliers=False, showcaps=False, whis=[25, 75])
        for element in ['boxes', 'whiskers', 'fliers', 'means', 'medians', 'caps']:
            plt.setp(bp[element], color="#1f77b4")

        ax2.set_xlim(0, len(images))
        ax2.set_ylim(0, 12)
        ax2.get_xaxis().set_ticklabels([])
        ax2.axes.get_yaxis().set_ticklabels([])

        ax3.clear()
        ax3.scatter(ys, [-x for x in xs], 40, alpha=.2)

        derender_xs = [obj["location"][1] for obj in derender_object["objects"]]
        derender_ys = [-obj["location"][0] for obj in derender_object["objects"]]
        ax3.scatter(derender_xs, derender_ys, 10)

        if gt_object is not None:
            gt_xs = [obj["location"][1] for obj in gt_object["objects"]]
            gt_ys = [-obj["location"][0] for obj in gt_object["objects"]]
            ax3.scatter(gt_xs, gt_ys, 10)

        ax3.set_xlim(-4, 4)
        ax3.set_ylim(-1., 2.5)

        ax3.get_xaxis().set_ticklabels([])
        ax3.get_yaxis().set_ticklabels([])
        fig.savefig("{}/imgs/{}_{:03d}.png".format(output_folder, case_name, i))
        print("{}/imgs/{}_{:03d}.png generated".format(output_folder, case_name, i))
    fig.savefig("{}/{}_score.png".format(output_folder, case_name))
Example #3
def main():
    args = parse_args()

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = mkdir(cfg.OUTPUT_DIR)
    assert_proper_output_dir(args.config_file, output_dir)

    logger = setup_logger("train_logger", os.path.join(output_dir, "log.txt"))
    logger.info("Running with config:\n{}".format(cfg))

    train(cfg, args)

Example #4

def main(args):
    cfg = default_cfg.clone()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_folder = mkdir(cfg.OUTPUT_FOLDER)
    assert_proper_output_dir(args.config_file, output_folder)

    start_time = time.time()

    processes = []
    case_names = cfg.CASE_NAMES
    if len(case_names) == 0:
        case_names = sorted([dir_name for dir_name in os.listdir(cfg.ANNOTATION_FOLDER) if
                             os.path.isdir(os.path.join(cfg.ANNOTATION_FOLDER, dir_name)) and "." not in dir_name])
    if args.start_index is None:
        start_index = get_host_id() % args.stride
    else:
        start_index = args.start_index
    case_names = case_names[start_index::args.stride]

    manager = Manager()
    n_filter = manager.Semaphore(1)
    args.n_filter = n_filter

    for case_name in case_names:
        p = Process(target=run_updater, args=(cfg, args, case_name))
        processes.append(p)
        p.start()
    for p in processes:
        p.join()

    if not args.only_plot:
        for case_name in case_names:
            results = read_serialized(os.path.join(output_folder, "results", "{}.json".format(case_name)))
            print(case_name,
                  {key: result for key, result in results[case_name].items() if key in ["sum", "mean", "max"]})

    print('| finish with time ', time.time() - start_time)
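
Both main() functions above use the return value of mkdir as well as its side effect (for example, output_dir = mkdir(cfg.OUTPUT_DIR)), so the helper is expected to create the directory if needed and hand the path back. A minimal sketch under that assumption; the project's actual utils.io.mkdir is not shown here:

import os


def mkdir(path):
    # Hypothetical reconstruction: create the directory (and parents) if
    # missing, then return the path so it can be assigned or chained.
    os.makedirs(path, exist_ok=True)
    return path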
Example #5
    USER_HOME_CONFIG_PATH = f"{USER_HOME_PATH}/.config"
    BACKUP_DIR = "backup-pc"
    BACKUP_NAME = f"{BACKUP_DIR}.txz"
    BACKUP_PATH = f"{TMP_PATH}/{BACKUP_DIR}"
    BACKUP_HOME_PATH = f"{BACKUP_PATH}/_home"
    BACKUP_HOME_CONFIG_PATH = f"{BACKUP_HOME_PATH}/_config"
    BACKUP_ETC_PATH = f"{BACKUP_PATH}/_etc"
    BACKUP_VAR_PATH = f"{BACKUP_PATH}/_var"

    # Create a temp directory for backup
    io.cd(TMP_PATH)
    io.rm2([BACKUP_DIR, BACKUP_NAME])

    ## Backup ~ ##
    # .config
    io.mkdir(BACKUP_HOME_CONFIG_PATH)
    io.cp_file(f"{USER_HOME_CONFIG_PATH}/katerc", BACKUP_HOME_CONFIG_PATH)
    io.cp_dir(f"{USER_HOME_CONFIG_PATH}/bunkus.org",
              f"{BACKUP_HOME_CONFIG_PATH}/bunkus.org")
    io.cp_file(f"{USER_HOME_CONFIG_PATH}/mkvtoolnix-guirc",
               BACKUP_HOME_CONFIG_PATH)
    io.cp_file(f"{USER_HOME_CONFIG_PATH}/makemkvrc", BACKUP_HOME_CONFIG_PATH)
    io.cp_dir(f"{USER_HOME_CONFIG_PATH}/Transmission Remote GUI",
              f"{BACKUP_HOME_CONFIG_PATH}/Transmission Remote GUI")
    # mpv
    io.mkdir(f"{BACKUP_HOME_CONFIG_PATH}/mpv")
    io.cp_dir(f"{USER_HOME_CONFIG_PATH}/mpv/hrtf",
              f"{BACKUP_HOME_CONFIG_PATH}/mpv")
    io.cp_file(f"{USER_HOME_CONFIG_PATH}/mpv/input.conf",
               f"{BACKUP_HOME_CONFIG_PATH}/mpv")
    io.cp_file(f"{USER_HOME_CONFIG_PATH}/mpv/mpv.conf",

Example #6

import os
from utils.io import read_serialized, mkdir
from utils.constants import CONTENT_FOLDER

"""
_shape_net_names = ['airplane', 'ashcan', 'bag', 'basket', 'bathtub', 'bed', 'bench', 'bicycle', 'birdhouse',
                    'bookshelf', 'bottle', 'bowl', 'bus', 'cabinet', 'camera', 'can', 'cap', 'car', 'chair',
                    'computer_keyboard', 'dishwasher', 'display', 'earphone', 'faucet', 'file', 'guitar', 'helmet',
                    'jar', 'knife', 'lamp', 'laptop', 'loudspeaker', 'mailbox', 'microphone', 'microwave', 'motorbike',
                    'mug', 'piano', 'pillow', 'pistol', 'pot', 'printer', 'remote', 'rifle', 'rocket', 'skateboard',
                    'sofa', 'stove', 'table', 'telephone', 'tower', 'train', 'vessel', 'washer', 'wine_bottle']
"""

_shape_net_names_sample = ['airplane', 'ashcan', 'bag', 'basket', 'bathtub', 'bed', 'bench', 'bicycle', 'birdhouse',
                    'bookshelf', 'bottle', 'bowl', 'bus', 'cabinet', 'camera', 'can', 'cap']

SIM_SHAPE_FOLDER = mkdir(os.path.join(CONTENT_FOLDER, "phys_sim", "data", "shapes"))
RENDER_SHAPE_FOLDER = mkdir(os.path.join(CONTENT_FOLDER, "render", "data", "shapes"))

SIM_SHAPE_NET_FOLDER = mkdir(os.path.join(CONTENT_FOLDER, "phys_sim", "data", "additional_shapes"))
RENDER_SHAPE_NET_FOLDER = mkdir(os.path.join(CONTENT_FOLDER, "render", "data", "additional_shapes"))

if len(os.listdir(SIM_SHAPE_NET_FOLDER)) > 0:
    SHAPE_NET_CATEGORY = {"{:04d}".format(i): name for i, name in enumerate(_shape_net_names_sample)}
    SHAPE_NET_NUMS = {"{:04d}".format(i): len(os.listdir(os.path.join(SIM_SHAPE_NET_FOLDER, "{:04d}".format(i))))
                      for i in range(len(_shape_net_names_sample))}
    SHAPE_NET_CATEGORY_INVERSE = {v: k for k, v in SHAPE_NET_CATEGORY.items()}

if os.path.exists(os.path.join(SIM_SHAPE_NET_FOLDER, "all_dimensions.json")):
    _shape_net_dimensions = read_serialized(os.path.join(SIM_SHAPE_NET_FOLDER, "all_dimensions.json"))
else:
    _shape_net_dimensions = dict()
Example #7
def main(config):
    # main script
    rendering = config.rendering
    mkdir(rendering.output_dir)
    mkdir(os.path.join(rendering.output_dir, 'imgs'))
    mkdir(os.path.join(rendering.output_dir, 'masks'))
    mkdir(os.path.join(rendering.output_dir, 'depths'))
    mkdir(os.path.join(rendering.output_dir, 'flows'))
    clr_dir(os.path.join(rendering.output_dir, 'imgs'))
    clr_dir(os.path.join(rendering.output_dir, 'masks'))
    clr_dir(os.path.join(rendering.output_dir, 'depths'))
    clr_dir(os.path.join(rendering.output_dir, 'flows'))

    add_ground(rendering)

    # set up render parameters
    render_args = bpy.context.scene.render
    render_args.engine = 'CYCLES'
    render_args.resolution_x = rendering.width
    render_args.resolution_y = rendering.height
    render_args.resolution_percentage = 100
    render_args.tile_x = rendering.render_tile_size
    render_args.tile_y = rendering.render_tile_size
    if rendering.use_gpu:
        if True:
            enable_gpus()
            render_args.tile_x = rendering.render_tile_size_gpu
            render_args.tile_y = rendering.render_tile_size_gpu
        else:
            # blender changed the API for enabling CUDA at some point
            pref = bpy.context.user_preferences.addons["cycles"].preferences
            pref.compute_device_type = "CUDA"
            for device in pref.devices:
                device.use = True
            # bpy.context.user_preferences.system.compute_device_type = 'CUDA'
            # bpy.context.user_preferences.system.compute_device = 'CUDA_0'
            render_args.tile_x = rendering.render_tile_size_gpu
            render_args.tile_y = rendering.render_tile_size_gpu

    # some CYCLES-specific stuff
    bpy.data.worlds['World'].cycles.sample_as_light = True
    bpy.context.scene.cycles.blur_glossy = 2.0
    bpy.context.scene.cycles.samples = rendering.render_num_samples
    bpy.context.scene.cycles.transparent_min_bounces = rendering.render_min_bounces
    bpy.context.scene.cycles.transparent_max_bounces = rendering.render_max_bounces

    if rendering.use_gpu:
        bpy.context.scene.cycles.device = 'GPU'

    bpy.context.scene.use_nodes = True
    bpy.context.scene.view_layers['RenderLayer'].use_pass_object_index = True

    # set up camera
    set_camera(rendering.camera_rho,
               rendering.camera_theta,
               rendering.camera_phi,
               look_at=rendering.camera_look_at)

    # apply jitter to lamp positions
    if rendering.key_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Key'].location[i] += rand_jitter(
                rendering.key_light_jitter)
    if rendering.back_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Back'].location[i] += rand_jitter(
                rendering.back_light_jitter)
    if rendering.fill_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Fill'].location[i] += rand_jitter(
                rendering.fill_light_jitter)

    # set up objects
    om = ObjectManager(config, rendering.shape_dir, rendering.material_dir,
                       rendering.back_wall)

    mask_node = set_mask(os.path.join(rendering.output_dir, "masks"))
    depth_node = set_depth(os.path.join(rendering.output_dir, "depths"))
    flow_node = set_flow(os.path.join(rendering.output_dir, "flows"))

    # load motion
    with open(rendering.motion_file, 'r') as f:
        input_file = json.load(f)
        time_step = float(input_file['timestep'])
        motion = input_file['motion']

    # render it
    render_every = int(1 / time_step / rendering.fps)
    if render_every == 0:
        render_every = 1

    camera = dict(camera_rho=rendering.camera_rho,
                  camera_theta=rendering.camera_theta,
                  camera_phi=rendering.camera_phi,
                  camera_look_at=rendering.camera_look_at)
    scene_anns = dict(case_name=config.case_name, camera=camera, scene=[])

    if rendering.intro_time > 0:
        render_intro(om, rendering, motion[0])

    for n, m in enumerate(motion):
        if n % render_every == 0:
            bpy.context.scene.frame_set(n)
            # objects are before occluders
            for i, obj_motion in enumerate(m["objects"] + m["occluders"]):
                loc = obj_motion['location']
                euler = convert_euler(obj_motion['orientation'])
                om.set_position(om.obj_names[i], loc, euler, key_frame=True)

            i = len(m["objects"]) + len(m["occluders"])

            for desk_motion in m["desks"]:
                for obj_motion in desk_motion:
                    loc = obj_motion['location']
                    euler = convert_euler(obj_motion['orientation'])
                    om.set_position(om.obj_names[i],
                                    loc,
                                    euler,
                                    key_frame=True)
                    i += 1

    for n, m in enumerate(motion):
        if "ABORT" in globals():
            if globals()["ABORT"]:
                print("Aborted")
                raise KeyboardInterrupt

        if n % render_every == 0:
            bpy.context.scene.frame_set(n)
            image_path = os.path.join(
                rendering.output_dir, 'imgs',
                '%s_%06.2fs.png' % (rendering.image_prefix, n * time_step))
            render_args.filepath = image_path
            mask_base_name = '####_%s_%06.2fs.png' % (rendering.image_prefix,
                                                      n * time_step)
            mask_node.file_slots[0].path = mask_base_name
            depth_base_name = '####_%s_%06.2fs.png' % (rendering.image_prefix,
                                                       n * time_step)
            depth_node.file_slots[0].path = depth_base_name
            for ch in "RGBA":
                flow_base_name = '%s_####_%s_%06.2fs.png' % (
                    ch, rendering.image_prefix, n * time_step)
                flow_node[ch].file_slots[0].path = flow_base_name

            bpy.ops.render.render(write_still=True)

            frame_anns = dict(image_path=image_path, objects=[])
            mask_file_path = os.path.join(
                rendering.output_dir, "masks",
                "{:04d}".format(n) + mask_base_name[4:])
            for i, obj_motion in enumerate(m["objects"] + m["occluders"]):
                mask = imread(mask_file_path)[:, :, 0] == i + 1
                frame_anns["objects"].append(om.log(i, obj_motion, mask))

            scene_anns["scene"].append(frame_anns)

    bpy.ops.wm.save_as_mainfile(filepath=os.path.join(rendering.output_dir,
                                                      "scene.blend"),
                                compress=True)
    write_serialized(
        scene_anns,
        os.path.join(rendering.output_dir,
                     "{:s}_ann.yaml".format(rendering.image_prefix)))
Example #8
import os

import numpy as np
from easydict import EasyDict

from dataset.make_all import generate
from utils.geometry import random_spherical_point, get_prospective_location
from utils.io import mkdir, write_serialized, catch_abort
from utils.constants import CONFIG_FOLDER, SIM_OUTPUT_FOLDER, RENDER_OUTPUT_FOLDER, VIDEO_OUTPUT_FOLDER, \
    OCCLUDER_HALF_WIDTH
from utils.misc import rand, random_distinct_colors, repeat_scale, get_host_id, BlenderArgumentParser
from utils.shape_net import SHAPE_DIMENSIONS, random_shape_net

default_shapes = [
    'boot', 'bowling_pin', 'cone', 'cube', 'cylinder', 'sphere', 'toy', 'truck'
]
train_prefix = "train"
TRAIN_CONFIG_FOLDER = mkdir(os.path.join(CONFIG_FOLDER, train_prefix))
TRAIN_SIM_OUTPUT_FOLDER = mkdir(os.path.join(SIM_OUTPUT_FOLDER, train_prefix))
TRAIN_RENDER_OUTPUT_FOLDER = mkdir(
    os.path.join(RENDER_OUTPUT_FOLDER, train_prefix))
TRAIN_VIDEO_OUTPUT_FOLDER = mkdir(
    os.path.join(VIDEO_OUTPUT_FOLDER, train_prefix))


def parse_args():
    parser = BlenderArgumentParser(description='')
    parser.add_argument('--start',
                        help='image index to start',
                        type=int,
                        default=0)
    parser.add_argument('--start_index', help='image index to start', type=int)
    parser.add_argument('--end',
Example #9
        df_fam = pd.DataFrame({'MID': mids})
        df_fam = df_fam.join(df_rel_mat.loc[ids][cols])
        df_fam = df_fam.join(pd.DataFrame({'Gender': genders, 'Name': names}))
        if do_save:
            df_fam.to_csv(dirs_out + "/" + fid + "/mid.csv", index=False)
        dfs_fams.append(df_fam)
    return dfs_fams

logger = []
if __name__ == '__main__':
    from pyfiw.configs import CONFIGS

    logger = log.setup_custom_logger(__name__, f_log=CONFIGS.path.f_log, level=log.INFO)
    out_bin = CONFIGS.path.dpairs
    io.mkdir(out_bin)
    dir_fids = dir_home() + "master-version/fiwdb/FIDs/"
    # dir_fid = dir_home() + "/Dropbox/Families_In_The_Wild/Database/Ann/FW_FIDs/"

    dir_families = io.sys_home() + 'master-version/fiwdb/FIDs/'
    do_sibs = True
    do_parents = True
    do_grandparents = True
    do_save = True
    parse_fids = False
    load_families = False
    if load_families:
        fams = fiw.load_families(dir_families)

    logger.info("Output Bin: {}\nFID folder: {}".format(out_bin, dir_fids))
    logger.info("Parsing siblings: {}\nSaving Pairs: {}\n Parse FIDs: {}".format(do_sibs, do_save, parse_fids))
Example #10
#!/usr/bin/env python3
# coding: utf-8

"""
Backup Plex Media Server library
"""

import os
from pathlib import Path
from utils import io

if __name__ == "__main__":
    HOME_PATH = Path("/home/nyxouf")
    BACKUP_PATH = Path("/data/tmp_pms")
    BACKUP_FILE = Path("pms.tar.lz4")

    io.rm(BACKUP_PATH)
    io.mkdir(BACKUP_PATH)
    io.cd(BACKUP_PATH)
    os.system("rsync --quiet --archive --exclude 'Logs' --exclude 'Crash Reports' --exclude 'plexmediaserver.pid' --exclude 'Updates' '/internal/plex' .")
    #io.cd(HOME_PATH)
    io.cd("/data")
    TARCF = f"tar cf - {str(BACKUP_PATH)} | lz4 -1 > {str(BACKUP_FILE)}"
    os.system(TARCF)
    io.chown(BACKUP_FILE, "nyxouf", "nyxouf")
    io.chmod(BACKUP_FILE, 777)
    io.rm(BACKUP_PATH)
    io.mv(BACKUP_FILE, f"/backup/data/{str(BACKUP_FILE)}")
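
The backup scripts in this collection lean on thin wrappers from a local utils.io module (io.rm, io.mkdir, io.cd, io.mv and friends). Their implementations are not shown; the following is only a rough sketch, assuming they are shims over the standard library:

import os
import shutil
from pathlib import Path


def rm(path):
    # Sketch: remove a file or an entire directory tree, ignoring missing paths.
    p = Path(path)
    if p.is_dir():
        shutil.rmtree(p, ignore_errors=True)
    elif p.exists():
        p.unlink()


def mkdir(path):
    # Sketch: create the directory (and parents) if it does not already exist.
    os.makedirs(path, exist_ok=True)


def cd(path):
    # Sketch: change the current working directory.
    os.chdir(path)


def mv(src, dst):
    # Sketch: move or rename a file or directory.
    shutil.move(str(src), str(dst))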
Example #11
def run_updater(cfg, args, case_name):
    output_folder = cfg.OUTPUT_FOLDER
    result_folder = mkdir(os.path.join(output_folder, "results"))

    gt = read_serialized(os.path.join(cfg.ANNOTATION_FOLDER, case_name, case_name + "_ann.yaml"))
    camera = gt["camera"]

    observation_path = os.path.join(cfg.OBSERVATION_FOLDER, case_name + ".json")
    scenes = read_serialized(observation_path)['scene']
    for s in scenes:
        for o in s["objects"]:
            if "color" not in o:
                o["color"] = "green"

    if not args.only_plot:
        mkdir(os.path.join(output_folder, "logs"))
        logger = setup_logger("{}{}".format(cfg.LOG_PREFIX, case_name),
                              os.path.join(cfg.OUTPUT_FOLDER, "logs", "{}.txt".format(case_name)))
        logger.info('{} start running '.format(case_name))
        logger.info(args)
        logger.info("Running with config:\n{}".format(cfg))

        # run updater
        init_belief = scenes[0]['objects']
        filter_updater = FilterUpdater(cfg, init_belief, case_name, camera, args.n_filter)
        filter_updater.run(scenes[1:])

        score = filter_updater.get_score()
        write_serialized({case_name: score}, os.path.join(result_folder, "{}.json".format(case_name)))

        with open(os.path.join(output_folder, "{}.txt".format(case_name)), 'w') as fout:
            fout.write(
                '| negative log likelihood: ' + json.dumps(
                    {key: result for key, result in score.items() if
                     key in ["sum", "mean", "max", "sum_lower", "mean_lower", "max_lower"]}) + '\n')

        logger.info('{} completed running '.format(case_name))
    else:
        results = read_serialized(os.path.join(result_folder, "{}.json".format(case_name)))
        score = results[case_name]

    images_files = [ann["image_path"] for ann in gt["scene"]][6:]
    with ThreadPool(cpu_count() * 4) as p:
        images = p.map(Image.open, images_files)

    plot_case(images, score["all"], score["raw"], score["location"], scenes[1:], [None] * len(images),
              case_name, output_folder)

    os.system(
        'ffmpeg -nostdin -r %d -pattern_type glob -i \'%s/%s.png\' '
        '-pix_fmt yuv420p -vcodec libx264 -crf 0 %s.mp4 -y'
        % (15, "{}/imgs".format(output_folder), "{}_???".format(case_name),
           "{}/{}_summary".format(output_folder, case_name)))
Example #12
import argparse
import os
from easydict import EasyDict

from multiprocessing import Pool, cpu_count, Process
from dataset.human.build_occluders import get_occluders
from dataset.human.build_objects import get_objects

from dataset.make_all import generate
from utils.io import write_serialized, catch_abort, mkdir
from utils.constants import SIM_OUTPUT_FOLDER, RENDER_OUTPUT_FOLDER, VIDEO_OUTPUT_FOLDER, CONFIG_FOLDER
from utils.misc import random_distinct_colors, get_host_id, BlenderArgumentParser
from utils.shape_net import SHAPE_NET_CATEGORY, SHAPE_CATEGORY

HUMAN_CONFIG_FOLDER = mkdir(os.path.join(CONFIG_FOLDER, "human"))
HUMAN_SIM_OUTPUT_FOLDER = mkdir(os.path.join(SIM_OUTPUT_FOLDER, "human"))
HUMAN_RENDER_OUTPUT_FOLDER = mkdir(os.path.join(RENDER_OUTPUT_FOLDER, "human"))
HUMAN_VIDEO_OUTPUT_FOLDER = mkdir(os.path.join(VIDEO_OUTPUT_FOLDER, "human"))


def parse_args():
    parser = BlenderArgumentParser(description='')
    parser.add_argument('--start_index', help='image index to start', type=int)
    parser.add_argument("--stride",
                        help="image index stride",
                        type=int,
                        default=1)
    parser.add_argument("--requires_valid", type=int, default=0)
    parser.add_argument("--preview", type=int, default=0)
    return parser.parse_args()
Example #13
if __name__ == "__main__":
    TMP_PATH = "/tmp"
    HOME_PATH = "/home/pi"
    BACKUP_DIR = "backup-mpd"
    BACKUP_NAME = f"{BACKUP_DIR}.txz"
    BACKUP_PATH = f"{TMP_PATH}/{BACKUP_DIR}"
    BACKUP_HOME_PATH = f"{BACKUP_PATH}/_home"
    BACKUP_ETC_PATH = f"{BACKUP_PATH}/_etc"
    BACKUP_VAR_PATH = f"{BACKUP_PATH}/_var"
    BACKUP_SYSTEMD_PATH = f"{BACKUP_PATH}/_systemd"

    # Create a temp directory for backup
    io.cd(TMP_PATH)
    io.rm2([BACKUP_DIR, BACKUP_NAME])
    io.mkdir(BACKUP_PATH)

    # Backup home folder
    io.mkdir(BACKUP_HOME_PATH)
    io.cp_dir(f"{HOME_PATH}/mpd", f"{BACKUP_HOME_PATH}/mpd")
    io.cp_file(f"{HOME_PATH}/.zpreztorc", BACKUP_HOME_PATH)
    io.cp_file(f"{HOME_PATH}/.zshrc", BACKUP_HOME_PATH)

    # Backup /etc
    io.mkdir(BACKUP_ETC_PATH)
    io.cp_dir("/etc/nginx", f"{BACKUP_ETC_PATH}/nginx")
    io.cp_file("/etc/fstab", BACKUP_ETC_PATH)
    io.cp_file("/etc/mpd.conf", BACKUP_ETC_PATH)

    # Backup services
    io.mkdir(BACKUP_SYSTEMD_PATH)

Example #14

        big_shapes = ["{:04d}".format(x) for x in range(55) if x % 5 == 0]
    return big_shapes


_suffices = [".mp4", ".ogv"]


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--overwrite", type=int, default=0)
    return parser.parse_args()


if __name__ == '__main__':
    args = parse_args()
    new_human_folder = mkdir(os.path.join(VIDEO_OUTPUT_FOLDER, "human_new_2"))
    clr_dir(new_human_folder)
    file_map = dict(origin=[], destination=[])
    experiment_id = 0
    for shape_cat in SHAPE_CATS:
        big_shapes = get_shapes_from_cat(shape_cat)
        for violation, pairs in CASE_PAIRS.items():
            shapes = random.sample(big_shapes, k=2)
            for shape in shapes:
                case, index = pairs[0]
                origin = "human_{}_{}_{}".format(case, shape, index)
                destination = "{:03d}_surprise_human_{}_{}_{}".format(
                    experiment_id, case, shape, index)
                print(origin, destination)
                experiment_id += 1
Example #15
def plot_result(data_folder, video_file, output_folder):
    video_result = read_serialized(video_file)
    images = get_images(data_folder, video_result)
    raw_scores = video_result["scores"]["raw"]
    locations = video_result["scores"]["location"]
    all_scores = video_result["scores"]["all"]
    case_name = video_result["original_video"].replace("/", "--")

    derender_objects = read_serialized(
        video_result["scene_file"])["scene_states"]

    fig, (ax1, ax3, ax2) = plt.subplots(nrows=3, ncols=1, figsize=(4.5, 10))
    line, = ax2.plot([], [], "k")
    images_folder = "{}/.tmp_imgs/".format(output_folder)
    shutil.rmtree(images_folder, ignore_errors=True)
    mkdir(images_folder)

    for i, (image, raw_score, xs, ys, derender_object, gt_object) in enumerate(
            zip(images, raw_scores, locations[0], locations[1],
                derender_objects, repeat(None)), 1):
        ax1.imshow(image)
        ax1.axis('off')

        ax2.clear()
        line.set_xdata(range(i))
        line.set_ydata(all_scores[:i])
        ax2.plot(range(i), all_scores[:i])
        ax2.axvline(x=i, color="r", linestyle='--')
        plt.draw()

        perturbed_score = []
        for score in raw_score:
            perturbed_score.append(score + np.random.rand() * .001)
        bp = ax2.boxplot(perturbed_score,
                         positions=[i],
                         showfliers=False,
                         showcaps=False,
                         whis=[25, 75])
        for element in [
                'boxes', 'whiskers', 'fliers', 'means', 'medians', 'caps'
        ]:
            plt.setp(bp[element], color="#1f77b4")

        ax2.set_xlim(0, len(images))
        ax2.set_ylim(0, 12)
        ax2.get_xaxis().set_ticklabels([])
        ax2.axes.get_yaxis().set_ticklabels([])

        ax3.clear()
        ax3.scatter(ys, [-x for x in xs], 40, alpha=.2)

        derender_xs = [
            obj["location"][1] for obj in derender_object["objects"]
        ]
        derender_ys = [
            -obj["location"][0] for obj in derender_object["objects"]
        ]
        ax3.scatter(derender_xs, derender_ys, 10)

        if gt_object is not None:
            gt_xs = [obj["location"][1] for obj in gt_object["objects"]]
            gt_ys = [-obj["location"][0] for obj in gt_object["objects"]]
            ax3.scatter(gt_xs, gt_ys, 10)

        ax3.set_xlim(-4, 4)
        ax3.set_ylim(-1., 2.5)

        ax3.get_xaxis().set_ticklabels([])
        ax3.get_yaxis().set_ticklabels([])
        os.makedirs(output_folder, exist_ok=True)
        fig.savefig(os.path.join(images_folder, "{:05d}.png".format(i)))
        print("{}/.tmp_imgs/{:05d}.png generated".format(output_folder, i))
    fig.savefig("{}/{}_score.png".format(output_folder, case_name))
    render_video(images_folder, output_folder, case_name)