Example 1
def cmd_setup_slave(args):
    ''' Setup proper slave environment and connect to master server '''
    if not args.export:
        cmd = ["mkt", "setup", "--no-dirs"]
        if args.yes:
            cmd.append("-y")
        if args.force:
            cmd.append("-f")

        subprocess.call(cmd)

    utils.init_config_file()
    section = utils.load_config_file()

    subprocess.call(["sudo", "mkdir", "-p", section['src']])
    subprocess.call(["sudo", "chown", "-R", utils.username() + ":" + utils.group(), section['src']])
    subprocess.call(["sudo", "mkdir", "-p", section['logs']])
    subprocess.call(["sudo", "chown", "-R", utils.username() + ":" + utils.group(), section['logs']])

    with tempfile.NamedTemporaryFile("w") as f:
        f.write(open("/etc/fstab").read())
        export = (args.hostname[0] + ":" + section['src'][:-1] + "\t" +
                  section['src'][:-1] + "\t" + "nfs" + "\t" + "ro,nolock 0 0" + "\n" +
                  args.hostname[0] + ":" + section['logs'][:-1] + "\t" +
                  section['logs'][:-1] + "\t" + "nfs" + "\t" + "rw,nolock 0 0" + "\n")
        f.write(export)
        f.flush()
        subprocess.call(["sudo", "cp", f.name, "/etc/fstab"])

    subprocess.call(["sudo", "mount", "-a", "-t", "nfs", "-o", "remount"])
Example 2
def args_images(parser):
    parser.add_argument(
        "--no-pull",
        dest="pull",
        action="store_false",
        help="Do not update the base docker images from the public docker registry",
        default=True)

    parser.add_argument(
        "--only",
        action="store",
        help="Build only the image with the given docker image tag",
        default=None)

    parser.add_argument(
        "--push",
        action="store_true",
        help="Upload created images to the docker registry (requires an account in the harbor registry)",
        default=False)

    section = utils.load_config_file()
    parser.add_argument("os",
                        nargs='?',
                        help="The image to build",
                        choices=sorted(supported_os),
                        default=section.get('os', default_os))
Example 3
def cmd_build(args):
    """Smart build."""
    from . import cmd_images
    section = utils.load_config_file()
    if not args.project:
        set_args_project(args, section)

    if args.project != 'custom' and args.with_kernel_headers:
        exit("--with-kernel-headers is applicable for \"custom\" target only.")

    build = Build(args.project)

    recipe_dir = None
    if args.build_recipe is not None:
        args.build_recipe = os.path.realpath(args.build_recipe)
        recipe_dir = os.path.dirname(args.build_recipe)

    build.pickle['shell'] = args.run_shell
    build.pickle["passwd"] = "%s:x:%s:%s:%s:%s:/bin/bash" % (
        username(), os.getuid(), os.getgid(), username(), os.getenv("HOME"))
    build.pickle["group"] = "%s:x:%s:" % (group(), os.getgid())
    build.pickle["uid"] = int(os.getuid())
    build.pickle["gid"] = int(os.getgid())
    build.pickle["home"] = os.getenv("HOME")
    build.pickle['clean'] = args.clean
    build.pickle['build_recipe'] = args.build_recipe

    if args.with_kernel_headers:
        build.pickle['kernel'] = section.get('kernel', None)

    do_cmd = ["python3", "/plugins/do-build.py"]
    docker_exec(["run"] +
                build.run_build_cmd(cmd_images.default_os, recipe_dir) +
                do_cmd)
Example 4
def cmd_setup_master(args):
    ''' Setup proper master environment and export it to slaves '''
    if not args.export:
        cmd = ["mkt", "setup"]
        if args.yes:
            cmd.append("-y")
        if args.force:
            cmd.append("-f")

        subprocess.call(cmd)

    utils.init_config_file()
    section = utils.load_config_file()

    with tempfile.NamedTemporaryFile("w") as f:
        f.write(open("/etc/exports").read())
        for host in args.hostnames:
            export = section['src'] + "\t" + \
                     str(host) + "(ro,async,no_subtree_check,no_root_squash)" + \
                     "\n" + section['logs'] + "\t" + \
                     str(host) + "(rw,async,no_subtree_check,no_root_squash)" + "\n"
            f.write(export)
            f.flush()
        subprocess.call(["sudo", "cp", f.name, "/etc/exports"])

    subprocess.call(["sudo", "exportfs", "-r"])
Example 5
def args_run(parser):
    section = utils.load_config_file()
    parser.add_argument("image",
                        nargs='?',
                        choices=sorted(utils.get_images()),
                        help="The IB card configuration to use")

    kernel = parser.add_mutually_exclusive_group()
    kernel.add_argument(
        '--kernel',
        help="Path to the top of a compiled kernel source tree to boot",
        default=None)
    kernel.add_argument('--kernel-rpm',
                        help="Path to a kernel RPM to boot",
                        default=None)

    parser.add_argument('--dir',
                        action="append",
                        help="Other paths to map",
                        default=[])
    parser.add_argument('--simx',
                        metavar='SIMX_DEV',
                        action="append",
                        default=[],
                        choices=sorted(get_simx_rdma_devices()),
                        help="Run using simx to create a mlx5 IB device")
    parser.add_argument(
        '--run-shell',
        action="store_true",
        default=False,
        help="Run a shell inside the container instead of invoking kvm")
    parser.add_argument(
        '--pci',
        metavar="PCI_BDF",
        action="append",
        default=[],
        choices=sorted(get_pci_rdma_devices().keys()),
        help="Pass a given PCI bus/device/function to the guest")
    parser.add_argument(
        '--virt',
        metavar="VIRT_DEV",
        action="append",
        default=[],
        choices=sorted(get_virt_rdma_devices()),
        help="Pass a virtual device type-interface format to the guest")
    parser.add_argument(
        '--boot-script',
        help="Path to the custom boot script which will be executed after boot",
        default=None)
    parser.add_argument('--gdbserver',
                        metavar='PORT',
                        type=int,
                        help="TCP port for QEMU's GDB server",
                        default=None)
    parser.add_argument('--nested_pci',
                        metavar='NESTED_PCI',
                        action="append",
                        default=[],
                        help="Provide PCI list for the nested VM")
Example 6
def test_load_config_file_error(monkeypatch):
    def mock_get_config_file_name():
        return 'invalid_path'

    monkeypatch.setattr(utils, 'get_config_file_name',
                        mock_get_config_file_name)
    with pytest.raises(FileNotFoundError):
        utils.load_config_file()
Example 7
def build_content(base_path: str) -> List[Post]:
    """
    Convert all the markdown files to html files. 
    Returns a list of:
    - destination file path
    - html transformed text
    - meta information
    """

    configuration = load_config_file(base_path)

    html_path = os.path.join(base_path, required_folders["public"])
    result = []

    # iterate over pairs file name and file path
    for filen, file_path in list_content(base_path,
                                         required_folders["content"]):
        # Skip anything that is not a markdown file before reading it
        _, extension = os.path.splitext(filen)
        if extension != '.md':
            continue

        # Open the file
        with open(file_path, "r") as f:
            # Load markdown with the meta extension
            md = markdown.Markdown(
                extensions=['meta', 'tables', 'sane_lists', 'attr_list'])
            # Read the document and convert it to html
            data = f.read()
            html = md.convert(data)

            if 'index' in filen:
                result.append(
                    Post(path=os.path.join(html_path, "index.html"),
                         html=html,
                         meta=md.Meta,
                         config=configuration,
                         index=True))
            else:
                if 'slug' in md.Meta.keys():
                    slug = md.Meta['slug'][0]
                else:
                    slug = filen.split(".")[0]

                md.Meta['slug'] = slug

                result.append(
                    Post(path=os.path.join(html_path, slug, "index.html"),
                         html=html,
                         meta=md.Meta,
                         config=configuration))
    return result
Example 8
def get_params(file):
    params = utils.load_config_file(file)

    # Convert from JSON format to DataLoader format
    params["training_dataset_paths"] = utils.format_dataset_path(
        params["training_dataset_paths"])
    params["test_dataset_paths"] = utils.format_dataset_path(
        params["test_dataset_paths"])
    return params
Example 9
def main():
    parser = create_parser(True)
    args = parser.parse_args()
    spec = load_config_file(args.yaml)  # spec is a dict of dicts of dicts
    trainer = Trainer(args, args.yaml, add_timestamp=True)
    xval_merge = XvalMerge(args, spec["data"], trainer)
    data_pair, val_results = run_on_split(args, split=None, trainer=trainer)
    xval_merge.add(1, data_pair, val_results)
    xval_merge.finalize()
    xval_merge.save(xval_merge.trainer.tb_log_dir)
Example 10
def main(args):

    print("Loading config file: ", args.config)
    params = utils.load_config_file(args.config)
    params["dataset_paths"] = utils.format_dataset_path(
        params["dataset_paths"])
    if "nyu" not in params:
        params["nyu"] = False

    # Data loading code
    print("Creating data loaders...")
    if params["nyu"]:
        from dataloaders.nyu import NYUDataset
        val_dataset = NYUDataset(params["dataset_paths"], split='val')
    else:
        val_dataset = Datasets.FastDepthDataset(params["dataset_paths"],
                                                split='val',
                                                depth_min=params["depth_min"],
                                                depth_max=params["depth_max"],
                                                input_shape_model=(224, 224),
                                                random_crop=False)

    # set batch size to be 1 for validation
    data_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=params["num_workers"],
        pin_memory=True)

    # Set GPU
    params["device"] = torch.device(
        "cuda:{}".format(params["device"])
        if params["device"] >= 0 and torch.cuda.is_available() else "cpu")
    print("Using device", params["device"])

    print("Loading models...")
    models = []
    model_names = []
    for model_dict in params["models"]:
        model_names.append(Path(model_dict["model_path"]).stem)
        model, _ = utils.load_model(model_dict, model_dict["model_path"],
                                    params["device"])
        model.to(params["device"])
        models.append(model)

    # Create output directory
    output_directory = os.path.join(params["save_folder"],
                                    ".".join(model_names))
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    params["output_directory"] = output_directory
    print("Saving results to " + output_directory)

    compare_models(params, data_loader, models)
Example 11
	def _get_blast_version(self, command):
		configs = utils.load_config_file()
		p = subprocess.Popen([configs[command], "-h"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
		output, err = p.communicate()
		rc = p.returncode
		# communicate() returns bytes; decode before comparing against str tokens
		output = output.decode().split()
		for i in range(len(output)):
			if output[i] == "version" and command == "makeblastdb":
				return "BLAST-" + output[i + 1]
			elif "version" in output[i] and command == "blastp":
				return "BLAST-" + output[i + 4]
		return "BLAST version not found\n"
Example 12
	def __init__(self, prep_id, scan_id, hmm_db, out_dir=".", cpu=2):
		self.args = {
			"prep_dir": os.path.join(os.path.abspath(out_dir), prep_id + "_PREPARE"),
			"scan_dir": os.path.join(os.path.abspath(out_dir), scan_id + "_SCAN"),
			"cpu": cpu,
			"hmm_db": hmm_db,
			"configs": utils.load_config_file(),
			"prep_id": prep_id,
			"scan_id": scan_id}
Example 13
def test_load_config(tmpdir, monkeypatch):
    yaml_string = """
        a: 1
        b: 2
    """
    yaml_file = tmpdir.mkdir("sub").join("test.yaml")
    yaml_file.write(yaml_string)

    def mock_get_config_file_name():
        return yaml_file.strpath

    monkeypatch.setattr(utils, 'get_config_file_name',
                        mock_get_config_file_name)
    dictionary = utils.load_config_file()
    assert dictionary == {'a': 1, 'b': 2}
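Together with Example 6, this test pins down the contract of utils.load_config_file: resolve the path via get_config_file_name(), parse the file as YAML into a dict, and let FileNotFoundError propagate for a missing file. A minimal sketch consistent with both tests, assuming PyYAML:

import yaml

def load_config_file():
    # open() raises FileNotFoundError for a missing path, which
    # test_load_config_file_error relies on
    with open(get_config_file_name()) as f:
        return yaml.safe_load(f)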
Example 14
    def get_configs(cfg_type, **kwargs):
        cfg_file = load_config_file()

        config = None

        if kwargs.get('cfg_name') is not None:
            result = list(
                filter(lambda x: x.get(kwargs.get('cfg_name')) is not None,
                       cfg_file.get(cfg_type)))
            if result:
                config = result[0][kwargs.get('cfg_name')]
        else:
            config = cfg_file.get(cfg_type)

        return config
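A hypothetical usage sketch: given a config file whose cfg_type section is a list of single-key dicts, the cfg_name keyword picks one named entry, while omitting it returns the whole section:

# assuming a config shaped like {'databases': [{'primary': {...}}, {'replica': {...}}]}
primary = get_configs('databases', cfg_name='primary')  # the dict stored under 'primary'
all_dbs = get_configs('databases')                       # the entire 'databases' list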
Example 15
def main():
    parser = create_parser(True)
    args = parser.parse_args()
    spec = utils.load_config_file(args.yaml)  # spec is a dict of dicts of dicts
    data_settings = procdata.apply_defaults(spec["data"])
    para_settings = utils.apply_defaults(spec["params"])
    xval_merge = XvalMerge(args, data_settings)
    data_pair, val_results = run_on_split(args,
                                          data_settings,
                                          para_settings,
                                          split=None,
                                          trainer=xval_merge.trainer)
    xval_merge.add(1, data_pair, val_results)
    xval_merge.finalize()
    xval_merge.save()
Example 16
File: cmd_ci.py Project: rleon/mkt
def cmd_ci(args):
    """Local continuous integration check."""
    from . import cmd_images
    section = utils.load_config_file()
    if not args.project:
        set_args_project(args, section)

    build = Build(args.project)
    build.pickle['checkpatch'] = args.checkpatch
    build.pickle['sparse'] = args.sparse
    build.pickle['gerrit'] = args.gerrit
    build.pickle['show_all'] = args.show_all
    build.pickle["warnings"] = args.warnings
    build.pickle["smatch"] = args.smatch
    build.pickle["clang"] = args.clang

    # FIXME: allow git revisions as input to --rev.
    # But for now, let's give an option to provide
    # commit.
    build.pickle['rev'] = args.rev[0]
    do_cmd = ["python3", "/plugins/do-ci.py"]
    docker_exec(["run"] + build.run_ci_cmd(cmd_images.default_os) + do_cmd)
Example 17
def watch() -> None:
    """
    Start a developing server for your content
    """

    # Get where the execution is being made
    base_path = os.getcwd()
    theme = load_config_file(base_path)['theme']
    
    content_folder = os.path.join(base_path, 'content')
    theme_folder = os.path.join(base_path, 'themes')

    # Initialize the dev server
    server = Server()

    # Build content
    content = build_content(base_path)
    render_content(base_path, content, theme)
    copy_static_assets(base_path, theme)
    
    server.watch(content_folder, shell("arcade build", cwd=base_path))
    server.watch(theme_folder, shell("arcade build", cwd=base_path))
    server.serve(root="public")
Example 18
def run_blast(out_dir, file_type, evalue):
    configs = utils.load_config_file()

    # map() returns a one-shot iterator in Python 3; materialize the commands
    # as lists so they can be retried in the loop below
    command = list(map(str, [
        configs["makeblastdb"], "-in", out_dir + "/" + file_type + ".fasta",
        "-dbtype", "prot"
    ]))
    command2 = list(map(str, [
        configs["blastp"], "-db", out_dir + "/" + file_type + ".fasta",
        "-query", out_dir + "/" + file_type + ".fasta", "-out", out_dir + "/" +
        file_type + "_blast_results", "-outfmt", "6", "-evalue", evalue
    ]))
    attempt = 0
    res = 1
    while attempt < utils.MAX_ATTEMPTS and res != 0:
        res = subprocess.call(command)
        if res != 0:
            attempt += 1
            continue
        res = subprocess.call(command2)
        attempt += 1
    if res != 0:
        sys.exit("Error: Failed to run BLASTP for [" + file_type +
                 "]. Please check log files.")
Example 19
def build() -> None:
    base_path = os.getcwd()
    theme = load_config_file(base_path)['theme']
    content = build_content(base_path)
    render_content(base_path, content, theme)
    copy_static_assets(base_path, theme)
Example 20
File: cmd_run.py Project: rleon/mkt
def cmd_run(args):
    """Run a system image container inside KVM"""
    from . import cmd_images
    section = utils.load_config_file()
    docker_os = section.get('os', cmd_images.default_os)

    # We have three possible options to execute:
    # 1. "mkt run" without request to specific image. We will try to find
    #    default one
    # 2. "mkt run --pci ..." or "mkt run --simx ...". We won't use default
    #    image but add supplied PCIs and SimX devices.
    # 3. "mkt run image_name --pci ..." or "mkt run image_name --simx ...". We
    #    will add those PCIs and SimX devices to the container.
    s = set()
    if not args.pci and not args.simx:
        if not args.image:
            args.image = section.get('image', None)

    if args.image:
        pci = utils.get_images(args.image)['pci']
        s = pci.split()

    union = set(get_simx_rdma_devices()).union(
        set(get_pci_rdma_devices().keys())).union(set(get_virt_rdma_devices()))

    if not set(s).issubset(union):
        # This can only happen with values taken from the config file, since
        # command-line input is validated by argparse choices.
        exit(
            "There is an error in the configuration file: PCI, SIMX or VIRT devices don't exist."
        )

    args.pci += set(s).intersection(set(get_pci_rdma_devices().keys()))
    args.virt += set(s).intersection(set(get_virt_rdma_devices()))
    b = args.pci + args.virt
    args.simx += [item for item in s if item not in b]

    if len(args.simx) > 5:
        exit("SimX doesn't support more than 5 devices")

    if not args.kernel and not args.kernel_rpm:
        exit(
            "Must specify a linux kernel with --kernel, or a config file default"
        )

    # Invoke ourself as root to manipulate sysfs
    if args.pci:
        subprocess.check_call([
            "sudo", sys.executable,
            os.path.join(os.path.dirname(__file__), "../utils/vfio.py")
        ] + ["--pci=%s" % (I) for I in args.pci])

    mapdirs = DirList()
    if args.kernel_rpm is not None:
        args.kernel_rpm = os.path.realpath(args.kernel_rpm)
        if not os.path.isfile(args.kernel_rpm):
            raise ValueError("Kernel RPM %r does not exist" %
                             (args.kernel_rpm))
        mapdirs.add(os.path.dirname(args.kernel_rpm))
        args.kernel = None
    else:
        args.kernel = os.path.realpath(args.kernel)
        if not os.path.isdir(args.kernel):
            raise ValueError(
                "Kernel path %r is not a directory/does not exist" %
                (args.kernel))
        mapdirs.add(args.kernel)

    if args.image:
        try:
            if utils.get_images(args.image)['custom_qemu'] != "true":
                raise KeyError
            args.custom_qemu = section.get('simx', None)
        except KeyError:
            args.custom_qemu = None
    else:
        args.custom_qemu = None

    if args.custom_qemu:
        args.custom_qemu = os.path.realpath(args.custom_qemu)
        if not os.path.isdir(args.custom_qemu):
            raise ValueError("SimX path %r is not a directory/does not exist" %
                             (args.custom_qemu))
        mapdirs.add(args.custom_qemu)

    usr = pwd.getpwuid(os.getuid())
    args.dir.append(usr.pw_dir)
    if 'dir' in section:
        args.dir += section['dir'].split()
    args.dir = list(set(args.dir))

    for I in args.dir:
        mapdirs.add(I)

    args.boot_script = validate_and_set_boot(args)
    if args.boot_script:
        mapdirs.add(os.path.dirname(args.boot_script))

    vm_addr = get_mac()

    if args.run_shell:
        do_kvm_args = ["/bin/bash"]
    else:
        do_kvm_args = ["python3", "/plugins/do-kvm.py"]
        if vm_addr.ip:
            # Open network for QEMU, relevant for bridged mode only
            iprule = [
                "FORWARD", "-m", "physdev", "--physdev-is-bridged", "-j",
                "ACCEPT"
            ]
            # First delete old rule
            subprocess.call(["sudo", "iptables", "-D"] + iprule,
                            stdout=subprocess.DEVNULL,
                            stderr=subprocess.DEVNULL)
            subprocess.call(["sudo", "iptables", "-I"] + iprule,
                            stdout=subprocess.DEVNULL,
                            stderr=subprocess.DEVNULL)

    src_dir = os.path.dirname(
        os.path.abspath(inspect.getfile(inspect.currentframe())))

    ssh = False
    if args.pci:
        # check if we have a container running with a PCI device bound to it
        # sudo docker ps --filter "label=pci" --format "{{.Names}}"
        # sudo docker inspect --format='{{.Config.Labels.pci}}' mkt_run_nps-server-14-015
        cont = docker_get_containers(label="pci")
        for c in cont:
            c = c.decode()[1:-1]
            cpci = docker_output(
                ["inspect", "--format", '"{{.Config.Labels.pci}}"', c])
            cpci = cpci.decode()[2:-2].split(', ')
            cpci = [x[1:-1] for x in cpci]
            common = set(cpci).intersection(set(args.pci))
            if common:
                ssh = True
                cname = c

    if ssh:
        subprocess.call(["ssh", "root@%s" % (get_host_name(cname))])
    else:
        cname = get_container_name(vm_addr)
        docker_exec(["run"] + mapdirs.as_docker_bind() + [
            "-v",
            "%s:/plugins:ro" % (src_dir),
            "--mount",
            "type=bind,source=%s,destination=/logs" %
            (utils.config.runtime_logs_dir),
            "--rm",
            "--net=host",
            "--privileged",
            "--name=%s" % (cname),
            "--tty",
            "-l",
            "pci=%s" % (args.pci),
            "--hostname",
            vm_addr.hostname,
            "-e",
            "KVM_PICKLE=%s" % (get_pickle(args, vm_addr)),
            "--interactive",
            make_image_name("kvm", docker_os),
        ] + do_kvm_args)
Example 21
                        choices=('annotation', 'train', 'evaluate',
                                 'generate'),
                        default='annotation')

    return parser.parse_args()


if __name__ == '__main__':

    args = parse_args()

    seed = 0
    np.random.seed(seed)
    mx.random.seed(seed)

    cfg = load_config_file('config.yml')
    root_dir = cfg['BASE_DIR']
    gan = cfg['GAN']
    gan_dir = cfg['GAN_DIR']
    gan_gpu_ids = cfg['GAN_GPU_IDS']
    gan_batch_size = cfg['GAN_BATCH_SIZE_PER_GPU']
    solver_gpu_ids = cfg['SOLVER_GPU_IDS']
    annotation = cfg['ANNOTATION']
    no_gan = cfg.get('NO_GAN', False)
    imgs_dir = cfg.get('IMGS_DIR', None)
    n_generate = cfg.get('GENERATE_NUM', 10000)

    if args.action == 'annotation':
        import tkinter as tk
        root = tk.Tk()
        if annotation == 'segmentation':
Example 22
def cmd_setup(args):
    """Setup environment."""

    if not args.dirs:
        args.kernel = False
        args.rdma_core = False
        args.iproute = False

    if args.installs:
        print(""" This setup script will update your hypervisor to latest
 distribution packages and install docker. Please restart
 the hypervisor to complete the installation. """)
        if args.yes is False and utils.query_yes_no("Do you want to proceed?",
                                                    'no') is False:
            exit("Exiting ...")

    supported_os = {
        'fedora': '26',
        'ubuntu': '16',
        'rhel': '8',
        'redhat': '8',
    }

    # Python API stability is a disaster:
    # platform.dist() was deprecated (https://docs.python.org/3/library/platform.html).
    # Luckily, the distro module was added before platform.dist() was removed.
    try:
        import distro
        distro_id = distro.id()
        distro_v = distro.major_version()
    except ModuleNotFoundError:
        import platform
        distro_id = platform.dist()[0].lower()
        distro_v = platform.dist()[1].split('.')[0]

    if distro_id not in supported_os or int(distro_v) < int(
            supported_os[distro_id]):
        exit("Your hypervisor is not supported. Exiting ...")

    if args.installs:
        setuphv = utils.get_internal_fn('scripts/')
        if distro_id == 'redhat':
            distro_id = 'rhel'
        setuphv += 'setup-hv.' + distro_id
        subprocess.check_call(setuphv)

    utils.init_config_file()
    section = utils.load_config_file()

    if args.dirs:
        for key, value in section.items():
            if args.force:
                subprocess.call(["sudo", "rm", "-rf", value])
            if os.path.exists(value):
                exit("Please remove " + value + " Exiting ...")

            if key == "kernel" and not args.kernel:
                continue
            if key == "rdma-core" and not args.rdma_core:
                continue
            if key == "iproute2" and not args.iproute:
                continue

            if key == 'os':
                continue

            print("Prepare " + key)
            subprocess.call(["sudo", "mkdir", "-p", value])
            subprocess.call([
                "sudo", "chown", "-R",
                utils.username() + ":" + utils.group(), value
            ])

            if key == "src" or key == "logs" or key == "ccache":
                continue

            if key == "kernel":
                key = "linux"

            p = subprocess.Popen([
                "git", "clone", "ssh://" + utils.username() +
                "@l-gerrit.mtl.labs.mlnx:29418/upstream/" + key, "."
            ],
                                 cwd=value)
            p.wait()

            p = subprocess.Popen([
                "scp", "-p", "-P", "29418",
                utils.username() + "@l-gerrit.mtl.labs.mlnx:hooks/commit-msg",
                ".git/hooks/"
            ],
                                 cwd=value)
            p.wait()

            if key == "linux":
                shutil.copy(
                    os.path.join(os.path.dirname(__file__),
                                 "../configs/kconfig-kvm"), value + "/.config")

    print("Completed, PLEASE RESTART server")
Example 23
def main(args):

    print("Loading config file: ", args.config)
    params = utils.load_config_file(args.config)
    params["test_dataset_paths"] = utils.format_dataset_path(
        params["test_dataset_paths"])

    if args.existing_experiment:
        experiment = ExistingExperiment(
            api_key="jBFVYFo9VUsy0kb0lioKXfTmM",
            previous_experiment=args.existing_experiment)
    else:
        experiment = Experiment(api_key="jBFVYFo9VUsy0kb0lioKXfTmM",
                                project_name="fastdepth")

    # Data loading code
    print("Creating data loaders...")
    if args.nyu:
        from dataloaders.nyu import NYUDataset
        val_dataset = NYUDataset(params["test_dataset_paths"], split='val')
    else:
        val_dataset = Datasets.FastDepthDataset(params["test_dataset_paths"],
                                                split='val',
                                                depth_min=params["depth_min"],
                                                depth_max=params["depth_max"],
                                                input_shape_model=(224, 224))

    # set batch size to be 1 for validation
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=1,
                                             shuffle=True,
                                             num_workers=params["num_workers"],
                                             pin_memory=True)

    # Set GPU
    params["device"] = torch.device(
        "cuda:{}".format(params["device"])
        if params["device"] >= 0 and torch.cuda.is_available() else "cpu")
    print("Using device", params["device"])

    print("Loading model '{}'".format(args.model))
    if not args.nyu:
        model, _ = utils.load_model(params, args.model, params["device"])
    else:
        # Maintain compatibility for fastdepth NYU model format
        state_dict = torch.load(args.model, map_location=params["device"])
        model = models.MobileNetSkipAdd(output_size=(224, 224),
                                        pretrained=True)
        model.load_state_dict(state_dict)
        params["start_epoch"] = 0

    model.to(params["device"])

    # Create output directory
    output_directory = os.path.join(os.path.dirname(args.model), "images")
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    params["experiment_dir"] = output_directory
    print("Saving results to " + output_directory)

    evaluate(params, val_loader, model, experiment)
Example 24
    def set_up(self):
        spec = load_config_file(self.args.yaml)  # spec is a dict of dicts of dicts

        # Import the correct model
        self.params_dict = apply_defaults(spec["params"])

        # time some things, like epoch time
        start_time = time.time()

        # ---------------------------------------- #
        #     DEFINE XVAL DATASETS                 #
        # ---------------------------------------- #

        # Create self.dataset_pair: DatasetPair containing train and val Datasets.
        self._prepare_data(spec["data"])
        # Number of instances to put in a training batch.
        self.n_batch = min(self.params_dict['n_batch'], self.dataset_pair.n_train)

        # This is already a model object because of the "!!python/object:..." tag in the yaml file.
        model = self.params_dict["model"]
        # Set various attributes of the model
        model.init_with_params(self.params_dict, self.procdata.relevance_vectors)
        
        # Import priors from YAML
        parameters = Parameters()
        parameters.load(self.params_dict)

        print("----------------------------------------------")
        if self.args.verbose:
            print("parameters:")
            parameters.pretty_print()
        n_vals = LocalAndGlobal.from_list(parameters.get_parameter_counts())
        self.n_theta = n_vals.sum()

        #     TENSORFLOW PARTS        #
        self.placeholders = Placeholders(self.dataset_pair, n_vals)

        # feed_dicts are used to supply placeholders, these are for the entire train/val dataset, there is a batch one below.
        self._create_feed_dicts()

        # time-series of species differences: x_delta_obs is BATCH x (nTimes-1) x nSpecies
        x_delta_obs = self.placeholders.x_obs[:, 1:, :] - self.placeholders.x_obs[:, :-1, :]

        # DEFINE THE ENCODER NN: for LOCAL PARAMETERS
        print("Set up encoder")
        self.encoder = Encoder(self.args.verbose, parameters, self.placeholders, x_delta_obs)

        # DEFINE THE DECODER NN
        print("Set up decoder")
        self.decoder = Decoder(self.args.verbose, self.params_dict, self.placeholders, self.dataset_pair.times, self.encoder)

        # DEFINE THE OBJECTIVE and GRADIENTS
        # likelihood p (x | theta)
        print("Set up objective")
        self.objective = Objective(self.encoder, self.decoder, model, self.placeholders)

        # SET-UP tensorflow LEARNING/OPTIMIZER
        self.training_stepper = TrainingStepper(self.args.dreg, self.encoder, self.objective, self.params_dict)
        time_interval = time.time() - start_time
        print("Time before sess: %g" % time_interval)

        # TENSORBOARD VISUALIZATION            #
        ts_to_vis = 1
        self.encoder.q.attach_summaries()  # global and local parameters of q distribution
        unnormed_iw = self.objective.log_unnormalized_iws[ts_to_vis, :]
        self_normed_iw = self.objective.normalized_iws[ts_to_vis, :]   # not in log space
        with tf.name_scope('IWS'):
            variable_summaries(unnormed_iw, 'iws_unn_log')
            variable_summaries(self_normed_iw, 'iws_normed')
            tf.summary.scalar('nonzeros', tf.count_nonzero(self_normed_iw))

        #print(tf.shape(log_p_observations))
        with tf.name_scope('ELBO'):
            tf.summary.scalar('log_p', tf.reduce_mean(self.training_stepper.logsumexp_log_p))  # [batch, 1]
            tf.summary.scalar('log_prior', tf.reduce_mean(self.training_stepper.logsumexp_log_p_theta))
            tf.summary.scalar('loq_q', tf.reduce_mean(self.training_stepper.logsumexp_log_q_theta))
            tf.summary.scalar('elbo', self.objective.elbo)
Example 25
def get_result():
    test_subset_name = 'test'
    all_params = load_config_file(tal_config_file)
    locals().update(all_params)
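    # NOTE: updating locals() inside a function does not reliably rebind local
    # names in CPython, so the config keys used below (ucf_video_dir, cas_dir,
    # gt_file_pth, fps, ...) must also be resolvable as module globals.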

    # cas_file allows user to select and upload a single CAS file (i.e. Explosion008_x264.npz file)
    # Doesn't matter from which CAS modalities(dirs), as all the modalities detection will be displayed
    cas_file = request.files['cas_file']
    videoname = cas_file.filename[:-4]

    
    class_name = ''
    num_class = len(ucf_crime_old_cls_names.keys())
    for clss in range(num_class):
        class_name = ucf_crime_old_cls_names[clss + 1]
        if videoname.startswith(class_name):
            break

    global video_path
    video_path = os.path.join(ucf_video_dir, '{}/{}.mp4'.format(class_name, videoname))
    video_path = video_path.replace('\\', '/')
    
    list_modality = os.listdir(cas_dir)
    talImageB64String, predicted_class_dict = {}, {}
    merge_localized_actions = []
    #Retrieve data from TAL
    for mod in list_modality:
        modality = mod.split('-')[-1] #Only get the modality name
        pth_to_modality = os.path.join(cas_dir, mod)
        pth_to_modality = os.path.join(pth_to_modality, '{}.npz'.format(videoname))
        pth_to_modality = pth_to_modality.replace('\\', '/')
    
        _, frame_cnt, _, detected_list = metric_scores(pth_to_modality, **all_params)

        talImageB64String[modality], predicted_class, start_end_conf = tal.plot_tal_charts(
            videoname, gt_file_pth, detected_list, 
            frame_cnt, modality
        )

        predicted_class_dict[modality] = predicted_class
        start_end_conf['modality'] = modality
        start_end_conf.start = start_end_conf.start * fps
        start_end_conf.end = start_end_conf.end * fps
        merge_localized_actions.append(start_end_conf)

    # predict_df_display contains [start,end,confidence_score,modality] of every localized actions
    predict_df_display = pd.concat(merge_localized_actions)
    # For each modalities, sort [start & end] values
    predict_df_display = predict_df_display.groupby(['modality']).apply(
        lambda x: x.sort_values(['start', 'end'])
    )


    gt_talImageB64String, gt_start_end = tal.plot_gt_charts(
        videoname, gt_file_pth, detected_list, 
        frame_cnt, _
    )
    # gt_df_display contains [start,end]
    gt_df_display = pd.DataFrame(gt_start_end, columns=['start', 'end'])
    gt_df_display = gt_df_display.sort_values(by=['start', 'end'])

    #Retrieve data from MIL
    x_axis_frame, y_axis_score, gt_bar = mil.visualize_mil(
        videoname, 
        frame_cnt, 
        path_to_MIL_test_output_dir, 
        gt_file_pth
    )
    milImageB64String = mil.plot_mil_charts(
        x_axis_frame, y_axis_score, gt_bar)


    api_response = {
        'modality': modality,
        'video_path': video_path,
        'fig_mil': milImageB64String,
        'tal_gt': gt_talImageB64String,
        'fig_tal_both': talImageB64String['both'],
        'class_both': predicted_class_dict['both'],
        'fig_tal_rgb': talImageB64String['rgb'],
        'class_rgb': predicted_class_dict['rgb'],
        'fig_tal_flow': talImageB64String['flow'],
        'class_flow': predicted_class_dict['flow'],
        'fig_tal_late_fusion': talImageB64String['fusion'],
        'class_late_fusion': predicted_class_dict['fusion'],
        'frame_cnt': frame_cnt,
        'predict_df_display': [predict_df_display.to_html(index=False)],
        'gt_df_display': [gt_df_display.to_html(index=False)]
    }

    return render_template(
        "result.html", 
        **api_response
    )
Example 26
import numpy as np
import os
import pickle
from utils import load_config_file
import sys


if __name__ == '__main__':
    config_file_name = sys.argv[1]
    config = load_config_file(config_file_name)

    # create output folder if it doesn't exist
    output_folder = config["Output Folder"]
    if not os.path.isdir(output_folder):
        os.mkdir(output_folder)

    # load test set
    input_feature_path_testing = config["Input Feature Path Testing"]
    X_testing = np.load(input_feature_path_testing)
    input_age_path_testing = config["Input Age Path Testing"]
    Y_testing = np.load(input_age_path_testing)
    input_subjects_path_testing = config["Input Subject IDs Path Testing"]
    IDs_testing = np.load(input_subjects_path_testing)

    # extract PCs of test set
    pretrained_folder = config["Pretrained Folder"]
    with open(os.path.join(pretrained_folder, 'pca_training_set.pickle'), 'rb') as input_file:
        pca = pickle.load(input_file)
    X_testing = pca.transform(X_testing)
    age_mean = np.load(os.path.join(pretrained_folder, 'mean_age_training_set.npy'))
    Y_training_orig = np.load(os.path.join(pretrained_folder, 'y_training_set.npy'))
Example 27
"""Build sources to remove dependencies from host
"""
import os
import utils
from utils.docker import *
import inspect
import pickle
import base64
from utils.config import username, group

section = utils.load_config_file()


class Build(object):
    def __init__(self, project):
        if project == 'custom':
            self.src = section.get('src', None)
        else:
            self.src = section.get(project, None)
        if self.src is None:
            exit("Please configure source directory in MKT config.")

        self.project = project
        self.pickle = dict()

    def _get_pickle(self):
        self.pickle["project"] = self.project
        self.pickle["src"] = self.src
        self.pickle["checkpatch_root_dir"] = section.get('kernel', None)

        if self.project == 'custom':
Example 28
                row_values[1],
                "errand_status":
                row_values[2],
                "reporter":
                row_values[3].title(),
                "apartment":
                row_values[4].upper(),
                "errand_type":
                row_values[8],
                "errand_details":
                row_values[9].replace("\n", " ").replace("\r", "").strip(),
            }
            transformed_errands.append(single_errand_data)

    logging.info(
        f"Raw data transformed, found {len(transformed_errands)} errands")
    # Reverse to insert oldest errand first in the database
    transformed_errands.reverse()

    return transformed_errands


if __name__ == "__main__":
    setup_logging()
    configs = load_config_file()
    browser = setup_browser(configs, runs_local=True)
    page_soup = download_data(browser, configs)
    all_table_rows = get_table_rows_from_soup(page_soup)
    all_errands = transform_table_rows(all_table_rows)
    print(all_errands)
Example 29
    parser.add_argument('--include-train',
                        dest='include_train',
                        action='store_true')
    parser.add_argument('--no-include-train',
                        dest='include_train',
                        action='store_false')
    parser.set_defaults(include_train=True)

    args = parser.parse_args()

    print(args.config_file)
    print(args.train_subset_name)
    print(args.test_subset_name)
    print(args.include_train)

    all_params = load_config_file(args.config_file)
    locals().update(all_params)
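    # NOTE: if this code runs inside a function, updating locals() does not
    # reliably rebind local names in CPython; at module level (e.g. under
    # if __name__ == '__main__':) locals() is globals() and this works.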

    if args.include_train:
        train_dataset_dict = get_dataset(
            dataset_name=dataset_name,
            subset=args.train_subset_name,
            file_paths=file_paths,
            sample_rate=sample_rate,
            base_sample_rate=base_sample_rate,
            action_class_num=action_class_num,
            modality='both',
            feature_type=feature_type,
            feature_oversample=False,
            temporal_aug=False,
        )
Example 30
    with open(
            os.path.join(config["MODEL_OUTPUT_RESULTS_PATH"], "accuracy.csv"),
            "w") as f_:
        f_.write("class_number,accuracy\n")
        for i in range(accuracy_per_class.shape[0]):
            f_.write("{},{:.2f}\n".format(class_labels[i],
                                          accuracy_per_class[i]))

    return


if __name__ == "__main__":
    print("Loading Config")
    config_file = "./default-config,yaml"
    config = load_config_file(config_file)

    # dataloaders
    print("Getting Dataloader")
    test_loader, total_classes = load_dataset(config)

    # creating model
    print("Loading Model")
    model = get_model(num_classes=total_classes)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)

    # loading model
    if not os.path.isfile(config["MODEL_TEST_PATH"]):
        print("Model checkpoint weights does no exist, exiting....")
        import sys