Example #1
def link_dependencies(proj_base_path, dep_path, verbosity="VERB_HIGH"):
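    """Symlink each dependency path into the project's rtl/ directory."""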
    # import ipdb as pdb; pdb.set_trace()
    proj_rtl_dir = proj_base_path + "/rtl/"
    command = "cd " + proj_rtl_dir
    util.run_command(command, verbosity=verbosity)
    for ip_path in dep_path:
        ip_path = ip_path.replace("\n", "")
        util.check_for_file(ip_path, exit=False)
        command = "ln -s " + ip_path + " " + proj_rtl_dir
        util.run_command(command, verbosity=verbosity)
Example #2
def publish(streams_path, publish_script, dry_run=False, verbose=False):
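    """Publish the weekly juju-dist streams, retrying the publish script up to three times."""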
    juju_dist_path = os.path.join(streams_path, 'juju-dist')
    command = [publish_script, 'weekly', juju_dist_path, 'cpc']
    for attempt in range(3):
        try:
            run_command(command, dry_run=dry_run, verbose=verbose)
            break
        except subprocess.CalledProcessError:
            # Raise an error when the third attempt fails; the cloud is ill.
            if attempt == 2:
                raise
Example #3
def create_f_file(proj_base_path, deps, verbosity="VERB_HIGH"):
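    """Write scripts/files.f listing the RTL file of each dependency."""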
    # import ipdb as pdb; pdb.set_trace()
    path_to_ffile = "{0}/scripts/files.f".format(proj_base_path)
    command = "touch {0}".format(path_to_ffile)
    util.run_command(command, verbosity=verbosity)
    ips = []
    with open(path_to_ffile, "w") as f:
        for dep in deps:
            ip = dep.split("/")[-1].split("\n")[0]
            f.write("../rtl/" + ip + "\n")
Example #4
def main(argv=None):
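    """Run download_juju.py on the Windows and OS X hosts via workspace-run."""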
    args = parse_args(argv)
    juju_home = args.juju_home
    win_host = args.win_host
    win_path = (
        'C:\\\\Users\\\\Administrator\\\\juju-ci-tools\\\\download_juju.py')
    osx_path = '$HOME/juju-ci-tools/download_juju.py'
    osx_host = args.osx_host
    rev = get_revisions(os.environ['HOME'])
    with NamedTemporaryFile() as temp_file:
        for path, host in [[win_path, win_host], [osx_path, osx_host]]:
            create_workspace_yaml(juju_home, path, temp_file, rev)
            run_command(['workspace-run', temp_file.name, host])
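Example #6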
def generate(model, batch_size, sub_model):
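    """Run fairseq-generate with the given sub-model checkpoint and batch size."""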

    if model == "LSTM":
        command = "fairseq-generate data/fairseq_binaries \
                    --path checkpoints/lstm/checkpoint_{}.pt \
                    --batch-size {} --beam 3".format(sub_model, batch_size)
    elif model == "CNN":
        command = "fairseq-generate data/fairseq_binaries \
                    --path checkpoints/conv/checkpoint_{}.pt \
                    --batch-size {} --beam 3".format(sub_model, batch_size)
    else:
        raise NotImplementedError

    run_command(command)
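Example #7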
def generate(model, batch_size):
    if model == "LSTM":
        command = "fairseq-generate data/fairseq_binaries \
                    --path checkpoints/lstm/checkpoint_last.pt \
                    --batch-size 64 --beam 3"

    elif model == "CNN":
        command = "fairseq-generate data/fairseq_binaries \
                    --path checkpoints/conv/checkpoint_last.pt \
                    --batch-size 64 --beam 3"

    else:
        raise NotImplementedError

    run_command(command)
Example #8
def main(argv=None):
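    """Bootstrap a juju environment, run the deployer assessment, and
    optionally call a bundle verification script."""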
    args = parse_args(argv)
    configure_logging(args.verbose)
    start_juju_path = None if args.upgrade else args.juju_bin
    client = client_from_config(args.env, start_juju_path, debug=args.debug,
                                soft_deadline=args.deadline)
    with boot_context(args.temp_env_name, client, None, [], args.series,
                      args.agent_url, args.agent_stream, args.logs,
                      args.keep_env, upload_tools=args.upload_tools,
                      region=args.region):
        assess_deployer(
            args, client, args.agent_timeout, args.workload_timeout)
        if args.bundle_verification_script:
            client_ser = pickle.dumps(client)
            logging.info('Calling bundle verification script {}'.format(
                args.bundle_verification_script))
            run_command([args.bundle_verification_script, client_ser])
    return 0
Example #9
def optimize_model(expr_name, onnx_file, model_xml, weight_bin):
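    """Run the OpenVINO model optimizer when no .xml/.bin IR files are given."""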
    run_opt = False
    if model_xml is None:
        util.print_log("Could not find xml model", id_str="warning")
        run_opt = True
    if weight_bin is None:
        util.print_log("Could not find binary weights", id_str="warning")
        run_opt = True
    if run_opt:
        util.print_banner(
            "Running OpenVino optimizer on {0}".format(onnx_file),
            color='green',
            verbosity="VERB_LOW")
        cmd = "python {0}/deployment_tools/model_optimizer/mo.py --input_model={1} --model_name {2}".format(
            openvino_inst_path, onnx_file, expr_name)
        util.run_command(cmd, verbosity="VERB_LOW")
        model_xml, weight_bin = expr_name + ".xml", expr_name + ".bin"
    # load model
    # import ipdb as pdb; pdb.set_trace()
    return model_xml, weight_bin
Example #10
def publish_candidates(path, streams_path,
                       juju_release_tools=None, dry_run=False, verbose=False):
    """Assemble and publish weekly streams from the candidates."""
    timestamp = datetime.datetime.utcnow().strftime('%Y_%m_%dT%H_%M_%S')
    with temp_dir() as debs_path:
        for dir_name in get_artifact_dirs(path):
            artifacts_path = os.path.join(path, dir_name)
            branch_name = dir_name.split('-')[0]
            for deb_name in os.listdir(artifacts_path):
                deb_path = os.path.join(artifacts_path, deb_name)
                if verbose:
                    print('Copying %s' % deb_path)
                new_path = os.path.join(debs_path, deb_name)
                shutil.copyfile(deb_path, new_path)
                if deb_name == 'buildvars.json':
                    # buildvars.json is also in the artifacts_path; copied by
                    # download_candidate_files(). Set it aside so it can be
                    # sync'd to S3 as a record of what was published.
                    buildvar_dir = '{}/weekly/{}/{}'.format(
                        path, timestamp, branch_name)
                    if not os.path.isdir(buildvar_dir):
                        os.makedirs(buildvar_dir)
                    buildvar_path = '{}/{}'.format(buildvar_dir, deb_name)
                    shutil.copyfile(deb_path, buildvar_path)
        assemble_script, publish_script = get_scripts(juju_release_tools)
        # XXX sinzui 2014-12-01: IGNORE uses the local juju, but when
        # testing juju's that change generate-tools, we may need to use
        # the highest version.
        command = [
            assemble_script, '-t', debs_path, 'weekly', 'IGNORE',
            streams_path]
        run_command(command, dry_run=dry_run, verbose=verbose)
    publish(streams_path, publish_script, dry_run=dry_run, verbose=verbose)
    # Sync buildvars.json files out to s3.
    url = 's3://juju-qa-data/juju-releases/weekly/'
    s3_path = '{}/weekly/{}'.format(path, timestamp)
    if verbose:
        print('Calling s3cmd to sync %s out to %s' % (s3_path, url))
    if not dry_run:
        s3_cmd(['sync', s3_path, url])
    extract_candidates(path, dry_run=dry_run, verbose=verbose)
Example #11
def optimize_model(expr_name, onnx_file, model_xml, weight_bin, opt_ops=""):
#    import ipdb as pdb; pdb.set_trace()
    run_opt = False
    options = ""
    if model_xml is None:
        util.print_log("No xml model was provided", id_str="warning")
        run_opt = True
    if weight_bin is None:
        util.print_log("No binary weight file was provided", id_str="warning")
        run_opt = True
    if run_opt:
        util.print_banner("Running OpenVino optimizer on {0}".format(onnx_file), color='green', verbosity="VERB_LOW")
        options += "--input_model={0} ".format(onnx_file)
        options += "--model_name {0} ".format(expr_name)
        options += (" --"+opt_ops[0]) if len(opt_ops)==1 else  "".join(" --"+e for e in opt_ops)
        cmd = "python {0}/deployment_tools/model_optimizer/mo_onnx.py {1}".format(openvino_inst_path, options)
        util.run_command(cmd, verbosity="VERB_LOW")
        model_xml, weight_bin = expr_name+".xml", expr_name+".bin"
    # load model
    # import ipdb as pdb; pdb.set_trace()
    return model_xml, weight_bin
Example #12
def train(model_name, epochs, batch_size):
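    """Train the selected fairseq architecture (LSTM or CNN) with fairseq-train."""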
    if model_name == "LSTM":
        command = "CUDA_VISIBLE_DEVICES=0 fairseq-train {}  --clip-norm 5 --batch-size {} \
                    --save-dir checkpoints/lstm --arch lstm --max-epoch {} --encoder-hidden-size 258 \
                    --encoder-layers 2  --decoder-hidden-size 258 --decoder-layers 2 --optimizer adam --lr 0.001  \
                    --dropout 0.3 --encoder-embed-path glove.6B.300d.txt --encoder-bidirectional --encoder-embed-dim 300 \
                    --decoder-embed-dim 300 --no-epoch-checkpoints --decoder-embed-path glove.6B.300d.txt --decoder-out-embed-dim 300 \
                    --num-workers 3".format(
            FAIRSEQ_PREPROCESSED_DATASET, batch_size, epochs
        )
    elif model_name == "CNN":
        command = "CUDA_VISIBLE_DEVICES=0 fairseq-train {} --batch-size {} \
                    --save-dir checkpoints/conv --arch fconv_iwslt_de_en --max-epoch {} \
                    --optimizer adam --lr 0.001  \
                    --dropout 0.3 --encoder-embed-path glove.6B.300d.txt --encoder-embed-dim 300 \
                    --decoder-embed-dim 300 --no-epoch-checkpoints --decoder-embed-path glove.6B.300d.txt --decoder-out-embed-dim 300 \
                    --num-workers 3".format(
            FAIRSEQ_PREPROCESSED_DATASET, batch_size, epochs
        )
    else:
        raise NotImplementedError

    run_command(command)
Example #13
    def test_run_command_dry_run(self):
        with patch('subprocess.check_output') as co_mock:
            run_command(['foo', 'bar'], dry_run=True)
            self.assertEqual(0, co_mock.call_count)
Example #14
                           verbosity="VERB_LOW")
            sys.exit()

        util.print_banner("Compiling input files", verbosity=verbosity)
        if files is None:
            util.print_log("You need to provide f-file",
                           "ERROR",
                           verbosity="VERB_LOW")
            sys.exit()
        sv_rtl, v_rtl, vhdl_rtl = get_rtl_files(files)
        # import ipdb as pdb; pdb.set_trace()
        if sv_rtl != "":
            cmd_to_run = "xvlog --sv {0} ".format(sv_rtl)
            if silence:
                cmd_to_run += "> /dev/null"
            util.run_command(cmd_to_run, split=False, verbosity=verbosity)
        if v_rtl != "":
            cmd_to_run = "xvlog {0} ".format(v_rtl)
            if silence:
                cmd_to_run += "> /dev/null"
            util.run_command(cmd_to_run, split=False, verbosity=verbosity)
        if vhdl_rtl != "":
            cmd_to_run = "xvhdl {0} ".format(vhdl_rtl)
            if silence:
                cmd_to_run += "> /dev/null"
            util.run_command(cmd_to_run, split=False, verbosity=verbosity)

        util.print_banner("Creating snapshot", verbosity=verbosity)
        cmd_to_run = "xelab {0} ".format(top_level)
        if waveform:
            cmd_to_run += "--debug all "
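Example #15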
    def preprocess(self):
        """Preprocesses the dataset"""

        # shutil.copyfile(original, target)

        if not os.path.exists(
                os.path.join(DATASET_FOLDER, PROCESSED_DATASET_FOLDER)):
            os.mkdir(os.path.join(DATASET_FOLDER, PROCESSED_DATASET_FOLDER))

        shutil.copyfile(
            os.path.join(self.dataset_location,
                         RAW_DATASET_FILENAMES["train"][0]),
            PROCESSED_DATASET["train"] + ".sentence",
        )
        shutil.copyfile(
            os.path.join(self.dataset_location,
                         RAW_DATASET_FILENAMES["train"][1]),
            PROCESSED_DATASET["train"] + ".question",
        )

        shutil.copyfile(
            os.path.join(self.dataset_location,
                         RAW_DATASET_FILENAMES["valid"][0]),
            PROCESSED_DATASET["valid"] + ".sentence",
        )
        shutil.copyfile(
            os.path.join(self.dataset_location,
                         RAW_DATASET_FILENAMES["valid"][1]),
            PROCESSED_DATASET["valid"] + ".question",
        )

        shutil.copyfile(
            os.path.join(self.dataset_location,
                         RAW_DATASET_FILENAMES["test"][0]),
            PROCESSED_DATASET["test"] + ".sentence",
        )
        shutil.copyfile(
            os.path.join(self.dataset_location,
                         RAW_DATASET_FILENAMES["test"][1]),
            PROCESSED_DATASET["test"] + ".question",
        )

        logger.info(
            "Running FairSeq Preprocessing to convert files into fairseq binaries"
        )

        if os.path.exists(FAIRSEQ_PREPROCESSED_DATASET):
            logger.debug("Old Binaries present deleting them")
            shutil.rmtree(FAIRSEQ_PREPROCESSED_DATASET)
            logger.debug("Deleted old binaries now generating new one's")

        pre_process_command = "fairseq-preprocess --source-lang sentence --target-lang question \
                              --trainpref {} --testpref {} \
                              --validpref {} --destdir {} --seed {} \
                              --nwordssrc {} --nwordstgt {}".format(
            PROCESSED_DATASET["train"],
            PROCESSED_DATASET["test"],
            PROCESSED_DATASET["valid"],
            FAIRSEQ_PREPROCESSED_DATASET,
            SEED,
            SRC_WORDS,
            TRG_WORDS,
        )

        run_command(pre_process_command)
Example #16
    def preprocess(self):
        """Preprocesses the dataset"""
        with open(RAW_DATASET) as csv_file:
            csv_reader = csv.DictReader(csv_file, delimiter="\t")
            for row in csv_reader:
                if "_" in row["Question"]:
                    row["Question"] = re.sub(r"[_]{2,}", "_", row["Question"])

                    self.dataset["sentence"].append(row["answer"].lstrip().strip())
                    self.dataset["question"].append(row["Question"].strip())

        logger.debug("DataSet Preprocessed Successfully!")

        self.dataset = pd.DataFrame.from_dict(self.dataset)

        # Test Split
        self.trainset, self.testset = train_test_split(
            self.dataset, test_size=0.15, random_state=SEED
        )

        # Valid Split
        self.trainset, self.validset = train_test_split(
            self.trainset, test_size=0.10, random_state=SEED
        )

        self.trainset["sentence"].to_csv(
            "{}.sentence".format(PROCESSED_DATASET["train"]),
            index=False,
            sep="\t",
            header=False,
        )
        self.validset["sentence"].to_csv(
            "{}.sentence".format(PROCESSED_DATASET["valid"]),
            index=False,
            sep="\t",
            header=False,
        )
        self.testset["sentence"].to_csv(
            "{}.sentence".format(PROCESSED_DATASET["test"]),
            index=False,
            sep="\t",
            header=False,
        )
        self.trainset["question"].to_csv(
            "{}.question".format(PROCESSED_DATASET["train"]),
            index=False,
            sep="\t",
            header=False,
        )
        self.validset["question"].to_csv(
            "{}.question".format(PROCESSED_DATASET["valid"]),
            index=False,
            sep="\t",
            header=False,
        )

        self.testset["question"].to_csv(
            "{}.question".format(PROCESSED_DATASET["test"]),
            index=False,
            sep="\t",
            header=False,
        )

        logger.debug(
            "Trainset Size: {}, Validset Size: {}, Tesetset Size: {}".format(
                self.trainset.shape, self.validset.shape, self.testset.shape
            )
        )

        logger.debug(
            "Saving the file preprocessed files to : {}".format(
                PROCESSED_DATASET_FOLDER
            )
        )

        logger.info(
            "Running FairSeq Preprocessing to convert files into fairseq binaries"
        )

        if os.path.exists(FAIRSEQ_PREPROCESSED_DATASET):
            logger.debug("Old Binaries present deleting them")
            shutil.rmtree(FAIRSEQ_PREPROCESSED_DATASET)
            logger.debug("Deleted old binaries now generating new one's")

        pre_process_command = "fairseq-preprocess --source-lang sentence --target-lang question \
                              --trainpref {} --testpref {} \
                              --validpref {} --destdir {} --seed {} \
                              --nwordssrc 5000 --nwordstgt 5000".format(
            PROCESSED_DATASET["train"],
            PROCESSED_DATASET["test"],
            PROCESSED_DATASET["valid"],
            FAIRSEQ_PREPROCESSED_DATASET,
            SEED,
        )

        run_command(pre_process_command)
Example #17
def run_cmds(commands):
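    """Run each command in sequence with verbose output."""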
    for cmd in commands:
        run_command(cmd, verbose=True)
Example #19
    def test_run_command_args(self):
        with patch('subprocess.check_output') as co_mock:
            run_command(['foo', 'bar'])
        args, kwargs = co_mock.call_args
        self.assertEqual((['foo', 'bar'], ), args)
Example #21
    def test_run_command_verbose(self):
        with patch('subprocess.check_output'):
            with patch('utility.print_now') as p_mock:
                run_command(['foo', 'bar'], verbose=True)
                self.assertEqual(2, p_mock.call_count)
Example #23
            default_path_to_fpga_dir))
        path_fpga = default_path_to_fpga_dir
    # Check if project has already been created
    if os.path.isdir("{0}{1}".format(path_for_proj, project_name)):
        util.print_log("Project path {0}{1} already exist!".format(
            path_for_proj, project_name),
                       id_str="ERROR")
        sys.exit()
    if os.getcwd() not in path_for_proj:
        proj_dir = os.getcwd() + "/" + path_for_proj + project_name
    else:
        proj_dir = path_for_proj + project_name
    proj_dir = os.path.abspath(proj_dir)
    # import ipdb as pdb; pdb.set_trace()
    command = "mkdir {0}".format(project_name)
    util.run_command(command, verbosity=verbosity)
    command = "cd {0}".format(proj_dir)
    util.run_command(command, verbosity=verbosity)
    command = "mkdir {0}/docs {0}/results {0}/rtl {0}/scripts {0}/sw {0}/tb".format(
        proj_dir)
    util.run_command(command, verbosity=verbosity)
    command = "touch {0}/rtl/{1}.sv".format(proj_dir, project_name.lower())
    util.run_command(command, verbosity=verbosity)
    command = "touch {0}/tb/{1}_tester.sv".format(proj_dir,
                                                  project_name.lower())
    util.run_command(command, verbosity=verbosity)
    if __args__['dependancy'] is not None:
        deps = read_dep_file(__args__['dependancy'])
        link_dependencies(proj_dir, deps, verbosity=verbosity)
        create_f_file(proj_dir, deps, verbosity=verbosity)