Example #1
def cmd_ipython(args):
    args = defaults.update_check_args(args, "Could not run IPython parallel analysis.")
    args = install.docker_image_arg(args)
    parallel = clargs.to_parallel(args, "bcbiovm.docker")
    parallel["wrapper"] = "runfn"
    with open(args.sample_config) as in_handle:
        ready_config, _ = mounts.normalize_config(yaml.safe_load(in_handle), args.fcdir)
    work_dir = os.getcwd()
    ready_config_file = os.path.join(work_dir, "%s-ready%s" %
                                     (os.path.splitext(os.path.basename(args.sample_config))))
    with open(ready_config_file, "w") as out_handle:
        yaml.safe_dump(ready_config, out_handle, default_flow_style=False, allow_unicode=False)
    work_dir = os.getcwd()
    systemconfig = run.local_system_config(args.systemconfig, args.datadir, work_dir)
    cur_pack = pack.shared_filesystem(work_dir, args.datadir, args.tmpdir)
    parallel["wrapper_args"] = [devel.DOCKER, {"sample_config": ready_config_file,
                                               "fcdir": args.fcdir,
                                               "pack": cur_pack,
                                               "systemconfig": systemconfig,
                                               "image": args.image}]
    # For testing, run on a local ipython cluster
    parallel["run_local"] = parallel.get("queue") == "localrun"

    from bcbio.pipeline import main
    main.run_main(work_dir, run_info_yaml=ready_config_file,
                  config_file=systemconfig, fc_dir=args.fcdir,
                  parallel=parallel)
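
Almost every example in this collection passes default_flow_style=False, which makes PyYAML emit nested structures in block style (one entry per line) instead of inline flow style. A minimal sketch of the difference, using a made-up dict purely for illustration:

import yaml

data = {"upload": {"dir": "../final"}, "details": ["sample1", "sample2"]}

# Block style: nesting is expressed with indentation, one entry per line.
print(yaml.safe_dump(data, default_flow_style=False))
# details:
# - sample1
# - sample2
# upload:
#   dir: ../final

# Flow style: nested collections are rendered inline with {} and [].
print(yaml.safe_dump(data, default_flow_style=True))
# {details: [sample1, sample2], upload: {dir: ../final}}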
Example #2
def prep_cwl(samples, workflow_fn, out_dir, out_file, integrations=None):
    """Output a CWL description with sub-workflows and steps.
    """
    step_dir = utils.safe_makedir(os.path.join(out_dir, "steps"))
    sample_json, variables, keyvals = _flatten_samples(samples, out_file)
    file_estimates = _calc_input_estimates(keyvals, integrations)
    out = _cwl_workflow_template(variables)
    parent_wfs = []
    steps, wfoutputs = workflow_fn()
    for cur in workflow.generate(variables, steps, wfoutputs):
        if cur[0] == "step":
            _, name, parallel, inputs, outputs, programs, disk = cur
            step_file = _write_tool(step_dir, name, inputs, outputs, parallel, programs,
                                    file_estimates, disk, samples)
            out["steps"].append(_step_template(name, step_file, inputs, outputs, parallel))
        elif cur[0] == "upload":
            out["outputs"] = cur[1]
        elif cur[0] == "wf_start":
            parent_wfs.append(out)
            out = _cwl_workflow_template(cur[1])
        elif cur[0] == "wf_finish":
            _, name, parallel, inputs, outputs = cur
            wf_out_file = "wf-%s.cwl" % name
            with open(os.path.join(out_dir, wf_out_file), "w") as out_handle:
                yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
            out = parent_wfs.pop(-1)
            out["steps"].append(_step_template(name, wf_out_file, inputs, outputs, parallel))
        else:
            raise ValueError("Unexpected workflow value %s" % str(cur))

    with open(out_file, "w") as out_handle:
        yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
    return out_file, sample_json
Example #3
    def test_load_config_files(self):
        defaults = {
            'cat1': {
                'key': 'value'
            },
            'cat2': {
                'key': 'value',
                'key2': 'value2'
            },
            'cat3': {
                'key': 'value'
            }
        }
        config_file_A = {
            'cat1': {
                'key': 'valueA'
            },
            'cat2': {
                'key': 'valueA',
                'invalid_key': 'ignored'
            },
            'invalid_category': {
                'ignored': 'ignored'
            },
            'cat3': None
        }
        config_file_B = {
            'cat1': {
                'key': 'valueB'
            },
            'cat2': {
                'key2': 'value2B'
            }
        }
        temp_dir = tempfile.mkdtemp()
        config_file_A_path = os.path.join(temp_dir, "configA.conf")
        config_file_B_path = os.path.join(temp_dir, "configB.conf")
        with open(config_file_A_path, 'w') as out_file:
            yaml.safe_dump(config_file_A, out_file)

        with open(config_file_B_path, 'w') as out_file:
            yaml.safe_dump(config_file_B, out_file)

        config = _load_config_files_with_defaults([config_file_B_path,
                                                   '/invalid/path/ignored.txt',
                                                   config_file_A_path],
                                                  defaults)

        self.assertEqual(config, {
            'cat1': {
                'key': 'valueB'
            },
            'cat2': {
                'key': 'valueA',
                'key2': 'value2B'
            },
            'cat3': {
                'key': 'value'
            }
        })
Example #4
File: testtools.py Project: runcom/flocker
def flocker_deploy(test_case, deployment_config, application_config):
    """
    Run ``flocker-deploy`` with given configuration files.

    :param test_case: The ``TestCase`` running this unit test.
    :param dict deployment_config: The desired deployment configuration.
    :param dict application_config: The desired application configuration.
    """
    # This is duplicate code, see
    # https://clusterhq.atlassian.net/browse/FLOC-1903
    control_node = environ.get("FLOCKER_ACCEPTANCE_CONTROL_NODE")
    if control_node is None:
        raise SkipTest("Set control node address using "
                       "FLOCKER_ACCEPTANCE_CONTROL_NODE environment variable.")
    certificate_path = environ["FLOCKER_ACCEPTANCE_API_CERTIFICATES_PATH"]

    temp = FilePath(test_case.mktemp())
    temp.makedirs()

    deployment = temp.child(b"deployment.yml")
    deployment.setContent(safe_dump(deployment_config))

    application = temp.child(b"application.yml")
    application.setContent(safe_dump(application_config))
    check_call([b"flocker-deploy", b"--certificates-directory",
               certificate_path, control_node, deployment.path,
               application.path])
Example #5
File: node.py Project: umermansoor/ccm
    def __update_config(self):
        dir_name = self.get_path()
        if not os.path.exists(dir_name):
            os.mkdir(dir_name)
            for dir in self.__get_diretories():
                os.mkdir(os.path.join(dir_name, dir))

        filename = os.path.join(dir_name, 'node.conf')
        values = {
            'name' : self.name,
            'status' : self.status,
            'auto_bootstrap' : self.auto_bootstrap,
            'interfaces' : self.network_interfaces,
            'jmx_port' : self.jmx_port,
            'config_options' : self.__config_options,
        }
        if self.pid:
            values['pid'] = self.pid
        if self.initial_token:
            values['initial_token'] = self.initial_token
        if self.__cassandra_dir is not None:
            values['cassandra_dir'] = self.__cassandra_dir
        if self.data_center:
            values['data_center'] = self.data_center
        if self.remote_debug_port:
            values['remote_debug_port'] = self.remote_debug_port
        with open(filename, 'w') as f:
            yaml.safe_dump(values, f)
Example #6
def write_project_summary(samples):
    """Write project summary information on the provided samples.
    write out dirs, genome resources,

    """
    work_dir = samples[0][0]["dirs"]["work"]
    out_file = os.path.join(work_dir, "project-summary.yaml")
    upload_dir = (os.path.join(work_dir, samples[0][0]["upload"]["dir"])
                  if "dir" in samples[0][0]["upload"] else "")
    test_run = samples[0][0].get("test_run", False)
    date = str(datetime.now())
    prev_samples = _other_pipeline_samples(out_file, samples)
    with open(out_file, "w") as out_handle:
        yaml.safe_dump({"date": date}, out_handle,
                       default_flow_style=False, allow_unicode=False)
        if test_run:
            yaml.safe_dump({"test_run": True}, out_handle, default_flow_style=False,
                           allow_unicode=False)
        yaml.safe_dump({"upload": upload_dir}, out_handle,
                       default_flow_style=False, allow_unicode=False)
        yaml.safe_dump({"bcbio_system": samples[0][0]["config"].get("bcbio_system", "")}, out_handle,
                       default_flow_style=False, allow_unicode=False)
        yaml.safe_dump({"samples": prev_samples + [_save_fields(sample[0]) for sample in samples]}, out_handle,
                       default_flow_style=False, allow_unicode=False)
    return out_file
Example #7
def _run_gemini_stats(bam_file, data, out_dir):
    """Retrieve high level variant statistics from Gemini.
    """
    out = {}
    gemini_db = (data.get("variants", [{}])[0].get("population", {}).get("db") 
                 if data.get("variants") else None)
    if gemini_db:
        gemini_stat_file = "%s-stats.yaml" % os.path.splitext(gemini_db)[0]
        if not utils.file_uptodate(gemini_stat_file, gemini_db):
            gemini = config_utils.get_program("gemini", data["config"])
            tstv = subprocess.check_output([gemini, "stats", "--tstv", gemini_db])
            gt_counts = subprocess.check_output([gemini, "stats", "--gts-by-sample", gemini_db])
            dbsnp_count = subprocess.check_output([gemini, "query", gemini_db, "-q",
                                                   "SELECT count(*) FROM variants WHERE in_dbsnp==1"])
            out["Transition/Transversion"] = tstv.split("\n")[1].split()[-1]
            for line in gt_counts.split("\n"):
                parts = line.rstrip().split()
                if len(parts) > 0 and parts[0] == data["name"][-1]:
                    _, hom_ref, het, hom_var, _, total = parts
                    out["Variations (total)"] = int(total)
                    out["Variations (heterozygous)"] = int(het)
                    out["Variations (homozygous)"] = int(hom_var)
                    break
            out["Variations (in dbSNP)"] = int(dbsnp_count.strip())
            if out.get("Variations (total)") > 0:
                out["Variations (in dbSNP) pct"] = "%.1f%%" % (out["Variations (in dbSNP)"] /
                                                               float(out["Variations (total)"]) * 100.0)
            with open(gemini_stat_file, "w") as out_handle:
                yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
        else:
            with open(gemini_stat_file) as in_handle:
                out = yaml.safe_load(in_handle)
    return out
Example #8
    def test_override_theme_new_style(self, checkout_path, run):
        tmpdir = tempfile.mkdtemp()
        os.mkdir(os.path.join(tmpdir, 'docs'))
        yaml_file = os.path.join(tmpdir, 'mkdocs.yml')
        yaml.safe_dump(
            {
                'theme': {
                    'name': 'readthedocs',
                },
                'site_name': 'mkdocs',
                'docs_dir': 'docs',
            },
            open(yaml_file, 'w')
        )
        checkout_path.return_value = tmpdir

        self.searchbuilder = MkdocsHTML(
            build_env=self.build_env,
            python_env=None
        )
        self.searchbuilder.append_conf()

        run.assert_called_with('cat', 'mkdocs.yml', cwd=mock.ANY)

        config = yaml.safe_load(open(yaml_file))
        self.assertEqual(
            config['theme'],
            {
                'name': 'readthedocs',
                'custom_dir': BaseMkdocs.READTHEDOCS_TEMPLATE_OVERRIDE_DIR
            }
        )
Example #9
    def test_dont_override_theme(self, checkout_path, run):
        tmpdir = tempfile.mkdtemp()
        os.mkdir(os.path.join(tmpdir, 'docs'))
        yaml_file = os.path.join(tmpdir, 'mkdocs.yml')
        yaml.safe_dump(
            {
                'theme': 'not-readthedocs',
                'theme_dir': 'not-readthedocs',
                'site_name': 'mkdocs',
                'docs_dir': 'docs',
            },
            open(yaml_file, 'w')
        )
        checkout_path.return_value = tmpdir

        self.searchbuilder = MkdocsHTML(
            build_env=self.build_env,
            python_env=None
        )
        self.searchbuilder.append_conf()

        run.assert_called_with('cat', 'mkdocs.yml', cwd=mock.ANY)

        config = yaml.safe_load(open(yaml_file))
        self.assertEqual(
            config['theme_dir'],
            'not-readthedocs'
        )
Example #10
def _merge_system_configs(host_config, container_config, out_file=None):
    """Create a merged system configuration from external and internal specification.
    """
    out = copy.deepcopy(container_config)
    for k, v in host_config.iteritems():
        if k in set(["galaxy_config"]):
            out[k] = v
        elif k == "resources":
            for pname, resources in v.iteritems():
                if not isinstance(resources, dict) and pname not in out[k]:
                    out[k][pname] = resources
                else:
                    for rname, rval in resources.iteritems():
                        if (rname in set(["cores", "jvm_opts", "memory"])
                              or pname in set(["gatk", "mutect"])):
                            if pname not in out[k]:
                                out[k][pname] = {}
                            out[k][pname][rname] = rval
    # Ensure final file is relocatable by mapping back to reference directory
    if "bcbio_system" in out and ("galaxy_config" not in out or not os.path.isabs(out["galaxy_config"])):
        out["galaxy_config"] = os.path.normpath(os.path.join(os.path.dirname(out["bcbio_system"]),
                                                             os.pardir, "galaxy",
                                                             "universe_wsgi.ini"))
    if out_file:
        with open(out_file, "w") as out_handle:
            yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
    return out
Example #11
def loadResumeFile():
    """
    Sets the singleton stateDict object to the content of the resume file.
    If the file is empty, it is initialized with an empty state dictionary.

    Raises:

        :class:`ooni.runner.InvalidResumeFile` if the resume file is not valid

    """
    if not config.stateDict:
        try:
            with open(config.resume_filename) as f:
                config.stateDict = yaml.safe_load(f)
        except:
            log.err("Error loading YAML file")
            raise InvalidResumeFile

        if not config.stateDict:
            with open(config.resume_filename, "w+") as f:
                yaml.safe_dump(dict(), f)
            config.stateDict = dict()

        elif isinstance(config.stateDict, dict):
            return
        else:
            log.err("The resume file is of the wrong format")
            raise InvalidResumeFile
Example #12
def populate_config_from_appliances(appliance_data):
    """populates env.local.yaml with the appliances just obtained

    args:
        appliance_data: the data of the appliances as taken from sprout
    """
    file_name = conf_path.join('env.local.yaml').strpath
    if os.path.exists(file_name):
        with open(file_name) as f:
            y_data = yaml.load(f)
        if not y_data:
            y_data = {}
    else:
        y_data = {}
    if y_data:
        with open(conf_path.join('env.local.backup').strpath, 'w') as f:
            yaml.dump(y_data, f, default_flow_style=False)

    y_data['appliances'] = []
    for app in appliance_data:
        app_config = dict(
            hostname=app['ip_address'],
            ui_protocol="https",
            version=str(app['template_version']),
        )
        y_data['appliances'].append(app_config)
    with open(file_name, 'w') as f:
        # Use safe dump to avoid !!python/unicode tags
        yaml.safe_dump(y_data, f, default_flow_style=False)
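
The comment in the example above hints at the main reason these projects prefer yaml.safe_dump over yaml.dump: the default dumper tags Python-specific objects (and, under Python 2, unicode strings) with !!python/... tags that only yaml.load can reconstruct, while the safe dumper restricts itself to standard YAML types and rejects everything else. A small sketch, with a made-up Widget class used purely for illustration:

import yaml

class Widget(object):
    def __init__(self, name):
        self.name = name

# yaml.dump serializes arbitrary objects with python-specific tags,
# e.g. "!!python/object:__main__.Widget {name: spam}" (roughly).
print(yaml.dump(Widget("spam")))

# yaml.safe_dump only accepts plain YAML types and raises for anything else.
try:
    yaml.safe_dump(Widget("spam"))
except yaml.representer.RepresenterError as exc:
    print("safe_dump refused:", exc)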
Example #13
def _write_config_file(items, global_vars, template, project_name, out_dir,
                       remotes):
    """Write configuration file, adding required top level attributes.
    """
    config_dir = utils.safe_makedir(os.path.join(out_dir, "config"))
    out_config_file = os.path.join(config_dir, "%s.yaml" % project_name)
    out = {"fc_date": datetime.datetime.now().strftime("%Y-%m-%d"),
           "fc_name": project_name,
           "upload": {"dir": "../final"},
           "details": items}
    if remotes.get("base"):
        r_base = objectstore.parse_remote(remotes.get("base"))
        out["upload"]["method"] = r_base.store
        out["upload"]["bucket"] = r_base.bucket
        out["upload"]["folder"] = os.path.join(r_base.key, "final") if r_base.key else "final"
        if r_base.region:
            out["upload"]["region"] = r_base.region
    if global_vars:
        out["globals"] = global_vars
    for k, v in template.iteritems():
        if k not in ["details"]:
            out[k] = v
    if os.path.exists(out_config_file):
        shutil.move(out_config_file,
                    out_config_file + ".bak%s" % datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S"))
    with open(out_config_file, "w") as out_handle:
        yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
    return out_config_file
Example #14
def write_out_bypass_compile_expansions(patch_file, **expansions):
    """
    Write out the macro expansions to given file.
    """
    with open(patch_file, "w") as out_file:
        print("Saving compile bypass expansions to {0}: ({1})".format(patch_file, expansions))
        yaml.safe_dump(expansions, out_file, default_flow_style=False)
Example #15
    def restore(self):
        backup_yaml = self.get_backup_dict()
        current_yaml = self.get_current_dict()
        not_found_keys = []
        for key, subkeys in self.keys_to_restore:
            if not subkeys and key not in backup_yaml:
                not_found_keys.append(key)
                continue
            if not subkeys:
                current_yaml[key] = backup_yaml[key]
                continue
            backup_values = backup_yaml.get(key, {})
            current_yaml[key] = current_yaml.get(key, {})
            for subkey in subkeys:
                if subkey not in backup_values:
                    not_found_keys.append("{0}/{1}".format(key, subkey))
                else:
                    current_yaml[key][subkey] = backup_values[subkey]
        if not_found_keys:
            raise Exception(
                "Not found values in backup for keys: {0}".format(
                    ",".join(not_found_keys)))
        old_path_name = "{0}.old".format(self.path)
        new_path_name = "{0}.new".format(self.path)
        shutil.copy2(self.path, old_path_name)
        with open(new_path_name, "w") as new:
            yaml.safe_dump(current_yaml, new, default_flow_style=False)
        shutil.move(new_path_name, self.path)
        self._post_restore_action()
Example #16
def save_install_defaults(args):
    """Save installation information to make future upgrades easier.
    """
    install_config = _get_install_config()
    if install_config is None:
        return
    if utils.file_exists(install_config):
        with open(install_config) as in_handle:
            cur_config = yaml.load(in_handle)
    else:
        cur_config = {}
    if args.tooldir:
        cur_config["tooldir"] = args.tooldir
    cur_config["sudo"] = args.sudo
    cur_config["isolate"] = args.isolate
    for attr in ["genomes", "aligners"]:
        if not cur_config.get(attr):
            cur_config[attr] = []
        for x in getattr(args, attr):
            if x not in cur_config[attr]:
                cur_config[attr].append(x)
    # toolplus -- save non-filename inputs
    attr = "toolplus"
    if not cur_config.get(attr):
        cur_config[attr] = []
    for x in getattr(args, attr):
        if not x.fname:
            if x.name not in cur_config[attr]:
                cur_config[attr].append(x.name)
    with open(install_config, "w") as out_handle:
        yaml.safe_dump(cur_config, out_handle, default_flow_style=False, allow_unicode=False)
Example #17
    def _send_configuration(self,
                            application_config_yaml=COMPLEX_APPLICATION_YAML,
                            deployment_config_yaml=COMPLEX_DEPLOYMENT_YAML):
        """
        Run ``flocker-deploy`` against the API server.

        :param application_config_yaml: Application configuration dictionary.
        :param deployment_config_yaml: Deployment configuration dictionary.

        :return: ``Deferred`` that fires with a tuple (stdout, stderr,
            exit code).
        """
        app_config = FilePath(self.mktemp())
        app_config.setContent(safe_dump(application_config_yaml))
        deployment_config = FilePath(self.mktemp())
        deployment_config.setContent(safe_dump(deployment_config_yaml))
        # This duplicates some code in the acceptance tests...
        # https://clusterhq.atlassian.net/browse/FLOC-1904
        return getProcessOutputAndValue(
            b"flocker-deploy", [
                b"--certificates-directory", self.certificate_path.path,
                b"--port",
                unicode(self.port_number).encode("ascii"), b"localhost",
                deployment_config.path, app_config.path
            ],
            env=environ)
Example #18
def write_distro_specific_manifest(manifest_file, package, vcs_type, vcs_url, api_homepage, tags_db):
    m_yaml = {}
    if os.path.isfile(manifest_file):
        with open(manifest_file, 'r') as f:
            m_yaml = yaml.load(f)

    m_yaml['api_documentation'] = api_homepage
    m_yaml['vcs'] = vcs_type
    m_yaml['vcs_url'] = vcs_url

    m_yaml['depends_on'] = []
    if tags_db.has_reverse_deps(package):
        m_yaml['depends_on'] = tags_db.get_reverse_deps(package)

    if not os.path.isdir(os.path.dirname(manifest_file)):
        os.makedirs(os.path.dirname(manifest_file))

    #Update our dependency list
    if 'depends' in m_yaml and type(m_yaml['depends']) == list:
        tags_db.add_forward_deps(package, m_yaml['depends'])

    #We need to keep track of metapackages separately as they're special kinds
    #of reverse deps
    if 'package_type' in m_yaml and m_yaml['package_type'] == 'metapackage':
        m_yaml['packages'] = m_yaml['depends']
        tags_db.set_metapackage_deps(package, m_yaml['depends'])

    #Check to see if this package is part of any metapackages
    if tags_db.has_metapackages(package):
        m_yaml['metapackages'] = tags_db.get_metapackages(package)

    with open(manifest_file, 'w+') as f:
        yaml.safe_dump(m_yaml, f, default_flow_style=False)
Example #19
def _install_container_bcbio_system(datadir):
    """Install limited bcbio_system.yaml file for setting core and memory usage.

    Adds any non-specific programs to the exposed bcbio_system.yaml file, only
    when the upgrade is happening inside a docker container.
    """
    base_file = os.path.join(datadir, "config", "bcbio_system.yaml")
    if not os.path.exists(base_file):
        return
    expose_file = os.path.join(datadir, "galaxy", "bcbio_system.yaml")
    expose = set(["memory", "cores", "jvm_opts"])
    with open(base_file) as in_handle:
        config = yaml.load(in_handle)
    if os.path.exists(expose_file):
        with open(expose_file) as in_handle:
            expose_config = yaml.load(in_handle)
    else:
        expose_config = {"resources": {}}
    for pname, vals in config["resources"].iteritems():
        expose_vals = {}
        for k, v in vals.iteritems():
            if k in expose:
                expose_vals[k] = v
        if len(expose_vals) > 0 and pname not in expose_config["resources"]:
            expose_config["resources"][pname] = expose_vals
    with open(expose_file, "w") as out_handle:
        yaml.safe_dump(expose_config, out_handle, default_flow_style=False, allow_unicode=False)
    return expose_file
Example #20
def firewall_disallow(port=None, protocol='TCP', ipv6=False):
    """
    Disallow connection on a port/protocol

    Keyword argument:
        port -- Port to close
        protocol -- Protocol associated with port
        ipv6 -- Target the IPv6 firewall instead of IPv4

    """
    port = int(port)
    ipv  = "ipv4"
    protocols = [protocol]

    firewall = firewall_list(raw=True)

    if ipv6:
        ipv = "ipv6"

    if protocol == "Both":
        protocols = ['UDP', 'TCP']

    for protocol in protocols:
        if port in firewall['uPnP'][protocol]:
            firewall['uPnP'][protocol].remove(port)
        if port in firewall[ipv][protocol]:
            firewall[ipv][protocol].remove(port)
        else:
            msignals.display(m18n.n('port_already_closed') % port, 'warning')

    with open('/etc/yunohost/firewall.yml', 'w') as f:
        yaml.safe_dump(firewall, f, default_flow_style=False)

    return firewall_reload()
Example #21
def write_stack_manifest(output_dir, stack_name, manifest, vcs_type, vcs_url, api_homepage, packages, tags_db):
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    m_yaml = {}
    m_yaml['api_documentation'] = api_homepage
    m_yaml['vcs'] = vcs_type
    m_yaml['vcs_url'] = vcs_url

    m_yaml['authors'] = manifest.author or ''
    m_yaml['brief'] = manifest.brief or ''
    m_yaml['depends'] = [dep.name for dep in manifest.depends] or ''
    m_yaml['packages'] = packages or ''
    m_yaml['description'] = manifest.description or ''
    m_yaml['license'] = manifest.license or ''
    m_yaml['msgs'] = []
    m_yaml['srvs'] = []
    m_yaml['url'] = manifest.url or ''
    m_yaml['package_type'] = 'stack'

    m_yaml['depends_on'] = []
    if tags_db.has_reverse_deps(stack_name):
        m_yaml['depends_on'] = tags_db.get_reverse_deps(stack_name)

    #Update our dependency list
    if 'depends' in m_yaml and type(m_yaml['depends']) == list:
        tags_db.add_forward_deps(stack_name, m_yaml['depends'])

    #Make sure to write stack dependencies to the tags db
    tags_db.set_metapackage_deps(stack_name, packages)

    with open(os.path.join(output_dir, 'manifest.yaml'), 'w+') as f:
        yaml.safe_dump(m_yaml, f, default_flow_style=False)
Example #22
    def run(self, connection, args=None):
        connection = super(TestOverlayAction, self).run(connection, args)
        runner_path = self.data['test'][self.test_uuid]['overlay_path'][self.parameters['test_name']]
        # now read the YAML to create a testdef dict to retrieve metadata
        yaml_file = os.path.join(runner_path, self.parameters['path'])
        # FIXME: check the existence at the same time as the open.
        if not os.path.exists(yaml_file):
            raise JobError("Unable to find test definition YAML: %s" % yaml_file)

        with open(yaml_file, 'r') as test_file:
            testdef = yaml.safe_load(test_file)

        # FIXME: change lava-test-runner to accept a variable instead of duplicating the YAML?
        with open("%s/testdef.yaml" % runner_path, 'w') as run_file:
            yaml.safe_dump(testdef, run_file)

        # write out the UUID of each test definition.
        # FIXME: is this necessary any longer?
        with open('%s/uuid' % runner_path, 'w') as uuid:
            uuid.write(self.test_uuid)

        # FIXME: does this match old-world test-shell & is it needed?
        with open('%s/testdef_metadata' % runner_path, 'w') as metadata:
            metadata.write(yaml.safe_dump(self.data['test'][self.test_uuid]['testdef_metadata']))

        # Need actions for the run.sh script (calling parameter support in base class)
        # and install script (also calling parameter support here.)
        # this run then only does the incidental files.

        self.results = {'success': self.test_uuid}
        return connection
Example #23
def test_configuration_with_binary_strings():
    """
    Regression test: serialization was failing on binary strings
    """

    import yaml

    obj = '\xaa\xbb\x00\xff\xff\x00ABC'
    assert yaml.load(yaml.dump(obj)) == obj
    assert yaml.safe_load(yaml.safe_dump(obj)) == obj

    obj = {'blob': '\xaa\xbb\x00\xff\xff\x00ABC'}
    assert yaml.load(yaml.dump(obj)) == obj
    assert yaml.safe_load(yaml.safe_dump(obj)) == obj

    obj = {
        'function': 'jobcontrol.utils.testing:job_simple_echo',
        'title': None,
        'notes': None,
        # 'args': ('\xaa\xbb\x00\xff\xff\x00ABC',),
        'args': '\xaa\xbb\x00\xff\xff\x00ABC',
        'dependencies': [],
        'kwargs': {},
        'id': 'f974e89f-4ae3-40cc-8316-b78e42bd5cc8',
    }
    dump(obj)
Example #24
def build_manifest_yaml(manifest, msgs, srvs, actions, output_dir):
    # by default, assume that packages are on wiki
    m_yaml = {}
    m_yaml['authors'] = manifest.author or ''
    m_yaml['maintainers'] = manifest.maintainer or ''
    m_yaml['brief'] = manifest.brief or ''
    m_yaml['depends'] = manifest.depends or ''
    m_yaml['description'] = manifest.description or ''
    m_yaml['license'] = manifest.license or ''
    m_yaml['msgs'] = msgs
    m_yaml['srvs'] = srvs
    m_yaml['actions'] = actions
    m_yaml['url'] = manifest.url or ''
    m_yaml['bugtracker'] = manifest.bugtracker or ''
    m_yaml['repo_url'] = manifest.repo_url or ''
    external_docs = manifest.get_export('doxymaker', 'external')
    if external_docs:
        m_yaml['external_docmentation'] = external_docs

    metapackage = [e for e in manifest.exports if e.tagname == 'metapackage']
    if metapackage:
        m_yaml['package_type'] = 'metapackage'
    else:
        m_yaml['package_type'] = 'package'

    deprecated = [e for e in manifest.exports if e.tagname == 'deprecated']
    if deprecated:
        m_yaml['deprecated'] = deprecated[0].content or "This package is deprecated."

    with open(os.path.join(output_dir, 'manifest.yaml'), 'w') as f:
        yaml.safe_dump(m_yaml, f, default_flow_style=False)
Example #25
File: transitions.py Project: abhi11/dak
def write_transitions(from_trans):
    """
    Update the active transitions file safely.
    This function takes a parsed input file (which avoids invalid
    files or files that may be modified while the function is
    active) and ensures the transitions file is updated atomically
    to avoid locks.

    @attention: This function may run B{within sudo}

    @type from_trans: dict
    @param from_trans: transitions dictionary, as returned by L{load_transitions}

    """

    trans_file = Cnf["Dinstall::ReleaseTransitions"]
    trans_temp = trans_file + ".tmp"

    trans_lock = lock_file(trans_file)
    temp_lock  = lock_file(trans_temp)

    destfile = file(trans_temp, 'w')
    yaml.safe_dump(from_trans, destfile, default_flow_style=False)
    destfile.close()

    os.rename(trans_temp, trans_file)
    os.close(temp_lock)
    os.close(trans_lock)
Example #26
def synchronize_workspace_descriptor(workspace, session) -> None:
    """
    Updates both the workspace descriptor on disk and in
     the database to contain the same essential data
    :param workspace: the database workspace model
    :param session: the current database session
    :return:
    """
    from son_editor.models.repository import Catalogue
    with open(os.path.join(workspace.path, "workspace.yml"), "r+") as stream:
        ws_descriptor = yaml.safe_load(stream)
        if "catalogue_servers" not in ws_descriptor:
            ws_descriptor["catalogue_servers"] = []
        for catalogue_server in ws_descriptor["catalogue_servers"]:
            if len([x for x in workspace.catalogues if x.name == catalogue_server['id']]) == 0:
                session.add(Catalogue(name=catalogue_server['id'],
                                      url=catalogue_server['url'],
                                      publish=catalogue_server['publish'] == 'yes',
                                      workspace=workspace)
                            )
        for cat in workspace.catalogues:
            if len([x for x in ws_descriptor["catalogue_servers"] if x['id'] == cat.name]) == 0:
                catalogue_server = {'id': cat.name, 'url': cat.url, 'publish': cat.publish}
                ws_descriptor['catalogue_servers'].append(catalogue_server)
        ws_descriptor['name'] = workspace.name
        ws_descriptor['ns_schema_index'] = workspace.ns_schema_index
        ws_descriptor['vnf_schema_index'] = workspace.vnf_schema_index
        # Rewind and truncate before dumping so the descriptor is overwritten,
        # not appended to after the earlier safe_load read.
        stream.seek(0)
        stream.truncate()
        yaml.safe_dump(ws_descriptor, stream)
Example #27
def createscan(specfile,outputfile):
    spec = yaml.load(open(specfile))

    analysis_info = recastapi.analysis.read.analysis_by_pub_identifier(*spec['pubkey'].split('/'))
    if not analysis_info:
        raise click.ClickException('Analysis {} not known, import it first.'.format(spec['pubkey']))

    scanrequest = recastapi.request.write.scan_request(
        analysis_info['id'],
        spec['title'],
        spec['description'],
        spec['reason'],
        spec['additional_information']
    )


    parnames = spec['parameters']
    points = spec['points']

    prlist, brlist = addpoints_to_scan(scanrequest['id'],spec['request_format'],parnames,points)

    yaml.safe_dump({
            'scan_id': scanrequest['id'],
            'point_requests': prlist,
            'basic_requests': brlist
        },
        open(outputfile,'w'),
        default_flow_style = False
    )
Example #28
def _setup_config_files(dst_dir,configs,post_process_config_file,fc_dir,sample_name="run",fc_date=None,fc_name=None):
    
    # Setup the data structure
    config_data_structure = {'details': configs}
    if fc_date is not None:
        config_data_structure['fc_date'] = fc_date
    if fc_name is not None:
        config_data_structure['fc_name'] = fc_name
        
    # Dump the config to file
    config_file = os.path.join(dst_dir,"%s-bcbb-config.yaml" % sample_name)
    with open(config_file,'w') as fh:
        fh.write(yaml.safe_dump(config_data_structure, default_flow_style=False, allow_unicode=True, width=1000))
            
    # Copy post-process file
    with open(post_process_config_file) as fh:
        local_post_process = yaml.load(fh) 
    # Update galaxy config to point to the original location
    local_post_process['galaxy_config'] = bcbio.utils.add_full_path(local_post_process['galaxy_config'],os.path.abspath(os.path.dirname(post_process_config_file)))
    # Add job name and output paths to the cluster platform arguments
    if 'distributed' in local_post_process and 'platform_args' in local_post_process['distributed']:
        slurm_out = "%s-bcbb.log" % sample_name
        local_post_process['distributed']['platform_args'] = "%s -J %s -o %s -D %s" % (local_post_process['distributed']['platform_args'], sample_name, slurm_out, dst_dir)
    local_post_process_file = os.path.join(dst_dir,"%s-post_process.yaml" % sample_name)
    with open(local_post_process_file,'w') as fh:
        fh.write(yaml.safe_dump(local_post_process, default_flow_style=False, allow_unicode=True, width=1000))
            
    # Write the command for running the pipeline with the configuration files
    run_command_file = os.path.join(dst_dir,"%s-bcbb-command.txt" % sample_name)
    with open(run_command_file,"w") as fh:
        fh.write(" ".join([os.path.basename(__file__),"--only-run",os.path.basename(local_post_process_file), os.path.join("..",os.path.basename(dst_dir)), os.path.basename(config_file)])) 
        fh.write("\n")   
    
    return [os.path.basename(local_post_process_file), dst_dir, fc_dir, os.path.basename(config_file)]
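
Unlike most examples here, this one dumps with allow_unicode=True and width=1000. Roughly, allow_unicode decides whether non-ASCII characters are written verbatim or escaped, and width raises the column at which long lines are wrapped. A quick sketch of the allow_unicode difference, with the output shown approximately in the comments:

import yaml

data = {"name": u"blåbärspaj"}

# Default (allow_unicode=False): non-ASCII characters come out escaped,
# e.g. name: "bl\xE5b\xE4rspaj"
print(yaml.safe_dump(data))

# allow_unicode=True keeps the characters readable in the output:
# name: blåbärspaj
print(yaml.safe_dump(data, allow_unicode=True))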
Example #29
    def save(self):
        with self.file_system.open(self.PENDING_TASK, 'w') as f:
            yaml.safe_dump(self.data, f)
        with self.file_system.open(self.REPEAT_TASK, 'w') as f:
            yaml.safe_dump(self.repeat_data, f)
        if not self.alarm_on:
            self.schedule()
Example #30
File: import.py Project: lowks/chalmers
def main(args):

    import_data = yaml.safe_load(args.input)
    groups = {k['group']['name']:k['group'] for k in import_data if 'group' in k}
    programs = {k['program']['name']:k['program'] for k in import_data if 'program' in k}

    for group in groups.values():
        group_dir = path.join(dirs.user_data_dir, 'groups')
        if not path.isdir(group_dir): os.makedirs(group_dir)

        group_path = path.join(group_dir, '%s.yaml' % group['name'])
        log.info("Writing group %s to %s" % (group['name'], group_path))
        with open(group_path, 'w') as gf:
            yaml.safe_dump(group, gf, default_flow_style=False)

    for program in programs.values():
        program = make_definition(program)
        program_dir = path.join(dirs.user_data_dir, 'programs')
        if not path.isdir(program_dir): os.makedirs(program_dir)

        program_path = path.join(program_dir, '%s.yaml' % program['name'])
        log.info("Writing program %s to %s" % (program['name'], program_path))

        with open(program_path, 'w') as pf:
            yaml.safe_dump(program, pf, default_flow_style=False)
Example #31
    def do_execute_task(self, code):
        if not self.is_ansible_alive():
            logger.info("ansible is dead")
            self.do_shutdown(False)
        if self.helper is None:
            output = "No play found. Run a valid play cell"
            stream_content = {'name': 'stdout', 'text': str(output)}
            self.send_response(self.iopub_socket, 'stream', stream_content)
            return {
                'status': 'ok',
                'execution_count': self.execution_count,
                'payload': [],
                'user_expressions': {}
            }

        self.registered_variable = None
        self.current_task = code
        try:
            code_data = yaml.load(code)
        except Exception:
            code_data = code
        logger.debug('code_data %s', code_data)
        logger.debug('code_data type: %s', type(code_data))

        if isinstance(code_data, str):
            if (code_data.endswith("?")):
                module = code_data[:-1].split()[-1]
            else:
                module = code_data.split()[-1]
            data = self.get_module_doc(module)
            payload = dict(source='', data=data)
            logging.debug('payload %s', payload)
            # content = {'name': 'stdout', 'text': str(payload)}
            self.send_response(self.iopub_socket, 'display_data', payload)
            return {
                'status': 'ok',
                'execution_count': self.execution_count,
                'payload': [],
                'user_expressions': {}
            }
        elif isinstance(code_data, list):
            code_data = code_data[0]
        elif isinstance(code_data, dict):
            code_data = code_data
        elif code_data is None:
            return {
                'status': 'ok',
                'execution_count': self.execution_count,
                'payload': [],
                'user_expressions': {}
            }
        else:
            logger.error('code_data %s unsupported type', type(code_data))

        if not isinstance(code_data, dict):
            try:
                code_data = yaml.load(code)
                tb = []
            except Exception:
                tb = traceback.format_exc(1).splitlines()
            reply = {
                'status': 'error',
                'execution_count': self.execution_count,
                'payload': [],
                'user_expressions': {},
                'traceback': ['Invalid task cell\n'] + tb,
                'ename': 'Invalid cell',
                'evalue': ''
            }
            self.send_response(self.iopub_socket,
                               'error',
                               reply,
                               ident=self._topic('error'))
            return reply

        if 'include_role' in code_data.keys():
            role_name = code_data['include_role'].get('name', '')
            if '.' in role_name:
                self.get_galaxy_role(role_name)

        if 'register' in code_data.keys():
            self.registered_variable = code_data['register']

        interrupted = False
        try:

            tasks = []

            current_task_data = yaml.load(self.current_task)
            current_task_data['ignore_errors'] = True
            tasks.append(current_task_data)
            tasks.append({
                'pause_for_kernel': {
                    'host': '127.0.0.1',
                    'port': self.helper.pause_socket_port,
                    'task_num': self.tasks_counter
                }
            })

            self.process_widgets()
            tasks.append({'include_vars': {'file': 'widget_vars.yml'}})

            # Create the include file task to look for the future task
            tasks.append({
                'include_tasks':
                'next_task{0}.yml'.format(self.tasks_counter + 1)
            })

            logger.debug(yaml.safe_dump(tasks, default_flow_style=False))

            self.next_task_file = os.path.join(
                self.temp_dir, 'project',
                'next_task{0}.yml'.format(self.tasks_counter))
            self.tasks_counter += 1
            self.task_files.append(self.next_task_file)
            with open(self.next_task_file, 'w') as f:
                f.write(yaml.safe_dump(tasks, default_flow_style=False))
            logger.info('Wrote %s', self.next_task_file)

            self.helper.pause_socket.send_string('Proceed')

            while True:
                logger.info("getting message %s",
                            self.helper.pause_socket_port)
                msg = self.queue.get()
                logger.info(msg)
                if isinstance(msg, StatusMessage):
                    if self.process_message(msg.message):
                        break
                elif isinstance(msg, TaskCompletionMessage):
                    logger.info('msg.task_num %s tasks_counter %s',
                                msg.task_num, self.tasks_counter)
                    break

        except KeyboardInterrupt:
            logger.error(traceback.format_exc())

        if interrupted:
            return {'status': 'abort', 'execution_count': self.execution_count}

        return {
            'status': 'ok',
            'execution_count': self.execution_count,
            'payload': [],
            'user_expressions': {}
        }
Example #32
def write_yaml(file, data):
	file.write(yaml.safe_dump(data, default_flow_style=False, explicit_start=True))
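
Besides default_flow_style, this wrapper passes explicit_start=True, which simply prefixes the output with the YAML document-start marker. A one-line sketch:

import yaml

print(yaml.safe_dump({"key": "value"}, default_flow_style=False, explicit_start=True))
# ---
# key: value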
Example #33
def _write_cache(cache, cache_file):
    with open(cache_file, "w") as out_handle:
        yaml.safe_dump(cache,
                       out_handle,
                       default_flow_style=False,
                       allow_unicode=False)
Example #34
    data['weight'] = pokemon.weight
    data['learnset'] = []
    for pokemon_move in session.query(tables.PokemonMove).filter(tables.PokemonMove.pokemon==pokemon, tables.PokemonMove.version_group==version, tables.PokemonMove.level > 0):
        data['learnset'].append([pokemon_move.level, pokemon_move.move_id])
    data['learnset'].sort()
    moveset = set()
    for pokemon_move in session.query(tables.PokemonMove).filter(tables.PokemonMove.pokemon==pokemon).all():
        moveset.add(pokemon_move.move_id)
    data['moveset'] = list(moveset)
    data['stats'] = []
    for i in range(6):
        data['stats'].append(pokemon.stats[i].base_stat)
    data['type0'] = pokemon.types[0].identifier
    data['type1'] = pokemon.types[1].identifier if len(pokemon.types) > 1 else None
    
    mons[data['id']] = data
    #print data['id'], data['name']
    
print 'Got pokemon'

evolution_chains = []
for evolution_chain in session.query(tables.EvolutionChain):
    s = [species.id for species in evolution_chain.species]
    evolution_chains.append(s)
    
print 'Got evolution chains'

open('minidex.yaml', 'w').write(yaml.safe_dump(
    {'pokemon': mons,
     'evolution_chains': evolution_chains}))  # safe_dump to avoid !!python/unicode
Example #35
    def _yaml(self, sections):
        self._deriveConfig()
        dd = {k: v for k, v in self.config.items() if k in sections}
        return yaml.safe_dump(dd, default_flow_style=False)
Example #36
    def save(self):
        with open(self.fname, 'w') as yamlFile:
            yamlFile.write(
                yaml.safe_dump(self.config, default_flow_style=False))
            chmod(self.fname, 0660)
Example #37
def write_config(params, filepath):
    with open(filepath, 'w') as outputfile:
        yaml.safe_dump(params, outputfile, default_flow_style=False)
Example #38
    def rev_train_cls(self):
        # Get generator from adversarial training
        self._load_adv(load_disc=False, load_opt_d=False)
        yaml_path = os.path.join(self.BASE_PATH, 'data.yml')
        with open(yaml_path, 'r') as yamlfile:
            cur_yaml = yaml.safe_load(yamlfile)
            print('Test BNet acc:', cur_yaml['accuracies']['bnet_cls_acc'])
            print('Test feat vecs acc:', cur_yaml['accuracies']['gen_cls_acc'])
        self._load_class_feat_vecs(self.CLASS_FEAT_OLD_VECS_PATH)

        self.gen.eval()
        self.disc.train()

        for epoch in range(self.cls_epochs):
            _loss_cls = 0.
            for batch_idx, (feat_vec, targets) in enumerate(
                    tqdm(self.train_feat_vecs_loader)):
                feat_vec, targets = feat_vec.to(self.device), targets.to(
                    self.device, dtype=torch.long)

                self.cls_optimizer.zero_grad()

                gen_targets_ohe = F.one_hot(targets,
                                            num_classes=self.classes_count).to(
                                                self.device)

                gen_feat_maps = self.gen(feat_vec, gen_targets_ohe)
                feats, logits_cls, _ = self.disc(gen_feat_maps.detach())

                # Only train discriminator on classification
                # No adversarial training here
                loss_cls = self.cls_criterion(logits_cls, targets.long())
                loss_cls.backward()
                self.cls_optimizer.step()

                _loss_cls += loss_cls.item()
                if batch_idx % 100 == 99:  # print every 100 mini-batches
                    print(
                        '\n\n[REV CLS TRAINING] EPOCH %d, MINI-BATCH %5d LR: %f loss: %.5f'
                        % (epoch + 1, batch_idx + 1,
                           self._get_lr(self.cls_optimizer), _loss_cls / 100))
                    _loss_cls = 0.0

            # self.cls_optimizer.step()

            print("BNET FEATURE MAP CLS ACC:", self._get_disc_cls_acc())
            print("DISC FEATURE VECTORS CLS ACC:",
                  self._get_disc_cls_acc_gen())

        print("*** Finished training discriminator for classification.")

        # Check and log discriminator classification accuracy
        bnet_acc = self._get_disc_cls_acc()
        feat_vec_acc = self._get_disc_cls_acc_gen()
        print("BASE-NET FEATURE MAPS CLS ACC:", bnet_acc)
        print("DISC FEATURE VECTORS CLS ACC:", feat_vec_acc)

        acc_data = {
            'rev_train_cls': {
                'rev_bnet_cls_acc': bnet_acc,
                'rev_gen_cls_acc': feat_vec_acc,
                'cls_lr': self.cls_lr,
                'disc': self.disc_type
            }
        }
        yaml_path = os.path.join(self.BASE_PATH, 'data.yml')
        with open(yaml_path, 'r') as yamlfile:
            cur_yaml = yaml.safe_load(yamlfile)
            cur_yaml.update(acc_data)

        if cur_yaml:
            with open(yaml_path, 'w') as yamlfile:
                yaml.safe_dump(cur_yaml, yamlfile)  # Also note the safe_dump
Example #39
            method_dict[method]['params'][param_name]['description'] = param_description
            method_dict[method]['params'][param_name]['required'] = required
            method_dict[method]['params'][param_name]['default'] = default
            method_dict[method]['params'][param_name]['type'] = param_type
    time.sleep(1)

for method in method_dict:

    file_name = 'actions/%s.yaml' % method
    output_dict = { 'name': method,
                    'runner_type': 'run-python',
                    'enabled': True,
                    'entry_point': 'run.py',
                    'description': method_dict[method]['description'],
                    'parameters': {}
                  }

    for param in method_dict[method]['params']:
        if param == 'token':
            method_dict[method]['params'][param]['required'] = False
        output_dict['parameters'][param] = {'type': method_dict[method]['params'][param]['type']}
        if method_dict[method]['params'][param]['default'] is not None:
            output_dict['parameters'][param]['default'] = method_dict[method]['params'][param]['default']
        output_dict['parameters'][param]['required'] = method_dict[method]['params'][param]['required']
        output_dict['parameters'][param]['description'] = method_dict[method]['params'][param]['description']

    print yaml.safe_dump(output_dict, default_flow_style=False)
    fh = open(file_name, 'w')
    fh.write(yaml.safe_dump(output_dict, default_flow_style=False))
    fh.close()
Example #40
def save_model(
    fastai_learner,
    path,
    conda_env=None,
    mlflow_model=None,
    signature: ModelSignature = None,
    input_example: ModelInputExample = None,
    **kwargs
):
    """
    Save a fastai Learner to a path on the local file system.

    :param fastai_learner: fastai Learner to be saved.
    :param path: Local path where the model is to be saved.
    :param conda_env: Either a dictionary representation of a Conda environment or the path to a
                      Conda environment yaml file. If provided, this describes the environment
                      this model should be run in. At minimum, it should specify the
                      dependencies contained in :func:`get_default_conda_env()`. If
                      ``None``, the default :func:`get_default_conda_env()` environment is
                      added to the model. The following is an *example* dictionary
                      representation of a Conda environment::

                        {
                            'name': 'mlflow-env',
                            'channels': ['defaults'],
                            'dependencies': [
                                'python=3.7.0',
                                'fastai=1.0.60',
                            ]
                        }
    :param mlflow_model: MLflow model config this flavor is being added to.

    :param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`
                      describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
                      The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
                      from datasets with valid model input (e.g. the training dataset with target
                      column omitted) and valid model output (e.g. model predictions generated on
                      the training dataset), for example:

                      .. code-block:: python

                        from mlflow.models.signature import infer_signature
                        train = df.drop_column("target_label")
                        predictions = ... # compute model predictions
                        signature = infer_signature(train, predictions)
    :param input_example: (Experimental) Input example provides one or several instances of valid
                          model input. The example can be used as a hint of what data to feed the
                          model. The given example will be converted to a Pandas DataFrame and then
                          serialized to json using the Pandas split-oriented format. Bytes are
                          base64-encoded.

    :param kwargs: kwargs to pass to ``Learner.save`` method.
    """
    import fastai
    from pathlib import Path

    path = os.path.abspath(path)
    if os.path.exists(path):
        raise MlflowException("Path '{}' already exists".format(path))
    model_data_subpath = "model.fastai"
    model_data_path = os.path.join(path, model_data_subpath)
    model_data_path = Path(model_data_path)
    os.makedirs(path)

    if mlflow_model is None:
        mlflow_model = Model()
    if signature is not None:
        mlflow_model.signature = signature
    if input_example is not None:
        _save_example(mlflow_model, input_example, path)

    # Save the fastai Learner
    fastai_learner.export(model_data_path, **kwargs)

    conda_env_subpath = "conda.yaml"

    if conda_env is None:
        conda_env = get_default_conda_env()
    elif not isinstance(conda_env, dict):
        with open(conda_env, "r") as f:
            conda_env = yaml.safe_load(f)
    with open(os.path.join(path, conda_env_subpath), "w") as f:
        yaml.safe_dump(conda_env, stream=f, default_flow_style=False)

    pyfunc.add_to_model(
        mlflow_model, loader_module="mlflow.fastai", data=model_data_subpath, env=conda_env_subpath
    )
    mlflow_model.add_flavor(FLAVOR_NAME, fastai_version=fastai.__version__, data=model_data_subpath)
    mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))
Example #41
def export_datasource_schema(back_references):
    """Export datasource YAML schema to stdout"""
    data = dict_import_export.export_schema_to_dict(
        back_references=back_references)
    yaml.safe_dump(data, stdout, default_flow_style=False)
Example #42
    def to_yaml(obj):
        return yaml.safe_dump(obj, default_flow_style=False)
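
The small helpers in Examples #35, #42, #45 and #46 rely on the fact that safe_dump returns the serialized document as a string when no stream is given, whereas the file-writing examples pass an open file object and get None back. A quick sketch:

import yaml

# Without a stream argument, safe_dump returns the YAML text.
text = yaml.safe_dump({"a": 1}, default_flow_style=False)
print(repr(text))  # 'a: 1\n'

# With a stream, it writes to the stream and returns None.
with open("out.yaml", "w") as handle:
    result = yaml.safe_dump({"a": 1}, handle, default_flow_style=False)
print(result)  # None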
Example #43
def _merge_target_information(samples):
    metrics_dir = utils.safe_makedir("metrics")
    out_file = os.path.abspath(os.path.join(metrics_dir, "target_info.yaml"))
    if utils.file_exists(out_file):
        return samples

    genomes = set(dd.get_genome_build(data) for data in samples)
    coverage_beds = set(dd.get_coverage(data) for data in samples)
    original_variant_regions = set(dd.get_variant_regions_orig(data) for data in samples)

    data = samples[0]
    info = {}

    # Reporting in MultiQC only if the genome is the same across all samples
    if len(genomes) == 1:
        info["genome_info"] = {
            "name": dd.get_genome_build(data),
            "size": sum([c.size for c in ref.file_contigs(dd.get_ref_file(data), data["config"])]),
        }

    # Reporting in MultiQC only if the target is the same across all samples
    vcr_orig = None
    if len(original_variant_regions) == 1 and list(original_variant_regions)[0] is not None:
        vcr_orig = list(original_variant_regions)[0]
        vcr_clean = bedutils.clean_file(vcr_orig, data)
        info["variants_regions_info"] = {
            "bed": vcr_orig,
            "size": sum(len(x) for x in pybedtools.BedTool(dd.get_variant_regions_merged(data))),
            "regions": pybedtools.BedTool(vcr_clean).count(),
        }
        gene_num = annotate.count_genes(vcr_clean, data)
        if gene_num is not None:
            info["variants_regions_info"]["genes"] = gene_num
    else:
        info["variants_regions_info"] = {
            "bed": "callable regions",
        }
    # Reporting in MultiQC only if the target is the same across samples
    if len(coverage_beds) == 1:
        cov_bed = list(coverage_beds)[0]
        if cov_bed not in [None, "None"]:
            if vcr_orig and vcr_orig == cov_bed:
                info["coverage_bed_info"] = info["variants_regions_info"]
            else:
                clean_bed = bedutils.clean_file(cov_bed, data, prefix="cov-", simple=True)
                info["coverage_bed_info"] = {
                    "bed": cov_bed,
                    "size": pybedtools.BedTool(cov_bed).total_coverage(),
                    "regions": pybedtools.BedTool(clean_bed).count(),
                }
                gene_num = annotate.count_genes(clean_bed, data)
                if gene_num is not None:
                    info["coverage_bed_info"]["genes"] = gene_num
        else:
            info["coverage_bed_info"] = info["variants_regions_info"]

    coverage_intervals = set(data["config"]["algorithm"]["coverage_interval"] for data in samples)
    if len(coverage_intervals) == 1:
        info["coverage_interval"] = list(coverage_intervals)[0]

    if info:
        with open(out_file, "w") as out_handle:
            yaml.safe_dump(info, out_handle)

    return samples
Example #44
    def generate_deployment_yaml(
        token: str = None,
        api: str = None,
        namespace: str = None,
        image_pull_secrets: str = None,
        resource_manager_enabled: bool = False,
    ) -> str:
        """
        Generate and output an installable YAML spec for the agent.

        Args:
            - token (str, optional): A `RUNNER` token to give the agent
            - api (str, optional): A URL pointing to the Prefect API. Defaults to
                `https://api.prefect.io`
            - namespace (str, optional): The namespace to create Prefect jobs in. Defaults
                to `default`
            - image_pull_secrets (str, optional): The name of an image pull secret to use
                for Prefect jobs
            - resource_manager_enabled (bool, optional): Whether to include the resource
                manager as part of the YAML. Defaults to `False`

        Returns:
            - str: A string representation of the generated YAML
        """

        # Use defaults if not provided
        token = token or ""
        api = api or "https://api.prefect.io"
        namespace = namespace or "default"

        version = prefect.__version__.split("+")
        image_version = "latest" if len(version) > 1 else version[0]

        with open(path.join(path.dirname(__file__), "deployment.yaml"),
                  "r") as deployment_file:
            deployment = yaml.safe_load(deployment_file)

        agent_env = deployment["spec"]["template"]["spec"]["containers"][0][
            "env"]

        agent_env[0]["value"] = token
        agent_env[1]["value"] = api
        agent_env[2]["value"] = namespace

        # Use local prefect version for image
        deployment["spec"]["template"]["spec"]["containers"][0][
            "image"] = "prefecthq/prefect:{}".format(image_version)

        # Populate resource manager if requested
        if resource_manager_enabled:
            resource_manager_env = deployment["spec"]["template"]["spec"][
                "containers"][1]["env"]

            resource_manager_env[0]["value"] = token
            resource_manager_env[1]["value"] = api
            resource_manager_env[3]["value"] = namespace

            # Use local prefect version for image
            deployment["spec"]["template"]["spec"]["containers"][1][
                "image"] = "prefecthq/prefect:{}".format(image_version)
        else:
            del deployment["spec"]["template"]["spec"]["containers"][1]

        # Populate image pull secrets if provided
        if image_pull_secrets:
            agent_env = deployment["spec"]["template"]["spec"][
                "imagePullSecrets"][0]["name"] = image_pull_secrets
        else:
            del deployment["spec"]["template"]["spec"]["imagePullSecrets"]

        return yaml.safe_dump(deployment)
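A hedged usage sketch for the method above. The surrounding class is not shown in the snippet, so the KubernetesAgent name below is an assumption; adapt the call to wherever the method is actually defined:

# the agent class name is an assumption; the token value is hypothetical
manifest = KubernetesAgent.generate_deployment_yaml(
    token="MY-RUNNER-TOKEN",
    namespace="prefect",
    resource_manager_enabled=True,
)
with open("agent-deployment.yaml", "w") as out_handle:
    out_handle.write(manifest)
# the resulting file can then be applied with: kubectl apply -f agent-deployment.yaml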
예제 #45
0
 def _dump(self):
     return yaml.safe_dump(self.inventory, default_flow_style=False)
예제 #46
0
def yaml_dump_(context, data):
    return yaml.safe_dump(data, default_flow_style=False)
예제 #47
0
    def DEPLOY(self, request):
        nodes = request["nodes"]
        topology = request["topology"]
        inventory_file = self.deploy_base + "inventory.yml"
        ansible_become_pass = "******"

        self.PUBLISH(request, deploy=True)

        inventory = {
            'deploy_routers': {
                'vars': {
                    'topology': topology
                },
                'hosts': {}
            }
        }
        hosts = inventory['deploy_routers']['hosts']

        for node in nodes:
            if node['cls'] == 'router':
                host = node['host']
                if not host in hosts:
                    hosts[host] = {'nodes': [], 'create_console': False}
                # if any of the nodes for this host has a console, set create_console for this host to true
                hosts[host]['create_console'] = (hosts[host]['create_console']
                                                 or self.has_console(node))
                hosts[host]['nodes'].append(node['name'])
                # pass in the password for each host if provided

                if request.get(ansible_become_pass + "_" + host):
                    hosts[host][ansible_become_pass] = request.get(
                        ansible_become_pass + "_" + host)
                # local hosts need to be marked as such
                if host in ('0.0.0.0', 'localhost', '127.0.0.1'):
                    hosts[host]['ansible_connection'] = 'local'

        with open(inventory_file, 'w') as n:
            yaml.safe_dump(inventory, n, default_flow_style=False)

        # start ansible-playbook in separate thread so we don't have to wait and can still get a callback when done
        def popenCallback(callback, args):
            def popen(callback, args):
                # send all output to deploy.txt so we can send it to the console in DEPLOY_STATUS
                with open(self.deploy_file, 'w') as fout:
                    proc = subprocess.Popen(args, stdout=fout, stderr=fout)
                    proc.wait()
                    callback(proc.returncode)
                return

            thread = threading.Thread(target=popen, args=(callback, args))
            thread.start()

        def ansible_done(returncode):
            os.remove(inventory_file)
            if self.verbose:
                print "-------------- DEPLOYMENT DONE with return code", returncode, "------------"
            if returncode:
                self.state = returncode
            else:
                self.state = "DONE"

        self.state = "DEPLOYING"
        popenCallback(ansible_done, [
            'ansible-playbook', self.deploy_base + 'install_dispatch.yaml',
            '-i', inventory_file
        ])

        return "deployment started"
예제 #48
0
    def generate(self, sigmaparser):
        # Take the log source information and figure out which set of mappings to use.
        ruleConfig = sigmaparser.parsedyaml
        ls_rule = ruleConfig['logsource']
        try:
            category = ls_rule['category']
        except KeyError:
            category = ""
        try:
            product = ls_rule['product']
        except KeyError:
            product = ""
        # try:
        #     service = ls_rule['service']
        # except KeyError:
        #     service = ""

        # If there is a timeframe component, we do not support it for now.
        if ruleConfig.get('detection', {}).get('timeframe', None) is not None:
            raise NotImplementedError(
                "Timeframes are not supported by backend.")

        # Don't use service for now, most Windows Event Logs
        # uses a different service with no category, since we
        # treat all Windows Event Logs together we can ignore
        # the service.
        service = ""

        # See if we have a definition for the source combination.
        mappingKey = "%s/%s/%s" % (product, category, service)
        topFilter, preCond, mappings, isAllStringValues, keywordField, postOpMapper = _allFieldMappings.get(
            self.lc_target,
            {}).get(mappingKey, tuple([None, None, None, None, None, None]))
        if mappings is None:
            raise NotImplementedError(
                "Log source %s/%s/%s not supported by backend." %
                (product, category, service))

        # Field name conversions.
        self._fieldMappingInEffect = mappings

        # LC event type pre-selector for the type of data.
        self._preCondition = preCond

        # Are all the values treated as strings?
        self._isAllStringValues = isAllStringValues

        # Are we supporting keywords full text search?
        self._keywordField = keywordField

        # Call to fixup all operations after the fact.
        self._postOpMapper = postOpMapper

        # Call the original generation code.
        detectComponent = super().generate(sigmaparser)

        # We expect a string (yaml) as output, so if
        # we get anything else we assume it's a core
        # library value and just return it as-is.
        if not isinstance(detectComponent, str):
            return detectComponent

        # It is redundant to deserialize the yaml right after
        # generating it, but we try to use the parent
        # official class code as much as possible for future
        # compatibility.
        detectComponent = yaml.safe_load(detectComponent)

        # Check that we got a proper node and not just a string
        # which we don't really know what to do with.
        if not isinstance(detectComponent, dict):
            raise NotImplementedError("Selection combination not supported.")

        # Apply top level filter.
        detectComponent.update(topFilter)

        # Now prepare the Response component.
        respondComponents = [{
            "action": "report",
            "name": ruleConfig["title"],
        }]

        # Add a lot of the metadata available to the report.
        if ruleConfig.get("tags", None) is not None:
            respondComponents[0].setdefault("metadata",
                                            {})["tags"] = ruleConfig["tags"]

        if ruleConfig.get("description", None) is not None:
            respondComponents[0].setdefault(
                "metadata", {})["description"] = ruleConfig["description"]

        if ruleConfig.get("references", None) is not None:
            respondComponents[0].setdefault(
                "metadata", {})["references"] = ruleConfig["references"]

        if ruleConfig.get("level", None) is not None:
            respondComponents[0].setdefault("metadata",
                                            {})["level"] = ruleConfig["level"]

        if ruleConfig.get("author", None) is not None:
            respondComponents[0].setdefault(
                "metadata", {})["author"] = ruleConfig["author"]

        if ruleConfig.get("falsepositives", None) is not None:
            respondComponents[0].setdefault(
                "metadata",
                {})["falsepositives"] = ruleConfig["falsepositives"]

        # Assemble it all as a single, complete D&R rule.
        return yaml.safe_dump(
            {
                "detect": detectComponent,
                "respond": respondComponents,
            },
            default_flow_style=False)
예제 #49
0
    def create_mes(self, context, mes):
        """Create MES and corresponding MEAs.

        :param mes: mes dict which contains mesd_id and attributes
        This method has 2 steps:
        step-1: Call MEO API to create MEAs
        step-2: Call Tacker drivers to create NSs
        """
        mes_info = mes['mes']
        name = mes_info['name']
        mes_info['mes_mapping'] = dict()

        if mes_info.get('mesd_template'):
            mesd_name = utils.generate_resource_name(name, 'inline')
            mesd = {'mesd': {
                'attributes': {'mesd': mes_info['mesd_template']},
                'description': mes_info['description'],
                'name': mesd_name,
                'template_source': 'inline',
                'tenant_id': mes_info['tenant_id']}}
            mes_info['mesd_id'] = self.create_mesd(context, mesd).get('id')

        mesd = self.get_mesd(context, mes['mes']['mesd_id'])
        mesd_dict = yaml.safe_load(mesd['attributes']['mesd'])
        meo_plugin = manager.ApmecManager.get_service_plugins()['MEO']

        region_name = mes.setdefault('placement_attr', {}).get(
            'region_name', None)
        vim_res = self.vim_client.get_vim(context, mes['mes']['vim_id'],
                                          region_name)
        # driver_type = vim_res['vim_type']
        if not mes['mes']['vim_id']:
            mes['mes']['vim_id'] = vim_res['vim_id']

        ##########################################
        # Detect MANO driver here:
        # Defined in the Tosca template
        nfv_driver = None
        if mesd_dict['imports'].get('nsds'):
            nfv_driver = mesd_dict['imports']['nsds']['nfv_driver']
            nfv_driver = nfv_driver.lower()
        if mesd_dict['imports'].get('vnffgds'):
            nfv_driver = mesd_dict['imports']['vnffgds']['nfv_driver']
            nfv_driver = nfv_driver.lower()

        ##########################################
        def _find_vnf_ins(cd_mes):
            al_ns_id_list = cd_mes['mes_mapping'].get('NS')
            if not al_ns_id_list:
                return None, None
            al_ns_id = al_ns_id_list[0]
            try:
                ns_instance = self._nfv_drivers.invoke(
                    nfv_driver,  # How to tell it is Tacker
                    'ns_get',
                    ns_id=al_ns_id,
                    auth_attr=vim_res['vim_auth'], )
            except Exception:
                return None, None
            if ns_instance['status'] != 'ACTIVE':
                return None, None
            al_vnf = ns_instance['vnf_ids']
            al_vnf_dict = ast.literal_eval(al_vnf)
            return ns_instance['id'], al_vnf_dict

        def _run_meso_algorithm(req_vnf_list):
            is_accepted = False
            al_mes_list = self.get_mess(context)
            ns_candidate = dict()
            for al_mes in al_mes_list:
                ns_candidate[al_mes['id']] = dict()
                if al_mes['status'] != "ACTIVE":
                    continue
                al_ns_id, al_vnf_dict = _find_vnf_ins(al_mes)
                if not al_ns_id:
                    continue
                ns_candidate[al_mes['id']][al_ns_id] = dict()
                for req_vnf_dict in req_vnf_list:
                    for vnf_name, al_vnf_id in al_vnf_dict.items():
                        if req_vnf_dict['name'] == vnf_name:
                            # Todo: remember to change this with VM capacity
                            len_diff =\
                                len([lend for lend in
                                     al_mes['reused'][vnf_name]
                                     if lend > 0])
                            avail = len_diff - req_vnf_dict['nf_ins']
                            ns_candidate[al_mes['id']][al_ns_id].\
                                update({vnf_name: avail})

            ns_cds = dict()
            deep_ns = dict()
            for mesid, ns_data_dict in ns_candidate.items():
                for nsid, resev_dict in ns_data_dict.items():
                    if len(resev_dict) == len(req_vnf_list):
                        nf_ins_list =\
                            [nf_ins for nf_name, nf_ins in
                             resev_dict.items() if nf_ins >= 0]
                        if len(nf_ins_list) == len(req_vnf_list):
                            total_ins = sum(nf_ins_list)
                            ns_cds[mesid] = total_ins
                        else:
                            extra_nf_ins_list =\
                                [-nf_ins for nf_name, nf_ins in
                                 resev_dict.items() if nf_ins < 0]
                            total_ins = sum(extra_nf_ins_list)
                            deep_ns[mesid] = total_ins
            if ns_cds:
                selected_mes1 = min(ns_cds, key=ns_cds.get)
                is_accepted = True
                return is_accepted, selected_mes1, None
            if deep_ns:
                selected_mes2 = min(deep_ns, key=deep_ns.get)
                is_accepted = True
                return is_accepted, selected_mes2, ns_candidate[selected_mes2]

            return is_accepted, None, None

        build_nsd_dict = dict()
        if mesd_dict['imports'].get('nsds'):
            # For framework evaluation
            nsd_template = mesd_dict['imports']['nsds']['nsd_templates']
            if isinstance(nsd_template, dict):
                if nsd_template.get('requirements'):
                    req_nf_dict = nsd_template['requirements']
                    req_nf_list = list()
                    for vnf_dict in req_nf_dict:
                        # Todo: make the requests more natural
                        req_nf_list.append(
                            {'name': vnf_dict['name'],
                             'nf_ins': int(vnf_dict['vnfd_template'][5])})
                    is_accepted, cd_mes_id, cd_vnf_dict =\
                        _run_meso_algorithm(req_nf_list)
                    if is_accepted:
                        new_mesd_dict = dict()
                        ref_mesd_dict = copy.deepcopy(mesd_dict)
                        ref_mesd_dict['imports']['nsds']['nsd_templates']['requirements'] = \
                            req_nf_list
                        new_mesd_dict['mes'] = dict()
                        new_mesd_dict['mes'] =\
                            {'mesd_template': yaml.safe_dump(ref_mesd_dict)}
                        self.update_mes(context, cd_mes_id, new_mesd_dict)
                        return cd_mes_id
                    else:
                        # Create the inline NS with the following template
                        import_list = list()
                        node_dict = dict()
                        for vnfd in req_nf_dict:
                            import_list.append(vnfd['vnfd_template'])
                            node = 'tosca.nodes.nfv.' + vnfd['name']
                            node_dict[vnfd['name']] = {'type': node}
                        build_nsd_dict['tosca_definitions_version'] =\
                            'tosca_simple_profile_for_nfv_1_0_0'
                        build_nsd_dict['description'] = mes_info['description']
                        build_nsd_dict['imports'] = import_list
                        build_nsd_dict['topology_template'] = dict()
                        build_nsd_dict['topology_template']['node_templates'] =\
                            node_dict

            nsds = mesd['attributes'].get('nsds')
            mes_info['mes_mapping']['NS'] = list()
            if nsds:
                nsds_list = nsds.split('-')
                for nsd in nsds_list:
                    ns_name = nsd + '-' + name + '-' + uuidutils.generate_uuid()  # noqa
                    nsd_instance = self._nfv_drivers.invoke(
                        nfv_driver,
                        'nsd_get_by_name',
                        nsd_name=nsd,
                        auth_attr=vim_res['vim_auth'],)
                    if nsd_instance:
                        ns_arg = {'ns': {'nsd_id': nsd_instance['id'],
                                         'name': ns_name}}
                        ns_id = self._nfv_drivers.invoke(
                            nfv_driver,  # How to tell it is Tacker
                            'ns_create',
                            ns_dict=ns_arg,
                            auth_attr=vim_res['vim_auth'], )
                        mes_info['mes_mapping']['NS'].append(ns_id)
            if build_nsd_dict:
                ns_name = 'nsd' + name + '-' + uuidutils.generate_uuid()
                ns_arg = {'ns': {'nsd_template': build_nsd_dict,
                                 'name': ns_name,
                                 'description': mes_info['description'],
                                 'vim_id': '',
                                 'tenant_id': mes_info['tenant_id'],
                                 'attributes': {}}}
                ns_id = self._nfv_drivers.invoke(
                    nfv_driver,  # How to tell it is Tacker
                    'ns_create',
                    ns_dict=ns_arg,
                    auth_attr=vim_res['vim_auth'], )
                mes_info['mes_mapping']['NS'].append(ns_id)

        vnffgds = mesd['attributes'].get('vnffgds')
        if mesd_dict['imports'].get('vnffgds'):
            vnffgds_list = vnffgds.split('-')
            mes_info['mes_mapping']['VNFFG'] = list()
            for vnffgd in vnffgds_list:
                vnffg_name = vnffgds + '-' + name + '-' + uuidutils.generate_uuid()   # noqa
                vnffgd_instance = self._nfv_drivers.invoke(
                    nfv_driver,  # How to tell it is Tacker
                    'vnffgd_get_by_name',
                    vnffgd_name=vnffgd,
                    auth_attr=vim_res['vim_auth'], )
                if vnffgd_instance:
                    vnffg_arg = {'vnffg': {'vnffgd_id': vnffgd_instance['id'], 'name': vnffg_name}}  # noqa
                    vnffg_id = self._nfv_drivers.invoke(
                        nfv_driver,  # How to tell it is Tacker
                        'vnffg_create',
                        vnffg_dict=vnffg_arg,
                        auth_attr=vim_res['vim_auth'], )
                    mes_info['mes_mapping']['VNFFG'].append(vnffg_id)

        # meca_id = dict()
        # Create MEAs using MEO APIs
        try:
            meca_name = 'meca' + '-' + name + '-' + uuidutils.generate_uuid()
            # Separate the imports out from template
            mead_tpl_dict = dict()
            mead_tpl_dict['imports'] =\
                mesd_dict['imports']['meads']['mead_templates']
            mecad_dict = copy.deepcopy(mesd_dict)
            mecad_dict.pop('imports')
            mecad_dict.update(mead_tpl_dict)
            LOG.debug('mesd %s', mecad_dict)
            meca_arg = {'meca': {'mecad_template': mecad_dict, 'name': meca_name,   # noqa
                                 'description': mes_info['description'],
                                 'tenant_id': mes_info['tenant_id'],
                                 'vim_id': mes_info['vim_id'],
                                 'attributes': {}}}
            meca_dict = meo_plugin.create_meca(context, meca_arg)
            mes_info['mes_mapping']['MECA'] = meca_dict['id']
        except Exception as e:
            LOG.error('Error while creating the MECAs: %s', e)
            # Call Tacker client driver

        mes_dict = super(MesoPlugin, self).create_mes(context, mes)

        def _create_mes_wait(self_obj, mes_id):
            args = dict()
            mes_status = "ACTIVE"
            ns_status = "PENDING_CREATE"
            vnffg_status = "PENDING_CREATE"
            mec_status = "PENDING_CREATE"
            ns_retries = NS_RETRIES
            mec_retries = MEC_RETRIES
            vnffg_retries = VNFFG_RETRIES
            mes_mapping = self.get_mes(context, mes_id)['mes_mapping']
            # Check MECA
            meca_id = mes_mapping['MECA']
            while mec_status == "PENDING_CREATE" and mec_retries > 0:
                time.sleep(MEC_RETRY_WAIT)
                mec_status = meo_plugin.get_meca(context, meca_id)['status']
                LOG.debug('status: %s', mec_status)
                if mec_status == 'ACTIVE' or mec_status == 'ERROR':
                    break
                mec_retries = mec_retries - 1
            error_reason = None
            if mec_retries == 0 and mec_status == 'PENDING_CREATE':
                error_reason = _(
                    "MES creation is not completed within"
                    " {wait} seconds as creation of MECA").format(
                    wait=MEC_RETRIES * MEC_RETRY_WAIT)
            # Check NS/VNFFG status
            if mes_mapping.get('NS'):
                ns_list = mes_mapping['NS']
                while ns_status == "PENDING_CREATE" and ns_retries > 0:
                    time.sleep(NS_RETRY_WAIT)
                    # Todo: support multiple NSs
                    ns_instance = self._nfv_drivers.invoke(
                        nfv_driver,  # How to tell it is Tacker
                        'ns_get',
                        ns_id=ns_list[0],
                        auth_attr=vim_res['vim_auth'], )
                    ns_status = ns_instance['status']
                    LOG.debug('status: %s', ns_status)
                    if ns_status == 'ACTIVE' or ns_status == 'ERROR':
                        break
                    ns_retries = ns_retries - 1
                error_reason = None
                if ns_retries == 0 and ns_status == 'PENDING_CREATE':
                    error_reason = _(
                        "MES creation is not completed within"
                        " {wait} seconds as creation of NS(s)").format(
                        wait=NS_RETRIES * NS_RETRY_WAIT)

                # Determine args
                ns_cd = self._nfv_drivers.invoke(
                    nfv_driver,  # How to tell it is Tacker
                    'ns_get',
                    ns_id=ns_list[0],
                    auth_attr=vim_res['vim_auth'], )
                ns_instance_dict = ns_cd['mgmt_urls']
                ns_instance_list = ast.literal_eval(ns_instance_dict)
                args['NS'] = dict()

                for vnf_name, mgmt_url_list in ns_instance_list.items():
                    # Todo: remember to change this with VM capacity
                    vm_capacity = VM_CAPA[vnf_name]
                    orig = [vm_capacity] * len(mgmt_url_list)
                    args['NS'][vnf_name] = [(val - 1) for val in orig]

            if mes_mapping.get('VNFFG'):
                while vnffg_status == "PENDING_CREATE" and vnffg_retries > 0:
                    time.sleep(VNFFG_RETRY_WAIT)
                    vnffg_list = mes_mapping['VNFFG']
                    # Todo: support multiple VNFFGs
                    vnffg_instance = self._nfv_drivers.invoke(
                        nfv_driver,  # How to tell it is Tacker
                        'vnffg_get',
                        vnffg_id=vnffg_list[0],
                        auth_attr=vim_res['vim_auth'], )
                    vnffg_status = vnffg_instance['status']
                    LOG.debug('status: %s', vnffg_status)
                    if vnffg_status == 'ACTIVE' or vnffg_status == 'ERROR':
                        break
                    vnffg_retries = vnffg_retries - 1
                error_reason = None
                if vnffg_retries == 0 and vnffg_status == 'PENDING_CREATE':
                    error_reason = _(
                        "MES creation is not completed within"
                        " {wait} seconds as creation of VNFFG(s)").format(
                        wait=VNFFG_RETRIES * VNFFG_RETRY_WAIT)
            if mec_status == "ERROR" or ns_status == "ERROR" or vnffg_status == "ERROR":   # noqa
                mes_status = "ERROR"
            if error_reason:
                mes_status = "PENDING_CREATE"

            super(MesoPlugin, self).create_mes_post(context, mes_id, mes_status, error_reason, args)   # noqa
        self.spawn_n(_create_mes_wait, self, mes_dict['id'])
        return mes_dict
예제 #50
0
    def load(self):

        if not os.path.exists(self.filename):

            try:
                # could use Python 3 exclusive creation open(file, 'x'), but..
                self.fd = open(self.filename, 'w')
                fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except IOError as _e:
                logger.warning("Race condition hit creating Inventory "
                               "file: {}".format(_e))
            else:
                try:
                    self.fd.write(
                        yaml.safe_dump(
                            AnsibleInventory.inventory_seed,  # noqa
                            default_flow_style=False))
                    fcntl.flock(self.fd, fcntl.LOCK_UN)
                except IOError as _e:
                    raise InventoryWriteError(
                        "Seeding inventory failed: {}".format(_e))  # noqa
            finally:
                self.fd.close()

        try:
            if self.exclusive_lock:
                locked = False
                self.fd = open(self.filename, 'r+')
                num_retries = 5
                for _d in range(num_retries):
                    try:
                        self.lock()
                    except IOError as _e:
                        # Can't obtain an exclusive_lock
                        logger.warning("Unable to lock inventory (attempt "
                                       "{}/{}): {}".format(
                                           _d + 1, num_retries, _e))
                        time.sleep(.05)  # wait 50ms before retry
                    else:
                        locked = True
                        raw = self.fd.read().strip()
                        break

                if not locked:
                    self.fd.close()
                    return
            else:
                raw = fread(self.filename)
        except Exception as ex:
            raise InventoryreadError("Unable to read the inventory file at "
                                     "{}, error: {}".format(self.filename, ex))

        if not raw:
            # If the inventory is empty for some strange reason
            self.inventory = None
        else:
            # invalid yaml management
            try:
                self.inventory = yaml.safe_load(raw)
            except yaml.YAMLError as ex:
                raise InventoryCorruptError(
                    "Unable to understand the inventory"
                    " yaml file at {}, error: {}".format(self.filename, ex))
예제 #51
0
def _save_model_with_class_artifacts_params(
        path,
        python_model,
        artifacts=None,
        conda_env=None,
        code_paths=None,
        mlflow_model=Model(),
        protocol=None,
):
    """
    :param path: The path to which to save the Python model.
    :param python_model: An instance of a subclass of :class:`~PythonModel`. ``python_model``
                        defines how the model loads artifacts and how it performs inference.
    :param artifacts: A dictionary containing ``<name, artifact_uri>`` entries.
                      Remote artifact URIs
                      are resolved to absolute filesystem paths, producing a dictionary of
                      ``<name, absolute_path>`` entries. ``python_model`` can reference these
                      resolved entries as the ``artifacts`` property of the ``context``
                      attribute. If ``None``, no artifacts are added to the model.
    :param conda_env: Either a dictionary representation of a Conda environment or the
                      path to a Conda environment yaml file. If provided, this describes the
                      environment this model should be run in. At minimum, it should specify
                      the dependencies
                      contained in :func:`get_default_conda_env()`. If ``None``, the default
                      :func:`get_default_conda_env()` environment is added to the model.
    :param code_paths: A list of local filesystem paths to Python file dependencies (or directories
                       containing file dependencies). These files are *prepended* to the system
                       path before the model is loaded.
    :param mlflow_model: The model configuration to which to add the ``mlflow.pyfunc`` flavor.
    :param protocol: The pickle protocol version. If ``None``, the default protocol version
                     from cloudpickle will be used.
    """
    custom_model_config_kwargs = {
        CONFIG_KEY_CLOUDPICKLE_VERSION: cloudpickle.__version__,
    }
    if isinstance(python_model, PythonModel):
        saved_python_model_subpath = "python_model.pkl"
        with open(os.path.join(path, saved_python_model_subpath), "wb") as out:
            cloudpickle.dump(python_model, out, protocol)
        custom_model_config_kwargs[
            CONFIG_KEY_PYTHON_MODEL] = saved_python_model_subpath
    else:
        raise MlflowException(
            message=
            ("`python_model` must be a subclass of `PythonModel`. Instead, found an"
             " object of type: {python_model_type}".format(
                 python_model_type=type(python_model))),
            error_code=INVALID_PARAMETER_VALUE,
        )

    if artifacts:
        saved_artifacts_config = {}
        with TempDir() as tmp_artifacts_dir:
            tmp_artifacts_config = {}
            saved_artifacts_dir_subpath = "artifacts"
            for artifact_name, artifact_uri in artifacts.items():
                tmp_artifact_path = _download_artifact_from_uri(
                    artifact_uri=artifact_uri,
                    output_path=tmp_artifacts_dir.path())
                tmp_artifacts_config[artifact_name] = tmp_artifact_path
                saved_artifact_subpath = posixpath.join(
                    saved_artifacts_dir_subpath,
                    os.path.relpath(path=tmp_artifact_path,
                                    start=tmp_artifacts_dir.path()),
                )
                saved_artifacts_config[artifact_name] = {
                    CONFIG_KEY_ARTIFACT_RELATIVE_PATH: saved_artifact_subpath,
                    CONFIG_KEY_ARTIFACT_URI: artifact_uri,
                }

            shutil.move(tmp_artifacts_dir.path(),
                        os.path.join(path, saved_artifacts_dir_subpath))
        custom_model_config_kwargs[
            CONFIG_KEY_ARTIFACTS] = saved_artifacts_config

    conda_env_subpath = "conda.yaml"
    if conda_env is None:
        conda_env = get_default_conda_env()
    elif not isinstance(conda_env, dict):
        with open(conda_env, "r") as f:
            conda_env = yaml.safe_load(f)
    with open(os.path.join(path, conda_env_subpath), "w") as f:
        yaml.safe_dump(conda_env, stream=f, default_flow_style=False)

    saved_code_subpath = None
    if code_paths is not None:
        saved_code_subpath = "code"
        for code_path in code_paths:
            _copy_file_or_tree(src=code_path,
                               dst=path,
                               dst_dir=saved_code_subpath)

    mlflow.pyfunc.add_to_model(model=mlflow_model,
                               loader_module=__name__,
                               code=saved_code_subpath,
                               env=conda_env_subpath,
                               **custom_model_config_kwargs)
    mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))
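A minimal sketch of the two conda_env forms the function above accepts, either a dict or a path to a YAML file; the environment name and package pins below are illustrative only:

import yaml

# dict form, as it could be passed directly to conda_env
conda_env_dict = {
    "name": "pyfunc-env",
    "channels": ["conda-forge"],
    "dependencies": ["python=3.9", "pip", {"pip": ["mlflow", "cloudpickle"]}],
}

# equivalent on-disk form, written the same way the function writes conda.yaml
with open("conda.yaml", "w") as f:
    yaml.safe_dump(conda_env_dict, f, default_flow_style=False)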
예제 #52
0
jujuEnv = sys.argv[3]

csarDir = 'csar'

charmsDir = 'charms'
charmSeries = 'precise'  # for now this has to be a valid name for a Ubuntu series

# Transform TOSCA service template into a charm-based model
transformer = ModelTransformer(csarFile, csarDir, serviceTpl)
model = transformer.transform()

# Print model
print ' '
print '------------------------------------'
print ' '
print yaml.safe_dump(model, default_flow_style=False)

# Build charms
charmGen = CharmGenerator(csarDir, model, charmsDir, charmSeries)
charmGen.generate()

# Generate commands
cmdGen = CommandGenerator(model, charmsDir, charmSeries, jujuEnv)
commands = cmdGen.generate()

print '------------------------------------'

for cmd in commands:
    # Print command
    printCmd = '\n'
    for arg in cmd:
예제 #53
0
    def import_from_dict(
        # pylint: disable=too-many-arguments,too-many-branches,too-many-locals
        cls,
        session: Session,
        dict_rep: Dict[Any, Any],
        parent: Optional[Any] = None,
        recursive: bool = True,
        sync: Optional[List[str]] = None,
    ) -> Any:  # pylint: disable=too-many-arguments,too-many-locals,too-many-branches
        """Import obj from a dictionary"""
        if sync is None:
            sync = []
        parent_refs = cls.parent_foreign_key_mappings()
        export_fields = set(cls.export_fields) | set(parent_refs.keys())
        new_children = {
            c: dict_rep[c]
            for c in cls.export_children if c in dict_rep
        }
        unique_constrains = cls._unique_constrains()

        filters = []  # Using these filters to check if obj already exists

        # Remove fields that should not get imported
        for k in list(dict_rep):
            if k not in export_fields:
                del dict_rep[k]

        if not parent:
            if cls.export_parent:
                for prnt in parent_refs.keys():
                    if prnt not in dict_rep:
                        raise RuntimeError("{0}: Missing field {1}".format(
                            cls.__name__, prnt))
        else:
            # Set foreign keys to parent obj
            for k, v in parent_refs.items():
                dict_rep[k] = getattr(parent, v)

        # Add filter for parent obj
        filters.extend(
            [getattr(cls, k) == dict_rep.get(k) for k in parent_refs.keys()])

        # Add filter for unique constraints
        ucs = [
            and_(*[
                getattr(cls, k) == dict_rep.get(k) for k in cs
                if dict_rep.get(k) is not None
            ]) for cs in unique_constrains
        ]
        filters.append(or_(*ucs))

        # Check if object already exists in DB, break if more than one is found
        try:
            obj_query = session.query(cls).filter(and_(*filters))
            obj = obj_query.one_or_none()
        except MultipleResultsFound as ex:
            logger.error(
                "Error importing %s \n %s \n %s",
                cls.__name__,
                str(obj_query),
                yaml.safe_dump(dict_rep),
            )
            raise ex

        if not obj:
            is_new_obj = True
            # Create new DB object
            obj = cls(**dict_rep)  # type: ignore
            logger.info("Importing new %s %s", obj.__tablename__, str(obj))
            if cls.export_parent and parent:
                setattr(obj, cls.export_parent, parent)
            session.add(obj)
        else:
            is_new_obj = False
            logger.info("Updating %s %s", obj.__tablename__, str(obj))
            # Update columns
            for k, v in dict_rep.items():
                setattr(obj, k, v)

        # Recursively create children
        if recursive:
            for child in cls.export_children:
                child_class = cls.__mapper__.relationships[
                    child].argument.class_
                added = []
                for c_obj in new_children.get(child, []):
                    added.append(
                        child_class.import_from_dict(session=session,
                                                     dict_rep=c_obj,
                                                     parent=obj,
                                                     sync=sync))
                # If children should get synced, delete the ones that did not
                # get updated.
                if child in sync and not is_new_obj:
                    back_refs = child_class.parent_foreign_key_mappings()
                    delete_filters = [
                        getattr(child_class,
                                k) == getattr(obj, back_refs.get(k))
                        for k in back_refs.keys()
                    ]
                    to_delete = set(
                        session.query(child_class).filter(
                            and_(*delete_filters))).difference(set(added))
                    for o in to_delete:
                        logger.info("Deleting %s %s", child, str(obj))
                        session.delete(o)

        return obj
예제 #54
0
File: baseprovider.py Project: wjjmjh/quay
def get_yaml(config_obj):
    return yaml.safe_dump(config_obj, allow_unicode=True)
예제 #55
0
    def _launch_refarch_env(self):
        with open(self.inventory_file, 'r') as f:
            print yaml.safe_dump(json.load(f), default_flow_style=False)

        if not self.args.no_confirm:
            if not click.confirm('Continue adding nodes with these values?'):
                sys.exit(0)
        tags = []
        tags.append('setup')

        if self.byo_nfs == "False":
            tags.append('nfs')

        tags.append('prod')

        if self.byo_lb == "False":
            tags.append('haproxy')

        # Add section here to modify inventory file based on input from user;
        # check your vmmark scripts for parsing the file and adding the values
        for line in fileinput.input("inventory/vsphere/vms/vmware_inventory.ini", inplace=True):
            if line.startswith("server="):
                print "server=" + self.vcenter_host
            elif line.startswith("password="):
                # the password/username branches below are assumed; they were
                # redacted in the source snippet and mirror the server branch
                print "password=" + self.vcenter_password
            elif line.startswith("username="):
                print "username=" + self.vcenter_username
            else:
                print line,

        tags = ",".join(tags)
        if self.clean is True:
            tags = 'clean'
        if self.tag:
            tags = self.tag

        if self.lb_ha_ip != '':
            self.lb_host = self.lb_ha_ip

        # grab the default priv key from the user"
        command='cp -f ~/.ssh/id_rsa ssh_key/ocp-installer'
        os.system(command)
        # make sure the ssh keys have the proper permissions
        command='chmod 600 ssh_key/ocp-installer'
        os.system(command)

        for tag in tags.split(','):
            playbook = "playbooks/" + tag + ".yaml"
            tags = 'all'

            devnull='> /dev/null'

            if self.verbose > 0:
                devnull=''

            command='ansible-playbook  --extra-vars "@./infrastructure.json" --tags %s -e \'vcenter_host=%s \
            vcenter_username=%s \
            vcenter_password=%s \
            vcenter_template_name=%s \
            vcenter_folder=%s \
            vcenter_cluster=%s \
            vcenter_datacenter=%s \
            vcenter_datastore=%s \
            vcenter_resource_pool=%s \
            dns_zone=%s \
            app_dns_prefix=%s \
            vm_dns=%s \
            vm_gw=%s \
            vm_netmask=%s \
            vm_network=%s \
            wildcard_zone=%s \
            console_port=%s \
            cluster_id=%s \
            deployment_type=%s \
            openshift_vers=%s \
            rhsm_user=%s \
            rhsm_password=%s \
            rhsm_satellite=%s \
            rhsm_pool="%s" \
            rhsm_katello_url="%s" \
            rhsm_activation_key="%s" \
            rhsm_org_id="%s" \
            openshift_sdn=%s \
            containerized=%s \
            container_storage=%s \
            openshift_hosted_metrics_deploy=%s \
            lb_host=%s \
            lb_ha_ip=%s \
            nfs_host=%s \
            nfs_registry_mountpoint=%s \' %s' % ( tags,
                            self.vcenter_host,
                            self.vcenter_username,
                            self.vcenter_password,
                            self.vcenter_template_name,
                            self.vcenter_folder,
                            self.vcenter_cluster,
                            self.vcenter_datacenter,
                            self.vcenter_datastore,
                            self.vcenter_resource_pool,
                            self.dns_zone,
                            self.app_dns_prefix,
                            self.vm_dns,
                            self.vm_gw,
                            self.vm_netmask,
                            self.vm_network,
                            self.wildcard_zone,
                            self.console_port,
                            self.cluster_id,
                            self.deployment_type,
                            self.openshift_vers,
                            self.rhel_subscription_user,
                            self.rhel_subscription_pass,
                            self.rhel_subscription_server,
                            self.rhel_subscription_pool,
                            self.rhsm_katello_url,
                            self.rhsm_activation_key,
                            self.rhsm_org_id,
                            self.openshift_sdn,
                            self.containerized,
                            self.container_storage,
                            self.openshift_hosted_metrics_deploy,
                            self.lb_host,
                            self.lb_ha_ip,
                            self.nfs_host,
                            self.nfs_registry_mountpoint,
                            playbook)


            if self.verbose > 0:
                command += " -vvvvvv"
                click.echo('We are running: %s' % command)

            status = os.system(command)
            if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
                return os.WEXITSTATUS(status)
            else:
                if self.clean is True:
                    self._reset_ocp_vars()
예제 #56
0
 def dump_yaml(cls, path, data):
     # the data argument is assumed here: the original called yaml.safe_dump(f),
     # which would serialize the file handle itself rather than write anything to it
     path1 = os.path.join(cls.Base_Path, path)
     with open(path1, "w", encoding="utf-8") as f:
         yaml.safe_dump(data, f, allow_unicode=True)
예제 #57
0
def deepcopy(item):
    return yaml.safe_load(yaml.safe_dump(item))
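A quick usage example for the yaml round-trip deepcopy above; mutating the copy leaves the original untouched:

import yaml

original = {"servers": [{"name": "a", "ports": [80, 443]}]}
copy = deepcopy(original)                  # uses the helper above
copy["servers"][0]["ports"].append(8080)
print(original["servers"][0]["ports"])     # [80, 443]; the nested list was not shared

Note that this trick only works for data safe_dump can represent (dicts, lists, strings, numbers, booleans, None, dates); arbitrary Python objects raise yaml.representer.RepresenterError.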
예제 #58
0
#!/usr/bin/python

import yaml

with open('data.yaml', 'r') as yamlfile:
    cur_yaml = yaml.safe_load(yamlfile)  # Note the safe_load
    cur_yaml['bugs_tree'].update(new_yaml_data_dict)

if cur_yaml:
    with open('data.yaml', 'w') as yamlfile:
        yaml.safe_dump(cur_yaml, yamlfile)  # Also note the safe_dump
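The snippet above references new_yaml_data_dict without defining it. A self-contained sketch of the same update pattern, with a hypothetical file name and payload, might look like this:

#!/usr/bin/python

import yaml

# hypothetical payload to merge into the existing document
new_yaml_data_dict = {'BUG-1234': {'status': 'open', 'severity': 'high'}}

with open('data.yaml', 'r') as yamlfile:
    cur_yaml = yaml.safe_load(yamlfile) or {}
    cur_yaml.setdefault('bugs_tree', {}).update(new_yaml_data_dict)

if cur_yaml:
    with open('data.yaml', 'w') as yamlfile:
        yaml.safe_dump(cur_yaml, yamlfile)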
예제 #59
0
def saveCalibrationFile(ci, filename, cname):
    """ Save calibration data to a YAML file.

    This function writes the new calibration information to a YAML
    file, if possible.

    :param ci: `sensor_msgs/CameraInfo`_ to save.
    :param filename: local file to store data.
    :param cname: Camera name.
    :returns: True if able to save the data.
    """
    # make sure the directory exists and the file is writable
    f = None
    try:
        f = open(filename, 'w')
    except IOError as e:
        if e.errno in set([errno.EACCES, errno.EPERM]):
            pass
        elif e.errno in set([errno.ENOENT]):
            # Find last slash in the name.  The URL parser ensures
            # there is at least one '/', at the beginning.
            last_slash = filename.rfind('/')
            if last_slash < 0:
                rclpy.logging._root_logger.log(
                    "filename [" + filename + "] has no '/'",
                    rclpy.logging.LoggingSeverity.ERROR)
                #print("filename [" + filename + "] has no '/'")
                return False  # not a valid URL

            # try to create the directory and all its parents
            dirname = filename[0:last_slash + 1]
            try:
                os.makedirs(dirname)
            except OSError:
                rclpy.logging._root_logger.log(
                    "unable to create path to directory [" + dirname + "]",
                    rclpy.logging.LoggingSeverity.ERROR)
                #print("unable to create path to directory [" + dirname + "]")
                return False

            # try again to create the file
            try:
                f = open(filename, 'w')
            except IOError:
                pass

    if f is None:  # something went wrong above?
        rclpy.logging._root_logger.log(
            "file [" + filename + "] not accessible",
            rclpy.logging.LoggingSeverity.ERROR)
        #print("file [" + filename + "] not accessible")
        return False  # unable to write this file

    # make calibration dictionary from CameraInfo fields and camera name
    calib = {
        'image_width': ci.width,
        'image_height': ci.height,
        'camera_name': cname,
        'distortion_model': ci.distortion_model,
        'distortion_coefficients': {
            'data': ci.D,
            'rows': 1,
            'cols': len(ci.D)
        },
        'camera_matrix': {
            'data': ci.K,
            'rows': 3,
            'cols': 3
        },
        'rectification_matrix': {
            'data': ci.R,
            'rows': 3,
            'cols': 3
        },
        'projection_matrix': {
            'data': ci.P,
            'rows': 3,
            'cols': 4
        }
    }

    try:
        rc = yaml.safe_dump(calib, f)
        return True

    except IOError:
        return False  # fail if unable to write file
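A small verification sketch, assuming a file already written by the function above; the path and camera name are hypothetical:

import yaml

with open('/tmp/camera_info/my_camera.yaml') as f:
    calib = yaml.safe_load(f)

print(calib['camera_name'])
print(calib['camera_matrix']['rows'], 'x', calib['camera_matrix']['cols'])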
예제 #60
0
def save_results(results):
    # Print data nicely for the user.
    if results:
        rows = results.get('rows')
        rank = 1

        map = {}
        for row in rows:
            url = re.sub(r'.*(/distribution/[a-z0-9]+).*', r"\1", row[0])
            if url in map:
                print 'adding for', url
                map[url] = map[url] + int(row[1])
            else:
                map[url] = int(row[1])

        map = OrderedDict(sorted(map.items(), key=itemgetter(0)))
        map = OrderedDict(sorted(map.items(), key=itemgetter(1), reverse=True))

        with open("../../_data/rank.yaml", "r") as file:
            final_result = yaml.load(file.read())

        if not final_result:
            print "No previous record found"
            final_result = {
                'meta': {
                    'previous_date': None,
                    'current_date': datetime.date.today().isoformat()
                },
                'distributions': []
            }
        else:
            final_result['meta']['previous_date'] = final_result['meta'][
                'current_date']
            final_result['meta']['current_date'] = datetime.datetime.now(
            ).strftime('%Y-%m-%d %H:%M')

        for url in map:
            distribution = None
            for d in final_result['distributions']:
                if d['url'] == url:
                    distribution = d
                    break

            if not distribution:
                distribution = {'url': url, 'previous': None, 'current': None}
                final_result['distributions'].append(distribution)

            distribution['previous'] = distribution['current']
            distribution['current'] = {'rank': rank, 'count': map[url]}

            rank += 1

        # ut.sort(key=lambda x: x.count, reverse=True)
        final_result['distributions'].sort(key=lambda x: x['current']['rank'])

        with open("../../_data/rank.yaml", "w") as file:
            file.write(yaml.safe_dump(final_result, default_flow_style=False))

        print 'Done'

    else:
        print 'No results found'