Example No. 1
def dump_yaml_by_category(items, root_dir=None):
    """ Dumps a set of files: ``$root_dir/$category/$activity.yaml``
    """
    assert root_dir
    categorized = defaultdict(lambda: defaultdict(lambda: []))
    total_facts = 0
    for raw_fact in items:
        category = raw_fact["category"]
        activity = raw_fact["activity"]
        fact = OrderedDict()
        keys = "since", "until", "description", "tags", "hamster_fact_id"
        for key in keys:
            if key in raw_fact:
                fact[key] = raw_fact[key]
        categorized[category][activity].append(fact)
        total_facts += 1
    for category in categorized:
        for activity, facts in categorized[category].iteritems():
            category_dir = os.path.join(root_dir, category)
            if not os.path.exists(category_dir):
                os.makedirs(category_dir)
            activity_file = os.path.join(category_dir, activity) + ".yaml"
            with open(activity_file, "w") as f:
                yaml.dump(facts, f, allow_unicode=True, default_flow_style=False)
    return total_facts
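
For orientation, here is a small self-contained sketch of the same group-then-dump idea in Python 3; the sample records, field names, and output directory are made up, and plain dicts stand in for OrderedDict.

import os
import tempfile
from collections import defaultdict
from datetime import datetime

import yaml

# Hypothetical input records; only the field names used below are assumed.
items = [
    {"category": "work", "activity": "review", "since": datetime(2021, 1, 4, 9, 0),
     "until": datetime(2021, 1, 4, 10, 0), "tags": ["code"]},
    {"category": "home", "activity": "cooking", "since": datetime(2021, 1, 4, 18, 0)},
]

root_dir = tempfile.mkdtemp()
grouped = defaultdict(lambda: defaultdict(list))
for raw in items:
    fact = {k: raw[k] for k in ("since", "until", "tags") if k in raw}
    grouped[raw["category"]][raw["activity"]].append(fact)

# One YAML file per activity: $root_dir/$category/$activity.yaml
for category, activities in grouped.items():
    os.makedirs(os.path.join(root_dir, category), exist_ok=True)
    for activity, facts in activities.items():
        with open(os.path.join(root_dir, category, activity + ".yaml"), "w") as f:
            yaml.dump(facts, f, allow_unicode=True, default_flow_style=False)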
Example No. 2
    def sync(self, args):
        """ Synchronize rtc/repository.yaml file and each rtc repository version hash. """
        options, argv = self.parse_args(args[:], self._print_alternative_rtcs)
        verbose = options.verbose_flag
        sys.stdout.write('# Writing repository.yaml for package distribution\n')

        sys.stdout.write('## Parsing RTC directory\n')
        package = admin.package.get_package_from_path(os.getcwd())
        repos = []
        for rtc in admin.rtc.get_rtcs_from_package(package, verbose=verbose):
            sys.stdout.write('### RTC %s\n' % rtc.rtcprofile.basicInfo.name)
            repo = admin.repository.get_repository_from_path(rtc.path, description=rtc.rtcprofile.basicInfo.description)

            repos.append(repo)

        repo_file = os.path.join(package.get_rtcpath(), 'repository.yaml')

        bak_file = repo_file + wasanbon.timestampstr()
        if os.path.isfile(bak_file):
            os.remove(bak_file)
        import shutil, yaml
        shutil.copy(repo_file, bak_file)
        dic = yaml.load(open(bak_file, 'r'))
        if not dic:
            dic = {}
        for repo in repos:
            if getattr(repo, 'url') != None:
                url = repo.url.strip()
            else:
                url = ''
            dic[repo.name] = {'repo_name' : repo.name, 'git': url, 'description':repo.description, 'hash':repo.hash}

        yaml.dump(dic, open(repo_file, 'w'), encoding='utf8', allow_unicode=True, default_flow_style=False)
        pass
Example No. 3
def test_missing_files(project_dir, temp_dir, resolve_symlinks, caplog):
    args = ['backup', '-t', temp_dir]
    if resolve_symlinks:
        args.append('--resolve-symlinks')

    result = run(args)
    assert result.exit_code == 1
    assert log_message(logging.ERROR, r'\.FileNotFoundError:', caplog)

    extends_content = {'version': '2', 'services': {'none': {'image': 'busybox'}}}
    with project_dir.add_file('void.yml').open('tw') as f:
        yaml.dump(extends_content, f)
    caplog.clear()
    result = run(args)

    assert result.exit_code == 1
    assert log_message(logging.ERROR, r'build path .*/lost either does not exist', caplog)

    project_dir.add_folder('lost')
    caplog.clear()
    result = run(args)
    assert result.exit_code == 1
    assert log_message(logging.ERROR, r"Couldn't find env file: .*/dangling_link$", caplog)

    project_dir.add_file('gone')
    caplog.clear()
    assert result_okay(run(args))
Example No. 4
def dump_yaml_by_year_week(items, root_dir=None):
    """ Dumps a set of files: ``$root_dir/$year/$week_number.yaml``
    """
    assert root_dir
    years = defaultdict(lambda: defaultdict(lambda: []))
    total_facts = 0
    for raw_fact in items:
        year = raw_fact["since"].year
        week = raw_fact["since"].isocalendar()[1]
        fact = OrderedDict()
        keys = "activity", "category", "since", "until", "description", "tags", "hamster_fact_id"
        for key in keys:
            if key in raw_fact:
                fact[key] = raw_fact[key]
        years[year][week].append(fact)
        total_facts += 1
    for year in years:
        for month, facts in years[year].iteritems():
            year_dir = os.path.join(root_dir, str(year))
            if not os.path.exists(year_dir):
                os.makedirs(year_dir)
            month_file = os.path.join(year_dir, "{0:0>2}".format(month)) + ".yaml"
            with open(month_file, "w") as f:
                yaml.dump(facts, f, allow_unicode=True, default_flow_style=False)
    return total_facts
Example No. 5
def get_post_process_yaml(data_dir, workdir):
    try:
        from bcbiovm.docker.defaults import get_datadir
        datadir = get_datadir()
        system = os.path.join(datadir, "galaxy", "bcbio_system.yaml") if datadir else None
    except ImportError:
        system = None
    if system is None or not os.path.exists(system):
        try:
            _, system = load_system_config("bcbio_system.yaml")
        except ValueError:
            system = None
    sample = os.path.join(data_dir, "post_process-sample.yaml")
    std = os.path.join(data_dir, "post_process.yaml")
    if os.path.exists(std):
        return std
    elif system and os.path.exists(system):
        # create local config pointing to reduced genomes
        test_system = os.path.join(workdir, os.path.basename(system))
        with open(system) as in_handle:
            config = yaml.load(in_handle)
            config["galaxy_config"] = os.path.join(data_dir, "universe_wsgi.ini")
            with open(test_system, "w") as out_handle:
                yaml.dump(config, out_handle)
        return test_system
    else:
        return sample
Example No. 6
def _merge_system_configs(host_config, container_config, out_file=None):
    """Create a merged system configuration from external and internal specification.
    """
    out = copy.deepcopy(container_config)
    for k, v in host_config.iteritems():
        if k in set(["galaxy_config"]):
            out[k] = v
        elif k == "resources":
            for pname, resources in v.iteritems():
                if not isinstance(resources, dict) and pname not in out[k]:
                    out[k][pname] = resources
                else:
                    for rname, rval in resources.iteritems():
                        if rname in set(["cores", "jvm_opts", "memory"]):
                            if pname not in out[k]:
                                out[k][pname] = {}
                            out[k][pname][rname] = rval
    # Ensure final file is relocatable by mapping back to reference directory
    if "bcbio_system" in out and ("galaxy_config" not in out or not os.path.isabs(out["galaxy_config"])):
        out["galaxy_config"] = os.path.normpath(os.path.join(os.path.dirname(out["bcbio_system"]),
                                                             os.pardir, "galaxy",
                                                             "universe_wsgi.ini"))
    if out_file:
        with open(out_file, "w") as out_handle:
            yaml.dump(out, out_handle, default_flow_style=False, allow_unicode=False)
    return out
Example No. 7
def main():
    root = "/mnt/charts/docs"
    chart_url = os.environ.get(
        "CHARTS_URL", "https://kubernetes-charts.storage.googleapis.com/")
    repo_url = os.environ.get("GIT_REPO")
    if repo_url is None:
        raise RuntimeError("You must specify a git repo!")
    p = urlparse(repo_url)
    git_user = p.path.split("/")[-2]
    repo_name = p.path.split("/")[-1].split(".")[0]
    default_mirror = "https://%s.github.io/%s/" % (git_user.lower(), repo_name)
    mirror_url = os.environ.get("MIRROR_URL", default_mirror)
    index_file = "index.yaml"
    wget(chart_url + index_file, index_file)
    with open(index_file) as f:
        index = yaml.load(f)
    entries = index["entries"]
    new = index.copy()
    for name, charts in entries.items():
        for chart, new_chart in zip(charts, new["entries"][name]):
            url = chart["urls"][0]
            tar_name = url.split("/")[-1]
            target = os.path.join(root, tar_name)
            new_chart["urls"][0] = "/".join(
                [mirror_url[:-1] if mirror_url.endswith("/") else mirror_url, tar_name])
            # datetime format issue
            new_chart["created"] = new_chart["created"].strftime('%Y-%m-%dT%H:%M:%S.%f000Z')
            if os.path.exists(target):
                continue
            wget(url, target)
    new["generated"] = new["generated"].strftime('%Y-%m-%dT%H:%M:%S.%f000Z')
    with open(os.path.join(root, "index.yaml"), "w") as f:
        yaml.dump(new, stream=f)
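
The strftime calls above work around a round-trip quirk: PyYAML parses ISO-8601 timestamps in the Helm index into datetime objects, and dumping them back does not reproduce the original string form. A minimal illustration with a made-up document (exact output depends on the PyYAML version):

import yaml

doc = yaml.safe_load("created: 2020-01-02T03:04:05.123456Z\n")
print(type(doc["created"]))        # <class 'datetime.datetime'>
print(yaml.safe_dump(doc))         # e.g. "created: 2020-01-02 03:04:05.123456+00:00"

# Formatting the value back into a string keeps the index entry in the expected layout.
doc["created"] = doc["created"].strftime("%Y-%m-%dT%H:%M:%S.%f000Z")
print(yaml.safe_dump(doc))         # the created field is now emitted as a quoted string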
Example No. 8
    def save(self) -> 'Bundle':
        """Save the bundle to disk."""
        if not self._bundle_file:
            raise Exception()
        with open(self._bundle_file, 'w') as f:
            yaml.dump(self, f, **Bundle.YAML_DUMP_ARGS)
        return self
Example No. 9
  def test_disks_flag(self):
    # specifying an EBS mount or PD mount is only valid for EC2/Euca/GCE, so
    # fail on a cluster deployment.
    argv = self.cluster_argv[:] + ["--disks", "ABCDFEG"]
    self.assertRaises(BadConfigurationException, ParseArgs, argv, self.function)

    # if we get a --disk flag, fail if it's not a dict (after base64, yaml load)
    bad_disks_layout = yaml.load("""
    public1,
    """)
    base64ed_bad_disks = base64.b64encode(yaml.dump(bad_disks_layout))
    cloud_argv1 = self.cloud_argv[:] + ["--disks", base64ed_bad_disks]
    self.assertRaises(BadConfigurationException, ParseArgs, cloud_argv1,
      self.function)

    # passing in a dict should be fine, and result in us seeing the same value
    # for --disks that we passed in.
    disks = {'public1' : 'vol-ABCDEFG'}
    good_disks_layout = yaml.load("""
public1 : vol-ABCDEFG
    """)
    base64ed_good_disks = base64.b64encode(yaml.dump(good_disks_layout))
    cloud_argv2 = self.cloud_argv[:] + ["--disks", base64ed_good_disks]
    actual = ParseArgs(cloud_argv2, self.function).args
    self.assertEquals(disks, actual.disks)
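
A condensed Python 3 sketch of the encode/decode round trip this test exercises; base64 works on bytes, so the dumped YAML string is encoded explicitly, and safe_load stands in for the test's bare yaml.load.

import base64

import yaml

disks = {"public1": "vol-ABCDEFG"}

blob = base64.b64encode(yaml.dump(disks).encode("utf-8"))     # str -> bytes -> base64
restored = yaml.safe_load(base64.b64decode(blob))             # back to the original dict
assert restored == disks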
Example No. 10
    def test_unsafe(self):
        dummy = Dummy()

        with self.assertRaises(yaml.representer.RepresenterError):
            yaml.dump_all([dummy])

        with self.assertRaises(yaml.representer.RepresenterError):
            yaml.dump(dummy, Dumper=yDumper)

        # reverse monkey patch and try again
        monkey_patch_pyyaml_reverse()

        with tempfile.TemporaryFile(suffix='.yaml') as f:
            yaml.dump_all([dummy], stream=f)
            f.seek(0)  # rewind

            doc_unsafe = yaml.load(f)
            self.assertTrue(type(doc_unsafe) is Dummy)

            monkey_patch_pyyaml()
            with self.assertRaises(yaml.constructor.ConstructorError):
                f.seek(0)  # rewind
                safe_yaml_load(f)

            with self.assertRaises(yaml.constructor.ConstructorError):
                f.seek(0)  # rewind
                yaml.load(f)
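
As a stand-alone illustration of the behaviour the monkey patching above toggles (assuming PyYAML 5.1 or newer and a made-up class): safe_dump refuses arbitrary objects, the default Dumper tags them, and only the unsafe loader rebuilds them.

import yaml


class Widget:
    def __init__(self, name):
        self.name = name


w = Widget("demo")

try:
    yaml.safe_dump(w)
except yaml.representer.RepresenterError:
    print("safe_dump rejects arbitrary Python objects")

text = yaml.dump(w)            # emits a "!!python/object:..." tagged document

try:
    yaml.safe_load(text)       # the safe loader refuses that tag
except yaml.constructor.ConstructorError:
    print("safe_load rejects the tagged document")

print(yaml.unsafe_load(text).name)   # opt-in reconstruction; trusted input only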
Example No. 11
def settings(request):
    profile = request.user.get_profile()
    merchant_settings = MerchantSettings.load_by_merchant(profile)
    settings = MerchantSettingsForm(instance=merchant_settings)
    account_info = dict()
    try:
        cb_api = coinbase.get_api_instance(profile)
        account_info = coinbase.get_account_info(cb_api)
        print yaml.dump(account_info)
    except Exception as e:
        print "%s: %s" % (e.__class__, e)
        cb_api = None
    transactions = list()
#     if cb_api:
#         try:
#             transactions = [x for x in cb_api.transactions()]
#             if len(transactions) > 7:
#                 transactions = transactions[:7]
#         except Exception as e:
#             print "Exception getting transactions: %s %s" % (e.__class__, e)
#     for tx in cb_api.transactions():
#         print tx
#     print dir(tx)
    data = {'settings_form': settings,
            'coinbase_api': cb_api,
            'account_info': account_info,
            'transactions': transactions,
            }
    t = loader.get_template("coinexchange/account/settings.html")
    c = CoinExchangeContext(request, data)
    return HttpResponse(t.render(c))
Example No. 12
def populate_config_from_appliances(appliance_data):
    """populates env.local.yaml with the appliances just obtained

    args:
        appliance_data: the data of the appliances as taken from sprout
    """
    file_name = conf_path.join('env.local.yaml').strpath
    if os.path.exists(file_name):
        with open(file_name) as f:
            y_data = yaml.load(f)
        if not y_data:
            y_data = {}
    else:
        y_data = {}
    if y_data:
        with open(conf_path.join('env.local.backup').strpath, 'w') as f:
            yaml.dump(y_data, f, default_flow_style=False)

    y_data['appliances'] = []
    for app in appliance_data:
        app_config = dict(
            hostname=app['ip_address'],
            ui_protocol="https",
            version=str(app['template_version']),
        )
        y_data['appliances'].append(app_config)
    with open(file_name, 'w') as f:
        # Use safe dump to avoid !!python/unicode tags
        yaml.safe_dump(y_data, f, default_flow_style=False)
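
The same read/modify/write cycle, condensed into a self-contained sketch with hypothetical file names and appliance values; safe_load replaces the bare yaml.load used above, and an empty file is treated as an empty mapping.

import os

import yaml

path = "env.local.yaml"

if os.path.exists(path):
    with open(path) as f:
        data = yaml.safe_load(f) or {}
else:
    data = {}

data.setdefault("appliances", []).append(
    {"hostname": "10.0.0.5", "ui_protocol": "https", "version": "5.11.0"}
)

# safe_dump sticks to plain YAML tags, which is why the code above prefers it
# over yaml.dump for files that other tools will read.
with open(path, "w") as f:
    yaml.safe_dump(data, f, default_flow_style=False)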
Example No. 13
def gen_maestro_yaml():
    with open("maestro.yaml", "w") as conf:
        data = {
            "name": cluster_name,
            "ships": {},
            "services": {
                "weibo-emotion-app": {
                    "image": base_image,
                    "instances": {}
                },
            },
            "audit": [
                {"type": "log", "file": "/tmp/maestro.log"}
            ]
        }
        for ship in ships:
            data["ships"][ship] = {"ip": ship}

        for i in xrange(len(ships)):
            for j in xrange(num_instance_per_ship):
                container_no = i * num_instance_per_ship + j
                service_name = "weibo-emotion-app"
                container_name = "%s%s" % (service_name, container_no)
                container_port = base_port + container_no
                data["services"][service_name]["instances"][container_name] = {
                    "ship": ships[i],
                    "ports": {"client": {"external": container_port, "exposed": 8000}},
                    "lifecycle": {
                        "running": [{"type": "http", "port": "client"}],
                    },
                }

        yaml.dump(data, conf, default_flow_style=False)
Example No. 14
def evaluate(definition, args, account_info, force: bool):
    # extract Senza* meta information
    info = definition.pop("SenzaInfo")
    info["StackVersion"] = args.version

    template = yaml.dump(definition, default_flow_style=False)
    definition = evaluate_template(template, info, [], args, account_info)
    definition = yaml.load(definition)

    components = definition.pop("SenzaComponents", [])

    # merge base template with definition
    BASE_TEMPLATE.update(definition)
    definition = BASE_TEMPLATE

    # evaluate all components
    for component in components:
        componentname, configuration = named_value(component)
        configuration["Name"] = componentname

        componenttype = configuration["Type"]
        componentfn = get_component(componenttype)

        if not componentfn:
            raise click.UsageError('Component "{}" does not exist'.format(componenttype))

        definition = componentfn(definition, configuration, args, info, force)

    # throw executed template to templating engine and provide all information for substitutions
    template = yaml.dump(definition, default_flow_style=False)
    definition = evaluate_template(template, info, components, args, account_info)
    definition = yaml.load(definition)

    return definition
Example No. 15
def main(run_info_yaml, lane, out_file, genome_build, barcode_type, trim, ascii, analysis, description, clear_description, verbose):
    
    if verbose: print "Verifying that %s exists" % run_info_yaml
    assert os.path.exists(run_info_yaml)
    if verbose: print "Parsing %s" % run_info_yaml
    with open(run_info_yaml) as fh:
        run_info = yaml.load(fh)

    if verbose: print "Extracting lane info"
    if lane == 0:
        lane_info = run_info
    else:
        for info in run_info:
            if (int(info.get("lane",0)) == lane):
                lane_info = [info]
                break
    for info in lane_info:
        if verbose: print "Processing lane %s" % info["lane"]
        _process_info(info,genome_build,barcode_type,trim,ascii,analysis,description,clear_description,verbose)
    
    if out_file is not None:
        with open(out_file,'w') as fh:
            yaml.dump(run_info, fh, allow_unicode=True, default_flow_style=False)
    else:
        print yaml.dump(run_info, allow_unicode=True, default_flow_style=False)
Example No. 16
    def _configure_services(self):
        """Configure all of the services."""
        u.log.debug("Running all tests in Apparmor enforce mode.")
        nova_config = {'config-flags': 'auto_assign_floating_ip=False',
                       'enable-live-migration': 'False',
                       'aa-profile-mode': 'enforce'}
        nova_cc_config = {}
        if self.git:
            amulet_http_proxy = os.environ.get('AMULET_HTTP_PROXY')

            reqs_repo = 'git://github.com/openstack/requirements'
            neutron_repo = 'git://github.com/openstack/neutron'
            nova_repo = 'git://github.com/openstack/nova'
            if self._get_openstack_release() == self.trusty_icehouse:
                reqs_repo = 'git://github.com/coreycb/requirements'
                neutron_repo = 'git://github.com/coreycb/neutron'
                nova_repo = 'git://github.com/coreycb/nova'

            branch = 'stable/' + self._get_openstack_release_string()

            openstack_origin_git = {
                'repositories': [
                    {'name': 'requirements',
                     'repository': reqs_repo,
                     'branch': branch},
                    {'name': 'neutron',
                     'repository': neutron_repo,
                     'branch': branch},
                    {'name': 'nova',
                     'repository': nova_repo,
                     'branch': branch},
                ],
                'directory': '/mnt/openstack-git',
                'http_proxy': amulet_http_proxy,
                'https_proxy': amulet_http_proxy,
            }
            nova_config['openstack-origin-git'] = \
                yaml.dump(openstack_origin_git)

            nova_cc_config['openstack-origin-git'] = \
                yaml.dump(openstack_origin_git)

        if self._get_openstack_release() >= self.xenial_ocata:
            nova_cc_config['network-manager'] = 'Neutron'

        keystone_config = {
            'admin-password': '******',
            'admin-token': 'ubuntutesting',
        }
        pxc_config = {
            'max-connections': 1000,
        }

        configs = {
            'nova-compute': nova_config,
            'keystone': keystone_config,
            'nova-cloud-controller': nova_cc_config,
            'percona-cluster': pxc_config,
        }
        super(NovaBasicDeployment, self)._configure_services(configs)
Example No. 17
def main():
    arg = sys.argv[1:]
    json_file = open(arg[0], 'r')
    json_obj = json.load(json_file, encoding="latin-1")
    json_obj = byteify(json_obj)
    path = os.getcwd()

    for base_object in json_obj:
        if '.' not in base_object:
            new_directory = "%s/inventory/%s" % (path, base_object)
            if not os.path.exists(new_directory):
                os.makedirs(new_directory)

    for group_vars in json_obj['group_vars']:
        stream = open("%s/inventory/group_vars/%s" % (path, group_vars), "w")
        yaml.dump(json_obj['group_vars'][group_vars], stream, default_flow_style=False)
        stream.close()

    for host_vars in json_obj['host_vars']:
        stream = open("%s/inventory/host_vars/%s" % (path, host_vars), "w")
        yaml.dump(json_obj['host_vars'][host_vars], stream, default_flow_style=False)
        stream.close()

    stream = open("%s/inventory/hosts.ini" % path, "w")
    stream.write( json_obj['hosts.ini'] )
    stream.close()
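
Stripped to its essence, the conversion above is a json.loads followed by one yaml.safe_dump per key; a minimal sketch with made-up inventory data:

import json

import yaml

json_text = '{"group_vars": {"all": {"ntp_server": "pool.ntp.org"}}}'
inventory = json.loads(json_text)

for group, variables in inventory["group_vars"].items():
    with open(f"{group}.yml", "w") as stream:
        yaml.safe_dump(variables, stream, default_flow_style=False)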
Example No. 18
    def test_yaml_representation_has_all_expected_fields(self):
        """Verify that the YAML representation of reference props is ok."""

        prop = properties.ReferenceProperty(
            'name', references.Reference(
                '5f2c9a1d-1113-49f1-9d1d-29aaa4a520b0', None, None))
        string = yaml.dump(prop)
        data = yaml.load(string)
        self.assertTrue(isinstance(data, dict))
        self.assertEqual(data['uuid'], '5f2c9a1d-1113-49f1-9d1d-29aaa4a520b0')
        self.assertTrue(not 'service' in data)
        self.assertTrue(not 'ref' in data)

        prop = properties.ReferenceProperty(
            'name', references.Reference(
                '5f2c9a1d-1113-49f1-9d1d-29aaa4a520b0', 'issues', None))
        string = yaml.dump(prop)
        data = yaml.load(string)
        self.assertTrue(isinstance(data, dict))
        self.assertEqual(data['uuid'], '5f2c9a1d-1113-49f1-9d1d-29aaa4a520b0')
        self.assertEqual(data['service'], 'issues')
        self.assertTrue(not 'ref' in data)

        prop = properties.ReferenceProperty(
            'name', references.Reference(
                '5f2c9a1d-1113-49f1-9d1d-29aaa4a520b0',
                'issues', 'master'))
        string = yaml.dump(prop)
        data = yaml.load(string)
        self.assertTrue(isinstance(data, dict))
        self.assertEqual(data['uuid'], '5f2c9a1d-1113-49f1-9d1d-29aaa4a520b0')
        self.assertEqual(data['service'], 'issues')
        self.assertEqual(data['ref'], 'master')
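
These assertions only hold because a representer for the property/reference types is registered elsewhere in the codebase; the sketch below shows that mechanism with a made-up Reference class rather than the project's real one.

import yaml


class Reference:
    def __init__(self, uuid, service=None, ref=None):
        self.uuid, self.service, self.ref = uuid, service, ref


def represent_reference(dumper, obj):
    data = {"uuid": obj.uuid}
    if obj.service:
        data["service"] = obj.service
    if obj.ref:
        data["ref"] = obj.ref
    return dumper.represent_dict(data)


yaml.add_representer(Reference, represent_reference)

string = yaml.dump(Reference("5f2c9a1d-1113-49f1-9d1d-29aaa4a520b0", "issues"))
print(yaml.safe_load(string))    # {'service': 'issues', 'uuid': '5f2c9a1d-...'}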
Example No. 19
    def test_yaml_representation_has_all_expected_fields(self):
        """Verify that the YAML representation of list properties is ok."""

        prop = properties.ListProperty('name', [])
        string = yaml.dump(prop)
        data = yaml.load(string)
        self.assertTrue(isinstance(data, list))
        self.assertEqual(len(data), 0)

        prop = properties.ListProperty('name', [
            properties.IntProperty('name', 5),
            properties.IntProperty('name', -17),
            ])
        string = yaml.dump(prop)
        data = yaml.load(string)
        self.assertTrue(isinstance(data, list))
        self.assertEqual(len(data), 2)
        self.assertEqual(data[0], 5)
        self.assertEqual(data[1], -17)

        prop = properties.ListProperty('name', [
            properties.TextProperty('name', 'foo'),
            properties.TextProperty('name', 'bar'),
            properties.TextProperty('name', 'baz'),
            ])
        string = yaml.dump(prop)
        data = yaml.load(string)
        self.assertTrue(isinstance(data, list))
        self.assertEqual(len(data), 3)
        self.assertEqual(data[0], 'foo')
        self.assertEqual(data[1], 'bar')
        self.assertEqual(data[2], 'baz')
Example No. 20
def setup_minion(*roles):
    """Setup a minion server with a set of roles."""
    require('environment')
    for r in roles:
        if r not in VALID_ROLES:
            abort('%s is not a valid server role for this project.' % r)
    config = {
        'master': 'localhost' if env.master == env.host else env.master,
        'output': 'mixed',
        'grains': {
            'environment': env.environment,
            'roles': list(roles),
        },
        'mine_functions': {
            'network.interfaces': [],
            'network.ip_addrs': []
        },
    }
    _, path = tempfile.mkstemp()
    with open(path, 'w') as f:
        yaml.dump(config, f, default_flow_style=False)
    sudo("mkdir -p /etc/salt")
    put(local_path=path, remote_path="/etc/salt/minion", use_sudo=True)
    # install salt minion if it's not there already
    install_salt(SALT_VERSION, master=False, minion=True, restart=True)
    # queries server for its fully qualified domain name to get minion id
    key_name = run('python -c "import socket; print socket.getfqdn()"')
    execute(accept_key, key_name)
Example No. 21
def main():
    if len(sys.argv) < 2:
        print 'Usage: python rectangle.py FOLDER'
        sys.exit(1)

    cvStartWindowThread()
    directory = os.path.expanduser(sys.argv[1])
    filename = raw_input('Save results to what file? ')

    config = {}
    try:
        config = yaml.load(file(filename))
        print 'WARNING: Updating file', os.path.abspath(filename)
    except IOError:
        print 'Creating new file', os.path.abspath(filename)

    files = [os.path.join(directory, x) for x in os.listdir(directory)]
    #cvNamedWindow('Image', 1)
    try:
        for i, f in enumerate(files):
            img = cvLoadImage(f)
            display = Display(os.path.basename(f), img)
            display.execute(config)
            cvReleaseImage(img)
            print '\r%d / %d' % (i+1, len(files)),
            sys.stdout.flush()
    #cvDestroyWindow('Image')
    except:
        pass

    with open(os.path.expanduser(filename), 'w') as fd:
        yaml.dump(config, fd)
Example No. 22
def build_tagfile(apt_deps, tags_db, rosdoc_tagfile, current_package, ordered_deps, docspace, ros_distro):
    #Get the relevant tags from the database
    tags = []

    for dep in apt_deps:
        if tags_db.has_tags(dep):
            #Make sure that we don't pass our own tagfile to ourself
            #bad things happen when we do this
            for tag in tags_db.get_tags(dep):
                if tag['package'] != current_package:
                    tags.append(tag)

    #Add tags built locally in dependency order
    for dep in ordered_deps:
        #we'll exit the loop when we reach ourself
        if dep == current_package:
            break

        relative_tags_path = "doc/%s/api/%s/tags/%s.tag" % (ros_distro, dep, dep)
        if os.path.isfile(os.path.join(docspace, relative_tags_path)):
            tags.append({'docs_url': '../../api/%s/html' % dep, 
                         'location': 'file://%s' % os.path.join(docspace, relative_tags_path),
                         'package': '%s' % dep})
        else:
            print "DID NOT FIND TAG FILE at: %s" % os.path.join(docspace, relative_tags_path)

    with open(rosdoc_tagfile, 'w+') as tags_file:
        yaml.dump(tags, tags_file)
Example No. 23
def test_print_basic(monkeypatch):
    monkeypatch.setattr('boto.cloudformation.connect_to_region', lambda x: MagicMock())
    monkeypatch.setattr('boto.iam.connect_to_region', lambda x: MagicMock())

    data = {'SenzaInfo': {'StackName': 'test'}, 'SenzaComponents': [{'Configuration': {'Type': 'Senza::Configuration',
                                                                                       'ServerSubnets': {
                                                                                           'eu-west-1': [
                                                                                               'subnet-123']}}},
                                                                    {'AppServer': {
                                                                        'Type': 'Senza::TaupageAutoScalingGroup',
                                                                        'InstanceType': 't2.micro',
                                                                        'Image': 'AppImage',
                                                                        'TaupageConfig': {'runtime': 'Docker',
                                                                                          'source': 'foo/bar'}}}]}

    runner = CliRunner()

    with runner.isolated_filesystem():
        with open('myapp.yaml', 'w') as fd:
            yaml.dump(data, fd)

        result = runner.invoke(cli, ['print', 'myapp.yaml', '--region=myregion', '123', '1.0-SNAPSHOT'],
                               catch_exceptions=False)

    assert 'AWSTemplateFormatVersion' in result.output
    assert 'subnet-123' in result.output
Example No. 24
def test_console(monkeypatch):
    stack = MagicMock(stack_name='test-1')
    inst = MagicMock()
    inst.tags = {'aws:cloudformation:stack-name': 'test-1'}
    ec2 = MagicMock()
    ec2.get_only_instances.return_value = [inst]
    ec2.get_console_output.return_value.output = b'**MAGIC-CONSOLE-OUTPUT**'
    monkeypatch.setattr('boto.ec2.connect_to_region', lambda x: ec2)
    monkeypatch.setattr('boto.cloudformation.connect_to_region',
                        lambda x: MagicMock(list_stacks=lambda stack_status_filters: [stack]))
    monkeypatch.setattr('boto.iam.connect_to_region', lambda x: MagicMock())

    runner = CliRunner()

    data = {'SenzaInfo': {'StackName': 'test'}}

    with runner.isolated_filesystem():
        with open('myapp.yaml', 'w') as fd:
            yaml.dump(data, fd)
        result = runner.invoke(cli, ['console', 'myapp.yaml', '--region=myregion', '1'],
                               catch_exceptions=False)

        assert '**MAGIC-CONSOLE-OUTPUT**' in result.output

        result = runner.invoke(cli, ['console', 'foobar', '--region=myregion'],
                               catch_exceptions=False)
        assert '' == result.output

        result = runner.invoke(cli, ['console', '172.31.1.2', '--region=myregion'],
                               catch_exceptions=False)
        assert '**MAGIC-CONSOLE-OUTPUT**' in result.output

        result = runner.invoke(cli, ['console', 'i-123', '--region=myregion'],
                               catch_exceptions=False)
        assert '**MAGIC-CONSOLE-OUTPUT**' in result.output
Example No. 25
def test_delete(monkeypatch):
    cf = MagicMock()
    stack = MagicMock(stack_name='test-1')
    cf.list_stacks.return_value = [stack]
    monkeypatch.setattr('boto.cloudformation.connect_to_region', lambda x: cf)
    monkeypatch.setattr('boto.iam.connect_to_region', lambda x: MagicMock())

    runner = CliRunner()

    data = {'SenzaInfo': {'StackName': 'test'}}

    with runner.isolated_filesystem():
        with open('myapp.yaml', 'w') as fd:
            yaml.dump(data, fd)
        result = runner.invoke(cli, ['delete', 'myapp.yaml', '--region=myregion', '1'],
                               catch_exceptions=False)
        assert 'OK' in result.output

        cf.list_stacks.return_value = [stack, stack]
        result = runner.invoke(cli, ['delete', 'myapp.yaml', '--region=myregion'],
                               catch_exceptions=False)
        assert 'Please use the "--force" flag if you really want to delete multiple stacks' in result.output

        result = runner.invoke(cli, ['delete', 'myapp.yaml', '--region=myregion', '--force'],
                               catch_exceptions=False)
        assert 'OK' in result.output
Example No. 26
    def __init__(self):
        configure(None)
        self.logger = getLogger("Permissions")

        self.confdir = tmpdir + "/config/"
        self.datadir = tmpdir + "/data/"

        try:
            os.makedirs(self.confdir)
            os.makedirs(self.datadir)
            self.logger.debug("Config and data dirs created.")
        except Exception:
            pass

        yaml.dump({"editor_warning": False},
                  open(self.confdir + "settings.yml", "w"))

        self.storage = StorageManager(self.confdir, self.datadir)

        self.data = self.storage.get_file(self, "data", formats.YAML,
                                          "permissions.yml")

        self.handler = permissionsHandler(self, self.data)

        super(TestPlugin, self).__init__(
            AttrDict(name="test", module="test_permissions"),
            AttrDict(name="python"),
        )
Example No. 27
def save_config(config, path=None):
    """ Save given quaycon configuration to YAML file

    :param dict config:
      configuration to save

    :param path:
      where to save the configuration. Can be either a path to a file, or
      an opened file-like object. If kept to `None`, then configuration is
      saved to ~/.quaycon.yml if it already exist or
      ~/.config/quaycon/config.yml
    """
    if path is None:
        for _path in DEFAULT_CONFIG_FILES:
            if osp.exists(_path):
                path = _path
                break
    if path is None:
        path = _path
    if isinstance(path, string_types):
        LOGGER.info("Saving config in '{}'".format(path))
        parent = osp.dirname(path)
        if not osp.isdir(parent):
            os.makedirs(parent)
        with open(path, "w") as ostr:
            yaml.dump(config, ostr, encoding="utf-8", default_flow_style=False, Dumper=Dumper)
    else:
        yaml.dump(config, path, encoding="utf-8", default_flow_style=False, Dumper=Dumper)
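
save_config accepts either a filesystem path or an already-open stream; below is a stand-alone sketch of that convention (nothing in it comes from the original module, and safe_dump is used instead of the project's custom Dumper).

import io

import yaml


def save(config, target):
    if isinstance(target, str):                       # treat strings as paths
        with open(target, "w", encoding="utf-8") as ostr:
            yaml.safe_dump(config, ostr, default_flow_style=False)
    else:                                             # otherwise assume a writable stream
        yaml.safe_dump(config, target, default_flow_style=False)


buf = io.StringIO()
save({"registry": "quay.io", "organizations": {}}, buf)
print(buf.getvalue())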
Example No. 28
def test_print_replace_mustache(monkeypatch):
    sg = MagicMock()
    sg.name = 'app-master-mind'
    sg.id = 'sg-007'

    monkeypatch.setattr('boto.cloudformation.connect_to_region', lambda x: MagicMock())
    monkeypatch.setattr('boto.ec2.connect_to_region', lambda x: MagicMock(get_all_security_groups=lambda: [sg]))
    monkeypatch.setattr('boto.iam.connect_to_region', lambda x: MagicMock())
    data = {'SenzaInfo': {'StackName': 'test',
                          'Parameters': [{'ApplicationId': {'Description': 'Application ID from kio'}}]},
            'SenzaComponents': [{'Configuration': {'ServerSubnets': {'eu-west-1': ['subnet-123']},
                                                   'Type': 'Senza::Configuration'}},
                                {'AppServer': {'Image': 'AppImage',
                                               'InstanceType': 't2.micro',
                                               'SecurityGroups': ['app-{{Arguments.ApplicationId}}'],
                                               'IamRoles': ['app-{{Arguments.ApplicationId}}'],
                                               'TaupageConfig': {'runtime': 'Docker',
                                                                 'source': 'foo/bar'},
                                               'Type': 'Senza::TaupageAutoScalingGroup'}}]
            }

    runner = CliRunner()

    with runner.isolated_filesystem():
        with open('myapp.yaml', 'w') as fd:
            yaml.dump(data, fd)

        result = runner.invoke(cli, ['print', 'myapp.yaml', '--region=myregion', '123', 'master-mind'],
                               catch_exceptions=False)
    assert 'AWSTemplateFormatVersion' in result.output
    assert 'subnet-123' in result.output
    assert 'app-master-mind' in result.output
    assert 'sg-007' in result.output
Example No. 29
def dump_yaml_by_year_month_day(items, root_dir=None):
    """ Dumps a set of files: ``$root_dir/$year/$month/$day.yaml``
    """
    assert root_dir
    years = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: [])))
    total_facts = 0
    for raw_fact in items:
        year = raw_fact["since"].year
        month = raw_fact["since"].month
        day = raw_fact["since"].day
        fact = OrderedDict()
        keys = "activity", "category", "since", "until", "description", "tags", "hamster_fact_id"
        for key in keys:
            if key in raw_fact:
                fact[key] = raw_fact[key]
        years[year][month][day].append(fact)
        total_facts += 1
    for year in years:
        for month in years[year]:
            for day, facts in years[year][month].iteritems():
                month_dir = os.path.join(root_dir, str(year), "{0:0>2}".format(month))
                if not os.path.exists(month_dir):
                    os.makedirs(month_dir)
                day_file = os.path.join(month_dir, "{0:0>2}".format(day)) + ".yaml"
                with open(day_file, "w") as f:
                    yaml.dump(facts, f, allow_unicode=True, default_flow_style=False)
    return total_facts
Example No. 30
def openstack_client_config(engine):
    cloud_config = {
        'clouds': {
            DEFAULT_CLOUD: {
                'auth': {
                    'auth_url': OPENSTACK_AUTH_URL.format(engine.ip()),
                    'username': OPENSTACK_USERNAME,
                    'password': engine.metadata['ovirt-engine-password']
                },
                'verify': False
            }
        }
    }
    os_client_config_file_path = os.path.join(
        os.environ.get('SUITE'), OPENSTACK_CLIENT_CONFIG_FILE
    )
    with open(os_client_config_file_path, 'w') as cloud_config_file:
        yaml.dump(cloud_config, cloud_config_file, default_flow_style=False)

    original_os_client_config_file = os.environ.get('OS_CLIENT_CONFIG_FILE')
    os.environ['OS_CLIENT_CONFIG_FILE'] = os_client_config_file_path
    yield DEFAULT_CLOUD
    if original_os_client_config_file is not None:
        os.environ['OS_CLIENT_CONFIG_FILE'] = original_os_client_config_file
    else:
        del os.environ['OS_CLIENT_CONFIG_FILE']
Example No. 31
def yaml_fwrite(filepath, content, preamble=""):
    with open(filepath, "w") as f:
        f.write(preamble)
        f.write(yaml.dump(content, Dumper=YamlDumper))
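
When yaml.dump is given no stream it returns the document as a string, which is what lets the helper above prepend a preamble. A hypothetical call, using the default dumper rather than the project's YamlDumper:

import yaml

preamble = "# generated file, do not edit by hand\n"
body = yaml.dump({"threads": 4, "verbose": False})   # no stream -> returns a str

with open("settings.yaml", "w") as f:
    f.write(preamble)
    f.write(body)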
Example No. 32
    def run(self):
        with NamedTemporaryFile(delete=True) as playbook_file:
            playbook_str = yaml.dump([self.playbook])
            playbook_file.write(playbook_str)
            playbook_file.flush()

            with NamedTemporaryFile(delete=True) as ansible_cfg:
                ansible_str = """
[defaults]
host_key_checking = False
stdout_callback = debug
remote_tmp = $HOME/.ansible/tmp
"""
                ansible_cfg.write(ansible_str)
                ansible_cfg.flush()

                with NamedTemporaryFile(delete=True) as hosts_file:

                    inventory_str = self._build_host_file_contents()
                    hosts_file.write(inventory_str)
                    hosts_file.flush()

                    options = {  
                                 'subset': None, 
                                 'ask_pass': False, 
                                 'listtags': None, 
                                 'become_method':  self.become_method or 'sudo', 
                                 'become_user':    self.become_user or 'root', 
                                 'sudo': False, 
                                 'private_key_file': None, 
                                 'syntax': None, 
                                 'skip_tags': [], 
                                 'diff': False, 
                                 'sftp_extra_args': '', 
                                 'check': False, 
                                 'force_handlers': False, 
                                 'remote_user': None, 
                                 'become_method': u'sudo', 
                                 'vault_password_file': None, 
                                 'listtasks': None, 
                                 'output_file': None, 
                                 'ask_su_pass': False, 
                                 'new_vault_password_file': None, 
                                 'inventory': u'hosts', 
                                 'forks': 100, 
                                 'listhosts': None, 
                                 'ssh_extra_args': '', 
                                 'tags': [u'all'], 
                                 'become_ask_pass': False, 
                                 'start_at_task': None, 
                                 'flush_cache': None, 
                                 'step': None, 
                                 'become': True, 
                                 'su_user': None, 
                                 'ask_sudo_pass': False, 
                                 'extra_vars': [], 
                                 'verbosity': 3, 
                                 'su': False, 
                                 'ssh_common_args': '', 
                                 'connection': 'ssh', 
                                 'ask_vault_pass': False, 
                                 'timeout': 10, 
                                 'module_path': None, 
                                 'sudo_user': None, 
                                 'scp_extra_args': ''
                                 }

                    os.environ['ANSIBLE_CONFIG'] = ansible_cfg.name

                    from ansible_utils import Runner

                    runner = Runner(
                        playbook=playbook_file.name,
                        hosts=hosts_file.name,
                        options=options
                    )

                    stats = runner.run()
Example No. 33
  "kubernetesStorageVersion": "v1",
  "openShiftStoragePrefix": "openshift.io",
  "openShiftStorageVersion": "v1"
}

# k8s service account authentication
config['serviceAccountConfig']['privateKeyFile'] = "/etc/kubernetes/ssl/key.pem"
config['serviceAccountConfig']['publicKeyFiles'] = ["/etc/kubernetes/ssl/cert.pem"]

# configure kubelet access to allow tailing sti-build log files
config['kubeletClientInfo'] = {
  "ca": "/etc/kubernetes/ssl/ca.pem",
  "certFile": "/etc/kubernetes/ssl/cert.pem",
  "keyFile": "/etc/kubernetes/ssl/key.pem",
  "port": 10250
}

# user authentication
config['oauthConfig']['identityProviders'][0] = {
  "name": "rancher",
  "challenge": True,
  "login": True,
  "provider": {
    "apiVersion": "v1",
    "kind": "HTPasswdPasswordIdentityProvider",
    "file": "/users.htpasswd"
  }
}

yaml.dump(config, file(path, 'w'), default_flow_style=False)
Example No. 34
def CallBackFunc(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        print('left button is clicked - position (', x, ',', y, ')')
        list_points.append([x, y])
    elif event == cv2.EVENT_RBUTTONDOWN:
        print('right button is clicked - position (', x, ',', y, ')')
        list_points.append([x, y])


cv2.setMouseCallback('Mousecallback', CallBackFunc)

if __name__ == '__main__':
    while (True):
        cv2.imshow('Mousecallback', img)
        if len(list_points) == 4:
            config_data = dict(image_parameters=dict(p2=list_points[3],
                                                     p1=list_points[2],
                                                     p4=list_points[0],
                                                     p3=list_points[1],
                                                     width_og=width,
                                                     height_og=height,
                                                     img_path=img_path,
                                                     size_frame=size_frame))
            with open('../conf/config_birdview.yml', 'w') as outfile:
                yaml.dump(config_data, outfile, default_flow_style=False)
            break
        if cv2.waitKey(20) == 27:
            break
    cv2.destroyAllWindows()
Example No. 35
#!/usr/bin/env python
import yaml

docker_compose_path = "/etc/contrail/config/docker-compose.yaml"

with open(docker_compose_path) as f:
    docker_compose = yaml.load(f)

schema = docker_compose["services"]["devicemgr"]
schema["image"] = "pawelzny/contrail-controller-config-devicemgr:R6.0-1"
environment = schema.setdefault("environment", [])

notification_driver = "NOTIFICATION_DRIVER=etcd"
db_driver = "DB_DRIVER=etcd"

if notification_driver not in environment:
    environment.append(notification_driver)

if db_driver not in environment:
    environment.append(db_driver)

with open(docker_compose_path, "w") as f:
    yaml.dump(docker_compose, f)
Example No. 36
# Save results
data = {
    "rotation_matrix": np.asarray(R).tolist(),
    "translation_vector": np.asarray(T).tolist(),
    "essential_matrix": np.asarray(E).tolist(),
    "fundamental_matrix": np.asarray(F).tolist(),
    "R1": np.asarray(R1).tolist(),
    "R2": np.asarray(R2).tolist(),
    "P1": np.asarray(P1).tolist(),
    "P2": np.asarray(P2).tolist(),
    "Q": np.asarray(Q).tolist(),
    "roi_1": np.asarray(roi1).tolist(),
    "roi_2": np.asarray(roi2).tolist()
}
yaml.dump(data, open(SAVE_PATH, 'w'))
np.savez_compressed(SAVE_PATH_MAPS,
                    ax=map_a_x,
                    ay=map_a_y,
                    bx=map_b_x,
                    by=map_b_y)

# Testing
test_a = cv2.imread(IMG_PATH_A + fnames[0])
test_b = cv2.imread(IMG_PATH_B + fnames[0])
cv2.imshow("Original A", test_a)
cv2.imshow("Original B", test_b)

remapped_a = cv2.remap(test_a, map_a_x, map_a_y, cv2.INTER_LINEAR)
remapped_b = cv2.remap(test_b, map_b_x, map_b_y, cv2.INTER_LINEAR)
cv2.imshow("Remapped A", remapped_a)
Example No. 37
def save_yaml_config(filepath, config):
    with open(filepath, 'w') as f:
        yaml.dump(config, f, default_flow_style=False)
Example No. 38
def LogesticRegression(dataFile,outputfolder,C,Penalty,parameters):
	inputData = yaml.load(open(dataFile))
	trainingSet = inputData['training']
	testingSet = inputData['testing']
	inputFile = inputData['inputFile']
	label = inputData['label']
	resultset = []
	modelset = []
	importanceset = []

	if not os.path.exists(outputfolder):
		try:
			os.makedirs(outputfolder)
		except OSError as exc:
			if exc.errno != errno.EEXIST:
				raise exc
			pass

	modelsfolder = outputfolder + "/models/"
	if not os.path.exists(modelsfolder):
		try:
			os.makedirs(modelsfolder)
		except OSError as exc:
			if exc.errno != errno.EEXIST:
				raise exc
			pass

	importancefolder = outputfolder + "/FeatureImportance/"
	if not os.path.exists(importancefolder):
		try:
			os.makedirs(importancefolder)
		except OSError as exc:
			if exc.errno != errno.EEXIST:
				raise exc
			pass

	for i in range(len(trainingSet)):
		train_df = pd.read_csv(trainingSet[i])
		train_labels = train_df[label]
		train_features = train_df.drop(label,axis=1)
		test_df = pd.read_csv(testingSet[i])
		test_predictions = pd.DataFrame(test_df[label])
		test_features = test_df.drop(label,axis=1)

		lr = LogisticRegression(penalty=Penalty,C=C)
		lr.fit(train_features,train_labels)

		modelFile = modelsfolder + "LogisticRegressionModel" + str(i+1) + ".pkl"
		with open(modelFile, 'wb') as fd:
			pickle.dump(lr, fd)
		modelset.append(modelFile)

		fd.close()

		importanceFile = calculateFeatureImportance(train_features,lr,importancefolder,i)
		importanceset.append(importanceFile)

		test_predictions['predictions'] = lr.predict(test_features)

		resultFile = outputfolder+'/result'+str(i+1)+'.csv'

		test_predictions.to_csv(resultFile,index=False)
		resultset.append(resultFile)

	resultDict = dict()

	resultDict['results'] = resultset
	resultDict['models'] = modelset
	resultDict['featureimportance'] = importanceset
	resultDict['label'] = label

	if not parameters:
		parameters['parameter']='default'
	resultDict['algo_params'] = parameters
	resultDict['split_params'] = inputData['split_params']
	if 'feature_selection_parameters' in inputData:
		resultDict['feature_selection_parameters'] = inputData['feature_selection_parameters']
		resultDict['feature_selection_algorithm'] = inputData['feature_selection_algorithm']
	if 'feature_extraction_parameters' in inputData:
		resultDict['feature_extraction_parameters'] = inputData['feature_extraction_parameters']
		resultDict['feature_extraction_algorithm'] = inputData['feature_extraction_algorithm']
	if 'preprocessing_params' in inputData:
		resultDict['preprocessing_params'] = inputData['preprocessing_params']
	resultDict['inputFile'] = inputFile
	resultDict['algorithm'] = "LogisticRegression"
	yaml.dump(resultDict,open(outputfolder+'/results.yaml','w'))
Example No. 39
def train(hyp, opt, device, tb_writer=None, wandb=None):
    logger.info(f'Hyperparameters {hyp}')
    save_dir, epochs, batch_size, total_batch_size, weights, rank = \
        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank

    # Directories
    wdir = save_dir / 'weights'
    wdir.mkdir(parents=True, exist_ok=True)  # make dir
    last = wdir / 'last.pt'
    best = wdir / 'best.pt'
    results_file = save_dir / 'results.txt'

    # Save run settings
    with open(save_dir / 'hyp.yaml', 'w') as f:
        yaml.dump(hyp, f, sort_keys=False)
    with open(save_dir / 'opt.yaml', 'w') as f:
        yaml.dump(vars(opt), f, sort_keys=False)

    # Configure
    plots = not opt.evolve  # create plots
    cuda = device.type != 'cpu'
    init_seeds(2 + rank)
    with open(opt.data) as f:
        data_dict = yaml.load(f, Loader=yaml.FullLoader)  # data dict
    with torch_distributed_zero_first(rank):
        check_dataset(data_dict)  # check
    train_path = data_dict['train']
    test_path = data_dict['val']
    nc, names = (1, ['item']) if opt.single_cls else (int(data_dict['nc']), data_dict['names'])  # number classes, names
    assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data)  # check

    # Model
    pretrained = weights.endswith('.pt')
    if pretrained:
        with torch_distributed_zero_first(rank):
            attempt_download(weights)  # download if not found locally
        ckpt = torch.load(weights, map_location=device)  # load checkpoint
        if hyp.get('anchors'):
            ckpt['model'].yaml['anchors'] = round(hyp['anchors'])  # force autoanchor
        model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc).to(device)  # create
        exclude = ['anchor'] if opt.cfg or hyp.get('anchors') else []  # exclude keys
        state_dict = ckpt['model'].float().state_dict()  # to FP32
        state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude)  # intersect
        model.load_state_dict(state_dict, strict=False)  # load
        logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights))  # report
    else:
        model = Model(opt.cfg, ch=3, nc=nc).to(device)  # create

    # Freeze
    freeze = []  # parameter names to freeze (full or partial)
    for k, v in model.named_parameters():
        v.requires_grad = True  # train all layers
        if any(x in k for x in freeze):
            print('freezing %s' % k)
            v.requires_grad = False

    # Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / total_batch_size), 1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= total_batch_size * accumulate / nbs  # scale weight_decay

    pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
    for k, v in model.named_modules():
        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
            pg2.append(v.bias)  # biases
        if isinstance(v, nn.BatchNorm2d):
            pg0.append(v.weight)  # no decay
        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
            pg1.append(v.weight)  # apply decay

    if opt.adam:
        optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
    else:
        optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)

    optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay
    optimizer.add_param_group({'params': pg2})  # add pg2 (biases)
    logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
    del pg0, pg1, pg2

    # Scheduler https://arxiv.org/pdf/1812.01187.pdf
    # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
    lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - hyp['lrf']) + hyp['lrf']  # cosine
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # plot_lr_scheduler(optimizer, scheduler, epochs)

    # Logging
    if wandb and wandb.run is None:
        opt.hyp = hyp  # add hyperparameters
        wandb_run = wandb.init(config=opt, resume="allow",
                               project='YOLOv3' if opt.project == 'runs/train' else Path(opt.project).stem,
                               name=save_dir.stem,
                               id=ckpt.get('wandb_id') if 'ckpt' in locals() else None)
    loggers = {'wandb': wandb}  # loggers dict

    # Resume
    start_epoch, best_fitness = 0, 0.0
    if pretrained:
        # Optimizer
        if ckpt['optimizer'] is not None:
            optimizer.load_state_dict(ckpt['optimizer'])
            best_fitness = ckpt['best_fitness']

        # Results
        if ckpt.get('training_results') is not None:
            with open(results_file, 'w') as file:
                file.write(ckpt['training_results'])  # write results.txt

        # Epochs
        start_epoch = ckpt['epoch'] + 1
        if opt.resume:
            assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
        if epochs < start_epoch:
            logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
                        (weights, ckpt['epoch'], epochs))
            epochs += ckpt['epoch']  # finetune additional epochs

        del ckpt, state_dict

    # Image sizes
    gs = int(max(model.stride))  # grid size (max stride)
    imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size]  # verify imgsz are gs-multiples

    # DP mode
    if cuda and rank == -1 and torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)

    # SyncBatchNorm
    if opt.sync_bn and cuda and rank != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        logger.info('Using SyncBatchNorm()')

    # EMA
    ema = ModelEMA(model) if rank in [-1, 0] else None

    # DDP mode
    if cuda and rank != -1:
        model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)

    # Trainloader
    dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
                                            hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
                                            world_size=opt.world_size, workers=opt.workers,
                                            image_weights=opt.image_weights)
    mlc = np.concatenate(dataset.labels, 0)[:, 0].max()  # max label class
    nb = len(dataloader)  # number of batches
    assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)

    # Process 0
    if rank in [-1, 0]:
        ema.updates = start_epoch * nb // accumulate  # set EMA updates
        testloader = create_dataloader(test_path, imgsz_test, total_batch_size, gs, opt,  # testloader
                                       hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True,
                                       rank=-1, world_size=opt.world_size, workers=opt.workers, pad=0.5)[0]

        if not opt.resume:
            labels = np.concatenate(dataset.labels, 0)
            c = torch.tensor(labels[:, 0])  # classes
            # cf = torch.bincount(c.long(), minlength=nc) + 1.  # frequency
            # model._initialize_biases(cf.to(device))
            if plots:
                Thread(target=plot_labels, args=(labels, save_dir, loggers), daemon=True).start()
                if tb_writer:
                    tb_writer.add_histogram('classes', c, 0)

            # Anchors
            if not opt.noautoanchor:
                check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)

    # Model parameters
    hyp['cls'] *= nc / 80.  # scale coco-tuned hyp['cls'] to current dataset
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.gr = 1.0  # iou loss ratio (obj_loss = 1.0 or iou)
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device)  # attach class weights
    model.names = names

    # Start training
    t0 = time.time()
    nw = max(round(hyp['warmup_epochs'] * nb), 1000)  # number of warmup iterations, max(3 epochs, 1k iterations)
    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
    scheduler.last_epoch = start_epoch - 1  # do not move
    scaler = amp.GradScaler(enabled=cuda)
    logger.info('Image sizes %g train, %g test\n'
                'Using %g dataloader workers\nLogging results to %s\n'
                'Starting training for %g epochs...' % (imgsz, imgsz_test, dataloader.num_workers, save_dir, epochs))
    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
        model.train()

        # Update image weights (optional)
        if opt.image_weights:
            # Generate indices
            if rank in [-1, 0]:
                cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2  # class weights
                iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights
                dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx
            # Broadcast if DDP
            if rank != -1:
                indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
                dist.broadcast(indices, 0)
                if rank != 0:
                    dataset.indices = indices.cpu().numpy()

        # Update mosaic border
        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders

        mloss = torch.zeros(4, device=device)  # mean losses
        if rank != -1:
            dataloader.sampler.set_epoch(epoch)
        pbar = enumerate(dataloader)
        logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'targets', 'img_size'))
        if rank in [-1, 0]:
            pbar = tqdm(pbar, total=nb)  # progress bar
        optimizer.zero_grad()
        for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float() / 255.0  # uint8 to float32, 0-255 to 0.0-1.0

            # Warmup
            if ni <= nw:
                xi = [0, nw]  # x interp
                # model.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)
                accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                    if 'momentum' in x:
                        x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])

            # Multi-scale
            if opt.multi_scale:
                sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs  # size
                sf = sz / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                    imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

            # Forward
            with amp.autocast(enabled=cuda):
                pred = model(imgs)  # forward
                loss, loss_items = compute_loss(pred, targets.to(device), model)  # loss scaled by batch_size
                if rank != -1:
                    loss *= opt.world_size  # gradient averaged between devices in DDP mode

            # Backward
            scaler.scale(loss).backward()

            # Optimize
            if ni % accumulate == 0:
                scaler.step(optimizer)  # optimizer.step
                scaler.update()
                optimizer.zero_grad()
                if ema:
                    ema.update(model)

            # Print
            if rank in [-1, 0]:
                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
                s = ('%10s' * 2 + '%10.4g' * 6) % (
                    '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
                pbar.set_description(s)

                # Plot
                if plots and ni < 3:
                    f = save_dir / f'train_batch{ni}.jpg'  # filename
                    Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
                    # if tb_writer:
                    #     tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
                    #     tb_writer.add_graph(model, imgs)  # add model to tensorboard
                elif plots and ni == 3 and wandb:
                    wandb.log({"Mosaics": [wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg')]})

            # end batch ------------------------------------------------------------------------------------------------
        # end epoch ----------------------------------------------------------------------------------------------------

        # Scheduler
        lr = [x['lr'] for x in optimizer.param_groups]  # for tensorboard
        scheduler.step()

        # DDP process 0 or single-GPU
        if rank in [-1, 0]:
            # mAP
            if ema:
                ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride'])
            final_epoch = epoch + 1 == epochs
            if not opt.notest or final_epoch:  # Calculate mAP
                results, maps, times = test.test(opt.data,
                                                 batch_size=total_batch_size,
                                                 imgsz=imgsz_test,
                                                 model=ema.ema,
                                                 single_cls=opt.single_cls,
                                                 dataloader=testloader,
                                                 save_dir=save_dir,
                                                 plots=plots and final_epoch,
                                                 log_imgs=opt.log_imgs if wandb else 0)

            # Write
            with open(results_file, 'a') as f:
                f.write(s + '%10.4g' * 7 % results + '\n')  # P, R, mAP@0.5, mAP@0.5:0.95, val_loss(box, obj, cls)
            if len(opt.name) and opt.bucket:
                os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))

            # Log
            tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss',  # train loss
                    'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
                    'val/box_loss', 'val/obj_loss', 'val/cls_loss',  # val loss
                    'x/lr0', 'x/lr1', 'x/lr2']  # params
            for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
                if tb_writer:
                    tb_writer.add_scalar(tag, x, epoch)  # tensorboard
                if wandb:
                    wandb.log({tag: x})  # W&B

            # Update best mAP
            fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@0.5, mAP@0.5:0.95]
            if fi > best_fitness:
                best_fitness = fi

            # Save model
            save = (not opt.nosave) or (final_epoch and not opt.evolve)
            if save:
                with open(results_file, 'r') as f:  # create checkpoint
                    ckpt = {'epoch': epoch,
                            'best_fitness': best_fitness,
                            'training_results': f.read(),
                            'model': ema.ema,
                            'optimizer': None if final_epoch else optimizer.state_dict(),
                            'wandb_id': wandb_run.id if wandb else None}

                # Save last, best and delete
                torch.save(ckpt, last)
                if best_fitness == fi:
                    torch.save(ckpt, best)
                del ckpt
        # end epoch ----------------------------------------------------------------------------------------------------
    # end training

    if rank in [-1, 0]:
        # Strip optimizers
        for f in [last, best]:
            if f.exists():  # is *.pt
                strip_optimizer(f)  # strip optimizer
                os.system('gsutil cp %s gs://%s/weights' % (f, opt.bucket)) if opt.bucket else None  # upload

        # Plots
        if plots:
            plot_results(save_dir=save_dir)  # save as results.png
            if wandb:
                files = ['results.png', 'precision_recall_curve.png', 'confusion_matrix.png']
                wandb.log({"Results": [wandb.Image(str(save_dir / f), caption=f) for f in files
                                       if (save_dir / f).exists()]})
        logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))

        # Test best.pt
        if opt.data.endswith('coco.yaml') and nc == 80:  # if COCO
            results, _, _ = test.test(opt.data,
                                      batch_size=total_batch_size,
                                      imgsz=imgsz_test,
                                      model=attempt_load(best if best.exists() else last, device).half(),
                                      single_cls=opt.single_cls,
                                      dataloader=testloader,
                                      save_dir=save_dir,
                                      save_json=True,  # use pycocotools
                                      plots=False)

    else:
        dist.destroy_process_group()

    wandb.run.finish() if wandb and wandb.run else None
    torch.cuda.empty_cache()
    return results
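# A minimal, self-contained sketch of the np.interp warmup schedule used in the training
# loop above; the hyperparameter values below are illustrative placeholders, not taken
# from any particular hyp file.
import numpy as np

nw = 1000                               # number of warmup iterations
warmup_momentum, momentum = 0.8, 0.937  # placeholder momentum ramp endpoints
initial_lr = 0.01                       # placeholder initial_lr of a non-bias group

for ni in (0, 250, 500, 1000):
    lr = np.interp(ni, [0, nw], [0.0, initial_lr])             # rises from 0.0 to initial_lr
    mom = np.interp(ni, [0, nw], [warmup_momentum, momentum])  # rises from 0.8 to 0.937
    print(ni, float(lr), float(mom))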
Ejemplo n.º 40
0
def main():
    args = parse_args()

    root = Path(args.save_path)
    load_root = Path(args.load_path) if args.load_path else None
    root.mkdir(parents=True, exist_ok=True)

    ####################################
    # Dump arguments and create logger #
    ####################################
    with open(root / "args.yml", "w") as f:
        yaml.dump(args, f)
    writer = SummaryWriter(str(root))

    #######################
    # Load PyTorch Models #
    #######################
    netG = Generator(args.n_mel_channels, args.ngf,
                     args.n_residual_layers).cuda()
    netD = Discriminator(args.num_D, args.ndf, args.n_layers_D,
                         args.downsamp_factor).cuda()
    fft = Audio2Mel(n_mel_channels=args.n_mel_channels).cuda()

    print(netG)
    print(netD)

    #####################
    # Create optimizers #
    #####################
    optG = torch.optim.Adam(netG.parameters(), lr=1e-4, betas=(0.5, 0.9))
    optD = torch.optim.Adam(netD.parameters(), lr=1e-4, betas=(0.5, 0.9))

    if load_root and load_root.exists():
        netG.load_state_dict(torch.load(load_root / "netG.pt"))
        optG.load_state_dict(torch.load(load_root / "optG.pt"))
        netD.load_state_dict(torch.load(load_root / "netD.pt"))
        optD.load_state_dict(torch.load(load_root / "optD.pt"))

    #######################
    # Create data loaders #
    #######################
    train_set = AudioDataset(Path(args.data_path) / "train_files.txt",
                             args.seq_len,
                             sampling_rate=22050)
    test_set = AudioDataset(
        Path(args.data_path) / "test_files.txt",
        22050 * 4,
        sampling_rate=22050,
        augment=False,
    )

    train_loader = DataLoader(train_set,
                              batch_size=args.batch_size,
                              num_workers=4)
    test_loader = DataLoader(test_set, batch_size=1)

    ##########################
    # Dumping original audio #
    ##########################
    test_voc = []
    test_audio = []
    for i, x_t in enumerate(test_loader):
        x_t = x_t.cuda()
        s_t = fft(x_t).detach()

        test_voc.append(s_t.cuda())
        test_audio.append(x_t)

        audio = x_t.squeeze().cpu()
        save_sample(root / ("original_%d.wav" % i), 22050, audio)
        writer.add_audio("original/sample_%d.wav" % i,
                         audio,
                         0,
                         sample_rate=22050)

        if i == args.n_test_samples - 1:
            break

    costs = []
    start = time.time()

    # enable cudnn autotuner to speed up training
    torch.backends.cudnn.benchmark = True

    best_mel_reconst = 1000000
    steps = 0
    for epoch in range(1, args.epochs + 1):
        for iterno, x_t in enumerate(train_loader):
            x_t = x_t.cuda()
            s_t = fft(x_t).detach()
            x_pred_t = netG(s_t.cuda())

            with torch.no_grad():
                s_pred_t = fft(x_pred_t.detach())
                s_error = F.l1_loss(s_t, s_pred_t).item()

            #######################
            # Train Discriminator #
            #######################
            D_fake_det = netD(x_pred_t.cuda().detach())
            D_real = netD(x_t.cuda())

            loss_D = 0
            for scale in D_fake_det:
                loss_D += F.relu(1 + scale[-1]).mean()

            for scale in D_real:
                loss_D += F.relu(1 - scale[-1]).mean()

            netD.zero_grad()
            loss_D.backward()
            optD.step()

            ###################
            # Train Generator #
            ###################
            D_fake = netD(x_pred_t.cuda())

            loss_G = 0
            for scale in D_fake:
                loss_G += -scale[-1].mean()

            loss_feat = 0
            feat_weights = 4.0 / (args.n_layers_D + 1)
            D_weights = 1.0 / args.num_D
            wt = D_weights * feat_weights
            for i in range(args.num_D):
                for j in range(len(D_fake[i]) - 1):
                    loss_feat += wt * F.l1_loss(D_fake[i][j],
                                                D_real[i][j].detach())

            netG.zero_grad()
            (loss_G + args.lambda_feat * loss_feat).backward()
            optG.step()

            ######################
            # Update tensorboard #
            ######################
            costs.append(
                [loss_D.item(),
                 loss_G.item(),
                 loss_feat.item(), s_error])

            writer.add_scalar("loss/discriminator", costs[-1][0], steps)
            writer.add_scalar("loss/generator", costs[-1][1], steps)
            writer.add_scalar("loss/feature_matching", costs[-1][2], steps)
            writer.add_scalar("loss/mel_reconstruction", costs[-1][3], steps)
            steps += 1

            if steps % args.save_interval == 0:
                st = time.time()
                with torch.no_grad():
                    for i, (voc, _) in enumerate(zip(test_voc, test_audio)):
                        pred_audio = netG(voc)
                        pred_audio = pred_audio.squeeze().cpu()
                        save_sample(root / ("generated_%d.wav" % i), 22050,
                                    pred_audio)
                        writer.add_audio(
                            "generated/sample_%d.wav" % i,
                            pred_audio,
                            epoch,
                            sample_rate=22050,
                        )

                torch.save(netG.state_dict(), root / "netG.pt")
                torch.save(optG.state_dict(), root / "optG.pt")

                torch.save(netD.state_dict(), root / "netD.pt")
                torch.save(optD.state_dict(), root / "optD.pt")

                if np.asarray(costs).mean(0)[-1] < best_mel_reconst:
                    best_mel_reconst = np.asarray(costs).mean(0)[-1]
                    torch.save(netD.state_dict(), root / "best_netD.pt")
                    torch.save(netG.state_dict(), root / "best_netG.pt")

                print("Took %5.4fs to generate samples" % (time.time() - st))
                print("-" * 100)

            if steps % args.log_interval == 0:
                print("Epoch {} | Iters {} / {} | ms/batch {:5.2f} | loss {}".
                      format(
                          epoch,
                          iterno,
                          len(train_loader),
                          1000 * (time.time() - start) / args.log_interval,
                          np.asarray(costs).mean(0),
                      ))
                costs = []
                start = time.time()
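# A minimal sketch of the hinge-style GAN losses computed in the loop above, evaluated on
# dummy discriminator scores (the tensor values are illustrative only):
import torch
import torch.nn.functional as F

score_real = torch.tensor([0.7, -0.2, 1.5])   # discriminator outputs for real audio
score_fake = torch.tensor([-0.9, 0.3, -1.2])  # discriminator outputs for generated audio

loss_D = F.relu(1 - score_real).mean() + F.relu(1 + score_fake).mean()  # discriminator term
loss_G = -score_fake.mean()                                             # generator adversarial term
print(loss_D.item(), loss_G.item())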
Ejemplo n.º 41
0
                             1,
                             x,
                             y,
                             len(pixels),
                             batch_size=7,
                             pixels_per_grid=49))

pixelids = dict()
for chip_idx, chip in enumerate(chip_ids):
    # Bool value is argument to right_side_up
    chip_pixels = list(range(chip_idx * 49, chip_idx * 49 + 49))
    pixelids[chip] = (True, 'plain', chip_pixels)

chips = []
for chipid, (right_side_up, shape, ids) in pixelids.items():
    assignment = pg.grid_7x7_assignments_0_64_v2_2_1
    channels = pg.assign_pixels(ids, assignment, right_side_up, range(64))
    chips.append([chipid, channels])
print('chips', len(chips))

with open('layout-2.4.0.yaml', 'w') as f:
    yaml.dump(
        {
            'pixels': pixels,
            'chips': chips,
            'x': -width / 2,
            'y': -height / 2,
            'width': width,
            'height': height
        }, f)
Ejemplo n.º 42
0
        with open(filename) as file:
            variables = yaml.load(file, Loader=yaml.FullLoader)

            variables["host"] = credentials.ip_address
            variables["username"] = credentials.username
            variables["password"] = credentials.password
            variables["tenant"] = temp[1].split('-')[1]
            variables["ap"] = temp[2].split('-')[1]
            variables["epg"] = temp[3].split('-')[1]
            variables["domain"] = domain

        filename = os.path.join(dirname,
                                'ansible/vars/vars_domain_binding.yaml')

        with open(filename, "w") as file:
            yaml.dump(variables, file)

        filename = os.path.join(dirname, 'ansible/bind_physd_epg.yaml')

        subprocess.run(['ansible-playbook', filename])
        p = subprocess.Popen(['tail', '/var/log/syslog'],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        p.communicate()

        print("Physical Domain has been added to EPG!")

        response = input("Press Y to add this epg to a VPC:")

        if response.lower() == "y":
            print("Y pressed!")
Ejemplo n.º 43
0
def generate_user_data(taupage_config, region):
    """
    Generates the CloudFormation "UserData" field.
    It looks for AWS functions such as Fn:: and Ref and generates the appropriate UserData json field,
    It leaves nodes representing AWS functions or refs unmodified and converts into text everything else.
    Example::
      environment:
        S3_BUCKET: {"Ref": "ExhibitorBucket"}
        S3_PREFIX: exhibitor

    transforms into::
      {"Fn::Join": ["", "environment:\n  S3_BUCKET: ", {"Ref": "ExhibitorBucket"}, "\n  S3_PREFIX: exhibitor"]}

    :param taupage_config: Taupage configuration mapping to render
    :param region: AWS region used when resolving referenced stack outputs
    :return: the rendered UserData string, or an {"Fn::Join": ...} dict when AWS functions/refs are present
    """
    def is_aws_fn(name):
        try:
            return name == "Ref" or (isinstance(name, str)
                                     and name.startswith("Fn::"))
        except Exception:
            return False

    def transform(node):
        """Transform AWS functions and refs into an string representation for later split and substitution"""

        if isinstance(node, dict):
            num_keys = len(node)
            if 'Stack' in node and 'Output' in node:
                return resolve_referenced_resource(node, region)
            if num_keys > 0:
                key = next(iter(node.keys()))
                if num_keys == 1 and is_aws_fn(key):
                    return "".join(["{{ ", json.dumps(node), " }}"])
                else:
                    return {
                        key: transform(value)
                        for key, value in node.items()
                    }
            else:
                return node
        elif isinstance(node, list):
            return [transform(subnode) for subnode in node]
        else:
            return node

    def split(text):
        """Splits yaml text into text and AWS functions/refs"""

        parts = []
        last_pos = 0
        for m in _AWS_FN_RE.finditer(text):
            parts += [text[last_pos:m.start(1)], json.loads(m.group(2))]
            last_pos = m.end(1)
        parts += [text[last_pos:]]
        return parts

    yaml_text = yaml.dump(transform(taupage_config),
                          width=sys.maxsize,
                          default_flow_style=False)

    parts = split("#taupage-ami-config\n" + yaml_text)

    if len(parts) == 1:
        return parts[0]
    else:
        return {"Fn::Join": ["", parts]}
Ejemplo n.º 44
0
 def render(self):
     print(yaml.dump(self.data))
     YamlVariable.last = None
Ejemplo n.º 45
0
    steps += dask_tests()

    steps += dagit_tests()
    steps += lakehouse_tests()
    steps += pipenv_smoke_tests()

    steps += python_modules_tox_tests("dagster")
    steps += python_modules_tox_tests("dagster-graphql")
    steps += python_modules_tox_tests("dagstermill")
    steps += library_tests()

    steps += releasability_tests()

    if DO_COVERAGE:
        steps += [wait_step(), coverage_step()]

    print(
        yaml.dump(
            {
                "env": {
                    "CI_NAME": "buildkite",
                    "CI_BUILD_NUMBER": "$BUILDKITE_BUILD_NUMBER",
                    "CI_BUILD_URL": "$BUILDKITE_BUILD_URL",
                    "CI_BRANCH": "$BUILDKITE_BRANCH",
                    "CI_PULL_REQUEST": "$BUILDKITE_PULL_REQUEST",
                },
                "steps": steps,
            },
            default_flow_style=False,
        ))
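# Usage note: the YAML printed above is a Buildkite pipeline definition; in a setup like
# this it is typically piped into `buildkite-agent pipeline upload` (an assumption about
# the surrounding CI wiring, which is not shown in this snippet).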
Ejemplo n.º 46
0
 def show_config(self, args):
     """Print active configuration values to console for confirmation"""
     stream = yaml.dump(conf)
     print(stream.replace('\n-', '\n  -'))
Ejemplo n.º 47
0
def save_yaml_config(filepath, config):
    """Save yaml config file at the given path."""
    with open(filepath, 'w') as f:
        yaml.dump(config, f, default_flow_style=False)
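# A minimal usage sketch of save_yaml_config above; the file name and config dict are
# made up for illustration:
import yaml

save_yaml_config('example_config.yaml', {'learning_rate': 0.001, 'epochs': 10})
with open('example_config.yaml') as f:
    print(yaml.safe_load(f))  # -> {'epochs': 10, 'learning_rate': 0.001}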
Ejemplo n.º 48
0
def dump_key_bindings(obj):
    p = key_bindings_path()
    with open(p, 'w') as wfile:
        yaml.dump(obj, wfile)
Ejemplo n.º 49
0
    async def upgrade(self, job, release_name, options):
        """
        Upgrade `release_name` chart release.

        `upgrade_options.item_version` specifies the item version to which the chart release should be upgraded.

        System will update container images being used by `release_name` chart release if
        `upgrade_options.update_container_images` is set.

        During upgrade, `upgrade_options.values` can be specified to apply configuration changes to the chart
        release in question.

        For the upgrade, the system automatically takes a snapshot of the `ix_volumes` in question, which can be
        used to roll back later on.
        """
        await self.middleware.call('kubernetes.validate_k8s_setup')
        release = await self.middleware.call('chart.release.get_instance',
                                             release_name)
        catalog = await self.middleware.call(
            'catalog.query',
            [['id', '=', release['catalog']]],
            {'extra': {
                'item_details': True
            }},
        )
        if not catalog:
            raise CallError(f'Unable to locate {release["catalog"]!r} catalog',
                            errno=errno.ENOENT)
        else:
            catalog = catalog[0]

        current_chart = release['chart_metadata']
        chart = current_chart['name']
        if release['catalog_train'] not in catalog['trains']:
            raise CallError(
                f'Unable to locate {release["catalog_train"]!r} catalog train in {release["catalog"]!r}',
                errno=errno.ENOENT,
            )
        if chart not in catalog['trains'][release['catalog_train']]:
            raise CallError(
                f'Unable to locate {chart!r} catalog item in {release["catalog"]!r} '
                f'catalog\'s {release["catalog_train"]!r} train.',
                errno=errno.ENOENT)

        new_version = options['item_version']
        if new_version == 'latest':
            new_version = await self.middleware.call(
                'chart.release.get_latest_version_from_item_versions',
                catalog['trains'][release['catalog_train']][chart]['versions'])

        if new_version not in catalog['trains'][
                release['catalog_train']][chart]['versions']:
            raise CallError(
                f'Unable to locate specified {new_version!r} item version.')

        verrors = ValidationErrors()
        if parse_version(new_version) <= parse_version(
                current_chart['version']):
            verrors.add(
                'upgrade_options.item_version',
                f'Upgrade version must be greater than {current_chart["version"]!r} current version.'
            )

        verrors.check()

        catalog_item = catalog['trains'][
            release['catalog_train']][chart]['versions'][new_version]
        await self.middleware.call('catalog.version_supported_error_check',
                                   catalog_item)

        config = await self.middleware.call('chart.release.upgrade_values',
                                            release, catalog_item['location'])

        # We will be performing validation for the values specified. We allow the user to specify values here
        # because the upgraded catalog item version might have a different schema, which potentially means that
        # the upgrade won't work, or that even if the new k8s resources are created/deployed, they won't
        # necessarily function as they should because of changed or newly expected params.
        # One tricky bit we need to account for first is removing any key from the currently configured values
        # which the upgraded release will potentially not support. We can safely remove those, as otherwise
        # validation would fail because the new schema does not expect those keys.
        config = clean_values_for_upgrade(config,
                                          catalog_item['schema']['questions'])
        config.update(options['values'])

        config, context = await self.middleware.call(
            'chart.release.normalise_and_validate_values',
            catalog_item,
            config,
            False,
            release['dataset'],
        )
        job.set_progress(25, 'Initial validation complete')

        # We have validated configuration now

        chart_path = os.path.join(release['path'], 'charts', new_version)
        await self.middleware.run_in_thread(shutil.rmtree,
                                            chart_path,
                                            ignore_errors=True)
        await self.middleware.run_in_thread(shutil.copytree,
                                            catalog_item['location'],
                                            chart_path)

        # If a snapshot of the volumes already exist with the same name in case of a failed upgrade, we will remove
        # it as we want the current point in time being reflected in the snapshot
        volumes_ds = os.path.join(release['dataset'], 'volumes/ix_volumes')
        snap_name = f'{volumes_ds}@{release["version"]}'
        if await self.middleware.call('zfs.snapshot.query',
                                      [['id', '=', snap_name]]):
            await self.middleware.call('zfs.snapshot.delete', snap_name)

        await self.middleware.call('zfs.snapshot.create', {
            'dataset': volumes_ds,
            'name': release['version'],
            'recursive': True
        })
        job.set_progress(40, 'Created snapshot for upgrade')

        await self.middleware.call('chart.release.perform_actions', context)

        # Let's update context options to reflect that an upgrade is taking place and from which version to which
        # version it's happening.
        # Helm considers simple config change as an upgrade as well, and we have no way of determining the old/new
        # chart versions during helm upgrade in the helm template, hence the requirement for a context object.
        config[CONTEXT_KEY_NAME].update({
            'operation': 'UPGRADE',
            'isUpgrade': True,
            'upgradeMetadata': {
                'oldChartVersion': current_chart['version'],
                'newChartVersion': new_version,
                'preUpgradeRevision': release['version'],
            }
        })

        job.set_progress(50, 'Upgrading chart release')

        with tempfile.NamedTemporaryFile(mode='w+') as f:
            f.write(yaml.dump(config))
            f.flush()

            cp = await run(
                [
                    'helm', 'upgrade', release_name, chart_path, '-n',
                    get_namespace(release_name), '-f', f.name
                ],
                check=False,
            )
            if cp.returncode:
                raise CallError(
                    f'Failed to upgrade chart release to {new_version!r}: {cp.stderr.decode()}'
                )

        job.set_progress(100, 'Upgrade complete for chart release')

        chart_release = await self.middleware.call(
            'chart.release.get_instance', release_name)
        self.middleware.send_event('chart.release.query',
                                   'CHANGED',
                                   id=release_name,
                                   fields=chart_release)

        await self.chart_release_update_check(
            catalog['trains'][release['catalog_train']][chart], chart_release)

        if options['update_container_images']:
            container_update_job = await self.middleware.call(
                'chart.release.pull_container_images', release_name)
            await job.wrap(container_update_job)

        return chart_release
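        # Illustrative shape of the `options` mapping consumed above (field names follow
        # the docstring and the code; the concrete values here are made up):
        #     {'item_version': 'latest', 'update_container_images': True, 'values': {}}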
Ejemplo n.º 50
0
        for particle in ["Pi", "P", "K", "Mu"]:
            for varname in ["Brunel_P", "Brunel_PT", "nTracks_Brunel"]:
                config = {}
                config["sampleVersion"             ] = data
                config["magnetPolarity"            ] = magtype
                config["particleName"              ] = particle
                config["priorCut"                  ] = cut[particle]
                config["pidCut"                    ] = pidcut[particle]
                config["varName"                   ] = varname
                config["outputFile"                ] = "binoutput/binning-"+data+".py"
                config["minimum"                   ] = varminmax[varname][0]
                config["maximum"                   ] = varminmax[varname][1]
                config["minimumBinWidth"           ] = varminmax[varname][2]
                config["delta"                     ] = 1
                config["nSigma"                    ] = 5
                config["schemeName"                ] = 'binning-'+particle+'-'+data+'-'+magtype
                config["numberOfInitialBins"       ] = 100 
                config["startWithIsopopulatedBins" ] = True
                config["minRun"                    ] = None
                config["maxRun"                    ] = None
                config["maxFiles"                  ] = 100 #used
                config["mergeBelow"                ] = -1000000000
                config["mergeAbove"                ] = 1000000000
                #print(config)
                if not os.path.isdir('config_files'):
                    os.mkdir('config_files')
                with open("config_files/config-"+particle+"-"+data+"-"+magtype+"-"+varname+".yml", "w") as outfile:
                    yaml.dump(config, outfile, default_flow_style=False)

                command = "bash "+UraniaDir+"run python "+UraniaDir+"PIDCalib/PIDPerfScripts/scripts/python/BinningOptimizer/binningPID.py "+"config_files/config-"+particle+"-"+data+"-"+magtype+"-"+varname+".yml"
                print(command)
                os.system(command)
Ejemplo n.º 51
0
def scvmGen(pn_nic=None,
            pn_ip=None,
            pn_prefix=24,
            cn_nic=None,
            cn_ip=None,
            cn_prefix=24,
            master=False):
    with open(f'{tmpdir}/network-config.mgmt', 'rt') as f:
        yam = yaml.load(f, Loader=yaml.FullLoader)
    yam['network']['config'].append({
        'mtu':
        9000,
        'name':
        pn_nic,
        'subnets': [{
            'address': f'{pn_ip}/{pn_prefix}',
            'type': 'static'
        }],
        'type':
        'physical'
    })
    yam['network']['config'].append({
        'mtu':
        9000,
        'name':
        cn_nic,
        'subnets': [{
            'address': f'{cn_ip}/{cn_prefix}',
            'type': 'static'
        }],
        'type':
        'physical'
    })
    with open(f'{tmpdir}/network-config', 'wt') as f:
        f.write(yaml.dump(yam))
    with open(f'{tmpdir}/user-data', 'rt') as f:
        yam2 = yaml.load(f, Loader=yaml.FullLoader)
    yam2['bootcmd'] = [[
        '/usr/bin/systemctl', 'enable', '--now', 'cockpit.socket'
    ], ['/usr/bin/systemctl', 'enable', '--now', 'cockpit.service']]
    # if master:
    #     yam2['bootcmd'].append(
    #         [f'/usr/bin/script', '-c', '/root/bootstrap.sh', 'bootstrap.log']
    #     )

    with open(f'{pluginpath}/shell/host/scvm_bootstrap.sh',
              'rt') as bootstrapfile:
        bootstrap = bootstrapfile.read()
        yam2['write_files'].append({
            'encoding':
            'base64',
            'content':
            base64.encodebytes(bootstrap.encode()),
            'owner':
            'root:root',
            'path':
            '/root/bootstrap.sh',
            'permissions':
            '0777'
        })
    with open(f'{pluginpath}/shell/host/ipcorrector', 'rt') as ipcorrectorfile:
        ipcorrector = ipcorrectorfile.read()
        yam2['write_files'].append({
            'encoding':
            'base64',
            'content':
            base64.encodebytes(ipcorrector.encode()),
            'owner':
            'root:root',
            'path':
            '/usr/local/bin/ipcorrector',
            'permissions':
            '0777'
        })
    with open(f'{tmpdir}/user-data', 'wt') as f:
        f.write('#cloud-config\n')
        f.write(yaml.dump(yam2).replace("\n\n", "\n"))
    return json.dumps(indent=4,
                      obj=json.loads(createReturn(code=200, val=yam)))
Ejemplo n.º 52
0
def test_init_compute_log_with_bad_config():
    with seven.TemporaryDirectory() as tmpdir_path:
        with open(os.path.join(tmpdir_path, 'dagster.yaml'), 'w') as fd:
            yaml.dump({'compute_logs': {'garbage': 'flargh'}}, fd, default_flow_style=False)
        with pytest.raises(DagsterInvalidConfigError, match='Undefined field "garbage"'):
            DagsterInstance.from_ref(InstanceRef.from_dir(tmpdir_path))
Ejemplo n.º 53
0
 def as_yaml(self):
     data = self.serialize()
     dump = yaml.dump(data, default_flow_style=False)
     return dump
Ejemplo n.º 54
0
            # create dict for single winners name and avg score
            winner = dict(name=competitor, avg=average)

            # add the winner to winners list
            winners.append(winner)

            # evaluate incoming items if average is bigger than lowest, then update top3scores
        elif average > min(top3scores):
            top3scores.append(average)
            top3scores.remove(min(top3scores))

            # remove the lowest name/score from winners list
            winners = [i for i in winners if not (i['avg'] < min(top3scores))]

            # create dict for single winners name and avg score
            winner = dict(name=competitor, avg=average)

            # add the winner to winners list
            winners.append(winner)

        winners = sorted(winners, key=lambda k: k['avg'], reverse=True)

# BREAK
# compile final winning results, with disqualifiers
results = {'winners': winners, 'disqualifications': disqualifications}

# dump results into yaml output
with open('results.yml', 'w') as outfile:
    yaml.dump(results, outfile, default_flow_style=False, sort_keys=False)
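# A compact, self-contained sketch of the same idea as the excerpt above: keep only the
# competitors whose average score is in the current top 3 (names and scores are made up):
scores = {'ana': 8.2, 'bo': 9.1, 'cy': 7.5, 'di': 9.4}
winners = sorted(({'name': n, 'avg': a} for n, a in scores.items()),
                 key=lambda k: k['avg'], reverse=True)[:3]
print(winners)  # top 3 by average, highest first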
Ejemplo n.º 55
0
from sys import argv

import yaml

VERSION_TYPE_PATCH = "patch"
VERSION_TYPE_MINOR = "minor"
VERSION_TYPE_MAJOR = "major"
KEY_VERSION = "version"
PIP_CONFIG_FILE = "pip_config.yml"

if __name__ == "__main__":
    version_type = argv[1]
    config = yaml.safe_load(open(PIP_CONFIG_FILE, "r"))

    version_numbers = [
        int(version_number)
        for version_number in config[KEY_VERSION].split(".")
    ]

    if version_type == VERSION_TYPE_PATCH:
        version_numbers[2] += 1
    elif version_type == VERSION_TYPE_MINOR:
        version_numbers[1] += 1
    elif version_type == VERSION_TYPE_MAJOR:
        version_numbers[0] += 1
    else:
        raise Exception("Invalid version type: {}".format(version_type))

    version_numbers = [
        str(version_number) for version_number in version_numbers
    ]

    config[KEY_VERSION] = ".".join(version_numbers)

    yaml.dump(config, open(PIP_CONFIG_FILE, "w"))
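# Hypothetical round trip for the script above, assuming pip_config.yml starts out as
# "version: 1.2.3" (the script only increments the selected component and does not reset
# the lower ones; the file name bump_version.py is made up):
#     python bump_version.py patch  ->  version: 1.2.4
#     python bump_version.py minor  ->  version: 1.3.3
#     python bump_version.py major  ->  version: 2.2.3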
Ejemplo n.º 56
0
def genUserFromFile(pubkeyfile: str, privkeyfile: str, hostsfile: str):
    # with open('user-data.tmpl', 'rt') as f:
    #     yam = yaml.load(f)
    #     tmp_meta = f.read()
    # pprint.pprint(yam)
    with open(pubkeyfile, 'rt') as f:
        pubkey = f.read().strip()
    with open(privkeyfile, 'rt') as f:
        # privkey = '\\n'.join(f.read().splitlines())
        # lines = f.read().splitlines()
        # privkey = ''
        # for line in lines:
        #     privkey += line.strip() + "\n"
        privkey = f.read()
    # privkey = privkey.replace("\n", "")

    with open(hostsfile, 'rt') as f:
        hosts = f.read()
    yam = {
        'disable_root':
        0,
        'ssh_pwauth':
        True,
        'users': [{
            'homedir': '/var/lib/ceph',
            'groups': 'sudo',
            'lock_passwd': False,
            'name': 'ceph',
            'plain_text_passwd': 'Ablecloud1!',
            'ssh-authorized-keys': [pubkey],
            'sudo': ['ALL=(ALL) NOPASSWD:ALL']
        }, {
            'groups': 'sudo',
            'lock_passwd': False,
            'name': 'ablecloud',
            'plain_text_passwd': 'Ablecloud1!',
            'ssh-authorized-keys': [pubkey],
            'sudo': ['ALL=(ALL) NOPASSWD:ALL']
        }, {
            'disable_root': 0,
            'ssh_pwauth': True,
            'name': 'root',
            'plain_text_passwd': 'Ablecloud1!',
            'ssh-authorized-keys': [pubkey],
        }],
        'write_files': [{
            'encoding': 'base64',
            'content': base64.encodebytes(pubkey.encode()),
            'owner': 'root:root',
            'path': '/root/.ssh/id_rsa.pub',
            'permissions': '0644'
        }, {
            'encoding': 'base64',
            'content': base64.encodebytes(privkey.encode()),
            'owner': 'root:root',
            'path': '/root/.ssh/id_rsa',
            'permissions': '0600'
        }, {
            'encoding': 'base64',
            'content': base64.encodebytes(pubkey.encode()),
            'owner': 'ceph:ceph',
            'path': '/var/lib/ceph/.ssh/id_rsa.pub',
            'permissions': '0644'
        }, {
            'encoding': 'base64',
            'content': base64.encodebytes(privkey.encode()),
            'owner': 'ceph:ceph',
            'path': '/var/lib/ceph/.ssh/id_rsa',
            'permissions': '0600'
        }, {
            'encoding': 'base64',
            'content': base64.encodebytes(pubkey.encode()),
            'owner': 'ablecloud:ablecloud',
            'path': '/home/ablecloud/.ssh/id_rsa.pub',
            'permissions': '0644'
        }, {
            'encoding': 'base64',
            'content': base64.encodebytes(privkey.encode()),
            'owner': 'ablecloud:ablecloud',
            'path': '/home/ablecloud/.ssh/id_rsa',
            'permissions': '0600'
        }, {
            'encoding': 'base64',
            'content': base64.encodebytes(hosts.encode()),
            'owner': 'root:root',
            'path': '/etc/hosts',
            'permissions': '0644'
        }]
    }
    # base64.decodebytes(base64.encodebytes(pubkey.encode())).decode()
    # with open('user-data', 'wt') as f:
    with open(f'{tmpdir}/user-data', 'wt') as f:
        f.write('#cloud-config\n')
        f.write(yaml.dump(yam).replace("\n\n", "\n"))
Ejemplo n.º 57
0
        exit_status = 1
        if is_good_status(last_status):
            current_status = 'Failure'
        else:
            current_status = 'Still Failing'
    else:
        exit_status = 0
        if is_good_status(last_status):
            current_status = 'Successful'
        else:
            current_status = 'Fixed'
    category_dict_status[scenario] = current_status

    with open(args.build_status_file, 'w') as f:
        print('Writing output file: %s' % args.build_status_file)
        yaml.dump(category_dict_status, f)

# last successful commit
    if (current_status
            in ('Successful', 'Fixed')) and trex_last_commit_hash and len(
                jobs_list) > 0 and scenario == 'nightly':
        with open(args.last_passed_commit, 'w') as f:
            print('Writing output file: %s' % args.last_passed_commit)
            try_write(f, trex_last_commit_hash)

# mail title
    mailtitle_output = scenario.capitalize()
    if branch_name:
        mailtitle_output += ' (%s)' % branch_name
    if build_id:
        mailtitle_output += ' - Build #%s' % build_id
Ejemplo n.º 58
0
 def yaml_dump(data, fp, **kwargs):
     keys = filter_keys(kwargs.keys(), "safe")
     if kwargs.get("safe", False):
         return yaml.safe_dump(data, fp, **Base.mk_opt_args(keys, kwargs))
     else:
         return yaml.dump(data, fp, **kwargs)
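# A simplified, self-contained sketch of the same safe/unsafe dispatch; filter_keys and
# Base.mk_opt_args used above are project helpers that are not reproduced here:
import yaml

def yaml_dump_sketch(data, fp, safe=False, **kwargs):
    # route to safe_dump when requested, otherwise fall back to plain dump
    if safe:
        return yaml.safe_dump(data, fp, **kwargs)
    return yaml.dump(data, fp, **kwargs)

# e.g. yaml_dump_sketch({'a': 1}, open('out.yml', 'w'), safe=True)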
Ejemplo n.º 59
0
 def dump_to_yaml(self):
     open("./config.yml", "w").write(
         yaml.dump({
             "token": self.token,
             "username": self.user_name
         }))
Ejemplo n.º 60
0
    def service_get(ctx, network, name):
        """
        Get details about a service in this profile.
        """

        # See
        # https://stackoverflow.com/questions/16782112/can-pyyaml-dump-dict-items-in-non-alphabetical-order
        def represent_ordereddict(dumper, data):
            value = []

            for item_key, item_value in data.items():
                node_key = dumper.represent_data(item_key)
                node_value = dumper.represent_data(item_value)

                value.append((node_key, node_value))

            return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value)

        yaml.add_representer(OrderedDict, represent_ordereddict)

        def get_paths_info_for_service(service):
            paths = ctx.obj['CLIENT'].paths.list()
            has_access_to = ["default-all-outgoing-allowed"]
            is_accessible_from = []
            for path in paths:
                if path.network.name != service.network.name:
                    continue
                if path.destination.name == service.name:
                    if path.source.name:
                        is_accessible_from.append(
                            "%s:%s:%s" %
                            (path.network.name, path.source.name, path.port))
                    else:
                        cidr_blocks = [
                            subnetwork.cidr_block
                            for subnetwork in path.source.subnetworks
                        ]
                        cidr_blocks_string = ",".join(cidr_blocks)
                        is_accessible_from.append(
                            "external:%s:%s" % (cidr_blocks_string, path.port))
                elif path.source.name == service.name:
                    has_access_to.append(
                        "%s:%s:%s" %
                        (path.network.name, path.destination.name, path.port))
            return {
                "has_access_to": has_access_to,
                "is_accessible_from": is_accessible_from
            }

        service = get_service_for_cli(ctx, network, name)
        paths_info = get_paths_info_for_service(service)
        service_info = OrderedDict()
        service_info['name'] = service.name
        service_info['has_access_to'] = paths_info['has_access_to']
        service_info['is_accessible_from'] = paths_info['is_accessible_from']
        network_info = OrderedDict()
        network_info['name'] = service.network.name
        network_info['id'] = service.network.network_id
        network_info['block'] = service.network.cidr_block
        network_info['region'] = service.network.region
        network_info['subnetworks'] = []
        service_info['network'] = network_info
        for subnetwork in service.subnetworks:
            subnetwork_info = OrderedDict()
            subnetwork_info['name'] = subnetwork.name
            subnetwork_info['id'] = subnetwork.subnetwork_id
            subnetwork_info['block'] = subnetwork.cidr_block
            subnetwork_info['region'] = subnetwork.region
            subnetwork_info['availability_zone'] = subnetwork.availability_zone
            subnetwork_info['instances'] = []
            for instance in subnetwork.instances:
                instance_info = OrderedDict()
                instance_info['id'] = instance.instance_id
                instance_info['public_ip'] = instance.public_ip
                instance_info['private_ip'] = instance.private_ip
                instance_info['state'] = instance.state
                instance_info['availability_zone'] = instance.availability_zone
                subnetwork_info["instances"].append(instance_info)
            service_info["network"]["subnetworks"].append(subnetwork_info)
        click.echo(yaml.dump(service_info, default_flow_style=False))
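# A minimal standalone check of the OrderedDict representer pattern used above: once the
# custom representer is registered, yaml.dump emits a plain mapping in insertion order
# instead of the python/object tag it would otherwise use for OrderedDict.
from collections import OrderedDict

import yaml

def represent_ordereddict(dumper, data):
    value = [(dumper.represent_data(k), dumper.represent_data(v)) for k, v in data.items()]
    return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value)

yaml.add_representer(OrderedDict, represent_ordereddict)
print(yaml.dump(OrderedDict([('name', 'web'), ('id', 'svc-1')]), default_flow_style=False))
# name: web
# id: svc-1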