Code example #1
File: test_input_output.py  Project: yaml/pyyaml
def test_unicode_input(unicode_filename, verbose=False):
    with open(unicode_filename, 'rb') as file:
        data = file.read().decode('utf-8')
    value = ' '.join(data.split())
    output = yaml.full_load(data)
    assert output == value, (output, value)
    for input in [data, data.encode('utf-8'),
                    codecs.BOM_UTF8+data.encode('utf-8'),
                    codecs.BOM_UTF16_BE+data.encode('utf-16-be'),
                    codecs.BOM_UTF16_LE+data.encode('utf-16-le')]:
        if verbose:
            print("INPUT:", repr(input[:10]), "...")
        output = yaml.full_load(input)
        assert output == value, (output, value)
        output = yaml.full_load(io.StringIO(input) if isinstance(input, str)
                                else io.BytesIO(input))
        assert output == value, (output, value)
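
A note on what this test exercises, since it is easy to miss: for bytes input PyYAML sniffs UTF-8 versus UTF-16 from the BOM, which is why every variant above parses to the same value. A minimal standalone check (not part of the original test):

import codecs
import yaml

doc = 'key: value'
expected = {'key': 'value'}
# Plain text, UTF-8 bytes, and BOM-prefixed UTF-16 bytes all parse the same.
assert yaml.full_load(doc) == expected
assert yaml.full_load(doc.encode('utf-8')) == expected
assert yaml.full_load(codecs.BOM_UTF16_LE + doc.encode('utf-16-le')) == expected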
Code example #2
 def _load_yaml(self, stream):
     if not yaml:
         raise DataError('Using YAML variable files requires PyYAML module '
                         'to be installed. Typically you can install it '
                         'by running `pip install pyyaml`.')
     if yaml.__version__.split('.')[0] == '3':
         return yaml.load(stream)
     return yaml.full_load(stream)
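
For context on the branch above: full_load appeared in PyYAML 5.1 as a shorthand for load with the FullLoader class, and PyYAML 3.x has neither, hence the fallback to plain load. A quick equivalence sketch for 5.1+:

import yaml

doc = 'answer: 42'
# On PyYAML >= 5.1, full_load(x) is equivalent to load(x, Loader=yaml.FullLoader).
assert yaml.full_load(doc) == yaml.load(doc, Loader=yaml.FullLoader) == {'answer': 42}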
Code example #3
File: conftest.py  Project: rancher/rancher
def kubernetes_api_client(rancher_client, cluster_name):
    c = rancher_client.by_id_cluster(cluster_name)
    kc = c.generateKubeconfig()
    loader = KubeConfigLoader(config_dict=yaml.full_load(kc.config))
    client_configuration = type.__call__(Configuration)  # bypass Configuration's default-instance metaclass
    loader.load_and_set(client_configuration)
    k8s_client = ApiClient(configuration=client_configuration)
    return k8s_client
Code example #4
File: test_input_output.py  Project: yaml/pyyaml
def test_unicode_input_errors(unicode_filename, verbose=False):
    with open(unicode_filename, 'rb') as file:
        data = file.read().decode('utf-8')
    for input in [data.encode('utf-16-be'),
            data.encode('utf-16-le'),
            codecs.BOM_UTF8+data.encode('utf-16-be'),
            codecs.BOM_UTF8+data.encode('utf-16-le')]:

        try:
            yaml.full_load(input)
        except yaml.YAMLError as exc:
            if verbose:
                print(exc)
        else:
            raise AssertionError("expected an exception")
        try:
            yaml.full_load(io.BytesIO(input))
        except yaml.YAMLError as exc:
            if verbose:
                print(exc)
Code example #5
File: config.py  Project: harej/reports_bot
 def _load(self):
     """Load or reload the bot's main configuration file (config.yml)."""
     filename = path.join(self._base_dir, "config.yml")
     try:
         with open(filename) as fp:
             self._data = yaml.full_load(fp)
     except (OSError, yaml.error.YAMLError) as exc:
         if isinstance(exc, OSError) and exc.errno == errno.ENOENT:  # Ignore missing file; use defaults
             return
         err = "Couldn't read config file ({}):\n{}"
         raise ConfigError(err.format(filename, exc)) from None
Code example #6
File: meta.py  Project: 05bit/docta
    def load(self, stream):
        meta_data = []
        meta_opened = False
        for next_line in stream:
            if next_line.startswith(DELIMITER):
                if meta_opened:
                    break  # all meta is read, stop reading
                else:
                    meta_opened = True  # meta started
                    continue
            elif not meta_opened:
                break  # no meta found
            else:
                meta_data.append(next_line)

        if meta_data:
            self.update(yaml.full_load(''.join(meta_data)))
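
For illustration, a hedged usage sketch of the front-matter convention this loader implements, assuming DELIMITER is the conventional '---' fence (the sample document is hypothetical):

import yaml

# For a document such as:
#   ---
#   title: Hello
#   tags: [a, b]
#   ---
#   body text ...
# the loop above collects only the lines between the fences:
meta_lines = ['title: Hello\n', 'tags: [a, b]\n']
assert yaml.full_load(''.join(meta_lines)) == {'title': 'Hello', 'tags': ['a', 'b']}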
Code example #7
File: mapping.py  Project: makerplane/FIX-Gateway
    def __init__(self, mapfile, log=None):
        self.meta_replacements_in = {}
        self.meta_replacements_out = {}

        # This is a list of function closures
        self.input_mapping = [None] * 1280
        self.output_mapping = {}
        self.log = log
        self.sendcount = 0


        # Open and parse the YAML mapping file passed to us
        try:
            with open(mapfile) as f:
                maps = yaml.full_load(f)
        except OSError:
            self.log.error("Unable to Open Mapfile - {}".format(mapfile))
            raise

        # dictionaries used for converting meta data strings from db to canfix and back
        self.meta_replacements_in = maps['meta replacements']
        self.meta_replacements_out = {v:k for k,v in self.meta_replacements_in.items()}

        # We really just assign all the outputs to a dictionary for the main
        # plugin code to use to assign callbacks.
        for each in maps['outputs']:
            output = {'canid':each['canid'],
                      'index':each['index'],
                      'owner':each['owner'],
                      'exclude':False,
                      'lastValue':None}
            self.output_mapping[each['fixid']] = output

        # each input mapping item := [CANID, Index, FIX DB ID, Priority]
        for each in maps['inputs']:
            p = canfix.protocol.parameters[each["canid"]]
            # Parameters start at 0x100 so we subtract that offset to index the array
            ix = each["canid"] - 0x100
            if self.input_mapping[ix] is None:
                self.input_mapping[ix] = [None] * 256
            self.input_mapping[ix][each["index"]] = self.getInputFunction(each["fixid"])
Code example #8
File: test_recursive.py  Project: yaml/pyyaml
def test_recursive(recursive_filename, verbose=False):
    context = globals().copy()
    exec(open(recursive_filename, 'rb').read(), context)
    value1 = context['value']
    output1 = None
    value2 = None
    output2 = None
    try:
        output1 = yaml.dump(value1)
        value2 = yaml.full_load(output1)
        output2 = yaml.dump(value2)
        assert output1 == output2, (output1, output2)
    finally:
        if verbose:
            print("VALUE1:", value1)
            print("VALUE2:", value2)
            print("OUTPUT1:")
            print(output1)
            print("OUTPUT2:")
            print(output2)
Code example #9
File: flaws2.py  Project: jamjahal/flaws2
def detect_suspicious_activity(config, files):
    """
    Looks for suspicious IPs from AWS
    
    config = string
        path to config.yaml file containing whitelisted ips
    files = string
        path to cloudtrail log files
    """
    suspicious = []
    api_calls = {}
    associate_ips = []
    with open(config) as f:
        whitelist = yaml.full_load(f)

    for file in sorted(files):
        f = None
        log.info(f'Checking File: {file}')
        if file.endswith('.gz'):
            f = gzip.open(file, 'r')
        else:
            f = open(file, 'r')
        try:
            cloudtrail = json.load(f)
        except Exception as e:
            log.error(f'Invalid JSON file: {file} - {e}')
            continue

        records = sorted(cloudtrail['Records'],
                         key=lambda x: datetime.strptime(
                             x['eventTime'], '%Y-%m-%dT%H:%M:%SZ'),
                         reverse=False)

        for record in records:
            try:
                if record['eventName'].lower() == 'assumerole':

                    session_name = record['requestParameters'][
                        'roleSessionName']
                    arn = record['requestParameters']['roleArn']
                    account = record['requestParameters']['roleArn'].split(
                        ':')[4]
                    role = record['requestParameters']['roleArn'].split(
                        '/')[-1]

                    assume_role_session = f'arn:aws:sts::{account}:assumed-role/{role}/{session_name}'

                    if not api_calls.get(session_name, None):
                        api_calls[session_name] = {
                            'source_ip': [],
                            'arn': assume_role_session,
                            'ttl': int(time.time() + 28800)
                        }
                    else:
                        # Set a TTL.  This is most useful in DynamoDB
                        api_calls[session_name]['ttl'] = int(time.time() +
                                                             28800)

                if record['userIdentity'].get('type', '') == 'AssumedRole':
                    session = record['userIdentity']['arn'].split('/')[-1]

                    # Check for open access in repository
                    check_policy(TARGET_PROFILE)

                    if api_calls.get(session, None):
                        if 'amazonaws' not in record[
                                'sourceIPAddress'] and not whitelisted_ip(
                                    whitelist.get('aws_whitelist_ips', []),
                                    record['sourceIPAddress']):

                            log.info(
                                f"Outside IP address: {record['sourceIPAddress']} - from eventName: {record['eventName']}"
                            )
                            # if this is the first call, we can add this IP to the list
                            if len(api_calls[session].get('source_ip',
                                                          [])) == 0:
                                api_calls[session]['source_ip'].append(
                                    record['sourceIPAddress'])

                            else:
                                if record['sourceIPAddress'] not in api_calls[
                                        session].get('source_ip', []):
                                    if private_ip_check(
                                            record['sourceIPAddress']):
                                        for ip in api_calls[session].get(
                                                'source_ip', []):
                                            if private_ip_check(ip):
                                                log.info(
                                                    f"Multiple IPs for this credential: {assume_role_session} - sourceIP: {record['sourceIPAddress']}"
                                                )
                                                log.debug(record)
                                                suspicious.append(record)
                                        api_calls[session]['source_ip'].append(
                                            record['sourceIPAddress'])
                                        continue

                                    # see if there was a call to change the IP
                                    if session not in associate_ips:
                                        log.info(
                                            f"Call to change IP for this credential: {assume_role_session} - sourceIP: {record['sourceIPAddress']}"
                                        )
                                        log.debug(record)
                                        suspicious.append(record)
            except Exception as e:
                log.fatal(f'Unknown error on record - {record}')
                log.fatal(f'Error - {e}')

        f.close()

    return suspicious
Code example #10
File: storage.py  Project: adunmore/triage
 def load_metadata(self):
     """Load metadata from storage"""
     with self.metadata_base_store.open("rb") as fd:
         return yaml.full_load(fd)
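
Worth noting: the store is opened in binary mode ("rb"), which works because yaml.full_load accepts text streams, byte streams, and plain strings alike. A minimal illustration:

import io
import yaml

# Text and binary streams parse identically.
assert yaml.full_load(io.StringIO('a: 1')) == {'a': 1}
assert yaml.full_load(io.BytesIO(b'a: 1')) == {'a': 1}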
Code example #11
    def lint_assessment(self, file_path, assessment):
        problems = []
        # check assessment
        with open(file_path, "r") as f:
            data = yaml.full_load(f)
            # check only if there is content in the file
            if data is not None:
                # check openpatch status
                openpatch = data.get("openpatch", {})
                if (openpatch is None or not isinstance(openpatch, dict)
                        or len(openpatch.keys()) == 0):
                    problems.append(
                        Problem(
                            key="openpatch",
                            rule=Rule.NO_OPENPATCH,
                            desc="No OpenPatch found",
                            level=Level.ERROR,
                        ))
                else:
                    status = openpatch.get("status")
                    if not status or status not in openpatch_status:
                        problems.append(
                            Problem(
                                key="openpatch.status",
                                rule=Rule.INVALID_OPENPATCH_STATUS,
                                desc=
                                f"{status} is not allowed. Use one of {openpatch_status}",
                                level=Level.ERROR,
                            ))
                    elif status == "approved" and not openpatch.get("url"):
                        problems.append(
                            Problem(
                                key="openpatch.url",
                                rule=Rule.NO_OPENPATCH_URL,
                                desc="No OpenPatch url",
                                level=Level.WARNING,
                            ))

                # check classifications
                classifications = data.get("classifications", {})
                if classifications is None or len(classifications.keys()) == 0:
                    problems.append(
                        Problem(
                            key="classifications",
                            rule=Rule.NO_CLASSIFICATION,
                            desc="No classification found",
                            level=Level.WARNING,
                        ))
                else:
                    for (
                            classification_id,
                            classification,
                    ) in classifications.items():
                        if "_meta" in classification_id:
                            continue
                        if classification_id not in self.classifications:
                            problems.append(
                                Problem(
                                    key="classifications",
                                    rule=Rule.CLASSIFICATION_NOT_FOUND,
                                    desc=f"{classification_id} not found",
                                ))
                        else:
                            problems.extend(
                                self.lint_classification(
                                    classification, classification_id))

                # check authors
                authors = data.get("authors", [])
                if len(authors) == 0:
                    problems.append(
                        Problem(
                            key="authors",
                            rule=Rule.NO_AUTHOR,
                            desc="No author found",
                            level=Level.WARNING,
                        ))
                else:
                    for author in authors:
                        if author not in self.authors:
                            problems.append(
                                Problem(
                                    key="authors",
                                    rule=Rule.AUTHOR_NOT_FOUND,
                                    desc=f"{author} not found",
                                    level=Level.ERROR,
                                ))

                # check items
                items = data.get("items", None)
                if items is None:
                    problems.append(
                        Problem(
                            key="items",
                            rule=Rule.NO_ITEMS,
                            desc=f"No items found",
                            level=Level.WARNING,
                        ))
                else:
                    count = items.get("count")
                    if not count:
                        problems.append(
                            Problem(
                                key="items",
                                rule=Rule.NO_ITEMS_COUNT,
                                level=Level.WARNING,
                            ))
                    elif not isinstance(count, int) and count != "?":
                        problems.append(
                            Problem(
                                key="items.count",
                                rule=Rule.INVALID_ITEMS_COUNT,
                                desc=
                                f"invalid type {type(count)} should be int or '?'",
                                level=Level.ERROR,
                            ))

                # check papers
                papers = data.get("papers", [])
                if len(papers) == 0:
                    problems.append(
                        Problem(
                            key="papers",
                            rule=Rule.NO_PAPER,
                            desc="No paper found",
                            level=Level.WARNING,
                        ))
                else:
                    for paper in papers:
                        if isinstance(paper, str):
                            problems.append(
                                Problem(
                                    key="papers",
                                    rule=Rule.PAPER_NEEDS_TO_BE_A_MAPPING,
                                    level=Level.ERROR,
                                    desc=
                                    f"{paper} needs to be a mapping (id, category)",
                                ))
                        elif paper.get("id") not in self.papers:
                            problems.append(
                                Problem(
                                    key="papers",
                                    rule=Rule.PAPER_NOT_FOUND,
                                    desc=f"{paper['id']} not found",
                                    level=Level.ERROR,
                                ))
                        elif paper.get("category") is None:
                            problems.append(
                                Problem(
                                    key="papers",
                                    rule=Rule.NO_PAPER_CATEGORY,
                                    desc=f"{paper['id']} has no category",
                                    level=Level.WARNING,
                                ))
                        elif paper.get(
                                "category") not in self.categories["papers"]:
                            problems.append(
                                Problem(
                                    key="papers",
                                    rule=Rule.PAPER_CATEGORY_NOT_FOUND,
                                    level=Level.ERROR,
                                    desc=
                                    f"{paper['category']} for {paper['id']} not found ({[key for key in self.categories['papers'].keys() if not key.startswith('_')]})",
                                ))
        return problems
Code example #12
def get_config(config_name):
    with open('../configs.yml', 'r') as config_file:
        config = yaml.full_load(config_file)
    config = config[config_name]
    return config
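
One caveat with the snippet above: '../configs.yml' resolves against the process's current working directory, not the module's location, so the call breaks when the script is launched from elsewhere. A sketch of a variant anchored to the module file (an assumption, not the original project's code):

import os
import yaml

def get_config_anchored(config_name):
    # Hypothetical variant: resolve configs.yml relative to this module.
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'configs.yml')
    with open(path, 'r') as config_file:
        return yaml.full_load(config_file)[config_name]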
Code example #13
 def policy_yaml(self):
     return yaml.full_load(self.policy)
Code example #14
    def train(self, rank, start_time, return_dict):
        device = torch.device("cuda:" + str(rank))
        print('Running on device: ', device)
        torch.cuda.set_device(device)
        torch.set_default_tensor_type(torch.FloatTensor)

        writer = None
        if not self.args.cross_validate_hp:
            writer = SummaryWriter(logdir=os.path.join(self.save_dir, 'logs'))
            # posting parameters
            param_string = ""
            for k, v in vars(self.args).items():
                param_string += ' ' * 10 + k + ': ' + str(v) + '\n'
            writer.add_text("params", param_string)

        self.setup(rank, self.args.num_processes)
        if self.cfg.MC_DQL:
            transition = namedtuple('Transition', ('episode',))
        else:
            transition = namedtuple(
                'Transition',
                ('state', 'action', 'reward', 'next_state', 'done'))
        memory = TransitionData_ts(capacity=self.args.t_max,
                                   storage_object=transition)

        env = SpGcnEnv(self.args,
                       device,
                       writer=writer,
                       writer_counter=self.global_writer_quality_count,
                       win_event_counter=self.global_win_event_count)
        # Create shared network

        # model = GcnEdgeAC_1(self.cfg, self.args.n_raw_channels, self.args.n_embedding_features, 1, device, writer=writer)

        model = GcnEdgeAC(self.cfg, self.args, device, writer=writer)
        # model = GcnEdgeAC(self.cfg, self.args.n_raw_channels, self.args.n_embedding_features, 1, device, writer=writer)

        model.cuda(device)
        shared_model = DDP(model,
                           device_ids=[model.device],
                           find_unused_parameters=True)

        # dloader = DataLoader(MultiDiscSpGraphDsetBalanced(no_suppix=False, create=False), batch_size=1, shuffle=True, pin_memory=True,
        #                      num_workers=0)
        dloader = DataLoader(SpgDset(),
                             batch_size=self.cfg.batch_size,
                             shuffle=True,
                             pin_memory=True,
                             num_workers=0)
        # Create optimizer for shared network parameters with shared statistics
        # optimizer = CstmAdam(shared_model.parameters(), lr=self.args.lr, betas=self.args.Adam_betas,
        #                      weight_decay=self.args.Adam_weight_decay)
        ######################
        self.action_range = 1
        self.device = torch.device(device)
        self.discount = 0.5
        self.critic_tau = self.cfg.critic_tau
        self.actor_update_frequency = self.cfg.actor_update_frequency
        self.critic_target_update_frequency = self.cfg.critic_target_update_frequency
        self.batch_size = self.cfg.batch_size

        self.log_alpha = torch.tensor(np.log(self.cfg.init_temperature)).to(
            self.device)
        self.log_alpha.requires_grad = True
        # set target entropy to -|A|
        ######################
        # optimizers
        OptimizerContainer = namedtuple('OptimizerContainer',
                                        ('actor', 'critic', 'temperature'))
        actor_optimizer = torch.optim.Adam(
            shared_model.module.actor.parameters(),
            lr=self.cfg.actor_lr,
            betas=self.cfg.actor_betas)

        critic_optimizer = torch.optim.Adam(
            shared_model.module.critic.parameters(),
            lr=self.cfg.critic_lr,
            betas=self.cfg.critic_betas)

        temp_optimizer = torch.optim.Adam([self.log_alpha],
                                          lr=self.cfg.alpha_lr,
                                          betas=self.cfg.alpha_betas)

        optimizers = OptimizerContainer(actor_optimizer, critic_optimizer,
                                        temp_optimizer)

        for param in model.fe_ext.parameters():
            param.requires_grad = False

        if self.args.model_name != "":
            shared_model.load_state_dict(
                torch.load(os.path.join(self.save_dir, self.args.model_name)))
        elif self.args.model_fe_name != "":
            shared_model.module.fe_ext.load_state_dict(
                torch.load(os.path.join(self.save_dir,
                                        self.args.model_fe_name)))
        elif self.args.fe_extr_warmup:
            print('loaded fe extractor')
            shared_model.module.fe_ext.load_state_dict(
                torch.load(os.path.join(self.save_dir, 'agent_model_fe_extr')))

        dist.barrier()

        if not self.args.test_score_only:
            quality = self.args.stop_qual_scaling + self.args.stop_qual_offset
            best_quality = np.inf
            while self.global_count.value() <= self.args.T_max:
                self.update_env_data(env, dloader, device)
                # waff_dis = torch.softmax(env.edge_features[:, 0].squeeze() + 1e-30, dim=0)
                # waff_dis = torch.softmax(env.gt_edge_weights + 0.5, dim=0)
                # waff_dis = torch.softmax(torch.ones_like(env.b_sg_gt_edge_weights), dim=0)
                # loss_weight = torch.softmax(env.b_sg_gt_edge_weights + 1, dim=0)
                env.reset()
                # self.target_entropy = - float(env.gt_edge_weights.shape[0])
                self.target_entropy = -self.args.s_subgraph

                env.stop_quality = self.stop_qual_rule.apply(
                    self.global_count.value(), quality)
                if self.cfg.temperature_regulation == 'follow_quality':
                    self.alpha = self.eps_rule.apply(self.global_count.value(),
                                                     quality)
                    print(self.alpha.item())

                with open(os.path.join(self.save_dir,
                                       'runtime_cfg.yaml')) as info:
                    args_dict = yaml.full_load(info)
                    if args_dict is not None:
                        if 'safe_model' in args_dict:
                            self.args.safe_model = args_dict['safe_model']
                            args_dict['safe_model'] = False
                        if 'add_noise' in args_dict:
                            self.args.add_noise = args_dict['add_noise']
                        if 'critic_lr' in args_dict and args_dict[
                                'critic_lr'] != self.cfg.critic_lr:
                            self.cfg.critic_lr = args_dict['critic_lr']
                            adjust_learning_rate(critic_optimizer,
                                                 self.cfg.critic_lr)
                        if 'actor_lr' in args_dict and args_dict[
                                'actor_lr'] != self.cfg.actor_lr:
                            self.cfg.actor_lr = args_dict['actor_lr']
                            adjust_learning_rate(actor_optimizer,
                                                 self.cfg.actor_lr)
                        if 'alpha_lr' in args_dict and args_dict[
                                'alpha_lr'] != self.cfg.alpha_lr:
                            self.cfg.alpha_lr = args_dict['alpha_lr']
                            adjust_learning_rate(temp_optimizer,
                                                 self.cfg.alpha_lr)
                with open(os.path.join(self.save_dir, 'runtime_cfg.yaml'),
                          "w") as info:
                    yaml.dump(args_dict, info)

                if self.args.safe_model:
                    best_quality = quality
                    if rank == 0:
                        if self.args.model_name_dest != "":
                            torch.save(
                                shared_model.state_dict(),
                                os.path.join(self.save_dir,
                                             self.args.model_name_dest))
                        else:
                            torch.save(
                                shared_model.state_dict(),
                                os.path.join(self.save_dir, 'agent_model'))

                state = env.get_state()
                while not env.done:
                    # Calculate policy and values
                    post_input = (self.global_count.value() +
                                  1) % 15 == 0 and env.counter == 0
                    round_n = env.counter
                    # sample action for data collection
                    distr = None
                    if self.global_count.value() < self.cfg.num_seed_steps:
                        action = torch.rand_like(env.sg_current_edge_weights)
                    else:
                        distr, _, _, action = self.agent_forward(
                            env,
                            shared_model,
                            state,
                            grad=False,
                            post_input=post_input)

                    logg_dict = {'temperature': self.alpha.item()}
                    if distr is not None:
                        logg_dict['mean_loc'] = distr.loc.mean().item()
                        logg_dict['mean_scale'] = distr.scale.mean().item()

                    if self.global_count.value(
                    ) >= self.cfg.num_seed_steps and memory.is_full():
                        self._step(memory,
                                   optimizers,
                                   env,
                                   shared_model,
                                   self.global_count.value(),
                                   writer=writer)
                        self.global_writer_loss_count.increment()

                    next_state, reward, quality = env.execute_action(
                        action, logg_dict)

                    if self.args.add_noise:
                        noise = torch.randn_like(reward) * self.alpha.item()
                        reward = reward + noise

                    memory.push(self.state_to_cpu(state), action.cpu(),
                                reward.cpu(), self.state_to_cpu(next_state),
                                env.done)

                    # Train the network
                    # self._step(memory, shared_model, env, optimizer, loss_weight, off_policy=True, writer=writer)

                    # reward = self.args.reward_clip and min(max(reward, -1), 1) or reward  # Optionally clamp rewards
                    # done = done or episode_length >= self.args.max_episode_length  # Stop episodes at a max length
                    state = next_state

                self.global_count.increment()
                if "self_reg" in self.args.eps_rule and quality <= 2:
                    break

        dist.barrier()
        if rank == 0:
            if not self.args.cross_validate_hp and not self.args.test_score_only and not self.args.no_save:
                # pass
                if self.args.model_name_dest != "":
                    torch.save(
                        shared_model.state_dict(),
                        os.path.join(self.save_dir, self.args.model_name_dest))
                    print('saved')
                else:
                    torch.save(shared_model.state_dict(),
                               os.path.join(self.save_dir, 'agent_model'))

        self.cleanup()
Code example #15
File: test_instance.py  Project: helloworld/dagster
def test_k8s_run_launcher_config(template: HelmTemplate):
    job_namespace = "namespace"
    image_pull_policy = "Always"
    load_incluster_config = True
    env_config_maps = [{"name": "env_config_map"}]
    env_secrets = [{"name": "secret"}]
    env_vars = ["ENV_VAR"]
    volume_mounts = [
        {
            "mountPath": "/opt/dagster/dagster_home/dagster.yaml",
            "name": "dagster-instance",
            "subPath": "dagster.yaml",
        },
        {
            "name": "test-volume",
            "mountPath":
            "/opt/dagster/test_mount_path/volume_mounted_file.yaml",
            "subPath": "volume_mounted_file.yaml",
        },
    ]

    volumes = [
        {
            "name": "test-volume",
            "configMap": {
                "name": "test-volume-configmap"
            }
        },
        {
            "name": "test-pvc",
            "persistentVolumeClaim": {
                "claimName": "my_claim",
                "readOnly": False
            }
        },
    ]

    labels = {"my_label_key": "my_label_value"}

    helm_values = DagsterHelmValues.construct(
        runLauncher=RunLauncher.construct(
            type=RunLauncherType.K8S,
            config=RunLauncherConfig.construct(
                k8sRunLauncher=K8sRunLauncherConfig.construct(
                    jobNamespace=job_namespace,
                    imagePullPolicy=image_pull_policy,
                    loadInclusterConfig=load_incluster_config,
                    envConfigMaps=env_config_maps,
                    envSecrets=env_secrets,
                    envVars=env_vars,
                    volumeMounts=volume_mounts,
                    volumes=volumes,
                    labels=labels,
                )),
        ))

    configmaps = template.render(helm_values)
    instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
    run_launcher_config = instance["run_launcher"]

    assert run_launcher_config["module"] == "dagster_k8s"
    assert run_launcher_config["class"] == "K8sRunLauncher"
    assert run_launcher_config["config"]["job_namespace"] == job_namespace
    assert run_launcher_config["config"][
        "load_incluster_config"] == load_incluster_config
    assert run_launcher_config["config"][
        "image_pull_policy"] == image_pull_policy
    assert run_launcher_config["config"]["env_config_maps"][1:] == [
        configmap["name"] for configmap in env_config_maps
    ]
    assert run_launcher_config["config"]["env_secrets"] == [
        secret["name"] for secret in env_secrets
    ]
    assert run_launcher_config["config"]["env_vars"] == env_vars
    assert run_launcher_config["config"]["volume_mounts"] == volume_mounts
    assert run_launcher_config["config"]["volumes"] == volumes
    assert run_launcher_config["config"]["labels"] == labels

    assert not "fail_pod_on_run_failure" in run_launcher_config["config"]
Code example #16
                    type=str,
                    help="root directory for all datasets")
parser.add_argument('--preprocesseddir',
                    type=str,
                    help="preprocessed data directory")
parser.add_argument('--traininglogsdir',
                    type=str,
                    help="training logs directory")
parser.add_argument('--modelsdir', type=str, help="models directory")
args = parser.parse_args()
run = Run.get_context()
print("GPUs available:")
print(tf.config.experimental.list_physical_devices('GPU'))

# Update paths of input data in config to represent paths on blob.
cfg = yaml.full_load(open(os.path.join(os.getcwd(), "config.yml"),
                          'r'))  # Load config data
cfg['PATHS']['RAW_DATA'] = args.rawdatadir
cfg['PATHS']['PROCESSED_DATA'] = args.preprocesseddir
cfg['PATHS']['TRAIN_SET'] = cfg['PATHS']['PROCESSED_DATA'] + '/' + cfg[
    'PATHS']['TRAIN_SET'].split('/')[-1]
cfg['PATHS']['VAL_SET'] = cfg['PATHS']['PROCESSED_DATA'] + '/' + cfg['PATHS'][
    'VAL_SET'].split('/')[-1]
cfg['PATHS']['TEST_SET'] = cfg['PATHS']['PROCESSED_DATA'] + '/' + cfg['PATHS'][
    'TEST_SET'].split('/')[-1]

# Set paths to run's ./output/ directory
cfg['PATHS']['LOGS'] = args.traininglogsdir
cfg['PATHS']['MODEL_WEIGHTS'] = args.modelsdir

# Set logs directory according to datetime
cur_date = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
Code example #17
def get_yaml_data1():
    with open('stu_loginR.yaml', 'r', encoding='utf-8') as file1:
        file2 = yaml.full_load(file1)
    return file2
Code example #18
    if next_page_token == 0:
        more_subgroups = False

# Bail out if there aren't any matching subgroups in the group

if not groupsio_subgroups:
    sys.exit()

### Compare local subgroup membership against groups.io, resolve deltas ###

all_local_valid_members = dict()

# Open the local .yml file with subgroup definitions

with open(subgroup_membership_filename, 'r') as subgroup_membership_file:
    local_subgroups_and_members = yaml.full_load(subgroup_membership_file)

if not local_subgroups_and_members:
    print('WARN: No lists defined. Exiting')
    sys.exit()

# Walk through definitions

for local_subgroup, local_members in local_subgroups_and_members.items():

    # Protect main and the unified list

    if local_subgroup in [main_list, unified_list]:
        print('INFO: You cannot modify %s. Ignoring.' % local_subgroup)
        continue
Code example #19
""" Test file for printing session configuration """

import pprint

import yaml

from config import constants

print(f'{constants.DECORATOR}\nSESSION CONFIGURATION:\n{constants.DECORATOR}')
pprint.pprint(yaml.full_load(open('config.yml')))
print(f'{constants.DECORATOR}')
Code example #20
File: test_suite.py  Project: gilad-shaham/functions
def get_item_yaml_requirements(directory: str):
    with open(f"{directory}/item.yaml", "r") as f:
        item = yaml.full_load(f)
    return item.get("spec", {}).get("requirements", [])
Code example #21
File: utils.py  Project: edurange/edurange-flask
def questionReader(name):
    name = "".join(e for e in name if e.isalnum())
    with open("./data/tmp/" + name + "/questions.yml", "r") as yml:
        document = yaml.full_load(yml)
    return document
Code example #22
@app.route('/')
def ready():
    return 'ready'


# Variables for input and output data
pathConfig = "/home/config/config.yml"
pathApicLoginTemplate = "/home/templates/apicLoginTemplate.json"
pathSubscriptionIds = "/home/internal/subscriptionIds.json"
basePathLogs = "/home/data/logs/"
basePathOutput = "/home/data/output/"
loginToken = ""

# Load config file, it is required by multiple of the following functions
with open(pathConfig, "r") as handle:
    config = yaml.full_load(handle)

# Disable warning for APIC Self-Signed Certificate
requests.packages.urllib3.disable_warnings()


# Write Tool logs to local logfile, can be mapped to host volume
# Messages are provided by the calling functions
def writeLog(message):
    Path(basePathLogs).mkdir(parents=True, exist_ok=True)
    logPath = basePathLogs + time.strftime('%Y-%m-%d',
                                           time.localtime()) + ".txt"
    timestamp = time.strftime("%d/%m/%Y-%H:%M:%S", time.localtime())

    old_stdout = sys.stdout
    sys.stdout = open(logPath, "a+")
Code example #23
def load_yaml(file):
    """If pyyaml > 5.1 use full_load to avoid warning"""
    if hasattr(yaml, "full_load"):
        return yaml.full_load(file)
    else:
        return yaml.load(file)
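
This guard is the inverse of the version check in code example #2: on PyYAML >= 5.1 a bare yaml.load(file) still works but emits a YAMLLoadWarning about the missing Loader argument, so preferring full_load where available keeps runs quiet on both old and new releases. For untrusted input, yaml.safe_load, which exists on both sides of the 5.1 boundary, is the safer choice regardless.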
Code example #24
from jinja2 import Environment, FileSystemLoader
import yaml

env = Environment(loader=FileSystemLoader('.'),
                  trim_blocks=True,
                  lstrip_blocks=True)
template = env.get_template("nexus.txt.j2")

with open("config.yaml") as file:
    interface_file = yaml.full_load(file)

with open('output.txt', 'w') as file:
    file.write(template.render(interface_file))
Code example #25
def read_yaml(path='devices.yml'):
    with open(path) as f:
        devices = yaml.full_load(f)
    return devices
Code example #26
    def startServer(self, cmd):
        '''Start the Minecraft server'''

        os.chdir(self.config.game_version_server_dir())

        # If there is no eula.txt yet, run the server once to generate it
        if not self.checkEULA():
            if self.config.isForge:
                self.generateForgeServerEULA()
            else:
                os.system(cmd)

        # Accept the EULA
        with io.open(self.config.game_version_server_eula_file_path(),
                     'r',
                     encoding='utf-8') as f:
            eula = f.read()
            checkEULA = eula.replace('false', 'true')
        with io.open(self.config.game_version_server_eula_file_path(),
                     'w',
                     encoding='utf-8') as f:
            f.write(checkEULA)

        # Change commands.yaml so command-block commands use Mojang's versions and are not overridden by the Essentials plugin
        bukkit_command_yaml_file_path = self.config.game_version_server_bukkit_command_yaml_file_path(
        )
        if os.path.exists(bukkit_command_yaml_file_path):
            with io.open(bukkit_command_yaml_file_path, 'r',
                         encoding='utf-8') as f:
                commands_cfg = yaml.full_load(f)
                commands_cfg['command-block-overrides'] = ['*']
            with io.open(bukkit_command_yaml_file_path, 'w',
                         encoding='utf-8') as f:
                yaml.dump(commands_cfg, f)
                RichText.info('commands.yaml has been changed!')

        # Start the server

        if self.config.debug:
            print(cmd)

        os.system(cmd)

        # Set the server properties to offline mode
        with io.open(self.config.game_version_server_properties_file_path(),
                     'r',
                     encoding='utf-8') as f:
            properties = f.read()
            if 'online-mode=false' in properties:
                offline_properties = None
            else:
                offline_properties = properties.replace(
                    'online-mode=true', 'online-mode=false')

        if offline_properties is not None:
            with io.open(
                    self.config.game_version_server_properties_file_path(),
                    'w',
                    encoding='utf-8') as f:
                f.write(offline_properties)
                print(
                    ColorString.confirm(
                        'Setting the server to offline mode, next launch this setting take effect!!!'
                    ))

        # Create symlinks for the server core data files if needed
        self.symlink_server_core_files_if_need()
Code example #27
File: test_instance.py  Project: helloworld/dagster
def test_celery_k8s_run_launcher_config(template: HelmTemplate):
    image = {
        "repository": "test_repo",
        "tag": "test_tag",
        "pullPolicy": "Always"
    }

    configSource = {
        "broker_transport_options": {
            "priority_steps": [9]
        },
        "worker_concurrency": 1,
    }

    workerQueues = [
        {
            "name": "dagster",
            "replicaCount": 2
        },
        {
            "name": "extra-queue-1",
            "replicaCount": 1
        },
    ]

    volume_mounts = [
        {
            "mountPath": "/opt/dagster/dagster_home/dagster.yaml",
            "name": "dagster-instance",
            "subPath": "dagster.yaml",
        },
        {
            "name": "test-volume",
            "mountPath":
            "/opt/dagster/test_mount_path/volume_mounted_file.yaml",
            "subPath": "volume_mounted_file.yaml",
        },
    ]

    volumes = [
        {
            "name": "test-volume",
            "configMap": {
                "name": "test-volume-configmap"
            }
        },
        {
            "name": "test-pvc",
            "persistentVolumeClaim": {
                "claimName": "my_claim",
                "readOnly": False
            }
        },
    ]

    labels = {"my_label_key": "my_label_value"}

    image_pull_secrets = [{"name": "IMAGE_PULL_SECRET"}]

    helm_values = DagsterHelmValues.construct(
        imagePullSecrets=image_pull_secrets,
        runLauncher=RunLauncher.construct(
            type=RunLauncherType.CELERY,
            config=RunLauncherConfig.construct(
                celeryK8sRunLauncher=CeleryK8sRunLauncherConfig.construct(
                    image=image,
                    configSource=configSource,
                    workerQueues=workerQueues,
                    volumeMounts=volume_mounts,
                    volumes=volumes,
                    labels=labels,
                )),
        ),
    )

    configmaps = template.render(helm_values)
    instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
    run_launcher_config = instance["run_launcher"]

    assert run_launcher_config["module"] == "dagster_celery_k8s"
    assert run_launcher_config["class"] == "CeleryK8sRunLauncher"

    assert run_launcher_config["config"]["config_source"] == configSource

    assert run_launcher_config["config"]["broker"] == {
        "env": "DAGSTER_CELERY_BROKER_URL"
    }

    assert run_launcher_config["config"]["backend"] == {
        "env": "DAGSTER_CELERY_BACKEND_URL"
    }

    assert run_launcher_config["config"]["volume_mounts"] == volume_mounts
    assert run_launcher_config["config"]["volumes"] == volumes
    assert run_launcher_config["config"]["labels"] == labels

    assert run_launcher_config["config"][
        "image_pull_secrets"] == image_pull_secrets

    assert run_launcher_config["config"]["image_pull_policy"] == "Always"

    assert run_launcher_config["config"][
        "service_account_name"] == "release-name-dagster"

    assert not "fail_pod_on_run_failure" in run_launcher_config["config"]

    helm_values_with_image_pull_policy = DagsterHelmValues.construct(
        runLauncher=RunLauncher.construct(
            type=RunLauncherType.CELERY,
            config=RunLauncherConfig.construct(
                celeryK8sRunLauncher=CeleryK8sRunLauncherConfig.construct(
                    image=image,
                    configSource=configSource,
                    workerQueues=workerQueues,
                    volumeMounts=volume_mounts,
                    volumes=volumes,
                    imagePullPolicy="IfNotPresent",
                )),
        ), )

    configmaps = template.render(helm_values_with_image_pull_policy)
    instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
    run_launcher_config = instance["run_launcher"]
    assert run_launcher_config["config"]["image_pull_policy"] == "IfNotPresent"

    helm_values_with_fail_pod_on_run_failure = DagsterHelmValues.construct(
        runLauncher=RunLauncher.construct(
            type=RunLauncherType.CELERY,
            config=RunLauncherConfig.construct(
                celeryK8sRunLauncher=CeleryK8sRunLauncherConfig.construct(
                    image=image,
                    configSource=configSource,
                    workerQueues=workerQueues,
                    failPodOnRunFailure=True,
                )),
        ), )

    configmaps = template.render(helm_values_with_fail_pod_on_run_failure)
    instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
    run_launcher_config = instance["run_launcher"]
    assert run_launcher_config["config"]["fail_pod_on_run_failure"]
Code example #28
File: inventory.py  Project: nflx/ansible-inventory
def load_file(file_name):
    with open(file_name, 'r') as fh:
        return yaml.full_load(fh)
Code example #29
File: main.py  Project: jomccr/resume_generator
def parse_yaml(filename='./static/my_resume.yaml'):
    with open(filename) as file:
        cv = yaml.full_load(file)
        cv['cachebuster'] = lambda: md5(str(time()).encode('utf-8')).hexdigest()
    return cv
Code example #30
def load_system_template():
    with open(f'{template_path}/system_stub.yml') as file:
        documents = yaml.full_load(file)
        return documents
Code example #31
 def load_preferred_entries_file(fpath):
     with open(fpath) as h:
         return yaml.full_load(h)
Code example #32
import torch
from skeleton import skeleton
import pickle
from tqdm import tqdm
import src.utils as utils
import src.stat as stats
import yaml
import sys

G, color_edge = skeleton() #skeleton of fly
legtips = [4, 9, 14, 19, 24, 29]
print('making video')

usr_input = sys.argv[-1]

#load global parameters
par = yaml.full_load(open(usr_input, "rb"))
        
#predictions
data = torch.load(par['data_dir'] + '/test_results.pth.tar')
out_offset, inp_offset = pickle.load(open('joint_locations.pkl','rb'))

#output
targets_1d = torch.load(par['data_dir'] + '/stat_3d.pth.tar')['targets_1d']
out_mean = torch.load(par['data_dir'] + '/stat_3d.pth.tar')['mean']
out_std = torch.load(par['data_dir'] + '/stat_3d.pth.tar')['std']
out = utils.unNormalize(data['output'], out_mean[targets_1d], out_std[targets_1d])

#inputs
targets_2d = torch.load(par['template_dir'] + '/stat_2d.pth.tar')['targets_2d']    
inp_mean = torch.load(par['template_dir'] + '/stat_2d.pth.tar')['mean']
inp_std = torch.load(par['template_dir'] + '/stat_2d.pth.tar')['std']
Code example #33
File: integration.py  Project: vast-io/vast
def main():
    """The main function"""
    parser = argparse.ArgumentParser(
        description='Test runner',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '--app',
        default='./core',
        help='Path to the executable (vast/core)')
    parser.add_argument(
        '-s',
        '--set',
        type=Path,
        help='Run the testset from this test definition YAML file')
    parser.add_argument(
        '-T', '--tag', nargs='+', help='The tag for which tests will be run')
    parser.add_argument(
        '-t',
        '--test',
        nargs='+',
        help='The test(s) to run (runs all tests if unset)')
    parser.add_argument(
        '-u',
        '--update',
        action='store_true',
        help='Update baseline for tests')
    parser.add_argument(
        '-d',
        '--directory',
        default='run_<current_ISO_timestamp>',
        type=Path,
        help='The basedir for the test runs')
    parser.add_argument(
        '-K',
        '--keep',
        action='store_true',
        help='Keep artifacts of successful runs')
    parser.add_argument(
        '--timeout',
        type=int,
        default=0,
        help='Test timeout in seconds')
    parser.add_argument(
        '-l',
        '--list',
        nargs='*',
        help='Return a list of available tests optionally filtered with tags')
    parser.add_argument(
        '-L',
        '--list-tags',
        action='store_true',
        help='Return a list of all available tags')
    parser.add_argument(
        '--flamegraph',
        action='store_true',
        help='Generate a flamegraph of the test run')
    parser.add_argument(
        '--flamegraph_path',
        default='scripts/flamegraph',
        type=Path,
        help='Path to flamegraph script')
    parser.add_argument(
        '-v',
        '--verbosity',
        default='DEBUG',
        help='Set the logging verbosity')
    args = parser.parse_args()
    # Setup logging.
    LOGGER.setLevel(args.verbosity)
    fmt = "%(asctime)s %(levelname)-8s %(message)s"
    colored_formatter = coloredlogs.ColoredFormatter(fmt)
    plain_formatter = logging.Formatter(fmt)
    formatter = colored_formatter if sys.stdout.isatty() else plain_formatter
    ch = logging.StreamHandler()
    ch.setLevel(args.verbosity)
    ch.setFormatter(formatter)
    LOGGER.addHandler(ch)
    # Create a new handler for log level CRITICAL.
    class ShutdownHandler(logging.Handler):
        def emit(self, record):
            logging.shutdown()
            signal_subprocs(signal.SIGTERM)
            sys.exit(1)
    # Register this handler with log level CRITICAL (which equals 50).
    sh = ShutdownHandler(level=50)
    sh.setFormatter(formatter)
    LOGGER.addHandler(sh)
    # Load test set.
    if not args.set:
        args.set = Path(__file__).resolve().parent / 'default_set.yaml'
    args.set = args.set.resolve()
    LOGGER.debug(f'resolved test set path to {args.set}')
    with open(args.set, 'r') as test_file:
        test_dict = yaml.full_load(test_file)
    test_dec = validate(test_dict, args.set.parent)
    # Print tests.
    if args.list is not None:
        selection = tagselect(args.list, test_dec['tests'])
        for test in selection.keys():
            print(test)
        return
    # Print test tags.
    if args.list_tags:
        tags = set().union(
            *[set(t.tags) for _, t in test_dec['tests'].items()])
        for tag in tags:
            print(tag)
        return
    # Create working directory.
    if args.directory.name == 'run_<current_ISO_timestamp>':
        timestamp = datetime.now().isoformat(timespec='seconds')
        args.directory = Path(f'run_{timestamp}')
    LOGGER.debug(f'keeping state in {args.directory}')
    if not args.directory.exists():
        args.directory.mkdir(parents=True)
    # Setup signal handlers and run.
    signal.alarm(args.timeout)
    success = run(args, test_dec)
    signal.alarm(0)
    with suppress(OSError):
        args.directory.rmdir()
    retcode = 0 if success else 1
    sys.exit(retcode)
Code example #34
    def __init__(self,
                 app=None,
                 name=None,
                 base_dir='~/.config/',
                 template_file=None,
                 template=None,
                 envvars={},
                 defaults={},
                 cli_args={},
                 path_override=None,
                 file_override=None):
        """
        Automatically load an app config with environment variable and
        default overrides.
        Config file path will be {base_dir}/{app}/{name}.[yaml|yml|json]

        :param str app: Application name
        :param str name: Name of app sub-config
        :param str base_dir: Base config dir. Defaults to '~/.config/`
        :param str template_file: Template file to copy to given location if none exists
        :param str/dict template: YAML/JSON string or dict to use as a template
        if no config exists.
        :param dict envvars: Map of environment variables to map to keys. { key: APP_ENV_VAR }
        These override anything in the config file
        :param dict defaults: Default values for config keys
        :param dict cli_args: Config key overrides likely from the app CLI.
        These override any config file or env var values.
        :param str path_override: Specify direct path instead of default base dir
        :param str file_override: Direct path to config file to read

        :raises exceptions.IOError: when permissions denied
        """

        self.filename = None
        self.full_path = None

        if file_override is not None:
            self.full_path = file_override
            if not os.path.isfile(self.full_path):
                raise Exception('Unable to find specified file {}'.format(
                    self.full_path))
        else:
            if path_override:
                self.base_dir = os.path.expanduser(path_override)
            else:
                self.base_dir = os.path.expanduser(os.path.join(base_dir, app))

            if not os.path.isdir(self.base_dir):
                os.makedirs(self.base_dir)

            for ext in VALID_EXT:
                for _ext in [ext, ext.upper()]:
                    filename = '{}.{}'.format(name, _ext)
                    cfg = os.path.join(self.base_dir, filename)
                    if os.path.isfile(cfg):
                        self.full_path = cfg
                        self.filename = filename
                        break

            if self.filename is None:
                self.filename = name + '.' + VALID_EXT[0]
                self.full_path = os.path.join(self.base_dir, self.filename)
                if template_file:
                    shutil.copyfile(template_file, self.full_path)
                else:
                    with open(self.full_path, 'w') as f:
                        if template:
                            if isinstance(template, string_types):
                                f.write(template)
                            elif isinstance(template, dict):
                                yaml.dump(template,
                                          f,
                                          default_flow_style=False)
                        else:
                            yaml.dump({}, f, default_flow_style=False)

        self._data = defaults
        with open(self.full_path, 'r') as f:
            data = yaml.full_load(f)
            if data:  # full_load returns None for an empty file
                self._data.update(data)
            # load envvars if given and override
            for k, v in envvars.items():
                if v in os.environ:
                    self._data[k] = os.environ[v]

            # finally, override with given cli args
            for k, v in cli_args.items():
                if k not in self._data or v is not None:
                    self._data[k] = v
Code example #35
def load_client_template(type='default'):
    with open(f'{template_path}/client_stub_{type}.yml') as file:
        documents = yaml.full_load(file)
        return documents
Code example #36
import ZCBF_module
import two_dof_module
import yaml
import sys

#========================================
# Define simlation scenario

sim_file = 'ZCBF_control_'+ sys.argv[1] + '.yaml' # 'ZCBF_cont_control_expi.yaml'
print('Evaluating bounds for experiment: '+ sim_file)

#========================================

# Load parameter file
with open(sim_file) as file:
    data = yaml.full_load(file)


# Define system constraints
q_max = data['q_max'] 
q_min = data['q_min']
v_max = data['v_max']
v_min = data['v_min']
u_max = data['u_max']
u_min = data['u_min']


# Define 2DOF model parameters
m_list = data['m_list']  # link masses (kg)
l_list = data['l_list']  # link lengths (m)
f_list = data['f_list']  # damping friction term
Code example #37
import logging.config
from pathlib import Path

import yaml

DIR = Path(__file__).parent.absolute()
LOGPATH = DIR.parent.parent / 'logs'


def configureLogging():
    """Configure logging for the application."""
    Path(LOGPATH).mkdir(exist_ok=True)
    mainfilename = LOGPATH / 'main.log'
    debugfilename = LOGPATH / 'debug.log'

    with open(DIR / 'logging.yaml', 'r') as f:
        log_cfg = yaml.full_load(f)

    log_cfg['handlers']['file_handler']['filename'] = mainfilename
    log_cfg['handlers']['rotating_handler']['filename'] = debugfilename

    print(log_cfg)

    logging.config.dictConfig(log_cfg)

    # Set ERROR level logging on verbose modules
    modules = ['botocore', 'urllib3', 'googleapiclient']
    for module in modules:
        logging.getLogger(module).setLevel(logging.ERROR)
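
A sketch of a logging.yaml compatible with the code above. The handler names file_handler and rotating_handler are the ones patched at runtime; the rest is an assumption about a typical dictConfig layout:

import yaml

sample = """
version: 1
formatters:
  simple:
    format: '%(asctime)s %(levelname)s %(name)s: %(message)s'
handlers:
  file_handler:
    class: logging.FileHandler
    filename: main.log          # replaced with LOGPATH/main.log at runtime
    formatter: simple
  rotating_handler:
    class: logging.handlers.RotatingFileHandler
    filename: debug.log         # replaced with LOGPATH/debug.log at runtime
    maxBytes: 1048576
    backupCount: 3
    formatter: simple
root:
  level: DEBUG
  handlers: [file_handler, rotating_handler]
"""
cfg = yaml.full_load(sample)
assert {'file_handler', 'rotating_handler'} <= set(cfg['handlers'])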

Code example #38
import os
import yaml
from codecs import encode, decode

from haruka import LOGGER
from haruka.modules.sql.translation import prev_locale

LANGUAGES = ['en', 'id', 'ru']

strings = {}

for i in LANGUAGES:
    # Use a context manager so each file handle is closed promptly.
    with open(os.path.dirname(__file__) + f"/{i}/string.yml", "r") as f:
        strings[i] = yaml.full_load(f)
    print(f"Loaded {i}/string.yml")


def tld(chat_id, t, show_none=True):
    LANGUAGE = prev_locale(chat_id)

    if LANGUAGE:
        LOCALE = LANGUAGE.locale_name
        if LOCALE == 'en' and t in strings['en']:
            result = decode(
                encode(strings['en'][t], 'latin-1', 'backslashreplace'),
                'unicode-escape')
            return result
        elif LOCALE == 'id' and t in strings['id']:
            result = decode(
                encode(strings['id'][t], 'latin-1', 'backslashreplace'),
                'unicode-escape')
            return result
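
A sketch of the per-language string.yml shape this loader expects: a flat mapping from string id to text, inferred from the strings['en'][t] lookups above. Keys and values are hypothetical:

import yaml

sample = """
send-help: "Hello! Use /start to begin.\\nUse /help for this message."
welcome: "Welcome, {}!"
"""
en = yaml.full_load(sample)
print(en['welcome'].format('Alice'))  # -> Welcome, Alice!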
Code example #39
File: main.py  Project: symphony233/milvus
import argparse
import os
from yaml import full_load

# config, parser, utils, logger, run_suite and get_image_tag are provided by
# the surrounding milvus test harness and are not shown in this snippet.


def main():
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # helm mode with scheduler
    arg_parser.add_argument("--image-version",
                            default="",
                            help="image version")
    arg_parser.add_argument("--schedule-conf",
                            metavar='FILE',
                            default='',
                            help="load test schedule from FILE")

    # local mode
    arg_parser.add_argument('--local',
                            action='store_true',
                            help='use local milvus server')
    arg_parser.add_argument('--host',
                            help='server host ip param for local mode',
                            default='127.0.0.1')
    arg_parser.add_argument('--port',
                            help='server port param for local mode',
                            default=config.SERVER_PORT_DEFAULT)
    arg_parser.add_argument('--suite',
                            metavar='FILE',
                            help='load test suite from FILE',
                            default='')
    arg_parser.add_argument('--server-config',
                            metavar='FILE',
                            help='load server config from FILE',
                            default='')

    args = arg_parser.parse_args()

    if args.schedule_conf:
        if args.local:
            raise Exception(
                "Helm mode with scheduler and other mode are incompatible")
        if not args.image_version:
            raise Exception("Image version not given")
        env_mode = "helm"
        image_version = args.image_version
        with open(args.schedule_conf) as f:
            schedule_config = full_load(f)
        helm_path = os.path.join(os.getcwd(),
                                 "..//milvus-helm-charts/charts/milvus-ha")
        for item in schedule_config:
            server_host = item["server"] if "server" in item else ""
            server_tag = item["server_tag"] if "server_tag" in item else ""
            deploy_mode = item[
                "deploy_mode"] if "deploy_mode" in item else config.DEFAULT_DEPLOY_MODE
            suite_params = item["suite_params"]
            for suite_param in suite_params:
                suite_file = "suites/" + suite_param["suite"]
                with open(suite_file) as f:
                    suite_dict = full_load(f)
                logger.debug(suite_dict)
                run_type, run_params = parser.operations_parser(suite_dict)
                collections = run_params["collections"]
                image_type = suite_param["image_type"]
                image_tag = get_image_tag(image_version)
                for suite in collections:
                    # run test cases
                    milvus_config = suite.get("milvus")
                    server_config = suite.get("server")
                    logger.debug(milvus_config)
                    logger.debug(server_config)
                    helm_params = {
                        "server_name": server_host,
                        "server_tag": server_tag,
                        "server_config": server_config,
                        "milvus_config": milvus_config,
                        "image_tag": image_tag,
                        "image_type": image_type
                    }
                    env_params = {
                        "deploy_mode": deploy_mode,
                        "helm_path": helm_path,
                        "helm_params": helm_params
                    }
                    # job = back_scheduler.add_job(run_suite, args=[run_type, suite, env_mode, env_params],
                    #                              misfire_grace_time=36000)
                    # logger.info(job)
                    # logger.info(job.id)

    elif args.local:
        # for local mode
        deploy_params = args.server_config
        deploy_params_dict = None
        if deploy_params:
            with open(deploy_params) as f:
                deploy_params_dict = full_load(f)
            logger.debug(deploy_params_dict)
        deploy_mode = utils.get_deploy_mode(deploy_params_dict)
        server_tag = utils.get_server_tag(deploy_params_dict)
        env_params = {
            "host": args.host,
            "port": args.port,
            "deploy_mode": deploy_mode,
            "server_tag": server_tag,
            "deploy_opology": deploy_params_dict
        }
        suite_file = args.suite
        with open(suite_file) as f:
            suite_dict = full_load(f)
        logger.debug(suite_dict)
        run_type, run_params = parser.operations_parser(suite_dict)
        collections = run_params["collections"]
        if len(collections) > 1:
            raise Exception("Multi collections not supported in Local Mode")
        # ensure there is only one case in suite
        # suite = {"run_type": run_type, "run_params": collections[0]}
        suite = collections[0]
        timeout = suite["timeout"] if "timeout" in suite else None
        env_mode = "local"
        return run_suite(run_type,
                         suite,
                         env_mode,
                         env_params,
                         timeout=timeout)
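
The scheduler branch of main() expects the schedule conf to be a list of server entries, each with its own suite_params. A sketch of that shape; only the key names are taken from the reads above, all values are hypothetical:

from yaml import full_load

sample = """
- server: node-1
  server_tag: gpu
  deploy_mode: single
  suite_params:
    - suite: insert_performance.yaml
      image_type: gpu
    - suite: search_performance.yaml
      image_type: cpu
"""
for item in full_load(sample):
    for suite_param in item["suite_params"]:
        print(item.get("server", ""), "suites/" + suite_param["suite"])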
Code example #40
File: conf_yaml.py  Project: virgil-su/api
def read_yaml(self, k, v):
    """Return data[k][v] from the YAML file stored on self.file."""
    with open(self.file, encoding='utf8') as f:
        data = yaml.full_load(f)
    return data[k][v]
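
A hedged usage sketch, assuming a host class that stores the YAML path on self.file (the class body is not shown in the snippet):

import yaml

class ConfYaml:
    """Hypothetical wrapper; only read_yaml's body comes from the snippet above."""

    def __init__(self, file):
        self.file = file

    def read_yaml(self, k, v):
        with open(self.file, encoding='utf8') as f:
            data = yaml.full_load(f)
        return data[k][v]

# Given a conf.yaml containing:
#   login:
#     url: /api/login
# ConfYaml('conf.yaml').read_yaml('login', 'url') would return '/api/login'.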