Example #1
    def test_resolve_nested_update(self):
        config = '''
        project_name: hello-world
        dirs: 
          home_dir: /home/user
          project_dir: "{dirs.home_dir}/projects/{project_name}"
        '''
        res = load(config)

        self.assertEqual('hello-world', res.project_name)
        self.assertEqual('/home/user', res.dirs.home_dir)
        self.assertEqual('/home/user/projects/hello-world',
                         res.dirs.project_dir)

        res.dirs.database_dir = '{dirs.project_dir}/databases'
        res.databases = {
            'customers': '{dirs.database_dir}/customers.sqlite',
            'items': '{dirs.database_dir}/items.sqlite'
        }
        self.assertEqual(
            '/home/user/projects/hello-world/databases/customers.sqlite',
            res.databases.customers)
        self.assertEqual(
            '/home/user/projects/hello-world/databases/items.sqlite',
            res.databases['items'])
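A reduced sketch of the behaviour shown above: values assigned after loading join the resolution scope, so placeholder strings in them resolve against the rest of the document (the key names here are illustrative):

    import dynamic_yaml

    res = dynamic_yaml.load('root_dir: /data')
    res.cache_dir = '{root_dir}/cache'
    print(res.cache_dir)  # /data/cache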
Example #2
    def test_json_dump(self):
        config = '''
        project_name: hello-world
        dirs: 
          home_dir: /home/user
          project_dir: "{dirs.home_dir}/projects/{project_name}"
          tool1_output_dir: "{dirs.project_dir}/tool1-{parameters.tool1.phase1.subparameters[0]}-{parameters.tool1.phase1.subparameters[1]}"
        parameters:
          tool1:
            phase1:
              subparameters:
               - 0.5
               - 0.6
            phase2:
              subparameters:
               - "{parameters.tool1.phase1.subparameters[0]}"
        '''
        res = load(config)
        self.assertEqual(
            yaml.safe_load('''
dirs:
  home_dir: /home/user
  project_dir: /home/user/projects/hello-world
  tool1_output_dir: /home/user/projects/hello-world/tool1-0.5-0.6
parameters: {tool1: {phase1: {subparameters: [0.5, 0.6]}, phase2: {subparameters: ['0.5']}}}
project_name: hello-world'''), yaml.safe_load(dump(res)))
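The round-trip above suggests a convenient way to freeze a config: dumping a loaded document writes every placeholder out resolved. A minimal sketch of that pattern, assuming `load` and `dump` are the top-level dynamic_yaml functions used throughout these examples:

    import dynamic_yaml

    raw = '''
    name: demo
    out_dir: '{name}/results'
    '''

    cfg = dynamic_yaml.load(raw)
    print(dynamic_yaml.dump(cfg))  # out_dir is written as demo/results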
Example #3
 def load_config(self):
     with pkg_resources.resource_stream("sequence.bert_ner",
                                        self._config_file) as res:
         config = dynamic_yaml.load(res)
     self._config.update(config)
     return self._config
Example #4
    def test_convertList(self):
        fhndl, fname = tempfile.mkstemp()
        os.write(fhndl, b'a: [1, 2, 3]\n')
        os.close(fhndl)

        with open(fname) as f:
            res = load(f)

        self.assertTrue(isinstance(res.a, YamlList))
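As the isinstance check suggests, YAML sequences come back wrapped as YamlList, which still behaves like a plain Python list. A short illustrative sketch:

    import dynamic_yaml

    res = dynamic_yaml.load('a: [1, 2, 3]')
    print(res.a[0])     # 1
    print(list(res.a))  # [1, 2, 3]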
Example #5
    def test_resolve_missing(self):
        config = '''
        project_name: hello-world
        home_dir: /home/user
        project_dir: "{missing_dir}/projects/{project_name}"
        '''
        res = load(config)

        self.assertRaises(KeyError, lambda: res.project_dir)
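Note that resolution is lazy here: load succeeds even though `missing_dir` is undefined, and the KeyError only surfaces when `project_dir` is accessed. A hedged sketch of guarding such an access (the config and fallback path are illustrative):

    import dynamic_yaml

    res = dynamic_yaml.load("project_dir: '{missing_dir}/projects'")
    try:
        path = res.project_dir
    except KeyError:
        path = '/tmp/projects'  # fall back when the reference cannot resolve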
Example #6
    def test_orderedDictionary(self):
        fhndl, fname = tempfile.mkstemp()
        os.write(fhndl, b'a: 1\nb: 2\nc: 3\nd: 4\n')
        os.close(fhndl)

        with open(fname) as f:
            res = load(f)

        self.assertEqual(list(res.items()), [('a', 1), ('b', 2), ('c', 3),
                                             ('d', 4)])
Example #7
 def test_dict(self):
     config = '''
     a: 1
     b: 2
     c: a
     '''
     res = load(config)
     self.assertEqual(1, res.a)
     self.assertEqual(2, res.b)
     self.assertEqual('a', res.c)
Example #8
 def test_list(self):
     config = '''
     - a
     - b
     - c
     '''
     res = load(config)
     self.assertEqual('a', res[0])
     self.assertEqual('b', res[1])
     self.assertEqual('c', res[2])
Example #9
    def test_recursive(self):
        config = '''
        prefix: /opt/ml
        input_path: '{prefix}/input'
        training_data_path: '{input_path}/data/training'
        '''

        res = load(config, recursive=True)
        self.assertEqual('/opt/ml/input', res.input_path)
        self.assertEqual('/opt/ml/input/data/training', res.training_data_path)
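References resolve at access time by default; `recursive=True` appears to resolve the whole tree eagerly instead, which matters when the result is consumed as a plain mapping (see Example #14, where it is unpacked with `**res`). A small sketch contrasting the two modes, under that assumption:

    import dynamic_yaml

    raw = '''
    prefix: /opt/ml
    input_path: '{prefix}/input'
    '''

    lazy = dynamic_yaml.load(raw)                   # resolves on attribute access
    eager = dynamic_yaml.load(raw, recursive=True)  # resolves everything up front
    assert lazy.input_path == eager.input_path == '/opt/ml/input'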
Example #10
 def test_list_resolution(self):
     config = '''
     - a
     - b
     - '{root[0]}'
     '''
     res = load(config)
     self.assertEqual('a', res[0])
     self.assertEqual('b', res[1])
     self.assertEqual('a', res[2])
Example #11
    def test_resolve_simple(self):
        config = '''
        project_name: hello-world
        home_dir: /home/user
        project_dir: "{home_dir}/projects/{project_name}"
        '''
        res = load(config)

        self.assertEqual('hello-world', res.project_name)
        self.assertEqual('/home/user', res.home_dir)
        self.assertEqual('/home/user/projects/hello-world', res.project_dir)
Example #12
    def test_list_iteration(self):
        config = '''
        targets:
          v1: value1
          v2: value2
        query:
          - '{targets.v1}'
          - '{targets.v2}'
        '''

        res = load(config)
        self.assertEqual(['value1', 'value2'], list(res.query))
Example #13
 def test_nested_dict(self):
     config = '''
     a: 1
     b: 
       c: 3
       d: 4
       e: 'a'
     '''
     res = load(config)
     self.assertEqual(1, res.a)
     self.assertEqual(3, res.b.c)
     self.assertEqual(4, res.b.d)
     self.assertEqual('a', res.b.e)
Example #14
    def test_keyword_args(self):
        config = '''
        prefix: /opt/ml
        input_path: '{prefix}/input'
        training_data_path: '{input_path}/data/training'
        '''

        def inner_test(input_path, training_data_path, **kwargs):
            self.assertEqual('/opt/ml/input', input_path)
            self.assertEqual('/opt/ml/input/data/training', training_data_path)

        res = load(config, recursive=True)
        inner_test(**res)
Example #15
    def test_dict_iteration(self):
        config = '''
        targets:
          v1: value1
          v2: value2
        query:
          v1: '{targets.v1}'
          v2: '{targets.v2}'
        '''

        res = load(config)
        self.assertEqual(['v1', 'v2'], list(res.query))
        self.assertEqual(['value1', 'value2'], list(res.query.values()))
        self.assertEqual([('v1', 'value1'), ('v2', 'value2')],
                         list(res.query.items()))
Example #16
    def test_resolve_simple_update(self):
        config = '''
        project_name: hello-world
        dirs: 
          home_dir: /home/user
          project_dir: "{dirs.home_dir}/projects/{project_name}"
        '''
        res = load(config)
        self.assertEqual('hello-world', res.project_name)
        self.assertEqual('/home/user', res.dirs.home_dir)
        self.assertEqual('/home/user/projects/hello-world',
                         res.dirs.project_dir)

        res.dirs.home_dir = '/winhome/user'
        self.assertEqual('/winhome/user/projects/hello-world',
                         res.dirs.project_dir)
Example #17
    def test_argparse(self):
        config = '''
        output_dir: 'output-{parameters.parameter1}-{parameters.parameter2}'
        parameters:
          parameter1: a
          parameter2: b
        '''
        res = load(config)
        self.assertEqual('output-a-b', res.output_dir)

        parser = ArgumentParser()
        parser.add_argument('--parameter1')
        parser.add_argument('--parameter2')
        parser.parse_args(('--parameter1', 'c', '--parameter2', 'd'),
                          namespace=res.parameters)
        self.assertEqual('output-c-d', res.output_dir)
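Because the loaded object supports attribute assignment, it can stand in for an argparse namespace: parsed flags overwrite the YAML values, and dependent placeholders re-resolve on the next access. A minimal standalone version of the pattern from the test above (flag names illustrative):

    import dynamic_yaml
    from argparse import ArgumentParser

    res = dynamic_yaml.load('''
    output_dir: 'output-{parameters.run}'
    parameters:
      run: default
    ''')

    parser = ArgumentParser()
    parser.add_argument('--run')
    parser.parse_args(['--run', 'exp1'], namespace=res.parameters)
    print(res.output_dir)  # output-exp1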
Example #18
    def test_resolve_deeply_nested(self):
        config = '''
        project_name: hello-world
        dirs: 
          home_dir: /home/user
          project_dir: "{dirs.home_dir}/projects/{project_name}"
          tool1_output_dir: "{dirs.project_dir}/tool1-{parameters.tool1.phase1.subparameter1}-{parameters.tool1.phase1.subparameter2}"
        parameters:
          tool1:
            phase1:
              subparameter1: 0.5
              subparameter2: 1.6666
        '''
        res = load(config)

        self.assertEqual('/home/user/projects/hello-world/tool1-0.5-1.6666',
                         res.dirs.tool1_output_dir)
Example #19
 def test_deeply_nested_dict(self):
     config = '''
     a: 1
     b: 
       c: 2
       d: 3
       e:
         f: 4
         g:
           h: 5
     '''
     res = load(config)
     self.assertEqual(1, res.a)
     self.assertEqual(2, res.b.c)
     self.assertEqual(3, res.b.d)
     self.assertEqual(4, res.b.e.f)
     self.assertEqual(5, res.b.e.g.h)
Example #20
    def __init__(self, path=None, fields=None, encoding="utf-8", **kwargs):
        config_file = 're_config.yml'
        import dynamic_yaml

        with open(config_file, mode='r', encoding='UTF-8') as f:
            config = dynamic_yaml.load(f)
        examples = []
        with open(path, 'r', encoding='utf-8') as f:
            for line in f:
                record = json.loads(line)
                if path == config.data.chip_relation.test_path:
                    text = list(record['text'])
                    examples.append(Example.fromlist((text, ''), fields))
                else:
                    text_list, tag_list = self._get_list(record)
                    examples.append(Example.fromlist((text_list, tag_list), fields))
        super(REDataset, self).__init__(examples, fields, **kwargs)
Example #21
 def _load_config(self):
     with open(self._config_file, mode='r', encoding='UTF-8') as f:
         config = dynamic_yaml.load(f)
     return config
Example #22
            # shift tensors to GPU if available
            batch_data, batch_token_starts = batch_data.to(
                self.device), batch_token_starts.to(self.device)
            if not interMode:
                batch_tags = batch_tags.to(self.device)
                yield batch_data, batch_token_starts, batch_tags
            else:
                yield batch_data, batch_token_starts

    def load_train(self):
        return self.train_data

    def load_valid(self):
        return self.valid_data

    def load_test(self):
        return self.test_data


if __name__ == '__main__':
    config_file = 'intention_classification_config.yml'
    import dynamic_yaml

    # Device
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    with open(config_file, mode='r', encoding='UTF-8') as f:
        config = dynamic_yaml.load(f)
    data_loader = IntentionClassificationDataLoader(config)
    datas = data_loader.cut_data()
Example #23
def collect_inputs(config_path):
    # Load the YAML config file.
    logging.info("Config path: %s", config_path)
    with open(config_path) as config_fh:
        config = dynamic_yaml.load(config_fh)
    logging.info("Config: %s", config)

    galaxy_api_key = os.getenv("GALAXY_API_KEY")
    if not galaxy_api_key:
        fail(
            "missing GALAXY_API_KEY; set this in the environment or .env file")

    def require_config(key):
        v = config.get(key)
        if not v:
            fail('missing "%s" in the config', key)
        return v

    # Load config
    galaxy_url = require_config("galaxy_url")
    common_inputs = require_config("common_inputs")
    workflow_name = require_config("workflow")
    library_folder_name = require_config("library_folder")
    sample_conf = require_config("sample")
    workflow_params_config = config.get("workflow_params", {})
    replacement_params = config.get("replacement_params", {})
    tags = config.get("tags")
    publish = config.get("publish")

    # Create galaxy object and get workflow description
    gi = bioblend.galaxy.GalaxyInstance(url=galaxy_url, key=galaxy_api_key)
    wfdesc = get_workflow_description(gi, workflow_name)

    # Create History
    sample = sample_conf["name"]
    alphanum = string.ascii_lowercase + string.digits
    rand = ''.join(random.choice(alphanum) for x in range(8))
    history_name = workflow_name + " " + sample + " " + rand

    logging.info("Creating history: %s", history_name)
    history = get_or_create_history(gi, history_name)
    history_id = history["id"]
    create_history_tags(gi, history_id, tags)
    gi.histories.update_history(history_id, published=publish)

    # Upload common_inputs
    logging.info("Uploading Common Inputs")
    cm_files = require_config("common_input_files")
    for cm_key, cm_val in cm_files.items():
        fd_name = os.path.dirname(common_inputs[cm_key])
        file_type = os.path.splitext(cm_val)[-1].lstrip(".")
        logging.info("Galaxy Common Inputs folder name: %s", fd_name)
        logging.info("Common Input file name: %s", cm_val)
        upload_dataset(gi, cm_val, file_type, fd_name)

    # Find files on filesystem
    logging.info("Collecting files from filesystem")
    sample_files = sample_conf["files"]

    # Upload files to Data Library
    logging.info("Uploading sample data")
    file_type = sample_conf["filetype"]
    sample_ids = []
    for sf in sample_files:
        sample_ids.append(
            upload_dataset(gi, sf, file_type, library_folder_name)["id"])

    # Create collection list in history
    logging.info("Populating sample data in history")
    sample_data = create_dataset_or_collection_in_history(
        gi, sample, history_id, sample_ids)
    logging.info("sample collection data: %s", sample_data)

    logging.info("Preparing to invoke workflow")
    common_inputs_library_ids = {}
    for k, v in common_inputs.items():
        logging.info('''Collecting common inputs from Galaxy: "%s" "%s"''', k,
                     v)
        f = galaxy_fs.get_path(gi, v)
        logging.info('''f path: "%s"''', f)
        common_inputs_library_ids[k] = f["id"]

    logging.info('''Collecting common inputs dict: "%s"''',
                 common_inputs_library_ids)
    steps_by_label = {}
    inputs = {}
    for step in wfdesc["steps"].values():
        label = step.get("label")
        logging.info('''Step label is: "%s"''', label)
        uuid = step.get("uuid")
        steps_by_label[label] = step
        if label in common_inputs_library_ids:
            lib_id = common_inputs_library_ids[label]
            inputs[uuid] = {
                "id": lib_id,
                "src": "ld",
            }

        if label == "INPUT":
            inputs[uuid] = {
                "id": sample_data["id"],
                "src": sample_data["src"],
            }

    params = {}
    if workflow_params_config:
        for step_label, step_params in workflow_params_config.items():
            if step_label not in steps_by_label:
                fail('configuring workflow params, missing step with label {}',
                     step_label)
            step_id = steps_by_label[step_label]['id']
            if steps_by_label[step_label]['type'] == 'subworkflow':
                sub_dict = {}
                for sub_label, sub_params in step_params.items():
                    sub_wfdesc = get_workflow_description(
                        gi, steps_by_label[step_label]['name'])
                    sub_wf_step = [
                        v for k, v in sub_wfdesc['steps'].items()
                        if v['label'] == sub_label
                    ][0]
                    param_key, param_value = list(sub_params.items())[0]
                    sub_id = "|".join([str(sub_wf_step['id']), param_key])
                    sub_dict[sub_id] = param_value
                params[step_id] = sub_dict
            else:
                step_dict = {}
                for step_k, step_v in step_params.items():
                    step_dict[step_k] = step_v

                params[step_id] = step_dict

    # Replacement params
    replace_dict = {}
    for k, v in replacement_params.items():
        logging.info("Collecting Replacement params: %s %s", k, v)
        replace_dict[k] = v

    # Invoke workflow
    logging.info("Invoking workflow")
    workflow_id = wfdesc["uuid"]
    logging.info("Replacement params: %s", replace_dict)
    logging.info("Workflow params: %s", params)
    logging.info("Inputs: %s", inputs)

    res = gi.workflows.invoke_workflow(workflow_id,
                                       inputs,
                                       history_id=history_id,
                                       params=params,
                                       import_inputs_to_history=False,
                                       replacement_params=replace_dict)
    print(json.dumps(res, indent=2))
Example #24
 def load_config(self):
     with pkg_resources.resource_stream("sequence.intention_classification",
                                        self._config_file) as res:
         config = dynamic_yaml.load(res)
     self._config.update(config)
     return self._config
Example #25
 def load_common_config(self):
     with pkg_resources.resource_stream("common.config",
                                        self._config_file) as res:
         self._config = dynamic_yaml.load(res)
     return self._config
Example #26
                                # NOTE: both branches build the same pred_dict;
                                # kept as in the original source.
                                pred_dict = {
                                    "text": all_subject[i]['text'],
                                    "spo_list": new,
                                }
                            else:
                                pred_dict = {
                                    "text": all_subject[i]['text'],
                                    "spo_list": new,
                                }
                            fw.write(json.dumps(pred_dict, ensure_ascii=False) + '\n')
                fo.close()
            fs.close()
        fw.close()



if __name__ == '__main__':
    # Device
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    object_config_file = 're_object_config.yml'
    subject_config_file = 're_subject_config.yml'
    import dynamic_yaml

    with open(object_config_file, mode='r', encoding='UTF-8') as f:
        object_config = dynamic_yaml.load(f)
    object_config.device = device
    with open(subject_config_file, mode='r', encoding='UTF-8') as f:
        subject_config = dynamic_yaml.load(f)
    get_result(subject_config.data.chip_relation.result_path,
               object_config.data.chip_relation.result_path,
               subject_config.data.chip_relation.save_path,
               subject_config.data.chip_relation.shcemas_path)
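Across the application examples above (#20 through #26), the recurring pattern is the same: open the YAML file, hand the stream to dynamic_yaml.load, and read settings through attribute chains. A condensed, hedged version of that pattern (file name and keys illustrative):

    import dynamic_yaml

    with open('config.yml', mode='r', encoding='utf-8') as f:
        config = dynamic_yaml.load(f)

    # Nested keys are plain attribute chains, e.g.:
    # config.data.chip_relation.test_path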