Example #1
File: test_utils.py Project: BoPeng/SOS
    def testPatternMatch(self):
        """Test snake match's pattern match facility"""
        res = extract_pattern("{a}-{b}.txt", ["file-1.txt", "file-ab.txt"])
        self.assertEqual(res["a"], ["file", "file"])
        self.assertEqual(res["b"], ["1", "ab"])
        res = extract_pattern("{a}-{b}.txt", ["file--ab--cd.txt"])
        self.assertEqual(res["a"], ["file--ab-"])
        self.assertEqual(res["b"], ["cd"])
        res = extract_pattern("{path}/{to}/{file}.txt", ["/tmp//1.txt"])
        self.assertEqual(res["path"], [None])
        self.assertEqual(res["to"], [None])
        self.assertEqual(res["file"], [None])
        res = extract_pattern("{path}/{to}/{file}.txt", ["/tmp/test/1.txt.txt"])
        self.assertEqual(res["path"], ["/tmp"])
        self.assertEqual(res["to"], ["test"])
        self.assertEqual(res["file"], ["1.txt"])
        # expand_pattern: substitute variables from env.sos_dict into the pattern
        env.sos_dict = WorkflowDict(
            {"os": os, "a": 100, "b": "file name", "c": ["file1", "file2", "file 3"], "d": {"a": "file1", "b": "file2"}}
        )
        self.assertEqual(expand_pattern("{b}.txt"), ["file name.txt"])
        self.assertEqual(expand_pattern("{c}.txt"), ["file1.txt", "file2.txt", "file 3.txt"])
        self.assertEqual(expand_pattern("{a}_{c}.txt"), ["100_file1.txt", "100_file2.txt", "100_file 3.txt"])
Example #2
File: tasks.py Project: jdblischak/sos-pbs
    def _prepare_script(self, task_id):
        # read the task file and look for runtime info
        #
        params = TaskFile(task_id).params
        sos_dict = params.sos_dict

        # for this task, we will need walltime, nodes, cores, mem
        # however, these could be fixed in the job template, so we do not need to have all of them in the runtime
        runtime = self.config
        # we also use saved verbosity and sig_mode because the current sig_mode might have been changed
        # (e.g. in Jupyter) after the job is saved.
        runtime.update({
            x: sos_dict['_runtime'][x]
            for x in ('nodes', 'cores', 'mem', 'walltime', 'cur_dir',
                      'home_dir', 'verbosity', 'sig_mode', 'run_mode')
            if x in sos_dict['_runtime']
        })
        if 'name' in sos_dict['_runtime']:
            env.logger.warning(
                "Runtime option name is deprecated. Please use tags to keep track of task names."
            )
        runtime['task'] = task_id
        # this is also deprecated
        runtime['job_name'] = task_id
        if 'nodes' not in runtime:
            runtime['nodes'] = 1
        if 'cores' not in runtime:
            runtime['cores'] = 1
        # for backward compatibility
        runtime['job_file'] = f'~/.sos/tasks/{task_id}.sh'

        # let us first prepare a task file
        try:
            job_text = cfg_interpolate(self.job_template, runtime)
        except Exception as e:
            raise ValueError(
                f'Failed to generate job file for task {task_id}: {e}')

        # now we need to write a job file
        job_file = os.path.join(os.path.expanduser('~'), '.sos', 'tasks',
                                task_id + '.sh')
        # do not translate newline under windows because the script will be executed
        # under linux/mac
        with open(job_file, 'w', newline='') as job:
            job.write(job_text)

        # then copy the job file to remote host if necessary
        self.agent.send_task_file(job_file)

        if runtime['run_mode'] == 'dryrun':
            try:
                cmd = f'bash ~/.sos/tasks/{task_id}.sh'
                print(self.agent.check_output(cmd))
            except Exception as e:
                raise RuntimeError(f'Failed to submit task {task_id}: {e}')
        else:
            #
            # now we need to figure out a command to submit the task
            try:
                cmd = cfg_interpolate(self.submit_cmd, runtime)
            except Exception as e:
                raise ValueError(
                    f'Failed to generate job submission command from template "{self.submit_cmd}": {e}'
                )
            env.logger.debug(f'submit {task_id}: {cmd}')
            try:
                try:
                    cmd_output = self.agent.check_output(cmd).strip()
                except Exception as e:
                    raise RuntimeError(f'Failed to submit task {task_id}: {e}')

                if not cmd_output:
                    raise RuntimeError(
                        f'Failed to submit task {task_id} with command {cmd}. No output returned.'
                    )

                if 'submit_cmd_output' not in self.config:
                    submit_cmd_output = '{job_id}'
                else:
                    submit_cmd_output = self.config['submit_cmd_output']
                #
                if '{job_id}' not in submit_cmd_output:
                    raise ValueError(
                        f'Option submit_cmd_output should have at least a pattern for job_id, "{submit_cmd_output}" specified.'
                    )
                #
                # try to extract job_id from command output
                # let us write a job_id file so that we can check the status of tasks more easily
                job_id_file = os.path.join(os.path.expanduser('~'), '.sos',
                                           'tasks', task_id + '.job_id')
                with open(job_id_file, 'w') as job:
                    res = extract_pattern(submit_cmd_output,
                                          [cmd_output.strip()])
                    if 'job_id' not in res or len(
                            res['job_id']) != 1 or res['job_id'][0] is None:
                        raise RuntimeError(
                            f'Failed to extract job_id from "{cmd_output.strip()}" using pattern "{submit_cmd_output}"'
                        )
                    else:
                        job_id = res['job_id'][0]
                        # other variables
                        for k, v in res.items():
                            job.write(f'{k}: {v[0]}\n')
                # Send the job id file to the remote host so that
                # 1. the job could be properly killed (with job_id) on the remote host (not remotely)
                # 2. the job status could be properly probed in case the job was not properly submitted (#911)
                self.agent.send_task_file(job_id_file)
                # output job id to stdout
                env.logger.info(
                    f'{task_id} ``submitted`` to {self.alias} with job id {job_id}'
                )
                return True
            except Exception as e:
                raise RuntimeError(f'Failed to submit task {task_id}: {e}')
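In the submission branch above, the scheduler's job id is recovered from the output of the submit command using the submit_cmd_output pattern, which defaults to '{job_id}' (the entire output is taken as the job id) and must contain a {job_id} placeholder. A minimal sketch of that extraction step, using a hypothetical Slurm-style output line and plain re rather than the extract_pattern call in the code above:

import re

# hypothetical host configuration value and captured scheduler output
submit_cmd_output = 'Submitted batch job {job_id}'
cmd_output = 'Submitted batch job 123456'

# regex equivalent of the extract_pattern call used above (sketch, not SoS code)
regex = re.escape(submit_cmd_output).replace(re.escape('{job_id}'), '(?P<job_id>.+)')
m = re.fullmatch(regex, cmd_output.strip())
job_id = m.group('job_id') if m else None
print(job_id)  # 123456

The extracted job_id (and any other fields captured by the pattern) are written to ~/.sos/tasks/<task_id>.job_id and the file is sent to the remote host, so that later status checks and kill operations can refer to the scheduler's own job id.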