Example #1
    def update_task_proxies(self, task_ids=None):
        """Update dynamic task instance fields"""
        update_time = time()

        # update task instance
        for itask in self.schd.pool.get_all_tasks():
            name, point_string = TaskID.split(itask.identity)
            if ((task_ids and itask.identity not in task_ids) or
                    (itask.identity not in self.task_proxies)):
                continue
            ts = itask.get_state_summary()
            self.cycle_states.setdefault(point_string, {})[name] = ts['state']
            tproxy = self.task_proxies[itask.identity]
            tproxy.checksum = f"{itask.identity}@{update_time}"
            tproxy.state = ts['state']
            tproxy.job_submits = ts['submit_num']
            tproxy.spawned = ast.literal_eval(ts['spawned'])
            tproxy.latest_message = ts['latest_message']
            tproxy.jobs[:] = [
                f"{self.workflow_id}/{job_id}" for job_id in itask.jobs]
            tproxy.broadcasts[:] = [
                f"{key}={val}" for key, val in
                self.schd.task_events_mgr.broadcast_mgr.get_broadcast(
                    itask.identity).items()]
            prereq_list = []
            for prereq in itask.state.prerequisites:
                # Protobuf messages populated within
                prereq_obj = prereq.api_dump(self.workflow_id)
                if prereq_obj:
                    prereq_list.append(prereq_obj)
            tproxy.prerequisites.extend(prereq_list)
            for _, msg, is_completed in itask.state.outputs.get_all():
                tproxy.outputs.append(f"{msg}={is_completed}")
Example #2
 def remove_task_jobs(self, task_id):
     """removed all jobs associated with a task from the pool."""
     name, point_string = TaskID.split(task_id)
     t_id = f"/{point_string}/{name}/"
     for job_d in self.pool.keys():
         if t_id in job_d:
             del self.pool[job_d]
Example #3
 def match_ext_trigger(self, itask):
     """Match external triggers for a waiting task proxy."""
     if not self.ext_triggers or not itask.state.external_triggers:
         return
     has_changed = False
     for trig, satisfied in list(itask.state.external_triggers.items()):
         if satisfied:
             continue
         for qmsg, qid in self.ext_triggers.copy():
             if trig == qmsg:
                 # Matched.
                 point_string = TaskID.split(itask.identity)[1]
                 # Set trigger satisfied.
                 itask.state.external_triggers[trig] = True
                 # Broadcast the event ID to the cycle point.
                 if qid is not None:
                     self.put_broadcast(
                         [point_string],
                         ['root'],
                         [{'environment': {'CYLC_EXT_TRIGGER_ID': qid}}],
                     )
                 self.ext_triggers[(qmsg, qid)] -= 1
                 if not self.ext_triggers[(qmsg, qid)]:
                     del self.ext_triggers[(qmsg, qid)]
                 has_changed = True
                 break
     return has_changed
Example #4
File: broadcast_mgr.py  Project: cylc/cylc
 def match_ext_trigger(self, itask):
     """Match external triggers for a waiting task proxy."""
     if not self.ext_triggers or not itask.state.external_triggers:
         return
     has_changed = False
     for trig, satisfied in list(itask.state.external_triggers.items()):
         if satisfied:
             continue
         for qmsg, qid in self.ext_triggers.copy():
             if trig == qmsg:
                 # Matched.
                 point_string = TaskID.split(itask.identity)[1]
                 # Set trigger satisfied.
                 itask.state.external_triggers[trig] = True
                 # Broadcast the event ID to the cycle point.
                 if qid is not None:
                     self.put_broadcast(
                         [point_string],
                         ['root'],
                         [{'environment': {'CYLC_EXT_TRIGGER_ID': qid}}],
                     )
                 self.ext_triggers[(qmsg, qid)] -= 1
                 if not self.ext_triggers[(qmsg, qid)]:
                     del self.ext_triggers[(qmsg, qid)]
                 has_changed = True
                 break
     return has_changed
Example #5
 def _match_ext_trigger(self, itask):
     """Match external triggers for a waiting task proxy."""
     if not self.ext_triggers or not itask.state.external_triggers:
         return False
     for trig, satisfied in list(itask.state.external_triggers.items()):
         if satisfied:
             continue
         for qmsg, qid in self.ext_triggers.copy():
             if trig != qmsg:
                 continue
             # Matched.
             point_string = TaskID.split(itask.identity)[1]
             # Set trigger satisfied.
             itask.state.external_triggers[trig] = True
             # Broadcast the event ID to the cycle point.
             if qid is not None:
                 self.put_broadcast(
                     [point_string],
                     ['root'],
                     [{
                         'environment': {
                             'CYLC_EXT_TRIGGER_ID': qid
                         }
                     }],
                 )
             # Create data-store delta
             self.data_store_mgr.delta_task_ext_trigger(
                 itask, qid, qmsg, True)
             self.ext_triggers[(qmsg, qid)] -= 1
             if not self.ext_triggers[(qmsg, qid)]:
                 del self.ext_triggers[(qmsg, qid)]
             return True
     return False
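
Across the match_ext_trigger listings above, self.ext_triggers behaves like a countdown keyed by (message, id): each successful match consumes one occurrence and the entry is dropped when the count reaches zero, so a single external event can only satisfy a limited number of waiting tasks. A minimal sketch of that bookkeeping, with made-up names (ext_triggers, consume) and data, purely for illustration:

# ext_triggers maps (message, id) -> number of times the event may still be
# consumed, mirroring the decrement-and-delete pattern in _match_ext_trigger.
ext_triggers = {('obs data ready', 'ev42'): 2}

def consume(trig_msg):
    """Consume one occurrence of a queued external trigger, if any matches."""
    for qmsg, qid in list(ext_triggers):
        if trig_msg != qmsg:
            continue
        ext_triggers[(qmsg, qid)] -= 1
        if not ext_triggers[(qmsg, qid)]:
            del ext_triggers[(qmsg, qid)]
        return True
    return False

print(consume('obs data ready'))   # True: matched, one use left
print(consume('obs data ready'))   # True: matched, entry removed
print(consume('obs data ready'))   # False: queue exhausted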
Example #6
    def _get_tasks_info(schd):
        """Retrieve task summary info and states."""

        task_summary = {}
        task_states = {}

        for task in schd.pool.get_tasks():
            ts = task.get_state_summary()
            task_summary[task.identity] = ts
            name, point_string = TaskID.split(task.identity)
            task_states.setdefault(point_string, {})
            task_states[point_string][name] = ts['state']

        for task in schd.pool.get_rh_tasks():
            ts = task.get_state_summary()
            ts['state'] = TASK_STATUS_RUNAHEAD
            task_summary[task.identity] = ts
            name, point_string = TaskID.split(task.identity)
            task_states.setdefault(point_string, {})
            task_states[point_string][name] = ts['state']

        return task_summary, task_states
Example #7
 def insert_job(self, job_conf):
     """Insert job into pool."""
     update_time = time()
     job_owner = job_conf['owner']
     sub_num = job_conf['submit_num']
     name, point_string = TaskID.split(job_conf['task_id'])
     t_id = f'{self.workflow_id}{ID_DELIM}{point_string}{ID_DELIM}{name}'
     j_id = f'{t_id}{ID_DELIM}{sub_num}'
     j_buf = PbJob(
         stamp=f'{j_id}@{update_time}',
         id=j_id,
         submit_num=sub_num,
         state=JOB_STATUSES_ALL[0],
         task_proxy=t_id,
         batch_sys_name=job_conf['batch_system_name'],
         env_script=job_conf['env-script'],
         err_script=job_conf['err-script'],
         exit_script=job_conf['exit-script'],
         execution_time_limit=job_conf['execution_time_limit'],
         host=job_conf['host'],
         init_script=job_conf['init-script'],
         job_log_dir=job_conf['job_log_dir'],
         owner=job_owner,
         post_script=job_conf['post-script'],
         pre_script=job_conf['pre-script'],
         script=job_conf['script'],
         work_sub_dir=job_conf['work_d'],
         name=name,
         cycle_point=point_string,
     )
     j_buf.batch_sys_conf.extend([
         f'{key}={val}'
         for key, val in job_conf['batch_system_conf'].items()
     ])
     j_buf.directives.extend(
         [f'{key}={val}' for key, val in job_conf['directives'].items()])
     j_buf.environment.extend(
         [f'{key}={val}' for key, val in job_conf['environment'].items()])
     j_buf.param_env_tmpl.extend([
         f'{key}={val}' for key, val in job_conf['param_env_tmpl'].items()
     ])
     j_buf.param_var.extend(
         [f'{key}={val}' for key, val in job_conf['param_var'].items()])
     j_buf.extra_logs.extend(job_conf['logfiles'])
     self.updates[j_id] = j_buf
     self.task_jobs.setdefault(t_id, set()).add(j_id)
     self.updates_pending = True
Example #8
def main(
    parser: COP,
    options: 'Values',
    workflow: str,
    task_id: Optional[str] = None
) -> None:
    workflow, _ = parse_reg(workflow)
    pclient = get_client(workflow, timeout=options.comms_timeout)

    if task_id and not TaskID.is_valid_id(task_id):
        raise UserInputError("Invalid task ID: %s" % task_id)

    flow_kwargs = {
        'request_string': FLOW_QUERY,
        'variables': {'wFlows': [workflow]}
    }
    task_kwargs: Dict[str, Any] = {
        'request_string': TASK_QUERY,
    }
    # cylc ping WORKFLOW
    result = pclient('graphql', flow_kwargs)
    msg = ""
    for flow in result['workflows']:
        w_name = flow['name']
        w_port = flow['port']
        w_pub_port = flow['pubPort']
        if cylc.flow.flags.verbosity > 0:
            sys.stdout.write(
                f'{w_name} running on '
                f'{pclient.host}:{w_port} {w_pub_port}\n'
            )
        # cylc ping WORKFLOW TASKID
        if task_id:
            task, point = TaskID.split(task_id)
            w_id = flow['id']
            task_kwargs['variables'] = {
                'tProxy': f'{w_id}{ID_DELIM}{point}{ID_DELIM}{task}'
            }
            task_result = pclient('graphql', task_kwargs)
            if not task_result.get('taskProxy'):
                msg = "task not found"
            elif task_result['taskProxy']['state'] != TASK_STATUS_RUNNING:
                msg = f"task not {TASK_STATUS_RUNNING}"
            if msg:
                print(cparse(f'<red>{msg}</red>'))
                sys.exit(1)
Example #9
 def insert_job(self, job_conf):
     """Insert job into pool."""
     update_time = time()
     int_id = job_conf['job_d']
     job_owner = job_conf['owner']
     name, point_string = TaskID.split(job_conf['task_id'])
     t_id = f"{self.owner}/{self.suite}/{point_string}/{name}"
     j_id = f"{self.owner}/{self.suite}/{int_id}"
     j_buf = PbJob(
         checksum=f"{int_id}@{update_time}",
         id=j_id,
         submit_num=job_conf['submit_num'],
         state=JOB_STATUSES_ALL[0],
         task_proxy=t_id,
         batch_sys_name=job_conf['batch_system_name'],
         env_script=job_conf['env-script'],
         err_script=job_conf['err-script'],
         exit_script=job_conf['exit-script'],
         execution_time_limit=job_conf['execution_time_limit'],
         host=job_conf['host'],
         init_script=job_conf['init-script'],
         job_log_dir=job_conf['job_log_dir'],
         owner=job_owner,
         post_script=job_conf['post-script'],
         pre_script=job_conf['pre-script'],
         script=job_conf['script'],
         work_sub_dir=job_conf['work_d'],
     )
     j_buf.batch_sys_conf.extend(
         [f"{key}={val}" for key, val in
             job_conf['batch_system_conf'].items()])
     j_buf.directives.extend(
         [f"{key}={val}" for key, val in
             job_conf['directives'].items()])
     j_buf.environment.extend(
         [f"{key}={val}" for key, val in
             job_conf['environment'].items()])
     j_buf.param_env_tmpl.extend(
         [f"{key}={val}" for key, val in
             job_conf['param_env_tmpl'].items()])
     j_buf.param_var.extend(
         [f"{key}={val}" for key, val in
             job_conf['param_var'].items()])
     j_buf.extra_logs.extend(job_conf['logfiles'])
     self.pool[int_id] = j_buf
Example #10
    def generate_ghost_task(self, task_id):
        """Create task-point element populated with static data.

        Args:
            task_id (str):
                valid TaskID string.

        Returns:
            object: cylc.flow.data_messages_pb2.PbTaskProxy
                Populated task proxy data element.

        """
        update_time = time()

        name, point_string = TaskID.split(task_id)
        self.cycle_states.setdefault(point_string, {})[name] = (None, False)
        t_id = f'{self.workflow_id}{ID_DELIM}{name}'
        tp_id = f'{self.workflow_id}{ID_DELIM}{point_string}{ID_DELIM}{name}'
        tp_stamp = f'{tp_id}@{update_time}'
        taskdef = self.data[self.workflow_id][TASKS].get(
            t_id,
            self.updates[TASKS].get(t_id, MESSAGE_MAP[TASKS])
        )
        tproxy = PbTaskProxy(
            stamp=tp_stamp,
            id=tp_id,
            task=taskdef.id,
            cycle_point=point_string,
            depth=taskdef.depth,
            name=name,
        )
        tproxy.namespace[:] = taskdef.namespace
        tproxy.parents[:] = [
            f'{self.workflow_id}{ID_DELIM}{point_string}{ID_DELIM}{p_name}'
            for p_name in self.parents[name]]
        tproxy.ancestors[:] = [
            f'{self.workflow_id}{ID_DELIM}{point_string}{ID_DELIM}{a_name}'
            for a_name in self.ancestors[name]
            if a_name != name]
        tproxy.first_parent = tproxy.ancestors[0]
        return tproxy
Example #11
def main(parser, options, suite, task_id=None):
    pclient = SuiteRuntimeClient(suite, timeout=options.comms_timeout)

    if task_id and not TaskID.is_valid_id(task_id):
        raise UserInputError("Invalid task ID: %s" % task_id)

    flow_kwargs = {
        'request_string': FLOW_QUERY,
        'variables': {
            'wFlows': [suite]
        }
    }
    task_kwargs = {
        'request_string': TASK_QUERY,
    }
    # cylc ping SUITE
    result = pclient('graphql', flow_kwargs)
    msg = ""
    for flow in result['workflows']:
        w_name = flow['name']
        w_port = flow['port']
        w_pub_port = flow['pubPort']
        if cylc.flow.flags.verbose:
            sys.stdout.write(f'{w_name} running on '
                             f'{pclient.host}:{w_port} {w_pub_port}\n')
        # cylc ping SUITE TASKID
        if task_id:
            task, point = TaskID.split(task_id)
            w_id = flow['id']
            task_kwargs['variables'] = {
                'tProxy': f'{w_id}{ID_DELIM}{point}{ID_DELIM}{task}'
            }
            task_result = pclient('graphql', task_kwargs)
            if not task_result.get('taskProxy'):
                msg = "task not found"
            elif task_result['taskProxy']['state'] != TASK_STATUS_RUNNING:
                msg = f"task not {TASK_STATUS_RUNNING}"
            if msg:
                print(cparse(f'<red>{msg}</red>'))
                sys.exit(1)
Example #12
    def insert_job(self, job_conf):
        """Insert job into pool."""
        job_owner = job_conf['owner']
        sub_num = job_conf['submit_num']
        name, point_string = TaskID.split(job_conf['task_id'])
        t_id = f'{self.workflow_id}{ID_DELIM}{point_string}{ID_DELIM}{name}'
        j_id = f'{t_id}{ID_DELIM}{sub_num}'
        j_buf = PbJob(stamp=f'{j_id}@{time()}',
                      id=j_id,
                      submit_num=sub_num,
                      state=JOB_STATUSES_ALL[0],
                      task_proxy=t_id,
                      batch_sys_name=job_conf['batch_system_name'],
                      env_script=job_conf['env-script'],
                      err_script=job_conf['err-script'],
                      exit_script=job_conf['exit-script'],
                      execution_time_limit=job_conf['execution_time_limit'],
                      host=job_conf['platform']['name'],
                      init_script=job_conf['init-script'],
                      owner=job_owner,
                      post_script=job_conf['post-script'],
                      pre_script=job_conf['pre-script'],
                      script=job_conf['script'],
                      work_sub_dir=job_conf['work_d'],
                      name=name,
                      cycle_point=point_string,
                      batch_sys_conf=json.dumps(job_conf['batch_system_conf']),
                      directives=json.dumps(job_conf['directives']),
                      environment=json.dumps(job_conf['environment']),
                      param_var=json.dumps(job_conf['param_var']))

        # Add in log files.
        j_buf.job_log_dir = get_task_job_log(self.schd.suite, point_string,
                                             name, sub_num)
        j_buf.extra_logs.extend(job_conf['logfiles'])

        self.added[j_id] = j_buf
        self.task_jobs.setdefault(t_id, set()).add(j_id)
        self.updates_pending = True
Example #13
    def _generate_ghost_task(self, task_id):
        """Create task instances populated with static data fields."""
        update_time = time()

        name, point_string = TaskID.split(task_id)
        self.cycle_states.setdefault(point_string, {})[name] = None
        tp_id = f"{self.workflow_id}/{point_string}/{name}"
        tp_check = f"{task_id}@{update_time}"
        taskdef = self.tasks[name]
        tproxy = PbTaskProxy(
            checksum=tp_check,
            id=tp_id,
            task=taskdef.id,
            cycle_point=point_string,
            depth=taskdef.depth,
        )
        tproxy.namespace[:] = taskdef.namespace
        tproxy.parents[:] = [
            f"{self.workflow_id}/{point_string}/{p_name}"
            for p_name in self.parents[name]]
        p1_name = self.parents[name][0]
        tproxy.first_parent = f"{self.workflow_id}/{point_string}/{p1_name}"
        return tproxy
Example #14
File: broadcast_mgr.py  Project: cylc/cylc
    def get_broadcast(self, task_id=None):
        """Retrieve all broadcast variables that target a given task ID."""
        if task_id == "None":
            task_id = None
        if not task_id:
            # all broadcasts requested
            return self.broadcasts
        try:
            name, point_string = TaskID.split(task_id)
        except ValueError:
            raise Exception("Can't split task_id %s" % task_id)

        ret = {}
        # The order is:
        #    all:root -> all:FAM -> ... -> all:task
        # -> tag:root -> tag:FAM -> ... -> tag:task
        for cycle in self.ALL_CYCLE_POINTS_STRS + [point_string]:
            if cycle not in self.broadcasts:
                continue
            for namespace in reversed(self.linearized_ancestors[name]):
                if namespace in self.broadcasts[cycle]:
                    self._addict(ret, self.broadcasts[cycle][namespace])
        return ret
Example #15
    def get_broadcast(self, task_id=None):
        """Retrieve all broadcast variables that target a given task ID."""
        if task_id == "None":
            task_id = None
        if not task_id:
            # all broadcasts requested
            return self.broadcasts
        try:
            name, point_string = TaskID.split(task_id)
        except ValueError:
            raise Exception("Can't split task_id %s" % task_id)

        ret = {}
        # The order is:
        #    all:root -> all:FAM -> ... -> all:task
        # -> tag:root -> tag:FAM -> ... -> tag:task
        for cycle in ALL_CYCLE_POINTS_STRS + [point_string]:
            if cycle not in self.broadcasts:
                continue
            for namespace in reversed(self.linearized_ancestors[name]):
                if namespace in self.broadcasts[cycle]:
                    addict(ret, self.broadcasts[cycle][namespace])
        return ret
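
The ordering comment in get_broadcast is the heart of the function: broadcasts targeting every cycle point are applied before point-specific ones, and ancestor namespaces (root, then families) before the task itself, so the most specific setting wins. The addict / _addict helper is presumably a recursive dictionary merge in which later values override earlier ones; the sketch below works under that assumption, with invented sample data:

def addict(target, new):
    """Recursively merge `new` into `target`, later values overriding earlier ones."""
    for key, val in new.items():
        if isinstance(val, dict):
            addict(target.setdefault(key, {}), val)
        else:
            target[key] = val

# Settings broadcast to all cycle points ('*') and to one specific point.
broadcasts = {
    '*': {'root': {'environment': {'LEVEL': 'all-root'}}},
    '20200101': {'foo': {'environment': {'LEVEL': 'point-task'}}},
}
ret = {}
for cycle in ['*', '20200101']:            # general cycle scope first
    for namespace in ['root', 'foo']:      # root before the task itself
        if namespace in broadcasts.get(cycle, {}):
            addict(ret, broadcasts[cycle][namespace])
print(ret)   # {'environment': {'LEVEL': 'point-task'}}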
Example #16
    def update_task_proxies(self, updated_tasks=None):
        """Update dynamic fields of task nodes/proxies.

        Args:
            updated_tasks (list): [cylc.flow.task_proxy.TaskProxy]
                Update task-node from corresponding given list of
                task proxy objects from the workflow task pool.

        """
        if not updated_tasks:
            return
        tasks = self.data[self.workflow_id][TASKS]
        task_proxies = self.data[self.workflow_id][TASK_PROXIES]
        update_time = time()
        task_defs = {}

        # update task instance
        for itask in updated_tasks:
            name, point_string = TaskID.split(itask.identity)
            tp_id = (
                f'{self.workflow_id}{ID_DELIM}{point_string}{ID_DELIM}{name}')
            if (tp_id not in task_proxies
                    and tp_id not in self.added[TASK_PROXIES]):
                continue
            # Gather task definitions for elapsed time recalculation.
            if name not in task_defs:
                task_defs[name] = itask.tdef
            # Create new message and copy existing message content.
            tp_delta = self.updated[TASK_PROXIES].setdefault(
                tp_id, PbTaskProxy(id=tp_id))
            tp_delta.stamp = f'{tp_id}@{update_time}'
            tp_delta.state = itask.state.status
            if tp_id in task_proxies:
                self.state_update_families.add(
                    task_proxies[tp_id].first_parent)
            else:
                self.state_update_families.add(
                    self.added[TASK_PROXIES][tp_id].first_parent)
            tp_delta.is_held = itask.state.is_held
            tp_delta.flow_label = itask.flow_label
            tp_delta.job_submits = itask.submit_num
            tp_delta.latest_message = itask.summary['latest_message']
            tp_delta.jobs[:] = [
                j_id for j_id in self.schd.job_pool.task_jobs.get(tp_id, [])
                if j_id not in task_proxies.get(tp_id, PbTaskProxy()).jobs
            ]
            prereq_list = []
            for prereq in itask.state.prerequisites:
                # Protobuf messages populated within
                prereq_obj = prereq.api_dump(self.workflow_id)
                if prereq_obj:
                    prereq_list.append(prereq_obj)
            tp_delta.prerequisites.extend(prereq_list)
            tp_delta.outputs = json.dumps({
                trigger: is_completed
                for trigger, _, is_completed in itask.state.outputs.get_all()
            })
            extras = {}
            if itask.tdef.clocktrigger_offset is not None:
                extras['Clock trigger time reached'] = (
                    itask.is_waiting_clock_done())
                extras['Triggers at'] = time2str(itask.clock_trigger_time)
            for trig, satisfied in itask.state.external_triggers.items():
                key = f'External trigger "{trig}"'
                if satisfied:
                    extras[key] = 'satisfied'
                else:
                    extras[key] = 'NOT satisfied'
            for label, satisfied in itask.state.xtriggers.items():
                sig = self.schd.xtrigger_mgr.get_xtrig_ctx(
                    itask, label).get_signature()
                extra = f'xtrigger "{label} = {sig}"'
                if satisfied:
                    extras[extra] = 'satisfied'
                else:
                    extras[extra] = 'NOT satisfied'
            tp_delta.extras = json.dumps(extras)

        # Recalculate the mean elapsed time of affected task definitions.
        for name, tdef in task_defs.items():
            elapsed_time = task_mean_elapsed_time(tdef)
            if elapsed_time:
                t_id = f'{self.workflow_id}{ID_DELIM}{name}'
                t_delta = PbTask(stamp=f'{t_id}@{update_time}',
                                 mean_elapsed_time=elapsed_time)
                self.updated[TASKS].setdefault(
                    t_id, PbTask(id=t_id)).MergeFrom(t_delta)
                tasks[t_id].MergeFrom(t_delta)
Example #17
    def generate_graph_elements(self, start_point=None, stop_point=None):
        """Generate edges and [ghost] nodes (family and task proxy elements).

        Args:
            start_point (cylc.flow.cycling.PointBase):
                Edge generation start point.
            stop_point (cylc.flow.cycling.PointBase):
                Edge generation stop point.

        """
        if not self.pool_points:
            return
        config = self.schd.config
        if start_point is None:
            start_point = min(self.pool_points)
        if stop_point is None:
            stop_point = max(self.pool_points)

        # Reference set for workflow relations
        new_edges = set()

        # Generate ungrouped edges
        for edge in config.get_graph_edges(start_point, stop_point):
            # Reference or create edge source & target nodes/proxies
            s_node = edge[0]
            t_node = edge[1]
            if s_node is None:
                continue
            # Is the source cycle point in the task pool?
            s_name, s_point = TaskID.split(s_node)
            s_point_cls = get_point(s_point)
            s_pool_point = False
            s_valid = TaskID.is_valid_id(s_node)
            if s_valid:
                s_pool_point = s_point_cls in self.pool_points
            # Is the target cycle point in the task pool?
            t_pool_point = False
            t_valid = t_node and TaskID.is_valid_id(t_node)
            if t_valid:
                t_name, t_point = TaskID.split(t_node)
                t_point_cls = get_point(t_point)
                t_pool_point = get_point(t_point) in self.pool_points

            # Proceed if either source or target cycle points
            # are in the task pool.
            if not s_pool_point and not t_pool_point:
                continue

            # If source/target is valid add/create the corresponding items.
            # TODO: if xtrigger is suite_state create remote ID
            source_id = (
                f'{self.workflow_id}{ID_DELIM}{s_point}{ID_DELIM}{s_name}')

            # Add valid source before checking for no target,
            # as source may be an isolate (hence no edges).
            if s_valid:
                s_task_id = f'{self.workflow_id}{ID_DELIM}{s_name}'
                # Add source points for pruning.
                self.edge_points.setdefault(s_point_cls, set())
                self.generate_ghost_task(s_task_id, source_id, s_point)
            # If the target is valid then create it.
            # Edges are only created for valid targets.
            # At present targets can't be xtriggers.
            if t_valid:
                target_id = (
                    f'{self.workflow_id}{ID_DELIM}{t_point}{ID_DELIM}{t_name}')
                t_task_id = f'{self.workflow_id}{ID_DELIM}{t_name}'
                # Add target points to associated source points for pruning.
                self.edge_points.setdefault(s_point_cls, set())
                self.edge_points[s_point_cls].add(t_point_cls)
                self.generate_ghost_task(t_task_id, target_id, t_point)

                # Initiate edge element.
                e_id = (
                    f'{self.workflow_id}{ID_DELIM}{s_node}{ID_DELIM}{t_node}')
                self.added[EDGES][e_id] = PbEdge(
                    id=e_id,
                    suicide=edge[3],
                    cond=edge[4],
                    source=source_id,
                    target=target_id,
                )
                new_edges.add(e_id)

                # Add edge id to node field for resolver reference
                self.updated[TASK_PROXIES].setdefault(
                    target_id, PbTaskProxy(id=target_id)).edges.append(e_id)
                if s_valid:
                    self.updated[TASK_PROXIES].setdefault(
                        source_id,
                        PbTaskProxy(id=source_id)).edges.append(e_id)
        if new_edges:
            getattr(self.updated[WORKFLOW], EDGES).edges.extend(new_edges)
Example #18
File: show.py  Project: ColemanTom/cylc
def main(_, options, suite, *task_args):
    """Implement "cylc show" CLI."""
    pclient = SuiteRuntimeClient(suite, timeout=options.comms_timeout)
    json_filter = {}

    if not task_args:
        query = WORKFLOW_META_QUERY
        query_kwargs = {
            'request_string': query,
            'variables': {
                'wFlows': [suite]
            }
        }
        # Print suite info.
        results = pclient('graphql', query_kwargs)
        for workflow in results['workflows']:
            flat_data = flatten_data(workflow)
            if options.json:
                json_filter.update(flat_data)
            else:
                for key, value in sorted(flat_data.items(), reverse=True):
                    ansiprint(
                        f'<bold>{key}:</bold> {value or "<m>(not given)</m>"}')

    task_names = [arg for arg in task_args if TaskID.is_valid_name(arg)]
    task_ids = [arg for arg in task_args if TaskID.is_valid_id_2(arg)]

    if task_names:
        tasks_query = TASK_META_QUERY
        tasks_kwargs = {
            'request_string': tasks_query,
            'variables': {
                'wFlows': [suite],
                'taskIds': task_names
            }
        }
        # Print suite info.
        results = pclient('graphql', tasks_kwargs)
        multi = len(results['tasks']) > 1
        for task in results['tasks']:
            flat_data = flatten_data(task['meta'])
            if options.json:
                json_filter.update({task['name']: flat_data})
            else:
                if multi:
                    print(f'----\nTASK NAME: {task["name"]}')
                for key, value in sorted(flat_data.items(), reverse=True):
                    ansiprint(
                        f'<bold>{key}:</bold> {value or "<m>(not given)</m>"}')

    if task_ids:
        tp_query = TASK_PREREQS_QUERY
        tp_kwargs = {
            'request_string': tp_query,
            'variables': {
                'wFlows': [suite],
                'taskIds': [
                    f'{c}{ID_DELIM}{n}' for n, c in [
                        TaskID.split(t_id)
                        for t_id in task_ids if TaskID.is_valid_id(t_id)
                    ]
                ] + [
                    f'{c}{ID_DELIM}{n}' for c, n in [
                        t_id.rsplit(TaskID.DELIM2, 1)
                        for t_id in task_ids if not TaskID.is_valid_id(t_id)
                    ]
                ]
            }
        }
        results = pclient('graphql', tp_kwargs)
        multi = len(results['taskProxies']) > 1
        for t_proxy in results['taskProxies']:
            task_id = TaskID.get(t_proxy['name'], t_proxy['cyclePoint'])
            if options.json:
                json_filter.update({task_id: t_proxy})
            else:
                if multi:
                    print(f'----\nTASK ID: {task_id}')
                prereqs = []
                for item in t_proxy['prerequisites']:
                    prefix = ''
                    multi_cond = len(item['conditions']) > 1
                    if multi_cond:
                        prereqs.append([
                            True, '', item['expression'].replace('c', ''),
                            item['satisfied']
                        ])
                    for cond in item['conditions']:
                        if multi_cond and not options.list_prereqs:
                            prefix = f'\t{cond["exprAlias"].strip("c")} = '
                        _, _, point, name = cond['taskId'].split(ID_DELIM)
                        cond_id = TaskID.get(name, point)
                        prereqs.append([
                            False, prefix, f'{cond_id} {cond["reqState"]}',
                            cond['satisfied']
                        ])
                if options.list_prereqs:
                    for composite, _, msg, _ in prereqs:
                        if not composite:
                            print(msg)
                else:
                    flat_meta = flatten_data(t_proxy['task']['meta'])
                    for key, value in sorted(flat_meta.items(), reverse=True):
                        ansiprint(f'<bold>{key}:</bold>'
                                  f' {value or "<m>(not given)</m>"}')
                    ansiprint('\n<bold>prerequisites</bold>'
                              ' (<red>- => not satisfied</red>):')
                    if not prereqs:
                        print('  (None)')
                    for _, prefix, msg, state in prereqs:
                        print_msg_state(f'{prefix}{msg}', state)

                    ansiprint('\n<bold>outputs</bold>'
                              ' (<red>- => not completed</red>):')
                    if not t_proxy['outputs']:
                        print('  (None)')
                    for key, val in t_proxy['outputs'].items():
                        print_msg_state(f'{task_id} {key}', val)
                    if t_proxy['extras']:
                        print('\nother:')
                        for key, value in t_proxy['extras'].items():
                            print('  o  %s ... %s' % (key, value))
        if not results['taskProxies']:
            ansiprint(f"<red>No matching tasks found: {task_ids}",
                      file=sys.stderr)
            sys.exit(1)

    if options.json:
        print(json.dumps(json_filter, indent=4))
Example #19
 def test_split(self):
     self.assertEqual(["a", '1'], TaskID.split("a.1"))
     self.assertEqual(["a", '_1'], TaskID.split("a._1"))
     self.assertEqual(["WTAS", '20101010T101010'],
                      TaskID.split("WTAS.20101010T101010"))
Example #20
def main(parser, options, *args, color=False):
    """Implement cylc cat-log CLI.

    Determine log path, user@host, batchview_cmd, and action (print, dir-list,
    cat, edit, or tail), and then if the log path is:
      a) local: perform action on log path, or
      b) remote: re-invoke cylc cat-log as a) on the remote account

    """
    if options.remote_args:
        # Invoked on job hosts for job logs only, as a wrapper to view_log().
        # Tail and batchview commands come from global config on the suite host.
        logpath, mode, tail_tmpl = options.remote_args[0:3]
        if logpath.startswith('$'):
            logpath = os.path.expandvars(logpath)
        elif logpath.startswith('~'):
            logpath = os.path.expanduser(logpath)
        try:
            batchview_cmd = options.remote_args[3]
        except IndexError:
            batchview_cmd = None
        res = view_log(logpath,
                       mode,
                       tail_tmpl,
                       batchview_cmd,
                       remote=True,
                       color=color)
        if res == 1:
            sys.exit(res)
        return

    suite_name = args[0]
    # Get long-format mode.
    try:
        mode = MODES[options.mode]
    except KeyError:
        mode = options.mode

    if len(args) == 1:
        # Cat suite logs, local only.
        if options.filename is not None:
            raise UserInputError("The '-f' option is for job logs only.")

        logpath = get_suite_run_log_name(suite_name)
        if options.rotation_num:
            logs = glob('%s.*' % logpath)
            logs.sort(key=os.path.getmtime, reverse=True)
            try:
                logpath = logs[int(options.rotation_num)]
            except IndexError:
                raise UserInputError("max rotation %d" % (len(logs) - 1))
        tail_tmpl = str(glbl_cfg().get_host_item("tail command template"))
        out = view_log(logpath, mode, tail_tmpl, color=color)
        if out == 1:
            sys.exit(1)
        if mode == 'edit':
            tmpfile_edit(out, options.geditor)
        return

    if len(args) == 2:
        # Cat task job logs, may be on suite or job host.
        if options.rotation_num is not None:
            raise UserInputError("only suite (not job) logs get rotated")
        task_id = args[1]
        try:
            task, point = TaskID.split(task_id)
        except ValueError:
            parser.error("Illegal task ID: %s" % task_id)
        if options.submit_num != NN:
            try:
                options.submit_num = "%02d" % int(options.submit_num)
            except ValueError:
                parser.error("Illegal submit number: %s" % options.submit_num)
        if options.filename is None:
            options.filename = JOB_LOG_OUT
        else:
            # Convert short filename args to long (e.g. 'o' to 'job.out').
            try:
                options.filename = JOB_LOG_OPTS[options.filename]
            except KeyError:
                # Is already long form (standard log, or custom).
                pass
        user_at_host, batch_sys_name, live_job_id = get_task_job_attrs(
            suite_name, point, task, options.submit_num)
        user, host = split_user_at_host(user_at_host)
        batchview_cmd = None
        if live_job_id is not None:
            # Job is currently running. Get special batch system log view
            # command (e.g. qcat) if one exists, and the log is out or err.
            conf_key = None
            if options.filename == JOB_LOG_OUT:
                if mode == 'cat':
                    conf_key = "out viewer"
                elif mode == 'tail':
                    conf_key = "out tailer"
            elif options.filename == JOB_LOG_ERR:
                if mode == 'cat':
                    conf_key = "err viewer"
                elif mode == 'tail':
                    conf_key = "err tailer"
            if conf_key is not None:
                conf = glbl_cfg().get_host_item("batch systems", host, user)
                batchview_cmd_tmpl = None
                try:
                    batchview_cmd_tmpl = conf[batch_sys_name][conf_key]
                except KeyError:
                    pass
                if batchview_cmd_tmpl is not None:
                    batchview_cmd = batchview_cmd_tmpl % {
                        "job_id": str(live_job_id)
                    }

        log_is_remote = (is_remote(host, user)
                         and (options.filename not in JOB_LOGS_LOCAL))
        log_is_retrieved = (glbl_cfg().get_host_item('retrieve job logs', host)
                            and live_job_id is None)
        if log_is_remote and (not log_is_retrieved or options.force_remote):
            logpath = os.path.normpath(
                get_remote_suite_run_job_dir(host, user, suite_name, point,
                                             task, options.submit_num,
                                             options.filename))
            tail_tmpl = str(glbl_cfg().get_host_item("tail command template",
                                                     host, user))
            # Reinvoke the cat-log command on the remote account.
            cmd = ['cat-log']
            if cylc.flow.flags.debug:
                cmd.append('--debug')
            for item in [logpath, mode, tail_tmpl]:
                cmd.append('--remote-arg=%s' % quote(item))
            if batchview_cmd:
                cmd.append('--remote-arg=%s' % quote(batchview_cmd))
            cmd.append(suite_name)
            is_edit_mode = (mode == 'edit')
            try:
                proc = remote_cylc_cmd(cmd,
                                       user,
                                       host,
                                       capture_process=is_edit_mode,
                                       manage=(mode == 'tail'))
            except KeyboardInterrupt:
                # Ctrl-C while tailing.
                pass
            else:
                if is_edit_mode:
                    # Write remote stdout to a temp file for viewing in editor.
                    # Only BUFSIZE bytes at a time in case huge stdout volume.
                    out = NamedTemporaryFile()
                    data = proc.stdout.read(BUFSIZE)
                    while data:
                        out.write(data)
                        data = proc.stdout.read(BUFSIZE)
                    os.chmod(out.name, S_IRUSR)
                    out.seek(0, 0)
        else:
            # Local task job or local job log.
            logpath = os.path.normpath(
                get_suite_run_job_dir(suite_name, point, task,
                                      options.submit_num, options.filename))
            tail_tmpl = str(glbl_cfg().get_host_item("tail command template"))
            out = view_log(logpath,
                           mode,
                           tail_tmpl,
                           batchview_cmd,
                           color=color)
            if mode != 'edit':
                sys.exit(out)
        if mode == 'edit':
            tmpfile_edit(out, options.geditor)
Example #21
    def update_task_proxies(self, updated_tasks=None):
        """Update dynamic fields of task nodes/proxies.

        Args:
            updated_tasks (list): [cylc.flow.task_proxy.TaskProxy]
                Update task-node from corresponding given list of
                task proxy objects from the workflow task pool.

        """
        if not updated_tasks:
            return
        tasks = self.data[self.workflow_id][TASKS]
        task_proxies = self.data[self.workflow_id][TASK_PROXIES]
        update_time = time()
        task_defs = {}

        # update task instance
        for itask in updated_tasks:
            name, point_string = TaskID.split(itask.identity)
            tp_id = (
                f'{self.workflow_id}{ID_DELIM}{point_string}{ID_DELIM}{name}')
            if (tp_id not in task_proxies and
                    tp_id not in self.updates[TASK_PROXIES]):
                continue
            self.cycle_states.setdefault(point_string, {})[name] = (
                itask.state.status, itask.state.is_held)
            # Gather task definitions for elapsed time recalculation.
            if name not in task_defs:
                task_defs[name] = itask.tdef
            # Create new message and copy existing message content.
            tp_delta = self.updates[TASK_PROXIES].setdefault(
                tp_id, PbTaskProxy(id=tp_id))
            tp_delta.stamp = f'{tp_id}@{update_time}'
            tp_delta.state = itask.state.status
            tp_delta.is_held = itask.state.is_held
            tp_delta.job_submits = itask.submit_num
            tp_delta.spawned = itask.has_spawned
            tp_delta.latest_message = itask.summary['latest_message']
            tp_delta.jobs[:] = [
                j_id
                for j_id in self.schd.job_pool.task_jobs.get(tp_id, [])
                if j_id not in task_proxies.get(tp_id, PbTaskProxy()).jobs
            ]
            tp_delta.broadcasts[:] = [
                f'{key}={val}' for key, val in
                self.schd.task_events_mgr.broadcast_mgr.get_broadcast(
                    itask.identity).items()]
            prereq_list = []
            for prereq in itask.state.prerequisites:
                # Protobuf messages populated within
                prereq_obj = prereq.api_dump(self.workflow_id)
                if prereq_obj:
                    prereq_list.append(prereq_obj)
            tp_delta.prerequisites.extend(prereq_list)
            tp_delta.outputs[:] = [
                f'{trigger}={is_completed}'
                for trigger, _, is_completed in itask.state.outputs.get_all()
            ]

        # Recalculate the mean elapsed time of affected task definitions.
        for name, tdef in task_defs.items():
            elapsed_time = task_mean_elapsed_time(tdef)
            if elapsed_time:
                t_id = f'{self.workflow_id}{ID_DELIM}{name}'
                t_delta = PbTask(
                    stamp=f'{t_id}@{update_time}',
                    mean_elapsed_time=elapsed_time
                )
                self.updates[TASKS].setdefault(
                    t_id,
                    PbTask(id=t_id)).MergeFrom(t_delta)
                tasks[t_id].MergeFrom(t_delta)
Example #22
    def generate_graph_elements(self,
                                edges=None,
                                task_proxies=None,
                                family_proxies=None,
                                start_point=None,
                                stop_point=None):
        """Generate edges and [ghost] nodes (family and task proxy elements).

        Args:
            edges (dict, optional):
                ID-PbEdge key-value mapping.
            task_proxies (dict, optional):
                ID-PbTaskProxy key-value mapping.
            family_proxies (dict, optional):
                ID-PbFamilyProxy key-value mapping.
            start_point (cylc.flow.cycling.PointBase):
                Edge generation start point.
            stop_point (cylc.flow.cycling.PointBase):
                Edge generation stop point.

        """
        if not self.pool_points:
            return
        config = self.schd.config
        tasks = self.data[self.workflow_id][TASKS]
        graph = PbEdges()
        if edges is None:
            edges = {}
        if task_proxies is None:
            task_proxies = {}
        if family_proxies is None:
            family_proxies = {}
        if start_point is None:
            start_point = min(self.pool_points)
        if stop_point is None:
            stop_point = max(self.pool_points)

        # Used for generating family [ghost] nodes
        new_points = set()

        # Generate ungrouped edges
        for edge in config.get_graph_edges(start_point, stop_point):
            # Reference or create edge source & target nodes/proxies
            s_node = edge[0]
            t_node = edge[1]
            if s_node is None:
                continue
            # Is the source cycle point in the task pool?
            s_name, s_point = TaskID.split(s_node)
            s_point_cls = get_point(s_point)
            s_pool_point = False
            s_valid = TaskID.is_valid_id(s_node)
            if s_valid:
                s_pool_point = s_point_cls in self.pool_points
            # Is the target cycle point in the task pool?
            t_pool_point = False
            t_valid = t_node and TaskID.is_valid_id(t_node)
            if t_valid:
                t_name, t_point = TaskID.split(t_node)
                t_point_cls = get_point(t_point)
                t_pool_point = get_point(t_point) in self.pool_points
            # Proceed if either source or target cycle points
            # are in the task pool.
            if not s_pool_point and not t_pool_point:
                continue
            # If source/target is valid add/create the corresponding items.
            # TODO: if xtrigger is suite_state create remote ID
            source_id = (
                f'{self.workflow_id}{ID_DELIM}{s_point}{ID_DELIM}{s_name}')
            if s_valid:
                s_task_id = f'{self.workflow_id}{ID_DELIM}{s_name}'
                new_points.add(s_point)
                # Add source points for pruning.
                self.edge_points.setdefault(s_point_cls, set())
                if source_id not in task_proxies:
                    task_proxies[source_id] = self.generate_ghost_task(s_node)
                if source_id not in tasks[s_task_id].proxies:
                    tasks[s_task_id].proxies.append(source_id)
            # Add valid source before checking for no target,
            # as source may be an isolate (hence no edges).
            # At present targets can't be xtriggers.
            if t_valid:
                target_id = (
                    f'{self.workflow_id}{ID_DELIM}{t_point}{ID_DELIM}{t_name}')
                t_task_id = f'{self.workflow_id}{ID_DELIM}{t_name}'
                new_points.add(t_point)
                # Add target points to associated source points for pruning.
                self.edge_points.setdefault(s_point_cls, set())
                self.edge_points[s_point_cls].add(t_point_cls)
                if target_id not in task_proxies:
                    task_proxies[target_id] = self.generate_ghost_task(t_node)
                if target_id not in tasks[t_task_id].proxies:
                    tasks[t_task_id].proxies.append(target_id)

                # Initiate edge element.
                e_id = (
                    f'{self.workflow_id}{ID_DELIM}{s_node}{ID_DELIM}{t_node}')
                edges[e_id] = PbEdge(
                    id=e_id,
                    suicide=edge[3],
                    cond=edge[4],
                )
                edges[e_id].source = source_id
                edges[e_id].target = target_id

                # Add edge id to node field for resolver reference
                task_proxies[target_id].edges.append(e_id)
                if s_valid:
                    task_proxies[source_id].edges.append(e_id)

        graph.edges.extend(edges.keys())

        if new_points:
            self.generate_ghost_families(family_proxies, new_points)

        # Replace the originals (atomic update, for access from other threads).
        self.data[self.workflow_id][TASK_PROXIES] = task_proxies
        self.data[self.workflow_id][EDGES] = edges
        self.data[self.workflow_id][GRAPH] = graph
Example #23
    def update_task_proxies(self, updated_tasks=None):
        """Update dynamic fields of task nodes/proxies.

        Args:
            updated_tasks (list): [cylc.flow.task_proxy.TaskProxy]
                Update task-node from corresponding given list of
                task proxy objects from the workflow task pool.

        """
        if not updated_tasks:
            return
        tasks = self.data[self.workflow_id][TASKS]
        task_proxies = self.data[self.workflow_id][TASK_PROXIES]
        update_time = time()
        task_defs = {}

        # update task instance
        for itask in updated_tasks:
            name, point_string = TaskID.split(itask.identity)
            tp_id = (
                f'{self.workflow_id}{ID_DELIM}{point_string}{ID_DELIM}{name}')
            if tp_id not in task_proxies:
                continue
            self.cycle_states.setdefault(point_string,
                                         {})[name] = (itask.state.status,
                                                      itask.state.is_held)
            # Gather task definitions for elapsed time recalculation.
            if name not in task_defs:
                task_defs[name] = itask.tdef
            # Create new message and copy existing message content.
            tproxy = PbTaskProxy()
            # to avoid modification while being read.
            tproxy.CopyFrom(task_proxies[tp_id])
            tproxy.stamp = f'{tp_id}@{update_time}'
            tproxy.state = itask.state.status
            tproxy.is_held = itask.state.is_held
            tproxy.job_submits = itask.submit_num
            tproxy.spawned = itask.has_spawned
            tproxy.latest_message = itask.summary['latest_message']
            tproxy.jobs[:] = self.schd.job_pool.task_jobs.get(tp_id, [])
            tproxy.broadcasts[:] = [
                f'{key}={val}' for key, val in self.schd.task_events_mgr.
                broadcast_mgr.get_broadcast(itask.identity).items()
            ]
            prereq_list = []
            for prereq in itask.state.prerequisites:
                # Protobuf messages populated within
                prereq_obj = prereq.api_dump(self.workflow_id)
                if prereq_obj:
                    prereq_list.append(prereq_obj)
            # Unlike the list assignments below, repeated composite message
            # fields cannot be assigned directly, so the field is cleared first.
            del tproxy.prerequisites[:]
            tproxy.prerequisites.extend(prereq_list)
            tproxy.outputs[:] = [
                f'{trigger}={is_completed}'
                for trigger, _, is_completed in itask.state.outputs.get_all()
            ]
            # Replace the original
            # (atomic update, for access from other threads).
            task_proxies[tp_id] = tproxy

        # Recalculate the mean elapsed time of affected task definitions.
        for name, tdef in task_defs.items():
            elapsed_time = task_mean_elapsed_time(tdef)
            if elapsed_time:
                t_id = f'{self.workflow_id}{ID_DELIM}{name}'
                tasks[t_id].stamp = f'{t_id}@{update_time}'
                tasks[t_id].mean_elapsed_time = elapsed_time
Example #24
def main(parser, options, suite, *task_ids):
    """cylc submit CLI.

    No TASK EVENT HOOKS are set for the submit command because there is
    no scheduler instance watching for task failure etc.

    Note: a suite contact env file is not written by this command (it
    would overwrite the real one if the suite is running).
    """
    if not options.verbose and not options.debug:
        LOG.setLevel(WARNING)
    for task_id in task_ids:
        if not TaskID.is_valid_id(task_id):
            raise UserInputError("Invalid task ID %s" % task_id)
    suiterc = get_suite_rc(suite)
    suite_dir = os.path.dirname(suiterc)
    # For user-defined batch system handlers
    sys.path.append(os.path.join(suite_dir, 'python'))

    # Load suite config and tasks
    config = SuiteConfig(
        suite, suiterc, options,
        load_template_vars(options.templatevars, options.templatevars_file))
    itasks = []
    for task_id in task_ids:
        name_str, point_str = TaskID.split(task_id)
        taskdefs = config.find_taskdefs(name_str)
        if not taskdefs:
            raise UserInputError("No task found for %s" % task_id)
        for taskdef in taskdefs:
            itasks.append(
                TaskProxy(taskdef,
                          get_point(point_str).standardise(),
                          is_startup=True))

    # Initialise job submit environment
    make_suite_run_tree(suite)
    # Extract job.sh from library, for use in job scripts.
    extract_resources(get_suite_srv_dir(suite), ['etc/job.sh'])
    pool = SubProcPool()
    owner = get_user()
    job_pool = JobPool(suite, owner)
    db_mgr = SuiteDatabaseManager()
    task_job_mgr = TaskJobManager(
        suite, pool, db_mgr,
        TaskEventsManager(suite, pool, db_mgr, BroadcastMgr(db_mgr), job_pool),
        job_pool)
    task_job_mgr.task_remote_mgr.single_task_mode = True
    task_job_mgr.job_file_writer.set_suite_env({
        'CYLC_UTC':
        str(config.cfg['cylc']['UTC mode']),
        'CYLC_DEBUG':
        str(cylc.flow.flags.debug).lower(),
        'CYLC_VERBOSE':
        str(cylc.flow.flags.verbose).lower(),
        'CYLC_SUITE_NAME':
        suite,
        'CYLC_CYCLING_MODE':
        str(config.cfg['scheduling']['cycling mode']),
        'CYLC_SUITE_INITIAL_CYCLE_POINT':
        str(config.cfg['scheduling']['initial cycle point']),
        'CYLC_SUITE_FINAL_CYCLE_POINT':
        str(config.cfg['scheduling']['final cycle point']),
    })

    ret_code = 0
    waiting_tasks = list(itasks)
    if options.dry_run:
        while waiting_tasks:
            prep_tasks, bad_tasks = task_job_mgr.prep_submit_task_jobs(
                suite, waiting_tasks, dry_run=True)
            for itask in prep_tasks + bad_tasks:
                waiting_tasks.remove(itask)
            if waiting_tasks:
                task_job_mgr.proc_pool.process()
                sleep(1.0)

        for itask in itasks:
            if itask.local_job_file_path:
                print(('JOB SCRIPT=%s' % itask.local_job_file_path))
            else:
                print(('Unable to prepare job file for %s' % itask.identity),
                      file=sys.stderr)
                ret_code = 1
    else:
        while waiting_tasks:
            for itask in task_job_mgr.submit_task_jobs(suite, waiting_tasks):
                waiting_tasks.remove(itask)
            if waiting_tasks:
                task_job_mgr.proc_pool.process()
                sleep(1.0)
        while task_job_mgr.proc_pool.is_not_done():
            task_job_mgr.proc_pool.process()
        for itask in itasks:
            if itask.summary.get('submit_method_id') is not None:
                print(('[%s] Job ID: %s' %
                       (itask.identity, itask.summary['submit_method_id'])))
            if itask.state(TASK_STATUS_SUBMIT_FAILED):
                ret_code = 1
    sys.exit(ret_code)
Example #25
def main(_, options, suite):
    """Implement cylc broadcast."""
    pclient = SuiteRuntimeClient(suite, timeout=options.comms_timeout)

    mutation_kwargs = {
        'request_string': MUTATION,
        'variables': {
            'wFlows': [suite],
            'bMode': 'Set',
            'cPoints': options.point_strings,
            'nSpaces': options.namespaces,
            'bSettings': options.settings,
            'bCutoff': options.expire,
        }
    }

    query_kwargs = {
        'request_string': QUERY,
        'variables': {
            'wFlows': [suite],
            'nIds': []
        }
    }

    if options.show or options.showtask:
        if options.showtask:
            try:
                task, point = TaskID.split(options.showtask)
                query_kwargs['variables']['nIds'] = [
                    f'{point}{ID_DELIM}{task}'
                ]
            except ValueError:
                raise UserInputError("TASKID must be " + TaskID.SYNTAX)
        result = pclient('graphql', query_kwargs)
        for wflow in result['workflows']:
            settings = wflow['broadcasts']
            padding = get_padding(settings) * ' '
            if options.raw:
                print(str(settings))
            else:
                print_tree(settings, padding, options.unicode)
        sys.exit(0)

    report_cancel = True
    report_set = False
    if options.clear:
        mutation_kwargs['variables']['bMode'] = 'Clear'

    if options.expire:
        mutation_kwargs['variables']['bMode'] = 'Expire'

    # implement namespace and cycle point defaults here
    namespaces = options.namespaces
    if not namespaces:
        namespaces = ["root"]
    point_strings = options.point_strings
    if not point_strings:
        point_strings = ["*"]

    if options.cancel or options.cancel_files:
        settings = []
        for option_item in options.cancel:
            if "=" in option_item:
                raise UserInputError(
                    "--cancel=[SEC]ITEM does not take a value")
            option_item = option_item.strip()
            setting = get_rdict(option_item)
            settings.append(setting)
        files_to_settings(settings, options.cancel_files, options.cancel)
        mutation_kwargs['variables'].update({
            'bMode': 'Clear',
            'cPoints': point_strings,
            'nSpaces': namespaces,
            'bSettings': settings,
        })

    if options.settings or options.setting_files:
        settings = []
        for option_item in options.settings:
            if "=" not in option_item:
                raise UserInputError("--set=[SEC]ITEM=VALUE requires a value")
            lhs, rhs = [s.strip() for s in option_item.split("=", 1)]
            setting = get_rdict(lhs, rhs)
            settings.append(setting)
        files_to_settings(settings, options.setting_files)
        mutation_kwargs['variables'].update({
            'bMode': 'Set',
            'cPoints': point_strings,
            'nSpaces': namespaces,
            'bSettings': settings,
        })
        report_cancel = False
        report_set = True

    results = pclient('graphql', mutation_kwargs)
    for result in results['broadcast']['result']:
        modified_settings = result['response'][0]
        bad_options = result['response'][1]
        if modified_settings:
            print(
                get_broadcast_change_report(modified_settings,
                                            is_cancel=report_cancel))
    sys.exit(report_bad_options(bad_options, is_set=report_set))
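
A minimal sketch of the 'bSettings' list passed to the mutation above, assuming get_rdict() turns each --set '[SEC]ITEM=VALUE' item into a nested dict keyed by section then item; the values are illustrative only.

# Assumed shape only: one nested dict per --set/--cancel item.
settings = [
    {'environment': {'FOO': 'bar'}},  # from --set='[environment]FOO=bar'
    {'script': 'echo hello'},         # from --set='script=echo hello'
]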
Example #27
    def generate_graph_elements(self, edges=None, graph=None,
                                task_proxies=None, family_proxies=None,
                                start_point=None, stop_point=None):
        """Generate edges and ghost nodes (proxy elements)."""
        config = self.schd.config
        if edges is None:
            edges = {}
        if graph is None:
            graph = PbEdges()
        if task_proxies is None:
            task_proxies = {}
        if family_proxies is None:
            family_proxies = {}
        if start_point is None:
            start_point = str(self.schd.pool.get_min_point() or '')
        if stop_point is None:
            stop_point = str(self.schd.pool.get_max_point() or '')

        cycle_points = set()

        # Generate ungrouped edges
        try:
            graph_edges = config.get_graph_edges(start_point, stop_point)
        except TypeError:
            graph_edges = []

        if graph_edges:
            for e_list in graph_edges:
                # Reference or create edge source & target nodes/proxies
                s_node = e_list[0]
                t_node = e_list[1]
                if s_node is None:
                    continue
                else:
                    name, point = TaskID.split(s_node)
                    if name not in self.tasks:
                        continue
                    cycle_points.add(point)
                    if s_node not in task_proxies:
                        task_proxies[s_node] = (
                            self._generate_ghost_task(s_node))
                    source_id = task_proxies[s_node].id
                    if source_id not in self.tasks[name].proxies:
                        self.tasks[name].proxies.append(source_id)
                if t_node:
                    if t_node not in task_proxies:
                        task_proxies[t_node] = (
                            self._generate_ghost_task(t_node))
                    target_id = task_proxies[t_node].id
                e_id = s_node + '/' + (t_node or 'NoTargetNode')
                edges[e_id] = PbEdge(
                    id=f"{self.workflow_id}/{e_id}",
                    source=source_id,
                    suicide=e_list[3],
                    cond=e_list[4],
                )
                if t_node:
                    edges[e_id].target = target_id
            graph.edges.extend([e.id for e in edges.values()])
            graph.leaves.extend(config.leaves)
            graph.feet.extend(config.feet)
            for key, info in config.suite_polling_tasks.items():
                graph.workflow_polling_tasks.add(
                    local_proxy=key,
                    workflow=info[0],
                    remote_proxy=info[1],
                    req_state=info[2],
                    graph_string=info[3],
                )

        self._generate_ghost_families(family_proxies, cycle_points)
        self.workflow.edges.CopyFrom(graph)
        # Replace the originals (atomic update, for access from other threads).
        self.task_proxies = task_proxies
        self.edges = edges
        self.graph = graph
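
A hedged sketch of one entry of config.get_graph_edges(start_point, stop_point) as consumed above; only indices 0, 1, 3 and 4 are read, so the tuple is assumed to be (source_node, target_node, <unused here>, suicide, conditional), with made-up values.

# Assumed edge tuple shape; values are illustrative only.
example_edge = ('foo.20200101T0000Z', 'bar.20200101T0000Z', None, False, False)
s_node, t_node = example_edge[0], example_edge[1]
is_suicide, is_conditional = example_edge[3], example_edge[4]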
Example #28
def main(_, options, suite):
    """Implement cylc broadcast."""
    pclient = SuiteRuntimeClient(
        suite, options.owner, options.host, options.port,
        options.comms_timeout)

    if options.show or options.showtask:
        if options.showtask:
            try:
                TaskID.split(options.showtask)
            except ValueError:
                raise UserInputError("TASKID must be " + TaskID.SYNTAX)
        settings = pclient('get_broadcast', {'task_id': options.showtask})
        padding = get_padding(settings) * ' '
        if options.raw:
            print(str(settings))
        else:
            print_tree(settings, padding, options.unicode)
        sys.exit(0)

    if options.clear:
        modified_settings, bad_options = pclient(
            'clear_broadcast',
            {'point_strings': options.point_strings,
             'namespaces': options.namespaces}
        )
        if modified_settings:
            print(get_broadcast_change_report(
                modified_settings, is_cancel=True))
        sys.exit(report_bad_options(bad_options))

    if options.expire:
        modified_settings, bad_options = pclient(
            'expire_broadcast',
            {'cutoff': options.expire}
        )
        if modified_settings:
            print(get_broadcast_change_report(
                modified_settings, is_cancel=True))
        sys.exit(report_bad_options(bad_options))

    # implement namespace and cycle point defaults here
    namespaces = options.namespaces
    if not namespaces:
        namespaces = ["root"]
    point_strings = options.point_strings
    if not point_strings:
        point_strings = ["*"]

    if options.cancel or options.cancel_files:
        settings = []
        for option_item in options.cancel:
            if "=" in option_item:
                raise UserInputError(
                    "--cancel=[SEC]ITEM does not take a value")
            option_item = option_item.strip()
            setting = get_rdict(option_item)
            settings.append(setting)
        files_to_settings(settings, options.cancel_files, options.cancel)
        modified_settings, bad_options = pclient(
            'clear_broadcast',
            {'point_strings': point_strings,
             'namespaces': namespaces,
             'cancel_settings': settings}
        )
        if modified_settings:
            print(get_broadcast_change_report(
                modified_settings, is_cancel=True))
        sys.exit(report_bad_options(bad_options))

    if options.settings or options.setting_files:
        settings = []
        for option_item in options.settings:
            if "=" not in option_item:
                raise UserInputError(
                    "--set=[SEC]ITEM=VALUE requires a value")
            lhs, rhs = [s.strip() for s in option_item.split("=", 1)]
            setting = get_rdict(lhs, rhs)
            settings.append(setting)
        files_to_settings(settings, options.setting_files)
        modified_settings, bad_options = pclient(
            'put_broadcast',
            {'point_strings': point_strings,
             'namespaces': namespaces,
             'settings': settings
             }
        )
        print(get_broadcast_change_report(modified_settings))
        sys.exit(report_bad_options(bad_options, is_set=True))
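
A hedged sketch of the (modified_settings, bad_options) pair that the put_broadcast and clear_broadcast calls above are assumed to return; the structure is inferred from how the report helpers consume it, and the values are illustrative.

# Assumed shapes only.
modified_settings = [
    # (cycle point glob, namespace, nested setting dict)
    ('*', 'root', {'environment': {'FOO': 'bar'}}),
]
bad_options = {}  # e.g. {'point_strings': ['20300101T00']} if nothing matched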
Example #29
def main(parser: COP,
         options: 'Values',
         reg: str,
         task_id: Optional[str] = None,
         color: bool = False) -> None:
    """Implement cylc cat-log CLI.

    Determine log path, user@host, batchview_cmd, and action (print, dir-list,
    cat, edit, or tail), and then if the log path is:
      a) local: perform action on log path, or
      b) remote: re-invoke cylc cat-log as a) on the remote account

    """
    if options.remote_args:
        # Invoked on job hosts for job logs only, as a wrapper to view_log().
        # Tail and batchview commands come from global config on the
        # workflow host.
        logpath, mode, tail_tmpl = options.remote_args[0:3]
        logpath = expand_path(logpath)
        tail_tmpl = expand_path(tail_tmpl)
        try:
            batchview_cmd = options.remote_args[3]
        except IndexError:
            batchview_cmd = None
        res = view_log(logpath,
                       mode,
                       tail_tmpl,
                       batchview_cmd,
                       remote=True,
                       color=color)
        if res == 1:
            sys.exit(res)
        return

    workflow_name, _ = parse_reg(reg)
    # Get long-format mode.
    try:
        mode = MODES[options.mode]
    except KeyError:
        mode = options.mode

    if not task_id:
        # Cat workflow logs, local only.
        if options.filename is not None:
            raise UserInputError("The '-f' option is for job logs only.")

        logpath = get_workflow_run_log_name(workflow_name)
        if options.rotation_num:
            logs = glob('%s.*' % logpath)
            logs.sort(key=os.path.getmtime, reverse=True)
            try:
                logpath = logs[int(options.rotation_num)]
            except IndexError:
                raise UserInputError("max rotation %d" % (len(logs) - 1))
        tail_tmpl = os.path.expandvars(get_platform()["tail command template"])
        out = view_log(logpath, mode, tail_tmpl, color=color)
        if out == 1:
            sys.exit(1)
        if mode == 'edit':
            tmpfile_edit(out, options.geditor)
        return

    if task_id:
        # Cat task job logs, may be on workflow or job host.
        if options.rotation_num is not None:
            raise UserInputError("only workflow (not job) logs get rotated")
        try:
            task, point = TaskID.split(task_id)
        except ValueError:
            parser.error("Illegal task ID: %s" % task_id)
        if options.submit_num != NN:
            try:
                options.submit_num = "%02d" % int(options.submit_num)
            except ValueError:
                parser.error("Illegal submit number: %s" % options.submit_num)
        if options.filename is None:
            options.filename = JOB_LOG_OUT
        else:
            # Convert short filename args to long (e.g. 'o' to 'job.out').
            with suppress(KeyError):
                options.filename = JOB_LOG_OPTS[options.filename]
                # KeyError: Is already long form (standard log, or custom).
        platform_name, job_runner_name, live_job_id = get_task_job_attrs(
            workflow_name, point, task, options.submit_num)
        platform = get_platform(platform_name)
        batchview_cmd = None
        if live_job_id is not None:
            # Job is currently running. Get special job runner log view
            # command (e.g. qcat) if one exists, and the log is out or err.
            conf_key = None
            if options.filename == JOB_LOG_OUT:
                if mode == 'cat':
                    conf_key = "out viewer"
                elif mode == 'tail':
                    conf_key = "out tailer"
            elif options.filename == JOB_LOG_ERR:
                if mode == 'cat':
                    conf_key = "err viewer"
                elif mode == 'tail':
                    conf_key = "err tailer"
            if conf_key is not None:
                batchview_cmd_tmpl = None
                with suppress(KeyError):
                    batchview_cmd_tmpl = platform[conf_key]
                if batchview_cmd_tmpl is not None:
                    batchview_cmd = batchview_cmd_tmpl % {
                        "job_id": str(live_job_id)
                    }

        log_is_remote = (is_remote_platform(platform)
                         and (options.filename != JOB_LOG_ACTIVITY))
        log_is_retrieved = (platform['retrieve job logs']
                            and live_job_id is None)
        if log_is_remote and (not log_is_retrieved or options.force_remote):
            logpath = os.path.normpath(
                get_remote_workflow_run_job_dir(workflow_name, point, task,
                                                options.submit_num,
                                                options.filename))
            tail_tmpl = platform["tail command template"]
            # Reinvoke the cat-log command on the remote account.
            cmd = ['cat-log', *verbosity_to_opts(cylc.flow.flags.verbosity)]
            for item in [logpath, mode, tail_tmpl]:
                cmd.append('--remote-arg=%s' % shlex.quote(item))
            if batchview_cmd:
                cmd.append('--remote-arg=%s' % shlex.quote(batchview_cmd))
            cmd.append(workflow_name)
            is_edit_mode = (mode == 'edit')
            # TODO: Add Intelligent Host selection to this
            try:
                proc = remote_cylc_cmd(cmd,
                                       platform,
                                       capture_process=is_edit_mode,
                                       manage=(mode == 'tail'))
            except KeyboardInterrupt:
                # Ctrl-C while tailing.
                pass
            else:
                if is_edit_mode:
                    # Write remote stdout to a temp file for viewing in editor.
                    # Read only BUFSIZE bytes at a time in case of a huge
                    # stdout volume.
                    out = NamedTemporaryFile()
                    data = proc.stdout.read(BUFSIZE)
                    while data:
                        out.write(data)
                        data = proc.stdout.read(BUFSIZE)
                    os.chmod(out.name, S_IRUSR)
                    out.seek(0, 0)
        else:
            # Local task job or local job log.
            logpath = os.path.normpath(
                get_workflow_run_job_dir(workflow_name, point, task,
                                         options.submit_num, options.filename))
            tail_tmpl = os.path.expandvars(platform["tail command template"])
            out = view_log(logpath,
                           mode,
                           tail_tmpl,
                           batchview_cmd,
                           color=color)
            if mode != 'edit':
                sys.exit(out)
        if mode == 'edit':
            tmpfile_edit(out, options.geditor)
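
For illustration, the remote re-invocation command list assembled above might look roughly like this when tailing a remote job.out; the log path and tail template are assumptions, verbosity_to_opts() may add -v/--debug flags after 'cat-log', and shlex.quote() protects items containing spaces.

# Hypothetical values only.
cmd = [
    'cat-log',
    '--remote-arg=/home/user/cylc-run/my_workflow/log/job/20200101T0000Z/foo/NN/job.out',
    '--remote-arg=tail',
    "--remote-arg='tail -n +1 -F %(filename)s'",  # assumed tail command template
    'my_workflow',
]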
Example #30
File: test_task_id.py  Project: cylc/cylc
    def test_split(self):
        self.assertEqual(["a", '1'], TaskID.split("a.1"))
        self.assertEqual(["a", '_1'], TaskID.split("a._1"))
        self.assertEqual(
            ["WTAS", '20101010T101010'], TaskID.split("WTAS.20101010T101010"))