Example #1
    def __init__(self, tdef, point, status, is_held):
        self.identity = TaskID.get(tdef.name, str(point))
        self.status = status
        self.is_held = is_held
        self.is_updated = False
        self.time_updated = None

        self._is_satisfied = None
        self._suicide_is_satisfied = None

        # Prerequisites.
        self.prerequisites = []
        self.suicide_prerequisites = []
        self._add_prerequisites(point, tdef)

        # External Triggers.
        self.external_triggers = {}
        for ext in tdef.external_triggers:
            # Allow cycle-point-specific external triggers - GitHub #1893.
            if '$CYLC_TASK_CYCLE_POINT' in ext:
                ext = ext.replace('$CYLC_TASK_CYCLE_POINT', str(point))
            # set unsatisfied
            self.external_triggers[ext] = False

        # xtriggers (represented by labels) satisfied or not
        self.xtriggers = {}
        self._add_xtriggers(point, tdef)

        # Message outputs.
        self.outputs = TaskOutputs(tdef)
        self.kill_failed = False
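
The external-trigger loop above specialises templated trigger names per cycle point before registering each trigger as unsatisfied. A standalone sketch of that substitution, with invented trigger names (only the replace-and-register pattern is taken from the example):

external_triggers = {}
point = '20100101T00'

for ext in ('data_ready', 'obs_arrived_$CYLC_TASK_CYCLE_POINT'):
    # Cycle-point-specific external triggers (GitHub #1893): substitute
    # the template variable before registering the trigger.
    if '$CYLC_TASK_CYCLE_POINT' in ext:
        ext = ext.replace('$CYLC_TASK_CYCLE_POINT', point)
    external_triggers[ext] = False  # registered, not yet satisfied

assert external_triggers == {
    'data_ready': False,
    'obs_arrived_20100101T00': False,
}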
Example #2
    def __init__(self,
                 tdef: 'TaskDef',
                 start_point: 'PointBase',
                 flow_label: Optional[str],
                 status: str = TASK_STATUS_WAITING,
                 is_held: bool = False,
                 submit_num: int = 0,
                 is_late: bool = False,
                 reflow: bool = True) -> None:

        self.tdef = tdef
        if submit_num is None:
            submit_num = 0
        self.submit_num = submit_num
        self.jobs: List[str] = []
        self.flow_label = flow_label
        self.reflow = reflow
        self.point = start_point
        self.identity: str = TaskID.get(self.tdef.name, self.point)

        self.reload_successor: Optional['TaskProxy'] = None
        self.point_as_seconds: Optional[int] = None

        self.is_manual_submit = False
        self.summary: Dict[str, Any] = {
            'submitted_time': None,
            'submitted_time_string': None,
            'started_time': None,
            'started_time_string': None,
            'finished_time': None,
            'finished_time_string': None,
            'logfiles': [],
            'platforms_used': {},
            'execution_time_limit': None,
            'job_runner_name': None,
            'submit_method_id': None,
            'flow_label': None
        }

        self.local_job_file_path: Optional[str] = None

        self.platform = get_platform()

        self.job_vacated = False
        self.poll_timer: Optional['TaskActionTimer'] = None
        self.timeout: Optional[float] = None
        self.try_timers: Dict[str, 'TaskActionTimer'] = {}
        self.non_unique_events = Counter()  # type: ignore # TODO: figure out

        self.clock_trigger_time: Optional[float] = None
        self.expire_time: Optional[float] = None
        self.late_time: Optional[float] = None
        self.is_late = is_late
        self.waiting_on_job_prep = True

        self.state = TaskState(tdef, self.point, status, is_held)

        # Determine graph children of this task (for spawning).
        self.graph_children = generate_graph_children(tdef, self.point)
Example #3
 def insert_db_job(self, row_idx, row):
     """Load job element from DB post restart."""
     if row_idx == 0:
         LOG.info("LOADING job data")
     (point_string, name, status, submit_num, time_submit, time_run,
      time_run_exit, batch_sys_name, batch_sys_job_id, platform_name) = row
     if status not in JOB_STATUS_SET:
         return
     t_id = f'{self.workflow_id}{ID_DELIM}{point_string}{ID_DELIM}{name}'
     j_id = f'{t_id}{ID_DELIM}{submit_num}'
     try:
         tdef = self.schd.config.get_taskdef(name)
         j_owner = self.schd.owner
         if platform_name:
             j_host = get_host_from_platform(get_platform(platform_name))
         else:
             j_host = self.schd.host
         j_buf = PbJob(
             stamp=f'{j_id}@{time()}',
             id=j_id,
             submit_num=submit_num,
             state=status,
             task_proxy=t_id,
             submitted_time=time_submit,
             started_time=time_run,
             finished_time=time_run_exit,
             batch_sys_name=batch_sys_name,
             batch_sys_job_id=batch_sys_job_id,
             host=j_host,
             owner=j_owner,
             name=name,
             cycle_point=point_string,
         )
         # Add in log files.
         j_buf.job_log_dir = get_task_job_log(self.schd.suite, point_string,
                                              name, submit_num)
         overrides = self.schd.task_events_mgr.broadcast_mgr.get_broadcast(
             TaskID.get(name, point_string))
         if overrides:
             rtconfig = pdeepcopy(tdef.rtconfig)
             poverride(rtconfig, overrides, prepend=True)
         else:
             rtconfig = tdef.rtconfig
         j_buf.extra_logs.extend([
             os.path.expanduser(os.path.expandvars(log_file))
             for log_file in rtconfig['extra log files']
         ])
     except SuiteConfigError:
         LOG.exception(
             ('ignoring job %s from the suite run database\n'
              '(its task definition has probably been deleted).') % j_id)
     except Exception:
         LOG.exception('could not load job %s' % j_id)
     else:
         self.added[j_id] = j_buf
         self.task_jobs.setdefault(t_id, set()).add(j_id)
         self.updates_pending = True
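
Example #3 deep-copies the task's runtime config only when a broadcast override actually exists, otherwise sharing the pristine tdef.rtconfig. A sketch of that copy-on-override pattern using plain dicts (deepcopy/update stand in for the real pdeepcopy/poverride, which merge nested sections):

from copy import deepcopy


def effective_rtconfig(rtconfig, overrides):
    """Copy-on-override: share the original config unless overrides apply."""
    if overrides:
        merged = deepcopy(rtconfig)  # stand-in for pdeepcopy
        merged.update(overrides)     # the real poverride merges recursively
        return merged
    return rtconfig  # no broadcast: no copy needed


base = {'extra log files': [], 'title': 'base'}
assert effective_rtconfig(base, None) is base  # shared, not copied
assert effective_rtconfig(base, {'title': 'x'})['title'] == 'x'
assert base['title'] == 'base'  # original untouched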
Example #4
    def _generate_ghost_families(self, family_proxies=None, cycle_points=None):
        """Generate the family proxies from tasks in cycle points."""
        update_time = time()
        if family_proxies is None:
            family_proxies = {}
        fam_proxy_ids = {}
        for point_string, tasks in self.cycle_states.items():
            # construct family tree based on the
            # first-parent single-inheritance tree
            if not cycle_points or point_string not in cycle_points:
                continue
            cycle_first_parents = set([])

            for key in tasks:
                for parent in self.ancestors.get(key, []):
                    if parent == key:
                        continue
                    cycle_first_parents.add(parent)

            for fam in cycle_first_parents:
                if fam not in self.families:
                    continue
                int_id = TaskID.get(fam, point_string)
                fp_id = f"{self.workflow_id}/{point_string}/{fam}"
                fp_check = f"{int_id}@{update_time}"
                fproxy = PbFamilyProxy(
                    checksum=fp_check,
                    id=fp_id,
                    cycle_point=point_string,
                    name=fam,
                    family=f"{self.workflow_id}/{fam}",
                    depth=self.families[fam].depth,
                )
                for child_name in self.descendants[fam]:
                    ch_id = f"{self.workflow_id}/{point_string}/{child_name}"
                    if self.parents[child_name][0] == fam:
                        if child_name in cycle_first_parents:
                            fproxy.child_families.append(ch_id)
                        elif child_name in self.tasks:
                            fproxy.child_tasks.append(ch_id)
                if self.parents[fam]:
                    fproxy.parents.extend([
                        f"{self.workflow_id}/{point_string}/{p_name}"
                        for p_name in self.parents[fam]
                    ])
                    p1_name = self.parents[fam][0]
                    fproxy.first_parent = (
                        f"{self.workflow_id}/{point_string}/{p1_name}")
                family_proxies[int_id] = fproxy
                fam_proxy_ids.setdefault(fam, []).append(fp_id)
        self.family_proxies = family_proxies
        for fam, ids in fam_proxy_ids.items():
            self.families[fam].proxies[:] = ids
Example #5
    def update_family_proxies(self, cycle_points=None):
        """Update state of family proxies"""
        update_time = time()

        # Compute state_counts (total, and per cycle).
        all_states = []
        state_count_cycles = {}

        for point_string, c_task_states in self.cycle_states.items():
            # For each cycle point, construct a family state tree
            # based on the first-parent single-inheritance tree
            if cycle_points and point_string not in cycle_points:
                continue
            c_fam_task_states = {}
            count = {}

            for key in c_task_states:
                state = c_task_states[key]
                if state is None:
                    continue
                try:
                    count[state] += 1
                except KeyError:
                    count[state] = 1

                all_states.append(state)
                for parent in self.ancestors.get(key, []):
                    if parent == key:
                        continue
                    c_fam_task_states.setdefault(parent, set([]))
                    c_fam_task_states[parent].add(state)

            state_count_cycles[point_string] = count

            for fam, child_states in c_fam_task_states.items():
                state = extract_group_state(child_states)
                int_id = TaskID.get(fam, point_string)
                if state is None or int_id not in self.family_proxies:
                    continue
                fproxy = self.family_proxies[int_id]
                fproxy.checksum = f"{int_id}@{update_time}"
                fproxy.state = state

        self.all_states = all_states
        self.state_count_cycles = state_count_cycles
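
The per-cycle tally above keeps the try/except KeyError idiom (retained for old-Python compatibility, as comments in later examples note); with collections.Counter, which the newer examples already use for non_unique_events, the same count is a one-liner. A minimal equivalent with invented task states:

from collections import Counter

c_task_states = {'t1': 'running', 't2': 'succeeded', 't3': 'running', 't4': None}

# Tally states for one cycle point, skipping tasks whose state is unset.
count = Counter(s for s in c_task_states.values() if s is not None)
assert count == Counter({'running': 2, 'succeeded': 1})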
Example #6
def main(_, options, suite, *task_args):
    """Implement "cylc show" CLI."""
    pclient = SuiteRuntimeClient(suite, timeout=options.comms_timeout)
    json_filter = {}

    if not task_args:
        query = WORKFLOW_META_QUERY
        query_kwargs = {
            'request_string': query,
            'variables': {
                'wFlows': [suite]
            }
        }
        # Print suite info.
        results = pclient('graphql', query_kwargs)
        for workflow in results['workflows']:
            flat_data = flatten_data(workflow)
            if options.json:
                json_filter.update(flat_data)
            else:
                for key, value in sorted(flat_data.items(), reverse=True):
                    ansiprint(
                        f'<bold>{key}:</bold> {value or "<m>(not given)</m>"}')

    task_names = [arg for arg in task_args if TaskID.is_valid_name(arg)]
    task_ids = [arg for arg in task_args if TaskID.is_valid_id_2(arg)]

    if task_names:
        tasks_query = TASK_META_QUERY
        tasks_kwargs = {
            'request_string': tasks_query,
            'variables': {
                'wFlows': [suite],
                'taskIds': task_names
            }
        }
        # Print suite info.
        results = pclient('graphql', tasks_kwargs)
        multi = len(results['tasks']) > 1
        for task in results['tasks']:
            flat_data = flatten_data(task['meta'])
            if options.json:
                json_filter.update({task['name']: flat_data})
            else:
                if multi:
                    print(f'----\nTASK NAME: {task["name"]}')
                for key, value in sorted(flat_data.items(), reverse=True):
                    ansiprint(
                        f'<bold>{key}:</bold> {value or "<m>(not given)</m>"}')

    if task_ids:
        tp_query = TASK_PREREQS_QUERY
        tp_kwargs = {
            'request_string': tp_query,
            'variables': {
                'wFlows': [suite],
                'taskIds': [
                    f'{c}{ID_DELIM}{n}' for n, c in [
                        TaskID.split(t_id)
                        for t_id in task_ids if TaskID.is_valid_id(t_id)
                    ]
                ] + [
                    f'{c}{ID_DELIM}{n}' for c, n in [
                        t_id.rsplit(TaskID.DELIM2, 1)
                        for t_id in task_ids if not TaskID.is_valid_id(t_id)
                    ]
                ]
            }
        }
        results = pclient('graphql', tp_kwargs)
        multi = len(results['taskProxies']) > 1
        for t_proxy in results['taskProxies']:
            task_id = TaskID.get(t_proxy['name'], t_proxy['cyclePoint'])
            if options.json:
                json_filter.update({task_id: t_proxy})
            else:
                if multi:
                    print(f'----\nTASK ID: {task_id}')
                prereqs = []
                for item in t_proxy['prerequisites']:
                    prefix = ''
                    multi_cond = len(item['conditions']) > 1
                    if multi_cond:
                        prereqs.append([
                            True, '', item['expression'].replace('c', ''),
                            item['satisfied']
                        ])
                    for cond in item['conditions']:
                        if multi_cond and not options.list_prereqs:
                            prefix = f'\t{cond["exprAlias"].strip("c")} = '
                        _, _, point, name = cond['taskId'].split(ID_DELIM)
                        cond_id = TaskID.get(name, point)
                        prereqs.append([
                            False, prefix, f'{cond_id} {cond["reqState"]}',
                            cond['satisfied']
                        ])
                if options.list_prereqs:
                    for composite, _, msg, _ in prereqs:
                        if not composite:
                            print(msg)
                else:
                    flat_meta = flatten_data(t_proxy['task']['meta'])
                    for key, value in sorted(flat_meta.items(), reverse=True):
                        ansiprint(f'<bold>{key}:</bold>'
                                  f' {value or "<m>(not given)</m>"}')
                    ansiprint('\n<bold>prerequisites</bold>'
                              ' (<red>- => not satisfied</red>):')
                    if not prereqs:
                        print('  (None)')
                    for _, prefix, msg, state in prereqs:
                        print_msg_state(f'{prefix}{msg}', state)

                    ansiprint('\n<bold>outputs</bold>'
                              ' (<red>- => not completed</red>):')
                    if not t_proxy['outputs']:
                        print('  (None)')
                    for key, val in t_proxy['outputs'].items():
                        print_msg_state(f'{task_id} {key}', val)
                    if t_proxy['extras']:
                        print('\nother:')
                        for key, value in t_proxy['extras'].items():
                            print('  o  %s ... %s' % (key, value))
        if not results['taskProxies']:
            ansiprint(f"<red>No matching tasks found: {task_ids}",
                      file=sys.stderr)
            sys.exit(1)

    if options.json:
        print(json.dumps(json_filter, indent=4))
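
Example #6 accepts task IDs in two spellings, 'name.point' and 'point/name', and normalises both to the internal '{cycle}{ID_DELIM}{name}' form used in the GraphQL variables. A standalone sketch of that normalisation (the ID_DELIM value and the helper are illustrative assumptions, not the real cylc constants):

ID_DELIM = '|'  # assumed delimiter value, stand-in for the real constant


def to_internal_id(t_id):
    """Normalise 'name.point' or 'point/name' to 'point|name' (sketch)."""
    if '/' in t_id:
        point, name = t_id.rsplit('/', 1)  # 'point/name' spelling
    else:
        name, point = t_id.split('.', 1)   # 'name.point' spelling
    return f'{point}{ID_DELIM}{name}'


assert to_internal_id('foo.20100101T00') == '20100101T00|foo'
assert to_internal_id('20100101T00/foo') == '20100101T00|foo'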
Example #7
 def test_get(self):
     self.assertEqual("a.1", TaskID.get("a", 1))
     self.assertEqual("a._1", TaskID.get("a", "_1"))
     self.assertEqual("WTASK.20101010T101010",
                      TaskID.get("WTASK", "20101010T101010"))
Example #8
    def __init__(self,
                 tdef,
                 start_point,
                 flow_label,
                 status=TASK_STATUS_WAITING,
                 is_held=False,
                 submit_num=0,
                 is_late=False,
                 reflow=True):
        self.tdef = tdef
        if submit_num is None:
            submit_num = 0
        self.submit_num = submit_num
        self.jobs = []
        self.flow_label = flow_label
        self.reflow = reflow
        self.point = start_point
        self.identity = TaskID.get(self.tdef.name, self.point)

        self.reload_successor = None
        self.point_as_seconds = None

        self.manual_trigger = False
        self.is_manual_submit = False
        self.summary = {
            'latest_message': '',
            'submitted_time': None,
            'submitted_time_string': None,
            'started_time': None,
            'started_time_string': None,
            'finished_time': None,
            'finished_time_string': None,
            'logfiles': [],
            'platforms_used': {},
            'execution_time_limit': None,
            'batch_sys_name': None,
            'submit_method_id': None,
            'flow_label': None
        }

        self.local_job_file_path = None

        self.platform = get_platform()
        self.task_owner = None

        self.job_vacated = False
        self.poll_timer = None
        self.timeout = None
        self.try_timers = {}
        # Use dict here for Python 2.6 compat.
        # Should use collections.Counter in Python 2.7+
        self.non_unique_events = {}

        self.clock_trigger_time = None
        self.expire_time = None
        self.late_time = None
        self.is_late = is_late

        self.state = TaskState(tdef, self.point, status, is_held)

        # Determine graph children of this task (for spawning).
        self.graph_children = {}
        for seq, dout in tdef.graph_children.items():
            for output, downs in dout.items():
                if output not in self.graph_children:
                    self.graph_children[output] = []
                for name, trigger in downs:
                    child_point = trigger.get_child_point(self.point, seq)
                    is_abs = (trigger.offset_is_absolute
                              or trigger.offset_is_from_icp)
                    if is_abs:
                        if trigger.get_parent_point(self.point) != self.point:
                            # If 'foo[^] => bar' only spawn off of '^'.
                            continue
                    if seq.is_on_sequence(child_point):
                        # E.g.: foo should trigger only on T06:
                        #   PT6H = "waz"
                        #   T06 = "waz[-PT6H] => foo"
                        self.graph_children[output].append(
                            (name, child_point, is_abs))

        if tdef.sequential:
            # Add next-instance child.
            nexts = []
            for seq in tdef.sequences:
                nxt = seq.get_next_point(self.point)
                if nxt is not None:
                    # Within sequence bounds.
                    nexts.append(nxt)
            if nexts:
                if TASK_OUTPUT_SUCCEEDED not in self.graph_children:
                    self.graph_children[TASK_OUTPUT_SUCCEEDED] = []
                self.state.outputs.add(TASK_OUTPUT_SUCCEEDED)
                self.graph_children[TASK_OUTPUT_SUCCEEDED].append(
                    (tdef.name, min(nexts), False))

        if TASK_OUTPUT_FAILED in self.graph_children:
            self.failure_handled = True
        else:
            self.failure_handled = False
Example #9
    def __init__(
            self, tdef, start_point, status=TASK_STATUS_WAITING,
            hold_swap=None, has_spawned=False, stop_point=None,
            is_startup=False, submit_num=0, is_late=False):
        self.tdef = tdef
        if submit_num is None:
            submit_num = 0
        self.submit_num = submit_num

        if is_startup:
            # adjust up to the first on-sequence cycle point
            adjusted = []
            for seq in self.tdef.sequences:
                adj = seq.get_first_point(start_point)
                if adj:
                    # may be None if out of sequence bounds
                    adjusted.append(adj)
            if not adjusted:
                # This task is out of sequence bounds
                raise TaskProxySequenceBoundsError(self.tdef.name)
            self.point = min(adjusted)
            self.late_time = None
        else:
            self.point = start_point
        self.cleanup_cutoff = self.tdef.get_cleanup_cutoff_point(self.point)
        self.identity = TaskID.get(self.tdef.name, self.point)

        self.has_spawned = has_spawned
        self.reload_successor = None
        self.point_as_seconds = None

        # Manually inserted tasks may have a final cycle point set.
        self.stop_point = stop_point

        self.manual_trigger = False
        self.is_manual_submit = False
        self.summary = {
            'latest_message': '',
            'submitted_time': None,
            'submitted_time_string': None,
            'started_time': None,
            'started_time_string': None,
            'finished_time': None,
            'finished_time_string': None,
            'logfiles': [],
            'job_hosts': {},
            'execution_time_limit': None,
            'batch_sys_name': None,
            'submit_method_id': None
        }

        self.local_job_file_path = None

        self.task_host = 'localhost'
        self.task_owner = None

        self.job_vacated = False
        self.poll_timer = None
        self.timeout = None
        self.try_timers = {}
        # Use dict here for Python 2.6 compat.
        # Should use collections.Counter in Python 2.7+
        self.non_unique_events = {}

        self.clock_trigger_time = None
        self.expire_time = None
        self.late_time = None
        self.is_late = is_late

        self.state = TaskState(tdef, self.point, status, hold_swap)

        if tdef.sequential:
            # Adjust clean-up cutoff.
            p_next = None
            adjusted = []
            for seq in tdef.sequences:
                nxt = seq.get_next_point(self.point)
                if nxt:
                    # may be None if beyond the sequence bounds
                    adjusted.append(nxt)
            if adjusted:
                p_next = min(adjusted)
                if (self.cleanup_cutoff is not None and
                        self.cleanup_cutoff < p_next):
                    self.cleanup_cutoff = p_next
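
Example #9 ends by pushing a sequential task's clean-up cutoff forward to the earliest next cycle point across its sequences, skipping any sequence it has already run off the end of (Example #12 repeats the same logic). A toy sketch with integer points (the Hourly class and all values are invented for illustration):

class Hourly:
    """Stand-in sequence over integer 'points' (illustrative only)."""
    def __init__(self, step, stop=None):
        self.step, self.stop = step, stop

    def get_next_point(self, point):
        nxt = point + self.step
        # Like the real sequences, return None beyond the sequence bounds.
        return None if self.stop is not None and nxt > self.stop else nxt


sequences = [Hourly(6), Hourly(3, stop=12)]  # second sequence ends at 12
cleanup_cutoff = 14

# Earliest next point across the sequences this task is on.
nexts = [n for n in (seq.get_next_point(12) for seq in sequences)
         if n is not None]
if nexts and cleanup_cutoff < min(nexts):
    cleanup_cutoff = min(nexts)
assert cleanup_cutoff == 18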
Example #10
    def update(self, schd):
        """Update."""
        self.update_time = time()
        global_summary = {}
        family_summary = {}

        task_summary, task_states = self._get_tasks_info(schd)

        all_states = []
        ancestors_dict = schd.config.get_first_parent_ancestors()

        # Compute state_counts (total, and per cycle).
        state_count_totals = {}
        state_count_cycles = {}

        for point_string, c_task_states in task_states.items():
            # For each cycle point, construct a family state tree
            # based on the first-parent single-inheritance tree

            c_fam_task_states = {}

            count = {}

            for key in c_task_states:
                state = c_task_states[key]
                if state is None:
                    continue
                try:
                    count[state] += 1
                except KeyError:
                    count[state] = 1

                all_states.append(state)
                for parent in ancestors_dict.get(key, []):
                    if parent == key:
                        continue
                    c_fam_task_states.setdefault(parent, set([]))
                    c_fam_task_states[parent].add(state)

            state_count_cycles[point_string] = count

            for fam, child_states in c_fam_task_states.items():
                f_id = TaskID.get(fam, point_string)
                state = extract_group_state(child_states)
                if state is None:
                    continue
                try:
                    famcfg = schd.config.cfg['runtime'][fam]['meta']
                except KeyError:
                    famcfg = {}
                description = famcfg.get('description')
                title = famcfg.get('title')
                family_summary[f_id] = {'name': fam,
                                        'description': description,
                                        'title': title,
                                        'label': point_string,
                                        'state': state}

        state_count_totals = {}
        for point_string, count in list(state_count_cycles.items()):
            for state, state_count in count.items():
                state_count_totals.setdefault(state, 0)
                state_count_totals[state] += state_count

        all_states.sort()

        for key, value in (
                ('oldest cycle point string', schd.pool.get_min_point()),
                ('newest cycle point string', schd.pool.get_max_point()),
                ('newest runahead cycle point string',
                 schd.pool.get_max_point_runahead())):
            if value:
                global_summary[key] = str(value)
            else:
                global_summary[key] = None
        if get_utc_mode():
            global_summary['time zone info'] = TIME_ZONE_UTC_INFO
        else:
            global_summary['time zone info'] = TIME_ZONE_LOCAL_INFO
        global_summary['last_updated'] = self.update_time
        global_summary['run_mode'] = schd.config.run_mode()
        global_summary['states'] = all_states
        global_summary['namespace definition order'] = (
            schd.config.ns_defn_order)
        global_summary['reloading'] = schd.pool.do_reload
        global_summary['state totals'] = state_count_totals
        # Extract suite and task URLs from config.
        global_summary['suite_urls'] = dict(
            (i, j['meta']['URL'])
            for (i, j) in schd.config.cfg['runtime'].items())
        global_summary['suite_urls']['suite'] = schd.config.cfg['meta']['URL']

        # Construct a suite status string for use by monitoring clients.
        status, status_msg = get_suite_status(schd)
        global_summary['status'] = str(status)
        global_summary['status_string'] = status_msg

        # Replace the originals (atomic update, for access from other threads).
        self.task_summary = task_summary
        self.global_summary = global_summary
        self.family_summary = family_summary
        self.state_count_totals = state_count_totals
        self.state_count_cycles = state_count_cycles
Example #11
    def update(self, schd):
        """Update."""
        self.update_time = time()
        global_summary = {}
        family_summary = {}

        task_summary, task_states = self._get_tasks_info(schd)

        all_states = []
        ancestors_dict = schd.config.get_first_parent_ancestors()

        # Compute state_counts (total, and per cycle).
        state_count_totals = {}
        state_count_cycles = {}

        for point_string, c_task_states in task_states.items():
            # For each cycle point, construct a family state tree
            # based on the first-parent single-inheritance tree

            c_fam_task_states = {}

            count = {}

            for key in c_task_states:
                state = c_task_states[key]
                if state is None:
                    continue
                try:
                    count[state] += 1
                except KeyError:
                    count[state] = 1

                all_states.append(state)
                for parent in ancestors_dict.get(key, []):
                    if parent == key:
                        continue
                    c_fam_task_states.setdefault(parent, set([]))
                    c_fam_task_states[parent].add(state)

            state_count_cycles[point_string] = count

            for fam, child_states in c_fam_task_states.items():
                f_id = TaskID.get(fam, point_string)
                state = extract_group_state(child_states)
                if state is None:
                    continue
                try:
                    famcfg = schd.config.cfg['runtime'][fam]['meta']
                except KeyError:
                    famcfg = {}
                description = famcfg.get('description')
                title = famcfg.get('title')
                family_summary[f_id] = {'name': fam,
                                        'description': description,
                                        'title': title,
                                        'label': point_string,
                                        'state': state}

        state_count_totals = {}
        for point_string, count in list(state_count_cycles.items()):
            for state, state_count in count.items():
                state_count_totals.setdefault(state, 0)
                state_count_totals[state] += state_count

        all_states.sort()

        for key, value in (
                ('oldest cycle point string', schd.pool.get_min_point()),
                ('newest cycle point string', schd.pool.get_max_point()),
                ('newest runahead cycle point string',
                 schd.pool.get_max_point_runahead())):
            if value:
                global_summary[key] = str(value)
            else:
                global_summary[key] = None
        if get_utc_mode():
            global_summary['time zone info'] = TIME_ZONE_UTC_INFO
        else:
            global_summary['time zone info'] = TIME_ZONE_LOCAL_INFO
        global_summary['last_updated'] = self.update_time
        global_summary['run_mode'] = schd.run_mode
        global_summary['states'] = all_states
        global_summary['namespace definition order'] = (
            schd.config.ns_defn_order)
        global_summary['reloading'] = schd.pool.do_reload
        global_summary['state totals'] = state_count_totals
        # Extract suite and task URLs from config.
        global_summary['suite_urls'] = dict(
            (i, j['meta']['URL'])
            for (i, j) in schd.config.cfg['runtime'].items())
        global_summary['suite_urls']['suite'] = schd.config.cfg['meta']['URL']

        # Construct a suite status string for use by monitoring clients.
        if schd.pool.is_held:
            global_summary['status_string'] = SUITE_STATUS_HELD
        elif schd.stop_mode is not None:
            global_summary['status_string'] = SUITE_STATUS_STOPPING
        elif schd.pool.hold_point:
            global_summary['status_string'] = (
                SUITE_STATUS_RUNNING_TO_HOLD % schd.pool.hold_point)
        elif schd.stop_point:
            global_summary['status_string'] = (
                SUITE_STATUS_RUNNING_TO_STOP % schd.stop_point)
        elif schd.stop_clock_time is not None:
            global_summary['status_string'] = (
                SUITE_STATUS_RUNNING_TO_STOP % schd.stop_clock_time_string)
        elif schd.stop_task:
            global_summary['status_string'] = (
                SUITE_STATUS_RUNNING_TO_STOP % schd.stop_task)
        elif schd.final_point:
            global_summary['status_string'] = (
                SUITE_STATUS_RUNNING_TO_STOP % schd.final_point)
        else:
            global_summary['status_string'] = SUITE_STATUS_RUNNING

        # Replace the originals (atomic update, for access from other threads).
        self.task_summary = task_summary
        self.global_summary = global_summary
        self.family_summary = family_summary
        self.state_count_totals = state_count_totals
        self.state_count_cycles = state_count_cycles
Example #12
    def __init__(self,
                 tdef,
                 start_point,
                 status=TASK_STATUS_WAITING,
                 is_held=False,
                 has_spawned=False,
                 stop_point=None,
                 is_startup=False,
                 submit_num=0,
                 is_late=False):
        self.tdef = tdef
        if submit_num is None:
            submit_num = 0
        self.submit_num = submit_num
        self.jobs = []

        if is_startup:
            # adjust up to the first on-sequence cycle point
            adjusted = []
            for seq in self.tdef.sequences:
                adj = seq.get_first_point(start_point)
                if adj:
                    # may be None if out of sequence bounds
                    adjusted.append(adj)
            if not adjusted:
                # This task is out of sequence bounds
                raise TaskProxySequenceBoundsError(self.tdef.name)
            self.point = min(adjusted)
            self.late_time = None
        else:
            self.point = start_point
        self.cleanup_cutoff = self.tdef.get_cleanup_cutoff_point(self.point)
        self.identity = TaskID.get(self.tdef.name, self.point)

        self.has_spawned = has_spawned
        self.reload_successor = None
        self.point_as_seconds = None

        # Manually inserted tasks may have a final cycle point set.
        self.stop_point = stop_point

        self.manual_trigger = False
        self.is_manual_submit = False
        self.summary = {
            'latest_message': '',
            'submitted_time': None,
            'submitted_time_string': None,
            'started_time': None,
            'started_time_string': None,
            'finished_time': None,
            'finished_time_string': None,
            'logfiles': [],
            'job_hosts': {},
            'execution_time_limit': None,
            'batch_sys_name': None,
            'submit_method_id': None
        }

        self.local_job_file_path = None

        self.task_host = 'localhost'
        self.task_owner = None

        self.job_vacated = False
        self.poll_timer = None
        self.timeout = None
        self.try_timers = {}
        # Use dict here for Python 2.6 compat.
        # Should use collections.Counter in Python 2.7+
        self.non_unique_events = {}

        self.clock_trigger_time = None
        self.expire_time = None
        self.late_time = None
        self.is_late = is_late

        self.state = TaskState(tdef, self.point, status, is_held)

        if tdef.sequential:
            # Adjust clean-up cutoff.
            p_next = None
            adjusted = []
            for seq in tdef.sequences:
                nxt = seq.get_next_point(self.point)
                if nxt:
                    # may be None if beyond the sequence bounds
                    adjusted.append(nxt)
            if adjusted:
                p_next = min(adjusted)
                if (self.cleanup_cutoff is not None
                        and self.cleanup_cutoff < p_next):
                    self.cleanup_cutoff = p_next
Example #13
    def __init__(self,
                 tdef,
                 start_point,
                 flow_label,
                 status=TASK_STATUS_WAITING,
                 is_held=False,
                 submit_num=0,
                 is_late=False,
                 reflow=True):
        self.tdef = tdef
        if submit_num is None:
            submit_num = 0
        self.submit_num = submit_num
        self.jobs = []
        self.flow_label = flow_label
        self.reflow = reflow
        self.point = start_point
        self.identity = TaskID.get(self.tdef.name, self.point)

        self.reload_successor = None
        self.point_as_seconds = None

        self.manual_trigger = False
        self.is_manual_submit = False
        self.summary = {
            'latest_message': '',
            'submitted_time': None,
            'submitted_time_string': None,
            'started_time': None,
            'started_time_string': None,
            'finished_time': None,
            'finished_time_string': None,
            'logfiles': [],
            'platforms_used': {},
            'execution_time_limit': None,
            'job_runner_name': None,
            'submit_method_id': None,
            'flow_label': None
        }

        self.local_job_file_path = None

        self.platform = get_platform()

        self.job_vacated = False
        self.poll_timer = None
        self.timeout = None
        self.try_timers = {}
        self.non_unique_events = Counter()

        self.clock_trigger_time = None
        self.expire_time = None
        self.late_time = None
        self.is_late = is_late
        self.waiting_on_job_prep = True

        self.state = TaskState(tdef, self.point, status, is_held)

        # Determine graph children of this task (for spawning).
        self.graph_children = generate_graph_children(tdef, self.point)
        if TASK_OUTPUT_SUCCEEDED in self.graph_children:
            self.state.outputs.add(TASK_OUTPUT_SUCCEEDED)

        if TASK_OUTPUT_FAILED in self.graph_children:
            self.failure_handled = True
        else:
            self.failure_handled = False
Example #14
 def test_get(self):
     self.assertEqual("a.1", TaskID.get("a", 1))
     self.assertEqual("a._1", TaskID.get("a", "_1"))
     self.assertEqual(
         "WTASK.20101010T101010", TaskID.get("WTASK", "20101010T101010"))