Example #1
def test_get_child_point(cycling_mode):
    cycling_mode()

    zero = get_point('0')
    one = get_point('1')
    two = get_point('2')
    p1 = get_sequence('P1', one)

    trigger = TaskTrigger('name', None, 'output')
    assert trigger.get_child_point(one, p1) == one
    assert trigger.get_child_point(two, p1) == two

    trigger = TaskTrigger('name', '+P1', 'output', offset_is_absolute=True)
    assert trigger.get_child_point(None, p1) == one

    trigger = TaskTrigger('name', '+P1', 'output', offset_is_from_icp=True)
    assert trigger.get_child_point(None, p1) == one

    trigger = TaskTrigger('name', '+P1', 'output', offset_is_irregular=True)
    assert trigger.get_child_point(one, p1) == zero

    trigger = TaskTrigger('name', '-P1', 'output', offset_is_irregular=True)
    assert trigger.get_child_point(one, p1) == two

    trigger = TaskTrigger('name', '+P1', 'output')
    assert trigger.get_child_point(one, None) == zero

    trigger = TaskTrigger('name', '-P1', 'output')
    assert trigger.get_child_point(one, None) == two
Example #2
def test_str(cycling_mode):
    cycling_mode()

    one = get_point('1')
    two = get_point('2')

    trigger = TaskTrigger('name', '1', 'output', offset_is_absolute=True)
    assert str(trigger) == 'name[1]:output'

    trigger = TaskTrigger('name', '+P1', 'output')
    assert str(trigger) == 'name[+P1]:output'

    trigger = TaskTrigger('name', None, 'output')
    assert str(trigger) == 'name:output'
Example #3
    def expire_broadcast(self, cutoff=None, **kwargs):
        """Clear all broadcasts targeting cycle points earlier than cutoff."""
        point_strings = []
        cutoff_point = None
        if cutoff is not None:
            cutoff_point = get_point(str(cutoff))
        with self.lock:
            for point_string in self.broadcasts:
                if cutoff_point is None or (
                        point_string not in ALL_CYCLE_POINTS_STRS and
                        get_point(point_string) < cutoff_point):
                    point_strings.append(point_string)
        if not point_strings:
            return (None, {"expire": [cutoff]})
        return self.clear_broadcast(point_strings=point_strings, **kwargs)
Example #4
File: broadcast_mgr.py  Project: cylc/cylc
    def expire_broadcast(self, cutoff=None):
        """Clear all broadcasts targeting cycle points earlier than cutoff."""
        point_strings = []
        cutoff_point = None
        if cutoff is not None:
            cutoff_point = get_point(str(cutoff))
        with self.lock:
            for point_string in self.broadcasts:
                if cutoff_point is None or (
                        point_string not in self.ALL_CYCLE_POINTS_STRS and
                        get_point(point_string) < cutoff_point):
                    point_strings.append(point_string)
        if not point_strings:
            return (None, {"expire": [cutoff]})
        return self.clear_broadcast(point_strings=point_strings)
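Both expire_broadcast variants above select the broadcast point strings that fall strictly before the cutoff. Below is a minimal, self-contained sketch of that filtering pattern; plain integers stand in for cylc cycle points and every name here is an illustrative stand-in, not the real cylc API.

# Simplified stand-in for the cutoff filter used by expire_broadcast.
ALL_CYCLE_POINTS_STRS = {'*'}  # stand-in for the "all cycle points" wildcards


def select_expired(broadcasts, cutoff=None):
    """Return the point strings that the given cutoff would clear."""
    if cutoff is None:
        return list(broadcasts)  # no cutoff: everything is selected
    cutoff_point = int(cutoff)  # real code: get_point(str(cutoff))
    return [
        point_string for point_string in broadcasts
        if point_string not in ALL_CYCLE_POINTS_STRS
        and int(point_string) < cutoff_point  # real code: get_point(point_string)
    ]


print(select_expired({'1': {}, '2': {}, '3': {}, '*': {}}, cutoff=3))  # ['1', '2']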
Example #5
def test_process_fcp(cycling_type: str, scheduling_cfg: dict,
                     options_fcp: Optional[str], expected_fcp: Optional[str],
                     expected_err: Optional[Tuple[Type[Exception], str]],
                     set_cycling_type: Fixture) -> None:
    """Test WorkflowConfig.process_final_cycle_point().

    Params:
        cycling_type: Workflow cycling type.
        scheduling_cfg: 'scheduling' section of workflow config.
        options_fcp: The fcp set by cli option.
        expected_fcp: The expected fcp value that gets set.
        expected_err: Exception class expected to be raised plus the message.
    """
    set_cycling_type(cycling_type, time_zone='+0530')
    mocked_config = Mock(cycling_type=cycling_type)
    mocked_config.cfg = {'scheduling': scheduling_cfg}
    mocked_config.initial_point = loader.get_point(
        scheduling_cfg['initial cycle point']).standardise()
    mocked_config.final_point = None
    mocked_config.options.fcp = options_fcp

    if expected_err:
        err, msg = expected_err
        with pytest.raises(err) as exc:
            WorkflowConfig.process_final_cycle_point(mocked_config)
        assert msg in str(exc.value)
    else:
        WorkflowConfig.process_final_cycle_point(mocked_config)
        assert mocked_config.cfg['scheduling'][
            'final cycle point'] == expected_fcp
        assert str(mocked_config.final_point) == str(expected_fcp)
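The test above calls the unbound method with a Mock standing in for the instance, so the method's effect on instance attributes can be asserted without building a real config object. A minimal sketch of that "Mock as self" pattern, using a toy class rather than cylc's WorkflowConfig:

from unittest.mock import Mock


class ToyConfig:
    def process_final_cycle_point(self):
        # Reads one attribute and writes another, like the real method.
        self.final_point = self.cfg['scheduling']['final cycle point']


mocked = Mock()
mocked.cfg = {'scheduling': {'final cycle point': '2'}}
ToyConfig.process_final_cycle_point(mocked)  # unbound call, Mock plays "self"
assert mocked.final_point == '2'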
Example #6
    def __str__(self):
        if not self.offset_is_irregular and self.offset_is_absolute:
            point = get_point(self.cycle_point_offset).standardise()
            return '%s[%s]:%s' % (self.task_name, point, self.output)
        elif self.cycle_point_offset:
            return '%s[%s]:%s' % (self.task_name, self.cycle_point_offset,
                                  self.output)
        else:
            return '%s:%s' % (self.task_name, self.output)
Example #7
def test_get_point(cycling_mode):
    cycling_mode()

    one = get_point('1')
    two = get_point('2')

    trigger = TaskTrigger('name', '1', 'output', offset_is_absolute=True)
    assert trigger.get_point(None) == one

    trigger = TaskTrigger(
        'name', '+P1', 'output', offset_is_from_icp=True, initial_point=one)
    assert trigger.get_point(None) == two

    trigger = TaskTrigger('name', '+P1', 'output')
    assert trigger.get_point(one) == two

    trigger = TaskTrigger('name', None, 'output')
    assert trigger.get_point(one) == one
Example #8
def get_cycling_bounds(config, start_point=None, stop_point=None):
    """Determine the start and stop points for graphing a suite."""
    # default start and stop points to values in the visualization section
    if not start_point:
        start_point = config.cfg['visualization']['initial cycle point']
    if not stop_point:
        viz_stop_point = config.cfg['visualization']['final cycle point']
        if viz_stop_point:
            stop_point = viz_stop_point

    # don't allow stop_point before start_point
    if stop_point is not None:
        if get_point(stop_point) < get_point(start_point):
            # NOTE: we need to cast with get_point for this comparison due to
            #       ISO8601 extended datetime formats
            stop_point = start_point
        else:
            stop_point = stop_point
    else:
        stop_point = None

    return start_point, stop_point
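The NOTE in the code above is why the comparison casts with get_point: comparing ISO 8601 point strings lexically can give the wrong order when basic and extended formats are mixed. A tiny illustration with arbitrary example datetimes:

basic = '20200102T0000Z'        # 2 Jan 2020, basic format
extended = '2020-01-03T00:00Z'  # 3 Jan 2020, extended format (the later date)
print(basic > extended)  # True lexically, i.e. the wrong chronological order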
Example #9
def test_integer_cycling_default_initial_point(cycling_mode):
    """Test that the initial cycle point defaults to 1 for integer cycling
    mode."""
    cycling_mode(integer=True)  # This is a pytest fixture; sets cycling mode
    mocked_config = Mock()
    mocked_config.cfg = {
        'scheduling': {
            'cycling mode': 'integer',
            'initial cycle point': None
        }
    }
    SuiteConfig.process_initial_cycle_point(mocked_config)
    assert mocked_config.cfg['scheduling']['initial cycle point'] == '1'
    assert mocked_config.initial_point == loader.get_point(1)
Example #10
    def get_point(self, point):
        """Return the point of the output to which this TaskTrigger pertains.

        Args:
            point (cylc.flow.cycling.PointBase): cycle point of dependent task.

        Returns:
            cylc.flow.cycling.PointBase: cycle point of the dependency.

        """
        if self.offset_is_absolute:
            point = get_point(self.cycle_point_offset).standardise()
        elif self.offset_is_from_icp:
            point = get_point_relative(
                self.cycle_point_offset, self.initial_point)
        elif self.cycle_point_offset:
            point = get_point_relative(self.cycle_point_offset, point)
        return point
Example #11
def test_process_icp(
        scheduling_cfg: Dict[str, Any], expected_icp: Optional[str],
        expected_opt_icp: Optional[str],
        expected_err: Optional[Tuple[Type[Exception], str]],
        monkeypatch: Fixture, cycling_mode: Fixture):
    """Test SuiteConfig.process_initial_cycle_point().

    "now" is assumed to be 2005-01-02T06:15+0530

    Params:
        scheduling_cfg: 'scheduling' section of workflow config.
        expected_icp: The expected icp value that gets set.
        expected_opt_icp: The expected value of options.icp that gets set
            (this gets stored in the workflow DB).
        expected_err: Exception class expected to be raised plus the message.
    """
    int_cycling_mode = True
    if scheduling_cfg['cycling mode'] == loader.ISO8601_CYCLING_TYPE:
        int_cycling_mode = False
        iso8601.init(time_zone="+0530")
    cycling_mode(integer=int_cycling_mode)
    mocked_config = Mock()
    mocked_config.cfg = {
        'scheduling': scheduling_cfg
    }
    mocked_config.options.icp = None
    monkeypatch.setattr('cylc.flow.config.get_current_time_string',
                        lambda: '20050102T0615+0530')

    if expected_err:
        err, msg = expected_err
        with pytest.raises(err) as exc:
            SuiteConfig.process_initial_cycle_point(mocked_config)
        assert msg in str(exc.value)
    else:
        SuiteConfig.process_initial_cycle_point(mocked_config)
        assert mocked_config.cfg[
            'scheduling']['initial cycle point'] == expected_icp
        assert str(mocked_config.initial_point) == expected_icp
        opt_icp = mocked_config.options.icp
        if opt_icp is not None:
            opt_icp = str(loader.get_point(opt_icp).standardise())
        assert opt_icp == expected_opt_icp
Example #12
def test_process_icp(
    cycling_type: str,
    scheduling_cfg: Dict[str, Any],
    expected_icp: Optional[str],
    expected_opt_icp: Optional[str],
    expected_err: Optional[Tuple[Type[Exception], str]],
    monkeypatch: pytest.MonkeyPatch, set_cycling_type: Fixture
) -> None:
    """Test WorkflowConfig.process_initial_cycle_point().

    "now" is assumed to be 2005-01-02T06:15+0530

    Params:
        cycling_type: Workflow cycling type.
        scheduling_cfg: 'scheduling' section of workflow config.
        expected_icp: The expected icp value that gets set.
        expected_opt_icp: The expected value of options.icp that gets set
            (this gets stored in the workflow DB).
        expected_err: Exception class expected to be raised plus the message.
    """
    set_cycling_type(cycling_type, time_zone="+0530")
    mocked_config = Mock(cycling_type=cycling_type)
    mocked_config.cfg = {
        'scheduling': scheduling_cfg
    }
    mocked_config.options.icp = None
    monkeypatch.setattr('cylc.flow.config.get_current_time_string',
                        lambda: '20050102T0615+0530')

    if expected_err:
        err, msg = expected_err
        with pytest.raises(err) as exc:
            WorkflowConfig.process_initial_cycle_point(mocked_config)
        assert msg in str(exc.value)
    else:
        WorkflowConfig.process_initial_cycle_point(mocked_config)
        assert mocked_config.cfg[
            'scheduling']['initial cycle point'] == expected_icp
        assert str(mocked_config.initial_point) == expected_icp
        opt_icp = mocked_config.options.icp
        if opt_icp is not None:
            opt_icp = str(loader.get_point(opt_icp).standardise())
        assert opt_icp == expected_opt_icp
Example #13
    def get_parent_point(self, from_point):
        """Return the specific parent point of this trigger.

        Args:
            from_point (cylc.flow.cycling.PointBase): parent task point.

        Returns:
            cylc.flow.cycling.PointBase: cycle point of the child.

        """
        if self.cycle_point_offset is None:
            point = from_point
        elif self.offset_is_absolute:
            point = get_point(self.cycle_point_offset).standardise()
        else:
            if self.offset_is_from_icp:
                from_point = self.initial_point
            # works with offset_is_irregular or not:
            point = get_point_relative(self.cycle_point_offset, from_point)
        return point
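A minimal stand-in illustration of the branch order in get_parent_point above; integers replace cylc PointBase objects and plain arithmetic replaces get_point()/get_point_relative(), so every name below is illustrative only.

def parent_point(offset, from_point, initial_point,
                 offset_is_absolute=False, offset_is_from_icp=False):
    if offset is None:
        return from_point            # no offset: same cycle point
    if offset_is_absolute:
        return int(offset)           # real code: get_point(offset).standardise()
    if offset_is_from_icp:
        from_point = initial_point   # offset is measured from the initial point
    return from_point + int(offset)  # real code: get_point_relative(offset, from_point)


assert parent_point(None, 5, 1) == 5
assert parent_point('3', 5, 1, offset_is_absolute=True) == 3
assert parent_point('+2', 5, 1, offset_is_from_icp=True) == 3
assert parent_point('+2', 5, 1) == 7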
Example #14
    def set_condition(self, expr):
        """Set the conditional expression for this prerequisite.

        Resets the cached state (self._all_satisfied).

        """

        drop_these = []
        self._all_satisfied = None

        if self.pre_initial_messages:
            for message in self.pre_initial_messages:
                drop_these.append(message)

        # Needed to drop pre warm-start dependence:
        for message in self.satisfied:
            if message in drop_these:
                continue
            if self.start_point:
                if message[1]:  # Cycle point.
                    if get_point(message[1]) < self.start_point <= self.point:
                        # Drop if outside of relevant point range.
                        drop_these.append(message)

        for message in drop_these:
            if message in self.satisfied:
                self.satisfied.pop(message)

        if '|' in expr:
            if drop_these:
                simpler = ConditionalSimplifier(
                    expr, [self.MESSAGE_TEMPLATE % m for m in drop_these])
                expr = simpler.get_cleaned()
            # Make a Python expression so we can eval() the logic.
            for message in self.satisfied:
                expr = expr.replace(self.MESSAGE_TEMPLATE % message,
                                    self.SATISFIED_TEMPLATE % message)
            self.conditional_expression = expr
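The tail of set_condition rewrites each prerequisite message inside the conditional expression into a form that eval() can resolve against the satisfied map. A self-contained sketch of that substitution step; the template strings and variable names below are assumptions for illustration, not cylc's real templates:

MESSAGE_TEMPLATE = '%s.%s %s'
SATISFIED_TEMPLATE = 'bool(satisfied[("%s", "%s", "%s")])'

satisfied = {
    ('foo', '1', 'succeeded'): True,
    ('bar', '1', 'succeeded'): False,
}
expr = 'foo.1 succeeded | bar.1 succeeded'
for message in satisfied:
    expr = expr.replace(MESSAGE_TEMPLATE % message,
                        SATISFIED_TEMPLATE % message)
# expr is now a Python boolean expression over the satisfied map.
print(eval(expr))  # True, because foo.1 succeeded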
Example #15
def test_process_fcp(scheduling_cfg: dict, options_fcp: Optional[str],
                     expected_fcp: Optional[str],
                     expected_err: Optional[Tuple[Type[Exception], str]],
                     cycling_mode: Fixture):
    """Test SuiteConfig.process_final_cycle_point().

    Params:
        scheduling_cfg: 'scheduling' section of workflow config.
        options_fcp: The fcp set by cli option.
        expected_fcp: The expected fcp value that gets set.
        expected_err: Exception class expected to be raised plus the message.
    """
    if scheduling_cfg['cycling mode'] == loader.ISO8601_CYCLING_TYPE:
        iso8601.init(time_zone="+0530")
        cycling_mode(integer=False)
    else:
        cycling_mode(integer=True)
    mocked_config = Mock(cycling_type=scheduling_cfg['cycling mode'])
    mocked_config.cfg = {
        'scheduling': scheduling_cfg
    }
    mocked_config.initial_point = loader.get_point(
        scheduling_cfg['initial cycle point']).standardise()
    mocked_config.final_point = None
    mocked_config.options.fcp = options_fcp

    if expected_err:
        err, msg = expected_err
        with pytest.raises(err) as exc:
            SuiteConfig.process_final_cycle_point(mocked_config)
        assert msg in str(exc.value)
    else:
        SuiteConfig.process_final_cycle_point(mocked_config)
        assert mocked_config.cfg[
            'scheduling']['final cycle point'] == expected_fcp
        assert str(mocked_config.final_point) == str(expected_fcp)
Example #16
    def get_standardised_point(cls, point_string):
        """Return a standardised point."""
        return get_point(cls.get_standardised_point_string(point_string))
Example #17
    def get_target_points(self):
        """Return a list of cycle points targeted by each prerequisite,
        including each component of conditionals."""
        return [get_point(p) for p in self.target_point_strings]
Example #18
    def generate_graph_elements(self,
                                edges=None,
                                task_proxies=None,
                                family_proxies=None,
                                start_point=None,
                                stop_point=None):
        """Generate edges and [ghost] nodes (family and task proxy elements).

        Args:
            edges (dict, optional):
                ID-PbEdge key-value mapping.
            task_proxies (dict, optional):
                ID-PbTaskProxy key-value mapping.
            family_proxies (dict, optional):
                ID-PbFamilyProxy key-value mapping.
            start_point (cylc.flow.cycling.PointBase):
                Edge generation start point.
            stop_point (cylc.flow.cycling.PointBase):
                Edge generation stop point.

        """
        if not self.pool_points:
            return
        config = self.schd.config
        tasks = self.data[self.workflow_id][TASKS]
        graph = PbEdges()
        if edges is None:
            edges = {}
        if task_proxies is None:
            task_proxies = {}
        if family_proxies is None:
            family_proxies = {}
        if start_point is None:
            start_point = min(self.pool_points)
        if stop_point is None:
            stop_point = max(self.pool_points)

        # Used for generating family [ghost] nodes
        new_points = set()

        # Generate ungrouped edges
        for edge in config.get_graph_edges(start_point, stop_point):
            # Reference or create edge source & target nodes/proxies
            s_node = edge[0]
            t_node = edge[1]
            if s_node is None:
                continue
            # Is the source cycle point in the task pool?
            s_name, s_point = TaskID.split(s_node)
            s_point_cls = get_point(s_point)
            s_pool_point = False
            s_valid = TaskID.is_valid_id(s_node)
            if s_valid:
                s_pool_point = s_point_cls in self.pool_points
            # Is the target cycle point in the task pool?
            t_pool_point = False
            t_valid = t_node and TaskID.is_valid_id(t_node)
            if t_valid:
                t_name, t_point = TaskID.split(t_node)
                t_point_cls = get_point(t_point)
                t_pool_point = get_point(t_point) in self.pool_points
            # Proceed if either source or target cycle points
            # are in the task pool.
            if not s_pool_point and not t_pool_point:
                continue
            # If source/target is valid add/create the corresponding items.
            # TODO: if xtrigger is suite_state create remote ID
            source_id = (
                f'{self.workflow_id}{ID_DELIM}{s_point}{ID_DELIM}{s_name}')
            if s_valid:
                s_task_id = f'{self.workflow_id}{ID_DELIM}{s_name}'
                new_points.add(s_point)
                # Add source points for pruning.
                self.edge_points.setdefault(s_point_cls, set())
                if source_id not in task_proxies:
                    task_proxies[source_id] = self.generate_ghost_task(s_node)
                if source_id not in tasks[s_task_id].proxies:
                    tasks[s_task_id].proxies.append(source_id)
            # Add valid source before checking for no target,
            # as source may be an isolate (hence no edges).
            # At present targets can't be xtriggers.
            if t_valid:
                target_id = (
                    f'{self.workflow_id}{ID_DELIM}{t_point}{ID_DELIM}{t_name}')
                t_task_id = f'{self.workflow_id}{ID_DELIM}{t_name}'
                new_points.add(t_point)
                # Add target points to associated source points for pruning.
                self.edge_points.setdefault(s_point_cls, set())
                self.edge_points[s_point_cls].add(t_point_cls)
                if target_id not in task_proxies:
                    task_proxies[target_id] = self.generate_ghost_task(t_node)
                if target_id not in tasks[t_task_id].proxies:
                    tasks[t_task_id].proxies.append(target_id)

                # Initiate edge element.
                e_id = (
                    f'{self.workflow_id}{ID_DELIM}{s_node}{ID_DELIM}{t_node}')
                edges[e_id] = PbEdge(
                    id=e_id,
                    suicide=edge[3],
                    cond=edge[4],
                )
                edges[e_id].source = source_id
                edges[e_id].target = target_id

                # Add edge id to node field for resolver reference
                task_proxies[target_id].edges.append(e_id)
                if s_valid:
                    task_proxies[source_id].edges.append(e_id)

        graph.edges.extend(edges.keys())

        if new_points:
            self.generate_ghost_families(family_proxies, new_points)

        # Replace the originals (atomic update, for access from other threads).
        self.data[self.workflow_id][TASK_PROXIES] = task_proxies
        self.data[self.workflow_id][EDGES] = edges
        self.data[self.workflow_id][GRAPH] = graph
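The identifiers built above join the workflow id, cycle point and task name with ID_DELIM. A tiny illustration of that scheme; the delimiter and workflow name are assumed stand-ins, not necessarily the values cylc uses:

ID_DELIM = '|'  # assumed stand-in for the real delimiter constant
workflow_id = 'user' + ID_DELIM + 'my_workflow'
s_point, s_name = '20200101T0000Z', 'foo'
source_id = f'{workflow_id}{ID_DELIM}{s_point}{ID_DELIM}{s_name}'
print(source_id)  # user|my_workflow|20200101T0000Z|foo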
Example #19
def main(parser, options, suite, *task_ids):
    """cylc submit CLI.

    No TASK EVENT HOOKS are set for the submit command because there is
    no scheduler instance watching for task failure etc.

    Note: a suite contact env file is not written by this command (it
    would overwrite the real one if the suite is running).
    """
    if not options.verbose and not options.debug:
        LOG.setLevel(WARNING)
    for task_id in task_ids:
        if not TaskID.is_valid_id(task_id):
            raise UserInputError("Invalid task ID %s" % task_id)
    suiterc = get_suite_rc(suite)
    suite_dir = os.path.dirname(suiterc)
    # For user-defined batch system handlers
    sys.path.append(os.path.join(suite_dir, 'python'))

    # Load suite config and tasks
    config = SuiteConfig(
        suite, suiterc, options,
        load_template_vars(options.templatevars, options.templatevars_file))
    itasks = []
    for task_id in task_ids:
        name_str, point_str = TaskID.split(task_id)
        taskdefs = config.find_taskdefs(name_str)
        if not taskdefs:
            raise UserInputError("No task found for %s" % task_id)
        for taskdef in taskdefs:
            itasks.append(
                TaskProxy(taskdef,
                          get_point(point_str).standardise(),
                          is_startup=True))

    # Initialise job submit environment
    make_suite_run_tree(suite)
    # Extract job.sh from library, for use in job scripts.
    extract_resources(get_suite_srv_dir(suite), ['etc/job.sh'])
    pool = SubProcPool()
    owner = get_user()
    job_pool = JobPool(suite, owner)
    db_mgr = SuiteDatabaseManager()
    task_job_mgr = TaskJobManager(
        suite, pool, db_mgr,
        TaskEventsManager(suite, pool, db_mgr, BroadcastMgr(db_mgr), job_pool),
        job_pool)
    task_job_mgr.task_remote_mgr.single_task_mode = True
    task_job_mgr.job_file_writer.set_suite_env({
        'CYLC_UTC':
        str(config.cfg['cylc']['UTC mode']),
        'CYLC_DEBUG':
        str(cylc.flow.flags.debug).lower(),
        'CYLC_VERBOSE':
        str(cylc.flow.flags.verbose).lower(),
        'CYLC_SUITE_NAME':
        suite,
        'CYLC_CYCLING_MODE':
        str(config.cfg['scheduling']['cycling mode']),
        'CYLC_SUITE_INITIAL_CYCLE_POINT':
        str(config.cfg['scheduling']['initial cycle point']),
        'CYLC_SUITE_FINAL_CYCLE_POINT':
        str(config.cfg['scheduling']['final cycle point']),
    })

    ret_code = 0
    waiting_tasks = list(itasks)
    if options.dry_run:
        while waiting_tasks:
            prep_tasks, bad_tasks = task_job_mgr.prep_submit_task_jobs(
                suite, waiting_tasks, dry_run=True)
            for itask in prep_tasks + bad_tasks:
                waiting_tasks.remove(itask)
            if waiting_tasks:
                task_job_mgr.proc_pool.process()
                sleep(1.0)

        for itask in itasks:
            if itask.local_job_file_path:
                print('JOB SCRIPT=%s' % itask.local_job_file_path)
            else:
                print('Unable to prepare job file for %s' % itask.identity,
                      file=sys.stderr)
                ret_code = 1
    else:
        while waiting_tasks:
            for itask in task_job_mgr.submit_task_jobs(suite, waiting_tasks):
                waiting_tasks.remove(itask)
            if waiting_tasks:
                task_job_mgr.proc_pool.process()
                sleep(1.0)
        while task_job_mgr.proc_pool.is_not_done():
            task_job_mgr.proc_pool.process()
        for itask in itasks:
            if itask.summary.get('submit_method_id') is not None:
                print('[%s] Job ID: %s' % (
                    itask.identity, itask.summary['submit_method_id']))
            if itask.state(TASK_STATUS_SUBMIT_FAILED):
                ret_code = 1
    sys.exit(ret_code)
Example #20
    def generate_graph_elements(self, start_point=None, stop_point=None):
        """Generate edges and [ghost] nodes (family and task proxy elements).

        Args:
            start_point (cylc.flow.cycling.PointBase):
                Edge generation start point.
            stop_point (cylc.flow.cycling.PointBase):
                Edge generation stop point.

        """
        if not self.pool_points:
            return
        config = self.schd.config
        if start_point is None:
            start_point = min(self.pool_points)
        if stop_point is None:
            stop_point = max(self.pool_points)

        # Reference set for workflow relations
        new_edges = set()

        # Generate ungrouped edges
        for edge in config.get_graph_edges(start_point, stop_point):
            # Reference or create edge source & target nodes/proxies
            s_node = edge[0]
            t_node = edge[1]
            if s_node is None:
                continue
            # Is the source cycle point in the task pool?
            s_name, s_point = TaskID.split(s_node)
            s_point_cls = get_point(s_point)
            s_pool_point = False
            s_valid = TaskID.is_valid_id(s_node)
            if s_valid:
                s_pool_point = s_point_cls in self.pool_points
            # Is the target cycle point in the task pool?
            t_pool_point = False
            t_valid = t_node and TaskID.is_valid_id(t_node)
            if t_valid:
                t_name, t_point = TaskID.split(t_node)
                t_point_cls = get_point(t_point)
                t_pool_point = get_point(t_point) in self.pool_points

            # Proceed if either source or target cycle points
            # are in the task pool.
            if not s_pool_point and not t_pool_point:
                continue

            # If source/target is valid add/create the corresponding items.
            # TODO: if xtrigger is suite_state create remote ID
            source_id = (
                f'{self.workflow_id}{ID_DELIM}{s_point}{ID_DELIM}{s_name}')

            # Add valid source before checking for no target,
            # as source may be an isolate (hence no edges).
            if s_valid:
                s_task_id = f'{self.workflow_id}{ID_DELIM}{s_name}'
                # Add source points for pruning.
                self.edge_points.setdefault(s_point_cls, set())
                self.generate_ghost_task(s_task_id, source_id, s_point)
            # If the target is valid then create it.
            # Edges are only created for valid targets.
            # At present targets can't be xtriggers.
            if t_valid:
                target_id = (
                    f'{self.workflow_id}{ID_DELIM}{t_point}{ID_DELIM}{t_name}')
                t_task_id = f'{self.workflow_id}{ID_DELIM}{t_name}'
                # Add target points to associated source points for pruning.
                self.edge_points.setdefault(s_point_cls, set())
                self.edge_points[s_point_cls].add(t_point_cls)
                self.generate_ghost_task(t_task_id, target_id, t_point)

                # Initiate edge element.
                e_id = (
                    f'{self.workflow_id}{ID_DELIM}{s_node}{ID_DELIM}{t_node}')
                self.added[EDGES][e_id] = PbEdge(
                    id=e_id,
                    suicide=edge[3],
                    cond=edge[4],
                    source=source_id,
                    target=target_id,
                )
                new_edges.add(e_id)

                # Add edge id to node field for resolver reference
                self.updated[TASK_PROXIES].setdefault(
                    target_id, PbTaskProxy(id=target_id)).edges.append(e_id)
                if s_valid:
                    self.updated[TASK_PROXIES].setdefault(
                        source_id,
                        PbTaskProxy(id=source_id)).edges.append(e_id)
        if new_edges:
            getattr(self.updated[WORKFLOW], EDGES).edges.extend(new_edges)
Example #21
    def get_standardised_point(cls, point_string: str) -> Optional[PointBase]:
        """Return a standardised point."""
        return get_point(cls.get_standardised_point_string(point_string))