Example 1
    def test_is_valid_id(self):
        for id1 in ["a.1", "_.098098439535$#%#@!#~"]:
            self.assertTrue(TaskID.is_valid_id(id1))
        for id2 in [
            "abc", "123", "____", "_", "a_b", "a_1", "1_b", "ABC", "a.A A"
        ]:
            self.assertFalse(TaskID.is_valid_id(id2))
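
The assertions above pin down the ID grammar: a valid ID is NAME.POINT, where the name is made of word characters and the point may be almost any text without whitespace. A minimal sketch of a checker consistent with these assertions (the regex is illustrative only; the real cylc.flow.task_id.TaskID pattern is stricter):

import re

# Illustrative only: name (word chars), '.' delimiter, point (no spaces).
_ID_RE = re.compile(r'\w+\.\S+$')

def is_valid_id(id_str):
    """Return True if id_str looks like NAME.POINT."""
    return _ID_RE.match(id_str) is not None

assert is_valid_id("a.1")
assert is_valid_id("_.098098439535$#%#@!#~")
assert not is_valid_id("abc")      # no '.' delimiter
assert not is_valid_id("a.A A")    # whitespace in the point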
Example 2
def main(parser: COP,
         options: 'Values',
         reg: str,
         shutdown_arg: Optional[str] = None) -> None:
    if shutdown_arg is not None and options.kill:
        parser.error("ERROR: --kill is not compatible with [STOP]")

    if options.kill and options.now:
        parser.error("ERROR: --kill is not compatible with --now")

    if options.flow_label and int(options.max_polls) > 0:
        parser.error("ERROR: --flow is not compatible with --max-polls")

    reg, _ = parse_reg(reg)
    pclient = get_client(reg, timeout=options.comms_timeout)

    if int(options.max_polls) > 0:
        # (test to avoid the "nothing to do" warning for --max-polls=0)
        spoller = StopPoller(pclient, "workflow stopped", options.interval,
                             options.max_polls)

    # mode defaults to 'Clean'
    mode = None
    task = None
    cycle_point = None
    if shutdown_arg is not None and TaskID.is_valid_id(shutdown_arg):
        # STOP argument detected
        task = shutdown_arg
    elif shutdown_arg is not None:
        # not a task ID, may be a cycle point
        cycle_point = shutdown_arg
    elif options.kill:
        mode = WorkflowStopMode.Kill.name
    elif options.now > 1:
        mode = WorkflowStopMode.NowNow.name
    elif options.now:
        mode = WorkflowStopMode.Now.name

    mutation_kwargs = {
        'request_string': MUTATION,
        'variables': {
            'wFlows': [reg],
            'stopMode': mode,
            'cyclePoint': cycle_point,
            'clockTime': options.wall_clock,
            'task': task,
            'flowLabel': options.flow_label,
        }
    }

    pclient('graphql', mutation_kwargs)

    if int(options.max_polls) > 0 and not spoller.poll():
        # (test to avoid the "nothing to do" warning for --max-polls=0)
        sys.exit(1)
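
The mode selection above sends the .name of an enum member, or leaves mode as None for the default clean shutdown. A minimal sketch of the WorkflowStopMode enum this assumes; the member names mirror the strings used above, but the real definition in Cylc may differ:

from enum import Enum

class WorkflowStopMode(Enum):
    # Sketch only: member names match the 'stopMode' strings above.
    Clean = 'Clean'    # default: wait for active tasks to finish
    Kill = 'Kill'      # kill active task jobs, then shut down
    Now = 'Now'        # don't wait for active tasks
    NowNow = 'NowNow'  # shut down immediately

assert WorkflowStopMode.Kill.name == 'Kill'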
Example 3
def main(parser, options, workflow, shutdown_arg=None):
    if shutdown_arg is not None and options.kill:
        parser.error("ERROR: --kill is not compatible with [STOP]")

    if options.kill and options.now:
        parser.error("ERROR: --kill is not compatible with --now")

    if options.flow_label and int(options.max_polls) > 0:
        parser.error("ERROR: --flow is not compatible with --max-polls")

    workflow = os.path.normpath(workflow)
    pclient = get_client(workflow, timeout=options.comms_timeout)

    if int(options.max_polls) > 0:
        # (test to avoid the "nothing to do" warning for --max-polls=0)
        spoller = StopPoller(pclient, "workflow stopped", options.interval,
                             options.max_polls)

    # mode defaults to 'Clean'
    mode = None
    task = None
    cycle_point = None
    if shutdown_arg is not None and TaskID.is_valid_id(shutdown_arg):
        # STOP argument detected
        task = shutdown_arg
    elif shutdown_arg is not None:
        # not a task ID, may be a cycle point
        cycle_point = shutdown_arg
    elif options.kill:
        mode = 'Kill'
    elif options.now > 1:
        mode = 'NowNow'
    elif options.now:
        mode = 'Now'

    mutation_kwargs = {
        'request_string': MUTATION,
        'variables': {
            'wFlows': [workflow],
            'stopMode': mode,
            'cyclePoint': cycle_point,
            'clockTime': options.wall_clock,
            'task': task,
            'flowLabel': options.flow_label,
        }
    }

    pclient('graphql', mutation_kwargs)

    if int(options.max_polls) > 0:
        # (test to avoid the "nothing to do" warning for --max-polls=0)
        if not spoller.poll():
            sys.exit(1)
Example 4
def main(
    parser: COP,
    options: 'Values',
    workflow: str,
    task_id: Optional[str] = None
) -> None:
    workflow, _ = parse_reg(workflow)
    pclient = get_client(workflow, timeout=options.comms_timeout)

    if task_id and not TaskID.is_valid_id(task_id):
        raise UserInputError("Invalid task ID: %s" % task_id)

    flow_kwargs = {
        'request_string': FLOW_QUERY,
        'variables': {'wFlows': [workflow]}
    }
    task_kwargs: Dict[str, Any] = {
        'request_string': TASK_QUERY,
    }
    # cylc ping WORKFLOW
    result = pclient('graphql', flow_kwargs)
    msg = ""
    for flow in result['workflows']:
        w_name = flow['name']
        w_port = flow['port']
        w_pub_port = flow['pubPort']
        if cylc.flow.flags.verbosity > 0:
            sys.stdout.write(
                f'{w_name} running on '
                f'{pclient.host}:{w_port} {w_pub_port}\n'
            )
        # cylc ping WORKFLOW TASKID
        if task_id:
            task, point = TaskID.split(task_id)
            w_id = flow['id']
            task_kwargs['variables'] = {
                'tProxy': f'{w_id}{ID_DELIM}{point}{ID_DELIM}{task}'
            }
            task_result = pclient('graphql', task_kwargs)
            if not task_result.get('taskProxy'):
                msg = "task not found"
            elif task_result['taskProxy']['state'] != TASK_STATUS_RUNNING:
                msg = f"task not {TASK_STATUS_RUNNING}"
            if msg:
                print(cparse(f'<red>{msg}</red>'))
                sys.exit(1)
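
The 'tProxy' value above shows how the data store addresses a task proxy: the workflow ID, cycle point, and task name joined with ID_DELIM. A minimal sketch, assuming ID_DELIM is the '|' separator used by the Cylc data store; the helper name is hypothetical:

ID_DELIM = '|'  # assumed value of the data store delimiter

def task_proxy_id(w_id, task_id):
    """Build 'user|workflow|point|task' from a 'task.point' style ID."""
    task, point = task_id.split('.', 1)  # what TaskID.split() does here
    return f'{w_id}{ID_DELIM}{point}{ID_DELIM}{task}'

print(task_proxy_id('me|my_flow', 'foo.20210101T00'))
# me|my_flow|20210101T00|foo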
Example 5
def main(parser, options, suite, shutdown_arg=None):
    if shutdown_arg is not None and options.kill:
        parser.error("ERROR: --kill is not compatible with [STOP]")

    if options.kill and options.now:
        parser.error("ERROR: --kill is not compatible with --now")

    pclient = SuiteRuntimeClient(suite, options.owner, options.host,
                                 options.port, options.comms_timeout)

    if int(options.max_polls) > 0:
        # (test to avoid the "nothing to do" warning for --max-polls=0)
        spoller = StopPoller(pclient, "suite stopped", options.interval,
                             options.max_polls)

    if options.wall_clock:
        prompt(
            'Set shutdown at wall clock %s for %s' %
            (options.wall_clock, suite), options.force)
        pclient('set_stop_after_clock_time',
                {'datetime_string': options.wall_clock})
    elif shutdown_arg is not None and TaskID.is_valid_id(shutdown_arg):
        # STOP argument detected
        prompt('Set shutdown after task %s for %s' % (shutdown_arg, suite),
               options.force)
        pclient('set_stop_after_task', {'task_id': shutdown_arg})
    elif shutdown_arg is not None:
        # not a task ID, may be a cycle point
        prompt('Set shutdown at cycle point %s for %s' % (shutdown_arg, suite),
               options.force)
        pclient('set_stop_after_point', {'point_string': shutdown_arg})
    elif options.now > 1:
        prompt('Shut down and terminate %s now' % suite, options.force)
        pclient('stop_now', {'terminate': True})
    elif options.now:
        prompt('Shut down %s now' % suite, options.force)
        pclient('stop_now')
    else:
        prompt('Shut down %s' % suite, options.force)
        pclient('set_stop_cleanly', {'kill_active_tasks': options.kill})

    if int(options.max_polls) > 0:
        # (test to avoid the "nothing to do" warning for --max-polls=0)
        if not spoller.poll():
            sys.exit(1)
Example 6
def main(parser, options, suite, task_id=None):
    pclient = SuiteRuntimeClient(suite, timeout=options.comms_timeout)

    if task_id and not TaskID.is_valid_id(task_id):
        raise UserInputError("Invalid task ID: %s" % task_id)

    flow_kwargs = {
        'request_string': FLOW_QUERY,
        'variables': {
            'wFlows': [suite]
        }
    }
    task_kwargs = {
        'request_string': TASK_QUERY,
    }
    # cylc ping SUITE
    result = pclient('graphql', flow_kwargs)
    msg = ""
    for flow in result['workflows']:
        w_name = flow['name']
        w_port = flow['port']
        w_pub_port = flow['pubPort']
        if cylc.flow.flags.verbose:
            sys.stdout.write(f'{w_name} running on '
                             f'{pclient.host}:{w_port} {w_pub_port}\n')
        # cylc ping SUITE TASKID
        if task_id:
            task, point = TaskID.split(task_id)
            w_id = flow['id']
            task_kwargs['variables'] = {
                'tProxy': f'{w_id}{ID_DELIM}{point}{ID_DELIM}{task}'
            }
            task_result = pclient('graphql', task_kwargs)
            if not task_result.get('taskProxy'):
                msg = "task not found"
            elif task_result['taskProxy']['state'] != TASK_STATUS_RUNNING:
                msg = f"task not {TASK_STATUS_RUNNING}"
            if msg:
                print(cparse(f'<red>{msg}</red>'))
                sys.exit(1)
Example 7
    def generate_graph_elements(self, start_point=None, stop_point=None):
        """Generate edges and [ghost] nodes (family and task proxy elements).

        Args:
            start_point (cylc.flow.cycling.PointBase):
                Edge generation start point.
            stop_point (cylc.flow.cycling.PointBase):
                Edge generation stop point.

        """
        if not self.pool_points:
            return
        config = self.schd.config
        if start_point is None:
            start_point = min(self.pool_points)
        if stop_point is None:
            stop_point = max(self.pool_points)

        # Reference set for workflow relations
        new_edges = set()

        # Generate ungrouped edges
        for edge in config.get_graph_edges(start_point, stop_point):
            # Reference or create edge source & target nodes/proxies
            s_node = edge[0]
            t_node = edge[1]
            if s_node is None:
                continue
            # Is the source cycle point in the task pool?
            s_name, s_point = TaskID.split(s_node)
            s_point_cls = get_point(s_point)
            s_pool_point = False
            s_valid = TaskID.is_valid_id(s_node)
            if s_valid:
                s_pool_point = s_point_cls in self.pool_points
            # Is the target cycle point in the task pool?
            t_pool_point = False
            t_valid = t_node and TaskID.is_valid_id(t_node)
            if t_valid:
                t_name, t_point = TaskID.split(t_node)
                t_point_cls = get_point(t_point)
                t_pool_point = t_point_cls in self.pool_points

            # Proceed if either source or target cycle points
            # are in the task pool.
            if not s_pool_point and not t_pool_point:
                continue

            # If source/target is valid add/create the corresponding items.
            # TODO: if xtrigger is suite_state create remote ID
            source_id = (
                f'{self.workflow_id}{ID_DELIM}{s_point}{ID_DELIM}{s_name}')

            # Add valid source before checking for no target,
            # as source may be an isolate (hence no edges).
            if s_valid:
                s_task_id = f'{self.workflow_id}{ID_DELIM}{s_name}'
                # Add source points for pruning.
                self.edge_points.setdefault(s_point_cls, set())
                self.generate_ghost_task(s_task_id, source_id, s_point)
            # If the target is valid then create it.
            # Edges are only created for valid targets.
            # At present targets can't be xtriggers.
            if t_valid:
                target_id = (
                    f'{self.workflow_id}{ID_DELIM}{t_point}{ID_DELIM}{t_name}')
                t_task_id = f'{self.workflow_id}{ID_DELIM}{t_name}'
                # Add target points to associated source points for pruning.
                self.edge_points.setdefault(s_point_cls, set())
                self.edge_points[s_point_cls].add(t_point_cls)
                self.generate_ghost_task(t_task_id, target_id, t_point)

                # Initiate edge element.
                e_id = (
                    f'{self.workflow_id}{ID_DELIM}{s_node}{ID_DELIM}{t_node}')
                self.added[EDGES][e_id] = PbEdge(
                    id=e_id,
                    suicide=edge[3],
                    cond=edge[4],
                    source=source_id,
                    target=target_id,
                )
                new_edges.add(e_id)

                # Add edge id to node field for resolver reference
                self.updated[TASK_PROXIES].setdefault(
                    target_id, PbTaskProxy(id=target_id)).edges.append(e_id)
                if s_valid:
                    self.updated[TASK_PROXIES].setdefault(
                        source_id,
                        PbTaskProxy(id=source_id)).edges.append(e_id)
        if new_edges:
            getattr(self.updated[WORKFLOW], EDGES).edges.extend(new_edges)
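
The edge_points mapping maintained above is a pruning registry: each source cycle point maps to the set of target points its edges reach, so that pruning a source point later identifies which targets to reconsider. A minimal sketch of that bookkeeping, with hypothetical names:

edge_points = {}  # source point -> set of target points

def record_edge_points(s_point, t_point=None):
    # Register the source even if it is an isolate (no outgoing edge),
    # matching the bare setdefault calls above.
    edge_points.setdefault(s_point, set())
    if t_point is not None:
        edge_points[s_point].add(t_point)

record_edge_points('1')       # isolate: source only
record_edge_points('1', '2')
assert edge_points == {'1': {'2'}}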
Example 8
def main(_, options, suite, *task_args):
    """Implement "cylc show" CLI."""
    pclient = SuiteRuntimeClient(suite, timeout=options.comms_timeout)
    json_filter = {}

    if not task_args:
        query = WORKFLOW_META_QUERY
        query_kwargs = {
            'request_string': query,
            'variables': {
                'wFlows': [suite]
            }
        }
        # Print suite info.
        results = pclient('graphql', query_kwargs)
        for workflow in results['workflows']:
            flat_data = flatten_data(workflow)
            if options.json:
                json_filter.update(flat_data)
            else:
                for key, value in sorted(flat_data.items(), reverse=True):
                    ansiprint(
                        f'<bold>{key}:</bold> {value or "<m>(not given)</m>"}')

    task_names = [arg for arg in task_args if TaskID.is_valid_name(arg)]
    task_ids = [arg for arg in task_args if TaskID.is_valid_id_2(arg)]

    if task_names:
        tasks_query = TASK_META_QUERY
        tasks_kwargs = {
            'request_string': tasks_query,
            'variables': {
                'wFlows': [suite],
                'taskIds': task_names
            }
        }
        # Print task metadata.
        results = pclient('graphql', tasks_kwargs)
        multi = len(results['tasks']) > 1
        for task in results['tasks']:
            flat_data = flatten_data(task['meta'])
            if options.json:
                json_filter.update({task['name']: flat_data})
            else:
                if multi:
                    print(f'----\nTASK NAME: {task["name"]}')
                for key, value in sorted(flat_data.items(), reverse=True):
                    ansiprint(
                        f'<bold>{key}:</bold> {value or "<m>(not given)</m>"}')

    if task_ids:
        tp_query = TASK_PREREQS_QUERY
        tp_kwargs = {
            'request_string': tp_query,
            'variables': {
                'wFlows': [suite],
                'taskIds': [
                    f'{c}{ID_DELIM}{n}' for n, c in [
                        TaskID.split(t_id)
                        for t_id in task_ids if TaskID.is_valid_id(t_id)
                    ]
                ] + [
                    f'{c}{ID_DELIM}{n}' for c, n in [
                        t_id.rsplit(TaskID.DELIM2, 1)
                        for t_id in task_ids if not TaskID.is_valid_id(t_id)
                    ]
                ]
            }
        }
        results = pclient('graphql', tp_kwargs)
        multi = len(results['taskProxies']) > 1
        for t_proxy in results['taskProxies']:
            task_id = TaskID.get(t_proxy['name'], t_proxy['cyclePoint'])
            if options.json:
                json_filter.update({task_id: t_proxy})
            else:
                if multi:
                    print(f'----\nTASK ID: {task_id}')
                prereqs = []
                for item in t_proxy['prerequisites']:
                    prefix = ''
                    multi_cond = len(item['conditions']) > 1
                    if multi_cond:
                        prereqs.append([
                            True, '', item['expression'].replace('c', ''),
                            item['satisfied']
                        ])
                    for cond in item['conditions']:
                        if multi_cond and not options.list_prereqs:
                            prefix = f'\t{cond["exprAlias"].strip("c")} = '
                        _, _, point, name = cond['taskId'].split(ID_DELIM)
                        cond_id = TaskID.get(name, point)
                        prereqs.append([
                            False, prefix, f'{cond_id} {cond["reqState"]}',
                            cond['satisfied']
                        ])
                if options.list_prereqs:
                    for composite, _, msg, _ in prereqs:
                        if not composite:
                            print(msg)
                else:
                    flat_meta = flatten_data(t_proxy['task']['meta'])
                    for key, value in sorted(flat_meta.items(), reverse=True):
                        ansiprint(f'<bold>{key}:</bold>'
                                  f' {value or "<m>(not given)</m>"}')
                    ansiprint('\n<bold>prerequisites</bold>'
                              ' (<red>- => not satisfied</red>):')
                    if not prereqs:
                        print('  (None)')
                    for _, prefix, msg, state in prereqs:
                        print_msg_state(f'{prefix}{msg}', state)

                    ansiprint('\n<bold>outputs</bold>'
                              ' (<red>- => not completed</red>):')
                    if not t_proxy['outputs']:
                        print('  (None)')
                    for key, val in t_proxy['outputs'].items():
                        print_msg_state(f'{task_id} {key}', val)
                    if t_proxy['extras']:
                        print('\nother:')
                        for key, value in t_proxy['extras'].items():
                            print('  o  %s ... %s' % (key, value))
        if not results['taskProxies']:
            ansiprint(f"<red>No matching tasks found: {task_ids}",
                      file=sys.stderr)
            sys.exit(1)

    if options.json:
        print(json.dumps(json_filter, indent=4))
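
The 'taskIds' construction above normalises two accepted ID spellings into the data store's 'point|name' form: 'name.point' (split on TaskID.DELIM) and 'point/name' (rsplit on TaskID.DELIM2). A minimal sketch of that normalisation; the '.', '/', and '|' delimiter values are assumptions based on how the example splits the IDs:

ID_DELIM = '|'

def normalise(t_id):
    if '.' in t_id:                   # 'name.point' form
        name, point = t_id.split('.', 1)
    else:                             # 'point/name' form
        point, name = t_id.rsplit('/', 1)
    return f'{point}{ID_DELIM}{name}'

assert normalise('foo.2021') == '2021|foo'
assert normalise('2021/foo') == '2021|foo'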
Example 9
def main(parser, options, suite, *task_ids):
    """cylc submit CLI.

    No TASK EVENT HOOKS are set for the submit command because there is
    no scheduler instance watching for task failure etc.

    Note: a suite contact env file is not written by this command (it
    would overwrite the real one if the suite is running).
    """
    if not options.verbose and not options.debug:
        LOG.setLevel(WARNING)
    for task_id in task_ids:
        if not TaskID.is_valid_id(task_id):
            raise UserInputError("Invalid task ID %s" % task_id)
    suiterc = get_suite_rc(suite)
    suite_dir = os.path.dirname(suiterc)
    # For user-defined batch system handlers
    sys.path.append(os.path.join(suite_dir, 'python'))

    # Load suite config and tasks
    config = SuiteConfig(
        suite, suiterc, options,
        load_template_vars(options.templatevars, options.templatevars_file))
    itasks = []
    for task_id in task_ids:
        name_str, point_str = TaskID.split(task_id)
        taskdefs = config.find_taskdefs(name_str)
        if not taskdefs:
            raise UserInputError("No task found for %s" % task_id)
        for taskdef in taskdefs:
            itasks.append(
                TaskProxy(taskdef,
                          get_point(point_str).standardise(),
                          is_startup=True))

    # Initialise job submit environment
    make_suite_run_tree(suite)
    # Extract job.sh from library, for use in job scripts.
    extract_resources(get_suite_srv_dir(suite), ['etc/job.sh'])
    pool = SubProcPool()
    owner = get_user()
    job_pool = JobPool(suite, owner)
    db_mgr = SuiteDatabaseManager()
    task_job_mgr = TaskJobManager(
        suite, pool, db_mgr,
        TaskEventsManager(suite, pool, db_mgr, BroadcastMgr(db_mgr), job_pool),
        job_pool)
    task_job_mgr.task_remote_mgr.single_task_mode = True
    task_job_mgr.job_file_writer.set_suite_env({
        'CYLC_UTC': str(config.cfg['cylc']['UTC mode']),
        'CYLC_DEBUG': str(cylc.flow.flags.debug).lower(),
        'CYLC_VERBOSE': str(cylc.flow.flags.verbose).lower(),
        'CYLC_SUITE_NAME': suite,
        'CYLC_CYCLING_MODE': str(config.cfg['scheduling']['cycling mode']),
        'CYLC_SUITE_INITIAL_CYCLE_POINT':
            str(config.cfg['scheduling']['initial cycle point']),
        'CYLC_SUITE_FINAL_CYCLE_POINT':
            str(config.cfg['scheduling']['final cycle point']),
    })

    ret_code = 0
    waiting_tasks = list(itasks)
    if options.dry_run:
        while waiting_tasks:
            prep_tasks, bad_tasks = task_job_mgr.prep_submit_task_jobs(
                suite, waiting_tasks, dry_run=True)
            for itask in prep_tasks + bad_tasks:
                waiting_tasks.remove(itask)
            if waiting_tasks:
                task_job_mgr.proc_pool.process()
                sleep(1.0)

        for itask in itasks:
            if itask.local_job_file_path:
                print('JOB SCRIPT=%s' % itask.local_job_file_path)
            else:
                print('Unable to prepare job file for %s' % itask.identity,
                      file=sys.stderr)
                ret_code = 1
    else:
        while waiting_tasks:
            for itask in task_job_mgr.submit_task_jobs(suite, waiting_tasks):
                waiting_tasks.remove(itask)
            if waiting_tasks:
                task_job_mgr.proc_pool.process()
                sleep(1.0)
        while task_job_mgr.proc_pool.is_not_done():
            task_job_mgr.proc_pool.process()
        for itask in itasks:
            if itask.summary.get('submit_method_id') is not None:
                print('[%s] Job ID: %s' %
                      (itask.identity, itask.summary['submit_method_id']))
            if itask.state(TASK_STATUS_SUBMIT_FAILED):
                ret_code = 1
    sys.exit(ret_code)
Example 10
    def generate_graph_elements(self,
                                edges=None,
                                task_proxies=None,
                                family_proxies=None,
                                start_point=None,
                                stop_point=None):
        """Generate edges and [ghost] nodes (family and task proxy elements).

        Args:
            edges (dict, optional):
                ID-PbEdge key-value mapping.
            task_proxies (dict, optional):
                ID-PbTaskProxy key-value mapping.
            family_proxies (dict, optional):
                ID-PbFamilyProxy key-value mapping.
            start_point (cylc.flow.cycling.PointBase):
                Edge generation start point.
            stop_point (cylc.flow.cycling.PointBase):
                Edge generation stop point.

        """
        if not self.pool_points:
            return
        config = self.schd.config
        tasks = self.data[self.workflow_id][TASKS]
        graph = PbEdges()
        if edges is None:
            edges = {}
        if task_proxies is None:
            task_proxies = {}
        if family_proxies is None:
            family_proxies = {}
        if start_point is None:
            start_point = min(self.pool_points)
        if stop_point is None:
            stop_point = max(self.pool_points)

        # Used for generating family [ghost] nodes
        new_points = set()

        # Generate ungrouped edges
        for edge in config.get_graph_edges(start_point, stop_point):
            # Reference or create edge source & target nodes/proxies
            s_node = edge[0]
            t_node = edge[1]
            if s_node is None:
                continue
            # Is the source cycle point in the task pool?
            s_name, s_point = TaskID.split(s_node)
            s_point_cls = get_point(s_point)
            s_pool_point = False
            s_valid = TaskID.is_valid_id(s_node)
            if s_valid:
                s_pool_point = s_point_cls in self.pool_points
            # Is the target cycle point in the task pool?
            t_pool_point = False
            t_valid = t_node and TaskID.is_valid_id(t_node)
            if t_valid:
                t_name, t_point = TaskID.split(t_node)
                t_point_cls = get_point(t_point)
                t_pool_point = t_point_cls in self.pool_points
            # Proceed if either source or target cycle points
            # are in the task pool.
            if not s_pool_point and not t_pool_point:
                continue
            # If source/target is valid add/create the corresponding items.
            # TODO: if xtrigger is suite_state create remote ID
            source_id = (
                f'{self.workflow_id}{ID_DELIM}{s_point}{ID_DELIM}{s_name}')
            if s_valid:
                s_task_id = f'{self.workflow_id}{ID_DELIM}{s_name}'
                new_points.add(s_point)
                # Add source points for pruning.
                self.edge_points.setdefault(s_point_cls, set())
                if source_id not in task_proxies:
                    task_proxies[source_id] = self.generate_ghost_task(s_node)
                if source_id not in tasks[s_task_id].proxies:
                    tasks[s_task_id].proxies.append(source_id)
            # Add valid source before checking for no target,
            # as source may be an isolate (hence no edges).
            # At present targets can't be xtriggers.
            if t_valid:
                target_id = (
                    f'{self.workflow_id}{ID_DELIM}{t_point}{ID_DELIM}{t_name}')
                t_task_id = f'{self.workflow_id}{ID_DELIM}{t_name}'
                new_points.add(t_point)
                # Add target points to associated source points for pruning.
                self.edge_points.setdefault(s_point_cls, set())
                self.edge_points[s_point_cls].add(t_point_cls)
                if target_id not in task_proxies:
                    task_proxies[target_id] = self.generate_ghost_task(t_node)
                if target_id not in tasks[t_task_id].proxies:
                    tasks[t_task_id].proxies.append(target_id)

                # Initiate edge element.
                e_id = (
                    f'{self.workflow_id}{ID_DELIM}{s_node}{ID_DELIM}{t_node}')
                edges[e_id] = PbEdge(
                    id=e_id,
                    suicide=edge[3],
                    cond=edge[4],
                )
                edges[e_id].source = source_id
                edges[e_id].target = target_id

                # Add edge id to node field for resolver reference
                task_proxies[target_id].edges.append(e_id)
                if s_valid:
                    task_proxies[source_id].edges.append(e_id)

        graph.edges.extend(edges.keys())

        if new_points:
            self.generate_ghost_families(family_proxies, new_points)

        # Replace the originals (atomic update, for access from other threads).
        self.data[self.workflow_id][TASK_PROXIES] = task_proxies
        self.data[self.workflow_id][EDGES] = edges
        self.data[self.workflow_id][GRAPH] = graph