def test_config_eval(self):
        """Verify that config_eval returns an iterable stream of tokens.

        Writes a minimal config file to a temporary location, evaluates
        it with config_eval and checks that the result is iterable. The
        temporary file is removed even if config_eval raises.
        """
        # Local import keeps this working on Python 3.10+, where the
        # deprecated collections.Iterable alias was removed.
        from collections.abc import Iterable

        with NamedTemporaryFile(delete=False) as tmpfile:
            tmpfile.write(b'from spreadflow_core.script import *')

        try:
            tokens = config_eval(tmpfile.name)
        finally:
            # Clean up the temp file regardless of the outcome.
            os.unlink(tmpfile.name)

        self.assertIsInstance(tokens, Iterable)
# --- Example #2 (score: 0) ---
    def startService(self):
        """Evaluate the config file, compile the token stream through the
        pass pipeline, wire up event handling and start the scheduler.
        """
        super(SpreadFlowService, self).startService()

        # Fall back to ./spreadflow.conf when no explicit path was given.
        confpath = self.options['confpath'] or os.path.join(os.getcwd(), 'spreadflow.conf')

        stream = config_eval(confpath)

        # Compiler passes are applied to the token stream in list order.
        pipeline = [AliasResolverPass(), PortsValidatorPass()]

        if self.options['multiprocess']:
            pipeline += [PartitionExpanderPass(), PartitionBoundsPass()]
            partition = self.options['partition']
            if partition:
                # Worker process: restrict the stream to one partition.
                pipeline.append(PartitionWorkerPass())
                stream.append(AddTokenOp(PartitionSelectToken(partition)))
            else:
                # Controller process: spawn/manage the partition workers.
                pipeline.append(PartitionControllersPass())

        pipeline += [ComponentsPurgePass(), EventHandlersPass()]

        for compiler_pass in pipeline:
            stream = compiler_pass(stream)

        self._eventdispatcher = EventDispatcher()

        if self.options['oneshot']:
            self._eventdispatcher.add_listener(JobEvent, 0, self._oneshot_job_event_handler)

        # Extract connections and build the scheduler from the port map.
        connection_parser = ConnectionParser()
        stream = connection_parser.extract(stream)
        self._scheduler = Scheduler(connection_parser.get_portmap(), self._eventdispatcher)

        # Register event handlers declared in the config.
        event_handler_parser = EventHandlerParser()
        stream = event_handler_parser.extract(stream)
        for event_type, priority, callback in event_handler_parser.get_handlers():
            self._eventdispatcher.add_listener(event_type, priority, callback)

        if self.options['queuestatus']:
            statuslog = SpreadFlowQueuestatusLogger(self.options['queuestatus'])
            statuslog.watch(1, self._scheduler)
            globalLogPublisher.addObserver(statuslog.logstatus)

        self._scheduler.run().addBoth(self._stop)
    def run(self, args):
        """Render the configured flow graph as an SVG document.

        Parses the command line, evaluates the config file, compiles the
        token stream through the pass pipeline and emits a Graphviz
        drawing of the resulting component graph to ``self._out``.

        :param args: argv-style argument list; ``args[0]`` is the program
            name, the remainder are the actual arguments.
        :returns: Process exit status (``0`` on success).
        """
        parser = argparse.ArgumentParser(prog=args[0])
        parser.add_argument('path', metavar='FILE',
                            help='Path to config file')
        # default=1 matches the behavior promised by the help text;
        # without it self.level would be None and DepthReductionPass
        # would receive no usable level of detail.
        parser.add_argument('-l', '--level', type=int, default=1,
                            help='Level of detail (0: toplevel components, default: 1)')
        parser.add_argument('-p', '--multiprocess', action='store_true',
                            help='Simulates multiprocess support, i.e., launch a separate process for each chain')
        parser.add_argument('--partition',
                            help='Simulates multiprocess support, select the given partition of the graph')

        # Parsed options become attributes on self (namespace=self).
        parser.parse_args(args[1:], namespace=self)

        stream = config_eval(self.path)

        # Compiler passes are applied to the token stream in list order.
        pipeline = list()
        pipeline.append(AliasResolverPass())
        pipeline.append(PortsValidatorPass())

        if self.multiprocess:
            pipeline.append(PartitionExpanderPass())
            pipeline.append(PartitionBoundsPass())
            if self.partition:
                # Restrict the stream to the selected partition.
                pipeline.append(PartitionWorkerPass())
                partition = self.partition
                stream.append(AddTokenOp(PartitionSelectToken(partition)))
            else:
                pipeline.append(PartitionControllersPass())

        pipeline.append(ComponentsPurgePass())
        pipeline.append(DepthReductionPass(self.level))

        for compiler_step in pipeline:
            stream = compiler_step(stream)

        # Pull out the graph metadata needed for rendering.
        stream = self.connection_parser.extract(stream)
        stream = self.description_parser.extract(stream)
        stream = self.label_parser.extract(stream)
        stream = self.links_parser.extract(stream)
        stream = self.parent_parser.extract(stream)

        labels = self.label_parser.get_labelmap()
        descriptions = self.description_parser.get_descriptionmap()
        ports = self.connection_parser.get_portset()

        dg = Digraph(os.path.basename(self.path), engine='dot')

        # Walk the component trees from leaves to roots and build clusters.
        subgraphs = {}
        for child, parent in self.parent_parser.get_parentmap_toposort(reverse=True):
            if parent is not None:
                try:
                    sg = subgraphs[parent]
                except KeyError:
                    # First child of this parent: create its cluster.
                    # Graphviz treats subgraphs named "cluster_*" specially.
                    sg = Digraph('cluster_{:s}'.format(str(hash(parent))))
                    label = labels.get(parent, self._strip_angle_brackets(str(parent)))
                    tooltip = descriptions.get(parent, repr(parent) + "\n" + pformat(vars(parent)))
                    sg.attr('graph', label=label, tooltip=tooltip, color="blue")
                    subgraphs[parent] = sg

                if child in subgraphs:
                    # Child has its own cluster: nest it under the parent.
                    child_sg = subgraphs.pop(child)
                    if child in ports:
                        # Place the port inside its own subgraph if a port is
                        # also the parent of other ports.
                        child_sg.node(str(hash(child)))
                    sg.subgraph(child_sg)
                elif child in ports:
                    sg.node(str(hash(child)))

        # Remaining subgraphs have no parent cluster: attach at top level.
        for sg in subgraphs.values():
            dg.subgraph(sg)

        # Edges
        for src, sink in self.links_parser.get_links():
            dg.edge(str(hash(src)), str(hash(sink)))

        # Tooltips
        for n in ports:
            try:
                tooltip = descriptions.get(n, repr(n) + "\n" + pformat(vars(n)))
            except TypeError:
                # vars(n) fails on objects without __dict__ (e.g. slots).
                tooltip = ''
            label = labels.get(n, self._strip_angle_brackets(str(n)))
            dg.node(str(hash(n)), label=label, tooltip=tooltip)

        # NOTE(review): recent graphviz releases return bytes from
        # pipe(); verify self._out handles the printed value or decode
        # to str explicitly.
        print(dg.pipe(format='svg'), file=self._out)
        return 0