Example #1
    def test_containerDrop(self):
        dropSpecList = [{"oid":"A", "type":"plain", "storage":"memory"},
                        {"oid":"B", "type":"container", "children":["A"]}]
        a = graph_loader.createGraphFromDropSpecList(dropSpecList)[0]
        self.assertIsInstance(a, InMemoryDROP)
        self.assertEquals("A", a.oid)
        self.assertEquals("A", a.uid)
        self.assertIsNotNone(a.parent)
        b = a.parent
        self.assertIsInstance(b, ContainerDROP)
        self.assertEquals("B", b.oid)
        self.assertEquals("B", b.uid)

        # A directory container
        dropSpecList = [{"oid":"A", "type":"plain", "storage":"file", "dirname":"."},
                        {"oid":"B", "type":"container", "container":"dfms.drop.DirectoryContainer", "children":["A"], "dirname":"."}]
        a = graph_loader.createGraphFromDropSpecList(dropSpecList)[0]
        b = a.parent
        self.assertIsInstance(b, DirectoryContainer)
Example #2
    def test_consumer(self):
        dropSpecList = [{"oid":"A", "type":"plain", "storage":"memory", "consumers":["B"]},
                        {"oid":"B", "type":"app", "app":"test.test_graph_loader.DummyApp"}]
        a = graph_loader.createGraphFromDropSpecList(dropSpecList)[0]
        self.assertIsInstance(a, InMemoryDROP)
        self.assertEqual("A", a.oid)
        self.assertEqual("A", a.uid)
        self.assertEqual(1, len(a.consumers))
        b = a.consumers[0]
        self.assertIsInstance(b, DummyApp)
        self.assertEqual("B", b.oid)
        self.assertEqual("B", b.uid)
        self.assertEqual(a, b.inputs[0])
Example #3
    def test_containerDrop(self):
        dropSpecList = [{
            "oid": "A",
            "type": "plain",
            "storage": "memory"
        }, {
            "oid": "B",
            "type": "container",
            "children": ["A"]
        }]
        a = graph_loader.createGraphFromDropSpecList(dropSpecList)[0]
        self.assertIsInstance(a, InMemoryDROP)
        self.assertEqual("A", a.oid)
        self.assertEqual("A", a.uid)
        self.assertIsNotNone(a.parent)
        b = a.parent
        self.assertIsInstance(b, ContainerDROP)
        self.assertEqual("B", b.oid)
        self.assertEqual("B", b.uid)

        # A directory container
        dropSpecList = [{
            "oid": "A",
            "type": "plain",
            "storage": "file",
            "dirname": "."
        }, {
            "oid": "B",
            "type": "container",
            "container": "dfms.drop.DirectoryContainer",
            "children": ["A"],
            "dirname": "."
        }]
        a = graph_loader.createGraphFromDropSpecList(dropSpecList)[0]
        b = a.parent
        self.assertIsInstance(b, DirectoryContainer)
Example #4
    def test_consumer(self):
        dropSpecList = [{
            "oid": "A",
            "type": "plain",
            "storage": "memory",
            "consumers": ["B"]
        }, {
            "oid": "B",
            "type": "app",
            "app": "test.test_graph_loader.DummyApp"
        }]
        a = graph_loader.createGraphFromDropSpecList(dropSpecList)[0]
        self.assertIsInstance(a, InMemoryDROP)
        self.assertEqual("A", a.oid)
        self.assertEqual("A", a.uid)
        self.assertEqual(1, len(a.consumers))
        b = a.consumers[0]
        self.assertIsInstance(b, DummyApp)
        self.assertEqual("B", b.oid)
        self.assertEqual("B", b.uid)
        self.assertEqual(a, b.inputs[0])
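
The application class referenced by the "app" key in Examples #2 and #4, test.test_graph_loader.DummyApp, is resolved by its dotted path and is not shown on this page. Below is a minimal sketch of such a no-op application drop; it assumes dfms exposes a BarrierAppDROP base class with a run() hook, so the exact base class and module path are assumptions rather than something taken from the examples above.

    from dfms.drop import BarrierAppDROP  # module/class path is an assumption


    class DummyApp(BarrierAppDROP):
        """No-op application drop used only to exercise graph loading."""

        def run(self):
            # A real application would read its inputs and write its outputs;
            # the tests above only check the graph wiring, so do nothing.
            pass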
Example #5
    def deploy(self, completedDrops=[], foreach=None):
        """
        Creates the DROPs represented by all the graph specs contained in
        this session, effectively deploying them.

        When this method has finished executing, a Pyro Daemon will also be
        up and running, servicing requests to access all the DROPs
        belonging to this session.
        """

        status = self.status
        if status != SessionStates.BUILDING:
            raise InvalidSessionState("Can't deploy this session in its current status: %d" % (status))

        self.status = SessionStates.DEPLOYING

        # Create the real DROPs from the graph specs
        logger.info("Creating DROPs for session %s", self._sessionId)

        self._roots = graph_loader.createGraphFromDropSpecList(self._graph.values())
        logger.info("%d drops successfully created", len(self._graph))

        for drop,_ in droputils.breadFirstTraverse(self._roots):

            # Register them
            self._drops[drop.uid] = drop

            # Register them with the error handler
            if self._error_status_listener:
                drop.subscribe(self._error_status_listener, eventType='status')
        logger.info("Stored all drops, proceeding with further customization")

        # Start the luigi task that will make sure the graph is executed.
        # If we're not using luigi we instead subscribe a completion listener
        # to the leaf drops.
        if self._enable_luigi:
            logger.debug("Starting Luigi FinishGraphExecution task for session %s", self._sessionId)
            task = luigi_int.FinishGraphExecution(self._sessionId, self._roots)
            sch = scheduler.CentralPlannerScheduler()
            w = worker.Worker(scheduler=sch)
            w.add(task)
            workerT = threading.Thread(None, self._run, args=[w])
            workerT.daemon = True
            workerT.start()
        else:
            leaves = droputils.getLeafNodes(self._roots)
            logger.info("Adding completion listener to leaf drops")
            listener = LeavesCompletionListener(leaves, self)
            for leaf in leaves:
                if isinstance(leaf, AppDROP):
                    leaf.subscribe(listener, 'producerFinished')
                else:
                    leaf.subscribe(listener, 'dropCompleted')
            logger.info("Listener added to leaf drops")

        # We move the DROPs that we were requested to into COMPLETED.
        # InputFiredAppDROPs are considered here as having to be executed and
        # are not moved directly to COMPLETED.
        #
        # This is done in a separate iteration at the very end to make sure
        # all event listeners on all drops are already in place.
        self.trigger_drops(completedDrops)

        # Foreach
        if foreach:
            logger.info("Invoking 'foreach' on each drop")
            for drop,_ in droputils.breadFirstTraverse(self._roots):
                foreach(drop)
            logger.info("'foreach' invoked for each drop")

        # Append proxies
        logger.info("Creating %d drop proxies", len(self._proxyinfo))
        for nm, host, port, local_uid, relname, remote_uid in self._proxyinfo:
            proxy = DropProxy(nm, host, port, self._sessionId, remote_uid)
            method = getattr(self._drops[local_uid], relname)
            method(proxy, False)

        self.status = SessionStates.RUNNING
        logger.info("Session %s is now RUNNING", self._sessionId)
Example #6
    def _test_graphFromFile(self, f, socketListeners=1):
        f = pkg_resources.resource_stream("test", "graphs/%s" % (f))  # @UndefinedVariable
        self._test_graph(graph_loader.createGraphFromDropSpecList(json.load(f)), socketListeners)
Example #7
    def deploy(self, completedDrops=[]):
        """
        Creates the DROPs represented by all the graph specs contained in
        this session, effectively deploying them.

        When this method has finished executing, a Pyro Daemon will also be
        up and running, servicing requests to access all the DROPs
        belonging to this session.
        """

        status = self.status
        if status != SessionStates.BUILDING:
            raise Exception("Can't deploy this session in its current status: %d" % (status))

        self.status = SessionStates.DEPLOYING

        # Create the Pyro daemon that will serve the DROP proxies and start it
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug("Starting Pyro4 Daemon for session %s" % (self._sessionId))
        self._daemon = Pyro4.Daemon(host=self._host)
        self._daemonT = threading.Thread(target = lambda: self._daemon.requestLoop(), name="Session %s Pyro Daemon" % (self._sessionId))
        self._daemonT.daemon = True
        self._daemonT.start()

        # Create the real DROPs from the graph specs
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug("Creating DROPs for session %s" % (self._sessionId))

        self._roots = graph_loader.createGraphFromDropSpecList(self._graph.values())

        # Register them
        droputils.breadFirstTraverse(self._roots, self._registerDrop)

        # Register them with the error handler
        # TODO: We should probably merge all these breadFirstTraverse calls into
        # a single one to avoid so much iteration through the drops
        if self._error_status_listener:
            def register_error_status_listener(drop):
                drop.subscribe(self._error_status_listener, eventType='status')
            droputils.breadFirstTraverse(self._roots, register_error_status_listener)

        # We move the DROPs that we were requested to into COMPLETED.
        # InputFiredAppDROPs are considered here as having to be executed and
        # are not moved directly to COMPLETED.
        # TODO: We should possibly unify this initial triggering into a more
        #       solid concept that encompasses these two and other types of DROPs
        def triggerDrop(drop):
            if drop.uid in completedDrops:
                if isinstance(drop, InputFiredAppDROP):
                    t = threading.Thread(target=lambda:drop.execute())
                    t.daemon = True
                    t.start()
                else:
                    drop.setCompleted()
        droputils.breadFirstTraverse(self._roots, triggerDrop)

        # Start the luigi task that will make sure the graph is executed.
        # If we're not using luigi we instead subscribe a completion listener
        # to the leaf drops.
        if self._enable_luigi:
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug("Starting Luigi FinishGraphExecution task for session %s" % (self._sessionId))
            task = luigi_int.FinishGraphExecution(self._sessionId, self._roots)
            sch = scheduler.CentralPlannerScheduler()
            w = worker.Worker(scheduler=sch)
            w.add(task)
            workerT = threading.Thread(None, self._run, args=[w])
            workerT.daemon = True
            workerT.start()
        else:
            leaves = droputils.getLeafNodes(self._roots)
            logger.debug("Adding completion listener to leaf drops %r", leaves)
            listener = LeavesCompletionListener(leaves, self)
            for leaf in leaves:
                leaf.subscribe(listener, 'dropCompleted')
                leaf.subscribe(listener, 'producerFinished')

        self.status = SessionStates.RUNNING
        if logger.isEnabledFor(logging.INFO):
            logger.info("Session %s is now RUNNING" % (self._sessionId))
Example #8
    def _test_graphFromFile(self, f, socketListeners=1):
        with pkg_resources.resource_stream("test", "graphs/%s" % (f)) as f:  # @UndefinedVariable
            self._test_graph(
                graph_loader.createGraphFromDropSpecList(
                    json.load(codecs.getreader('utf-8')(f))), socketListeners)
Example #9
    def test_singleMemoryDrop(self):
        dropSpecList = [{"oid": "A", "type": "plain", "storage": "memory"}]
        a = graph_loader.createGraphFromDropSpecList(dropSpecList)[0]
        self.assertIsInstance(a, InMemoryDROP)
        self.assertEqual("A", a.oid)
        self.assertEqual("A", a.uid)
Example #10
    def test_singleMemoryDrop(self):
        dropSpecList = [{"oid":"A", "type":"plain", "storage":"memory"}]
        a = graph_loader.createGraphFromDropSpecList(dropSpecList)[0]
        self.assertIsInstance(a, InMemoryDROP)
        self.assertEqual("A", a.oid)
        self.assertEqual("A", a.uid)