    def initPoolsDataFromBackend(self):
        '''
        Loads pools and workers from the appropriate backend (defined by
        settings.POOLS_BACKEND_TYPE).

        Returns True if pools and render nodes were initialized here, False if
        they still need to be loaded from another source (e.g. the database).
        '''
        try:
            if settings.POOLS_BACKEND_TYPE == "file":
                manager = FilePoolManager()
            elif settings.POOLS_BACKEND_TYPE == "ws":
                manager = WebServicePoolManager()
            elif settings.POOLS_BACKEND_TYPE == "db":
                return False
            else:
                # Unknown backend type: nothing to initialize here.
                return False
        except Exception:
            return False

        computers = manager.listComputers()

        ### recreate the pools
        poolsList = manager.listPools()
        poolsById = {}
        for poolDesc in poolsList:
            pool = Pool(id=int(poolDesc.id), name=str(poolDesc.name))
            self.dispatchTree.toCreateElements.append(pool)
            poolsById[pool.id] = pool

        ### recreate the rendernodes
        rnById = {}
        for computerDesc in computers:
            try:
                computerDesc.name = socket.getfqdn(computerDesc.name)
                ip = socket.gethostbyname(computerDesc.name)
            except socket.gaierror:
                continue
            renderNode = RenderNode(
                computerDesc.id,
                computerDesc.name + ":" + str(computerDesc.port),
                computerDesc.cpucount * computerDesc.cpucores,
                computerDesc.cpufreq, ip, computerDesc.port,
                computerDesc.ramsize, json.loads(computerDesc.properties))
            self.dispatchTree.toCreateElements.append(renderNode)
            ## add the rendernodes to the pools
            for pool in computerDesc.pools:
                poolsById[pool.id].renderNodes.append(renderNode)
                renderNode.pools.append(poolsById[pool.id])
            self.dispatchTree.renderNodes[str(renderNode.name)] = renderNode
            rnById[renderNode.id] = renderNode

        # add the pools to the dispatch tree
        for pool in poolsById.values():
            self.dispatchTree.pools[pool.name] = pool
        if self.cleanDB or not self.enablePuliDB:
            graphs = FolderNode(1, "graphs", self.dispatchTree.root, "root", 0,
                                0, 0, FifoStrategy())
            self.dispatchTree.toCreateElements.append(graphs)
            self.dispatchTree.nodes[graphs.id] = graphs
            ps = PoolShare(1, self.dispatchTree.pools["default"], graphs,
                           PoolShare.UNBOUND)
            self.dispatchTree.toCreateElements.append(ps)
        if self.enablePuliDB:
            # clean the tables pools and rendernodes (overwrite)
            self.pulidb.dropPoolsAndRnsTables()
            self.pulidb.createElements(self.dispatchTree.toCreateElements)
            self.dispatchTree.resetDbElements()

        return True
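
    # Illustrative note (an assumption, not an official API contract): judging
    # from the attribute accesses in initPoolsDataFromBackend() above, the pool
    # backends (FilePoolManager / WebServicePoolManager) are expected to expose:
    #
    #   listPools()     -> descriptors with: id, name
    #   listComputers() -> descriptors with: id, name, port, cpucount, cpucores,
    #                      cpufreq, ramsize, properties (a JSON string) and
    #                      pools (a list of pool descriptors)
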
    def __init__(self, framework):
        LOGGER = logging.getLogger('main.dispatcher')
        if self.init:
            return
        self.init = True
        self.nextCycle = time.time()

        MainLoopApplication.__init__(self, framework)

        self.threadPool = ThreadPool(16, 0, 0, None)

        #
        # Custom information about the dispatcher. This data can be
        # periodically flushed to a dedicated log file for later use.
        #
        self.cycle = 1
        self.dispatchTree = DispatchTree()
        self.licenseManager = LicenseManager()
        self.enablePuliDB = settings.DB_ENABLE
        self.cleanDB = settings.DB_CLEAN_DATA
        self.restartService = False

        self.pulidb = None
        if self.enablePuliDB:
            self.pulidb = PuliDB(self.cleanDB, self.licenseManager)

        self.dispatchTree.registerModelListeners()
        rnsAlreadyInitialized = self.initPoolsDataFromBackend()

        if self.enablePuliDB and not self.cleanDB:
            LOGGER.warning("--- Reloading database (9 steps) ---")
            prevTimer = time.time()
            self.pulidb.restoreStateFromDb(self.dispatchTree,
                                           rnsAlreadyInitialized)

            LOGGER.warning("%d jobs reloaded from database" %
                           len(self.dispatchTree.tasks))
            LOGGER.warning("Total time elapsed %s" %
                           elapsedTimeToString(prevTimer))
            LOGGER.warning("")

        LOGGER.warning("--- Checking dispatcher state (3 steps) ---")
        startTimer = time.time()
        LOGGER.warning("1/3 Update completion and status")
        self.dispatchTree.updateCompletionAndStatus()
        LOGGER.warning("    Elapsed time %s" % elapsedTimeToString(startTimer))

        prevTimer = time.time()
        LOGGER.warning("2/3 Update rendernodes")
        self.updateRenderNodes()
        LOGGER.warning("    Elapsed time %s" % elapsedTimeToString(prevTimer))

        prevTimer = time.time()
        LOGGER.warning("3/3 Validate dependencies")
        self.dispatchTree.validateDependencies()
        LOGGER.warning("    Elapsed time %s" % elapsedTimeToString(prevTimer))
        LOGGER.warning("Total time elapsed %s" %
                       elapsedTimeToString(startTimer))
        LOGGER.warning("")

        if self.enablePuliDB and not self.cleanDB:
            self.dispatchTree.toModifyElements = []

        # If no 'default' pool exists, create one.
        # Creating a Pool with id=None automatically appends it to the dispatch
        # tree's "toCreateElements" list and to its "pools" attribute.
        if 'default' not in self.dispatchTree.pools:
            pool = Pool(None, name='default')
            LOGGER.warning(
                "Default pool was not loaded from DB, creating a new default "
                "pool: %s" % pool)
        self.defaultPool = self.dispatchTree.pools['default']

        LOGGER.warning("--- Loading dispatch rules ---")
        startTimer = time.time()
        self.loadRules()
        LOGGER.warning("Total time elapsed %s" %
                       elapsedTimeToString(startTimer))
        LOGGER.warning("")

        # bound the queue size to avoid unbounded memory growth
        self.queue = Queue(maxsize=10000)

    def registerNewGraph(self, graph):
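        '''
        Registers a new job graph: builds the Task/TaskGroup objects, applies
        the dispatch rules and returns the list of nodes created for the graph.

        A minimal sketch of the expected payload, reconstructed from the keys
        read below (values are hypothetical; Task and TaskGroup entries also
        carry the extra fields consumed by _createTaskFromJSON and
        _createTaskGroupFromJSON, which are not shown here):

            graph = {
                'user': 'jdoe',
                'poolName': 'default',
                'maxRN': 10,     # optional, defaults to -1
                'root': 0,       # index of the root task in 'tasks'
                'tasks': [
                    {'type': 'TaskGroup', 'tasks': [1, 2], 'dependencies': []},
                    {'type': 'Task', 'dependencies': []},
                    # each dependency: [taskIndex, [integer status codes]]
                    {'type': 'Task', 'dependencies': [[1, [3]]]},
                ],
            }
        '''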
        user = graph['user']
        taskDefs = graph['tasks']
        poolName = graph['poolName']
        # 'maxRN' is optional; default to -1 when it is absent
        if 'maxRN' in graph:
            maxRN = int(graph['maxRN'])
        else:
            maxRN = -1

        #
        # Create objects.
        #
        tasks = [None] * len(taskDefs)
        for (index, taskDef) in enumerate(taskDefs):
            if taskDef['type'] == 'Task':
                # logger.debug("taskDef.watcherPackages = %s" % taskDef["watcherPackages"])
                # logger.debug("taskDef.runnerPackages = %s" % taskDef["runnerPackages"])
                task = self._createTaskFromJSON(taskDef, user)
            elif taskDef['type'] == 'TaskGroup':
                task = self._createTaskGroupFromJSON(taskDef, user)
            else:
                raise SyntaxError("Unknown task type %r, expected 'Task' or "
                                  "'TaskGroup'." % taskDef['type'])
            tasks[index] = task
        root = tasks[graph['root']]

        # get the pool
        try:
            pool = self.pools[poolName]
        except KeyError:
            pool = Pool(None, poolName)
            self.pools[poolName] = pool
        #
        # Rebuild full job hierarchy
        #
        for (taskDef, task) in zip(taskDefs, tasks):
            if taskDef['type'] == 'TaskGroup':
                for taskIndex in taskDef['tasks']:
                    task.addTask(tasks[taskIndex])
                    tasks[taskIndex].parent = task
        #
        # Compute dependencies for each created task or taskgroup object.
        #
        dependencies = {}
        for (taskDef, task) in zip(taskDefs, tasks):
            taskDependencies = {}
            if not isinstance(taskDef['dependencies'], list):
                raise SyntaxError(
                    "Dependencies must be a list of (taskId, [status-list]), got %r."
                    % taskDef['dependencies'])
            if not all(isinstance(i, int) and isinstance(sl, list)
                       and all(isinstance(s, int) for s in sl)
                       for (i, sl) in taskDef['dependencies']):
                raise SyntaxError(
                    "Dependencies must be a list of (taskId, [status-list]), got %r."
                    % taskDef['dependencies'])
            for (taskIndex, statusList) in taskDef['dependencies']:
                taskDependencies[tasks[taskIndex]] = statusList
            dependencies[task] = taskDependencies
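        # 'dependencies' now maps each task object to a dict of
        # {dependedOnTask: statusList}; it is handed to each rule's
        # processDependencies() further below.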
        #
        # Apply rules to generate dispatch tree nodes.
        #
        if not self.rules:
            logger.warning("graph submitted but no rule has been defined")

        unprocessedTasks = [root]
        nodes = []
        while unprocessedTasks:
            unprocessedTask = unprocessedTasks.pop(0)
            for rule in self.rules:
                try:
                    nodes += rule.apply(unprocessedTask)
                except RuleError:
                    logger.warning("rule %s failed for graph %s" %
                                   (rule, graph))
                    raise
            if isinstance(unprocessedTask, TaskGroup):
                for task in unprocessedTask:
                    unprocessedTasks.append(task)

        # create the poolshare, if any, and assign it to the node
        if pool:
            # FIXME nodes[0] may not be the root node of the graph...
            ps = PoolShare(None, pool, nodes[0], maxRN)
            # if maxRN is not -1 (i.e. not the default), mark it as user-defined
            if maxRN != -1:
                ps.userDefinedMaxRN = True

        #
        # Process dependencies
        #
        for rule in self.rules:
            rule.processDependencies(dependencies)

        for node in nodes:
            assert isinstance(node.id, int)
            self.nodes[node.id] = node

        # Initialize the number of commands in the hierarchy
        self.populateCommandCounts(nodes[0])
        return nodes