Example no. 1
0
    def run(self):
        """
        Endless loop that waits for updates on the tube TUBE_UPDATES

        The update is a json string for a dictionary with key-values pairs
        that look like:

            * kimid : any form of the kimid that is recognized by the database package
            * priority : one of 'immediate', 'very high', 'high', 'normal', 'low', 'very low'
            * status : one of 'approved', 'pending' where pending signifies it has just been
                submitted and needs to go through VCs whereas approved is to initiate a full match
                run after verification or update

        This method never returns; it blocks on `reserve()` between messages.
        """
        # connect and grab the job thread
        self.connect()
        self.bean.watch(cf.TUBE_UPDATES)

        while True:
            self.logger.info("Director Waiting for message...")
            request = self.bean.reserve()
            self.job = request

            # bury the job immediately so the queue does not re-deliver it
            # to another worker while we are still processing it
            request.bury()

            # got a request to update a model or test
            # from the website (or other trusted place)
            if request.stats()['tube'] == cf.TUBE_UPDATES:
                # update the repository, then send it out as a job to compute
                try:
                    rsync_tools.director_approved_read()
                    self.push_jobs(simplejson.loads(request.body))
                except Exception:
                    # logger.exception records the full traceback; swallow the
                    # error so one bad message cannot kill the director loop
                    self.logger.exception("Director had an error on update")

            request.delete()
            self.job = None
Example no. 2
0
    def synchronize(self, fresh=False):
        """
        Bring a director up-to-date with the shared repository,
        updating the objects and syncing the database to the filesystem.
            * Pull the entire repository to make sure we are up to date
            * Incrementally build only the relevant objects that have changed
            * Place these items into the database (run matches, etc)

        If `fresh` is true, go ahead and rebuild everything
        During a complete re-run this function should take very little time.
        """
        if cf.DIRECTOR_NOSYNC:
            return

        # if the director is just being brought up, clear out the built
        # cache and rebuild everything from scratch. this happens before
        # rsync so as to be aggressive in synchronizing
        if fresh:
            self.logger.info("Rebuilding KIMObjects in repository...")
            self.db.clear_build()

            self.logger.info("Syncing database to filesystem...")
            self.db.remove_missing()

        # pull the repo using rsync from the shared repository
        rsync_tools.director_approved_read()

        def _all_objects():
            # chain() iterators are single-use, so build a fresh one per pass
            return chain(kimobjects.TestDriver.all(), kimobjects.ModelDriver.all(),
                         kimobjects.Test.all(), kimobjects.Model.all())

        self.logger.info("Attempting to build new KIMObjects...")
        for obj in _all_objects():
            self.db.build(obj)

        self.logger.info("Attempting to add new KIMObjects to database...")
        for obj in _all_objects():
            self.db.insert(obj)
Example no. 3
0
    def precheck(self, kimid, status='pending'):
        """
        Perform pre-checks on a new object.

        Returns a list of human-readable error strings describing every
        failed check; an empty list means the item passed precheck.
        """
        errors = []

        if cf.DIRECTOR_NOSYNC:
            errors.append(
                'Precheck failed: DIRECTOR_NOSYNC must be set to False'
            )
            self.logger.error('%r', errors)
            return errors

        approved = (status == 'approved')
        self.logger.info("Running pre-checks for KIM item %r", kimid)

        # try to actually get the kimobject.  DIRECTOR_NOSYNC is known to be
        # false here (we returned above otherwise), so always rsync.
        if approved:
            # Read from Gateway's local 'precheck' repository to local 'precheck' repository
            rsync_tools.director_approved_read(precheck=2)
        else:
            rsync_tools.director_pending_read(kimid, precheck=2)

        # Can we use our ORM?
        # NOTE: This is redundant with a check done on the Gateway, but it's not expensive
        try:
            obj = kimobjects.kim_obj(kimid, precheck=True)
        except Exception as e:
            errors.append(
                'Could not initialize KIMObjects ORM:\n%r' % e
            )
            self.logger.error('%r', errors)
            return errors

        # try to get the drivers so we can start to build, etc
        try:
            drivers = list(obj.drivers)
            self.logger.info("Retrieving drivers for %r, %r", obj, drivers)
            for driver in drivers:
                rsync_tools.director_pending_read(str(driver), precheck=2)
        except Exception as e:
            errors.append(
                'Could not find drivers associated with object:\n%r' % e
            )
            self.logger.error('%r', errors)
            return errors

        # can it be built?
        try:
            obj.make(precheck=True)
        except Exception as e:
            errors.append(
                'KIMObject could not be built using `make`:\n%r' % e
            )
            self.logger.error('%r', errors)
            return errors

        def _assert(condition):
            # raise so a failed predicate is reported by _run_checks below
            if not condition:
                raise AssertionError()

        # checks applied to every object through the ORM
        checks_orm = [
            [lambda o: o.drivers, 'Searching for drivers failed'],
            [lambda o: _assert(o.kimspec), 'Does not contain kimspec'],
            [lambda o: _assert(o.kim_api_version), 'No KIM API version specified'],
            [lambda o: _assert(o.pipeline_api_version), 'No Pipeline API version specified']
        ]

        # checks applied only to runners (tests / verification checks)
        checks_runner = [
            [lambda t: t.processed_infile(next(kimobjects.Test.all(precheck=True))),
                'Could not template pipeline.stdin file'],
            [lambda t: t.runtime_dependencies('blank'), 'Could not template dependencies file'],
            [lambda t: list(t.matches), 'Matches could not be generated, exceptions'],
            [lambda t: _assert(list(t.matches)), 'No valid matches found in system']
        ]

        # checks applied only to subjects (models)
        checks_subject = [
            [lambda t: list(t.matches), 'Matches could not be generated, errors.'],
            [lambda t: _assert(list(t.matches)), 'No valid matches found in system']
        ]

        def _run_checks(check_list, *args):
            # run every check, accumulating failures rather than stopping at
            # the first, so the submitter sees all problems at once
            for check, message in check_list:
                try:
                    check(*args)
                except Exception as e:
                    errors.append(message)
                    self.logger.error('%s:\n%r', message, e)

        _run_checks(checks_orm, obj)

        if isinstance(obj, kimobjects.Runner):
            _run_checks(checks_runner, obj)
        if isinstance(obj, kimobjects.Subject):
            _run_checks(checks_subject, obj)

        if errors:
            self.logger.error(
                "Returning errors for precheck of %r:\n%r", kimid, errors
            )
        else:
            self.logger.info("No errors found during precheck for %r", kimid)
        return errors