Code example #1
def test_orm_testobj():
    import kimobjects
    test = kimobjects.Test(
        "LatticeConstantCubicEnergy_Al_fcc__TE_000000000000_000")
    assert len(list(test.models)) == 1
    assert test.kim_code == "LatticeConstantCubicEnergy_Al_fcc__TE_000000000000_000"
    assert test.kim_code_name == "LatticeConstantCubicEnergy_Al_fcc"
    assert test.kim_code_leader == "TE"
    assert test.kim_code_version == "000"
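The assertions above split a KIM ID into its name, two-letter leader, twelve-digit number, and three-digit version. As a rough standalone illustration (not the actual kimobjects/database parser), the same decomposition can be done with a regular expression:

import re

# Hypothetical sketch; the pipeline itself uses database.parse_kim_code for this.
KIM_ID_RE = re.compile(
    r"^(?:(?P<name>.+?)__)?(?P<leader>[A-Z]{2})_(?P<number>\d{12})_(?P<version>\d{3})$")

m = KIM_ID_RE.match("LatticeConstantCubicEnergy_Al_fcc__TE_000000000000_000")
assert m.group("name") == "LatticeConstantCubicEnergy_Al_fcc"
assert m.group("leader") == "TE"
assert m.group("number") == "000000000000"
assert m.group("version") == "000"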
Code example #2
    def run_job(self):
        """ Endless loop that awaits jobs to run """
        while True:
            with loglock:
                self.logger.info("Waiting for jobs...")
            job = self.bean.reserve()
            self.job = job

            # reserved jobs appear to be re-released after about 120 seconds, and we do
            # not want to impose an artificial time limit, so bury each job as soon as
            # we receive it
            job.bury()
            self.comm.send_msg("running", job.body)

            # update the repository, attempt to run the job and return the results to the director
            try:
                jobmsg = Message(string=job.body)
            except simplejson.JSONDecodeError:
                # message is not JSON decodeable
                with loglock:
                    self.logger.error("Did not recieve valid JSON, {}".format(job.body))
                job.delete()
                continue
            except KeyError:
                # message does not have the right keys
                with loglock:
                    self.logger.error("Did not recieve a valid message, missing key: {}".format(job.body))
                job.delete()
                continue

            self.jobmsg = jobmsg
            # check to see if this is a verifier or an actual test
            try:
                name,leader,num,version = database.parse_kim_code(jobmsg.job[0])
            except InvalidKIMID as e:
                # we were not given a valid kimid
                with loglock:
                    self.logger.error("Could not parse {} as a valid KIMID".format(jobmsg.job[0]))
                self.job_message(jobmsg, errors=e, tube=TUBE_ERRORS)
                job.delete()
                continue

            if leader == "VT" or leader == "VM":
                try:
                    with buildlock:
                        with loglock:
                            self.logger.info("rsyncing to repo %r", jobmsg.job+jobmsg.depends)
                        rsync_tools.worker_verification_read(*jobmsg.job, depends=jobmsg.depends)
                        self.make_all()

                    verifier_kcode, subject_kcode = jobmsg.job
                    verifier = kimobjects.Verifier(verifier_kcode)
                    subject  = kimobjects.Subject(subject_kcode)

                    with loglock:
                        self.logger.info("Running (%r,%r)",verifier,subject)
                    comp = compute.Computation(verifier, subject)
                    comp.run(jobmsg.jobid)

                    result = kimobjects.Result(jobmsg.jobid).results
                    with loglock:
                        self.logger.info("rsyncing results %r", jobmsg.jobid)
                    rsync_tools.worker_verification_write(jobmsg.jobid)
                    with loglock:
                        self.logger.info("sending result message back")
                    self.job_message(jobmsg, results=result, tube=TUBE_RESULTS)
                    job.delete()

                # could be that a dependency has not been met.
                # put it back on the queue to wait
                except PipelineDataMissing as e:
                    if job.stats()['age'] < 5*PIPELINE_JOB_TIMEOUT:
                        with loglock:
                            self.logger.error("Run failed, missing data.  Returning to queue... (%r)" % e)
                        job.release(delay=PIPELINE_JOB_TIMEOUT)
                    else:
                        with loglock:
                            self.logger.error("Run failed, missing data. Lifetime has expired, deleting (%r)" % e)
                        job.delete()

                # another problem has occurred.  just remove the job
                # and send the error back along the error queue
                except Exception as e:
                    with loglock:
                        self.logger.error("Run failed, deleting... %r" % e)
                    self.job_message(jobmsg, errors=e, tube=TUBE_ERRORS)
                    job.delete()
            else:
                try:
                    with buildlock:
                        with loglock:
                            self.logger.info("rsyncing to repo %r %r", jobmsg.job,jobmsg.depends)
                        rsync_tools.worker_test_result_read(*jobmsg.job, depends=jobmsg.depends)
                        self.make_all()

                    test_kcode, model_kcode = jobmsg.job
                    test = kimobjects.Test(test_kcode)
                    model = kimobjects.Model(model_kcode)

                    with loglock:
                        self.logger.info("Running (%r,%r)",test,model)
                    comp = compute.Computation(test, model)
                    comp.run(jobmsg.jobid)

                    result = kimobjects.Result(jobmsg.jobid).results
                    with loglock:
                        self.logger.info("rsyncing results %r", jobmsg.jobid)
                    rsync_tools.worker_test_result_write(jobmsg.jobid)
                    with loglock:
                        self.logger.info("sending result message back")
                    self.job_message(jobmsg, results=result, tube=TUBE_RESULTS)
                    job.delete()

                # could be that a dependency has not been met.
                # put it back on the queue to wait
                except PipelineDataMissing as e:
                    if job.stats()['age'] < 5*PIPELINE_JOB_TIMEOUT:
                        with loglock:
                            self.logger.error("Run failed, missing data.  Returning to queue... (%r)" % e)
                        job.release(delay=PIPELINE_JOB_TIMEOUT)
                    else:
                        with loglock:
                            self.logger.error("Run failed, missing data. Lifetime has expired, deleting (%r)" % e)
                        job.delete()

                # another problem has occurred.  just remove the job
                # and send the error back along the error queue
                except Exception as e:
                    with loglock:
                        self.logger.error("Run failed, deleting... %r" % e)
                    self.job_message(jobmsg, errors="%r"%e, tube=TUBE_ERRORS)
                    job.delete()

            self.job = None
            self.jobmsg = None
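The reserve/bury/delete/release calls in run_job follow the standard beanstalkd job lifecycle. A minimal standalone sketch of that lifecycle with the beanstalkc client, assuming a beanstalkd daemon on localhost:11300 (the worker above holds its connection in self.bean):

import beanstalkc

# Hypothetical connection for illustration; not the worker's own setup code.
bean = beanstalkc.Connection(host="localhost", port=11300)

job = bean.reserve()   # block until a job becomes available
job.bury()             # keep beanstalkd from re-releasing it while we work
try:
    print("got job:", job.body)   # stand-in for the actual computation
    job.delete()                  # success: remove the job permanently
except Exception:
    job.release(delay=60)         # failure: put it back on the queue to retry later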
Code example #3
    def push_jobs(self, update):
        """ Push all of the jobs that need to be done given an update """
        kimid = update['kimid']
        status = update['status']
        priority_factor = self.priority_to_number(update['priority'])

        name,leader,num,version = database.parse_kim_code(kimid)

        # try to build the kimid before sending jobs
        # if self.make_object(kimid) == 0:
        #     rsync_tools.director_build_write(kimid)
        # else:
        #     self.logger.error("Could not build %r", kimid)
        #     self.bsd.use(TUBE_ERRORS)
        #     self.bsd.put(simplejson.dumps({"error": "Could not build %r" % kimid}))
        #     return

        self.make_all()

        tests, models = [], []
        checkmatch = False
        if leader=="VT":
            # for every test, launch a verification job
            test = kimobjects.VerificationTest(kimid)
            models = list(kimobjects.Test.all())
            tests = [test]*ll(models)
        elif leader=="VM":
            # for all of the models, run a verification job
            test = kimobjects.VerificationModel(kimid)
            models = list(kimobjects.Model.all())
            tests = [test]*ll(models)
        else:
            if status == "approved":
                if leader=="TE":
                    # for all of the models, add a job
                    test = kimobjects.Test(kimid)
                    models = list(test.models)
                    tests = [test]*ll(models)
                elif leader=="MO":
                    # for all of the tests, add a job
                    model = kimobjects.Model(kimid)
                    tests = list(model.tests)
                    models = [model]*ll(tests)
                elif leader=="TD":
                    # if it is a new version of an existing test driver, hunt
                    # down all of the tests that use it and launch their
                    # corresponding jobs
                    driver = kimobjects.TestDriver(kimid)
                    temp_tests = list(driver.tests)
                    models = []
                    tests = []
                    for t in temp_tests:
                        tmodels = list(t.models)
                        if len(tmodels) > 0:
                            models.extend(tmodels)
                            tests.extend([t]*ll(tmodels))

                elif leader=="MD":
                    # if this is a new version, hunt down all of the models
                    # that rely on it and recompute their results
                    driver = kimobjects.ModelDriver(kimid)
                    temp_models = list(driver.models)
                    tests = []
                    models = []
                    for m in temp_models:
                        mtests = list(m.tests)
                        if len(mtests) > 0:
                            tests.extend(mtests)
                            models.extend([m]*ll(mtests))
                else:
                    self.logger.error("Tried to update an invalid KIM ID!: %r",kimid)
                checkmatch = True
            if status == "pending":
                if leader=="TE":
                    # a pending test
                    rsync_tools.director_test_verification_read(kimid)
                    self.make_all()

                    # run against all test verifications
                    tests = list(kimobjects.VerificationTest.all())
                    models = [kimobjects.Test(kimid, search=False)]*ll(tests)
                elif leader=="MO":
                    # a pending model
                    rsync_tools.director_model_verification_read(kimid)
                    self.make_all()

                    # run against all model verifications
                    tests = list(kimobjects.VerificationModel.all())
                    models = [kimobjects.Model(kimid, search=False)]*ll(tests)

                elif leader=="TD":
                    # a pending test driver
                    pass
                elif leader=="MD":
                    # a pending model driver
                    pass
                else:
                    self.logger.error("Tried to update an invalid KIM ID!: %r",kimid)
                checkmatch = False

        if checkmatch:
            for test, model in zip(tests,models):
                if kimapi.valid_match(test,model):
                    priority = int(priority_factor*database.test_model_to_priority(test,model) * 1000000)
                    self.check_dependencies_and_push(test,model,priority,status)
        else:
            for test, model in zip(tests,models):
                priority = int(priority_factor*database.test_model_to_priority(test,model) * 1000000)
                self.check_dependencies_and_push(test,model,priority,status)
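Throughout push_jobs a single object is paired with an entire collection by replicating it ([obj]*ll(collection), where ll is presumably a len-like helper) and then zipping the two lists. A tiny standalone sketch of that pairing pattern, with made-up strings in place of kimobjects instances:

# Hypothetical KIM codes for illustration only.
test = "LatticeConstantCubicEnergy_Al_fcc__TE_000000000000_000"
models = ["ExampleModel_Al__MO_000000000000_000",
          "ExampleModel_Al__MO_000000000001_000"]

tests = [test] * len(models)        # replicate the single test once per model
for t, m in zip(tests, models):     # yields one (test, model) pair per job
    print(t, m)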
Code example #4
def test_orm_testobj_driver():
    import kimobjects
    test = kimobjects.Test(
        "LatticeConstantCubicEnergy_Ar_fcc__TE_000000000001_000")
    assert "LatticeConstantCubicEnergy__TD_000000000000_000" == next(
        test.test_drivers).kim_code
Code example #5
def test_testobj2():
    import kimobjects
    test = kimobjects.Test(
        "LatticeConstantCubicEnergy_Al_fcc__TE_000000000000_000")
    test.infile.readlines()
Code example #6
    def push_jobs(self, update):
        """ Push all of the jobs that need to be done given an update """
        self.make_all()

        kimid = update['kimid']
        status = update['status']
        priority_factor = self.priority_to_number(update['priority'])

        if database.isuuid(kimid):
            priority = int(priority_factor * 1000000)
            self.check_dependencies_and_push(kimid, priority, status)
            return

        name, leader, num, version = database.parse_kim_code(kimid)

        tests, models = [], []
        checkmatch = False
        if leader == "VT":
            # for every test launch
            test = kimobjects.TestVerification(kimid)
            models = list(kimobjects.Test.all())
            tests = [test] * ll(models)
        elif leader == "VM":
            # for all of the models, run a job
            test = kimobjects.ModelVerification(kimid)
            models = list(kimobjects.Model.all())
            tests = [test] * ll(models)
        else:
            if status == "approved":
                if leader == "TE":
                    # for all of the models, add a job
                    test = kimobjects.Test(kimid)
                    models = list(test.models)
                    tests = [test] * ll(models)
                elif leader == "MO":
                    # for all of the tests, add a job
                    model = kimobjects.Model(kimid)
                    tests = list(model.tests)
                    models = [model] * ll(tests)
                elif leader == "TD":
                    # if it is a new version of an existing test driver, hunt
                    # down all of the tests that use it and launch their
                    # corresponding jobs
                    driver = kimobjects.TestDriver(kimid)
                    temp_tests = list(driver.tests)
                    models = []
                    tests = []
                    for t in temp_tests:
                        tmodels = list(t.models)
                        if len(tmodels) > 0:
                            models.extend(tmodels)
                            tests.extend([t] * ll(tmodels))

                elif leader == "MD":
                    # if this is a new version, hunt down all of the models
                    # that rely on it and recompute their results
                    driver = kimobjects.ModelDriver(kimid)
                    temp_models = list(driver.models)
                    tests = []
                    models = []
                    for m in temp_models:
                        mtests = list(m.tests)
                        if len(mtests) > 0:
                            tests.extend(mtests)
                            models.extend([m] * ll(mtests))
                else:
                    self.logger.error("Tried to update an invalid KIM ID!: %r",
                                      kimid)
                checkmatch = True
            if status == "pending":
                rsync_tools.director_pending_read(kimid)
                self.make_all()

                if leader == "TE":
                    # run against all test verifications
                    tests = list(kimobjects.TestVerification.all())
                    models = [kimobjects.Test(kimid, search=False)] * ll(tests)
                elif leader == "MO":
                    # run against all model verifications
                    tests = list(kimobjects.ModelVerification.all())
                    models = [kimobjects.Model(kimid, search=False)
                              ] * ll(tests)
                elif leader == "TD":
                    # a pending test driver
                    pass
                elif leader == "MD":
                    # a pending model driver
                    pass
                else:
                    self.logger.error("Tried to update an invalid KIM ID!: %r",
                                      kimid)
                checkmatch = False

        for test, model in zip(tests, models):
            if not checkmatch or kimapi.valid_match(test, model):
                priority = int(priority_factor *
                               database.test_model_to_priority(test, model) *
                               1000000)
                self.check_dependencies_and_push((test, model), priority,
                                                 status)
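The priority handed to check_dependencies_and_push is the product of the update's priority factor, the per-pair weight from database.test_model_to_priority, and a fixed scale of 1,000,000. A quick illustration with made-up numbers (the real mappings live in priority_to_number and the database module):

# Hypothetical values for illustration only.
priority_factor = 10        # e.g. what priority_to_number might return for an urgent update
pair_weight = 0.5           # e.g. what database.test_model_to_priority might return
priority = int(priority_factor * pair_weight * 1000000)
print(priority)             # 5000000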