Example 1
 def test_execution_get_dftbp(self):
     """Can we execute the declared get_dftbp_* tasks?"""
     taskdict = {}
     update_taskdict(taskdict,
                     [['skpar.core.taskdict', ['get', 'sub', 'run']]])
     update_taskdict(
         taskdict, [['skpar.dftbutils', ['get_bs', 'get_meff', 'get_Ek']]])
     userinp = yaml.safe_load(self.yamlin)
     tasklist = get_tasklist(userinp['tasks'])
     tasks = initialise_tasks(tasklist, taskdict)
     #
     database = Database()
     env = {
         'workroot': '.',
     }
     for task in tasks:
         print(task)
         task(env, database)
     # 'Si.bs' should be added through the task execution
     db = database.get('Si.bs')
     self.assertAlmostEqual(db['Egap'], 1.129, places=3)
     self.assertAlmostEqual(db['Ef'], -3.0621, places=4)
     self.assertAlmostEqual(db['me_GX'], 0.935, places=3)
     self.assertAlmostEqual(db['mh_GK'], -1.891, places=3)
     self.assertAlmostEqual(db['Ec_L_0'], 0.4, places=3)
     self.assertAlmostEqual(db['Ec_G_0'], 1.6156, places=3)
     self.assertAlmostEqual(db['Ec_X_0'], 0.2025, places=3)
     self.assertAlmostEqual(db['Ec_U_0'], 0.6915, places=3)
     self.assertAlmostEqual(db['Ec_K_0'], 0.6915, places=3)
Example 2
 def test_simple(self):
     """Can we parse task declarations successfully"""
     jsondata = """ {
         "tasks": [
                 {"sub": [["./tmp/template.parameters.dat"]]} ,
                 {"run": ["cp parameters.dat value.dat", "./tmp"]} ,
                 {"get": ["value", "tmp/value.dat", "model"]}
             ]
         }
     """
     yamldata = """
         tasks:
             - sub: [[./tmp/template.parameters.dat]]
             - run: ["cp parameters.dat value.dat", ./tmp]
             - get: [value, tmp/value.dat, model]
         """
     taskdict = {}
     update_taskdict(taskdict,
                     [['skpar.core.taskdict', ['get', 'sub', 'run']]])
     yamldata = yaml.safe_load(yamldata)
     # print('yaml data')
     # pprint(yamldata)
     jsondata = json.loads(jsondata)
     # print('json data')
     # pprint(jsondata)
     # self.assertTrue(jsondata == yamldata) # fails for whatever reason
     tasklist = []
     userinp = yamldata
     tasklist = get_tasklist(userinp['tasks'])
     tasks = initialise_tasks(tasklist, taskdict)
     #
     var = 10
     database = Database()
     par = Parameter('p0', value=var)
     workroot = './'
     coreargs = {
         'workroot': workroot,
         'parametervalues': [par.value],
         'parameternames': [par.name]
     }
     try:
         shutil.rmtree('./tmp')
     except FileNotFoundError:
         pass
     os.makedirs('./tmp')
     with open('./tmp/template.parameters.dat', 'w') as template:
         template.writelines("%(p0)f\n")
     # with open('./tmp/template.parameters.dat', 'r') as template:
     #     tmplstr = template.readlines()
     # print(tmplstr)
     LOGGER.info('Executing tasks')
     for task in tasks:
         # LOGGER.info(task)
         task(coreargs, database)
     self.assertEqual(np.atleast_1d(var),
                      database.get('model', {}).get('value'))
     shutil.rmtree('./tmp')
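The template written above relies on Python's %-style named placeholders, keyed by parameter name. Conceptually, the sub task fills them in with the current parameter values and writes the result as parameters.dat, which the run task then copies to value.dat. A minimal sketch of that substitution, assuming plain %-formatting rather than the actual substitute_parameters implementation:

    template = "%(p0)f\n"
    filled = template % {'p0': 10}   # -> "10.000000\n", written out as parameters.dat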
Example 3
 def test_parse_tasks_core(self):
     """Can we read tasks well and initialise correctly?"""
     taskdict = {}
     update_taskdict(taskdict,
                     [['skpar.core.taskdict', ['sub', 'get', 'run']]])
     skparin = 'example-tasks.yaml'
     userinp = get_input(skparin)
     tasklist = get_tasklist(userinp['tasks'])
     for i, task in enumerate(tasklist):
         LOGGER.info('task %i : %s', i, task)
     tasks = initialise_tasks(tasklist, taskdict, report=True)
     self.assertEqual(len(tasks), 6)
Example 4
def parse_input(filename, verbose=True):
    """Parse input filename and return the setup
    """
    userinp = get_input(filename)
    #
    # CONFIG
    configinp = userinp.get('config', None)
    config = get_config(configinp, report=True)
    #
    # OPTIMISATION
    optinp = userinp.get('optimisation', None)
    optimisation = get_optargs(optinp)
    #
    # TASKS
    taskdict = {}
    usermodulesinp = userinp.get('usermodules', None)
    # Note the statement below emulates a yaml-like input which delivers
    # a list of [module, [list of functions]] items.
    update_taskdict(
        taskdict,
        [[coretd.__name__, list(coretd.TASKDICT.keys())]])
    # Import user tasks after the core ones, to allow potential
    # replacement of `taskdict` entries with user-defined functions
    if usermodulesinp:
        update_taskdict(taskdict, usermodulesinp)
    #
    taskinp = userinp.get('tasks', None)
    tasklist = get_tasklist(taskinp)
    check_taskdict(tasklist, taskdict)
    # do a trial initialisation in order to report what has been parsed and how
    # no assignment: the resulting task list is discarded here
    initialise_tasks(tasklist, taskdict, report=True)
    #
    # OBJECTIVES
    objectivesinp = userinp.get('objectives', None)
    objectives = set_objectives(objectivesinp, verbose=verbose)
    #
    return taskdict, tasklist, objectives, optimisation, config
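For context, a sketch of how the tuple returned by parse_input might be consumed downstream, following the pattern of Example 7; the Evaluator and Optimiser classes and the input file name are taken from that example rather than implied by parse_input itself:

    taskdict, tasklist, objectives, optimisation, config =\
        parse_input('skpar_in_optimise.yaml')
    algo, options, parameters = optimisation
    parnames = [p.name for p in parameters]
    evaluate = Evaluator(objectives, tasklist, taskdict, parnames, config)
    optimiser = Optimiser(algo, parameters, evaluate, options)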
Example 5
 def test_parsetask(self):
     """Can we parse task declarations successfully"""
     taskdict = {}
     update_taskdict(taskdict, [['skpar.core.taskdict', ['set', 'run']]])
     update_taskdict(taskdict,
                     [['skpar.dftbutils', ['get_meff', 'get_data']]])
     userinp = yaml.safe_load(self.yamldata)['tasks']
     tasklist = []
     tasklist = get_tasklist(userinp)
     tasks = initialise_tasks(tasklist, taskdict)
     #
     tasknames = ['set', 'run', 'get_data', 'get_meff']
     self.assertListEqual([task.name for task in tasks], tasknames)
     #
     functions = [
         coretd.substitute_parameters, coretd.execute,
         dftbtd.get_dftbp_data, dftbtd.get_effmasses
     ]
     self.assertListEqual([task.func for task in tasks], functions)
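Spelled out, the two assertions pin down the name-to-function mapping that update_taskdict is expected to build; a sketch inferred from this test alone, not an exhaustive listing of either module's TASKDICT:

    # taskdict entries implied by the assertions above (name -> function)
    expected = {
        'set': coretd.substitute_parameters,
        'run': coretd.execute,
        'get_data': dftbtd.get_dftbp_data,
        'get_meff': dftbtd.get_effmasses,
    }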
Example 6
 def test_twopartemplates(self):
     """Can we parse task declarations successfully"""
     yamldata = """
         tasks:
             - sub: [[./tmp/template.par1.dat, ./tmp/template.par2.dat]]
             - run: ['bash run.sh', ./tmp]
             - get: [value, tmp/values.dat, model]
         """
     taskdict = {}
     update_taskdict(taskdict,
                     [['skpar.core.taskdict', ['get', 'sub', 'run']]])
     yamldata = yaml.safe_load(yamldata)['tasks']
     tasklist = []
     tasklist = get_tasklist(yamldata)
     tasks = initialise_tasks(tasklist, taskdict)
     #
     var1 = 10
     var2 = 20
     database = Database()
     params = [Parameter('p0', value=var1), Parameter('p1', value=var2)]
     workroot = './'
     coreargs = {
         'parametervalues': [p.value for p in params],
         'parameternames': [p.name for p in params]
     }
     try:
         shutil.rmtree('./tmp')
     except FileNotFoundError:
         pass
     os.makedirs('./tmp')
     with open('./tmp/template.par1.dat', 'w') as template:
         template.writelines("%(p0)f\n")
     with open('./tmp/template.par2.dat', 'w') as template:
         template.writelines("%(p1)f\n")
     with open('./tmp/run.sh', 'w') as template:
         template.writelines('cat par*.dat > values.dat\n')
     for task in tasks:
         LOGGER.info(task)
         task(coreargs, database)
     self.assertListEqual([var1, var2],
                          list(database.get('model', {}).get('value')))
     shutil.rmtree('./tmp')
Example 7
    def test_parse_input(self):
        """Can we parse input, create an optimiser instance, and run the tasks?
        """
        filename = "skpar_in_optimise.yaml"
        taskdict, tasklist, objectives, optimisation, config =\
            parse_input(filename)
        print(taskdict)
        print(tasklist)
        workroot = config.get('workroot', None)
        templatedir = config.get('templatedir', None)
        create_workdir(workroot, templatedir)
        algo, options, parameters = optimisation
        parnames = [p.name for p in parameters]
        evaluate = Evaluator(objectives, tasklist, taskdict, parnames, config)
        optimiser = Optimiser(algo, parameters, evaluate, options)
        # initialise parameter values, pretending to be optimisation engine
        params = np.array([10.0, -2.5, 0.5, 0.05])
        for pini, par in zip(params, optimiser.parameters):
            par.value = pini
        logger.debug ("### ---------------------------------------- ###")
        logger.debug ("### ----------- Parameters ----------------- ###")
        logger.debug ("### ---------------------------------------- ###")
        for pp in optimiser.parameters:
            logger.debug (pp)

        # initialise tasks manually
        optimiser.evaluate.tasks = initialise_tasks(tasklist, taskdict,
                                                    report=True)
        env = {'workroot': workroot,
               'parameternames': parnames,
               'parametervalues': params,
               'iteration': None}
        workdir = workroot
        database = Database()

        # check task 0
        self.assertEqual(optimiser.evaluate.tasks[0].name, 'set')
        self.assertEqual(optimiser.evaluate.tasks[0].func,
                         core_taskdict.substitute_parameters)
        self.assertEqual(optimiser.evaluate.tasks[0].args,
                         [['template.parameters.dat']])
        optimiser.evaluate.tasks[0](env, database)
        parfile = os.path.abspath(os.path.join(workdir, 'parameters.dat'))
        raw = np.loadtxt(parfile, dtype=[('keys', 'S15'), ('values', 'float')])
        _values = np.array([pair[1] for pair in raw])
        _names = [pair[0].decode("utf-8") for pair in raw]
        nptest.assert_array_equal(params, _values)
        self.assertListEqual(parnames, _names)

        # check task 1
        exe = 'python3'.split()
        self.assertEqual(optimiser.evaluate.tasks[1].name, 'run')
        self.assertEqual(optimiser.evaluate.tasks[1].func,
                         core_taskdict.execute)
        self.assertEqual(optimiser.evaluate.tasks[1].args,
                         ['python3 model_poly3.py'])
        optimiser.evaluate.tasks[1](env, database)

        # check task 2
        self.assertEqual(optimiser.evaluate.tasks[2].name, 'get')
        self.assertEqual(optimiser.evaluate.tasks[2].func,
                         core_taskdict.get_model_data)
        self.assertEqual(optimiser.evaluate.tasks[2].args,
                         ['yval', 'model_poly3_out.dat', 'poly3'])
        optimiser.evaluate.tasks[2](env, database)
        modeldb = database.get('poly3') 
        self.assertTrue(modeldb is not None)
        datafile = os.path.abspath(os.path.join(workdir, 'model_poly3_out.dat'))
        dataout = np.loadtxt(datafile)
        nptest.assert_array_equal(modeldb['yval'], dataout)
        logger.debug("Model DB poly3:")
        logger.debug(database.get('poly3').items())
Example 8
    def evaluate(self, parametervalues, iteration=None):
        """Evaluate the global fitness of a given point in parameter space.

        This is the only object accessible to the optimiser, therefore only
        two arguments can be passed.

        Args:
            parametervalues (list): current point in design/parameter space
            iteration: (int or tupple): current iteration or current
                generation and individual index within generation

        Return:
            fitness (float): global fitness of the current design point
        """

        # Create individual working directory for each evaluation
        origdir = os.getcwd()
        workroot = self.config['workroot']
        if workroot is None:
            workdir = origdir
        else:
            workdir = get_workdir(iteration, workroot)
            create_workdir(workdir, self.config['templatedir'])

        # Initialise model database
        self.logger.info('Initialising ModelDataBase.')
        # The database may be something other than a dictionary, but
        # whatever object it is, it should provide:
        #     update_modeldb(modelname, datadict) -- update the model DB
        #     get_modeldb(modelname) -- return a reference to the model DB
        # The point is that every individual evaluation must have its own
        # model DB, so that evaluations can be done in parallel.
        database = Database()
        # database = {} currently works too
        # Wrap the environment in a single dict
        env = {
            'workroot': workdir,
            'logger': self.logger,
            'parameternames': self.parnames,
            'parametervalues': parametervalues,
            'iteration': iteration,
            'taskdict': self.taskdict,
            'objectives': self.objectives
        }
        # Initialise and then execute the tasks
        tasks = initialise_tasks(self.tasklist, self.taskdict, report=False)
        self.logger.info('Iteration %s', iteration)
        self.logger.info('===========================')
        if self.parnames:
            parstr = [
                '{:s}({:.4g})'.format(name, val)
                for name, val in zip(self.parnames, parametervalues)
            ]
            self.logger.info('Parameters: {:s}'.format(' '.join(parstr)))


        # TODO: do we really need to pass workdir and to os.chdir?
        # TODO: move the for loop to a function, e.g.:
        #       execute_tasks(tasks, env, database, workdir, logger)
        for i, task in enumerate(tasks):
            os.chdir(workdir)
            try:
                task(env, database)
            except:
                self.logger.critical('Task %i FAILED:\n%s', i, task)
                raise

        # Evaluate individual fitness for each objective
        objvfitness = eval_objectives(self.objectives, database)
        # Evaluate global fitness
        cost = self.costf(self.utopia, objvfitness, self.weights)
        self._msg('{:<15s}: {}\n'.format('Overall cost', cost))

        # Remove iteration-specific working dir if not needed:
        if (not self.config['keepworkdirs']) and (workroot is not None):
            destroy_workdir(workdir)
        os.chdir(origdir)

        return np.atleast_1d(cost)
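A minimal usage sketch, assuming an Evaluator instance named evaluate constructed as in Example 7: the optimisation engine supplies a trial parameter vector and an iteration index (an int, or a (generation, individual) tuple), and receives the cost back as a one-element array:

    import numpy as np

    trial = np.array([10.0, -2.5, 0.5, 0.05])   # hypothetical trial point from the engine
    cost = evaluate(trial, iteration=(3, 7))    # e.g. generation 3, individual 7
    assert cost.shape == (1,)                   # np.atleast_1d(cost) is returned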