Example #1
    def testSubmitRequest(self):
        log = logging.getLogger('testSubmitRequest')

        j = Jobs()
        j1Data = {
            'exec': '/bin/date',
            'args': ['1', '2'],
            'stdin': 'std.in',
            'stdout': 'std.out',
            'stderr': 'std.err',
            'wd': 'wd_path',
            'wt': '2h',
            'numNodes': {
                'min': 2,
                'max': 3
            },
            'numCores': {
                'min': 1,
                'max': 2
            },
            'iterate': [1, 10],
            'after': 'j2'
        }
        j.add(name='j1', **j1Data)

        req = json.dumps({'request': 'submit', 'jobs': j.jobs()})

        log.debug('submit request: %s' % str(req))

        req = Request.Parse(json.loads(req))
Example #2
    def testJobConversion(self):
        j = Jobs()

        log = logging.getLogger('testJobConversion')

        # create a simple job description
        j1Data = {
            'exec': '/bin/date',
            'args': ['1', '2'],
            'stdin': 'std.in',
            'stdout': 'std.out',
            'stderr': 'std.err',
            'wd': 'wd_path',
            'wt': '2h',
            'numNodes': {
                'min': 2,
                'max': 3
            },
            'numCores': {
                'min': 1,
                'max': 2
            },
            'iterate': [1, 10],
            'after': 'j2'
        }
        j.add(j1Data, name='j1')
        self.assertEqual(len(j.jobNames()), 1)
        self.assertEqual(j.jobNames()[0], 'j1')

        # get document with standard descriptions
        stdJobs = j.jobs()
        log.debug('got output document: %s' % (str(stdJobs)))
        self.assertEqual(len(stdJobs), 1)
        self.assertEqual(stdJobs[0]['name'], 'j1')

        # compare the standard job description with the original, simple one
        stdJob = stdJobs[0]
        self.assertEqual(stdJob['execution']['exec'], j1Data['exec'])
        self.assertEqual(len(stdJob['execution']['args']), len(j1Data['args']))
        self.assertEqual(stdJob['execution']['args'], j1Data['args'])
        self.assertEqual(stdJob['execution']['stdin'], j1Data['stdin'])
        self.assertEqual(stdJob['execution']['stdout'], j1Data['stdout'])
        self.assertEqual(stdJob['execution']['stderr'], j1Data['stderr'])
        self.assertEqual(stdJob['execution']['wd'], j1Data['wd'])
        self.assertEqual(stdJob['resources']['wt'], j1Data['wt'])
        self.assertEqual(stdJob['resources']['numCores'], j1Data['numCores'])
        self.assertEqual(stdJob['resources']['numNodes'], j1Data['numNodes'])
        self.assertEqual(stdJob['iterate'], j1Data['iterate'])
        self.assertEqual(stdJob['dependencies']['after'], j1Data['after'])

        # reverse conversion - standard -> simple
        j1Data_conv_name, j1Data_conv = j.convertStdToSimple(stdJob)
        self.assertEqual(j1Data_conv_name, 'j1')
        self.assertEqual(j1Data_conv, j1Data)
Example #3
    def __submit_jobs(self, campaign, submit_order):

        print("Starting submission of tasks to QCG Pilot Job Manager")
        if submit_order == SubmitOrder.RUN_ORIENTED_CONDENSED:
            for run in campaign.list_runs():
                self._qcgpjm.submit(Jobs().addStd(
                    self._get_encoding_and_exec_task(campaign, run)))

        elif submit_order == SubmitOrder.RUN_ORIENTED:
            for run in campaign.list_runs():
                self._qcgpjm.submit(Jobs().addStd(
                    self._get_encoding_task(campaign, run)))
                self._qcgpjm.submit(Jobs().addStd(
                    self._get_exec_task(campaign, run)))

        elif submit_order == SubmitOrder.PHASE_ORIENTED:
            for run in campaign.list_runs():
                self._qcgpjm.submit(Jobs().addStd(
                    self._get_encoding_task(campaign, run)))
            for run in campaign.list_runs():
                self._qcgpjm.submit(Jobs().addStd(
                    self._get_exec_task(campaign, run)))

        elif submit_order == SubmitOrder.EXEC_ONLY:
            for run in campaign.list_runs():
                self._qcgpjm.submit(Jobs().addStd(
                    self._get_exec_only_task(campaign, run)))
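For context, here is a minimal sketch of the SubmitOrder enum that __submit_jobs dispatches on; the member names are taken from the branches above, but the actual EasyVVUQ-QCGPJ definition may differ:

# hypothetical sketch of SubmitOrder; member names match the branches above
from enum import Enum


class SubmitOrder(Enum):
    RUN_ORIENTED_CONDENSED = 1  # one combined encoding+execution task per run
    RUN_ORIENTED = 2            # encoding then execution, run by run
    PHASE_ORIENTED = 3          # all encoding tasks first, then all executions
    EXEC_ONLY = 4               # submit only the execution tasks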
Example #4
    def test_JobValidation(self):
        j = Jobs()

        try:
            j.add(name='j1', args=['/bin/date'])
            self.fail('Missing exec name accepted')
        except InvalidJobDescription:
            pass
        self.assertEqual(len(j.jobNames()), 0)

        try:
            j.addStd(name='j1')
            self.fail('Missing exec name accepted')
        except InvalidJobDescription:
            pass
        self.assertEqual(len(j.jobNames()), 0)

        try:
            j.addStd(execution={'exec': '/bin/date'})
            self.fail('Missing job name accepted')
        except InvalidJobDescription:
            pass
        self.assertEqual(len(j.jobNames()), 0)
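The try/fail/except pattern above can be written more compactly with unittest's assertRaises context manager; a short sketch assuming the same Jobs and InvalidJobDescription API:

        # equivalent check using assertRaises (sketch, same API as above)
        with self.assertRaises(InvalidJobDescription):
            j.add(name='j1', args=['/bin/date'])  # 'exec' is missing
        self.assertEqual(len(j.jobNames()), 0)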
Example #5
    def test_JobNameUniq(self):
        j = Jobs()

        # validate job name uniqueness
        j.add(name='j1', exec='/bin/date')

        self.assertEqual(len(j.jobNames()), 1)
        self.assertEqual(j.jobNames()[0], 'j1')

        try:
            j.add(name='j1', exec='/bin/date')
            self.fail("Duplicated job names in job list")
        except InvalidJobDescription:
            pass

        try:
            j.addStd(name='j1', execution={'exec': '/bin/date'})
            self.fail("Duplicated job names in job list")
        except InvalidJobDescription:
            pass

        j.remove('j1')
        j.addStd(name='j1', execution={'exec': '/bin/date'})

        self.assertEqual(len(j.jobNames()), 1)
        self.assertEqual(j.jobNames()[0], 'j1')

        try:
            j.add(name='j1', exec='/bin/date')
            self.fail("Duplicated job names in job list")
        except InvalidJobDescription:
            pass
Example #6
    def testSerialization(self):
        log = logging.getLogger('testSerialization')

        j1 = Jobs()
        j1Data = {
            'name': 'j1',
            'exec': '/bin/date',
            'args': ['1', '2'],
            'stdin': 'std.in',
            'stdout': 'std.out',
            'stderr': 'std.err',
            'wd': 'wd_path',
            'wt': '2h',
            'numNodes': {
                'min': 2,
                'max': 3
            },
            'numCores': {
                'min': 1,
                'max': 2
            },
            'iterate': [1, 10],
            'after': 'j2'
        }
        j1.add(**j1Data)
        j2Data = {
            'name': 'j2',
            'exec': '/bin/echo',
            'args': ['--verbose'],
            'stdin': 'echo.in',
            'stdout': 'echo.out',
            'stderr': 'echo.err',
            'wd': 'echo.wd',
            'wt': '10m',
            'numNodes': {
                'exact': 1
            },
            'numCores': {
                'exact': 4
            },
            'iterate': [1, 2],
            'after': 'j1'
        }
        j1.add(**j2Data)

        j1.saveToFile('jobs.json')

        j2 = Jobs()
        j2.loadFromFile('jobs.json')

        self.assertEqual(j1.jobNames(), j2.jobNames())
        self.assertEqual(j1.jobs(), j2.jobs())
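The file produced by saveToFile can also be fed back into a running manager; a minimal sketch, assuming a manager instance m as in the later examples:

# load the serialized job descriptions and submit them (hypothetical usage)
restored = Jobs()
restored.loadFromFile('jobs.json')
ids = m.submit(restored)
m.wait4(ids)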
Example #7
from qcg.appscheduler.api.manager import Manager
from qcg.appscheduler.api.manager import LocalManager
from qcg.appscheduler.api.job import Jobs

m = LocalManager(cfg={'log_level': 'DEBUG'}, server_args=['--log', 'debug'])
# m = Manager(cfg={'log_level': 'DEBUG'}, server_args=['--log', 'debug'])


# get available resources
print("available resources:\n%s\n" % str(m.resources()))

# submit jobs and save their names in 'ids' list
jobs = Jobs()

$submitted_jobs_list

ids = m.submit(jobs)

# wait until the submitted jobs finish
m.wait4(ids)

# get detailed information about the submitted and finished jobs
print("jobs details:\n%s\n" % str(m.info(ids)))

m.finish()
m.stopManager()
m.cleanup()
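$submitted_jobs_list above is a template placeholder left unexpanded here; a hypothetical substitution, modeled on the add() calls used elsewhere in these examples, could look like:

# hypothetical content for the $submitted_jobs_list placeholder
jobs.add(name='date', exec='/bin/date', stdout='date.stdout')
jobs.add(name='host', exec='/bin/hostname', args=['--fqdn'], stdout='host.stdout')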
Example #8
            "args": exec_args,
            "wd": cwd,
            "stdout": my_campaign.campaign_dir + '/execute_' + key + '.stdout',
            "stderr": my_campaign.campaign_dir + '/execute_' + key + '.stderr'
        },
        "resources": {
            "numCores": {
                "exact": 1
            }
        },
        "dependencies": {
            "after": ["encode_" + key]
        }
    }

    m.submit(Jobs().addStd(encode_task))
    m.submit(Jobs().addStd(execute_task))

# Wait for completion of all PJ tasks and terminate the PJ manager
print(">>> Wait for completion of all PJ tasks")
m.wait4all()
m.finish()
m.stopManager()
m.cleanup()

print(">>> Syncing state of campaign after execution of PJ")


def update_status(run_id, run_data):
    my_campaign.campaign_db.set_run_statuses([run_id],
                                             uq.constants.Status.ENCODED)
Example #9
import time

from qcg.appscheduler.api.manager import Manager
from qcg.appscheduler.api.job import Jobs

#m = Manager("tcp://127.0.0.1:5555")
m = Manager()

print("available resources:\n%s\n" % str(m.resources()))
print("submited jobs:\n%s\n" % str(m.list().names()))

#j = Jobs()
#j.add( 'j1', { 'exec': '/bin/date' } )

ids = m.submit(Jobs()
               .add(name='j1', exec='/bin/date', stdout='j1.stdout')
               .add(name='j2', exec='/bin/hostname', args=['--fqdn'], stdout='j2.stdout'))

status = m.status(ids)
status = m.status('j1')

time.sleep(2)

status = m.status(ids)

m.wait4all()
#m.wait4(ids)
#info = m.info(ids)

m.remove(ids)
Example #10
def test_cooling_pj():
    print("Job directory: " + jobdir)
    print("Temporary directory: " + tmpdir)

    # ---- CAMPAIGN INITIALISATION ---
    print("Initializing Campaign")
    # Set up a fresh campaign called "cooling"
    my_campaign = uq.Campaign(name='cooling', work_dir=tmpdir)

    # Define parameter space
    params = {
        "temp_init": {
            "type": "float",
            "min": 0.0,
            "max": 100.0,
            "default": 95.0
        },
        "kappa": {
            "type": "float",
            "min": 0.0,
            "max": 0.1,
            "default": 0.025
        },
        "t_env": {
            "type": "float",
            "min": 0.0,
            "max": 40.0,
            "default": 15.0
        },
        "out_file": {
            "type": "string",
            "default": "output.csv"
        }
    }

    output_filename = params["out_file"]["default"]
    output_columns = ["te", "ti"]

    # Create an encoder, decoder and collation element for PCE test app
    encoder = uq.encoders.GenericEncoder(template_fname=jobdir +
                                         '/tests/cooling/cooling.template',
                                         delimiter='$',
                                         target_filename='cooling_in.json')

    decoder = uq.decoders.SimpleCSV(target_filename=output_filename,
                                    output_columns=output_columns,
                                    header=0)

    collater = uq.collate.AggregateSamples(average=False)

    # Add the PCE app (automatically set as current app)
    my_campaign.add_app(name="cooling",
                        params=params,
                        encoder=encoder,
                        decoder=decoder,
                        collater=collater)

    vary = {"kappa": cp.Uniform(0.025, 0.075), "t_env": cp.Uniform(15, 25)}

    # Create the sampler
    if uqmethod == 'pce':
        my_sampler = uq.sampling.PCESampler(vary=vary, polynomial_order=2)
    else:
        my_sampler = uq.sampling.QMCSampler(vary=vary, n_samples=10)

    # Associate the sampler with the campaign
    my_campaign.set_sampler(my_sampler)

    # Will draw all (of the finite set of samples)
    my_campaign.draw_samples()

    # ---- QCG PILOT JOB INITIALISATION ---
    # set QCG-PJ temp directory
    qcgpj_tempdir = mkdtemp(None, ".qcgpj-", my_campaign.campaign_dir)

    # establish available resources
    cores = 4

    # switch on debugging of QCGPJ API (client part)
    client_conf = {
        'log_file': qcgpj_tempdir + '/api.log',
        'log_level': 'DEBUG'
    }

    # create local LocalManager (service part)
    m = LocalManager(
        ['--log', 'debug', '--nodes',
         str(cores), '--wd', qcgpj_tempdir], client_conf)

    # The following lines can be used to run the test against a separate (non-local) instance of PJManager:
    #
    # get available resources
    # res = m.resources()
    # remove all jobs if they are already in PJM
    # (required when executed using the same QCG-Pilot Job Manager)
    # m.remove(m.list().keys())

    print("Available resources:\n%s\n" % str(m.resources()))

    # ---- EXECUTION ---
    # Execute encode -> execute for each run using QCG-PJ
    print("Starting submission of tasks to QCG Pilot Job Manager")
    for run in my_campaign.list_runs():

        key = run[0]
        run_dir = run[1]['run_dir']

        enc_args = [
            my_campaign.db_type, my_campaign.db_location, 'FALSE', "cooling",
            "cooling", key
        ]

        exec_args = [
            run_dir, 'easyvvuq_app',
            'python3 ' + jobdir + "/tests/cooling/cooling_model.py",
            "cooling_in.json"
        ]

        encode_task = {
            "name": 'encode_' + key,
            "execution": {
                "exec": 'easyvvuq_encode',
                "args": enc_args,
                "wd": my_campaign.campaign_dir,
                "stdout":
                my_campaign.campaign_dir + '/encode_' + key + '.stdout',
                "stderr":
                my_campaign.campaign_dir + '/encode_' + key + '.stderr'
            },
            "resources": {
                "numCores": {
                    "exact": 1
                }
            }
        }

        execute_task = {
            "name": 'execute_' + key,
            "execution": {
                "exec": 'easyvvuq_execute',
                "args": exec_args,
                "wd": my_campaign.campaign_dir,
                "stdout":
                my_campaign.campaign_dir + '/execute_' + key + '.stdout',
                "stderr":
                my_campaign.campaign_dir + '/execute_' + key + '.stderr'
            },
            "resources": {
                "numCores": {
                    "exact": 1
                }
            },
            "dependencies": {
                "after": ["encode_" + key]
            }
        }

        m.submit(Jobs().addStd(encode_task))
        m.submit(Jobs().addStd(execute_task))

    # wait for completion of all PJ tasks and terminate the PJ manager
    m.wait4all()
    m.finish()
    m.stopManager()
    m.cleanup()

    print("Syncing state of campaign after execution of PJ")

    def update_status(run_id, run_data):
        my_campaign.campaign_db.set_run_statuses([run_id],
                                                 uq.constants.Status.ENCODED)

    my_campaign.call_for_each_run(update_status,
                                  status=uq.constants.Status.NEW)

    print("Collating results")
    my_campaign.collate()

    # Post-processing analysis
    print("Making analysis")
    if uqmethod == 'pce':
        analysis = uq.analysis.PCEAnalysis(sampler=my_sampler,
                                           qoi_cols=output_columns)
    else:
        analysis = uq.analysis.QMCAnalysis(sampler=my_sampler,
                                           qoi_cols=output_columns)

    my_campaign.apply_analysis(analysis)

    results = my_campaign.get_last_analysis()

    # Get Descriptive Statistics
    stats = results['statistical_moments']['te']

    print("Processing completed")
    return stats
Example #11
def test_pce_pj(tmpdir):

    print("Running in directory: " + cwd)

    # establish available resources
    cores = 1

    # set location of log file
    # client_conf = {'log_file': tmpdir.join('api.log'), 'log_level': 'DEBUG'}

    # switch on debugging (by default in api.log file)
    client_conf = {'log_level': 'DEBUG'}

    # create the local LocalManager (service part)
    m = LocalManager(['--nodes', str(cores)], client_conf)

    # The following lines can be used to run the test against a separate (non-local) instance of PJManager:
    #
    # get available resources
    # res = m.resources()
    # remove all jobs if they are already in PJM
    # (required when executed using the same QCG-Pilot Job Manager)
    # m.remove(m.list().keys())

    print("Available resources:\n%s\n" % str(m.resources()))

    print("Initializing Camapign")

    # Set up a fresh campaign called "pce"
    my_campaign = uq.Campaign(name='pce', work_dir=tmpdir)

    # Define parameter space
    params = {
        "kappa": {
            "type": "real",
            "min": "0.0",
            "max": "0.1",
            "default": "0.025"
        },
        "t_env": {
            "type": "real",
            "min": "0.0",
            "max": "40.0",
            "default": "15.0"
        },
        "out_file": {
            "type": "str",
            "default": "output.csv"
        }
    }

    output_filename = params["out_file"]["default"]
    output_columns = ["te", "ti"]

    # Create an encoder, decoder and collation element for PCE test app
    encoder = uq.encoders.GenericEncoder(template_fname=pce_app_dir +
                                         '/pce.template',
                                         delimiter='$',
                                         target_filename='pce_in.json')

    decoder = uq.decoders.SimpleCSV(target_filename=output_filename,
                                    output_columns=output_columns,
                                    header=0)

    # Add the PCE app (automatically set as current app)
    my_campaign.add_app(name="pce",
                        params=params,
                        encoder=encoder,
                        decoder=decoder)

    # Create a collation element for this campaign
    collater = uq.collate.AggregateSamples(average=False)
    my_campaign.set_collater(collater)

    # Create the sampler
    vary = {"kappa": cp.Uniform(0.025, 0.075), "t_env": cp.Uniform(15, 25)}

    my_sampler = uq.sampling.PCESampler(vary=vary, polynomial_order=1)

    # Associate the sampler with the campaign
    my_campaign.set_sampler(my_sampler)

    # Will draw all (of the finite set of samples)
    my_campaign.draw_samples()

    # Create & save PJ configurator
    print("Creating configuration for QCG Pilot Job Manager")
    PJConfigurator(my_campaign).save()

    # Execute encode -> execute for each run using QCG-PJ
    print("Starting submission of tasks to QCG Pilot Job Manager")
    for key in my_campaign.list_runs():
        encode_job = {
            "name": 'encode_' + key,
            "execution": {
                "exec": 'easyvvuq_encode',
                "args": [my_campaign.campaign_dir, key],
                "wd": cwd,
                "env": {
                    "EASYPJ_CONF": easypj_conf
                },
            },
            "resources": {
                "numCores": {
                    "exact": 1
                }
            }
        }

        execute_job = {
            "name": 'execute_' + key,
            "execution": {
                "exec":
                'easyvvuq_execute',
                "args": [
                    my_campaign.campaign_dir, key, 'easyvvuq_app',
                    pce_app_dir + "/pce_model.py", "pce_in.json"
                ],
                "wd":
                cwd,
                "env": {
                    "EASYPJ_CONF": easypj_conf
                },
            },
            "resources": {
                "numCores": {
                    "exact": 1
                }
            },
            "dependencies": {
                "after": ["encode_" + key]
            }
        }

        m.submit(Jobs().addStd(encode_job))
        m.submit(Jobs().addStd(execute_job))

    print("Waiting for completion of all QCG PJ tasks")
    # wait for completion of all PJ tasks and terminate the PJ manager
    m.wait4all()
    m.finish()
    m.stopManager()
    m.cleanup()

    print("Collating results")
    my_campaign.collate()

    # Update after here

    # Post-processing analysis
    print("Making analysis")
    pce_analysis = uq.analysis.PCEAnalysis(sampler=my_sampler,
                                           qoi_cols=output_columns)

    my_campaign.apply_analysis(pce_analysis)

    results = my_campaign.get_last_analysis()

    # Get Descriptive Statistics
    stats = results['statistical_moments']['te']
    sobols = results['sobols_first']['te']

    print("Stats: ")
    print(stats)
    print("Sobols: ")
    print(sobols)
    print("Processing completed")
Example #12
import zmq
import time

from qcg.appscheduler.api.manager import Manager
from qcg.appscheduler.api.job import Jobs

#m = Manager("tcp://127.0.0.1:5555")
m = Manager(cfg={'log_level': 'DEBUG', 'poll_delay': 1})

print("available resources:\n%s\n" % str(m.resources()))
print("submited jobs:\n%s\n" % str(m.list()))

#j = Jobs()
#j.add( 'j1', { 'exec': '/bin/date' } )

ids = m.submit(Jobs()
               .add(name='msleep2', exec='/bin/env', stdout='env.stdout')
               .add(name='echo', exec='/bin/date', stdout='date.stdout'))

m.list()
m.status(ids)
m.info(ids)

m.wait4(ids)

m.remove(ids)

m.finish()
Example #13
import zmq
import time

from qcg.appscheduler.api.manager import Manager
from qcg.appscheduler.api.job import Jobs

#m = Manager("tcp://127.0.0.1:5555")
m = Manager()

print("available resources:\n%s\n" % str(m.resources()))
print("submited jobs:\n%s\n" % str(m.list().names()))

#j = Jobs()
#j.add( 'j1', { 'exec': '/bin/date' } )

ids = m.submit(Jobs()
               .add(name='j_${it}', iterate=[0, 100], exec='/bin/sleep', args=['2s']))

#status = m.status(ids)

#time.sleep(2)

m.wait4all()
#m.wait4(ids)
#info = m.info(ids)

#m.remove(ids)

m.finish()

#time.sleep(1)
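The ${it} placeholder in the job name is substituted with the iteration index from the iterate range, so individual iterations should be addressable by name; a hedged sketch, using the name-based status query shown in Example #9:

# assuming iterate=[0, 100] expands to names 'j_0' ... 'j_99' (sketch)
status = m.status('j_0')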