Example 1
def test():
    # Exercise the ARC messenger event calls on a hand-built dummy worker.
    # Assumes ARCMessenger is defined in the enclosing module; arc is the
    # NorduGrid ARC client Python bindings.
    import arc
    from urllib.parse import urlparse
    from pandaharvester.harvestercore.work_spec import WorkSpec
    wspec = WorkSpec()
    jobid = "gsiftp://pcoslo5.cern.ch:2811/jobs/XkNNDmultdtn1ZPzno6AuCjpABFKDmABFKDmwqyLDmABFKDm8dOcOn"
    wspec.batchID = jobid
    workAttributes = {"arcjob": {}}
    workAttributes["arcjob"]["JobID"] = wspec.batchID
    # hostname (not netloc) so the info-system port 2135 replaces the gridftp port
    workAttributes["arcjob"]["JobStatusURL"] = "ldap://{0}:2135/mds-vo-name=local,o=grid??sub?(nordugrid-job-globalid={1})".format(urlparse(jobid).hostname, jobid)
    workAttributes["arcjob"]["JobStatusInterfaceName"] = "org.nordugrid.ldapng"
    jobmanagementurl = arc.URL(wspec.batchID)
    jobmanagementurl.ChangePath("/jobs")
    workAttributes["arcjob"]["JobManagementURL"] = jobmanagementurl.str()
    workAttributes["arcjob"]["JobManagementInterfaceName"] = "org.nordugrid.gridftpjob"
    workAttributes["proxyrole"] = 'production'

    wspec.workAttributes = workAttributes
    wspec.accessPoint = '/tmp'
    wspec.mapType = WorkSpec.MT_OneToOne
    wspec.pandaid_list = [1234]
    print(wspec.workAttributes)

    messenger = ARCMessenger()
    print(messenger.events_requested(wspec))
    print(messenger.feed_events(wspec, {'event': 1234}))
    print(messenger.events_to_update(wspec))
    messenger.acknowledge_events_files(wspec)
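
The JobStatusURL is derived from the gsiftp job ID itself: urlparse exposes
both netloc (host plus the gridftp port 2811) and hostname (host only), and
the LDAP info-system URL needs the bare hostname because it targets port 2135
instead. A quick standalone check of that derivation:

from urllib.parse import urlparse

jobid = "gsiftp://pcoslo5.cern.ch:2811/jobs/XkNNDmultdtn1ZPzno6AuCjpABFKDmABFKDmwqyLDmABFKDm8dOcOn"
parsed = urlparse(jobid)
print(parsed.netloc)    # 'pcoslo5.cern.ch:2811' -- includes the gridftp port
print(parsed.hostname)  # 'pcoslo5.cern.ch'      -- what ldap://...:2135 needs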
Example 2
def single_thread_test(nObjects=3):
    # Benchmark put/get throughput against a module-level FIFO wrapper mq
    # (needs put/get/peek/size and an underlying fifo backend; see the
    # stand-in sketch after Example 3).
    time_point = time.time()
    print('clear')
    mq.fifo.clear()
    print('size', mq.size())
    time_consumed = time.time() - time_point
    print('Time consumed: ', time_consumed)

    time_point = time.time()
    for i in range(nObjects):
        workspec = WorkSpec()
        workspec.workerID = i
        data = {'random': [random.random(), random.random()]}
        workspec.workAttributes = data
        # print('put')
        mq.put(workspec)
        # print('size', mq.size())
    time_consumed = time.time() - time_point
    print('Time consumed: {0} sec ; Avg: {1} obj/sec '.format(
        time_consumed, nObjects / time_consumed))

    print('size', mq.size())

    print('peek')
    print(mq.peek())

    time_point = time.time()
    for i in range(nObjects):
        # print('get')
        obj = mq.get(timeout=3)
        # print(obj)
        # print('size', mq.size())
    time_consumed = time.time() - time_point
    print('Time consumed: {0} sec ; Avg: {1} obj/sec '.format(
        time_consumed, nObjects / time_consumed))
Example 3
def single_thread_test(nObjects=3, protective=False):
    # Same benchmark, but get() is called with the protective flag, which in
    # Harvester's FIFOs requests a protective dequeue (the object can be
    # restored if processing fails).
    time_point = time.time()
    print('clear')
    mq.fifo.clear()
    print('size', mq.size())
    time_consumed = time.time() - time_point
    print('Time consumed: ', time_consumed)

    time_point = time.time()
    for i in range(nObjects):
        workspec = WorkSpec()
        workspec.workerID = i
        data = {'random': [random.random(), random.random()]}
        workspec.workAttributes = data
        # print('put')
        mq.put(workspec)
        # print('size', mq.size())
    time_consumed = time.time() - time_point
    print('Time consumed: {0} sec ; Avg: {1} obj/sec '.format(time_consumed, nObjects/time_consumed))

    print('size', mq.size())

    print('peek')
    print(mq.peek())

    time_point = time.time()
    for i in range(nObjects):
        # print('get')
        obj = mq.get(timeout=3, protective=protective)
        # print(obj)
        # print('size', mq.size())
    time_consumed = time.time() - time_point
    print('Time consumed: {0} sec ; Avg: {1} obj/sec '.format(time_consumed, nObjects/time_consumed))
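
Examples 2 and 3 assume a module-level mq object with put/get/peek/size and an
underlying fifo backend, as provided by Harvester's FIFO plugins. For running
them standalone, a minimal in-memory stand-in could look like this (a sketch
only, not Harvester's real FIFO; timeout and protective are accepted but
ignored):

import collections
import threading

class _InMemoryFIFO(object):
    # minimal backend exposing only what the benchmarks call
    def __init__(self):
        self._deque = collections.deque()
        self._lock = threading.Lock()

    def clear(self):
        with self._lock:
            self._deque.clear()

class _InMemoryMQ(object):
    # wrapper exposing the subset of the mq interface used above
    def __init__(self):
        self.fifo = _InMemoryFIFO()

    def size(self):
        return len(self.fifo._deque)

    def put(self, obj):
        with self.fifo._lock:
            self.fifo._deque.append(obj)

    def get(self, timeout=None, protective=False):
        # non-blocking sketch: returns None when empty instead of waiting
        with self.fifo._lock:
            return self.fifo._deque.popleft() if self.fifo._deque else None

    def peek(self):
        # return the head object without removing it
        with self.fifo._lock:
            return self.fifo._deque[0] if self.fifo._deque else None

mq = _InMemoryMQ()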
Example 4
def test(jobid):
    '''Kill a job'''
    import arc
    from urllib.parse import urlparse
    from pandaharvester.harvestercore.work_spec import WorkSpec
    wspec = WorkSpec()
    wspec.batchID = jobid
    workAttributes = {"arcjob": {}}
    workAttributes["arcjob"]["JobID"] = wspec.batchID
    # hostname (not netloc) so the info-system port 2135 replaces the gridftp port
    workAttributes["arcjob"]["JobStatusURL"] = "ldap://{0}:2135/mds-vo-name=local,o=grid??sub?(nordugrid-job-globalid={1})".format(urlparse(jobid).hostname, wspec.batchID)
    workAttributes["arcjob"]["JobStatusInterfaceName"] = "org.nordugrid.ldapng"
    jobmanagementurl = arc.URL(wspec.batchID)
    jobmanagementurl.ChangePath("/jobs")
    workAttributes["arcjob"]["JobManagementURL"] = jobmanagementurl.str()
    workAttributes["arcjob"]["JobManagementInterfaceName"] = "org.nordugrid.gridftpjob"

    wspec.workAttributes = workAttributes
    print(wspec.workAttributes)

    sweeper = ARCSweeper()  # defined in the enclosing sweeper module
    print(sweeper.kill_worker(wspec))
Example 5
def test(jobid):
    '''Test checking status'''
    import arc
    from urllib.parse import urlparse
    from pandaharvester.harvestercore.work_spec import WorkSpec
    wspec = WorkSpec()
    wspec.batchID = jobid  # e.g. "gsiftp://pikolit.ijs.si:2811/jobs/HtgKDmtCe7qn4J8tmqCBXHLnABFKDmABFKDmBcGKDmABFKDm4NCTCn"
    workAttributes = {"arcjob": {}}
    workAttributes["arcjob"]["JobID"] = wspec.batchID
    # hostname (not netloc) so the info-system port 2135 replaces the gridftp port
    workAttributes["arcjob"]["JobStatusURL"] = "ldap://{0}:2135/mds-vo-name=local,o=grid??sub?(nordugrid-job-globalid={1})".format(urlparse(jobid).hostname, jobid)
    workAttributes["arcjob"]["JobStatusInterfaceName"] = "org.nordugrid.ldapng"
    jobmanagementurl = arc.URL(wspec.batchID)
    jobmanagementurl.ChangePath("/jobs")
    workAttributes["arcjob"]["JobManagementURL"] = jobmanagementurl.str()
    workAttributes["arcjob"]["JobManagementInterfaceName"] = "org.nordugrid.gridftpjob"

    wspec.workAttributes = workAttributes
    print(wspec.workAttributes)

    monitor = ARCMonitor()  # defined in the enclosing monitor module
    print(monitor.check_workers([wspec]))
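
The two test(jobid) helpers above (Examples 4 and 5) take the full gsiftp job
URL as their argument. A minimal command-line driver for either module might
look like this (a sketch; the real modules may wire this up differently):

import sys

if __name__ == '__main__':
    # usage: python <module>.py gsiftp://<host>:2811/jobs/<job-id>
    test(sys.argv[1])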
Example 6
    """
    from pandaharvester.harvestercore.queue_config_mapper import QueueConfigMapper
    queue_config_mapper = QueueConfigMapper()

    apfmon = Apfmon(queue_config_mapper)
    apfmon.create_factory()
    apfmon.create_labels()

    worker_a = WorkSpec()
    worker_a.batchID = 1
    worker_a.computingSite = 'CERN-PROD-DEV_UCORE'
    worker_a.computingElement = 'bla1'
    worker_a.workAttributes = {
        "batchLog":
        "https://aipanda024.cern.ch/condor_logs/18-07-19_09/grid.9659.0.log",
        "stdErr":
        "https://aipanda024.cern.ch/condor_logs/18-07-19_09/grid.9659.0.err",
        "stdOut":
        "https://aipanda024.cern.ch/condor_logs/18-07-19_09/grid.9659.0.out"
    }
    worker_a.pandaid_list = [1234, 5678]

    worker_b = WorkSpec()
    worker_b.batchID = 2
    worker_b.computingSite = 'CERN-PROD-DEV_UCORE'
    worker_b.computingElement = 'bla2'
    worker_b.workAttributes = {
        "batchLog":
        "https://aipanda024.cern.ch/condor_logs/18-07-19_09/grid.9659.0.log",
        "stdErr":
        "https://aipanda024.cern.ch/condor_logs/18-07-19_09/grid.9659.0.err",
        "stdOut":
Example 7
def _put_object(i_index):
    # Helper for a multi-threaded variant of the FIFO benchmark: build one
    # WorkSpec with an index-derived payload and enqueue it on mq.
    workspec = WorkSpec()
    workspec.workerID = i_index
    data = {'random': [(i_index**2) % 2**16, random.random()]}
    workspec.workAttributes = data
    mq.put(workspec)
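
A helper like this is meant to be called concurrently. A sketch of a
hypothetical multi-threaded driver (assuming the mq and WorkSpec setup from
Examples 2 and 3):

from concurrent.futures import ThreadPoolExecutor

def multi_thread_put(nObjects=100, nThreads=8):
    # hypothetical harness: enqueue nObjects WorkSpecs from a thread pool
    with ThreadPoolExecutor(max_workers=nThreads) as pool:
        list(pool.map(_put_object, range(nObjects)))
    print('size', mq.size())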