Example #1
    def start_agent(self, agent_uuid):
        self.logit('Starting agent {}'.format(agent_uuid))
        self.logit("VOLTTRON_HOME SETTING: {}".format(
            self.env['VOLTTRON_HOME']))
        cmd = ['volttron-ctl']
        cmd.extend(['start', agent_uuid])
        p = Popen(cmd, env=self.env,
                  stdout=sys.stdout, stderr=sys.stderr)
        p.wait()

        # Confirm agent running
        cmd = ['volttron-ctl']
        cmd.extend(['status', agent_uuid])
        res = subprocess.check_output(cmd, env=self.env)
        # 776 TODO: Timing issue where check fails
        time.sleep(.1)
        self.logit("Subprocess res is {}".format(res))
        assert 'running' in res
        pidpos = res.index('[') + 1
        pidend = res.index(']')
        pid = int(res[pidpos: pidend])

        assert psutil.pid_exists(pid), \
            "The pid associated with agent {} does not exist".format(pid)

        self.started_agent_pids.append(pid)
        return pid
Example #2
def request_with_curl(method, url, params, headers, json):
    if params:
        url += "?" + "&".join("%s=%s" % (k, _quote(v))
                              for k, v in params.items())
    cmd = [
        "curl", "-s", "--connect-timeout", "3", "--max-time", "30", "-X",
        method, url
    ]
    for kv in headers.items():
        cmd.extend(["-H", "%s: %s" % kv])
    if json:
        cmd.extend(["-H", "Content-Type: application/json; charset=utf8"])
        cmd.extend(["--data-raw", dumps(json)])
    #print(" ".join(('"' + c + '"') if ' ' in c else c for c in cmd))
    print(">>", cmd)
    p = Popen(cmd, stdout=PIPE)
    buf = b""
    while True:
        tmp = p.stdout.read(4096)
        if tmp == b"":
            break
        buf += tmp
    p.wait()
    print("<<", buf)
    if buf == b"":
        return {"errors": "no data returned (timeout?)", "status": "FAIL"}
    try:
        return loads(buf.decode("utf8"))
    except JSONDecodeError:
        return {"errors": str(buf), "status": "FAIL"}
Example #3
    def start_agent(self, agent_uuid):
        self.logit('Starting agent {}'.format(agent_uuid))
        self.logit("VOLTTRON_HOME SETTING: {}".format(
            self.env['VOLTTRON_HOME']))
        cmd = ['volttron-ctl']
        cmd.extend(['start', agent_uuid])
        p = Popen(cmd, env=self.env, stdout=sys.stdout, stderr=sys.stderr)
        p.wait()

        # Confirm agent running
        cmd = ['volttron-ctl']
        cmd.extend(['status', agent_uuid])
        res = subprocess.check_output(cmd, env=self.env)
        # 776 TODO: Timing issue where check fails
        time.sleep(.1)
        self.logit("Subprocess res is {}".format(res))
        assert 'running' in res
        pidpos = res.index('[') + 1
        pidend = res.index(']')
        pid = int(res[pidpos:pidend])

        assert psutil.pid_exists(pid), \
            "The pid associated with agent {} does not exist".format(pid)

        self.started_agent_pids.append(pid)
        return pid
Example #4
    def run(self, command_line, split_lines, pipe_as_input):

        logging.info("command line: %s", command_line)

        if pipe_as_input is None:
            process = Popen(shlex.split(command_line), \
                        stdout=PIPE, stderr=PIPE)

            (output, error_out) = process.communicate()
            self.exit_code = process.wait()
        else:
            process = Popen(shlex.split(command_line), \
                        stdin=PIPE, stdout=PIPE, stderr=PIPE)

            (output, error_out) = process.communicate(
                input=pipe_as_input.encode("utf-8"))
            self.exit_code = process.wait()

        if split_lines:
            self.lines = output.splitlines()
        else:
            self.output = output.decode("utf-8")

        self.error_out = error_out

        return self.exit_code
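A caveat with the run() helper above: output is a bytes object, so splitlines() yields bytes items while the else-branch stores decoded text. A small standalone sketch (an assumption about the intended behaviour, not the project's code) that keeps both shapes consistent:

# Sketch: decode once so callers always get text, mirroring the two branches above.
def split_output(output, split_lines):
    text = output.decode("utf-8")
    return text.splitlines() if split_lines else text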
Example #5
    def _job_monitor_glet(self, job_group, jobid, description, args, data):
        jobname = (REDIS_JOBS_GROUP_PREFIX + '-{}').format(job_group, jobid)
        joblogfile = os.path.join(
            config.get('MINEMELD_LOG_DIRECTORY_PATH', '/tmp'),
            '{}.log'.format(jobname))
        jobtempdir = tempfile.mkdtemp(prefix=jobname)

        LOG.info('Executing job {} - {} cwd: {} logfile: {}'.format(
            jobname, args, jobtempdir, joblogfile))

        try:
            with open(joblogfile, 'w+') as logfile:
                jobprocess = Popen(args=args,
                                   close_fds=True,
                                   cwd=jobtempdir,
                                   shell=False,
                                   stdout=logfile,
                                   stderr=subprocess.STDOUT)

        except OSError:
            self._safe_remove(joblogfile)
            self._safe_rmtree(jobtempdir)
            LOG.exception('Error starting job {}'.format(jobname))
            return

        jobpsproc = psutil.Process(pid=jobprocess.pid)

        jobdata = data
        if jobdata is None:
            jobdata = {}

        jobdata['create_time'] = int(time.time() * 1000)
        jobdata['description'] = description
        jobdata['job_id'] = jobid
        jobdata['pid'] = jobpsproc.pid
        jobdata['hash'] = hash(jobpsproc)
        jobdata['logfile'] = joblogfile
        jobdata['cwd'] = jobtempdir
        jobdata['status'] = 'RUNNING'

        self.SR.hset(REDIS_JOBS_GROUP_PREFIX.format(job_group), jobid,
                     json.dumps(jobdata))

        jobprocess.wait()

        if jobprocess.returncode != 0:
            jobdata['status'] = 'ERROR'
        else:
            jobdata['status'] = 'DONE'
        jobdata['returncode'] = jobprocess.returncode
        jobdata['end_time'] = int(time.time() * 1000)

        self._collect_job(jobdata)

        self.SR.hset(REDIS_JOBS_GROUP_PREFIX.format(job_group), jobid,
                     json.dumps(jobdata))

        self.running_jobs[job_group].pop(jobid, None)

        return jobprocess.returncode
Example #6
    def start_agent(self, agent_uuid):
        self.logit('Starting agent {}'.format(agent_uuid))
        self.logit("VOLTTRON_HOME SETTING: {}".format(
            self.env['VOLTTRON_HOME']))
        cmd = ['volttron-ctl']
        if self.opts.get('developer_mode', False):
            cmd.append('--developer-mode')
        cmd.extend(['start', agent_uuid])
        p = Popen(cmd, env=self.env, stdout=sys.stdout, stderr=sys.stderr)
        p.wait()

        # Confirm agent running
        cmd = ['volttron-ctl']
        if self.opts.get('developer_mode', False):
            cmd.append('--developer-mode')
        cmd.extend(['status', agent_uuid])
        res = subprocess.check_output(cmd, env=self.env)
        #776 TODO: Timing issue where check fails
        time.sleep(.1)
        self.logit("Subprocess res is {}".format(res))
        assert 'running' in res
        pidpos = res.index('[') + 1
        pidend = res.index(']')
        pid = int(res[pidpos:pidend])

        self.started_agent_pids.append(pid)
        return int(pid)
Example #7
class Command(BaseCommand):
    help = 'Starts API servers for all ontologies'

    def __init__(self, *args, **kwargs):
        super(Command, self).__init__(*args, **kwargs)
        self.processes = {}
        signal.signal(signal.SIGTERM, self.stop_subprocesses)
        signal.signal(signal.SIGINT, self.stop_subprocesses)
        signal.signal(signal.SIGQUIT, self.stop_subprocesses)

    def add_arguments(self, parser):
        pass

    def stop_subprocesses(self, signum, frame):
        if self.proc.poll() is None:
            self.proc.kill()
        Ontology.objects.filter(
            nb_servers__gt=0,
            acronym__in=self.loaded).update(nb_servers=F('nb_servers') - 1)
        exit(0)

    def handle(self, *args, **options):
        ontologies = Ontology.objects.filter(status=Ontology.CLASSIFIED)
        data = []
        self.loaded = set()
        for ont in ontologies:
            ontIRI = ABEROWL_SERVER_URL + ont.get_latest_submission(
            ).get_filepath()
            data.append({'ontId': ont.acronym, 'ontIRI': ontIRI})
        data = json.dumps(data)
        env = os.environ.copy()
        env['JAVA_OPTS'] = '-Xmx200g -Xms8g'
        self.proc = Popen(['groovy', 'OntologyServer.groovy'],
                          cwd='aberowlapi/',
                          stdin=PIPE,
                          stdout=PIPE,
                          universal_newlines=True,
                          env=env)
        self.proc.stdin.write(data)
        self.proc.stdin.close()
        for line in self.proc.stdout:
            line = line.strip()
            logging.info(line)
            if line.startswith('Finished loading'):
                oid = line.split()[2]
                if oid not in self.loaded:
                    self.loaded.add(oid)
                    try:
                        Ontology.objects.filter(acronym=oid).update(
                            nb_servers=F('nb_servers') + 1)
                    except Exception as e:
                        print('Exception:', e)
                        # Reset database connection if update query fails
                        db.close_connection()
                        Ontology.objects.filter(acronym=oid).update(
                            nb_servers=F('nb_servers') + 1)

        self.proc.stdout.close()
        self.proc.wait()
Example #8
    def do_job(self,jobid,props,*args,**kwargs):
        log.info("do job: %s"%(job))
        if props:
            props = json.loads(props)
        else:
            props = {}

        payload = {"id":jobid,"status":"building"}
        r = requests.post('%s/api/job'%(self.options.master_base_url),data=payload)
        log.info("update job %s status,result:%s"%(jobid,r))

        if props.has_key("steps"):
            steps = props["steps"]
            for step in steps:
                if step.has_key("shell"):
                    shell = step["shell"]
                    log.info("shell: %s"%(shell[:256]))
                    sub = Popen([shell], stdout=PIPE, stderr=PIPE, shell=True)

                    stdio_q = Queue()
                    def handle_stdout():
                        for l in sub.stdout:
                            stdio_q.put_nowait((0,l))
                    def handle_stderr():
                        for l in sub.stderr:
                            stdio_q.put_nowait((1,l))
                    def handle_stdio_q():
                        #stdout 0 stderr 1 extra 2 end 255
                        current_text_type = None
                        stdio_list = []
                        need_flush = False
                        timeout = None
                        while 1:
                            ttype,text = stdio_q.get()
                            if ttype!=current_text_type and len(stdio_list)>0:
                                need_flush = True
                            if len(stdio_list)>50:
                                need_flush = True
                            if need_flush:
                                text2flush = "".join(stdio_list)
                                payload = {"id":jobid,"text_type":current_text_type,"stdio_text":text2flush}
                                r = requests.post('%s/api/job'%(self.options.master_base_url),data=payload)
                                need_flush = False
                            if ttype==255:
                                break
                            current_text_type = ttype
                            stdio_list.append(text)
                    glet_stdout = gevent.spawn(handle_stdout)
                    glet_stderr = gevent.spawn(handle_stderr)
                    glet_stdio_q = gevent.spawn(handle_stdio_q)

                    sub.wait()
                    stdio_q.put_nowait((255,""))
                    glet_stdout.kill()
                    glet_stderr.kill()

        payload = {"id":jobid,"status":3}#JOB_STATUS_FINISHED
        r = requests.post('%s/api/job'%(self.options.master_base_url),data=payload)
        log.info("update job %d status,result:%s"%(jobid,r))
Example #9
 def __os_cmd(self, cmd, args):
     args = [cmd] + args.split()
     p = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
     p.wait()
     for line in p.stderr:
         sys.stderr.write(colored(line, 'red'))
     for line in p.stdout:
         sys.stdout.write(line)
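A note on the pattern above: calling p.wait() before reading PIPE-backed stdout/stderr can deadlock once a pipe buffer fills. A minimal sketch (not the original project's code) that drains both streams first:

import sys
from gevent.subprocess import Popen, PIPE

def os_cmd(cmd, args):
    # communicate() reads stdout/stderr to EOF and then waits, so a full
    # pipe buffer cannot block the child process.
    p = Popen([cmd] + args.split(), stdin=PIPE, stdout=PIPE, stderr=PIPE,
              close_fds=True)
    out, err = p.communicate()
    sys.stderr.write(err.decode(errors="replace"))
    sys.stdout.write(out.decode(errors="replace"))
    return p.returncode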
Example #10
def _cmd(cmdargs):
    """Executes the passed command.

    :param cmdargs: A list of arguments that should be passed to Popen.
    :type cmdargs: [str]
    """
    if verbose:
        print(cmdargs)
    process = Popen(cmdargs, env=os.environ, stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
    process.wait()
Example #11
def block_and_check_process_output(process_args, fail_on_error=True, retry_count=0):
    process = Popen(process_args)
    process.wait()
    if process.returncode != 0:
        if retry_count > 0:
            print '{} failed. Retrying...'.format(' '.join(process_args))
            block_and_check_process_output(process_args, fail_on_error=fail_on_error, retry_count=retry_count-1)
        else:
            print process.stderr
            if fail_on_error:
                exit(1)
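The snippet above prints process.stderr, which is None because stderr was never piped. A hedged rework (an assumption, not the project's code) that captures and shows the error output on the final retry:

from gevent.subprocess import Popen, PIPE

def run_with_retries(process_args, fail_on_error=True, retry_count=0):
    # Capture stderr so the failure message printed on the last retry is meaningful.
    process = Popen(process_args, stderr=PIPE)
    _, err = process.communicate()
    if process.returncode != 0:
        if retry_count > 0:
            print('{} failed. Retrying...'.format(' '.join(process_args)))
            return run_with_retries(process_args, fail_on_error, retry_count - 1)
        print(err.decode(errors='replace'))
        if fail_on_error:
            raise SystemExit(1)
    return process.returncode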
Example #12
def _cmd(cmdargs):
    """ Executes the passed command.
.
    @:param: list:
        A list of arguments that should be passed to Popen.
    """
    print(cmdargs)
    process = Popen(cmdargs,
                    env=os.environ,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
    process.wait()
Example #13
def block_until_sshable(config, instance_name, tries=1, wait_time_seconds=10):
    if tries > 10:
        print "Tried {} times to ssh to {}, something bad happened".format(tries, instance_name)
        exit(1)

    process_args = zdgcutil.ssh(config, instance_name)
    process = Popen(process_args)
    process.wait()
    ret_code = process.returncode
    if ret_code != 0:
        print "{} not yet sshable, waiting {} seconds".format(instance_name, wait_time_seconds)
        time.sleep(wait_time_seconds)
        block_until_sshable(config, instance_name, tries + 1)
Example #14
def _cmd(cmdargs):
    """Executes the passed command.

    :param cmdargs: A list of arguments that should be passed to Popen.
    :type cmdargs: [str]
    """
    if verbose:
        print(cmdargs)
    process = Popen(cmdargs,
                    env=os.environ,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
    process.wait()
Example #15
def test_cov_update_published(volttron_instance, test_agent):
    """Tests the functionality of BACnet change of value forwarding in the
    Master Driver and driver.py"""
    # Reset master driver config store
    cmd = ['volttron-ctl', 'config', 'delete', PLATFORM_DRIVER, '--all']
    process = Popen(cmd, env=volttron_instance.env,
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    result = process.wait()
    assert result == 0

    # Add fake device configuration
    cmd = ['volttron-ctl', 'config', 'store', PLATFORM_DRIVER,
           'fake.csv', 'examples/configurations/drivers/fake.csv', '--csv']
    process = Popen(cmd, env=volttron_instance.env,
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    result = process.wait()
    assert result == 0

    cmd = ['volttron-ctl', 'config', 'store', PLATFORM_DRIVER,
           "devices/fakedriver", 'examples/configurations/drivers/fake.config',
           '--json']
    process = Popen(cmd, env=volttron_instance.env,
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    result = process.wait()
    assert result == 0

    # install master driver, start the master driver, which starts the device
    master_uuid = volttron_instance.install_agent(
        agent_dir=get_services_core("MasterDriverAgent"),
        config_file={},
        start=True)
    print("agent id: ", master_uuid)

    # tell the master driver to forward the value
    point_name = "PowerState"
    device_path = "fakedriver"
    result_dict = {"fake1": "test", "fake2": "test", "fake3": "test"}
    test_agent.vip.rpc.call(PLATFORM_DRIVER, 'forward_bacnet_cov_value',
                            device_path, point_name, result_dict)
    # wait for the publishes to make it to the bus
    gevent.sleep(2)

    # Mock checks
    # Should have one "PowerState" publish for each item in the result dict
    # The total number of publishes will likely include regular scrapes as well
    assert test_agent.cov_callback.call_count >= 3
    test_count = 0
    for call_arg in test_agent.cov_callback.call_args_list:
        if call_arg[0][5][0].get("PowerState", False):
            test_count += 1
    assert test_count == 3
Example #16
def github():
    """ github回调 """
    deliveryid = request.headers.get('X-GitHub-Delivery', None)
    if deliveryid:
        signature = request.headers.get('X-Hub-Signature', None)
        if signature:
            hashtype, remotesignstr = signature.split('=')
            localsign = hmac.new(current_app.config['GITHUB_WEBHOOK_SECRET'], msg=request.data, digestmod=getattr(hashlib, hashtype))
            if remotesignstr != localsign.hexdigest():
                return jsonify({'error': 'no permission'}), 401
        event = request.headers.get('X-GitHub-Event', '')
        if event == 'push':
            subp = Popen('git checkout .;git pull', shell=True)
            subp.wait()
    return jsonify({})
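One hardening note on the webhook handler above: comparing hex digests with != is not constant-time; hmac.compare_digest is the usual choice. A sketch (helper name and argument handling are assumptions, not the project's code):

import hashlib
import hmac

def signature_valid(secret, body, signature_header):
    # secret and body are bytes; signature_header looks like "sha1=<hexdigest>".
    hashtype, remote = signature_header.split('=', 1)
    local = hmac.new(secret, msg=body, digestmod=getattr(hashlib, hashtype))
    return hmac.compare_digest(remote, local.hexdigest())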
Example #17
 def _auction_fucn(self, args):
     process = None
     try:
         process = Popen(args)
         self.processes[process.pid] = process
         rc = process.wait()
         if rc == 0:
             self.logger.info(
                 "Finished {}".format(args[2]),
                 extra={
                     'MESSAGE_ID': 'CHRONOGRAPH_WORKER_COMPLETE_SUCCESSFUL'
                 }
             )
         else:
             self.logger.error(
                 "Exit with error {}".format(args[2]),
                 extra={
                     'MESSAGE_ID': 'CHRONOGRAPH_WORKER_COMPLETE_EXCEPTION'
                 }
             )
     except Exception as error:
         self.logger.critical(
             "Exit with error {} params: {} error: {}".format(
                 args[2], repr(args), repr(error)),
             extra={'MESSAGE_ID': 'CHRONOGRAPH_WORKER_COMPLETE_EXCEPTION'})
     if process:
         del self.processes[process.pid]
Example #18
    def _auction_fucn(self, document_id, tender_id, lot_id, view_value):
        process = None
        params = [self.config['main']['auction_worker'],
                  "run", tender_id,
                  self.get_auction_worker_configuration_path(view_value)]
        if lot_id:
            params += ['--lot', lot_id]

        if view_value['api_version']:
            params += ['--with_api_version', view_value['api_version']]

        if view_value['mode'] == 'test':
            params += ['--auction_info_from_db', 'true']

        try:
            process = Popen(params)
            self.processes[process.pid] = process
            rc = process.wait()
            if rc == 0:
                self.logger.info(
                    "Finished {}".format(document_id),
                    extra={'MESSAGE_ID': 'CHRONOGRAPH_WORKER_COMPLETE_SUCCESSFUL'})
            else:
                self.logger.error(
                    "Exit with error {}".format(document_id),
                    extra={'MESSAGE_ID': 'CHRONOGRAPH_WORKER_COMPLETE_EXCEPTION'})
        except Exception, error:
            self.logger.critical(
                "Exit with error {} params: {} error: {}".format(document_id, repr(params), repr(error)),
                extra={'MESSAGE_ID': 'CHRONOGRAPH_WORKER_COMPLETE_EXCEPTION'})
Example #19
def run_autobahn():
    """
    Spawn the autobahn test suite in a subprocess
    """
    import os.path

    cmd = ['wstest -m fuzzingclient -s %s/autobahn.json' % (
        os.path.dirname(__file__),)]

    wstest = Popen(cmd, stderr=PIPE, stdout=PIPE, shell=True)

    if wstest.wait():
        # something went wrong, it's boom time.
        stdout, stderr = wstest.communicate(None)

        sys.stderr.write(stderr)
        sys.stderr.flush()

        sys.stdout.write(stdout)
        sys.stdout.flush()

        raise RuntimeError

    # parse the generated report to see if we have failures
    chk = Popen(
        'fgrep gevent_websocket reports/clients/index.html | grep Fail',
        stdout=PIPE, shell=True)

    stdout, stderr = chk.communicate(None)

    if stdout:
        sys.stderr.write('Autobahn test failures:\n' + stdout)

        raise SystemExit(1)
Example #20
 def _auction_fucn(self, args):
     process = None
     try:
         process = Popen(args)
         self.processes[process.pid] = process
         rc = process.wait()
         if rc == 0:
             self.logger.info("Finished {}".format(args[2]),
                              extra={
                                  'MESSAGE_ID':
                                  'CHRONOGRAPH_WORKER_COMPLETE_SUCCESSFUL'
                              })
         else:
             self.logger.error("Exit with error {}".format(args[2]),
                               extra={
                                   'MESSAGE_ID':
                                   'CHRONOGRAPH_WORKER_COMPLETE_EXCEPTION'
                               })
     except Exception as error:
         self.logger.critical(
             "Exit with error {} params: {} error: {}".format(
                 args[2], repr(args), repr(error)),
             extra={'MESSAGE_ID': 'CHRONOGRAPH_WORKER_COMPLETE_EXCEPTION'})
     if process:
         del self.processes[process.pid]
Example #21
def test_drivenmatlabagent(volttron_instance1):
    print("** Setting up test_drivenagent module **")
    
    wrapper = volttron_instance1
    
    #Write config files for master driver
    process = Popen(['python', 'config_builder.py', 
                     '--count=1', 
                     '--publish-only-depth-all',
                     '--campus=fakecampus',
                     '--building=fakebuilding',
                     '--interval=5',
                     '--config-dir=../../applications/pnnl/FakeDrivenMatlabAgent/tests',
                     'fake', 
                     '../../applications/pnnl/FakeDrivenMatlabAgent/tests/test_fake.csv', 
                     'null'], 
                    env=volttron_instance1.env, cwd='scripts/scalability-testing',
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    result = process.wait()
    print result
    assert result == 0
     
    #Actuator Agent
    agent_uuid = volttron_instance1.install_agent(
        agent_dir="services/core/ActuatorAgent",
        config_file="services/core/ActuatorAgent/actuator-deploy.service",
        start=True)
    print("agent id: ", agent_uuid)
    assert agent_uuid
    actuator_agent = wrapper.build_agent()
     
    #Driven Matlab Agent
    agent_uuid = volttron_instance1.install_agent(
        agent_dir="applications/pnnl/FakeDrivenMatlabAgent",
        config_file=config_wh,
        start=True)
    print("agent id: ", agent_uuid)
    assert agent_uuid
    driven_agent = wrapper.build_agent()
     
    #Fake Master Driver
    agent_uuid = volttron_instance1.install_agent(
        agent_dir="services/core/MasterDriverAgent",
        config_file="applications/pnnl/FakeDrivenMatlabAgent/tests/master-driver.agent",
        start=True)
    print("agent id: ", agent_uuid)
    assert agent_uuid
    driver_agent = wrapper.build_agent()
     
    gevent.sleep(5)
     
    path = 'fakecampus/fakebuilding/fakedriver0/HPWH_Phy0_PowerState'
    value = driven_agent.vip.rpc.call('platform.actuator', 'get_point', path).get()
    print('The set point value is '+str(value))
    assert value == 1 
     
    path = 'fakecampus/fakebuilding/fakedriver0/ERWH_Phy0_ValveState'
    value = driven_agent.vip.rpc.call('platform.actuator', 'get_point', path).get()
    print('The set point value is '+str(value))
    assert value == 1 
Example #22
def github():
    """ github回调 """
    deliveryid = request.headers.get('X-GitHub-Delivery', None)
    if deliveryid:
        signature = request.headers.get('X-Hub-Signature', None)
        if signature:
            hashtype, remotesignstr = signature.split('=')
            localsign = hmac.new(current_app.config['GITHUB_WEBHOOK_SECRET'],
                                 msg=request.data,
                                 digestmod=getattr(hashlib, hashtype))
            if remotesignstr != localsign.hexdigest():
                return jsonify({'error': 'no permission'}), 401
        event = request.headers.get('X-GitHub-Event', '')
        if event == 'push':
            subp = Popen('git checkout .;git pull', shell=True)
            subp.wait()
    return jsonify({})
Example #23
def proc(input, output, args, env=None, stderr=None):
	"""Run a subprocess, passing input to its stdin and sending its stdout to output,
	with each item newline-separated.
	Args is either a string to be shell-interpreted, or a list of args.
	stderr is either not redirected (default), mixed in with stdout (pass subprocess.STDOUT),
	or redirected to a given file.
	"""
	from gevent.subprocess import Popen, PIPE, STDOUT

	if isinstance(args, unicode):
		args = args.encode('utf8')
	if isinstance(args, str):
		shell = True
	else:
		shell = False

	group = gevent.pool.Group()

	proc = None
	try:
		proc = Popen(args, shell=shell, env=env, stdin=PIPE, stdout=PIPE, stderr=stderr)

		@group.spawn
		def do_input():
			for item in input:
				item = item.encode('utf8') if isinstance(item, unicode) else str(item)
				proc.stdin.write('{}\n'.format(item))
				proc.stdin.flush()
			proc.stdin.close()

		@group.spawn
		def do_output():
			for line in proc.stdout:
				output.put(line.rstrip('\n'))
			output.close()

		proc.wait()
		group.join()
	finally:
		if proc and proc.poll() is None:
			try:
				proc.kill()
			except OSError as e:
				if e.errno != errno.ESRCH:
					raise
Example #24
def run(worker_directory_path,
        tender_file_path,
        worker,
        auction_id,
        config,
        start_time,
        time_offset,
        wait_for_result=False):
    with update_auctionPeriod(tender_file_path, auction_type='simple',
                              start_time=start_time,
                              time_offset_sec=time_offset) \
            as auction_file:
        p = Popen('{0}/bin/{1} run {2} {0}/etc/{3} --planning_procerude '
                  'partial_db --auction_info {4}'.format(
                      worker_directory_path, worker, auction_id, config,
                      auction_file).split())
        if wait_for_result:
            p.wait()
Example #25
    def start_agent(self, agent_uuid):
        self.logit('Starting agent {}'.format(agent_uuid))
        self.logit("VOLTTRONO_HOME SETTING: {}".format(os.environ['VOLTTRON_HOME']))
        cmd = ['volttron-ctl', 'start', agent_uuid]
        p = Popen(cmd, env=self.env,
                  stdout=sys.stdout, stderr=sys.stderr)
        p.wait()

        # Confirm agent running
        cmd = ['volttron-ctl', 'status', agent_uuid]
        res = subprocess.check_output(cmd, env=self.env)
        assert 'running' in res
        pidpos = res.index('[') + 1
        pidend = res.index(']')
        pid = int(res[pidpos: pidend])

        self._started_pids.append(pid)
        return int(pid)
Example #26
    def start_agent(self, agent_uuid):
        self.logit('Starting agent {}'.format(agent_uuid))
        self.logit("VOLTTRONO_HOME SETTING: {}".format(
            os.environ['VOLTTRON_HOME']))
        cmd = ['volttron-ctl', 'start', agent_uuid]
        p = Popen(cmd, env=self.env, stdout=sys.stdout, stderr=sys.stderr)
        p.wait()

        # Confirm agent running
        cmd = ['volttron-ctl', 'status', agent_uuid]
        res = subprocess.check_output(cmd, env=self.env)
        assert 'running' in res
        pidpos = res.index('[') + 1
        pidend = res.index(']')
        pid = int(res[pidpos:pidend])

        self._started_pids.append(pid)
        return int(pid)
Example #27
def run_func(queue, job_id, cmd, stop_on_failure=True):
    redirector = Redirector(_stream)
    _logrun(cmd)

    try:
        process = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, close_fds=True)
        redirector.add_redirection("marteau-stdout", process, process.stdout)
        redirector.add_redirection("marteau-stderr", process, process.stderr)
        redirector.start()
        pid = process.pid
        queue.add_pid(job_id, pid)
        process.wait()
        res = process.returncode
        if res != 0 and stop_on_failure:
            _logrun("%r failed" % cmd)
            raise Exception("%r failed" % cmd)
        return res
    finally:
        redirector.kill()
        queue.remove_pid(job_id, pid)
Example #28
def build_app(console):
    def rm_dir(path_to_rm):
        for x in path_to_rm.iterdir():
            if x.is_file():
                x.unlink()
            else:
                rm_dir(x)
        path_to_rm.rmdir()

    specfile = "default.spec"
    if console:
        specfile = "console.spec"
    path = Path(".").absolute()
    spec = path / "app" / "utils" / "spec" / specfile
    tmp = path / "tmp"
    p = Popen("pyinstaller {spec} --distpath {path} --workpath {tmp}".format(
        spec=spec, path=path, tmp=tmp))
    p.wait()
    rm_dir(Path("./tmp"))
    sys.exit(0)
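Note on the Popen call above: a single formatted command string without shell=True is only treated as "program plus arguments" on Windows; on POSIX it is taken as one literal program name. A portable sketch (an assumption, not the original code) splits the string first:

import shlex
from gevent.subprocess import Popen

def run_pyinstaller(spec, path, tmp):
    cmd = "pyinstaller {spec} --distpath {path} --workpath {tmp}".format(
        spec=spec, path=path, tmp=tmp)
    p = Popen(shlex.split(cmd))   # list form behaves the same on Windows and POSIX
    return p.wait()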
Example #29
def _try_run_cmd(cmd, error_msg, **popen_kwargs):
    kwargs = {'stdout': PIPE, 'stderr': PIPE, 'env': os.environ}
    kwargs.update(popen_kwargs)

    cmd = _encode_cmd(cmd)

    proc = Popen(cmd, **kwargs)
    _, stderr = proc.communicate()
    if proc.wait():
        raise GenerateError('%s:\n\t %s\n\t%s' %
                            (u(error_msg), u(cmd), u(stderr)))
Example #30
def exec_command(*args, **kwargs):
    shell = kwargs.get("shell", False)
    process = Popen(args, stdout=PIPE, stderr=PIPE, close_fds=True, shell=shell)
    
    retcode = process.wait()
    output = process.stdout.read()
    unused_err = process.stderr.read()

    if retcode:
        _logger.debug("Command '%s' returned non-zero exit status %d", args, retcode)
        
    return retcode, output.strip()
Example #31
def copper_node(workdir):
    logpath = os.path.join(workdir, 'copper.log')
    unixpath = os.path.join(workdir, 'copper.sock')
    httppath = os.path.join(workdir, 'copper.http')
    confpath = os.path.join(workdir, 'copper.conf')
    config = {
        "listen": [
            {
                "net": "unix",
                "type": "http",
                "addr": httppath,
            },
            {
                "net": "unix",
                "addr": unixpath,
                "allow-changes": True,
            },
        ],
    }
    with open(confpath, 'w') as f:
        # YAML parses valid JSON data
        json.dump(config, f)
    with open(logpath, 'wb') as logfile:
        p = Popen(['copper-node', '-config=' + confpath], shell=False, cwd=workdir, stdout=logfile, stderr=logfile)
    try:
        while not os.path.exists(unixpath):
            time.sleep(0.001)
            rc = p.poll()
            if rc is not None:
                with open(logpath, 'rb') as logfile:
                    sys.stderr.write(logfile.read())
                raise RuntimeError('copper-node exited with status %r' % (rc,))
        yield {
            'unix': unixpath,
            'http': httppath,
        }
    finally:
        if p.poll() is None:
            p.terminate()
            p.wait()
Example #32
def dump(app, logger):
    """
    Run dump script as separate process
    """
    def read_stream(stream):
        try:
            while not stream.closed:
                line = stream.readline()
                if not line:
                    break
                line = line.rstrip().decode('utf-8')
                logger.info(line.split(' - ')[-1])
        except:
            pass

    args = prepare_pack_command(app.config)
    logger.warn("Going to start dump with args {}".format(args))
    popen = Popen(args, stdout=PIPE, stderr=PIPE)
    spawn(read_stream, popen.stdout)
    spawn(read_stream, popen.stderr)
    popen.wait()
    return_code = popen.returncode
    logger.info("Dumper ended work with code {}".format(return_code))
Example #33
def run_func(queue, job_id, cmd, stop_on_failure=True):
    redirector = Redirector(_stream)
    _logrun(cmd)

    try:
        process = Popen(cmd,
                        shell=True,
                        stdout=PIPE,
                        stderr=PIPE,
                        close_fds=True)
        redirector.add_redirection('marteau-stdout', process, process.stdout)
        redirector.add_redirection('marteau-stderr', process, process.stderr)
        redirector.start()
        pid = process.pid
        queue.add_pid(job_id, pid)
        process.wait()
        res = process.returncode
        if res != 0 and stop_on_failure:
            _logrun("%r failed" % cmd)
            raise Exception("%r failed" % cmd)
        return res
    finally:
        redirector.kill()
        queue.remove_pid(job_id, pid)
Example #34
class TaskExecutor(object):
    def __init__(self, balancer, index):
        self.balancer = balancer
        self.index = index
        self.task = None
        self.proc = None
        self.pid = None
        self.conn = None
        self.state = None
        self.key = str(uuid.uuid4())
        self.checked_in = Event()
        self.result = AsyncResult()
        gevent.spawn(self.executor)

    def checkin(self, conn):
        self.balancer.logger.debug('Check-in of worker #{0} (key {1})'.format(self.index, self.key))
        self.conn = conn
        self.state = WorkerState.IDLE
        self.checked_in.set()

    def get_status(self):
        if not self.conn:
            return None

        try:
            st = TaskStatus(0)
            st.__setstate__(self.conn.call_client_sync('taskproxy.get_status'))
            return st
        except RpcException as err:
            self.balancer.logger.error("Cannot obtain status from task #{0}: {1}".format(self.task.id, str(err)))
            self.proc.terminate()

    def put_status(self, status):
        # Try to collect rusage at this point, when process is still alive
        try:
            kinfo = bsd.kinfo_getproc(self.pid)
            self.task.rusage = kinfo.rusage
        except LookupError:
            pass

        if status['status'] == 'FINISHED':
            self.result.set(status['result'])

        if status['status'] == 'FAILED':
            error = status['error']
            self.result.set_exception(TaskException(
                code=error['code'],
                message=error['message'],
                stacktrace=error['stacktrace'],
                extra=error.get('extra')
            ))

    def run(self, task):
        self.result = AsyncResult()
        self.task = task
        self.task.set_state(TaskState.EXECUTING)

        self.conn.call_client_sync('taskproxy.run', {
            'id': task.id,
            'class': task.clazz.__name__,
            'filename': inspect.getsourcefile(task.clazz),
            'args': task.args,
            'debugger': task.debugger
        })

        try:
            self.result.get()
        except BaseException as e:
            if not isinstance(e, TaskException):
                self.balancer.dispatcher.report_error(
                    'Task {0} raised exception other than TaskException'.format(self.task.name),
                    e
                )

            self.task.error = serialize_error(e)
            self.task.set_state(TaskState.FAILED, TaskStatus(0, str(e), extra={
                "stacktrace": traceback.format_exc()
            }))

            self.task.ended.set()
            self.balancer.task_exited(self.task)
            self.state = WorkerState.IDLE
            return

        self.task.result = self.result.value
        self.task.set_state(TaskState.FINISHED, TaskStatus(100, ''))
        self.task.ended.set()
        self.balancer.task_exited(self.task)
        self.state = WorkerState.IDLE

    def abort(self):
        self.balancer.logger.info("Trying to abort task #{0}".format(self.task.id))
        # Try to abort via RPC. If this fails, kill process
        try:
            self.conn.call_client_sync('taskproxy.abort')
        except RpcException as err:
            self.balancer.logger.warning("Failed to abort task #{0} gracefully: {1}".format(self.task.id, str(err)))
            self.balancer.logger.warning("Killing process {0}".format(self.pid))
            self.proc.terminate()

    def executor(self):
        while True:
            try:
                self.proc = Popen(
                    [TASKWORKER_PATH, self.key],
                    close_fds=True,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT)

                self.pid = self.proc.pid
                self.balancer.logger.debug('Started executor #{0} as PID {1}'.format(self.index, self.pid))
            except OSError:
                self.result.set_exception(TaskException(errno.EFAULT, 'Cannot spawn task executor'))
                return

            for line in self.proc.stdout:
                line = line.decode('utf8')
                self.balancer.logger.debug('Executor #{0}: {1}'.format(self.index, line.strip()))
                if self.task:
                    self.task.output += line

            self.proc.wait()
            self.balancer.logger.error('Executor process with PID {0} died abruptly with exit code {1}'.format(
                self.proc.pid,
                self.proc.returncode)
            )

            self.result.set_exception(TaskException(errno.EFAULT, 'Task executor died'))
            gevent.sleep(1)

    def die(self):
        if self.proc:
            self.proc.terminate()
Example #35
def publish_agent(request, volttron_instance1):
    """
    Fixture used for setting up the environment.
    1. Creates fake driver configs
    2. Starts the master driver agent with the created fake driver agents
    3. Starts the actuator agent
    4. Creates an instance of the Agent class for publishing and returns it

    :param request: pytest request object
    :param volttron_instance1: instance of volttron in which test cases are run
    :return: an instance of fake agent used for publishing
    """

    # Reset master driver config store
    cmd = ['volttron-ctl', 'config', 'delete', PLATFORM_DRIVER, '--all']

    process = Popen(cmd,
                    env=volttron_instance1.env,
                    cwd='scripts/scalability-testing',
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
    result = process.wait()
    print(result)
    assert result == 0

    # Add master driver configuration files to config store.
    cmd = [
        'volttron-ctl', 'config', 'store', PLATFORM_DRIVER, 'fake.csv',
        'fake_unit_testing.csv', '--csv'
    ]
    process = Popen(cmd,
                    env=volttron_instance1.env,
                    cwd='scripts/scalability-testing',
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
    result = process.wait()
    print(result)
    assert result == 0

    config_name = "devices/fakedriver"
    cmd = [
        'volttron-ctl', 'config', 'store', PLATFORM_DRIVER, config_name,
        'fake_unit_testing.config', '--json'
    ]
    process = Popen(cmd,
                    env=volttron_instance1.env,
                    cwd='scripts/scalability-testing',
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
    result = process.wait()
    print(result)
    assert result == 0

    # Start the master driver agent which would in turn start the fake driver
    #  using the configs created above
    master_uuid = volttron_instance1.install_agent(
        agent_dir=get_services_core("MasterDriverAgent"),
        config_file={},
        start=True)
    print("agent id: ", master_uuid)
    gevent.sleep(2)  # wait for the agent to start and start the devices

    # Start the actuator agent through which publish agent should communicate
    # to fake device. Start the master driver agent which would in turn start
    # the fake driver using the configs created above
    actuator_uuid = volttron_instance1.install_agent(
        agent_dir=get_services_core("ActuatorAgent"),
        config_file=get_services_core("ActuatorAgent/tests/actuator.config"),
        start=True)
    print("agent id: ", actuator_uuid)
    gevent.sleep(2)

    example_uuid = volttron_instance1.install_agent(
        agent_dir=get_examples("ConfigActuation"),
        config_file={},
        vip_identity="config_actuation")
    gevent.sleep(2)

    # 3: Start a fake agent to publish to message bus
    publish_agent = volttron_instance1.build_agent(identity=TEST_AGENT)

    # 4: add a tear down method to stop sqlhistorian agent and the fake agent
    #  that published to message bus
    def stop_agent():
        print("In teardown method of module")
        volttron_instance1.stop_agent(actuator_uuid)
        volttron_instance1.stop_agent(master_uuid)
        volttron_instance1.stop_agent(example_uuid)
        volttron_instance1.remove_agent(actuator_uuid)
        volttron_instance1.remove_agent(master_uuid)
        volttron_instance1.remove_agent(example_uuid)
        publish_agent.core.stop()

    request.addfinalizer(stop_agent)
    return publish_agent
Example #36
class TaskExecutor(object):
    def __init__(self, balancer, index):
        self.balancer = balancer
        self.index = index
        self.task = None
        self.proc = None
        self.pid = None
        self.conn = None
        self.state = WorkerState.STARTING
        self.key = str(uuid.uuid4())
        self.result = AsyncResult()
        self.exiting = False
        self.killed = False
        self.thread = gevent.spawn(self.executor)
        self.cv = Condition()
        self.status_lock = RLock()

    def checkin(self, conn):
        with self.cv:
            self.balancer.logger.debug("Check-in of worker #{0} (key {1})".format(self.index, self.key))
            self.conn = conn
            self.state = WorkerState.IDLE
            self.cv.notify_all()

    def put_progress(self, progress):
        st = TaskStatus(None)
        st.__setstate__(progress)
        self.task.set_state(progress=st)

    def put_status(self, status):
        with self.cv:
            # Try to collect rusage at this point, when process is still alive
            try:
                kinfo = self.balancer.dispatcher.threaded(bsd.kinfo_getproc, self.pid)
                self.task.rusage = kinfo.rusage
            except LookupError:
                pass

            if status["status"] == "ROLLBACK":
                self.task.set_state(TaskState.ROLLBACK)

            if status["status"] == "FINISHED":
                self.result.set(status["result"])

            if status["status"] == "FAILED":
                error = status["error"]

                if error["type"] in ERROR_TYPES:
                    cls = ERROR_TYPES[error["type"]]
                    exc = cls(
                        code=error["code"],
                        message=error["message"],
                        stacktrace=error["stacktrace"],
                        extra=error.get("extra"),
                    )
                else:
                    exc = OtherException(
                        code=error["code"],
                        message=error["message"],
                        stacktrace=error["stacktrace"],
                        type=error["type"],
                        extra=error.get("extra"),
                    )

                self.result.set_exception(exc)

    def put_warning(self, warning):
        self.task.add_warning(warning)

    def run(self, task):
        def match_file(module, f):
            name, ext = os.path.splitext(f)
            return module == name and ext in [".py", ".pyc", ".so"]

        with self.cv:
            self.cv.wait_for(lambda: self.state == WorkerState.ASSIGNED)
            self.result = AsyncResult()
            self.task = task
            self.task.set_state(TaskState.EXECUTING)
            self.state = WorkerState.EXECUTING
            self.cv.notify_all()

        self.balancer.logger.debug("Actually starting task {0}".format(task.id))

        filename = None
        module_name = inspect.getmodule(task.clazz).__name__
        for dir in self.balancer.dispatcher.plugin_dirs:
            found = False
            try:
                for root, _, files in os.walk(dir):
                    file = first_or_default(lambda f: match_file(module_name, f), files)
                    if file:
                        filename = os.path.join(root, file)
                        found = True
                        break

                if found:
                    break
            except OSError:
                continue

        try:
            self.conn.call_sync(
                "taskproxy.run",
                {
                    "id": task.id,
                    "user": task.user,
                    "class": task.clazz.__name__,
                    "filename": filename,
                    "args": task.args,
                    "debugger": task.debugger,
                    "environment": task.environment,
                    "hooks": task.hooks,
                },
            )
        except RpcException as e:
            self.balancer.logger.warning(
                "Cannot start task {0} on executor #{1}: {2}".format(task.id, self.index, str(e))
            )

            self.balancer.logger.warning(
                "Killing unresponsive task executor #{0} (pid {1})".format(self.index, self.proc.pid)
            )

            self.terminate()

        try:
            self.result.get()
        except BaseException as e:
            if isinstance(e, OtherException):
                self.balancer.dispatcher.report_error("Task {0} raised invalid exception".format(self.task.name), e)

            if isinstance(e, TaskAbortException):
                self.task.set_state(TaskState.ABORTED, TaskStatus(0, "aborted"))
            else:
                self.task.error = serialize_error(e)
                self.task.set_state(
                    TaskState.FAILED, TaskStatus(0, str(e), extra={"stacktrace": traceback.format_exc()})
                )

            with self.cv:
                self.task.ended.set()

                if self.state == WorkerState.EXECUTING:
                    self.state = WorkerState.IDLE
                    self.cv.notify_all()

            self.balancer.task_exited(self.task)
            return

        with self.cv:
            self.task.result = self.result.value
            self.task.set_state(TaskState.FINISHED, TaskStatus(100, ""))
            self.task.ended.set()
            if self.state == WorkerState.EXECUTING:
                self.state = WorkerState.IDLE
                self.cv.notify_all()

        self.balancer.task_exited(self.task)

    def abort(self):
        self.balancer.logger.info("Trying to abort task #{0}".format(self.task.id))
        # Try to abort via RPC. If this fails, kill process
        try:
            # If task supports abort protocol we don't need to worry about subtasks - it's task
            # responsibility to kill them
            self.conn.call_sync("taskproxy.abort")
        except RpcException as err:
            self.balancer.logger.warning("Failed to abort task #{0} gracefully: {1}".format(self.task.id, str(err)))
            self.balancer.logger.warning("Killing process {0}".format(self.pid))
            self.killed = True
            self.terminate()

            # Now kill all the subtasks
            for subtask in filter(lambda t: t.parent is self.task, self.balancer.task_list):
                self.balancer.logger.warning(
                    "Aborting subtask {0} because parent task {1} died".format(subtask.id, self.task.id)
                )
                self.balancer.abort(subtask.id)

    def terminate(self):
        try:
            self.proc.terminate()
        except OSError:
            self.balancer.logger.warning("Executor process with PID {0} already dead".format(self.proc.pid))

    def executor(self):
        while not self.exiting:
            try:
                self.proc = Popen(
                    [TASKWORKER_PATH, self.key],
                    close_fds=True,
                    preexec_fn=os.setpgrp,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT,
                )

                self.pid = self.proc.pid
                self.balancer.logger.debug("Started executor #{0} as PID {1}".format(self.index, self.pid))
            except OSError:
                self.result.set_exception(TaskException(errno.EFAULT, "Cannot spawn task executor"))
                self.balancer.logger.error("Cannot spawn task executor #{0}".format(self.index))
                return

            for line in self.proc.stdout:
                line = line.decode("utf8")
                self.balancer.logger.debug("Executor #{0}: {1}".format(self.index, line.strip()))
                if self.task:
                    self.task.output += line

            self.proc.wait()

            with self.cv:
                self.state = WorkerState.STARTING
                self.cv.notify_all()

            if self.proc.returncode == -signal.SIGTERM:
                self.balancer.logger.info(
                    "Executor process with PID {0} was terminated gracefully".format(self.proc.pid)
                )
            else:
                self.balancer.logger.error(
                    "Executor process with PID {0} died abruptly with exit code {1}".format(
                        self.proc.pid, self.proc.returncode
                    )
                )

            if self.killed:
                self.result.set_exception(TaskException(errno.EFAULT, "Task killed"))
            else:
                self.result.set_exception(TaskException(errno.EFAULT, "Task executor died"))
            gevent.sleep(1)

    def die(self):
        self.exiting = True
        if self.proc:
            self.terminate()
Example #37
class Process(object):
    # TODO: handle both stdout and stderr
    # TODO: refactor into TTY, Process and TTYProcess?
    def __init__(self, args, env=None, executable=None, shell=False):
        master, slave = pty.openpty()
        fcntl.fcntl(master, fcntl.F_SETFL, os.O_NONBLOCK)

        self._finished = Event()
        self._master = master
        self._read_event = get_hub().loop.io(master, 1)
        self._write_event = get_hub().loop.io(master, 2)
        self._args = args
        self._proc = Popen(args,
                           env=env,
                           executable=executable,
                           shell=shell,
                           stdin=slave,
                           stdout=slave,
                           stderr=slave,
                           bufsize=0,
                           universal_newlines=False,
                           close_fds=True)

    def __repr__(self):
        return "Process:%x %r" % (id(self), self._args)

    @property
    def finished(self):
        return self._finished.ready()

    def _waitclosed(self):
        self._proc.wait()
        self.stop()

    def _writer(self, inch):
        """
        This greenlet will block until messages are ready to be written to pty
        """
        try:
            sock = self._master
            for msg in inch.watch():
                if 'resize' in msg:
                    set_winsize(sock, msg['resize']['width'],
                                msg['resize']['height'])
                if 'data' in msg:
                    buf = msg['data']
                    while not self.finished and len(buf):
                        try:
                            wait(self._write_event)
                        except Exception:
                            break
                        nwritten = os.write(sock, msg['data'])
                        buf = buf[nwritten:]
        except Exception:
            LOG.exception("In Process._writer")

    def run(self, task):
        writer_task = gevent.spawn(self._writer, task.input)
        gevent.spawn(self._waitclosed)
        proc = self._proc
        try:
            sock = self._master
            while not self.finished:
                try:
                    wait(self._read_event)
                except Exception:
                    break
                data = os.read(sock, 1024)
                if len(data) == 0 or data is StopIteration:
                    break
                if sock == proc.stderr:
                    task.output.send(dict(error=data))
                else:
                    task.output.send(dict(data=data))
        except Exception:
            LOG.exception("While reading from process")
        finally:
            writer_task.kill()
            self.stop()

    def stop(self):
        if not self.finished:
            cancel_wait(self._read_event)
            cancel_wait(self._write_event)
            try:
                os.close(self._master)
            except Exception:
                pass
            if not self._proc.poll():
                self._proc.terminate()
                self._proc.wait()
            self._finished.set()
Example #38
def test_cov_update_published(volttron_instance, test_agent):
    """Tests the functionality of BACnet change of value forwarding in the
    Master Driver and driver.py"""
    # Reset master driver config store
    cmd = ['volttron-ctl', 'config', 'delete', PLATFORM_DRIVER, '--all']
    process = Popen(cmd,
                    env=volttron_instance.env,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
    result = process.wait()
    assert result == 0

    # Add fake device configuration
    cmd = [
        'volttron-ctl', 'config', 'store', PLATFORM_DRIVER, 'fake.csv',
        'examples/configurations/drivers/fake.csv', '--csv'
    ]
    process = Popen(cmd,
                    env=volttron_instance.env,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
    result = process.wait()
    assert result == 0

    cmd = [
        'volttron-ctl', 'config', 'store', PLATFORM_DRIVER,
        "devices/fakedriver", 'examples/configurations/drivers/fake.config',
        '--json'
    ]
    process = Popen(cmd,
                    env=volttron_instance.env,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
    result = process.wait()
    assert result == 0

    # install master driver, start the master driver, which starts the device
    master_uuid = volttron_instance.install_agent(
        agent_dir=get_services_core("MasterDriverAgent"),
        config_file={},
        start=True)
    print("agent id: ", master_uuid)

    # tell the master driver to forward the value
    point_name = "PowerState"
    device_path = "fakedriver"
    result_dict = {"fake1": "test", "fake2": "test", "fake3": "test"}
    test_agent.vip.rpc.call(PLATFORM_DRIVER, 'forward_bacnet_cov_value',
                            device_path, point_name, result_dict)
    # wait for the publishes to make it to the bus
    gevent.sleep(2)

    # Mock checks
    # Should have one "PowerState" publish for each item in the result dict
    # The total number of publishes will likely include regular scrapes as well
    assert test_agent.cov_callback.call_count >= 3
    test_count = 0
    for call_arg in test_agent.cov_callback.call_args_list:
        if call_arg[0][5][0].get("PowerState", False):
            test_count += 1
    assert test_count == 3
Example #39
def run_command(
        command=None,
        arguments=None,
        working_directory=None,
        standard_output=None,
        error_output=None,
        show_output=None,
        raise_exceptions=None,
        environment_overlay=None):
    # type: (str, Iterable[str], str, [], [], bool, bool, EnvironmentOverlay) -> int
    if show_output is None:
        show_output = True

    if raise_exceptions is None:
        raise_exceptions = True

    if arguments is None:
        arguments = list()

    if command:
        arguments.insert(0, command)

    if not arguments:
        raise ValueError('In order to run a command, a command, arguments, or both must be specified.')

    log.debug(
        "Invoking: {1}{0}"
        "\tCapture stdout?: {2}\tCapture stderr?: {3}\tShow output?: {4}\tRaise exceptions: {5}{0}"
        "\tWorking directory: {6}{0}"
        "\tEnvironment overlay: {7}{0}".format(
            linesep,
            ' '.join(arguments),
            standard_output is not None,
            error_output is not None,
            show_output,
            raise_exceptions,
            working_directory,
            repr(environment_overlay)))

    environment = None if not environment_overlay else environment_overlay.overlay(environ)

    process = Popen(
        args=arguments,
        cwd=working_directory,
        stdout=PIPE,
        stderr=PIPE,
        env=environment)

    stdout_routine = spawn(_synchronize_stream, process.stdout, stdout, standard_output, show_output)
    stderr_routine = spawn(_synchronize_stream, process.stderr, stderr, error_output, show_output)

    stdout_routine.join()
    stderr_routine.join()
    exit_code = process.wait()

    log.debug('Exit code: ' + str(exit_code))

    if exit_code != 0:
        message = str(
            "Non-zero exit detected from sub-process:{0}"
            "\tArguments: {1}{0}"
            "\tExit code: {2}{0}"
            "\tWorking directory: {3}{0}".format(
                linesep,
                ' '.join(arguments),
                str(exit_code),
                repr(working_directory)))
        if raise_exceptions:
            log.debug('raising exception: ' + message)
            process.stderr.close()
            process.stdout.close()
            raise RuntimeError(message)

        log.error(message)

    return exit_code
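A usage sketch for run_command, assuming it is importable from the module above and that _synchronize_stream appends captured lines to the lists passed as standard_output / error_output (inferred from the call signature, not confirmed by the source):

captured_out = []  # receives stdout lines, if the inference above holds
captured_err = []  # receives stderr lines

exit_code = run_command(
    command='echo',
    arguments=['hello'],
    standard_output=captured_out,
    error_output=captured_err,
    show_output=False,
    raise_exceptions=False)

print(exit_code, captured_out, captured_err)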
Example #40
class RaptureHarness(object):
    def setUp(self):
        self.rapture_process = None
        self.redis_process = None
        self.redis_endpoint = None
        self.jobs = None

    def tearDown(self):
        if self.rapture_running():
            self.interrupt_rapture()

        if self.redis_running():
            self.stop_redis()

    def start_redis(self):
        assert self.redis_process is None
        assert self.redis_endpoint is None
        assert self.jobs is None
        if not ENV_REDIS_COMMANDLINE:
            sockfile = binascii.hexlify(os.urandom(8)) + '.sock'
            redis_commandline = [REDIS_COMMAND, REDIS_CONF,
                                 '--unixsocket ' + sockfile,
                                 '--unixsocketperm 755']
            self.redis_endpoint = 'unix-socket=' + \
                os.path.join(REDIS_WORKING_DIR, sockfile)
        else:
            redis_commandline = shlex.split(ENV_REDIS_COMMANDLINE)
            self.redis_endpoint = ENV_REDIS_ENDPOINT
            assert(len(self.redis_endpoint.split(':')) >= 2)

        self.redis_process = Popen(redis_commandline, close_fds=True,
                                   preexec_fn=os.setsid, cwd=REDIS_WORKING_DIR)
        for n_attempt in xrange(4):
            try:
                protocol, endpoint = self.redis_endpoint.split('=', 1)
                if protocol == 'unix-socket':
                    self.jobs = \
                        jobs.connect_to_unix_socket_queue(endpoint)
                else:
                    host, port = endpoint.split(':', 1)
                    self.jobs = jobs.connect_to_queue(host, port)
            except:
                if n_attempt == 3:
                    raise
                else:
                    time.sleep(.1)

    def stop_redis(self):
        assert not self.rapture_running()
        self.jobs.disconnect()
        try:
            os.killpg(self.redis_process.pid, signal.SIGINT)
        except:
            pass
        self.redis_process.wait()
        self.redis_process, self.redis_endpoint, self.jobs = None, None, None

    def start_rapture(self, args):
        assert not self.rapture_running()
        socket = self.redis_endpoint or '/inexistent/redis/endpoint'

        self.rapture_process = Popen(shlex.split(RAPTURE_COMMAND) +
                                     ['--redis-' + socket] + args,
                                     close_fds=True, preexec_fn=os.setsid,
                                     stderr=PIPE, stdout=PIPE)

        # forward both stderr & stdout to stdout for them to be captured by
        # nosetests
        pipe_fd(self.rapture_process.stderr, sys.stdout)
        pipe_fd(self.rapture_process.stdout, sys.stdout)
        return self.rapture_process

    def wait_for_rapture(self):
        assert self.rapture_running()
        proc, self.rapture_process = self.rapture_process, None
        with Timeout(5, False):
            proc.wait()
            return 'errcode-%d' % proc.returncode
        os.killpg(proc.pid, signal.SIGKILL)
        proc.wait()
        return 'unresponsive'

    def interrupt_rapture(self):
        assert self.rapture_running()
        with Timeout(.1, False):
            self.rapture_process.wait()
            proc, self.rapture_process = self.rapture_process, None
            return 'already-dead-errcode-%d' % proc.returncode

        os.killpg(self.rapture_process.pid, signal.SIGINT)
        return self.wait_for_rapture()

    def rapture_running(self):
        return self.rapture_process is not None

    def redis_running(self):
        return self.redis_process is not None

    def assert_job_list(self, definitions, max_wait=5, interval=.1):
        ids = map(lambda d: self.jobs.push(d[0], **d[1]), definitions)

        done = False
        expire_at = time.time() + max_wait
        while not done:
            done = True
            time.sleep(interval)
            for i, job_id, job_def in zip(xrange(len(ids)), ids, definitions):
                snapshot = self.jobs.fetch_snapshot(job_id)
                expect = job_def[2]
                expect_args = job_def[3]
                if snapshot.status == jobs.STATUS_PENDING and \
                        expect != is_pending_snap:
                    done = False
                    break
                assmsg = '\nJob Index: %d\nSnapshot:\n %s\nExpect:\n %s' % \
                    (i, pprint.pformat(dict(snapshot._asdict())),
                     pprint.pformat(expect_args))
                self.assertTrue(expect(snapshot, **expect_args), assmsg)

            if time.time() > expire_at:
                break
        self.assertTrue(done, 'Time limit exceeded %ss' % max_wait)
Example #41
def test_actuator_topic(publish_agent, query_agent, volttron_instance1,
                        volttron_instance2):
    print("\n** test_actuator_topic **")

    # Create master driver config and a fake device config using config_builder.py
    process = Popen(['python', 'config_builder.py', '--count=1',
                     '--publish-only-depth-all',
                     'fake', 'fake_unit_testing.csv', 'null'],
                    env=volttron_instance1.env,
                    cwd='scripts/scalability-testing',
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    result = process.wait()
    print(result)
    assert result == 0

    # Start the master driver agent, which would in turn start the fake driver
    # using the configs created above
    master_uuid = volttron_instance1.install_agent(
        agent_dir="services/core/MasterDriverAgent",
        config_file="scripts/scalability-testing/configs/master-driver.agent",
        start=True)
    print("agent id: ", master_uuid)
    gevent.sleep(2)  # wait for the agent to start and start the devices

    # Start the actuator agent through which the publish agent communicates
    # with the fake device
    actuator_uuid = volttron_instance1.install_agent(
        agent_dir="services/core/ActuatorAgent",
        config_file="services/core/ActuatorAgent/tests/actuator.config",
        start=True)
    print("agent id: ", actuator_uuid)

    listener_uuid = volttron_instance2.install_agent(
        agent_dir="examples/ListenerAgent",
        config_file="examples/ListenerAgent/config",
        start=True)
    print("agent id: ", listener_uuid)

    # Make query agent running in instance two subscribe to
    # actuator_schedule_result topic
    # query_agent.callback = types.MethodType(callback, query_agent)
    query_agent.callback = MagicMock(name="callback")
    # subscribe to schedule response topic
    query_agent.vip.pubsub.subscribe(
        peer='pubsub',
        prefix=topics.ACTUATOR_SCHEDULE_RESULT,
        callback=query_agent.callback).get()

    # Now publish in volttron_instance1

    start = str(datetime.now())
    end = str(datetime.now() + timedelta(seconds=2))
    header = {
        'type': 'NEW_SCHEDULE',
        'requesterID': 'test-agent',  # The name of the requesting agent.
        'taskID': 'task_schedule_response',
        'priority': 'LOW'  # One of 'HIGH', 'LOW', 'LOW_PREEMPT'.
    }
    msg = [
        ['fakedriver0', start, end]
    ]
    # reset mock to ignore any previous callback
    publish(publish_agent, topics.ACTUATOR_SCHEDULE_REQUEST, header, msg)
    gevent.sleep(1)  # wait for topic to be forwarded and callback to happen

    # assert query_agent.callback.call_count == 1
    print('call args ', query_agent.callback.call_args_list)
    # assert query_agent.callback.call_args[0][1] == 'platform.actuator'
    assert query_agent.callback.call_args[0][3] == \
           topics.ACTUATOR_SCHEDULE_RESULT
    result_header = query_agent.callback.call_args[0][4]
    result_message = query_agent.callback.call_args[0][5]
    assert result_header['type'] == 'NEW_SCHEDULE'
    assert result_header['taskID'] == 'task_schedule_response'
    assert result_header['requesterID'] == 'test-agent'
    assert result_message['result'] == 'SUCCESS'
Example #42
def run():
    proc = Popen('bt-rssi', stdout=PIPE, shell=False)
    spawn(read_proc, proc.stdout)
    proc.wait()
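read_proc is not shown in this example; a minimal sketch of such a reader is given below. It assumes spawn is gevent's spawn (as the call above suggests) and that the process writes bytes, so the loop cooperates with other greenlets once gevent has monkey-patched I/O.

def read_proc(pipe):
    # Read the child's stdout line by line and echo it; under gevent
    # monkey-patching each blocking read yields to other greenlets.
    for line in iter(pipe.readline, b''):
        print(line.decode('utf-8', errors='replace').rstrip())
    pipe.close()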
Example #43
class TaskExecutor(object):
    def __init__(self, balancer, index):
        self.balancer = balancer
        self.index = index
        self.task = None
        self.proc = None
        self.pid = None
        self.conn = None
        self.state = WorkerState.STARTING
        self.key = str(uuid.uuid4())
        self.result = AsyncResult()
        self.exiting = False
        self.killed = False
        self.thread = gevent.spawn(self.executor)
        self.cv = Condition()
        self.status_lock = RLock()

    def checkin(self, conn):
        with self.cv:
            self.balancer.logger.debug(
                'Check-in of worker #{0} (key {1})'.format(
                    self.index, self.key))
            self.conn = conn
            self.state = WorkerState.IDLE
            self.cv.notify_all()

    def put_progress(self, progress):
        st = TaskStatus(None)
        st.__setstate__(progress)
        self.task.set_state(progress=st)

    def put_status(self, status):
        with self.cv:
            # Try to collect rusage at this point, when process is still alive
            try:
                kinfo = self.balancer.dispatcher.threaded(
                    bsd.kinfo_getproc, self.pid)
                self.task.rusage = kinfo.rusage
            except LookupError:
                pass

            if status['status'] == 'ROLLBACK':
                self.task.set_state(TaskState.ROLLBACK)

            if status['status'] == 'FINISHED':
                self.result.set(status['result'])

            if status['status'] == 'FAILED':
                error = status['error']

                if error['type'] in ERROR_TYPES:
                    cls = ERROR_TYPES[error['type']]
                    exc = cls(code=error['code'],
                              message=error['message'],
                              stacktrace=error['stacktrace'],
                              extra=error.get('extra'))
                else:
                    exc = OtherException(
                        code=error['code'],
                        message=error['message'],
                        stacktrace=error['stacktrace'],
                        type=error['type'],
                        extra=error.get('extra'),
                    )

                self.result.set_exception(exc)

    def put_warning(self, warning):
        self.task.add_warning(warning)

    def run(self, task):
        def match_file(module, f):
            name, ext = os.path.splitext(f)
            return module == name and ext in ['.py', '.pyc', '.so']

        with self.cv:
            self.cv.wait_for(lambda: self.state == WorkerState.ASSIGNED)
            self.result = AsyncResult()
            self.task = task
            self.task.set_state(TaskState.EXECUTING)
            self.state = WorkerState.EXECUTING
            self.cv.notify_all()

        self.balancer.logger.debug('Actually starting task {0}'.format(
            task.id))

        filename = None
        module_name = inspect.getmodule(task.clazz).__name__
        for dir in self.balancer.dispatcher.plugin_dirs:
            found = False
            try:
                for root, _, files in os.walk(dir):
                    file = first_or_default(
                        lambda f: match_file(module_name, f), files)
                    if file:
                        filename = os.path.join(root, file)
                        found = True
                        break

                if found:
                    break
            except OSError:
                continue

        try:
            self.conn.call_sync(
                'taskproxy.run', {
                    'id': task.id,
                    'user': task.user,
                    'class': task.clazz.__name__,
                    'filename': filename,
                    'args': task.args,
                    'debugger': task.debugger,
                    'environment': task.environment,
                    'hooks': task.hooks,
                })
        except RpcException as e:
            self.balancer.logger.warning(
                'Cannot start task {0} on executor #{1}: {2}'.format(
                    task.id, self.index, str(e)))

            self.balancer.logger.warning(
                'Killing unresponsive task executor #{0} (pid {1})'.format(
                    self.index, self.proc.pid))

            self.terminate()

        try:
            self.result.get()
        except BaseException as e:
            if isinstance(e, OtherException):
                self.balancer.dispatcher.report_error(
                    'Task {0} raised invalid exception'.format(self.task.name),
                    e)

            if isinstance(e, TaskAbortException):
                self.task.set_state(TaskState.ABORTED,
                                    TaskStatus(0, 'aborted'))
            else:
                self.task.error = serialize_error(e)
                self.task.set_state(
                    TaskState.FAILED,
                    TaskStatus(0,
                               str(e),
                               extra={"stacktrace": traceback.format_exc()}))

            with self.cv:
                self.task.ended.set()

                if self.state == WorkerState.EXECUTING:
                    self.state = WorkerState.IDLE
                    self.cv.notify_all()

            self.balancer.task_exited(self.task)
            return

        with self.cv:
            self.task.result = self.result.value
            self.task.set_state(TaskState.FINISHED, TaskStatus(100, ''))
            self.task.ended.set()
            if self.state == WorkerState.EXECUTING:
                self.state = WorkerState.IDLE
                self.cv.notify_all()

        self.balancer.task_exited(self.task)

    def abort(self):
        self.balancer.logger.info("Trying to abort task #{0}".format(
            self.task.id))
        # Try to abort via RPC. If this fails, kill process
        try:
            # If the task supports the abort protocol we don't need to worry about
            # subtasks - it's the task's responsibility to kill them
            self.conn.call_sync('taskproxy.abort')
        except RpcException as err:
            self.balancer.logger.warning(
                "Failed to abort task #{0} gracefully: {1}".format(
                    self.task.id, str(err)))
            self.balancer.logger.warning("Killing process {0}".format(
                self.pid))
            self.killed = True
            self.terminate()

            # Now kill all the subtasks
            for subtask in filter(lambda t: t.parent is self.task,
                                  self.balancer.task_list):
                self.balancer.logger.warning(
                    "Aborting subtask {0} because parent task {1} died".format(
                        subtask.id, self.task.id))
                self.balancer.abort(subtask.id)

    def terminate(self):
        try:
            self.proc.terminate()
        except OSError:
            self.balancer.logger.warning(
                'Executor process with PID {0} already dead'.format(
                    self.proc.pid))

    def executor(self):
        while not self.exiting:
            try:
                self.proc = Popen([TASKWORKER_PATH, self.key],
                                  close_fds=True,
                                  preexec_fn=os.setpgrp,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.STDOUT)

                self.pid = self.proc.pid
                self.balancer.logger.debug(
                    'Started executor #{0} as PID {1}'.format(
                        self.index, self.pid))
            except OSError:
                self.result.set_exception(
                    TaskException(errno.EFAULT, 'Cannot spawn task executor'))
                self.balancer.logger.error(
                    'Cannot spawn task executor #{0}'.format(self.index))
                return

            for line in self.proc.stdout:
                line = line.decode('utf8')
                self.balancer.logger.debug('Executor #{0}: {1}'.format(
                    self.index, line.strip()))
                if self.task:
                    self.task.output += line

            self.proc.wait()

            with self.cv:
                self.state = WorkerState.STARTING
                self.cv.notify_all()

            if self.proc.returncode == -signal.SIGTERM:
                self.balancer.logger.info(
                    'Executor process with PID {0} was terminated gracefully'.
                    format(self.proc.pid))
            else:
                self.balancer.logger.error(
                    'Executor process with PID {0} died abruptly with exit code {1}'
                    .format(self.proc.pid, self.proc.returncode))

            if self.killed:
                self.result.set_exception(
                    TaskException(errno.EFAULT, 'Task killed'))
            else:
                self.result.set_exception(
                    TaskException(errno.EFAULT, 'Task executor died'))
            gevent.sleep(1)

    def die(self):
        self.exiting = True
        if self.proc:
            self.terminate()
Example #44
def run_server(execs, cwd):
    proc = Popen(execs, stdout=PIPE, stderr=PIPE, cwd=cwd)
    stdout, stderr = proc.communicate()
    sys.stdout.write(stdout)
    if proc.wait() != 0:
        sys.stdout.write(stderr)
Example #45
def _cmd(cmdargs):
    process = Popen(cmdargs, env=_os.environ, stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
    process.wait()
Example #46
def proc_cmd(cmd, **kwargs):
    """
    :param cmd:
    list form:   ['ceph', 'osd', 'dump', '|', 'grep', 'poolname']
    string form: 'ceph osd dump | grep "poolname"'
    kwargs passes extra options, e.g. timeout=60, sudo=True, etc.
    """
    sudo = kwargs.get('sudo', True)
    timeout = kwargs.get('timeout', 20)
    # Convert a string command into a list of arguments
    if isinstance(cmd, str):
        cmd = shlex.split(cmd)
    _log.info('proc_cmd command: {}'.format(cmd))
    # To handle pipes, the command is required to use '|' as a separator
    all_cmd_list = list(_split_list(cmd, '|'))
    len_arg = len(all_cmd_list)
    if len_arg == 1:
        cmd_args = all_cmd_list[0]
        if sudo:
            cmd_args.insert(0, 'sudo')
        proc = Popen(cmd_args, close_fds=True, stdout=PIPE, stderr=PIPE)
        try:
            with gevent.Timeout(timeout, False):
                stdout, stderr = proc.communicate()
            if proc.returncode is None:
                _log.error("gevent.Timeout.cmd={},timeout={}".format(cmd, timeout))
                if proc:
                    kill_p = Popen(['sudo', 'kill', '--', str(proc.pid)], close_fds=True)
                    kill_p.wait()
                return 2, '', 'timeout error'
            return proc.returncode, stdout, stderr
        except Exception:
            if proc:
                # proc.kill()
                kill_p = Popen(['sudo', 'kill', '--', str(proc.pid)], close_fds=True)
                kill_p.wait()
            _log.error("proc_cmd unknownerror: cmd={}".format(cmd), exc_info=1)
            return 3, '', 'proc_cmd unknown error'
    else:
        if sudo:
            all_cmd_list[0].insert(0, 'sudo')
        p1 = Popen(all_cmd_list[0], stdout=PIPE)
        for cmd_middle in all_cmd_list[1:-1]:
            if sudo:
                cmd_middle.insert(0, 'sudo')
            p2 = Popen(cmd_middle, stdin=p1.stdout, stdout=PIPE, stderr=PIPE)
            p1.stdout.close()  # Allow p1 to receive a SIGPIPE if p2 exits.
            p1 = p2
        if sudo:
            all_cmd_list[-1].insert(0, 'sudo')
        p2 = Popen(all_cmd_list[-1], stdin=p1.stdout, stdout=PIPE, stderr=PIPE)
        p1.stdout.close()  # Allow p1 to receive a SIGPIPE if p2 exits.
        try:
            with gevent.Timeout(timeout, False):
                stdout, stderr = p2.communicate()
            if p2.returncode is None:
                if p2:
                    # p2.kill()
                    kill_p = Popen(['sudo', 'kill', '--', str(p2.pid)], close_fds=True)
                    kill_p.wait()
                _log.error("gevent.Timeout.cmd={},timeout={}".format(cmd, timeout))
                return 2, '', 'timeout error'
        except Exception as e:
            if p2:
                # p2.kill()
                kill_p = Popen(['sudo', 'kill', '--', str(p2.pid)], close_fds=True)
                kill_p.wait()
            _log.error("proc_cmd unknownerror: cmd={}".format(cmd), exc_info=1)
            return 3, '', 'proc_cmd unknown error'
        return p2.returncode, stdout, stderr
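The helper _split_list used above is not shown; the sketch below illustrates the splitting behaviour proc_cmd appears to rely on, followed by a typical call. Both are illustrative only, not the original implementation.

def _split_list(items, separator):
    # Yield sublists of `items`, splitting on every occurrence of `separator`:
    # ['ceph', 'osd', 'dump', '|', 'grep', 'pool'] ->
    #     [['ceph', 'osd', 'dump'], ['grep', 'pool']]
    chunk = []
    for item in items:
        if item == separator:
            yield chunk
            chunk = []
        else:
            chunk.append(item)
    yield chunk


# Typical call, per the docstring above; returns (returncode, stdout, stderr):
# rc, out, err = proc_cmd('ceph osd dump | grep "poolname"', sudo=False, timeout=60)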
Example #47
    def _job_monitor_glet(self, job_group, jobid, description, args, data):
        jobname = (REDIS_JOBS_GROUP_PREFIX+'-{}').format(job_group, jobid)
        joblogfile = os.path.join(
            config.get('MINEMELD_LOG_DIRECTORY_PATH', '/tmp'),
            '{}.log'.format(jobname)
        )
        jobtempdir = tempfile.mkdtemp(prefix=jobname)

        LOG.info('Executing job {} - {} cwd: {} logfile: {}'.format(jobname, args, jobtempdir, joblogfile))

        try:
            with open(joblogfile, 'w+') as logfile:
                jobprocess = Popen(
                    args=args,
                    close_fds=True,
                    cwd=jobtempdir,
                    shell=False,
                    stdout=logfile,
                    stderr=subprocess.STDOUT
                )

        except OSError:
            self._safe_remove(joblogfile)
            self._safe_rmtree(jobtempdir)
            LOG.exception('Error starting job {}'.format(jobname))
            return

        jobpsproc = psutil.Process(pid=jobprocess.pid)

        jobdata = data
        if jobdata is None:
            jobdata = {}

        jobdata['create_time'] = int(time.time()*1000)
        jobdata['description'] = description
        jobdata['job_id'] = jobid
        jobdata['pid'] = jobpsproc.pid
        jobdata['hash'] = hash(jobpsproc)
        jobdata['logfile'] = joblogfile
        jobdata['cwd'] = jobtempdir
        jobdata['status'] = 'RUNNING'

        self.SR.hset(
            REDIS_JOBS_GROUP_PREFIX.format(job_group),
            jobid,
            json.dumps(jobdata)
        )

        jobprocess.wait()

        if jobprocess.returncode != 0:
            jobdata['status'] = 'ERROR'
        else:
            jobdata['status'] = 'DONE'
        jobdata['returncode'] = jobprocess.returncode
        jobdata['end_time'] = int(time.time()*1000)

        self._collect_job(jobdata)

        self.SR.hset(
            REDIS_JOBS_GROUP_PREFIX.format(job_group),
            jobid,
            json.dumps(jobdata)
        )

        job = self.running_jobs[job_group].pop(jobid, None)
        if job is not None and job.timeout_glet is not None:
            job.timeout_glet.kill()

        return jobprocess.returncode
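The job metadata written above is stored as JSON in a Redis hash keyed by the job group. Assuming self.SR is a redis-py style client (as the hset calls suggest), reading a job's status back could look like the sketch below; get_job_status is hypothetical, not part of the original module.

import json


def get_job_status(sr, job_group, jobid):
    # Fetch the JSON blob stored by _job_monitor_glet and return its status field.
    raw = sr.hget(REDIS_JOBS_GROUP_PREFIX.format(job_group), jobid)
    if raw is None:
        return None
    return json.loads(raw).get('status')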
Example #48
def publish_agent(request, volttron_instance1):
    """
    Fixture used for setting up the environment.
    1. Creates fake driver configs
    2. Starts the master driver agent with the created fake driver agents
    3. Starts the actuator agent
    4. Creates an instance Agent class for publishing and returns it

    :param request: pytest request object
    :param volttron_instance1: instance of volttron in which test cases are run
    :return: an instance of fake agent used for publishing
    """

    # Reset master driver config store
    cmd = ['volttron-ctl', 'config', 'delete', PLATFORM_DRIVER, '--all']

    process = Popen(cmd, env=volttron_instance1.env,
                    cwd='scripts/scalability-testing',
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    result = process.wait()
    print(result)
    assert result == 0

    # Add master driver configuration files to config store.
    cmd = ['volttron-ctl', 'config', 'store', PLATFORM_DRIVER,
           'fake.csv', 'fake_unit_testing.csv', '--csv']
    process = Popen(cmd, env=volttron_instance1.env,
                    cwd='scripts/scalability-testing',
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    result = process.wait()
    print(result)
    assert result == 0

    config_name = "devices/fakedriver"
    cmd = ['volttron-ctl', 'config', 'store', PLATFORM_DRIVER,
           config_name, 'fake_unit_testing.config', '--json']
    process = Popen(cmd, env=volttron_instance1.env,
                    cwd='scripts/scalability-testing',
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    result = process.wait()
    print(result)
    assert result == 0

    # Start the master driver agent, which would in turn start the fake driver
    # using the configs created above
    master_uuid = volttron_instance1.install_agent(
        agent_dir=get_services_core("MasterDriverAgent"),
        config_file={},
        start=True)
    print("agent id: ", master_uuid)
    gevent.sleep(2)  # wait for the agent to start and start the devices

    # Start the actuator agent through which the publish agent communicates
    # with the fake device
    actuator_uuid = volttron_instance1.install_agent(
        agent_dir=get_services_core("ActuatorAgent"),
        config_file=get_services_core("ActuatorAgent/tests/actuator.config"),
        start=True)
    print("agent id: ", actuator_uuid)
    gevent.sleep(2)


    example_uuid = volttron_instance1.install_agent(
        agent_dir=get_examples("ConfigActuation"),
        config_file={},
        vip_identity="config_actuation")
    gevent.sleep(2)

    # 3: Start a fake agent to publish to message bus
    publish_agent = volttron_instance1.build_agent(identity=TEST_AGENT)

    # 4: add a teardown method to stop the agents installed above and the fake
    #    agent that published to the message bus
    def stop_agent():
        print("In teardown method of module")
        volttron_instance1.stop_agent(actuator_uuid)
        volttron_instance1.stop_agent(master_uuid)
        volttron_instance1.stop_agent(example_uuid)
        volttron_instance1.remove_agent(actuator_uuid)
        volttron_instance1.remove_agent(master_uuid)
        volttron_instance1.remove_agent(example_uuid)
        publish_agent.core.stop()

    request.addfinalizer(stop_agent)
    return publish_agent
Example #49
class TaskExecutor(object):
    def __init__(self, balancer, index):
        self.balancer = balancer
        self.index = index
        self.task = None
        self.proc = None
        self.pid = None
        self.conn = None
        self.state = None
        self.key = str(uuid.uuid4())
        self.checked_in = Event()
        self.result = AsyncResult()
        self.exiting = False
        self.thread = gevent.spawn(self.executor)

    def checkin(self, conn):
        self.balancer.logger.debug('Check-in of worker #{0} (key {1})'.format(self.index, self.key))
        self.conn = conn
        self.state = WorkerState.IDLE
        self.checked_in.set()

    def get_status(self):
        if not self.conn:
            return None

        try:
            st = TaskStatus(0)
            if issubclass(self.task.clazz, MasterProgressTask):
                progress_subtask_info = self.conn.call_client_sync(
                    'taskproxy.get_master_progress_info'
                )
                if progress_subtask_info['increment_progress'] != 0:
                    progress_subtask_info['progress'] += progress_subtask_info['increment_progress']
                    progress_subtask_info['increment_progress'] = 0
                    self.conn.call_client_sync(
                        'taskproxy.set_master_progress_detail',
                        {
                            'progress': progress_subtask_info['progress'],
                            'increment_progress': progress_subtask_info['increment_progress']
                        }
                    )
                if progress_subtask_info['active_tids']:
                    progress_to_increment = 0
                    concurent_weight = progress_subtask_info['concurent_subtask_detail']['average_weight']
                    for tid in progress_subtask_info['concurent_subtask_detail']['tids']:
                        subtask_status = self.balancer.get_task(tid).executor.get_status()
                        progress_to_increment += subtask_status.percentage * concurent_weight * \
                            progress_subtask_info['subtask_weights'][str(tid)]
                    for tid in set(progress_subtask_info['active_tids']).symmetric_difference(
                        set(progress_subtask_info['concurent_subtask_detail']['tids'])
                    ):
                        subtask_status = self.balancer.get_task(tid).executor.get_status()
                        progress_to_increment += subtask_status.percentage * \
                            progress_subtask_info['subtask_weights'][str(tid)]
                    progress_subtask_info['progress'] += int(progress_to_increment)
                    if progress_subtask_info['pass_subtask_details']:
                        progress_subtask_info['message'] = subtask_status.message
                st = TaskStatus(
                    progress_subtask_info['progress'], progress_subtask_info['message']
                )
            else:
                st.__setstate__(self.conn.call_client_sync('taskproxy.get_status'))
            return st

        except RpcException as err:
            self.balancer.logger.error(
                "Cannot obtain status from task #{0}: {1}".format(self.task.id, str(err))
            )
            self.proc.terminate()

    def put_status(self, status):
        # Try to collect rusage at this point, when process is still alive
        try:
            kinfo = bsd.kinfo_getproc(self.pid)
            self.task.rusage = kinfo.rusage
        except LookupError:
            pass

        if status['status'] == 'ROLLBACK':
            self.task.set_state(TaskState.ROLLBACK)

        if status['status'] == 'FINISHED':
            self.result.set(status['result'])

        if status['status'] == 'FAILED':
            error = status['error']
            cls = TaskException

            if error['type'] == 'task.TaskAbortException':
                cls = TaskAbortException

            if error['type'] == 'ValidationException':
                cls = ValidationException

            self.result.set_exception(cls(
                code=error['code'],
                message=error['message'],
                stacktrace=error['stacktrace'],
                extra=error.get('extra')
            ))

    def put_warning(self, warning):
        self.task.add_warning(warning)

    def run(self, task):
        self.result = AsyncResult()
        self.task = task
        self.task.set_state(TaskState.EXECUTING)

        filename = None
        module_name = inspect.getmodule(task.clazz).__name__
        for dir in self.balancer.dispatcher.plugin_dirs:
            found = False
            try:
                for root, _, files in os.walk(dir):
                    file = first_or_default(lambda f: module_name in f, files)
                    if file:
                        filename = os.path.join(root, file)
                        found = True
                        break

                if found:
                    break
            except FileNotFoundError:
                continue

        self.conn.call_client_sync('taskproxy.run', {
            'id': task.id,
            'class': task.clazz.__name__,
            'filename': filename,
            'args': task.args,
            'debugger': task.debugger,
            'environment': task.environment
        })

        try:
            self.result.get()
        except BaseException as e:
            if not isinstance(e, TaskException):
                self.balancer.dispatcher.report_error(
                    'Task {0} raised exception other than TaskException'.format(self.task.name),
                    e
                )

            if isinstance(e, TaskAbortException):
                self.task.set_state(TaskState.ABORTED, TaskStatus(0, 'aborted'))
            else:
                self.task.error = serialize_error(e)
                self.task.set_state(TaskState.FAILED, TaskStatus(0, str(e), extra={
                    "stacktrace": traceback.format_exc()
                }))

            self.task.ended.set()
            self.balancer.task_exited(self.task)
            self.state = WorkerState.IDLE
            return

        self.task.result = self.result.value
        self.task.set_state(TaskState.FINISHED, TaskStatus(100, ''))
        self.task.ended.set()
        self.balancer.task_exited(self.task)
        self.state = WorkerState.IDLE

    def abort(self):
        self.balancer.logger.info("Trying to abort task #{0}".format(self.task.id))
        # Try to abort via RPC. If this fails, kill process
        try:
            self.conn.call_client_sync('taskproxy.abort')
        except RpcException as err:
            self.balancer.logger.warning("Failed to abort task #{0} gracefully: {1}".format(self.task.id, str(err)))
            self.balancer.logger.warning("Killing process {0}".format(self.pid))
            self.proc.terminate()

    def executor(self):
        while not self.exiting:
            try:
                self.proc = Popen(
                    [TASKWORKER_PATH, self.key],
                    close_fds=True,
                    preexec_fn=os.setpgrp,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT)

                self.pid = self.proc.pid
                self.balancer.logger.debug('Started executor #{0} as PID {1}'.format(self.index, self.pid))
            except OSError:
                self.result.set_exception(TaskException(errno.EFAULT, 'Cannot spawn task executor'))
                self.balancer.logger.error('Cannot spawn task executor #{0}'.format(self.index))
                return

            for line in self.proc.stdout:
                line = line.decode('utf8')
                self.balancer.logger.debug('Executor #{0}: {1}'.format(self.index, line.strip()))
                if self.task:
                    self.task.output += line

            self.proc.wait()

            if self.proc.returncode == -signal.SIGTERM:
                self.balancer.logger.info(
                    'Executor process with PID {0} was terminated gracefully'.format(
                        self.proc.pid
                    )
                )
            else:
                self.balancer.logger.error('Executor process with PID {0} died abruptly with exit code {1}'.format(
                    self.proc.pid,
                    self.proc.returncode)
                )

            self.result.set_exception(TaskException(errno.EFAULT, 'Task executor died'))
            gevent.sleep(1)

    def die(self):
        self.exiting = True
        if self.proc:
            try:
                self.proc.terminate()
            except ProcessLookupError:
                self.balancer.logger.warning('Executor process with PID {0} already dead'.format(self.proc.pid))
def test_actuator_topic(publish_agent, query_agent):
    print("\n** test_actuator_topic **")
    global volttron_instance1, volttron_instance2

    # Create master driver config and a fake device config using config_builder.py
    process = Popen(['python', 'config_builder.py', '--count=1',
                     '--publish-only-depth-all',
                     'fake', 'fake_unit_testing.csv', 'null'],
                    env=volttron_instance1.env,
                    cwd='scripts/scalability-testing',
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    result = process.wait()
    print(result)
    assert result == 0

    # Start the master driver agent, which would in turn start the fake driver
    # using the configs created above
    master_uuid = volttron_instance1.install_agent(
        agent_dir="services/core/MasterDriverAgent",
        config_file="scripts/scalability-testing/configs/master-driver.agent",
        start=True)
    print("agent id: ", master_uuid)
    gevent.sleep(2)  # wait for the agent to start and start the devices

    # Start the actuator agent through which the publish agent communicates
    # with the fake device
    actuator_uuid = volttron_instance1.install_agent(
        agent_dir="services/core/ActuatorAgent",
        config_file="services/core/ActuatorAgent/tests/actuator.config",
        start=True)
    print("agent id: ", actuator_uuid)

    listener_uuid = volttron_instance2.install_agent(
        agent_dir="examples/ListenerAgent",
        config_file="examples/ListenerAgent/config",
        start=True)
    print("agent id: ", listener_uuid)

    try:
        # Make query agent running in instance two subscribe to
        # actuator_schedule_result topic
        # query_agent.callback = types.MethodType(callback, query_agent)
        query_agent.callback = MagicMock(name="callback")
        # subscribe to schedule response topic
        query_agent.vip.pubsub.subscribe(
            peer='pubsub',
            prefix=topics.ACTUATOR_SCHEDULE_RESULT,
            callback=query_agent.callback).get()

        # Now publish in volttron_instance1

        start = str(datetime.now())
        end = str(datetime.now() + timedelta(seconds=2))
        header = {
            'type': 'NEW_SCHEDULE',
            'requesterID': 'test-agent',  # The name of the requesting agent.
            'taskID': 'task_schedule_response',
            'priority': 'LOW'  # One of 'HIGH', 'LOW', 'LOW_PREEMPT'.
        }
        msg = [
            ['fakedriver0', start, end]
        ]
        # reset mock to ignore any previous callback
        publish(publish_agent, topics.ACTUATOR_SCHEDULE_REQUEST, header, msg)
        gevent.sleep(1)  # wait for topic to be forwarded and callback to happen

        # assert query_agent.callback.call_count == 1
        print('call args ', query_agent.callback.call_args_list)
        # assert query_agent.callback.call_args[0][1] == 'platform.actuator'
        assert query_agent.callback.call_args[0][3] == \
               topics.ACTUATOR_SCHEDULE_RESULT
        result_header = query_agent.callback.call_args[0][4]
        result_message = query_agent.callback.call_args[0][5]
        assert result_header['type'] == 'NEW_SCHEDULE'
        assert result_header['taskID'] == 'task_schedule_response'
        assert result_header['requesterID'] == 'test-agent'
        assert result_message['result'] == 'SUCCESS'
    finally:
        volttron_instance1.stop_agent(master_uuid)
        volttron_instance1.remove_agent(master_uuid)
        volttron_instance1.stop_agent(actuator_uuid)
        volttron_instance1.remove_agent(actuator_uuid)
        volttron_instance2.stop_agent(listener_uuid)
        volttron_instance2.remove_agent(listener_uuid)