def stop_platform(self):
    """
    Stop the platform without cleaning up any agents or context of the
    agent.  This should be paired with restart platform in order to
    maintain the context of the platform.
    """
    if not self.is_running():
        return

    try:
        execute_command(['volttron-ctl', 'shutdown', '--platform'],
                        env=self.env, logger=_log,
                        err_prefix="Error shutting down platform")
    except RuntimeError:
        # Graceful shutdown failed; fall back to terminating the platform
        # process directly, if this wrapper actually holds one.
        if self.p_process is None:
            self.logit("platform process was null")
        else:
            try:
                gevent.sleep(0.2)
                self.p_process.terminate()
                gevent.sleep(0.2)
            except OSError:
                self.logit('Platform process was terminated.')
    gevent.sleep(1)
def generate_apidoc(app):
    """
    Generate apidocs for modules under volttron/platform and
    volttron/services/core.

    :param app: the sphinx application object (unused; required by the
        sphinx event-handler signature)
    """
    volttron_src = os.path.abspath('../volttron')
    if os.environ.get("READTHEDOCS"):
        # readthedocs builds run from a different working directory
        volttron_src = os.path.abspath('../../volttron')

    # Exclusions must be full paths to directories
    exclusions = [
        os.path.join(volttron_src, 'lint/'),
        os.path.join(volttron_src, 'drivers/')
    ]
    # NOTE: the flag and its value are separate argv tokens; the previous
    # single token '-d 4' only worked because argparse tolerates the
    # embedded space for short options.
    cmd = ["sphinx-apidoc", '-M', '-d', '4', '-o', 'source/volttron_api',
           '--force', volttron_src]
    cmd.extend(exclusions)

    print("The command is: {}".format(cmd))
    execute_command(cmd)
def test_install_arg_matrix(volttron_instance: PlatformWrapper, args: List, use_config: bool):
    """Install the listener agent with a matrix of vctl arguments and verify status."""
    listener_config_file = get_examples("ListenerAgent/config")

    with with_os_environ(volttron_instance.env):
        # Don't change the parametrized args that have mutable values. Make copy
        # if changing within test.
        # parameterized args when used with more than 1 .parametrize() or with
        # another parameterized fixture fails to reset values correctly:
        # @pytest.mark.parametrize("x,y", (([1, 2], 1), ([3, 4], 1))) works even
        # if x is changed in the test, but combined with
        # @pytest.mark.parametrize("z", [8, 9]) it will fail to reset x.
        vctl_args = copy.copy(args)
        vctl_args.insert(0, "--json")
        vctl_args.insert(0, "volttron-ctl")
        if use_config:
            vctl_args.extend(["--agent-config", listener_config_file])

        response = execute_command(vctl_args, volttron_instance.env)
        agent_uuid = jsonapi.loads(response)["agent_uuid"]
        gevent.sleep(1)

        response = execute_command(["vctl", "--json", "status", agent_uuid],
                                   volttron_instance.env)
        json_response = jsonapi.loads(response)
        identity = list(json_response.keys())[0]
        agent_status_dict = json_response[identity]

        if "--start" in vctl_args:
            assert agent_status_dict["status"]
        if "--tag" in vctl_args:
            assert agent_status_dict["agent_tag"]
            tag_name = vctl_args[vctl_args.index("--tag") + 1]
            assert tag_name == agent_status_dict["agent_tag"]
        if "--vip-identity" in vctl_args:
            assert agent_status_dict["identity"]
            expected_identity = vctl_args[vctl_args.index("--vip-identity") + 1]
            assert expected_identity == agent_status_dict["identity"]
        if use_config:
            with open(listener_config_file) as fp:
                expected_config = yaml.safe_load(fp.read())
            config_path = Path(volttron_instance.volttron_home).joinpath(
                f"agents/{agent_uuid}/listeneragent-3.3/listeneragent-3.3.dist-info/config"
            )
            with open(config_path) as fp:
                config_data = yaml.safe_load(fp.read())
            assert expected_config == config_data

        volttron_instance.remove_all_agents()
def stop_platform(self):
    """
    Stop the platform without cleaning up any agents or context of the
    agent.  This should be paired with restart platform in order to
    maintain the context of the platform.
    """
    if not self.is_running():
        return

    try:
        execute_command(['volttron-ctl', 'shutdown', '--platform'],
                        env=self.env, logger=_log,
                        err_prefix="Error shutting down platform")
    except RuntimeError:
        # Graceful shutdown failed; fall back to terminating the platform
        # process directly, if this wrapper actually holds one.
        if self.p_process is None:
            self.logit("platform process was null")
        else:
            try:
                gevent.sleep(0.2)
                self.p_process.terminate()
                gevent.sleep(0.2)
            except OSError:
                self.logit('Platform process was terminated.')
    gevent.sleep(1)
def test_vctl_shutdown_on_rmq_stop(request):
    """
    Test for fix of issue #1886: 'vctl shutdown --platform' must stop the
    platform and its agents even when the RabbitMQ server is already down.
    """
    address = get_rand_vip()
    volttron_instance = build_wrapper(address, messagebus='rmq', ssl_auth=True)
    agent_uuid = volttron_instance.install_agent(
        agent_dir=get_examples("ListenerAgent"), start=True)
    assert agent_uuid is not None

    agent_pid = volttron_instance.agent_pid(agent_uuid)
    assert agent_pid is not None and agent_pid > 0

    # Stop RabbitMQ server
    rmq_cfg = RMQConfig()
    stop_rabbit(rmq_home=rmq_cfg.rmq_home, env=volttron_instance.env)
    gevent.sleep(5)

    # Shutdown platform
    cmd = ['volttron-ctl', 'shutdown', '--platform']
    execute_command(cmd, env=volttron_instance.env)
    gevent.sleep(2)

    # Check that installed agent and platform are not running.
    # (idiomatic `not x` instead of `x == False`)
    assert not psutil.pid_exists(agent_pid)
    assert not volttron_instance.is_running()
def remove_agent(opts, agent_uuid):
    """Remove the installed agent identified by agent_uuid via volttron-ctl."""
    env = _build_copy_env(opts)
    execute_command([opts.volttron_control, "remove", agent_uuid],
                    env=env, logger=log,
                    err_prefix="Error removing agent")
def install_requirements(agent_source):
    """Pip-install the agent's requirements.txt, if the agent ships one.

    Exits the process with status 1 when installation fails.
    """
    req_file = os.path.join(agent_source, "requirements.txt")
    if not os.path.exists(req_file):
        return
    log.info("Installing requirements for agent.")
    try:
        execute_command(["pip", "install", "-r", req_file],
                        logger=log,
                        err_prefix="Error installing requirements")
    except RuntimeError:
        sys.exit(1)
def restart_ssl(rmq_home, env=None):
    """
    Run ``rabbitmqctl eval "ssl:stop(), ssl:start()."`` to make rmq reload
    ssl certificates.  Client connections will get dropped and clients are
    expected to reconnect.

    :param rmq_home: RabbitMQ installation path
    :param env: Environment to run the RabbitMQ command.
    """
    rabbitmqctl = os.path.join(rmq_home, "sbin/rabbitmqctl")
    execute_command([rabbitmqctl, "eval", "ssl:stop(), ssl:start()."],
                    err_prefix="Error reloading ssl certificates",
                    env=env, logger=_log)
def install_requirements(agent_source):
    """Pip-install the agent's requirements.txt, if the agent ships one.

    Exits the process with status 1 when installation fails.
    """
    req_file = os.path.join(agent_source, "requirements.txt")
    if not os.path.exists(req_file):
        return
    log.info("Installing requirements for agent.")
    try:
        execute_command(["pip", "install", "-r", req_file],
                        logger=log,
                        err_prefix="Error installing requirements")
    except RuntimeError:
        sys.exit(1)
def start_rabbit(rmq_home, env=None): """ Start RabbitMQ server. The function assumes that rabbitmq.conf in rmq_home/etc/rabbitmq is setup before this funciton is called. If the function cannot detect that rabbit was started within roughly 60 seconds then `class:RabbitMQStartError` will be raised. :param rmq_home: RabbitMQ installation path :param env: Environment to start RabbitMQ with. :raises RabbitMQStartError: """ # rabbitmqctl status returns true as soon as the erlang vm and does not wait # for all the plugins and database to be initialized and rmq is ready to # accept incoming connection. # Nor does rabbitmqctl wait, rabbitmqctl await_online_nodes work for this # purpose. shovel_status comes close... status_cmd = [os.path.join(rmq_home, "sbin/rabbitmqctl"), "shovel_status"] start_cmd = [os.path.join(rmq_home, "sbin/rabbitmq-server"), "-detached"] i = 0 started = False start = True while not started: try: # we expect this call to raise a RuntimeError until the rabbitmq server # is up and running. execute_command(status_cmd, env=env) if not start: # if we have attempted started already gevent.sleep(1) # give a second just to be sure started = True _log.info("Rmq server at {} is running at ".format(rmq_home)) except RuntimeError as e: # First time this exception block we are going to attempt to start # the rabbitmq server. if start: _log.debug("Rabbitmq is not running. Attempting to start") msg = "Error starting rabbitmq at {}".format(rmq_home) # attempt to start once execute_command(start_cmd, env=env, err_prefix=msg, logger=_log) start = False else: if i > 60: # if more than 60 tries we assume something failed raise RabbitMQStartError("Unable to verify rabbitmq server has started in a resonable time.") else: # sleep for another 2 seconds and check status again gevent.sleep(2) i = i + 2
def check_rabbit_status(rmq_home=None, env=None):
    """
    Return True when the RabbitMQ server under ``rmq_home`` responds to
    ``rabbitmqctl shovel_status``, False otherwise.

    When ``rmq_home`` is not given it is taken from RMQConfig().
    """
    if not rmq_home:
        rmq_home = RMQConfig().rmq_home
    status_cmd = [os.path.join(rmq_home, "sbin/rabbitmqctl"), "shovel_status"]
    try:
        execute_command(status_cmd, env=env)
    except RuntimeError:
        return False
    return True
def test_install_with_wheel(volttron_instance: PlatformWrapper):
    """Package the listener agent into a wheel, install it, verify a uuid is returned."""
    with with_os_environ(volttron_instance.env):
        global listener_agent_dir

        response = execute_command(
            ["volttron-pkg", "package", listener_agent_dir],
            volttron_instance.env)
        prefix = "Package created at: "
        assert response.startswith(prefix)
        path = response[len(prefix):].strip()
        assert os.path.exists(path)

        response = execute_command(
            ["volttron-ctl", "--json", "install", path],
            volttron_instance.env)
        assert jsonapi.loads(response).get("agent_uuid")

        volttron_instance.remove_all_agents()
def start_agent(self, agent_uuid):
    """
    Start the installed agent identified by ``agent_uuid`` and return its pid.

    Asserts that `volttron-ctl status` reports the agent as running and that
    the reported pid actually exists.  The pid is also recorded in
    ``self.started_agent_pids``.
    """
    self.logit('Starting agent {}'.format(agent_uuid))
    self.logit("VOLTTRON_HOME SETTING: {}".format(
        self.env['VOLTTRON_HOME']))
    cmd = ['volttron-ctl']
    cmd.extend(['start', agent_uuid])
    p = Popen(cmd, env=self.env, stdout=sys.stdout, stderr=sys.stderr)
    p.wait()

    # Confirm agent running
    cmd = ['volttron-ctl']
    cmd.extend(['status', agent_uuid])
    res = execute_command(cmd, env=self.env)
    # 776 TODO: Timing issue where check fails
    time.sleep(.1)
    self.logit("Subprocess res is {}".format(res))
    assert 'running' in res
    # The pid is printed between square brackets in the status line,
    # e.g. "... running [10737]".
    pidpos = res.index('[') + 1
    pidend = res.index(']')
    pid = int(res[pidpos:pidend])
    assert psutil.pid_exists(pid), \
        "The pid associated with agent {} does not exist".format(pid)
    self.started_agent_pids.append(pid)
    return pid
def start_agent(self, agent_uuid):
    """Start the agent ``agent_uuid`` and return the pid it runs under."""
    self.logit('Starting agent {}'.format(agent_uuid))
    self.logit("VOLTTRON_HOME SETTING: {}".format(self.env['VOLTTRON_HOME']))

    proc = Popen(['volttron-ctl', 'start', agent_uuid],
                 env=self.env, stdout=sys.stdout, stderr=sys.stderr)
    proc.wait()

    # Confirm the agent is running by asking vctl for its status.
    res = execute_command(['volttron-ctl', 'status', agent_uuid], env=self.env)
    # 776 TODO: Timing issue where check fails
    time.sleep(.1)
    self.logit("Subprocess res is {}".format(res))
    assert 'running' in res

    # The pid is printed between square brackets in the status line.
    bracket_open = res.index('[') + 1
    bracket_close = res.index(']')
    pid = int(res[bracket_open:bracket_close])
    assert psutil.pid_exists(pid), \
        "The pid associated with agent {} does not exist".format(pid)

    self.started_agent_pids.append(pid)
    return pid
def _create_initial_package(agent_dir_to_package, wheelhouse, identity=None):
    """Create an initial whl file from the passed agent_dir_to_package.

    The function produces a wheel from the setup.py file located in
    agent_dir_to_package.

    Parameters:
        agent_dir_to_package - The root directory of the specific agent
            that is to be packaged.

    Returns the path and file name of the packaged whl file, or None when
    the build subprocess failed.
    """
    tmpdir = tempfile.mkdtemp()
    try:
        builddir = os.path.join(tmpdir, 'pkg')
        distdir = os.path.join(builddir, 'dist')
        shutil.copytree(agent_dir_to_package, builddir)
        cmd = [sys.executable, 'setup.py', '--no-user-cfg', 'bdist_wheel']
        execute_command(cmd, cwd=builddir, logger=_log)

        wheel_name = os.listdir(distdir)[0]
        wheel_path = os.path.join(distdir, wheel_name)

        if identity is not None:
            # Write the requested identity into a temp file which is packaged
            # into the wheel below.
            tmp_identity_file_fd, identity_template_filename = tempfile.mkstemp(
                dir=builddir)
            tmp_identity_file = os.fdopen(tmp_identity_file_fd, "w")
            tmp_identity_file.write(identity)
            tmp_identity_file.close()
        else:
            identity_template_filename = os.path.join(builddir, "IDENTITY")

        if os.path.exists(identity_template_filename):
            add_files_to_package(wheel_path,
                                 {'identity_file': identity_template_filename})

        if not os.path.exists(wheelhouse):
            os.makedirs(wheelhouse, 0o750)
        wheel_dest = os.path.join(wheelhouse, wheel_name)
        shutil.move(wheel_path, wheel_dest)
        return wheel_dest
    except subprocess.CalledProcessError:
        # BUGFIX: traceback.print_last() raises ValueError here because
        # sys.last_traceback is only set by the interactive interpreter;
        # print_exc() reports the exception currently being handled.
        traceback.print_exc()
    finally:
        shutil.rmtree(tmpdir, True)
def test_install_with_wheel_bad_path(volttron_instance: PlatformWrapper):
    """Installing from a nonexistent wheel path must raise a RuntimeError."""
    with with_os_environ(volttron_instance.env):
        bad_wheel_path = "foo/wheel.whl"
        args = ["volttron-ctl", "--json", "install", bad_wheel_path]
        try:
            execute_command(args, volttron_instance.env)
        except RuntimeError as exc:
            assert f"Invalid file {bad_wheel_path}" in exc.args[0]
        else:
            # BUGFIX: the original try/except silently passed the test when
            # no exception was raised at all.
            assert False, "Expected RuntimeError for invalid wheel path"
def remove_agent(self, agent_uuid):
    """Remove the agent specified by agent_uuid"""
    _log.debug("REMOVING AGENT: {}".format(agent_uuid))
    execute_command(['volttron-ctl', 'remove', agent_uuid],
                    env=self.env, logger=_log,
                    err_prefix="Error removing agent")
    # Report whatever pid (if any) the agent still has after removal.
    return self.agent_pid(agent_uuid)
def stop_agent(self, agent_uuid):
    """Stop the agent specified by agent_uuid; return its pid afterwards."""
    _log.debug("STOPPING AGENT: {}".format(agent_uuid))
    execute_command(['volttron-ctl', 'stop', agent_uuid],
                    env=self.env, logger=_log,
                    err_prefix="Error stopping agent")
    return self.agent_pid(agent_uuid)
def stop_rabbit(rmq_home, env=None, quite=False):
    """
    Stop RabbitMQ Server

    :param rmq_home: RabbitMQ installation path
    :param env: Environment to run the RabbitMQ command.
    :param quite: when True, swallow shutdown errors and skip the info log.
        (Parameter name kept as-is for backward compatibility with callers.)
    """
    try:
        if env:
            _log.debug("Stop RMQ: {}".format(env.get('VOLTTRON_HOME')))
        cmd = [os.path.join(rmq_home, "sbin/rabbitmqctl"), "stop"]
        execute_command(cmd, env=env)
        gevent.sleep(2)
        if not quite:
            _log.info("**Stopped rmq server")
    except RuntimeError:
        if not quite:
            # BUGFIX: bare `raise` preserves the original traceback,
            # `raise e` replaced it with this frame.
            raise
def _create_initial_package(agent_dir_to_package, wheelhouse, identity=None):
    """Create an initial whl file from the passed agent_dir_to_package.

    The function produces a wheel from the setup.py file located in
    agent_dir_to_package.

    Parameters:
        agent_dir_to_package - The root directory of the specific agent
            that is to be packaged.

    Returns the path and file name of the packaged whl file, or None when
    the build subprocess failed.
    """
    tmpdir = tempfile.mkdtemp()
    try:
        builddir = os.path.join(tmpdir, 'pkg')
        distdir = os.path.join(builddir, 'dist')
        shutil.copytree(agent_dir_to_package, builddir)
        cmd = [sys.executable, 'setup.py', '--no-user-cfg', 'bdist_wheel']
        execute_command(cmd, cwd=builddir, logger=_log)

        wheel_name = os.listdir(distdir)[0]
        wheel_path = os.path.join(distdir, wheel_name)

        if identity is not None:
            # Write the requested identity into a temp file which is packaged
            # into the wheel below.
            tmp_identity_file_fd, identity_template_filename = tempfile.mkstemp(
                dir=builddir)
            tmp_identity_file = os.fdopen(tmp_identity_file_fd, "w")
            tmp_identity_file.write(identity)
            tmp_identity_file.close()
        else:
            identity_template_filename = os.path.join(builddir, "IDENTITY")

        if os.path.exists(identity_template_filename):
            add_files_to_package(wheel_path,
                                 {'identity_file': identity_template_filename})

        if not os.path.exists(wheelhouse):
            os.makedirs(wheelhouse, 0o750)
        wheel_dest = os.path.join(wheelhouse, wheel_name)
        shutil.move(wheel_path, wheel_dest)
        return wheel_dest
    except subprocess.CalledProcessError:
        # BUGFIX: traceback.print_last() raises ValueError here because
        # sys.last_traceback is only set by the interactive interpreter;
        # print_exc() reports the exception currently being handled.
        traceback.print_exc()
    finally:
        shutil.rmtree(tmpdir, True)
def stop_agent(self, agent_uuid):
    """Stop the agent specified by agent_uuid; return its pid afterwards."""
    _log.debug("STOPPING AGENT: {}".format(agent_uuid))
    execute_command(['volttron-ctl', 'stop', agent_uuid],
                    env=self.env, logger=_log,
                    err_prefix="Error stopping agent")
    return self.agent_pid(agent_uuid)
def remove_agent(self, agent_uuid):
    """Remove the agent specified by agent_uuid"""
    _log.debug("REMOVING AGENT: {}".format(agent_uuid))
    execute_command(['volttron-ctl', 'remove', agent_uuid],
                    env=self.env, logger=_log,
                    err_prefix="Error removing agent")
    # Report whatever pid (if any) the agent still has after removal.
    return self.agent_pid(agent_uuid)
def identity_exists(opts, identity):
    """
    Return the agent uuid for ``identity`` when an agent with that VIP
    identity appears in `vctl status` output, otherwise False.

    :param opts: parsed options carrying volttron_control and env settings
    :param identity: VIP identity to search for (third status column)
    """
    env = _build_copy_env(opts)
    cmds = [opts.volttron_control, "status"]

    data = execute_command(cmds, env=env, logger=log,
                           err_prefix="Error checking identity")
    for x in data.split("\n"):
        if x:
            line_split = x.split()
            # Guard against short/malformed rows instead of raising
            # IndexError on line_split[2].
            if len(line_split) > 2 and identity == line_split[2]:
                return line_split[0]
    return False
def validate_key_pair(public_key_file, private_key_file):
    """
    Given a public private key pair, validate the pair.

    :param public_key_file: path to public certificate file
    :param private_key_file: path to private key file
    :return True if the pair is valid, False otherwise
    """
    def _modulus(subcommand, path, err_prefix):
        # openssl prints "Modulus=..." which must match between cert and key.
        cmd = ['openssl', subcommand, '-noout', '-modulus', '-in',
               os.path.expanduser(os.path.expandvars(path))]
        return execute_command(cmd, err_prefix=err_prefix)

    try:
        mod_pub = _modulus('x509', public_key_file,
                           "Error getting modulus of public key")
        mod_key = _modulus('rsa', private_key_file,
                           "Error getting modulus of private key")
    except RuntimeError:
        return False
    return mod_pub == mod_key
def identity_exists(opts, identity):
    """
    Return the agent uuid for ``identity`` when an agent with that VIP
    identity appears in `vctl status` output, otherwise False.

    :param opts: parsed options carrying volttron_control and env settings
    :param identity: VIP identity to search for (third status column)
    """
    env = _build_copy_env(opts)
    cmds = [opts.volttron_control, "status"]

    data = execute_command(cmds, env=env, logger=log,
                           err_prefix="Error checking identity")
    for x in data.split("\n"):
        if x:
            line_split = x.split()
            # Guard against short/malformed rows instead of raising
            # IndexError on line_split[2].
            if len(line_split) > 2 and identity == line_split[2]:
                return line_split[0]
    return False
def generate_apidoc(app):
    """
    Generate apidocs for modules under volttron/platform and
    volttron/services/core.

    :param app: the sphinx application object (unused; required by the
        sphinx event-handler signature)
    """
    volttron_src = os.path.abspath('../volttron')
    if os.environ.get("READTHEDOCS"):
        # readthedocs builds run from a different working directory
        volttron_src = os.path.abspath('../../volttron')

    # Exclusions must be full paths to directories
    exclusions = [
        os.path.join(volttron_src, 'lint/'),
        os.path.join(volttron_src, 'drivers/')
    ]
    # NOTE: the flag and its value are separate argv tokens; the previous
    # single token '-d 4' only worked because argparse tolerates the
    # embedded space for short options.
    cmd = ["sphinx-apidoc", '-M', '-d', '4', '-o', 'source/volttron_api',
           '--force', volttron_src]
    cmd.extend(exclusions)

    print("The command is: {}".format(cmd))
    execute_command(cmd)
def test_config_store_access(secure_volttron_instance, security_agent, query_agent):
    """
    Test to make sure agent does not have any permissions on files outside
    agent's directory but for the following exceptions. Agent user should have
    read access to
     - vhome/config
     - vhome/known_hosts
     - vhome/certificates/certs/<agent_vip_id>.<instance_name>.crt
     - vhome/certificates/private/<agent_vip_id>.<instance_name>.pem

    :param secure_volttron_instance: secure volttron instance
    :param security_agent: Test agent which runs secure mode as a user other
        than platform user
    :param query_agent: Fake agent to do rpc calls to test agent
    """
    assert secure_volttron_instance.is_agent_running(security_agent)
    # Try installing a second copy of the agent. First agent should not have
    # read/write/execute access to any of the files of agent2. rpc call checks
    # all files in vhome
    agent2 = None
    try:
        agent2 = secure_volttron_instance.install_agent(
            vip_identity="security_agent2",
            agent_dir="volttrontesting/platform/security/SecurityAgent",
            start=False,
            config_file=None)

        secure_volttron_instance.start_agent(agent2)
        gevent.sleep(3)
        assert secure_volttron_instance.is_agent_running(agent2)

        # make initial entry in config store for both agents
        config_path = os.path.join(secure_volttron_instance.volttron_home,
                                   "test_secure_agent_config")
        with open(config_path, "w+") as f:
            f.write('{"test":"value"}')

        gevent.sleep(1)
        # Store the same config for both identities (and once more for the
        # first, exercising an update of an existing entry).
        execute_command(['volttron-ctl', 'config', 'store', "security_agent",
                         "config", config_path, "--json"],
                        cwd=secure_volttron_instance.volttron_home,
                        env=secure_volttron_instance.env)
        execute_command(['volttron-ctl', 'config', 'store', "security_agent2",
                         "config", config_path, "--json"],
                        cwd=secure_volttron_instance.volttron_home,
                        env=secure_volttron_instance.env)
        execute_command(['volttron-ctl', 'config', 'store', "security_agent",
                         "config", config_path, "--json"],
                        cwd=secure_volttron_instance.volttron_home,
                        env=secure_volttron_instance.env)

        # this rpc method will check agents own config store access and
        # agent's access to other agent's config store
        results = query_agent.vip.rpc.call("security_agent",
                                           "verify_config_store_access",
                                           "security_agent2").get(timeout=30)
        print("RESULTS :::: {}".format(results))
    except BaseException as e:
        # NOTE(review): catching BaseException and asserting False hides the
        # original traceback; consider re-raising instead — confirm intent.
        print("Exception {}".format(e))
        assert False
    finally:
        # Always clean up the second agent so later tests start clean.
        if agent2:
            secure_volttron_instance.remove_agent(agent2)
def _install_agent(self, wheel_file, start, vip_identity):
    """Install ``wheel_file`` via volttron-ctl, optionally starting the agent.

    Returns the uuid assigned to the installed agent.
    """
    self.logit('Creating channel for sending the agent.')
    gevent.sleep(0.3)
    self.logit('calling control install agent.')
    self.logit("VOLTTRON_HOME SETTING: {}".format(self.env['VOLTTRON_HOME']))

    env = self.env.copy()
    cmd = ['volttron-ctl', '-vv', 'install', wheel_file]
    if vip_identity:
        cmd.extend(['--vip-identity', vip_identity])

    res = execute_command(cmd, env=env, logger=_log)
    assert res, "failed to install wheel:{}".format(wheel_file)

    # The uuid is the second-to-last space-separated token of vctl's output.
    agent_uuid = res.split(' ')[-2]
    self.logit(agent_uuid)

    if start:
        self.start_agent(agent_uuid)
    return agent_uuid
def _install_agent(self, wheel_file, start, vip_identity):
    """Install ``wheel_file`` via volttron-ctl, optionally starting the agent.

    Returns the uuid assigned to the installed agent.
    """
    self.logit('Creating channel for sending the agent.')
    gevent.sleep(0.3)
    self.logit('calling control install agent.')
    self.logit("VOLTTRON_HOME SETTING: {}".format(self.env['VOLTTRON_HOME']))

    env = self.env.copy()
    cmd = ['volttron-ctl', '-vv', 'install', wheel_file]
    if vip_identity:
        cmd.extend(['--vip-identity', vip_identity])

    res = execute_command(cmd, env=env, logger=_log)
    assert res, "failed to install wheel:{}".format(wheel_file)

    # The uuid is the second-to-last space-separated token of vctl's output.
    agent_uuid = res.split(' ')[-2]
    self.logit(agent_uuid)

    if start:
        self.start_agent(agent_uuid)
    return agent_uuid
def agent_pid(self, agent_uuid):
    """
    Returns the pid of a running agent or None

    :param agent_uuid:
    :return:
    """
    # Confirm agent running
    cmd = ['volttron-ctl']
    cmd.extend(['status', agent_uuid])
    pid = None
    try:
        res = execute_command(cmd, env=self.env, logger=_log,
                              err_prefix="Error getting agent status")
        try:
            # The pid is printed between square brackets in the status line.
            pidpos = res.index('[') + 1
            pidend = res.index(']')
            pid = int(res[pidpos:pidend])
        except ValueError:
            # BUGFIX: was a bare `except:` which also hid real bugs.
            # str.index raises ValueError when no brackets are present and
            # int() raises ValueError for a non-numeric pid.
            pid = None
    except CalledProcessError as ex:
        _log.error("Exception: {}".format(ex))
    # Handle the following exception that seems to happen when getting a
    # pid of an agent during the platform shutdown phase.
    #
    # Logged from file platformwrapper.py, line 797
    #   AGENT             IDENTITY          TAG STATUS
    # Traceback (most recent call last):
    #   File "/usr/lib/python2.7/logging/__init__.py", line 882, in emit
    #     stream.write(fs % msg)
    #   File ".../_pytest/capture.py", line 244, in write
    #     self.buffer.write(obj)
    # ValueError: I/O operation on closed file
    except ValueError:
        pass
    return pid
def agent_pid(self, agent_uuid):
    """
    Returns the pid of a running agent or None

    :param agent_uuid:
    :return:
    """
    # Confirm agent running
    cmd = ['volttron-ctl']
    cmd.extend(['status', agent_uuid])
    pid = None
    try:
        res = execute_command(cmd, env=self.env, logger=_log,
                              err_prefix="Error getting agent status")
        try:
            # The pid is printed between square brackets in the status line.
            pidpos = res.index('[') + 1
            pidend = res.index(']')
            pid = int(res[pidpos:pidend])
        except ValueError:
            # BUGFIX: was a bare `except:` which also hid real bugs.
            # str.index raises ValueError when no brackets are present and
            # int() raises ValueError for a non-numeric pid.
            pid = None
    except CalledProcessError as ex:
        _log.error("Exception: {}".format(ex))
    # Handle the following exception that seems to happen when getting a
    # pid of an agent during the platform shutdown phase.
    #
    # Logged from file platformwrapper.py, line 797
    #   AGENT             IDENTITY          TAG STATUS
    # Traceback (most recent call last):
    #   File "/usr/lib/python2.7/logging/__init__.py", line 882, in emit
    #     stream.write(fs % msg)
    #   File ".../_pytest/capture.py", line 244, in write
    #     self.buffer.write(obj)
    # ValueError: I/O operation on closed file
    except ValueError:
        pass
    return pid
def install_agent(opts, package, config):
    """
    The main installation method for installing the agent on the correct
    local platform instance.

    :param opts: parsed command line options (volttron_control, tag, start,
        enable, priority, json, csv, agent_start_time, ...)
    :param package: path to the agent wheel to install
    :param config: agent configuration; a dict or a path to a config file
    :return:
    """
    if config is None:
        config = {}

    # if not a dict then config should be a filename
    if not isinstance(config, dict):
        config_file = config
    else:
        cfg = tempfile.NamedTemporaryFile()
        with open(cfg.name, 'w') as fout:
            fout.write(yaml.safe_dump(config))
        config_file = cfg.name

    try:
        with open(config_file) as fp:
            data = yaml.safe_load(fp)
    except Exception:
        # narrowed from a bare except; any parse/IO failure is fatal here
        log.error("Invalid yaml/json config file.")
        sys.exit(-10)

    # Configure the whl file before installing.
    add_files_to_package(opts.package, {'config_file': config_file})
    env = _build_copy_env(opts)

    if opts.vip_identity:
        # if the identity is specified, upgrade (replace) any existing agent
        cmds = [opts.volttron_control, "upgrade", opts.vip_identity, package]
    else:
        cmds = [opts.volttron_control, "install", package]
    if opts.tag:
        cmds.extend(["--tag", opts.tag])

    out = execute_command(cmds, env=env, logger=log,
                          err_prefix="Error installing agent")

    # If there is not an agent with that identity:
    # 'Could not find agent with VIP IDENTITY "BOO". Installing as new agent
    # Installed .../listeneragent-3.2-py2-none-any.whl as 6ccbf8dc-... listeneragent-3.2'
    #
    # If an agent was previously installed the first line instead reads
    # 'Removing previous version of agent "foo"'; otherwise only the
    # 'Installed ...' line is printed.  In every case the uuid is the
    # second-to-last token of the 'Installed ...' line.
    parsed = out.split("\n")
    if 'Could not' in parsed[0]:
        agent_uuid = parsed[1].split()[-2]
    elif 'Removing' in parsed[0]:
        agent_uuid = parsed[1].split()[-2]
    else:
        agent_uuid = parsed[0].split()[-2]

    output_dict = dict(agent_uuid=agent_uuid)

    if opts.start:
        cmds = [opts.volttron_control, "start", agent_uuid]
        outputdata = execute_command(cmds, env=env, logger=log,
                                     err_prefix="Error starting agent")
        # Expected output on standard out
        # Starting 83856b74-76dc-4bd9-8480-f62bd508aa9c listeneragent-3.2
        if 'Starting' in outputdata:
            output_dict['starting'] = True

    if opts.enable:
        cmds = [opts.volttron_control, "enable", agent_uuid]
        if opts.priority != -1:
            cmds.extend(["--priority", str(opts.priority)])
        outputdata = execute_command(cmds, env=env, logger=log,
                                     err_prefix="Error enabling agent")
        # Expected output from standard out
        # Enabling 6bcee29b-7af3-4361-a67f-7d3c9e986419 listeneragent-3.2 with priority 50
        if "Enabling" in outputdata:
            output_dict['enabling'] = True
            output_dict['priority'] = outputdata.split("\n")[0].split()[-1]

    if opts.start:
        # Pause for agent_start_time seconds before verifying that the agent
        sleep(opts.agent_start_time)

        cmds = [opts.volttron_control, "status", agent_uuid]
        outputdata = execute_command(cmds, env=env, logger=log,
                                     err_prefix="Error finding agent status")
        # 5 listeneragent-3.2 foo     running [10737]
        output_dict["started"] = "running" in outputdata
        if output_dict["started"]:
            pidpos = outputdata.index('[') + 1
            pidend = outputdata.index(']')
            output_dict['agent_pid'] = int(outputdata[pidpos:pidend])

    if opts.json:
        sys.stdout.write("%s\n" % json.dumps(output_dict, indent=4))
    if opts.csv:
        # BUGFIX: dict_keys is not subscriptable in python3, so the original
        # keys[k] indexing raised TypeError; build both csv lines with join.
        keys = list(output_dict)
        keyline = ",".join("%s" % k for k in keys)
        valueline = ",".join("%s" % output_dict[k] for k in keys)
        sys.stdout.write("%s\n%s\n" % (keyline, valueline))
def install_agent(opts, package, config):
    """
    The main installation method for installing the agent on the correct
    local platform instance.

    :param opts: parsed command line options (volttron_control, tag, start,
        enable, priority, json, csv, agent_start_time, ...)
    :param package: path to the agent wheel to install
    :param config: agent configuration; a dict or a path to a config file
    :return:
    """
    if config is None:
        config = {}

    # if not a dict then config should be a filename
    if not isinstance(config, dict):
        config_file = config
    else:
        cfg = tempfile.NamedTemporaryFile()
        with open(cfg.name, 'w') as fout:
            fout.write(yaml.safe_dump(config))
        config_file = cfg.name

    try:
        with open(config_file) as fp:
            data = yaml.safe_load(fp)
    except Exception:
        # narrowed from a bare except; any parse/IO failure is fatal here
        log.error("Invalid yaml/json config file.")
        sys.exit(-10)

    # Configure the whl file before installing.
    add_files_to_package(opts.package, {'config_file': config_file})
    env = _build_copy_env(opts)

    if opts.vip_identity:
        # if the identity is specified, upgrade (replace) any existing agent
        cmds = [opts.volttron_control, "upgrade", opts.vip_identity, package]
    else:
        cmds = [opts.volttron_control, "install", package]
    if opts.tag:
        cmds.extend(["--tag", opts.tag])

    out = execute_command(cmds, env=env, logger=log,
                          err_prefix="Error installing agent")

    # If there is not an agent with that identity:
    # 'Could not find agent with VIP IDENTITY "BOO". Installing as new agent
    # Installed .../listeneragent-3.2-py2-none-any.whl as 6ccbf8dc-... listeneragent-3.2'
    #
    # If an agent was previously installed the first line instead reads
    # 'Removing previous version of agent "foo"'; otherwise only the
    # 'Installed ...' line is printed.  In every case the uuid is the
    # second-to-last token of the 'Installed ...' line.
    parsed = out.split("\n")
    if 'Could not' in parsed[0]:
        agent_uuid = parsed[1].split()[-2]
    elif 'Removing' in parsed[0]:
        agent_uuid = parsed[1].split()[-2]
    else:
        agent_uuid = parsed[0].split()[-2]

    output_dict = dict(agent_uuid=agent_uuid)

    if opts.start:
        cmds = [opts.volttron_control, "start", agent_uuid]
        outputdata = execute_command(cmds, env=env, logger=log,
                                     err_prefix="Error starting agent")
        # Expected output on standard out
        # Starting 83856b74-76dc-4bd9-8480-f62bd508aa9c listeneragent-3.2
        if 'Starting' in outputdata:
            output_dict['starting'] = True

    if opts.enable:
        cmds = [opts.volttron_control, "enable", agent_uuid]
        if opts.priority != -1:
            cmds.extend(["--priority", str(opts.priority)])
        outputdata = execute_command(cmds, env=env, logger=log,
                                     err_prefix="Error enabling agent")
        # Expected output from standard out
        # Enabling 6bcee29b-7af3-4361-a67f-7d3c9e986419 listeneragent-3.2 with priority 50
        if "Enabling" in outputdata:
            output_dict['enabling'] = True
            output_dict['priority'] = outputdata.split("\n")[0].split()[-1]

    if opts.start:
        # Pause for agent_start_time seconds before verifying that the agent
        sleep(opts.agent_start_time)

        cmds = [opts.volttron_control, "status", agent_uuid]
        outputdata = execute_command(cmds, env=env, logger=log,
                                     err_prefix="Error finding agent status")
        # 5 listeneragent-3.2 foo     running [10737]
        output_dict["started"] = "running" in outputdata
        if output_dict["started"]:
            pidpos = outputdata.index('[') + 1
            pidend = outputdata.index(']')
            output_dict['agent_pid'] = int(outputdata[pidpos:pidend])

    if opts.json:
        sys.stdout.write("%s\n" % json.dumps(output_dict, indent=4))
    if opts.csv:
        # BUGFIX: dict_keys is not subscriptable in python3, so the original
        # keys[k] indexing raised TypeError; build both csv lines with join.
        keys = list(output_dict)
        keyline = ",".join("%s" % k for k in keys)
        valueline = ",".join("%s" % output_dict[k] for k in keys)
        sys.stdout.write("%s\n%s\n" % (keyline, valueline))
inenv = False else: inenv = True if os.environ.get('WAS_CORRECTED'): corrected = True else: corrected = False # Call the script with the correct environment if we aren't activated yet. if not inenv and not corrected: mypath = os.path.dirname(__file__) # Travis-CI puts the python in a little bit different location than # we do. if os.environ.get('CI') is not None: correct_python = execute_command(['which', 'python'], logger=log).strip() else: correct_python = os.path.abspath( os.path.join(mypath, '../env/bin/python')) if not os.path.exists(correct_python): log.error("Invalid location for the script {}".format(correct_python)) sys.exit(-10) # Call this script in a subprocess with the correct python interpreter. cmds = [correct_python, __file__] cmds.extend(sys.argv[1:]) try: output = execute_command(cmds, env=os.environ, logger=log) sys.exit(0) except RuntimeError:
def shutdown_platform(self): """ Stop platform here. First grab a list of all of the agents that are running on the platform, then shutdown, then if any of the listed agent pids are still running then kill them. """ # Handle cascading calls from multiple levels of fixtures. if self._instance_shutdown: return running_pids = [] for agnt in self.list_agents(): pid = self.agent_pid(agnt['uuid']) if pid is not None and int(pid) > 0: running_pids.append(int(pid)) # First try and nicely shutdown the platform, which should clean all # of the agents up automatically. cmd = ['volttron-ctl'] cmd.extend(['shutdown', '--platform']) try: execute_command(cmd, env=self.env, logger=_log, err_prefix="Error shutting down platform") except RuntimeError: if self.p_process is not None: try: gevent.sleep(0.2) self.p_process.terminate() gevent.sleep(0.2) except OSError: self.logit('Platform process was terminated.') else: self.logit("platform process was null") for pid in running_pids: if psutil.pid_exists(pid): self.logit("TERMINATING: {}".format(pid)) proc = psutil.Process(pid) proc.terminate() if self.use_twistd and self.t_process is not None: self.t_process.kill() self.t_process.wait() elif self.use_twistd: self.logit("twistd process was null") if os.environ.get('PRINT_LOG'): logpath = os.path.join(self.volttron_home, 'volttron.log') if os.path.exists(logpath): print("************************* Begin {}".format(logpath)) with open(logpath) as f: for l in f.readlines(): print(l) print("************************* End {}".format(logpath)) else: print("######################### No Log Exists: {}".format( logpath)) if not self.skip_cleanup: self.logit('Removing {}'.format(self.volttron_home)) shutil.rmtree(self.volttron_home, ignore_errors=True) self._instance_shutdown = True
def federated_rmq_instances(request, **kwargs):
    """
    Create two rmq based volttron instances. One to act as producer of
    data and one to act as consumer of data

    :return: 2 volttron instances - (producer, consumer) that are federated
    """
    upstream_vip = get_rand_vip()
    upstream = build_wrapper(upstream_vip,
                             ssl_auth=True,
                             messagebus='rmq',
                             should_start=False,
                             **kwargs)
    downstream_vip = get_rand_vip()
    downstream = build_wrapper(downstream_vip,
                               ssl_auth=True,
                               messagebus='rmq',
                               should_start=False,
                               **kwargs)

    # exchange CA certs
    # Both brokers are stopped while the trusted CA bundles are edited so
    # the new certificates are picked up on restart.
    # NOTE(review): "quite" is presumably the (misspelled) parameter name
    # stop_rabbit actually declares — verify before renaming.
    stop_rabbit(rmq_home=upstream.rabbitmq_config_obj.rmq_home,
                env=upstream.env, quite=True)
    stop_rabbit(rmq_home=downstream.rabbitmq_config_obj.rmq_home,
                env=downstream.env, quite=True)

    # Append each instance's root CA to the other instance's trusted CA
    # file so the two brokers can authenticate each other over SSL.
    with open(os.path.join(upstream.certsobj.cert_dir,
                           upstream.instance_name + "-root-ca.crt"),
              "r") as uf:
        with open(os.path.join(downstream.certsobj.cert_dir,
                               downstream.instance_name + "-trusted-cas.crt"),
                  "a") as df:
            df.write(uf.read())

    with open(os.path.join(downstream.certsobj.cert_dir,
                           downstream.instance_name + "-root-ca.crt"),
              "r") as df:
        with open(os.path.join(upstream.certsobj.cert_dir,
                               upstream.instance_name + "-trusted-cas.crt"),
                  "a") as uf:
            uf.write(df.read())

    start_rabbit(rmq_home=downstream.rabbitmq_config_obj.rmq_home,
                 env=downstream.env)
    gevent.sleep(1)
    start_rabbit(rmq_home=upstream.rabbitmq_config_obj.rmq_home,
                 env=upstream.env)
    gevent.sleep(1)

    try:
        # add downstream user ON UPSTREAM and give permissions
        # ~/rabbitmq_server/rabbitmq_server-3.7.7/sbin/rabbitmqctl add_user <user> <password>
        # ~/rabbitmq_server/rabbitmq_server-3.7.7/sbin/rabbitmqctl set_permissions -p <vhost> <user> ".*" ".*" ".*"
        cmd = [
            os.path.join(upstream.rabbitmq_config_obj.rmq_home,
                         "sbin/rabbitmqctl")
        ]
        cmd.extend(['add_user', downstream.instance_name + "-admin", "test"])
        execute_command(cmd,
                        env=upstream.env,
                        err_prefix="Error creating user in upstream server")

        cmd = [
            os.path.join(
                upstream.rabbitmq_config_obj.rabbitmq_config['rmq-home'],
                "sbin/rabbitmqctl")
        ]
        cmd.extend([
            'set_permissions', "-p",
            upstream.rabbitmq_config_obj.rabbitmq_config["virtual-host"]
        ])
        cmd.extend([downstream.instance_name + "-admin", ".*", ".*", ".*"])
        execute_command(
            cmd,
            env=upstream.env,
            err_prefix="Error setting user permission in upstream server")
        gevent.sleep(1)

        upstream.startup_platform(upstream_vip)
        gevent.sleep(2)
        print("After upstream start")
        downstream.startup_platform(downstream_vip)
        gevent.sleep(2)

        # create federation config and setup federation
        # NOTE(review): the YAML indentation of this literal was
        # reconstructed from a flattened source — verify against the
        # original file.
        content = """federation-upstream:
  {host}:
    port: {port}
    virtual-host: {vhost}
"""
        config_path = os.path.join(downstream.volttron_home,
                                   "federation.config")
        with open(config_path, 'w') as conf:
            conf.write(
                content.format(
                    host=upstream.rabbitmq_config_obj.rabbitmq_config["host"],
                    port=upstream.rabbitmq_config_obj.
                    rabbitmq_config["amqp-port-ssl"],
                    vhost=upstream.rabbitmq_config_obj.
                    rabbitmq_config["virtual-host"]))
        downstream.setup_federation(config_path)
    except Exception as e:
        # Make sure both platforms are torn down before re-raising so a
        # failed fixture does not leak running brokers.
        print("Exception setting up federation: {}".format(e))
        upstream.shutdown_platform()
        downstream.shutdown_platform()
        raise e

    yield upstream, downstream

    upstream.shutdown_platform()
    downstream.shutdown_platform()
def __init__(self):
    """
    Initializes a new VOLTTRON instance

    Creates a temporary VOLTTRON_HOME directory with a packaged
    directory for agents that are built.
    """
    # Guard so cascading fixture teardowns only shut this instance
    # down once.
    self._instance_shutdown = False

    # Fresh, throw-away home directory for this platform instance.
    self.volttron_home = tempfile.mkdtemp()
    self.packaged_dir = os.path.join(self.volttron_home, "packaged")
    os.makedirs(self.packaged_dir)

    # Never hand the platform the full os.environ; pass through only an
    # explicit whitelist of variables.
    self.env = {
        'VOLTTRON_HOME': self.volttron_home,
        'PACKAGED_DIR': self.packaged_dir,
        'DEBUG_MODE': os.environ.get('DEBUG_MODE', ''),
        'DEBUG': os.environ.get('DEBUG', ''),
        'PATH': VOLTTRON_ROOT + ':' + os.environ['PATH']
    }
    self.volttron_root = VOLTTRON_ROOT

    # Resolve the volttron executable and the python interpreter that
    # lives next to it.
    exe_path = execute_command(['which', 'volttron']).strip()
    assert os.path.exists(exe_path)
    self.python = os.path.join(os.path.dirname(exe_path), 'python')
    assert os.path.exists(self.python)

    # Web / discovery endpoints are opt-in; nothing is started by default.
    self.bind_web_address = None
    self.discovery_address = None
    self.jsonrpc_endpoint = None
    self.volttron_central_address = None
    self.volttron_central_serverkey = None

    self.instance_name = None
    self.serverkey = None

    # Platform and (legacy) twistd process handles.
    self.p_process = None
    self.t_process = None

    self.started_agent_pids = []
    self.local_vip_address = None
    self.vip_address = None
    self.logit('Creating platform wrapper')

    # This was used when we are testing the SMAP historian.
    self.use_twistd = False

    # Added restricted code properties
    self.certsobj = None

    # Control whether the instance directory is cleaned up when shutdown.
    # if the environment variable DEBUG is set to a True value then the
    # instance is not cleaned up.
    self.skip_cleanup = False

    # This is used as command line entry replacement. Especially working
    # with older 2.0 agents.
    self.opts = None

    # Pre-generate the curve keypair for this instance.
    keystore_path = os.path.join(self.volttron_home, 'keystore')
    self.keystore = KeyStore(keystore_path)
    self.keystore.generate()
def install_agent(self, agent_wheel=None, agent_dir=None, config_file=None,
                  start=True, vip_identity=None):
    """
    Install and optionally start an agent on the instance.

    This function allows installation from an agent wheel or an agent
    directory (NOT BOTH).  If an agent_wheel is specified then it is
    assumed to be ready for installation (has a config file).  If an
    agent_dir is specified then a config_file file must be specified or
    if it is not specified then it is assumed that the file
    agent_dir/config is to be used as the configuration file.  If none
    of these exist then an assertion error will be thrown.

    This function will return with a uuid of the installed agent.

    :param agent_wheel: path to a pre-built agent wheel
    :param agent_dir: path to an agent source directory
    :param config_file: dict, path to a config file, or None (see above)
    :param start: when True, start the agent after installation
    :param vip_identity: explicit VIP identity for the installed agent
    :return: uuid of the installed agent
    """
    assert self.is_running(), "Instance must be running to install agent."
    assert agent_wheel or agent_dir, "Invalid agent_wheel or agent_dir."

    if agent_wheel:
        assert not agent_dir
        assert not config_file
        assert os.path.exists(agent_wheel)
        wheel_file = agent_wheel
        agent_uuid = self._install_agent(wheel_file, start, vip_identity)

    # Now if the agent_dir is specified.
    if agent_dir:
        assert not agent_wheel
        if isinstance(config_file, dict):
            # Dict configs are serialized to a temp json file so the
            # install script can be handed a path.
            from os.path import join, basename
            temp_config = join(self.volttron_home,
                               basename(agent_dir) + "_config_file")
            with open(temp_config, "w") as fp:
                fp.write(json.dumps(config_file))
            config_file = temp_config
        elif not config_file:
            if os.path.exists(os.path.join(agent_dir, "config")):
                config_file = os.path.join(agent_dir, "config")
            else:
                # No config found anywhere: fall back to an empty config.
                from os.path import join, basename
                temp_config = join(self.volttron_home,
                                   basename(agent_dir) + "_config_file")
                with open(temp_config, "w") as fp:
                    fp.write(json.dumps({}))
                config_file = temp_config
        elif os.path.exists(config_file):
            pass  # config_file already set!
        else:
            raise ValueError("Can't determine correct config file.")

        # NOTE(review): nesting of the block below under "if agent_dir:"
        # was reconstructed from a flattened source — verify against the
        # original file.
        script = os.path.join(self.volttron_root, "scripts/install-agent.py")
        cmd = [self.python, script,
               "--volttron-home", self.volttron_home,
               "--volttron-root", self.volttron_root,
               "--agent-source", agent_dir,
               "--config", config_file,
               "--json"]
        if vip_identity:
            cmd.extend(["--vip-identity", vip_identity])
        if start:
            cmd.extend(["--start"])

        stdout = execute_command(cmd, logger=_log,
                                 err_prefix="Error installing agent")
        self.logit(stdout)

        # Because we are no longer silencing output from the install, the
        # the results object is now much more verbose.  Our assumption is
        # that the result we are looking for is the only JSON block in
        # the output
        match = re.search(r'^({.*})', stdout, flags=re.M | re.S)
        if match:
            results = match.group(0)
        else:
            raise ValueError(
                "The results were not found in the command output")
        self.logit("here are the results: {}".format(results))

        # Response from results is expected as follows depending on
        # parameters, note this is a json string so parse to get dictionary
        # {
        #     "started": true,
        #     "agent_pid": 26241,
        #     "starting": true,
        #     "agent_uuid": "ec1fd94e-922a-491f-9878-c392b24dbe50"
        # }
        assert results

        resultobj = jsonapi.loads(str(results))

        if start:
            assert resultobj['started']
        agent_uuid = resultobj['agent_uuid']

    assert agent_uuid is not None
    if start:
        assert self.is_agent_running(agent_uuid)
    return agent_uuid
def remove_agent(opts, agent_uuid):
    """Remove the agent identified by *agent_uuid* via the control command."""
    # Run the control command under a copy of the relevant environment.
    execute_command([opts.volttron_control, "remove", agent_uuid],
                    env=_build_copy_env(opts),
                    logger=log,
                    err_prefix="Error removing agent")
def __init__(self):
    """
    Initializes a new VOLTTRON instance

    Creates a temporary VOLTTRON_HOME directory with a packaged
    directory for agents that are built.
    """
    # Ensures shutdown is a one-shot operation even when several fixture
    # levels each ask this wrapper to shut down.
    self._instance_shutdown = False

    # Throw-away home directory plus a sub-directory for packaged agents.
    self.volttron_home = tempfile.mkdtemp()
    self.packaged_dir = os.path.join(self.volttron_home, "packaged")
    os.makedirs(self.packaged_dir)

    # In the context of this platform it is very important not to use
    # the main os.environ for anything; only an explicit set of
    # variables is forwarded.
    self.env = {
        'VOLTTRON_HOME': self.volttron_home,
        'PACKAGED_DIR': self.packaged_dir,
        'DEBUG_MODE': os.environ.get('DEBUG_MODE', ''),
        'DEBUG': os.environ.get('DEBUG', ''),
        'PATH': VOLTTRON_ROOT + ':' + os.environ['PATH']
    }
    self.volttron_root = VOLTTRON_ROOT

    # Locate the volttron executable; the matching python interpreter
    # sits in the same directory.
    volttron_path = execute_command(['which', 'volttron']).strip()
    assert os.path.exists(volttron_path)
    self.python = os.path.join(os.path.dirname(volttron_path), 'python')
    assert os.path.exists(self.python)

    # By default no web server should be started.
    self.bind_web_address = None
    self.discovery_address = None
    self.jsonrpc_endpoint = None
    self.volttron_central_address = None
    self.volttron_central_serverkey = None

    self.instance_name = None
    self.serverkey = None

    # Process handles for the platform and the legacy twistd daemon.
    self.p_process = None
    self.t_process = None

    self.started_agent_pids = []
    self.local_vip_address = None
    self.vip_address = None
    self.logit('Creating platform wrapper')

    # This was used when we are testing the SMAP historian.
    self.use_twistd = False

    # Added restricted code properties
    self.certsobj = None

    # Control whether the instance directory is cleaned up when shutdown.
    # if the environment variable DEBUG is set to a True value then the
    # instance is not cleaned up.
    self.skip_cleanup = False

    # This is used as command line entry replacement. Especially working
    # with older 2.0 agents.
    self.opts = None

    # Generate this instance's curve keypair immediately.
    self.keystore = KeyStore(os.path.join(self.volttron_home, 'keystore'))
    self.keystore.generate()
def test_install_same_identity(volttron_instance: PlatformWrapper):
    """Installing over an existing VIP identity must fail without --force
    and replace the agent (new uuid/status) with --force."""
    global listener_agent_dir

    with with_os_environ(volttron_instance.env):
        expected_identity = "listener.1"

        def agent_status(uuid):
            # One vctl status round-trip; returns the single agent's
            # status dictionary.
            out = execute_command(["vctl", "--json", "status", uuid],
                                  volttron_instance.env)
            parsed = jsonapi.loads(out)
            return parsed[list(parsed.keys())[0]]

        install_args = [
            "volttron-ctl",
            "--json",
            "install",
            listener_agent_dir,
            "--vip-identity",
            expected_identity,
            "--start",
        ]
        response = execute_command(install_args, volttron_instance.env)
        agent_uuid = jsonapi.loads(response)["agent_uuid"]

        status = agent_status(agent_uuid)
        assert "running [" in status.get("status")
        expected_status = status.get("status")
        expected_auuid = status.get("agent_uuid")

        # Attempt to install without force must raise.
        with pytest.raises(RuntimeError):
            execute_command(install_args, volttron_instance.env)

        # Nothing should have changed; the pid should be the same.
        status = agent_status(agent_uuid)
        assert expected_status == status.get("status")
        assert expected_auuid == status.get("agent_uuid")

        # Install with force: the agent is replaced and gets a fresh
        # uuid and status line.
        response = execute_command(install_args + ["--force"],
                                   volttron_instance.env)
        agent_uuid = jsonapi.loads(response)["agent_uuid"]

        status = agent_status(agent_uuid)
        assert "running [" in status.get("status")
        assert expected_status != status.get("status")
        assert expected_auuid != status.get("agent_uuid")
def shutdown_platform(self): """ Stop platform here. First grab a list of all of the agents that are running on the platform, then shutdown, then if any of the listed agent pids are still running then kill them. """ # Handle cascading calls from multiple levels of fixtures. if self._instance_shutdown: return running_pids = [] for agnt in self.list_agents(): pid = self.agent_pid(agnt['uuid']) if pid is not None and int(pid) > 0: running_pids.append(int(pid)) # First try and nicely shutdown the platform, which should clean all # of the agents up automatically. cmd = ['volttron-ctl'] cmd.extend(['shutdown', '--platform']) try: execute_command(cmd, env=self.env, logger=_log, err_prefix="Error shutting down platform") except RuntimeError: if self.p_process is not None: try: gevent.sleep(0.2) self.p_process.terminate() gevent.sleep(0.2) except OSError: self.logit('Platform process was terminated.') else: self.logit("platform process was null") for pid in running_pids: if psutil.pid_exists(pid): self.logit("TERMINATING: {}".format(pid)) proc = psutil.Process(pid) proc.terminate() if self.use_twistd and self.t_process is not None: self.t_process.kill() self.t_process.wait() elif self.use_twistd: self.logit("twistd process was null") if os.environ.get('PRINT_LOG'): logpath = os.path.join(self.volttron_home, 'volttron.log') if os.path.exists(logpath): print("************************* Begin {}".format(logpath)) with open(logpath) as f: for l in f.readlines(): print(l) print("************************* End {}".format(logpath)) else: print("######################### No Log Exists: {}".format( logpath )) if not self.skip_cleanup: self.logit('Removing {}'.format(self.volttron_home)) shutil.rmtree(self.volttron_home, ignore_errors=True) self._instance_shutdown = True
corrected = True else: corrected = False # Moste of the time the environment will be run within a virtualenv # however if we need to run the install agent in a non virtualized # environment this allows us to do that. ignore_env_check = os.environ.get('IGNORE_ENV_CHECK', False) # Call the script with the correct environment if we aren't activated yet. if not ignore_env_check and not inenv and not corrected: mypath = os.path.dirname(__file__) # Travis-CI puts the python in a little bit different location than # we do. if os.environ.get('CI') is not None: correct_python = execute_command(['which', 'python'], logger=log).strip() else: correct_python = os.path.abspath( os.path.join(mypath, '../env/bin/python')) if not os.path.exists(correct_python): log.error("Invalid location for the script {}".format(correct_python)) sys.exit(-10) # Call this script in a subprocess with the correct python interpreter. cmds = [correct_python, __file__] cmds.extend(sys.argv[1:]) try: output = execute_command(cmds, env=os.environ, logger=log) sys.exit(0) except RuntimeError:
def install_agent(self, agent_wheel=None, agent_dir=None, config_file=None,
                  start=True, vip_identity=None):
    """
    Install and optionally start an agent on the instance.

    This function allows installation from an agent wheel or an agent
    directory (NOT BOTH).  If an agent_wheel is specified then it is
    assumed to be ready for installation (has a config file).  If an
    agent_dir is specified then a config_file file must be specified or
    if it is not specified then it is assumed that the file
    agent_dir/config is to be used as the configuration file.  If none
    of these exist then an assertion error will be thrown.

    This function will return with a uuid of the installed agent.

    :param agent_wheel: path to a pre-built agent wheel
    :param agent_dir: path to an agent source directory
    :param config_file: dict, path to a config file, or None (see above)
    :param start: when True, start the agent after installation
    :param vip_identity: explicit VIP identity for the installed agent
    :return: uuid of the installed agent
    """
    assert self.is_running(), "Instance must be running to install agent."
    assert agent_wheel or agent_dir, "Invalid agent_wheel or agent_dir."

    if agent_wheel:
        assert not agent_dir
        assert not config_file
        assert os.path.exists(agent_wheel)
        wheel_file = agent_wheel
        agent_uuid = self._install_agent(wheel_file, start, vip_identity)

    # Now if the agent_dir is specified.
    if agent_dir:
        assert not agent_wheel
        if isinstance(config_file, dict):
            # Dict configs are written out to a temp json file so the
            # install script can be given a path.
            from os.path import join, basename
            temp_config = join(self.volttron_home,
                               basename(agent_dir) + "_config_file")
            with open(temp_config, "w") as fp:
                fp.write(json.dumps(config_file))
            config_file = temp_config
        elif not config_file:
            if os.path.exists(os.path.join(agent_dir, "config")):
                config_file = os.path.join(agent_dir, "config")
            else:
                # No config found anywhere: fall back to an empty config.
                from os.path import join, basename
                temp_config = join(self.volttron_home,
                                   basename(agent_dir) + "_config_file")
                with open(temp_config, "w") as fp:
                    fp.write(json.dumps({}))
                config_file = temp_config
        elif os.path.exists(config_file):
            pass  # config_file already set!
        else:
            raise ValueError("Can't determine correct config file.")

        # NOTE(review): nesting of the block below under "if agent_dir:"
        # was reconstructed from a flattened source — verify against the
        # original file.
        script = os.path.join(self.volttron_root, "scripts/install-agent.py")
        cmd = [
            self.python, script,
            "--volttron-home", self.volttron_home,
            "--volttron-root", self.volttron_root,
            "--agent-source", agent_dir,
            "--config", config_file,
            "--json"
        ]
        if vip_identity:
            cmd.extend(["--vip-identity", vip_identity])
        if start:
            cmd.extend(["--start"])

        stdout = execute_command(cmd, logger=_log,
                                 err_prefix="Error installing agent")
        self.logit(stdout)

        # Because we are no longer silencing output from the install, the
        # the results object is now much more verbose.  Our assumption is
        # that the result we are looking for is the only JSON block in
        # the output
        match = re.search(r'^({.*})', stdout, flags=re.M | re.S)
        if match:
            results = match.group(0)
        else:
            raise ValueError(
                "The results were not found in the command output")
        self.logit("here are the results: {}".format(results))

        # Response from results is expected as follows depending on
        # parameters, note this is a json string so parse to get dictionary
        # {
        #     "started": true,
        #     "agent_pid": 26241,
        #     "starting": true,
        #     "agent_uuid": "ec1fd94e-922a-491f-9878-c392b24dbe50"
        # }
        assert results

        resultobj = jsonapi.loads(str(results))

        if start:
            assert resultobj['started']
        agent_uuid = resultobj['agent_uuid']

    assert agent_uuid is not None
    if start:
        assert self.is_agent_running(agent_uuid)
    return agent_uuid