def create_agent_amqp(install_agent_timeout=300, manager_ip=None,
                      manager_certificate=None, stop_old_agent=False, **_):
    """Install a new agent on a host machine, tracking agent state records.

    :param install_agent_timeout: operation's timeout.
    :param manager_ip: the private IP of the current leader (master)
        Manager. This IP is used to connect to the Manager's RabbitMQ.
        (relevant only in HA cluster)
    :param manager_certificate: the SSL certificate of the current leader
        (master) Manager. (relevant only in HA cluster)
    :param stop_old_agent: if set, stop the old agent after successfully
        installing the new one
    """
    current_agent = _validate_agent()
    update_agent_record(current_agent, AgentState.UPGRADING)
    _update_broker_config(current_agent, manager_ip, manager_certificate)

    agents = _run_install_script(current_agent, install_agent_timeout)
    installed_agent = agents['new']
    ctx.logger.info('Installed agent {0}'.format(installed_agent['name']))
    create_agent_record(installed_agent, AgentState.STARTING)

    # Refuse to proceed unless the replacement agent actually connected.
    amqp_status = _validate_current_amqp(installed_agent)
    if not amqp_status['agent_alive']:
        update_agent_record(installed_agent, AgentState.FAILED)
        raise RecoverableError('New agent did not start and connect')

    update_agent_record(installed_agent, AgentState.STARTED)
    update_agent_record(current_agent, AgentState.UPGRADED)
    if stop_old_agent:
        _stop_old_diamond(current_agent, install_agent_timeout)
        _stop_old_agent(installed_agent, current_agent, install_agent_timeout)

    # Setting old_cloudify_agent in order to uninstall it later.
    ctx.instance.runtime_properties['old_cloudify_agent'] = agents['old']
    update_agent_runtime_properties(installed_agent)
def create_agent_amqp(install_agent_timeout=300, manager_ip=None,
                      manager_certificate=None, stop_old_agent=False, **_):
    """Install a new agent on a host machine.

    :param install_agent_timeout: operation's timeout.
    :param manager_ip: the private IP of the current leader (master)
        Manager. This IP is used to connect to the Manager's RabbitMQ.
        (relevant only in HA cluster)
    :param manager_certificate: the SSL certificate of the current leader
        (master) Manager. (relevant only in HA cluster)
    :param stop_old_agent: if set, stop the old agent after successfully
        installing the new one
    """
    old_agent = _validate_agent()
    _update_broker_config(old_agent, manager_ip, manager_certificate)

    installed = _run_install_script(old_agent, install_agent_timeout)
    new_agent = installed['new']
    ctx.logger.info('Installed agent {0}'.format(new_agent['name']))

    # Bail out (recoverably) if the replacement agent never connected.
    if not _validate_current_amqp(new_agent)['agent_alive']:
        raise RecoverableError('New agent did not start and connect')

    if stop_old_agent:
        _stop_old_diamond(old_agent, install_agent_timeout)
        _stop_old_agent(new_agent, old_agent, install_agent_timeout)

    # Setting old_cloudify_agent in order to uninstall it later.
    ctx.instance.runtime_properties['old_cloudify_agent'] = installed['old']
    update_agent_runtime_properties(new_agent)
def restart(new_name=None, delay_period=5, **_):
    """Restart the agent daemon under a (possibly new) worker name.

    Creates and starts a replacement daemon cloned from the current one,
    then shuts down the current master from a background thread.

    :param new_name: name for the restarted worker; when None, a new name
        is generated from the current agent name.
    :param delay_period: seconds to wait before shutting down the current
        master.
    """
    cloudify_agent = ctx.instance.runtime_properties['cloudify_agent']
    if new_name is None:
        new_name = utils.internal.generate_new_agent_name(
            cloudify_agent.get('name', 'agent'))

    # update agent name in runtime properties so that the workflow will
    # know the name of the worker handling tasks to this instance.
    # the update cannot be done by setting a nested property directly
    # because they are not recognized as 'dirty'
    cloudify_agent['name'] = new_name
    update_agent_runtime_properties(cloudify_agent)

    daemon = _load_daemon(logger=ctx.logger)

    # make the current master stop listening to the current queue
    # to avoid a situation where we have two masters listening on the
    # same queue.
    rest_tenant = get_tenant()
    app = get_celery_app(tenant=rest_tenant)
    app.control.cancel_consumer(
        queue=daemon.queue,
        destination=['celery@{0}'.format(daemon.name)]
    )

    # clone the current daemon to preserve all the attributes
    attributes = utils.internal.daemon_to_dict(daemon)

    # give the new daemon the new name
    attributes['name'] = new_name

    # remove the log file and pid file so that new ones will be created
    # for the new agent
    del attributes['log_file']
    del attributes['pid_file']

    # Get the broker credentials for the daemon
    attributes.update(ctx.bootstrap_context.broker_config())
    new_daemon = DaemonFactory().new(logger=ctx.logger, **attributes)

    # create the new daemon
    new_daemon.create()
    _save_daemon(new_daemon)

    # configure the new daemon
    new_daemon.configure()
    new_daemon.start()

    # start a thread that will kill the current master.
    # this is done in a thread so that the current task will not result in
    # a failure
    thread = threading.Thread(target=shutdown_current_master,
                              args=[delay_period, ctx.logger])
    thread.daemon = True
    thread.start()
def restart(new_name=None, delay_period=5, **_):
    """Restart the agent daemon under a (possibly new) worker name.

    Creates and starts a replacement daemon cloned from the current one,
    then shuts down the current master from a background thread.

    :param new_name: name for the restarted worker; when None, a new name
        is generated from the current agent name.
    :param delay_period: seconds to wait before shutting down the current
        master.
    """
    agent = ctx.instance.runtime_properties['cloudify_agent']
    if new_name is None:
        new_name = utils.internal.generate_new_agent_name(
            agent.get('name', 'agent'))

    # Store the new name up front so the workflow knows which worker now
    # handles tasks for this instance. The whole dict must be re-set:
    # updates to nested properties are not detected as 'dirty'.
    agent['name'] = new_name
    update_agent_runtime_properties(agent)

    old_daemon = _load_daemon(logger=ctx.logger)

    # Stop the current master from consuming its queue so two masters
    # never listen on the same queue at once.
    celery_app = get_celery_app(tenant=ctx.tenant)
    celery_app.control.cancel_consumer(
        queue=old_daemon.queue,
        destination=['celery@{0}'.format(old_daemon.name)]
    )

    # Clone the current daemon's attributes for the replacement daemon,
    # giving it the new name.
    daemon_attrs = utils.internal.daemon_to_dict(old_daemon)
    daemon_attrs['name'] = new_name

    # Drop the log/pid paths so fresh ones are created for the new agent.
    del daemon_attrs['log_file']
    del daemon_attrs['pid_file']

    # Refresh the broker credentials from the bootstrap context.
    daemon_attrs.update(ctx.bootstrap_context.broker_config())
    replacement = DaemonFactory().new(logger=ctx.logger, **daemon_attrs)

    # Create, persist, configure and start the replacement daemon.
    replacement.create()
    _save_daemon(replacement)
    replacement.configure()
    replacement.start()

    # Kill the current master from a background thread so this very task
    # does not end up reported as a failure.
    killer = threading.Thread(target=shutdown_current_master,
                              args=[delay_period, ctx.logger])
    killer.daemon = True
    killer.start()
def cleanup_scripts():
    """Remove the files that were scheduled for deletion."""
    agent = ctx.instance.runtime_properties.get('cloudify_agent', {})
    # Pop the pending list so the paths are not deleted twice, and persist
    # the now-empty schedule back into runtime properties.
    pending = agent.pop(LOCAL_CLEANUP_PATHS_KEY, [])
    update_agent_runtime_properties(agent)
    for script_path in pending:
        try:
            os.remove(script_path)
        except OSError:
            # Best effort: the file may already be gone.
            pass
def _cleanup_after_installation(self, path):
    """Mark path to be deleted after agent installation.

    The path is appended to the cleanup list kept inside cloudify_agent
    in runtime properties, so that it can be removed later.
    """
    self.cloudify_agent.setdefault(LOCAL_CLEANUP_PATHS_KEY, []).append(path)
    # Re-set the whole dict so the runtime-property change is persisted.
    update_agent_runtime_properties(self.cloudify_agent)
def _update_broker_config(agent, manager_ip, manager_cert):
    """Re-point an agent at a new leader Manager's broker and REST host.

    Mutates the agent dict in place and persists it to runtime
    properties. No-op when neither a manager IP nor a certificate is
    supplied.

    :param agent: the agent's runtime-properties dict.
    :param manager_ip: private IP of the new leader Manager, or None.
    :param manager_cert: SSL certificate of the new leader Manager,
        or None.
    """
    if not (manager_ip or manager_cert):
        return
    broker_conf = agent.setdefault('broker_config', {})
    if manager_ip:
        agent['broker_ip'] = manager_ip
        agent['rest_host'] = manager_ip
        # Point the agent-package download URL at the new manager host.
        agent['package_url'] = _create_package_url(
            agent['package_url'], manager_ip)
        broker_conf['broker_ip'] = manager_ip
    if manager_cert:
        broker_conf['broker_ssl_cert'] = manager_cert
    update_agent_runtime_properties(agent)
def _update_broker_config(agent, manager_ip, manager_cert):
    """Re-point an agent at a new leader Manager's broker and REST host.

    Mutates the agent dict in place and persists it to runtime
    properties. No-op when neither a manager IP nor a certificate is
    supplied.

    :param agent: the agent's runtime-properties dict.
    :param manager_ip: private IP of the new leader Manager, or None.
    :param manager_cert: SSL certificate of the new leader Manager,
        or None.
    """
    if not manager_ip and not manager_cert:
        return
    broker_conf = agent.setdefault('broker_config', dict())
    if manager_ip:
        agent['broker_ip'] = manager_ip
        agent['rest_host'] = manager_ip
        # Point the agent-package download URL at the new manager host.
        package_url = agent['package_url']
        agent['package_url'] = _create_package_url(package_url, manager_ip)
        broker_conf['broker_ip'] = manager_ip
    if manager_cert:
        broker_conf['broker_ssl_cert'] = manager_cert
    # Persist the modified agent dict back into runtime properties.
    update_agent_runtime_properties(agent)