def restart(new_name=None, delay_period=5, **_):
    """Restart this agent under a new name.

    Clones the current daemon's attributes into a new daemon with
    ``new_name`` (generated when not given), starts it, and then kills
    the current master from a background thread after ``delay_period``
    seconds so this task itself can finish cleanly.
    """
    cloudify_agent = ctx.instance.runtime_properties['cloudify_agent']
    if new_name is None:
        new_name = utils.internal.generate_new_agent_name(
            cloudify_agent.get('name', 'agent'))

    # update agent name in runtime properties so that the workflow will
    # know the name of the worker handling tasks to this instance.
    # the update cannot be done by setting a nested property directly
    # because they are not recognized as 'dirty'
    cloudify_agent['name'] = new_name
    ctx.instance.runtime_properties['cloudify_agent'] = cloudify_agent

    # must update instance here because the process may shutdown before
    # the decorator has a chance to do it.
    ctx.instance.update()

    daemon = _load_daemon(logger=ctx.logger)

    # make the current master stop listening to the current queue
    # to avoid a situation where we have two masters listening on the
    # same queue.
    app = get_celery_app(tenant=cloudify_agent['rest_tenant'])
    app.control.cancel_consumer(
        queue=daemon.queue,
        destination=['celery@{0}'.format(daemon.name)])

    # clone the current daemon to preserve all the attributes
    attributes = utils.internal.daemon_to_dict(daemon)

    # give the new daemon the new name
    attributes['name'] = new_name

    # remove the log file and pid file so that new ones will be created
    # for the new agent
    del attributes['log_file']
    del attributes['pid_file']

    # Get the broker credentials for the daemon
    attributes.update(ctx.bootstrap_context.broker_config())

    new_daemon = DaemonFactory().new(logger=ctx.logger, **attributes)

    # create the new daemon
    new_daemon.create()
    _save_daemon(new_daemon)

    # configure the new daemon
    new_daemon.configure()
    new_daemon.start()

    # start a thread that will kill the current master.
    # this is done in a thread so that the current task will not result in
    # a failure
    thread = threading.Thread(target=shutdown_current_master,
                              args=[delay_period, ctx.logger])
    thread.daemon = True
    thread.start()
def _get_registered(self):
    """Return the set of task names registered on the target worker.

    Returns ``None`` when the inspect call gets no answer, or when the
    answer has no entry for the target worker.
    """
    app_cm = get_celery_app(tenant=self.workflow_context.tenant,
                            target=self.target)
    with app_cm as app:
        worker = 'celery@{0}'.format(self.target)
        inspector = app.control.inspect(destination=[worker],
                                        timeout=INSPECT_TIMEOUT)
        reply = inspector.registered()
        if reply is None or worker not in reply:
            return None
        return set(reply[worker])
def _validate_current_amqp():
    """Probe the currently-configured agent over AMQP.

    Returns a dict with ``agent_alive`` (bool) and ``agent_alive_error``
    (the failure reason as a string, empty on success).
    """
    agent = ctx.instance.runtime_properties['cloudify_agent']
    try:
        ctx.logger.info('Trying current AMQP...')
        app = get_celery_app(tenant=agent.get('rest_tenant'))
        _validate_amqp_connection(app, agent['name'])
    except Exception as e:
        ctx.logger.info('Agent unavailable, reason {0}'.format(str(e)))
        return {'agent_alive': False, 'agent_alive_error': str(e)}
    return {'agent_alive': True, 'agent_alive_error': ''}
def create_agent_amqp(install_agent_timeout=300, **_):
    """Install a replacement agent and record both agents in runtime
    properties.

    The freshly-installed agent is verified to be alive before the
    properties are written.
    """
    old_agent = _validate_agent()
    agents = _run_install_script(old_agent, install_agent_timeout)
    new_agent = agents['new']
    ctx.logger.info('Installed agent {0}'.format(new_agent['name']))
    # Make sure that new celery agent was started:
    _assert_agent_alive(
        new_agent['name'],
        get_celery_app(tenant=new_agent['rest_tenant']))
    # Setting old_cloudify_agent in order to uninstall it later.
    props = ctx.instance.runtime_properties
    props['old_cloudify_agent'] = agents['old']
    props['cloudify_agent'] = new_agent
def start(cloudify_agent, **_):
    """
    Only called in "init_script"/"plugin" mode, where the agent is started
    externally (e.g. userdata script), and all we have to do is wait for it
    """
    app = get_celery_app(tenant=cloudify_agent['rest_tenant'],
                         target=cloudify_agent['queue'])
    # keep retrying the operation until the worker shows up as registered
    if not utils.get_agent_registered(cloudify_agent['name'], app):
        return ctx.operation.retry(message='Waiting for Agent to start...')
    ctx.logger.info('Agent has started')
def _is_agent_registered(self):
    """Check whether this daemon's agent is registered with celery.

    A cluster-aware client is used when a cluster is configured (only
    used for manager failures during installation - see detailed comment
    in the ._get_amqp_client method); the client is always closed.
    """
    if self.cluster:
        client = get_cluster_celery_app(self.broker_url,
                                        self.cluster,
                                        self.broker_ssl_enabled)
    else:
        client = get_celery_app(
            broker_url=self.broker_url,
            broker_ssl_enabled=self.broker_ssl_enabled,
            broker_ssl_cert_path=self.broker_ssl_cert_path)
    try:
        self._logger.debug('Retrieving daemon registered tasks')
        return utils.get_agent_registered(
            self.name, client, timeout=AGENT_IS_REGISTERED_TIMEOUT)
    finally:
        if client:
            client.close()
def _celery_app(agent):
    """Yield a celery client connected to the broker the agent uses.

    Any temporary SSL certificate file written for the connection is
    removed once the caller is done with the client.
    """
    # We retrieve broker url from old agent in order to support
    # cases when old agent is not connected to current rabbit server.
    fallback = ctx.bootstrap_context.broker_config()
    broker_config = agent.get('broker_config', fallback)
    version = agent.get('version') or str(_get_manager_version())
    broker_url = utils.internal.get_broker_url(broker_config)
    ctx.logger.info('Connecting to {0}'.format(broker_url))
    cert_path = _get_ssl_cert_path(broker_config)
    client = get_celery_app(
        broker_url=broker_url,
        broker_ssl_enabled=broker_config.get('broker_ssl_enabled'),
        broker_ssl_cert_path=cert_path)
    # agents at exactly version 3.2 keep celery's own default expiry
    if ManagerVersion(version) != ManagerVersion('3.2'):
        client.conf['CELERY_TASK_RESULT_EXPIRES'] = \
            defaults.CELERY_TASK_RESULT_EXPIRES
    try:
        yield client
    finally:
        if cert_path:
            os.remove(cert_path)
def capture(self, tenant=None):
    """Consume celery task events for the given tenant until stopped."""
    with get_celery_app(tenant=tenant) as app:
        with app.connection() as connection:
            if self._should_stop:
                return
            # make clone() hand back this same connection object
            # (presumably so the Receiver reuses it - verify against
            # the celery Receiver implementation)
            connection.clone = lambda: connection
            handlers = {
                'task-sent': self.task_sent,
                'task-received': self.task_received,
                'task-started': self.task_started,
                'task-succeeded': self.task_succeeded,
                'task-failed': self.task_failed,
                'task-revoked': self.task_revoked,
                'task-retried': self.task_retried,
            }
            self._receiver = app.events.Receiver(connection,
                                                 handlers=handlers)
            self._receiver.capture(limit=None, timeout=None, wakeup=True)