def update_operation(self, operation_id, state):
    """Update an operation's state, tagging the reporting agent and manager."""
    self.rest_client.operations.update(
        operation_id,
        state=state,
        agent_name=utils.get_daemon_name(),
        manager_name=utils.get_manager_name(),
    )
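# --- usage sketch (not part of the original module) ---
# A minimal, hypothetical illustration of how update_operation might be
# driven by a task wrapper. `handler`, `operation_id` and `run_task` are
# stand-ins, not names taken from this codebase, and the state strings
# are illustrative only.
def _report_lifecycle(handler, operation_id, run_task):
    handler.update_operation(operation_id, state='started')
    try:
        run_task()
    except Exception:
        handler.update_operation(operation_id, state='failed')
        raise
    else:
        handler.update_operation(operation_id, state='succeeded')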
def _publish_message(message, message_type, logger, skip_send=False):
    """Log `message` locally, then ship it to the manager as a log or event."""
    if u'message' in message:
        _log_message(logger, message)
    if skip_send:
        return
    execution_id = get_execution_id()
    client = manager.get_rest_client()
    if message_type == 'log':
        logs = [message]
        events = None
    else:
        logs = None
        events = [message]
    client.events.create(
        events=events,
        logs=logs,
        execution_id=execution_id,
        agent_name=get_daemon_name(),
        manager_name=get_manager_name(),
    )
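# --- usage sketch (not part of the original module) ---
# A hypothetical caller showing both message_type branches above: 'log'
# entries go out via the `logs` kwarg, anything else via `events`. The
# message dicts are stand-ins; the real producers build them elsewhere,
# and actually sending requires a configured rest client and execution
# context.
def _example_publish(logger):
    log_msg = {'message': {'text': 'agent started'}, 'level': 'info'}
    event_msg = {'message': {'text': 'task_started'}}
    _publish_message(log_msg, 'log', logger)                  # sent as a log
    _publish_message(event_msg, 'event', logger)              # sent as an event
    _publish_message(log_msg, 'log', logger, skip_send=True)  # logged only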
def _set_ips_and_certs(self):
    network = self['network']
    managers = ctx.get_managers(network=network)
    brokers = ctx.get_brokers(network=network)
    self['rest_host'] = [
        manager.networks[network] for manager in managers
    ]
    self['broker_ip'] = [
        broker.networks[network] for broker in brokers
    ]
    self['rest_ssl_cert'] = '\n'.join(
        manager.ca_cert_content.strip()
        for manager in managers if manager.ca_cert_content
    )
    self['broker_ssl_cert'] = '\n'.join(
        broker.ca_cert_content.strip()
        for broker in brokers if broker.ca_cert_content
    )
    # Set the fileserver url using only the mgmtworker-local manager,
    # not all managers in the cluster: the mgmtworker writes a script
    # that the agent installer is supposed to download, and that script
    # is only served by the local restservice; the other restservices
    # would only have it after the delay of filesystem replication.
    local_manager_hostname = cloudify_utils.get_manager_name()
    local_manager_network_ip = None
    for manager in managers:
        if manager.hostname == local_manager_hostname:
            local_manager_network_ip = manager.networks[network]
            break
    if not local_manager_network_ip:
        raise RuntimeError(
            'No fileserver url for manager {0} on network {1}'
            .format(local_manager_hostname, network))
    self['file_server_url'] = agent_utils.get_manager_file_server_url(
        local_manager_network_ip,
        cloudify_utils.get_manager_rest_service_port(),
        scheme=cloudify_utils.get_manager_file_server_scheme(),
    )
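# --- illustration (not part of the original module) ---
# Assuming two managers on network 'default' and that this mgmtworker
# runs on 'manager-1', _set_ips_and_certs leaves the config looking
# roughly like this (addresses and cert contents are made up):
#
#   self['rest_host']       == ['10.0.0.1', '10.0.0.2']   # every manager
#   self['broker_ip']       == ['10.0.0.1', '10.0.0.2']   # every broker
#   self['rest_ssl_cert']   == '<ca 1>\n<ca 2>'           # concatenated CAs
#   self['file_server_url'] == a url built from 10.0.0.1  # local manager only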
def uninstall_plugin_task(self, plugin, rest_token, tenant, rest_host,
                          target=None, bypass_maintenance=False):
    if target:
        # A target was provided, so the plugin is to be uninstalled only
        # on the specified workers, but we might have received the task
        # anyway because it was sent to a fanout exchange. This only
        # matters for mgmtworkers, because agents have no fanout
        # exchanges.
        if get_manager_name() not in target:
            return

    class _EmptyID(object):
        id = None

    class PluginUninstallCloudifyContext(object):
        """A CloudifyContext with just enough data to uninstall plugins."""
        def __init__(self):
            self.rest_host = rest_host
            self.tenant_name = tenant['name']
            self.rest_token = rest_token
            self.execution_token = None
            self.logger = logging.getLogger('plugin')
            # deployment/blueprint are not defined for force-installs,
            # but the ctx demands they be objects with an .id
            self.deployment = _EmptyID()
            self.blueprint = _EmptyID()
            self.bypass_maintenance = bypass_maintenance

    with current_ctx.push(PluginUninstallCloudifyContext()):
        uninstall_plugins([plugin])
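# --- usage sketch (not part of the original module) ---
# Hypothetical illustration of the `target` guard above. When the task
# is sent to a fanout exchange, every mgmtworker receives it, but only
# the workers named in `target` proceed. `worker` stands for whatever
# object this method is bound to, and the names below are made up.
#
#   worker.uninstall_plugin_task(plugin, token, {'name': 'default_tenant'},
#                                'manager-1', target=['manager-1'])
#
# On a worker where get_manager_name() != 'manager-1', the call returns
# immediately without uninstalling anything.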
def _set_state(plugin, allow_missing=False, **kwargs):
    client = get_rest_client()
    if not plugin.get('id'):
        # We don't have the full plugin details; try to retrieve them
        # from the restservice.
        managed_plugin = get_managed_plugin(plugin)
        if managed_plugin and managed_plugin.get('id'):
            plugin = managed_plugin
        else:
            return
    try:
        # On an agent, report the daemon name; if no daemon name is set
        # (KeyError), we are running on a manager instead.
        agent = get_daemon_name()
        manager = None
    except KeyError:
        agent = None
        manager = get_manager_name()
    try:
        client.plugins.set_state(
            plugin['id'],
            agent_name=agent,
            manager_name=manager,
            **kwargs
        )
    except CloudifyClientError as e:
        # With allow_missing, tolerate the plugin already being gone.
        if e.status_code != 404 or not allow_missing:
            raise
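# --- usage sketch (not part of the original module) ---
# Hypothetical callers of _set_state; the state values are stand-ins,
# not necessarily the exact constants used elsewhere in this codebase.
#
#   _set_state(plugin, state='installing')
#   _set_state(plugin, state='uninstalled', allow_missing=True)
#
# With allow_missing=True, a 404 from plugins.set_state (e.g. the
# plugin entry was already deleted) is swallowed instead of raised.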