Example #1
    def _strategy(initial_brokers):
        logger.debug('Failover strategy: searching for a new rabbitmq server')
        initial_brokers = [broker for broker in initial_brokers if broker]
        brokers = itertools.cycle(initial_brokers)
        while True:
            if cluster.is_cluster_configured():
                nodes = cluster.get_cluster_nodes()
                for node_ip in nodes:
                    _set_master(daemon_name, node_ip)

                    daemon = DaemonFactory().load(daemon_name)

                    broker_url = 'amqp://{0}:{1}@{2}:{3}/{4}'.format(
                        daemon.broker_user, daemon.broker_pass, node_ip,
                        daemon.broker_port, daemon.broker_vhost)

                    logger.debug('Trying broker at {0}'.format(broker_url))
                    yield broker_url
            else:
                logger.debug('cluster config file does not exist')
                broker_url = next(brokers)
                if len(initial_brokers) > 1:
                    logger.debug('writing config file')
                    cluster.config_from_broker_urls(broker_url,
                                                    initial_brokers)
                    _set_master(daemon_name, cluster.get_cluster_active())
                logger.debug('Trying broker at {0}'.format(broker_url))
                yield broker_url
Example #2
    def cluster_update(nodes):
        cluster.set_cluster_nodes(nodes)
        factory = DaemonFactory()
        daemon = factory.load(daemon_name)
        network_name = daemon.network
        daemon.cluster = [n['networks'][network_name] for n in nodes]
        factory.save(daemon)
Example #3
    def cluster_update_task(self, brokers, broker_ca, managers, manager_ca):
        """Update the running agent with the new cluster.

        When a node is added or removed from the cluster, the agent will
        receive the current cluster nodes in this task. We need to update
        the current process envvars, the cert files, and all the
        daemon config files.
        """
        if not self.name:
            raise RuntimeError('cluster-update sent to agent with no name set')
        factory = DaemonFactory()
        daemon = factory.load(self.name)

        os.environ[constants.REST_HOST_KEY] = ','.join(managers)

        with open(daemon.local_rest_cert_file, 'w') as f:
            f.write(manager_ca)
        with open(daemon.broker_ssl_cert_path, 'w') as f:
            f.write(broker_ca)

        daemon.rest_host = managers
        daemon.broker_ip = brokers
        daemon.create_broker_conf()
        daemon.create_config()

        factory.save(daemon)
Example #4
    def __init__(self, tmp_path, logger, ssl_cert):
        self.daemons = []
        self.temp_folder = str(tmp_path)
        self.username = getpass.getuser()
        self.logger = logger
        self.rest_cert_path = ssl_cert.local_cert_path()
        self.factory = DaemonFactory()
        self.runner = LocalCommandRunner(logger=logger)
Example #5
def restart(new_name=None, delay_period=5, **_):

    cloudify_agent = ctx.instance.runtime_properties['cloudify_agent']
    if new_name is None:
        new_name = utils.internal.generate_new_agent_name(
            cloudify_agent.get('name', 'agent'))

    # update agent name in runtime properties so that the workflow will
    # know the name of the worker handling tasks for this instance.
    # the update cannot be done by setting a nested property directly
    # because they are not recognized as 'dirty'
    cloudify_agent['name'] = new_name
    ctx.instance.runtime_properties['cloudify_agent'] = cloudify_agent

    # must update instance here because the process may shutdown before
    # the decorator has a chance to do it.
    ctx.instance.update()

    daemon = _load_daemon(logger=ctx.logger)

    # make the current master stop listening to the current queue
    # to avoid a situation where we have two masters listening on the
    # same queue.
    app = get_celery_app(tenant=cloudify_agent['rest_tenant'])
    app.control.cancel_consumer(queue=daemon.queue,
                                destination=['celery@{0}'.format(daemon.name)])

    # clone the current daemon to preserve all the attributes
    attributes = utils.internal.daemon_to_dict(daemon)

    # give the new daemon the new name
    attributes['name'] = new_name

    # remove the log file and pid file so that new ones will be created
    # for the new agent
    del attributes['log_file']
    del attributes['pid_file']

    # Get the broker credentials for the daemon
    attributes.update(ctx.bootstrap_context.broker_config())

    new_daemon = DaemonFactory().new(logger=ctx.logger, **attributes)

    # create the new daemon
    new_daemon.create()
    _save_daemon(new_daemon)

    # configure the new daemon
    new_daemon.configure()
    new_daemon.start()

    # start a thread that will kill the current master.
    # this is done in a thread so that the current task will not result in
    # a failure
    thread = threading.Thread(target=shutdown_current_master,
                              args=[delay_period, ctx.logger])
    thread.daemon = True
    thread.start()
Example #6
def _set_master(daemon_name, node_ip):
    factory = DaemonFactory()
    try:
        daemon = factory.load(daemon_name)
    except exceptions.DaemonNotFoundError:
        return
    daemon.broker_ip = node_ip
    factory.save(daemon)
    cluster.set_cluster_active(node_ip)
Example #7
    def cluster_update_task(self, nodes):
        if not self.name:
            raise RuntimeError('cluster-update sent to agent with no name set')
        factory = DaemonFactory()
        daemon = factory.load(self.name)
        network_name = daemon.network
        nodes = [n['networks'][network_name] for n in nodes]
        daemon.cluster = nodes
        factory.save(daemon)
Example #8
def ls():
    """
    List all existing daemons.

    """

    from cloudify_agent.shell.main import get_logger
    daemons = DaemonFactory().load_all(logger=get_logger())
    for daemon in daemons:
        click.echo(daemon.name)
Example #9
def delete(name):
    """
    Deletes the daemon.

    """

    click.echo('Deleting...')
    daemon = _load_daemon(name)
    daemon.delete()
    DaemonFactory().delete(name)
    click.echo('Successfully deleted daemon: {0}'.format(name))
Example #10
def _set_master(daemon_name, node):
    factory = DaemonFactory()
    try:
        daemon = factory.load(daemon_name)
    except exceptions.DaemonNotFoundError:
        return
    daemon.broker_ip = node['broker_ip']
    daemon.broker_ssl_cert_path = node.get('internal_cert_path')
    with _cluster_settings_lock(daemon_name):
        factory.save(daemon)
        cluster.set_cluster_active(node)
Example #11
def restart(new_name=None, delay_period=5, **_):

    cloudify_agent = ctx.instance.runtime_properties['cloudify_agent']
    if new_name is None:
        new_name = utils.internal.generate_new_agent_name(
            cloudify_agent.get('name', 'agent'))

    # update agent name in runtime properties so that the workflow will
    # know the name of the worker handling tasks for this instance.
    # the update cannot be done by setting a nested property directly
    # because they are not recognized as 'dirty'
    cloudify_agent['name'] = new_name
    update_agent_runtime_properties(cloudify_agent)

    daemon = _load_daemon(logger=ctx.logger)

    # make the current master stop listening to the current queue
    # to avoid a situation where we have two masters listening on the
    # same queue.
    app = get_celery_app(tenant=ctx.tenant)
    app.control.cancel_consumer(queue=daemon.queue,
                                destination=['celery@{0}'.format(daemon.name)])

    # clone the current daemon to preserve all the attributes
    attributes = utils.internal.daemon_to_dict(daemon)

    # give the new daemon the new name
    attributes['name'] = new_name

    # remove the log file and pid file so that new ones will be created
    # for the new agent
    del attributes['log_file']
    del attributes['pid_file']

    # Get the broker credentials for the daemon
    attributes.update(ctx.bootstrap_context.broker_config())

    new_daemon = DaemonFactory().new(logger=ctx.logger, **attributes)

    # create the new daemon
    new_daemon.create()
    _save_daemon(new_daemon)

    # configure the new daemon
    new_daemon.configure()
    new_daemon.start()

    # ..and stop the old agent
    daemon.before_self_stop()
    raise StopAgent()
Example #12
    def setUp(self):
        super(TestDaemonFactory, self).setUp()
        self.daemon_name = 'test-daemon-{0}'.format(uuid.uuid4())
        self.factory = DaemonFactory(storage=get_storage_directory())
        self.daemon_params = {
            'process_management': 'init.d',
            'name': self.daemon_name,
            'queue': 'queue',
            'rest_host': '127.0.0.1',
            'broker_ip': '127.0.0.1',
            'user': '******',
            'broker_url': '127.0.0.1',
            'broker_ssl_enabled': True,
            'local_rest_cert_file': self._rest_cert_path
        }
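A minimal sketch (not part of the original listing) of a test that could follow this setUp; it assumes DaemonFactory.new/save/load/delete behave as they do in the other examples on this page, and the test method name is made up:

    def test_save_load_delete(self):
        # sketch only: build a daemon from the params prepared in setUp,
        # persist it, reload it by name, then clean up
        daemon = self.factory.new(**self.daemon_params)
        self.factory.save(daemon)
        loaded = self.factory.load(self.daemon_name)
        self.assertEqual(self.daemon_name, loaded.name)
        self.factory.delete(self.daemon_name)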
Example #13
def create(**params):
    """
    Creates and stores the daemon parameters.

    """
    attributes = dict(**params)
    custom_arg = attributes.pop('custom_options', ())
    attributes.update(_parse_custom_options(custom_arg))

    click.echo('Creating...')
    from cloudify_agent.shell.main import get_logger
    daemon = DaemonFactory().new(logger=get_logger(), **attributes)
    daemon.create()
    _save_daemon(daemon)
    click.echo('Successfully created daemon: {0}'.format(daemon.name))
Example #14
    def replace_ca_certs_task(self, new_manager_ca, new_broker_ca):
        """Update the running agent with new CAs."""
        self._assert_name('replace-ca-certs')
        factory = DaemonFactory()
        daemon = factory.load(self.name)

        if new_broker_ca:
            with open(daemon.broker_ssl_cert_path, 'w') as f:
                f.write(new_broker_ca)
            daemon.create_broker_conf()

        if new_manager_ca:
            with open(daemon.local_rest_cert_file, 'w') as f:
                f.write(new_manager_ca)
            daemon.create_config()

        factory.save(daemon)
Example #15
def install(source, args):
    """
    Install a cloudify plugin into the current virtualenv. This will also
    register the plugin to all daemons created from this virtualenv.
    """

    from cloudify_agent.shell.main import get_logger
    click.echo('Installing plugin from {0}'.format(source))
    installer = PluginInstaller(logger=get_logger())
    name = installer.install(source, args)

    daemons = DaemonFactory().load_all(logger=get_logger())
    for daemon in daemons:
        click.echo('Registering plugin {0} to {1}'.format(name, daemon.name))
        if daemon.virtualenv == VIRTUALENV:
            daemon.register(name)
            _save_daemon(daemon)

    click.echo('Successfully installed plugin: {0}'.format(name))
Example #16
def uninstall(plugin):
    """
    Uninstall a cloudify plugin from the current virtualenv. This will also
    unregister the plugin from all daemons created from this virtualenv.
    """

    from cloudify_agent.shell.main import get_logger
    click.echo('Uninstalling plugin {0}'.format(plugin))
    installer = PluginInstaller(logger=get_logger())
    installer.uninstall(plugin)

    daemons = DaemonFactory().load_all(logger=get_logger())
    for daemon in daemons:
        click.echo('Un-registering plugin {0} from {1}'.format(
            plugin, daemon.name))
        if daemon.virtualenv == VIRTUALENV:
            daemon.unregister(plugin)
            _save_daemon(daemon)

    click.echo('Successfully uninstalled plugin: {0}'.format(plugin))
Example #17
def create(**params):
    """
    Creates and stores the daemon parameters.

    """
    attributes = dict(**params)
    custom_arg = attributes.pop('custom_options', ())
    attributes.update(_parse_custom_options(custom_arg))

    click.echo('Creating...')

    if attributes['broker_get_settings_from_manager']:
        broker = api_utils.internal.get_broker_configuration(attributes)
        attributes.update(broker)

    from cloudify_agent.shell.main import get_logger
    daemon = DaemonFactory().new(logger=get_logger(), **attributes)

    daemon.create()
    _save_daemon(daemon)
    click.echo('Successfully created daemon: {0}'.format(daemon.name))
Example #18
    def create_daemon(self, **attributes):
        name = utils.internal.generate_agent_name()

        params = {
            'rest_host': ['127.0.0.1'],
            'broker_ip': ['127.0.0.1'],
            'user': self.username,
            'workdir': self.temp_folder,
            'logger': self.logger,
            'name': name,
            'queue': '{0}-queue'.format(name),
            'local_rest_cert_file': self._rest_cert_path,
            'broker_ssl_enabled': False,  # No SSL on the CI machines
        }
        params.update(attributes)

        factory = DaemonFactory()
        daemon = self.daemon_cls(**params)
        factory.save(daemon)
        self.addCleanup(factory.delete, daemon.name)
        self.daemons.append(daemon)
        return daemon
Example #19
    def _strategy(initial_brokers):
        logger.debug('Failover strategy: searching for a new rabbitmq server')
        initial_brokers = [broker for broker in initial_brokers if broker]
        brokers = itertools.cycle(initial_brokers)
        while True:
            if cluster.is_cluster_configured():
                nodes = cluster.get_cluster_nodes()
                for node in nodes:
                    with _cluster_settings_lock(daemon_name):
                        _set_master(daemon_name, node)

                    daemon = DaemonFactory().load(daemon_name)

                    broker_url = 'amqp://{0}:{1}@{2}:{3}/{4}'.format(
                        daemon.broker_user, daemon.broker_pass,
                        node['broker_ip'], daemon.broker_port,
                        daemon.broker_vhost)

                    if daemon.broker_ssl_enabled:
                        # use a different cert for each node in the cluster -
                        # can't pass that in the amqp url
                        broker_ssl_cert_path = node.get('internal_cert_path')
                        if broker_ssl_cert_path:
                            app.conf['BROKER_USE_SSL']['ca_certs'] =\
                                broker_ssl_cert_path

                    logger.debug('Trying broker at {0}'.format(broker_url))
                    yield broker_url
            else:
                logger.debug('cluster config file does not exist')
                broker_url = next(brokers)
                if len(initial_brokers) > 1:
                    logger.debug('writing config file')
                    with _cluster_settings_lock(daemon_name):
                        cluster.config_from_broker_urls(
                            broker_url, initial_brokers)
                        _set_master(daemon_name, cluster.get_cluster_active())
                logger.debug('Trying broker at {0}'.format(broker_url))
                yield broker_url
Example #20
def _save_daemon(daemon):
    DaemonFactory(username=daemon.user).save(daemon)
Example #21
def _load_daemon(name, user=None):
    from cloudify_agent.shell.main import get_logger
    return DaemonFactory(username=user).load(name, logger=get_logger())
Example #22
def daemon_factory(tmp_path):
    yield DaemonFactory(storage=get_daemon_storage(str(tmp_path)))
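For reference, a sketch (not from the original listing) of how this fixture might be consumed in a test; pytest is assumed, and `exceptions` refers to the same module from which Examples #6 and #10 catch DaemonNotFoundError:

def test_load_missing_daemon(daemon_factory):
    # sketch only: loading a name that was never saved is expected to raise
    # DaemonNotFoundError, as the try/except in Examples #6 and #10 implies
    with pytest.raises(exceptions.DaemonNotFoundError):
        daemon_factory.load('no-such-daemon')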
Example #23
def _save_daemon(daemon):
    factory = DaemonFactory(username=utils.internal.get_daemon_user(),
                            storage=utils.internal.get_daemon_storage_dir())
    factory.save(daemon)
Example #24
def _load_daemon(logger):
    factory = DaemonFactory(username=utils.internal.get_daemon_user(),
                            storage=utils.internal.get_daemon_storage_dir())
    return factory.load(utils.internal.get_daemon_name(), logger=logger)
Example #25
    def cluster_update(nodes):
        cluster.set_cluster_nodes(nodes)
        factory = DaemonFactory()
        daemon = factory.load(daemon_name)
        daemon.cluster = nodes
        factory.save(daemon)
Example #26
    def setUp(self):
        super(TestDaemonFactory, self).setUp()
        self.daemon_name = 'test-daemon-{0}'.format(uuid.uuid4())
        self.factory = DaemonFactory(storage=get_storage_directory())