Example #1
def extract_plugin_name(plugin_url):
    previous_cwd = os.getcwd()
    fetch_plugin_from_pip_by_url = not os.path.isdir(plugin_url)
    plugin_dir = plugin_url
    try:
        if fetch_plugin_from_pip_by_url:
            plugin_dir = tempfile.mkdtemp()
            req_set = pip.req.RequirementSet(build_dir=None,
                                             src_dir=None,
                                             download_dir=None)
            req_set.unpack_url(link=pip.index.Link(plugin_url),
                               location=plugin_dir,
                               download_dir=None,
                               only_download=False)
        runner = LocalCommandRunner()
        os.chdir(plugin_dir)
        plugin_name = runner.run('{0} {1} {2}'.format(
            _python(),
            path.join(path.dirname(__file__), 'extract_package_name.py'),
            plugin_dir)).std_out
        runner.run('{0} install --no-deps {1}'.format(_pip(), plugin_dir))
        return plugin_name
    finally:
        os.chdir(previous_cwd)
        if fetch_plugin_from_pip_by_url:
            shutil.rmtree(plugin_dir)
Example #2
    def _list_plugin_files(self, plugin_name):
        """
        Retrieves the Python files related to the plugin.
        __init__ files are filtered out.

        :param plugin_name: The plugin name.

        :return: A list of file paths.
        :rtype: list of str
        """

        module_paths = []
        runner = LocalCommandRunner(self._logger)

        files = runner.run('{0} show -f {1}'.format(
            utils.get_pip_path(), plugin_name)).std_out.splitlines()
        for module in files:
            if self._is_valid_module(module):
                # the file paths are relative to the
                # package __init__.py file.
                prefix = '../' if os.name == 'posix' else '..\\'
                module_paths.append(
                    module.replace(prefix,
                                   '').replace(os.sep,
                                               '.').replace('.py', '').strip())
        return module_paths
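
For context, the transformation above (turning `pip show -f` output into dotted module paths) can be sketched on its own. This is an illustrative helper, not part of the original class, and the sample input shown in the comment is an assumption.

import os

def files_to_module_paths(lines):
    # Mirror the filtering above: keep .py files, drop __init__ files,
    # strip the leading '../' (or '..\') prefix and turn path separators
    # into dots.
    prefix = '../' if os.name == 'posix' else '..\\'
    paths = []
    for line in lines:
        line = line.strip()
        if not line.endswith('.py') or '__init__' in line:
            continue
        paths.append(
            line.replace(prefix, '').replace(os.sep, '.')[:-len('.py')])
    return paths

# e.g. files_to_module_paths(['../mock_plugin/tasks.py'])
# -> ['mock_plugin.tasks'] on POSIX systems.
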
def extract_package_to_dir(package_url):
    """
    Use a subprocess to extract a pip package to a temporary directory.

    :param package_url: the URL of the package source.
    :return: the directory the package was extracted to.

    """
    plugin_dir = None
    archive_dir = tempfile.mkdtemp()
    runner = LocalCommandRunner()

    try:
        # We run the `pip download` command in a subprocess to support
        # a multi-threaded scenario (i.e. snapshot restore).
        # We don't use `curl` because pip can handle different kinds of files,
        # including .git.
        command = [get_pip_path(), 'download', '-d',
                   archive_dir, '--no-deps', package_url]
        runner.run(command=command)
        archive = _get_archive(archive_dir, package_url)
        plugin_dir_parent = extract_archive(archive)
        plugin_dir = _get_plugin_path(plugin_dir_parent, package_url)

    except NonRecoverableError as e:
        if plugin_dir and os.path.exists(plugin_dir):
            shutil.rmtree(plugin_dir)
        raise e

    finally:
        if os.path.exists(archive_dir):
            shutil.rmtree(archive_dir)

    return plugin_dir
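
For reference, the `pip download` step used above can be expressed with the standard library alone. This is a simplified sketch, not the project's helper; invoking pip via `sys.executable -m pip` is an assumption for illustration.

import subprocess
import sys
import tempfile

def download_package(package_url):
    # Download just the requested package (no dependencies) into a
    # fresh temporary directory, mirroring the command built above.
    archive_dir = tempfile.mkdtemp()
    subprocess.check_call([sys.executable, '-m', 'pip', 'download',
                           '-d', archive_dir, '--no-deps', package_url])
    return archive_dir
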
Example #4
def install_blueprint_plugins(blueprint_path):

    requirements = create_requirements(
        blueprint_path=blueprint_path
    )

    # validate we are inside a virtual env
    if not utils.is_virtual_env():
        raise exceptions.CloudifyCliError(
            'You must be running inside a '
            'virtualenv to install blueprint plugins')

    runner = LocalCommandRunner(get_logger())

    # dump the requirements to a file
    # and let pip install it.
    # this will utilize pip's mechanism
    # of cleanup in case an installation fails.
    output = tempfile.NamedTemporaryFile(mode='w',
                                         delete=True,
                                         suffix='.txt',
                                         prefix='requirements_')
    utils.dump_to_file(collection=requirements,
                       file_path=output.name)
    runner.run(command='pip install -r {0}'.format(output.name),
               stdout_pipe=False)
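
The comment in the example above describes writing the collected requirements to a temporary file and handing it to `pip install -r`. A minimal standalone sketch of that pattern (illustrative only, not the CLI's actual code path) follows.

import subprocess
import sys
import tempfile

def pip_install_requirements(requirements):
    # Dump one requirement per line and let pip perform the installation,
    # relying on pip's own cleanup if the installation fails. The temp
    # file is intentionally kept (delete=False) so pip can read it.
    with tempfile.NamedTemporaryFile(mode='w', suffix='.txt',
                                     prefix='requirements_',
                                     delete=False) as req_file:
        req_file.write('\n'.join(requirements))
    subprocess.check_call([sys.executable, '-m', 'pip', 'install',
                           '-r', req_file.name])
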
def extract_package_to_dir(package_url):
    """
    Use a subprocess to extract a pip package to a temporary directory.

    :param package_url: the URL of the package source.
    :return: the directory the package was extracted to.

    """
    plugin_dir = None
    archive_dir = tempfile.mkdtemp()
    runner = LocalCommandRunner()

    try:
        # We run the `pip download` command in a subprocess to support
        # a multi-threaded scenario (i.e. snapshot restore).
        # We don't use `curl` because pip can handle different kinds of files,
        # including .git.
        command = [sys.executable, '-m', 'pip', 'download', '-d',
                   archive_dir, '--no-deps', package_url]
        runner.run(command=command)
        archive = _get_archive(archive_dir, package_url)
        plugin_dir_parent = extract_archive(archive)
        plugin_dir = _get_plugin_path(plugin_dir_parent, package_url)

    except NonRecoverableError as e:
        if plugin_dir and os.path.exists(plugin_dir):
            shutil.rmtree(plugin_dir)
        raise e

    finally:
        if os.path.exists(archive_dir):
            shutil.rmtree(archive_dir)

    return plugin_dir
Example #6
 def setUp(self):
     super(BaseDaemonLiveTestCase, self).setUp()
     self.celery = Celery(broker='amqp://', backend='amqp://')
     self.celery.conf.update(
         CELERY_TASK_RESULT_EXPIRES=defaults.CELERY_TASK_RESULT_EXPIRES)
     self.runner = LocalCommandRunner(logger=self.logger)
     self.daemons = []
Example #7
 def __init__(self, root_path=None, port=5555):
     self.port = port
     self.root_path = root_path or os.path.dirname(resources.__file__)
     self.process = None
     self.logger = setup_logger('cloudify_agent.tests.utils.FileServer',
                                logger_level=logging.DEBUG)
     self.runner = LocalCommandRunner(self.logger)
def extract_plugin_name(plugin_url):
    previous_cwd = os.getcwd()
    fetch_plugin_from_pip_by_url = not os.path.isdir(plugin_url)
    plugin_dir = plugin_url
    try:
        if fetch_plugin_from_pip_by_url:
            plugin_dir = tempfile.mkdtemp()
            req_set = pip.req.RequirementSet(build_dir=None,
                                             src_dir=None,
                                             download_dir=None)
            req_set.unpack_url(link=pip.index.Link(plugin_url),
                               location=plugin_dir,
                               download_dir=None,
                               only_download=False)
        runner = LocalCommandRunner()
        os.chdir(plugin_dir)
        plugin_name = runner.run(
            '{0} {1} {2}'.format(_python(),
                                 path.join(
                                     path.dirname(__file__),
                                     'extract_package_name.py'),
                                 plugin_dir)).std_out
        runner.run('{0} install --no-deps {1}'.format(_pip(), plugin_dir))
        return plugin_name
    finally:
        os.chdir(previous_cwd)
        if fetch_plugin_from_pip_by_url:
            shutil.rmtree(plugin_dir)
Example #9
class FileServer(object):
    def __init__(self, root_path=None, port=5555):
        self.port = port
        self.root_path = root_path or os.path.dirname(resources.__file__)
        self.process = None
        self.logger = setup_logger("cloudify_agent.tests.utils.FileServer", logger_level=logging.DEBUG)
        self.runner = LocalCommandRunner(self.logger)

    def start(self, timeout=5):
        if os.name == "nt":
            serve_path = os.path.join(VIRTUALENV, "Scripts", "serve")
        else:
            serve_path = os.path.join(VIRTUALENV, "bin", "serve")

        self.process = subprocess.Popen(
            [serve_path, "-p", str(self.port), self.root_path], stdin=open(os.devnull, "w"), stdout=None, stderr=None
        )

        end_time = time.time() + timeout

        while end_time > time.time():
            if self.is_alive():
                logger.info("File server is up and serving from {0} ({1})".format(self.root_path, self.process.pid))
                return
            logger.info("File server is not responding. waiting 10ms")
            time.sleep(0.1)
        raise RuntimeError("FileServer failed to start")

    def stop(self, timeout=15):
        if self.process is None:
            return

        end_time = time.time() + timeout

        if os.name == "nt":
            self.runner.run(
                "taskkill /F /T /PID {0}".format(self.process.pid),
                stdout_pipe=False,
                stderr_pipe=False,
                exit_on_failure=False,
            )
        else:
            self.runner.run("kill -9 {0}".format(self.process.pid))

        while end_time > time.time():
            if not self.is_alive():
                logger.info("File server has shutdown")
                return
            logger.info("File server is still running. waiting 10ms")
            time.sleep(0.1)
        raise RuntimeError("FileServer failed to stop")

    def is_alive(self):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect(("localhost", self.port))
            s.close()
            return True
        except socket.error:
            return False
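
The `is_alive` check above is a plain TCP connect probe. As an isolated sketch (the timeout is an addition, assumed here for robustness):

import socket

def port_is_open(host, port, timeout=1.0):
    # Try to open a TCP connection; success means something is listening.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(timeout)
    try:
        sock.connect((host, port))
        return True
    except socket.error:
        return False
    finally:
        sock.close()
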
Example #10
def install_blueprint_plugins(blueprint_path):

    requirements = create_requirements(
        blueprint_path=blueprint_path
    )

    if requirements:
        # validate we are inside a virtual env
        if not utils.is_virtual_env():
            raise exceptions.CloudifyCliError(
                'You must be running inside a '
                'virtualenv to install blueprint plugins')

        runner = LocalCommandRunner(get_logger())
        # dump the requirements to a file
        # and let pip install it.
        # this will utilize pip's mechanism
        # of cleanup in case an installation fails.
        tmp_path = tempfile.mkstemp(suffix='.txt', prefix='requirements_')[1]
        utils.dump_to_file(collection=requirements, file_path=tmp_path)
        command_parts = [sys.executable, '-m', 'pip', 'install', '-r',
                         tmp_path]
        runner.run(command=' '.join(command_parts), stdout_pipe=False)
    else:
        get_logger().debug('There are no plugins to install')
Example #11
    def _list_plugin_files(self, plugin_name):

        """
        Retrieves the Python files related to the plugin.
        __init__ files are filtered out.

        :param plugin_name: The plugin name.

        :return: A list of file paths.
        :rtype: list of str
        """

        module_paths = []
        runner = LocalCommandRunner(self._logger)

        files = runner.run(
            '{0} show -f {1}'
            .format(utils.get_pip_path(), plugin_name)
        ).std_out.splitlines()
        for module in files:
            if self._is_valid_module(module):
                # the file paths are relative to the
                # package __init__.py file.
                prefix = '../' if os.name == 'posix' else '..\\'
                module_paths.append(
                    module.replace(prefix, '')
                    .replace(os.sep, '.').replace('.py', '').strip())
        return module_paths
Example #12
def install_blueprint_plugins(blueprint_path):

    requirements = create_requirements(
        blueprint_path=blueprint_path
    )

    if requirements:
        # validate we are inside a virtual env
        if not utils.is_virtual_env():
            raise exceptions.CloudifyCliError(
                'You must be running inside a '
                'virtualenv to install blueprint plugins')

        runner = LocalCommandRunner(get_logger())
        # dump the requirements to a file
        # and let pip install it.
        # this will utilize pip's mechanism
        # of cleanup in case an installation fails.
        output = tempfile.NamedTemporaryFile(mode='w',
                                             delete=True,
                                             suffix='.txt',
                                             prefix='requirements_')
        utils.dump_to_file(collection=requirements,
                           file_path=output.name)
        runner.run(command='pip install -r {0}'.format(output.name),
                   stdout_pipe=False)
    else:
        get_logger().debug('There are no plugins to install.')
 def _assert_plugin_installed(self, package_name,
                              plugin, dependencies=None):
     if not dependencies:
         dependencies = []
     runner = LocalCommandRunner()
     out = runner.run(
         '{0}/bin/pip list | grep {1}'
         .format(self.temp_folder, plugin['name'])).std_out
     self.assertIn(package_name, out)
     for dependency in dependencies:
         self.assertIn(dependency, out)
Example #14
 def _assert_plugin_installed(self,
                              package_name,
                              plugin,
                              dependencies=None):
     if not dependencies:
         dependencies = []
     runner = LocalCommandRunner()
     out = runner.run('{0}/bin/pip list | grep {1}'.format(
         self.temp_folder, plugin['name'])).std_out
     self.assertIn(package_name, out)
     for dependency in dependencies:
         self.assertIn(dependency, out)
Example #15
def extract_plugin_name(plugin_dir):
    previous_cwd = os.getcwd()

    try:
        os.chdir(plugin_dir)
        runner = LocalCommandRunner(host=utils.get_local_ip())
        plugin_name = runner.run('{0} {1} {2}'.format(
            _python(),
            path.join(path.dirname(__file__), 'extract_package_name.py'),
            plugin_dir)).std_out
        return plugin_name
    finally:
        os.chdir(previous_cwd)
Example #16
def _fix_virtualenv():

    """
    This method is used for auto-configuration of the virtualenv.
    It is needed in case the environment was created using different paths
    than the ones used at runtime.

    """

    from cloudify_agent.shell.main import get_logger
    logger = get_logger()

    bin_dir = '{0}/bin'.format(VIRTUALENV)

    logger.debug('Searching for executable files in {0}'.format(bin_dir))
    for executable in os.listdir(bin_dir):
        path = os.path.join(bin_dir, executable)
        logger.debug('Checking {0}...'.format(path))
        if not os.path.isfile(path):
            logger.debug('{0} is not a file. Skipping...'.format(path))
            continue
        if os.path.islink(path):
            logger.debug('{0} is a link. Skipping...'.format(path))
            continue
        basename = os.path.basename(path)
        if basename in ['python', 'python2.7', 'python2.6']:
            logger.debug('{0} is the python executable. Skipping...'
                         .format(path))
            continue
        with open(path) as f:
            lines = f.read().split(os.linesep)
            if lines[0].endswith('/bin/python'):
                new_line = '#!{0}/python'.format(bin_dir)
                logger.debug('Replacing {0} with {1}'
                             .format(lines[0], new_line))
                lines[0] = new_line
        with open(path, 'w') as f:
            f.write(os.linesep.join(lines))

    runner = LocalCommandRunner(logger)

    logger.debug('Searching for links in {0}'.format(VIRTUALENV))
    for link in ['archives', 'bin', 'include', 'lib']:
        link_path = '{0}/local/{1}'.format(VIRTUALENV, link)
        logger.debug('Checking {0}...'.format(link_path))
        try:
            runner.run('unlink {0}'.format(link_path))
            runner.run('ln -s {0}/{1} {2}'
                       .format(VIRTUALENV, link, link_path))
        except CommandExecutionException:
            pass
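
The shebang rewrite performed in the loop above can be shown in isolation. This is a sketch under the assumption that the target is a small text script whose first line is a '#!.../bin/python' shebang; it is not the original function.

import os

def rewrite_shebang(script_path, new_interpreter):
    # Replace an absolute '#!<old-venv>/bin/python' shebang with the
    # interpreter of the virtualenv that is actually in use.
    with open(script_path) as f:
        lines = f.read().split(os.linesep)
    if lines and lines[0].startswith('#!') and lines[0].endswith('/bin/python'):
        lines[0] = '#!{0}'.format(new_interpreter)
        with open(script_path, 'w') as f:
            f.write(os.linesep.join(lines))
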
Example #17
 def _test_agent_installation(self, agent):
     if 'user' not in agent:
         agent['user'] = getpass.getuser()
     celery = Celery()
     worker_name = 'celery@{0}'.format(agent['name'])
     inspect = celery.control.inspect(destination=[worker_name])
     self.assertFalse(inspect.active())
     _, path = tempfile.mkstemp()
     with open(path, 'w') as agent_file:
         agent_file.write(json.dumps(agent))
     _, output_path = tempfile.mkstemp()
     runner = LocalCommandRunner()
     runner.run('cfy-agent install-local --agent-file {0} '
                '--output-agent-file {1} --rest-cert-path {2} '
                '--rest-token TOKEN'.format(path, output_path,
                                            self._rest_cert_path))
     self.assertTrue(inspect.active())
     with open(output_path) as new_agent_file:
         new_agent = json.loads(new_agent_file.read())
     command_format = 'cfy-agent daemons {0} --name {1}'.format(
         '{0}', new_agent['name'])
     agent_ssl_cert.verify_remote_cert(new_agent['agent_dir'])
     runner.run(command_format.format('stop'))
     runner.run(command_format.format('delete'))
     self.assertFalse(inspect.active())
     return new_agent
class InstallerTestBase(unittest.TestCase):

    def setUp(self):
        self.logger = setup_logger('InstallerTest')
        config_path = os.environ.get('CONFIG_PATH')
        self.logger.info('Config: {0}'.format(config_path))
        with open(config_path) as config_file:
            self.config = yaml.load(config_file)
        self.logger.info(str(self.config))
        current_ctx.set(MockCloudifyContext())
        self.runner = LocalCommandRunner(self.logger)
        self.base_dir = tempfile.mkdtemp()
        self.logger.info('Base dir: {0}'.format(self.base_dir))
        _, self.script_path = tempfile.mkstemp(dir=self.base_dir,
                                               suffix='.py')
        install_utils.prepare_script({}, self.script_path)

    def tearDown(self):
        shutil.rmtree(self.base_dir)

    def get_agent(self):
        result = {
            'local': True,
            'package_url': self.config['agent_url'],
            'user': self.config['agent_user'],
            'basedir': self.base_dir,
            'manager_ip': '127.0.0.1',
            'name': 'agent_{0}'.format(uuid.uuid4())
        }
        agent_config.prepare_connection(result)
        # We specify base_dir and user directly, so runner is not needed.
        agent_config.prepare_agent(result, None)
        _, agent_file_path = tempfile.mkstemp(dir=self.base_dir)
        with open(agent_file_path, 'a') as agent_file:
            agent_file.write(json.dumps(result))
        result['agent_file'] = agent_file_path
        return result

    def cleanup_agent(self, agent):
        os.remove(agent['agent_file'])

    def call(self, operation, agent):
        agent_config_path = agent['agent_file']
        command = '{0} {1} --operation={2} --config={3}'.format(
            self.config['python_path'],
            self.script_path,
            operation,
            agent_config_path)
        self.logger.info('Calling: "{0}"'.format(command))
        self.runner.run(command)
Example #19
def extract_plugin_name(plugin_dir):
    previous_cwd = os.getcwd()

    try:
        os.chdir(plugin_dir)
        runner = LocalCommandRunner(host=utils.get_local_ip())
        plugin_name = runner.run(
            '{0} {1} {2}'.format(_python(),
                                 path.join(
                                     path.dirname(__file__),
                                     'extract_package_name.py'),
                                 plugin_dir)).std_out
        return plugin_name
    finally:
        os.chdir(previous_cwd)
Example #21
    def setUpClass(cls):

        cls.logger = setup_logger(cls.__name__, logger_level=logging.DEBUG)
        cls.runner = LocalCommandRunner(cls.logger)

        cls.plugins_work_dir = tempfile.mkdtemp(prefix='plugins-work-dir-')
        cls.file_server_resource_base = tempfile.mkdtemp(
            prefix='file-server-resource-base-')
        cls.fs = test_utils.FileServer(root_path=cls.file_server_resource_base)
        cls.fs.start()
        cls.file_server_url = 'http://localhost:{0}'.format(cls.fs.port)

        cls.plugins_to_be_installed = [
            'mock-plugin', 'mock-plugin-modified',
            'mock-plugin-with-requirements'
        ]

        cls.wagons = {}

        for plugin_dir in cls.plugins_to_be_installed:
            test_utils.create_plugin_tar(
                plugin_dir_name=plugin_dir,
                target_directory=cls.file_server_resource_base)
            cls.wagons[plugin_dir] = test_utils.create_plugin_wagon(
                plugin_dir_name=plugin_dir,
                target_directory=cls.file_server_resource_base)
def extract_plugin_name(plugin_url):
    previous_cwd = os.getcwd()
    fetch_plugin_from_pip_by_url = not os.path.isdir(plugin_url)
    plugin_dir = plugin_url
    try:
        if fetch_plugin_from_pip_by_url:
            plugin_dir = tempfile.mkdtemp()
            req_set = pip.req.RequirementSet(build_dir=None,
                                             src_dir=None,
                                             download_dir=None)
            req_set.unpack_url(link=pip.index.Link(plugin_url),
                               location=plugin_dir,
                               download_dir=None,
                               only_download=False)
        os.chdir(plugin_dir)
        return LocalCommandRunner(
            host=utils.get_local_ip()
        ).run('cmd.exe /c "{0} {1} {2}"'.format(
            sys.executable,
            os.path.join(os.path.dirname(__file__), 'extract_package_name.py'),
            plugin_dir)).std_out
    finally:
        os.chdir(previous_cwd)
        if fetch_plugin_from_pip_by_url:
            shutil.rmtree(plugin_dir)
def create_runner(agent_config, validate_connection):
    if agent_config.is_local:
        runner = LocalCommandRunner(logger=ctx.logger)
    elif not agent_config.is_remote:
        runner = StubRunner()
    else:
        host = agent_config['ip']
        try:
            if agent_config.is_windows:
                runner = WinRMRunner(host=host,
                                     port=agent_config.get('port'),
                                     user=agent_config['user'],
                                     password=agent_config.get('password'),
                                     protocol=agent_config.get('protocol'),
                                     uri=agent_config.get('uri'),
                                     logger=ctx.logger,
                                     validate_connection=validate_connection)
            else:
                runner = FabricRunner(
                    host=host,
                    port=agent_config.get('port'),
                    user=agent_config['user'],
                    key=agent_config.get('key'),
                    password=agent_config.get('password'),
                    fabric_env=agent_config.get('fabric_env'),
                    logger=ctx.logger,
                    validate_connection=validate_connection)
        except CommandExecutionError as e:
            message = e.error
            if not message:
                message = 'Failed connecting to host on {0}'.format(host)
            return ctx.operation.retry(message=message)

    return runner
Example #24
def extract_package_name(package_dir):
    """
    Detects the package name of the package located at 'package_dir' as
    specified in the package setup.py file.

    :param package_dir: the directory the package was extracted to.

    :return: the package name
    """
    runner = LocalCommandRunner()
    plugin_name = runner.run('{0} {1} {2}'.format(
        sys.executable,
        os.path.join(os.path.dirname(plugins.__file__),
                     'extract_package_name.py'), package_dir),
                             cwd=package_dir).std_out
    return plugin_name
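
The examples above delegate to an `extract_package_name.py` helper whose contents are not shown here. One common way to obtain the name of a setup.py-based package, given purely as an assumption rather than the actual helper, is to ask setup.py for it directly:

import subprocess
import sys

def package_name_from_setup_py(package_dir):
    # 'python setup.py --name' prints the package name; take the last
    # output line in case setup.py emits warnings first.
    out = subprocess.check_output([sys.executable, 'setup.py', '--name'],
                                  cwd=package_dir)
    return out.decode().strip().splitlines()[-1]
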
Example #25
 def __init__(self, tmp_path, logger, ssl_cert):
     self.daemons = []
     self.temp_folder = str(tmp_path)
     self.username = getpass.getuser()
     self.logger = logger
     self.rest_cert_path = ssl_cert.local_cert_path()
     self.factory = DaemonFactory()
     self.runner = LocalCommandRunner(logger=logger)
Example #26
 def setUp(self):
     super(BaseDaemonLiveTestCase, self).setUp()
     self.celery = Celery(broker='amqp://',
                          backend='amqp://')
     self.celery.conf.update(
         CELERY_TASK_RESULT_EXPIRES=defaults.CELERY_TASK_RESULT_EXPIRES)
     self.runner = LocalCommandRunner(logger=self.logger)
     self.daemons = []
Example #27
def install_package(url):
    """
    Installs a package onto the worker's virtualenv.

    :param url: A URL to the package archive.
    """

    command = '{0} install {1}'.format(_pip(), url)
    LocalCommandRunner().run(command)
Example #28
def _disable_requiretty():

    """
    Disables the requiretty directive in the /etc/sudoers file. This
    will enable operations that require sudo permissions to work properly.

    This is needed because operations are executed
    from within the worker process, which is not a tty process.

    """

    from cloudify_agent.shell.main import get_logger
    runner = LocalCommandRunner(get_logger())

    disable_requiretty_script_path = utils.resource_to_tempfile(
        resource_path='disable-requiretty.sh'
    )
    runner.run('chmod +x {0}'.format(disable_requiretty_script_path))
    runner.run('{0}'.format(disable_requiretty_script_path))
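
The chmod-and-run step above can be expressed with the standard library alone. A small sketch (not the agent's actual code), assuming the resolved path points to a shell script with a shebang line:

import os
import stat
import subprocess

def run_helper_script(script_path):
    # Equivalent of 'chmod +x <script>' followed by executing the script,
    # as done above via LocalCommandRunner.
    mode = os.stat(script_path).st_mode
    os.chmod(script_path, mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    subprocess.check_call([script_path])
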
Example #29
def extract_package_name(package_dir):
    """
    Detects the package name of the package located at 'package_dir' as
    specified in the package setup.py file.

    :param package_dir: the directory the package was extracted to.

    :return: the package name
    """
    runner = LocalCommandRunner()
    plugin_name = runner.run(
        '{0} {1} {2}'.format(
            sys.executable,
            os.path.join(os.path.dirname(plugins.__file__),
                         'extract_package_name.py'),
            package_dir),
        cwd=package_dir
    ).std_out
    return plugin_name
    def _test_agent_installation(self, agent_config, _):
        new_ctx = mock_context()
        current_ctx.set(new_ctx)

        self.assert_daemon_dead(agent_config['name'])
        create_agent(agent_config=agent_config)
        self.wait_for_daemon_alive(agent_config['name'])

        new_agent = ctx.instance.runtime_properties['cloudify_agent']

        agent_ssl_cert.verify_remote_cert(new_agent['agent_dir'])

        command_format = 'cfy-agent daemons {0} --name {1}'.format(
            '{0}',
            new_agent['name'])
        runner = LocalCommandRunner()
        runner.run(command_format.format('stop'))
        runner.run(command_format.format('delete'))

        self.assert_daemon_dead(agent_config['name'])
        return new_agent
class BaseDaemonLiveTestCase(BaseTest):

    def setUp(self):
        super(BaseDaemonLiveTestCase, self).setUp()
        self.runner = LocalCommandRunner(logger=self.logger)
        self.daemons = []

    def tearDown(self):
        super(BaseDaemonLiveTestCase, self).tearDown()
        if os.name == 'nt':
            # on Windows we need to stop and remove the service
            nssm_path = utils.get_absolute_resource_path(
                os.path.join('pm', 'nssm', 'nssm.exe'))
            for daemon in self.daemons:
                self.runner.run('sc stop {0}'.format(daemon.name),
                                exit_on_failure=False)
                self.runner.run('{0} remove {1} confirm'
                                .format(nssm_path, daemon.name),
                                exit_on_failure=False)
        else:
            self.runner.run("pkill -9 -f 'cloudify_agent.worker'",
                            exit_on_failure=False)

    def get_agent_dict(self, env, name='host'):
        node_instances = env.storage.get_node_instances()
        agent_host = [n for n in node_instances if n['name'] == name][0]
        return agent_host['runtime_properties']['cloudify_agent']
class BaseDaemonLiveTestCase(BaseTest):
    def setUp(self):
        super(BaseDaemonLiveTestCase, self).setUp()
        self.runner = LocalCommandRunner(logger=self.logger)
        self.daemons = []

    def tearDown(self):
        super(BaseDaemonLiveTestCase, self).tearDown()
        if os.name == 'nt':
            # on Windows we need to stop and remove the service
            nssm_path = utils.get_absolute_resource_path(
                os.path.join('pm', 'nssm', 'nssm.exe'))
            for daemon in self.daemons:
                self.runner.run('sc stop {0}'.format(daemon.name),
                                exit_on_failure=False)
                self.runner.run('{0} remove {1} confirm'.format(
                    nssm_path, daemon.name),
                                exit_on_failure=False)
        else:
            self.runner.run("pkill -9 -f 'cloudify_agent.worker'",
                            exit_on_failure=False)

    def get_agent_dict(self, env, name='host'):
        node_instances = env.storage.get_node_instances()
        agent_host = [n for n in node_instances if n['name'] == name][0]
        return agent_host['runtime_properties']['cloudify_agent']
Example #33
def create_windows_installer(config, logger):
    version_info = get_agent_version().split('-')
    version = version_info[0]
    if len(version_info) == 1:
        prerelease = 'ga'
        agent_name_suffix = version
    else:
        prerelease = version_info[1]
        agent_name_suffix = '{0}-{1}'.format(version, prerelease)

    temp_agent_path = os.path.join(
        os.getcwd(),
        'cloudify-windows-agent-{0}.exe'.format(agent_name_suffix))

    if not os.path.exists(get_windows_built_agent_path()):
        runner = LocalCommandRunner()
        agent_builder = os.path.join(get_source_uri(), 'packaging', 'windows',
                                     'win_agent_builder.ps1')

        # Run the agent builder with current version and prerelease, '.' dev
        # branch (so that the repo won't be redownloaded) and no upload flag.
        runner.run(
            [
                'C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\'
                'powershell.exe',
                agent_builder,
                version,
                prerelease,
                '.',
                '',
            ],
            cwd=os.path.join(get_source_uri(), '..'),
            stdout_pipe=False,
            stderr_pipe=False,
        )

    shutil.copy(
        get_windows_built_agent_path(),
        temp_agent_path,
    )
Example #34
def _install_plugins(blueprint_path):
    requirements = create_requirements(blueprint_path=blueprint_path)
    logger = get_logger()

    if requirements:
        # Validate we are inside a virtual env
        if not utils.is_virtual_env():
            raise exceptions.CloudifyCliError(
                'You must be running inside a '
                'virtualenv to install blueprint plugins')

        runner = LocalCommandRunner(logger)
        # Dump the requirements to a file and let pip install it.
        # This will utilize pip's mechanism of cleanup in case an installation
        # fails.
        tmp_path = tempfile.mkstemp(suffix='.txt', prefix='requirements_')[1]
        utils.dump_to_file(collection=requirements, file_path=tmp_path)
        command_parts = [sys.executable, '-m', 'pip', 'install', '-r',
                         tmp_path]
        runner.run(command=' '.join(command_parts), stdout_pipe=False)
    else:
        logger.info('There are no plugins to install')
Example #35
def install_blueprint_plugins(blueprint_path):

    requirements = create_requirements(blueprint_path=blueprint_path)

    if requirements:
        # validate we are inside a virtual env
        if not utils.is_virtual_env():
            raise exceptions.CloudifyCliError(
                'You must be running inside a '
                'virtualenv to install blueprint plugins')

        runner = LocalCommandRunner(get_logger())
        # dump the requirements to a file
        # and let pip install it.
        # this will utilize pip's mechanism
        # of cleanup in case an installation fails.
        tmp_path = tempfile.mkstemp(suffix='.txt', prefix='requirements_')[1]
        utils.dump_to_file(collection=requirements, file_path=tmp_path)
        runner.run(command='pip install -r {0}'.format(tmp_path),
                   stdout_pipe=False)
    else:
        get_logger().debug('There are no plugins to install.')
Example #36
def _update_includes(module_paths):

    # Read current AppParameters
    app_parameters = read_app_parameters()

    new_app_parameters = add_module_paths_to_includes(module_paths,
                                                      app_parameters)
    LocalCommandRunner().run(
        'cmd /c "{0} set CloudifyAgent AppParameters {1}"'.format(
            NSSM_PATH, new_app_parameters))

    # Write new AppParameters
    write_app_parameters(new_app_parameters)
Example #37
    def setUp(self):
        super(PluginInstallerTestCase, self).setUp()
        self.temp_folder = tempfile.mkdtemp()

        # Create a virtualenv in a temp folder.
        # This will be used for actually installing plugins during tests.
        os.environ[LOCAL_IP_KEY] = 'localhost'
        LocalCommandRunner().run('virtualenv {0}'.format(self.temp_folder))
        os.environ[VIRTUALENV_PATH_KEY] = self.temp_folder

        self.ctx = MockCloudifyContext(blueprint_id=TEST_BLUEPRINT_ID)
        os.environ[CELERY_WORK_DIR_PATH_KEY] = self.temp_folder
        os.environ[MANAGER_FILE_SERVER_BLUEPRINTS_ROOT_URL_KEY] \
            = MANAGER_FILE_SERVER_BLUEPRINTS_ROOT_URL
 def setUp(self):
     self.logger = setup_logger('InstallerTest')
     config_path = os.environ.get('CONFIG_PATH')
     self.logger.info('Config: {0}'.format(config_path))
     with open(config_path) as config_file:
         self.config = yaml.load(config_file)
     self.logger.info(str(self.config))
     current_ctx.set(MockCloudifyContext())
     self.runner = LocalCommandRunner(self.logger)
     self.base_dir = tempfile.mkdtemp()
     self.logger.info('Base dir: {0}'.format(self.base_dir))
     _, self.script_path = tempfile.mkstemp(dir=self.base_dir,
                                            suffix='.py')
     install_utils.prepare_script({}, self.script_path)
Example #39
    def test_install(self):
        plugin_source = '{0}/{1}/{2}.{3}'.format(
            MANAGER_FILE_SERVER_BLUEPRINTS_ROOT_URL, PLUGINS_DIR, MOCK_PLUGIN,
            TAR_SUFFIX)

        plugin = {'name': MOCK_PLUGIN, 'source': plugin_source}

        ctx = MockCloudifyContext(blueprint_id=TEST_BLUEPRINT_ID)
        install(ctx, plugins=[plugin])
        self._assert_plugin_installed(MOCK_PLUGIN, plugin)

        # Assert includes file was written
        out = LocalCommandRunner().run('cat {0}'.format(
            os.path.join(self.temp_folder, 'celeryd-includes'))).std_out
        self.assertIn('mock_for_test.module', out)
Example #40
    def test_install(self):

        # override get_url to return local paths
        from plugin_installer import tasks
        tasks.get_url = _get_local_path

        plugin = {'name': 'mock-plugin', 'source': 'mock-plugin'}

        install(plugins=[plugin])
        self._assert_plugin_installed('mock-plugin', plugin)

        # Assert includes file was written
        out = LocalCommandRunner().run('cat {0}'.format(
            os.path.join(self.temp_folder, 'celeryd-includes'))).std_out
        self.assertIn('mock_for_test.module', out)
Example #41
def extract_module_paths(url):

    plugin_name = extract_plugin_name(url)

    module_paths = []
    files = LocalCommandRunner().run('{0} show -f {1}'.format(
        _pip(), plugin_name)).std_out.splitlines()
    for module in files:
        if module.endswith('.py') and '__init__' not in module:
            # the file paths are relative to the package __init__.py file.
            module_paths.append(
                module.replace('../', '').replace('/',
                                                  '.').replace('.py',
                                                               '').strip())
    return module_paths
Example #42
def create_windows_installer(config, logger):
    runner = LocalCommandRunner()
    wheelhouse = resources.get_resource('winpackage/source/wheels')

    pip_cmd = 'pip wheel --wheel-dir {wheel_dir} --requirement {req_file}'.\
        format(wheel_dir=wheelhouse, req_file=config['requirements_file'])

    logger.info('Building wheels into: {0}'.format(wheelhouse))
    runner.run(pip_cmd)

    pip_cmd = 'pip wheel --find-links {wheel_dir} --wheel-dir {wheel_dir} ' \
              '{repo_url}'.format(wheel_dir=wheelhouse,
                                  repo_url=config['cloudify_agent_module'])
    runner.run(pip_cmd)

    iscc_cmd = 'C:\\Program Files (x86)\\Inno Setup 5\\iscc.exe {0}'\
        .format(resources.get_resource(
            os.path.join('winpackage', 'create.iss')))
    os.environ['VERSION'] = '0'
    os.environ['iscc_output'] = os.getcwd()
    runner.run(iscc_cmd)
Example #43
def _fix_virtualenv():
    """
    This method is used for auto-configuration of the virtualenv.
    It is needed in case the environment was created using different paths
    than the ones used at runtime.

    """

    from cloudify_agent.shell.main import get_logger
    logger = get_logger()

    bin_dir = '{0}/bin'.format(VIRTUALENV)

    logger.debug('Searching for executable files in {0}'.format(bin_dir))
    for executable in os.listdir(bin_dir):
        path = os.path.join(bin_dir, executable)
        logger.debug('Checking {0}...'.format(path))
        if not os.path.isfile(path):
            logger.debug('{0} is not a file. Skipping...'.format(path))
            continue
        if os.path.islink(path):
            logger.debug('{0} is a link. Skipping...'.format(path))
            continue
        basename = os.path.basename(path)
        if basename in ['python', 'python2.7', 'python2.6']:
            logger.debug(
                '{0} is the python executable. Skipping...'.format(path))
            continue
        with open(path) as f:
            lines = f.read().split(os.linesep)
            if lines[0].endswith('/bin/python'):
                new_line = '#!{0}/python'.format(bin_dir)
                logger.debug('Replacing {0} with {1}'.format(
                    lines[0], new_line))
                lines[0] = new_line
        with open(path, 'w') as f:
            f.write(os.linesep.join(lines))

    runner = LocalCommandRunner(logger)

    logger.debug('Searching for links in {0}'.format(VIRTUALENV))
    for link in ['archives', 'bin', 'include', 'lib']:
        link_path = '{0}/local/{1}'.format(VIRTUALENV, link)
        logger.debug('Checking {0}...'.format(link_path))
        try:
            runner.run('unlink {0}'.format(link_path))
            runner.run('ln -s {0}/{1} {2}'.format(VIRTUALENV, link, link_path))
        except CommandExecutionException:
            pass
Example #44
def install_package(extracted_plugin_dir, install_args):
    """
    Installs a package onto the worker's virtualenv.

    :param extracted_plugin_dir: The directory containing the extracted
                                 plugin. If the plugin's source property is a
                                 URL, this is the directory the plugin was
                                 unpacked to.
    :param install_args: Arguments passed to pip install,
                         e.g. -r requirements.txt
    """

    previous_cwd = os.getcwd()

    try:
        os.chdir(extracted_plugin_dir)

        command = '{0} install . {1}'.format(_pip(), install_args)
        LocalCommandRunner(host=utils.get_local_ip()).run(command)
    finally:
        os.chdir(previous_cwd)
def extract_module_paths(module_name):

    module_paths = []
    files = LocalCommandRunner(host=utils.get_local_ip())\
        .run('cmd /c "{0}\\Scripts\\pip.exe show -f {1}"'
             .format(sys.prefix, module_name)).std_out.splitlines()
    for module in files:
        if module.endswith(".py") and "__init__" not in module:
            if module.endswith("-script.py"):
                script_stripped = module[:-len("-script.py")]
                potential_exe_file = "{0}.exe".format(script_stripped)
                if potential_exe_file in files:
                    # file is a console script "entry_point"
                    continue

            # the file paths are relative to the package __init__.py file.
            module_paths.append(
                module.replace("..\\", "").replace("\\", ".")
                .replace(".py", "")
                .strip())
    return ','.join(module_paths)
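
The console-script filter above pairs each '<name>-script.py' file with a matching '<name>.exe' launcher to skip entry points. In isolation, that rule looks roughly like this (illustrative helper, not from the original module):

def is_console_script(module, all_files):
    # A '<name>-script.py' generated by pip/setuptools on Windows has a
    # sibling '<name>.exe' launcher; such pairs are console-script entry
    # points rather than importable task modules.
    if module.endswith('-script.py'):
        exe = '{0}.exe'.format(module[:-len('-script.py')])
        return exe in all_files
    return False
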
class LocalCommandRunnerTest(unittest.TestCase):

    from cloudify.utils import LocalCommandRunner
    runner = LocalCommandRunner()

    @classmethod
    def setUpClass(cls):
        os.environ[LOCAL_IP_KEY] = 'localhost'

    def test_run_command_success(self):
        command_execution_result = self.runner.run('echo Hello')
        self.assertEqual('Hello', command_execution_result.std_out.strip())
        self.assertEqual(0, command_execution_result.return_code)
        self.assertEqual('', command_execution_result.std_err)

    def test_run_command_error(self):
        try:
            self.runner.run('/bin/sh -c bad')
            self.fail('Expected CommandExecutionException due to Bad command')
        except CommandExecutionException as e:
            self.assertEqual(1, e.code)
Example #47
def install_celery_plugin(plugin_url):
    '''

    Installs celery tasks into the cloudify agent.

        1. Installs the plugin into the current python installation directory.
        2. Adds the python files into the agent includes directive.

    :param plugin_url: URL to an archive of the plugin.
    :return:
    '''

    command = 'cmd /c "{0}\\Scripts\\pip.exe install --process-dependency-links {1}"'\
              .format(sys.prefix, plugin_url)
    LocalCommandRunner().run(command)

    plugin_name = plugin_utils.extract_plugin_name(plugin_url)

    module_paths = plugin_utils.extract_module_paths(plugin_name)

    _update_includes(module_paths)
def create_windows_installer():
    runner = LocalCommandRunner()
    wheelhouse = get_resource('winpackage/source/wheels')

    pip_cmd = 'pip wheel --wheel-dir {wheel_dir} --requirement {req_file}'.\
        format(wheel_dir=wheelhouse, req_file=config['requirements_file'])

    ctx.logger.info('Building wheels into: {0}'.format(wheelhouse))
    runner.run(pip_cmd)

    pip_cmd = 'pip wheel --find-links {wheel_dir} --wheel-dir {wheel_dir} ' \
              '{repo_url}'.format(wheel_dir=wheelhouse,
                                  repo_url=config['cloudify_agent_module'])
    runner.run(pip_cmd)

    iscc_cmd = 'C:\\Program Files (x86)\\Inno Setup 5\\iscc.exe {0}'\
        .format(get_resource(os.path.join('winpackage', 'create.iss')))
    os.environ['VERSION'] = '0'
    os.environ['iscc_output'] = os.getcwd()
    runner.run(iscc_cmd)
 def _test_agent_installation(self, agent):
     if 'user' not in agent:
         agent['user'] = getpass.getuser()
     celery = Celery()
     worker_name = 'celery@{0}'.format(agent['name'])
     inspect = celery.control.inspect(destination=[worker_name])
     self.assertFalse(inspect.active())
     _, path = tempfile.mkstemp()
     with open(path, 'w') as agent_file:
         agent_file.write(json.dumps(agent))
     _, output_path = tempfile.mkstemp()
     runner = LocalCommandRunner()
     runner.run('cfy-agent install-local --agent-file {0} '
                '--output-agent-file {1}'.format(path, output_path))
     self.assertTrue(inspect.active())
     with open(output_path) as new_agent_file:
         new_agent = json.loads(new_agent_file.read())
     command_format = 'cfy-agent daemons {0} --name {1}'.format(
         '{0}',
         new_agent['name'])
     runner.run(command_format.format('stop'))
     runner.run(command_format.format('delete'))
     self.assertFalse(inspect.active())
     return new_agent
Example #50
class Daemon(object):

    """
    Base class for daemon implementations.
    The following are all the available common daemon keyword arguments.
    They will be available to any daemon, without any configuration, as
    instance attributes.

    ``user``:

        the user this daemon will run under. defaults to the current user.

    ``name``:

        the name to give the daemon. This name will be a unique identifier of
        the daemon, meaning you will not be able to create more daemons with
        that name until a delete operation has been performed. defaults to
        a unique name generated by cloudify.

    ``queue``:

        the queue this daemon will listen to. It is possible to create
        different workers with the same queue; however, this is discouraged.
        To create more workers that process tasks from a given queue, use the
        'min_workers' and 'max_workers' keys. defaults to <name>-queue.

    ``host``:

        the ip address of the host the agent will be started on. this
        property is used only when the 'queue' or 'name' properties are
        omitted, in order to retrieve the agent name and queue from the
        manager. in such a case, this property must match the 'ip' runtime
        property given to the corresponding Compute node.

    ``deployment_id``:

        the deployment id this agent will be a part of. this
        property is used only when the 'queue' or 'name' properties are omitted,
        in order to retrieve the agent name and queue from the manager.

    ``workdir``:

        working directory for runtime files (pid, log).
        defaults to the current working directory.

    ``broker_ip``:

        the host name or ip address of the broker to connect to.

    ``broker_ssl_enabled``:

        Whether SSL is enabled for the broker.

    ``broker_ssl_cert``:

        The SSL public certificate for the broker, if SSL is enabled on the
        broker. This should be in PEM format and should be the string
        representation, including the 'BEGIN CERTIFICATE' header and footer.

    ``broker_user``:

        the username for the broker connection
        defaults to 'guest'

    ``broker_pass``:

        the password for the broker connection
        defaults to 'guest'

    ``file_server_host``:

        the IP or hostname of the file server (Required)

    ``file_server_port``:

        the port of the file server (Required)

    ``file_server_protocol``:

        the protocol of the file server. defaults to HTTP.

    ``rest_host``:

        the ip address/host name of the manager, running the
        REST service. (Required)

    ``rest_protocol``:

        the protocol to use in REST call. defaults to HTTP.

    ``rest_port``:

        the manager REST gateway port to connect to. defaults to 80.

    ``security_enabled``:

        True if REST security is enabled, False otherwise

    ``rest_username``:

        the username to use in REST calls. No default.

    ``rest_password``:

        the password to use in REST calls. No default.

    ``rest_token``:

        the token to use in REST calls. No default.

    ``verify_rest_certificate``:

        indicates whether agents should verify the REST server's SSL
        certificate or not

    ``local_rest_cert_file``:

        A path to a local copy of the manager's SSL certificate, to be used
        for certificate verification if SSL is enabled.

    ``rest_ssl_cert_content``:
        The content of the REST SSL certificate, to be written to
        local_rest_cert_file

    ``min_workers``:

        the minimum number of worker processes this daemon will manage. all
        workers will listen on the same queue allowing for higher
        concurrency when performing tasks. defaults to 0.

    ``max_workers``:

        the maximum number of worker processes this daemon will manage.
        as tasks keep coming in, the daemon will expand its worker pool to
        handle more tasks concurrently. However, as the name
        suggests, it will never exceed this number, allowing for control
        of resource usage. defaults to 5.

    ``extra_env_path``:

        path to a file containing environment variables to be added to the
        daemon environment. the file should be in the format of
        multiple 'export A=B' lines for linux, or 'set A=B' for windows.
        defaults to None.

    ``log_level``:

        log level of the daemon process itself. defaults to debug.

    ``log_file``:

        location of the daemon log file. defaults to <workdir>/<name>.log

    ``pid_file``:

        location of the daemon pid file. defaults to <workdir>/<name>.pid

    """

    # override this when adding implementations.
    PROCESS_MANAGEMENT = None

    # add specific mandatory parameters for different implementations.
    # they will be validated upon daemon creation
    MANDATORY_PARAMS = [
        'rest_host',
        'broker_ip',
        'file_server_host'
    ]
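
    # For illustration only (not from the original source): a concrete
    # subclass would typically be instantiated with at least the mandatory
    # parameters listed above, e.g.
    #
    #   daemon = SomeDaemonSubclass(rest_host='10.0.0.1',
    #                               broker_ip='10.0.0.2',
    #                               file_server_host='10.0.0.1',
    #                               name='agent-1')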

    def __init__(self, logger=None, **params):

        """

        ####################################################################
        # When subclassing this, do not implement any logic inside the
        # constructor except for in-memory calculations and settings, as the
        # daemon may be instantiated many times for an existing agent. Also,
        # all daemon attributes must be JSON serializable, as daemons are
        # represented as dictionaries and stored as JSON files on disk. If
        # you wish to have a non-serializable attribute, mark it private by
        # naming it _<name>. Attributes starting with an underscore will be
        # omitted when serializing the object.
        ####################################################################

        :param logger: a logger to be used to log various subsequent
        operations.
        :type logger: logging.Logger

        :param params: key-value pairs as stated above.
        :type params: dict

        """

        # will be populated later on with runtime properties of the host
        # node instance this agent is dedicated for (if needed)
        self._runtime_properties = None

        # configure logger
        self._logger = logger or setup_logger(
            logger_name='cloudify_agent.api.pm.{0}'
            .format(self.PROCESS_MANAGEMENT))

        # save params
        self._params = params

        # configure command runner
        self._runner = LocalCommandRunner(logger=self._logger)

        # Mandatory parameters
        self.validate_mandatory()
        self.file_server_host = params['file_server_host']
        self.rest_host = params['rest_host']
        self.broker_ip = params['broker_ip']

        # Optional parameters - REST client
        self.validate_optional()
        self.rest_port = params.get(
            'rest_port') or defaults.REST_PORT
        self.rest_protocol = params.get(
            'rest_protocol') or defaults.REST_PROTOCOL
        self.file_server_port = params.get(
            'file_server_port') or defaults.FILE_SERVER_PORT
        self.file_server_protocol = params.get(
            'file_server_protocol') or defaults.FILE_SERVER_PROTOCOL
        self.verify_rest_certificate = params.get('verify_rest_certificate')
        self.local_rest_cert_file = params.get('local_rest_cert_file', '')
        self.rest_cert_content = params.get('rest_ssl_cert_content', '')
        self.security_enabled = params.get('security_enabled')
        # REST credentials need to be prefixed with _ so they're not stored
        # when the daemon is serialized
        self._rest_username = params.get('rest_username')
        self._rest_password = params.get('rest_password')
        self._rest_token = params.get('rest_token')

        # Optional parameters
        self.name = params.get(
            'name') or self._get_name_from_manager()
        self.user = params.get('user') or getpass.getuser()
        self.broker_ssl_enabled = params.get('broker_ssl_enabled', False)
        self.broker_ssl_cert_content = params.get('broker_ssl_cert', '')
        self.broker_ssl_cert_path = params.get('broker_ssl_cert_path', '')
        # Port must be determined after SSL enabled has been set in order for
        # intelligent port selection to work properly
        self.broker_port = self._get_broker_port()
        self.broker_user = params.get('broker_user', 'guest')
        self.broker_pass = params.get('broker_pass', 'guest')
        self.host = params.get('host')
        self.deployment_id = params.get('deployment_id')
        self.security_enabled = params.get('security_enabled')
        self.verify_rest_certificate = params.get('verify_rest_certificate')
        self.local_rest_cert_file = params.get('local_rest_cert_file', '')
        self.rest_cert_content = params.get('rest_ssl_cert_content', '')
        self.queue = params.get(
            'queue') or self._get_queue_from_manager()

        # This is no longer retrieved as a param option, as that would
        # introduce ambiguity over which values should be used if its
        # components differ from the passed-in broker_user, broker_pass, etc.
        # These components need to be known by the _delete_amqp_queues
        # function.
        self.broker_url = defaults.BROKER_URL.format(
            host=self.broker_ip,
            port=self.broker_port,
            username=self.broker_user,
            password=self.broker_pass,
        )
        self.min_workers = params.get(
            'min_workers') or defaults.MIN_WORKERS
        self.max_workers = params.get(
            'max_workers') or defaults.MAX_WORKERS
        self.workdir = params.get(
            'workdir') or os.getcwd()
        self.extra_env_path = params.get('extra_env_path')
        self.log_level = params.get('log_level') or defaults.LOG_LEVEL
        self.log_file = params.get(
            'log_file') or os.path.join(self.workdir,
                                        '{0}.log'.format(self.name))
        self.pid_file = params.get(
            'pid_file') or os.path.join(self.workdir,
                                        '{0}.pid'.format(self.name))

        # create the working directory if it is missing
        if not os.path.exists(self.workdir):
            self._logger.debug('Creating directory: {0}'.format(self.workdir))
            os.makedirs(self.workdir)

        # save as attributes so that they will be persisted in the json files.
        # we will make use of these values when loading agents by name.
        self.process_management = self.PROCESS_MANAGEMENT
        self.virtualenv = VIRTUALENV

    def _get_celery_conf_path(self):
        return os.path.join(self.workdir, 'broker_config.json')

    def create_celery_conf(self):
        self._logger.info('Deploying celery configuration.')
        config = {
            'broker_ssl_enabled': self.broker_ssl_enabled,
            'broker_cert_path': self.broker_ssl_cert_path,
            'broker_username': self.broker_user,
            'broker_password': self.broker_pass,
            'broker_hostname': self.broker_ip,
        }
        with open(self._get_celery_conf_path(), 'w') as conf_handle:
            json.dump(config, conf_handle)

    def validate_mandatory(self):

        """
        Validates that all mandatory parameters are given.

        :raise DaemonMissingMandatoryPropertyError: in case one of the
        mandatory parameters is missing.
        """

        for param in self.MANDATORY_PARAMS:
            if param not in self._params:
                raise exceptions.DaemonMissingMandatoryPropertyError(param)

    def validate_optional(self):

        """
        Validates any optional parameters given to the daemon.

        :raise DaemonPropertiesError:
        in case one of the parameters is faulty.
        """

        self._validate_autoscale()
        self._validate_host()

    def _get_broker_port(self):
        """
        Determines the broker port if it has not been provided. Only intended
        to be called before self.broker_port has been set and after
        self.broker_ssl_enabled has been set.
        """
        if self.broker_ssl_enabled:
            return constants.BROKER_PORT_SSL
        else:
            return constants.BROKER_PORT_NO_SSL

    def _is_agent_registered(self):
        celery_client = utils.get_celery_client(
            broker_url=self.broker_url,
            broker_ssl_enabled=self.broker_ssl_enabled,
            broker_ssl_cert_path=self.broker_ssl_cert_path)
        try:
            self._logger.debug('Retrieving daemon registered tasks')
            return utils.get_agent_registered(
                    self.name,
                    celery_client,
                    timeout=AGENT_IS_REGISTERED_TIMEOUT)
        finally:
            if celery_client:
                celery_client.close()

    ########################################################################
    # the following methods must be implemented by the sub-classes as they
    # may exhibit custom logic. usually this would be related to process
    # management specific configuration files.
    ########################################################################

    def delete(self, force=defaults.DAEMON_FORCE_DELETE):

        """
        Delete any resources created for the daemon in the 'configure' method.

        :param force: if the daemon is still running, stop it before
                      deleting it.
        """
        raise NotImplementedError('Must be implemented by a subclass')

    def start_command(self):

        """
        Construct a command line for starting the daemon.
        (e.g sudo service <name> start)

        :return a one liner command to start the daemon process.
        """
        raise NotImplementedError('Must be implemented by a subclass')

    def stop_command(self):

        """
        Construct a command line for stopping the daemon.
        (e.g sudo service <name> stop)

        :return a one liner command to stop the daemon process.
        """
        raise NotImplementedError('Must be implemented by a subclass')

    def status(self):

        """
        Query the daemon status. This method can usually be implemented
        by simply running the status command. However, this is not always
        the case, as different commands and process management tools may
        behave differently.

        :return: True if the service is running, False otherwise
        """
        raise NotImplementedError('Must be implemented by a subclass')

    def create_script(self):
        raise NotImplementedError('Must be implemented by a subclass')

    def create_config(self):
        raise NotImplementedError('Must be implemented by a subclass')

    ########################################################################
    # the following methods contain the common logic that applies to any
    # process management implementation.
    ########################################################################

    def create(self):

        """
        Creates the agent. This method may serve as a hook for custom
        logic that needs to run after the instance
        has been instantiated.

        """
        self._logger.debug('Daemon created')

    def configure(self):

        """
        Creates any necessary resources for the daemon. After this method
        has completed successfully, it should be possible to start the daemon
        by running the command returned by the `start_command` method.

        """
        self.create_script()
        self.create_config()
        self.create_celery_conf()

    def start(self,
              interval=defaults.START_INTERVAL,
              timeout=defaults.START_TIMEOUT,
              delete_amqp_queue=defaults.DELETE_AMQP_QUEUE_BEFORE_START):

        """
        Starts the daemon process.

        :param interval: the interval in seconds to sleep when waiting for
                         the daemon to be ready.
        :param timeout: the timeout in seconds to wait for the daemon to be
                        ready.
        :param delete_amqp_queue: delete any queues with the name of the
                                  current daemon queue in the broker.

        :raise DaemonStartupTimeout: in case the agent failed to start in the
        given amount of time.
        :raise DaemonException: in case an error happened during the agent
        startup.

        """

        if delete_amqp_queue:
            self._logger.debug('Deleting AMQP queues')
            self._delete_amqp_queues()
        start_command = self.start_command()
        self._logger.info('Starting daemon with command: {0}'
                          .format(start_command))
        self._runner.run(start_command)
        end_time = time.time() + timeout
        while time.time() < end_time:
            self._logger.debug('Querying daemon {0} registered tasks'.format(
                self.name))
            if self._is_agent_registered():
                # make sure the status command recognizes the daemon is up
                status = self.status()
                if status:
                    self._logger.debug('Daemon {0} has started'
                                       .format(self.name))
                    return
            self._logger.debug('Daemon {0} has not started yet. '
                               'Sleeping for {1} seconds...'
                               .format(self.name, interval))
            time.sleep(interval)
        self._logger.debug('Verifying there were no unhandled '
                           'exceptions during startup')
        self._verify_no_celery_error()
        raise exceptions.DaemonStartupTimeout(timeout, self.name)
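
    # Hedged usage sketch (not part of the original example): the lifecycle
    # implied by the methods above, driven through a hypothetical concrete
    # subclass; the subclass name and all parameter values are placeholders.
    #
    #     daemon = SomeProcessManagementDaemon(rest_host='10.0.0.1',
    #                                          broker_ip='10.0.0.1',
    #                                          local_rest_cert_file='/path/cert',
    #                                          name='agent-1',
    #                                          queue='agent-1-queue')
    #     daemon.create()
    #     daemon.configure()   # create_script, create_config, create_celery_conf
    #     daemon.start(interval=1, timeout=60)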

    def stop(self,
             interval=defaults.STOP_INTERVAL,
             timeout=defaults.STOP_TIMEOUT):

        """
        Stops the daemon process.

        :param interval: the interval in seconds to sleep when waiting for
                         the daemon to stop.
        :param timeout: the timeout in seconds to wait for the daemon to stop.

        :raise DaemonShutdownTimeout: in case the agent failed to be stopped
        in the given amount of time.
        :raise DaemonException: in case an error happened during the agent
        shutdown.

        """

        stop_command = self.stop_command()
        self._logger.info('Stopping daemon with command: {0}'
                          .format(stop_command))
        self._runner.run(stop_command)
        end_time = time.time() + timeout
        while time.time() < end_time:
            self._logger.debug('Querying daemon {0} registered tasks'.format(
                self.name))
            # check the process has shutdown
            if not self._is_agent_registered():
                # make sure the status command also recognizes the
                # daemon is down
                status = self.status()
                if not status:
                    self._logger.debug('Daemon {0} has shut down'
                                       .format(self.name))
                    self._logger.debug('Deleting AMQP queues')
                    self._delete_amqp_queues()
                    return
            self._logger.debug('Daemon {0} is still running. '
                               'Sleeping for {1} seconds...'
                               .format(self.name, interval))
            time.sleep(interval)
        self._logger.debug('Verifying there were no unhandled '
                           'exceptions during shutdown')
        self._verify_no_celery_error()
        raise exceptions.DaemonShutdownTimeout(timeout, self.name)

    def restart(self,
                start_timeout=defaults.START_TIMEOUT,
                start_interval=defaults.START_INTERVAL,
                stop_timeout=defaults.STOP_TIMEOUT,
                stop_interval=defaults.STOP_INTERVAL):

        """
        Restart the daemon process.

        :param start_interval: the interval in seconds to sleep when waiting
                               for the daemon to start.
        :param start_timeout: The timeout in seconds to wait for the daemon
                              to start.
        :param stop_interval: the interval in seconds to sleep when waiting
                              for the daemon to stop.
        :param stop_timeout: the timeout in seconds to wait for the daemon
                             to stop.

        :raise DaemonStartupTimeout: in case the agent failed to start in the
        given amount of time.
        :raise DaemonShutdownTimeout: in case the agent failed to be stopped
        in the given amount of time.
        :raise DaemonException: in case an error happened during startup or
        shutdown

        """

        self.stop(timeout=stop_timeout,
                  interval=stop_interval)
        self.start(timeout=start_timeout,
                   interval=start_interval)

    def before_self_stop(self):

        """
        Optional method that can be implemented by subclasses. This method
        will be called before operations that involve the daemon stopping
        itself and therefore, can be used for cleanup purposes.
        """
        pass

    def get_logfile(self):

        """
        Injects the worker-id placeholder into the log file path. Celery
        replaces this placeholder with the worker id, making sure that
        at most one process writes to a specific log file.

        """

        path, extension = os.path.splitext(self.log_file)
        return '{0}{1}{2}'.format(path,
                                  self.get_worker_id_placeholder(),
                                  extension)
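
    # For example, with log_file '/var/log/cloudify/agent-1.log' and the '%I'
    # placeholder returned by get_worker_id_placeholder below, get_logfile
    # yields '/var/log/cloudify/agent-1%I.log', which Celery expands per
    # worker process so that no two processes share a log file.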

    def get_worker_id_placeholder(self):

        """
        Placeholder suitable for linux systems.

        """

        return '%I'

    def _verify_no_celery_error(self):

        error_dump_path = os.path.join(
            utils.internal.get_storage_directory(self.user),
            '{0}.err'.format(self.name))

        # this means the celery worker had an uncaught
        # exception and it wrote its content
        # to the file above because of our custom exception
        # handler (see app.py)
        if os.path.exists(error_dump_path):
            with open(error_dump_path) as f:
                error = f.read()
            os.remove(error_dump_path)
            raise exceptions.DaemonError(error)

    def _delete_amqp_queues(self):
        client = amqp_client.create_client(
            amqp_host=self.broker_ip,
            amqp_user=self.broker_user,
            amqp_pass=self.broker_pass,
            ssl_enabled=self.broker_ssl_enabled,
            ssl_cert_path=self.broker_ssl_cert_path,
        )

        try:
            channel = client.connection.channel()
            self._logger.debug('Deleting queue: {0}'.format(self.queue))

            channel.queue_delete(self.queue)
            pid_box_queue = 'celery@{0}.celery.pidbox'.format(self.name)
            self._logger.debug('Deleting queue: {0}'.format(pid_box_queue))
            channel.queue_delete(pid_box_queue)
        finally:
            try:
                client.close()
            except Exception as e:
                self._logger.warning('Failed closing amqp client: {0}'
                                     .format(e))

    def _validate_autoscale(self):
        min_workers = self._params.get('min_workers')
        max_workers = self._params.get('max_workers')
        if min_workers:
            if not str(min_workers).isdigit():
                raise exceptions.DaemonPropertiesError(
                    'min_workers is supposed to be a number '
                    'but is: {0}'
                    .format(min_workers)
                )
            min_workers = int(min_workers)
        if max_workers:
            if not str(max_workers).isdigit():
                raise exceptions.DaemonPropertiesError(
                    'max_workers is supposed to be a number '
                    'but is: {0}'
                    .format(max_workers)
                )
            max_workers = int(max_workers)
        if min_workers and max_workers:
            if min_workers > max_workers:
                raise exceptions.DaemonPropertiesError(
                    'min_workers cannot be greater than max_workers '
                    '[min_workers={0}, max_workers={1}]'
                    .format(min_workers, max_workers))

    def _validate_host(self):
        queue = self._params.get('queue')
        host = self._params.get('host')
        if not queue and not host:
            raise exceptions.DaemonPropertiesError(
                'host must be supplied when queue is omitted'
            )

    def _validate_deployment_id(self):
        queue = self._params.get('queue')
        host = self._params.get('deployment_id')
        if not queue and not host:
            raise exceptions.DaemonPropertiesError(
                'deployment_id must be supplied when queue is omitted'
            )

    def _get_name_from_manager(self):
        if self._runtime_properties is None:
            self._get_runtime_properties()
        return self._runtime_properties['cloudify_agent']['name']

    def _get_queue_from_manager(self):
        if self._runtime_properties is None:
            self._get_runtime_properties()
        return self._runtime_properties['cloudify_agent']['queue']

    def _get_runtime_properties(self):
        client = utils.get_rest_client(
            security_enabled=self.security_enabled,
            rest_host=self.rest_host,
            rest_protocol=self.rest_protocol,
            rest_port=self.rest_port,
            rest_username=self._rest_username,
            rest_password=self._rest_password,
            rest_token=self._rest_token,
            verify_rest_certificate=self.verify_rest_certificate,
            ssl_cert_path=self.local_rest_cert_file
        )
        node_instances = client.node_instances.list(
            deployment_id=self.deployment_id)

        def match_ip(node_instance):
            host_id = node_instance.host_id
            if host_id == node_instance.id:
                # compute node instance
                return self.host == node_instance.runtime_properties['ip']
            return False

        matched = filter(match_ip, node_instances)

        if len(matched) > 1:
            raise exceptions.DaemonConfigurationError(
                'Found multiple node instances with ip {0}: {1}'.format(
                    self.host, ','.join(instance.id for instance in matched))
            )

        if len(matched) == 0:
            raise exceptions.DaemonConfigurationError(
                'No node instances with ip {0} were found'.format(self.host)
            )
        self._runtime_properties = matched[0].runtime_properties

    def _list_plugin_files(self, plugin_name):

        """
        Retrieves python files related to the plugin.
        __init__ files are filtered out.

        :param plugin_name: The plugin name.

        :return: A list of file paths.
        :rtype: list of str
        """

        module_paths = []
        runner = LocalCommandRunner(self._logger)

        files = runner.run(
            '{0} show -f {1}'
            .format(utils.get_pip_path(), plugin_name)
        ).std_out.splitlines()
        for module in files:
            if self._is_valid_module(module):
                # the file paths are relative to the
                # package __init__.py file.
                prefix = '../' if os.name == 'posix' else '..\\'
                module_paths.append(
                    module.replace(prefix, '')
                    .replace(os.sep, '.').replace('.py', '').strip())
        return module_paths

    @staticmethod
    def _is_valid_module(module):
        if not module.endswith('py'):
            return False
        if '__init__' in module:
            return False
        if '-' in os.path.basename(module):
            return False
        return True
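
# A minimal, self-contained sketch (not part of the original example) of the
# path-to-module transformation performed by _list_plugin_files above; the
# sample pip output line in the trailing comment is hypothetical.
import os


def to_module_path(line):
    # pip lists files relative to the package __init__.py, e.g. '../pkg/mod.py'
    prefix = '../' if os.name == 'posix' else '..\\'
    return (line.replace(prefix, '')
            .replace(os.sep, '.')
            .replace('.py', '')
            .strip())


# e.g. on a posix system:
#   to_module_path('../my_plugin/tasks.py')  ->  'my_plugin.tasks'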
Example #51
0
def call_entry_point(**_):
    runner = LocalCommandRunner()
    return runner.run('mock-plugin-entry-point').std_out
Example #52
0
class BaseDaemonLiveTestCase(BaseTest):

    def setUp(self):
        super(BaseDaemonLiveTestCase, self).setUp()
        self.celery = Celery(broker='amqp://',
                             backend='amqp://')
        self.celery.conf.update(
            CELERY_TASK_RESULT_EXPIRES=defaults.CELERY_TASK_RESULT_EXPIRES)
        self.runner = LocalCommandRunner(logger=self.logger)
        self.daemons = []

    def tearDown(self):
        super(BaseDaemonLiveTestCase, self).tearDown()
        if os.name == 'nt':
            # with windows we need to stop and remove the service
            nssm_path = utils.get_absolute_resource_path(
                os.path.join('pm', 'nssm', 'nssm.exe'))
            for daemon in self.daemons:
                self.runner.run('sc stop {0}'.format(daemon.name),
                                exit_on_failure=False)
                self.runner.run('{0} remove {1} confirm'
                                .format(nssm_path, daemon.name),
                                exit_on_failure=False)
        else:
            self.runner.run("pkill -9 -f 'celery'", exit_on_failure=False)

    def assert_registered_tasks(self, name, additional_tasks=None):
        if not additional_tasks:
            additional_tasks = set()
        destination = 'celery@{0}'.format(name)
        c_inspect = self.celery.control.inspect(destination=[destination])
        registered = c_inspect.registered() or {}

        def include(task):
            return 'celery' not in task

        daemon_tasks = set(filter(include, set(registered[destination])))
        expected_tasks = set(BUILT_IN_TASKS)
        expected_tasks.update(additional_tasks)
        self.assertEqual(expected_tasks, daemon_tasks)

    def assert_daemon_alive(self, name):
        stats = utils.get_agent_stats(name, self.celery)
        self.assertTrue(stats is not None)

    def assert_daemon_dead(self, name):
        stats = utils.get_agent_stats(name, self.celery)
        self.assertTrue(stats is None)

    def wait_for_daemon_alive(self, name, timeout=10):
        deadline = time.time() + timeout

        while time.time() < deadline:
            stats = utils.get_agent_stats(name, self.celery)
            if stats:
                return
            self.logger.info('Waiting for daemon {0} to start...'
                             .format(name))
            time.sleep(5)
        raise RuntimeError('Failed waiting for daemon {0} to start. Waited '
                           'for {1} seconds'.format(name, timeout))

    def wait_for_daemon_dead(self, name, timeout=10):
        deadline = time.time() + timeout

        while time.time() < deadline:
            stats = utils.get_agent_stats(name, self.celery)
            if not stats:
                return
            self.logger.info('Waiting for daemon {0} to stop...'
                             .format(name))
            time.sleep(1)
        raise RuntimeError('Failed waiting for daemon {0} to stop. Waited '
                           'for {1} seconds'.format(name, timeout))
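
    # Hedged usage sketch (not part of the original example): how a live test
    # built on this base class would typically exercise the helpers above;
    # `daemon` stands for a daemon created by the concrete test case.
    #
    #     daemon.start()
    #     self.wait_for_daemon_alive(daemon.name)
    #     self.assert_registered_tasks(daemon.name)
    #     daemon.stop()
    #     self.wait_for_daemon_dead(daemon.name)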
Example #53
0
    def setUp(self):
        super(BaseDaemonLiveTestCase, self).setUp()
        self.runner = LocalCommandRunner(logger=self.logger)
        self.daemons = []
Example #54
0
    def __init__(self, logger=None, **params):

        """

        ####################################################################
        # When subclassing this, do not implement any logic inside the
        # constructor except for in-memory calculations and settings, as the
        # daemon may be instantiated many times for an existing agent. Also,
        # all daemon attributes must be JSON serializable, as daemons are
        # represented as dictionaries and stored as JSON files on Disk. If
        # you wish to have a non serializable attribute, mark it private by
        # naming it _<name>. Attributes starting with underscore will be
        # omitted when serializing the object.
        ####################################################################

        :param logger: a logger to be used to log various subsequent
        operations.
        :type logger: logging.Logger

        :param params: key-value pairs as stated above.
        :type params dict

        """

        # will be populated later on with runtime properties of the host
        # node instance this agent is dedicated for (if needed)
        self._runtime_properties = None

        # configure logger
        self._logger = logger or setup_logger(
            logger_name='cloudify_agent.api.pm.{0}'
            .format(self.PROCESS_MANAGEMENT))

        # save params
        self._params = params

        if self._logger.isEnabledFor(logging.DEBUG):
            printed_params = copy.deepcopy(self._params)
            for hidden_field in ['broker_pass',
                                 'service_password']:
                printed_params.pop(hidden_field, None)
            self._logger.debug("Daemon attributes: %s", json.dumps(
                printed_params, indent=4))

        # configure command runner
        self._runner = LocalCommandRunner(logger=self._logger)

        # Mandatory parameters
        self.validate_mandatory()
        self.rest_host = params['rest_host']
        if isinstance(params['broker_ip'], basestring):
            self.broker_ip = params['broker_ip'].split(',')
        else:
            # We appear to sometimes invoke this with the params already
            # having been processed, so if it's not a string then we will
            # treat it as already having been split.
            self.broker_ip = params['broker_ip']
        self.local_rest_cert_file = params['local_rest_cert_file']
        self.cluster = params.get('cluster', [])

        # Optional parameters - REST client
        self.validate_optional()
        self.rest_port = params.get('rest_port', defaults.INTERNAL_REST_PORT)
        # REST token needs to be prefixed with _ so it's not stored
        # when the daemon is serialized
        self._rest_token = params.get('rest_token')
        self._rest_tenant = params.get('rest_tenant')

        # Optional parameters
        self.name = params.get('name') or self._get_name_from_manager()
        self.user = params.get('user') or getpass.getuser()

        self.broker_user = params.get('broker_user', 'guest')
        self.broker_pass = params.get('broker_pass', 'guest')
        self.broker_vhost = params.get('broker_vhost', '/')
        self.broker_ssl_enabled = params.get('broker_ssl_enabled', False)
        self.broker_ssl_cert_path = params['local_rest_cert_file']
        if self.broker_ssl_enabled:
            self.broker_port = constants.BROKER_PORT_SSL
        else:
            self.broker_port = constants.BROKER_PORT_NO_SSL
        self.heartbeat = params.get('heartbeat')

        self.host = params.get('host')
        self.deployment_id = params.get('deployment_id')
        self.queue = params.get('queue') or self._get_queue_from_manager()

        self.min_workers = params.get('min_workers') or defaults.MIN_WORKERS
        self.max_workers = params.get('max_workers') or defaults.MAX_WORKERS
        self.workdir = params.get('workdir') or os.getcwd()
        self.log_max_bytes = params.get('log_max_bytes',
                                        defaults.LOG_FILE_SIZE)
        self.log_max_history = params.get('log_max_history',
                                          defaults.LOG_BACKUPS)
        self.executable_temp_path = params.get('executable_temp_path')

        self.extra_env_path = params.get('extra_env_path')
        self.log_level = params.get('log_level') or defaults.LOG_LEVEL
        self.log_dir = params.get('log_dir') or self.workdir
        self.pid_file = params.get(
            'pid_file') or os.path.join(self.workdir,
                                        '{0}.pid'.format(self.name))

        # create the working directory if it's missing
        if not os.path.exists(self.workdir):
            self._logger.debug('Creating directory: {0}'.format(self.workdir))
            os.makedirs(self.workdir)

        # save as attributes so that they will be persisted in the json files.
        # we will make use of these values when loading agents by name.
        self.process_management = self.PROCESS_MANAGEMENT
        self.virtualenv = VIRTUALENV
        self.cluster_settings_path = params.get('cluster_settings_path')
        self.network = params.get('network') or 'default'
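
# Illustrative, runnable sketch (not from the original source) of the
# serialization convention described in the docstring above: attributes whose
# names start with '_' are omitted when the daemon is persisted. The Dummy
# class and its attribute values are examples only.
import json


def serializable_attributes(daemon):
    # keep only public attributes (e.g. name, queue); private ones such as
    # _logger, _runner or _params are dropped before dumping to JSON.
    return dict((key, value) for key, value in vars(daemon).items()
                if not key.startswith('_'))


class Dummy(object):
    def __init__(self):
        self.name = 'agent-1'
        self.queue = 'agent-1-queue'
        self._params = {'secret': 'hidden'}


print(json.dumps(serializable_attributes(Dummy()), indent=4))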
Example #55
0
    def __init__(self, logger=None):
        self.logger = logger or setup_logger(self.__class__.__name__)
        self.runner = LocalCommandRunner(logger=self.logger)
Example #56
0
class PluginInstaller(object):

    def __init__(self, logger=None):
        self.logger = logger or setup_logger(self.__class__.__name__)
        self.runner = LocalCommandRunner(logger=self.logger)

    def install(self, plugin, blueprint_id=None):
        """
        Install the plugin to the current virtualenv.

        :param plugin: A plugin structure as defined in the blueprint.
        :param blueprint_id: The blueprint id associated with this
                             installation. if specified, will be used
                             when downloading plugins that were included
                             as part of the blueprint itself.
        """
        managed_plugin = get_managed_plugin(plugin)
        source = get_plugin_source(plugin, blueprint_id)
        args = get_plugin_args(plugin)
        if managed_plugin:
            def build_description(*fields):
                return ', '.join(
                    '{0}: {1}'.format(field, managed_plugin.get(field))
                    for field in fields if managed_plugin.get(field))
            message = ('Installing managed plugin: {0} [{1}]'
                       .format(managed_plugin.id,
                               build_description('package_name',
                                                 'package_version',
                                                 'supported_platform',
                                                 'distribution',
                                                 'distribution_release')))
            self.logger.info(message)
            try:
                self._wagon_install(managed_plugin.id, args)
            except Exception as e:
                raise NonRecoverableError('Failed installing managed '
                                          'plugin: {0} [{1}][{2}]'
                                          .format(managed_plugin.id,
                                                  plugin, e))
            return managed_plugin.package_name
        elif source:
            self.logger.info('Installing plugin from source')
            return self._pip_install(source, args)
        else:
            raise NonRecoverableError('No source or managed plugin found for'
                                      ' {0}'.format(plugin))

    def _wagon_install(self, plugin_id, args):
        client = get_rest_client()
        wagon_dir = tempfile.mkdtemp(prefix='{0}-'.format(plugin_id))
        wagon_path = os.path.join(wagon_dir, 'wagon.tar.gz')
        try:
            self.logger.debug('Downloading plugin {0} from manager into {1}'
                              .format(plugin_id, wagon_path))
            client.plugins.download(plugin_id=plugin_id,
                                    output_file=wagon_path)
            self.logger.debug('Installing plugin {0} using wagon'
                              .format(plugin_id))
            w = wagon.Wagon(source=wagon_path)
            w.install(ignore_platform=True,
                      install_args=args,
                      virtualenv=VIRTUALENV)
        finally:
            self.logger.debug('Removing directory: {0}'
                              .format(wagon_dir))
            shutil.rmtree(wagon_dir, ignore_errors=True)

    def _pip_install(self, source, args):
        plugin_dir = None
        try:
            if os.path.isabs(source):
                plugin_dir = source
            else:
                self.logger.debug('Extracting archive: {0}'.format(source))
                plugin_dir = extract_package_to_dir(source)
            self.logger.debug('Installing from directory: {0} '
                              '[args={1}]'.format(plugin_dir, args))
            command = '{0} install {1} {2}'.format(
                get_pip_path(), plugin_dir, args)
            self.runner.run(command, cwd=plugin_dir)
            package_name = extract_package_name(plugin_dir)
            self.logger.debug('Retrieved package name: {0}'
                              .format(package_name))
        finally:
            if plugin_dir and not os.path.isabs(source):
                self.logger.debug('Removing directory: {0}'
                                  .format(plugin_dir))
                shutil.rmtree(plugin_dir, ignore_errors=True)
        return package_name

    def uninstall(self, package_name, ignore_missing=True):
        """
        Uninstall the plugin from the current virtualenv. When
        `ignore_missing` is False, this operation will fail when trying to
        uninstall a plugin that is not installed; by default, missing
        plugins are skipped with a log message.

        :param package_name: the package name as stated in the setup.py file
        :param ignore_missing: ignore failures in uninstalling missing plugins.
        """

        if not ignore_missing:
            self.runner.run('{0} uninstall -y {1}'.format(
                utils.get_pip_path(), package_name))
        else:
            out = self.runner.run(
                '{0} freeze'.format(utils.get_pip_path())).std_out
            packages = []
            for line in out.splitlines():
                packages.append(line.split('==')[0])
            if package_name in packages:
                self.runner.run('{0} uninstall -y {1}'.format(
                    utils.get_pip_path(), package_name))
            else:
                self.logger.info('{0} not installed. Nothing to do'
                                 .format(package_name))
Example #57
0
class PluginInstaller(object):

    def __init__(self, logger=None):
        self.logger = logger or setup_logger(self.__class__.__name__)
        self.runner = LocalCommandRunner(logger=self.logger)

    def install(self,
                plugin,
                deployment_id=None,
                blueprint_id=None):
        """
        Install the plugin to the current virtualenv.

        :param plugin: A plugin structure as defined in the blueprint.
        :param deployment_id: The deployment id associated with this
                              installation.
        :param blueprint_id: The blueprint id associated with this
                             installation. if specified, will be used
                             when downloading plugins that were included
                             as part of the blueprint itself.
        """
        # deployment_id may be empty in some tests.
        deployment_id = deployment_id or SYSTEM_DEPLOYMENT
        managed_plugin = get_managed_plugin(plugin,
                                            logger=self.logger)
        source = get_plugin_source(plugin, blueprint_id)
        args = get_plugin_args(plugin)
        tmp_plugin_dir = tempfile.mkdtemp(prefix='{0}-'.format(plugin['name']))
        args = '{0} --prefix="{1}"'.format(args, tmp_plugin_dir).strip()
        self._create_plugins_dir_if_missing()
        try:
            if managed_plugin:
                self._install_managed_plugin(
                    managed_plugin=managed_plugin,
                    plugin=plugin,
                    args=args,
                    tmp_plugin_dir=tmp_plugin_dir)
            elif source:
                self._install_source_plugin(
                    deployment_id=deployment_id,
                    plugin=plugin,
                    source=source,
                    args=args,
                    tmp_plugin_dir=tmp_plugin_dir)
            else:
                raise NonRecoverableError(
                    'No source or managed plugin found for {0}'.format(plugin))
        finally:
            self._rmtree(tmp_plugin_dir)

    def _install_managed_plugin(self, managed_plugin, plugin, args,
                                tmp_plugin_dir):
        matching_existing_installation = False
        package_name = managed_plugin.package_name
        dst_dir = '{0}-{1}'.format(package_name,
                                   managed_plugin.package_version)
        dst_dir = self._full_dst_dir(dst_dir)
        lock = self._lock(dst_dir)
        lock.acquire()
        try:
            if os.path.exists(dst_dir):
                plugin_id_path = os.path.join(dst_dir, 'plugin.id')
                if os.path.exists(plugin_id_path):
                    with open(plugin_id_path) as f:
                        existing_plugin_id = f.read().strip()
                    matching_existing_installation = (
                        existing_plugin_id == managed_plugin.id)
                    if not matching_existing_installation:
                        self.logger.warning(
                            'Managed plugin installation found but its ID '
                            'does not match the ID of the plugin currently'
                            ' on the manager. Existing '
                            'installation will be overridden. '
                            '[existing: {0}]'.format(existing_plugin_id))
                        self._rmtree(dst_dir)
                else:
                    self.logger.warning(
                        'Managed plugin installation found but it is '
                        'in a corrupted state. Existing installation '
                        'will be overridden.')
                    self._rmtree(dst_dir)

            fields = ['package_name',
                      'package_version',
                      'supported_platform',
                      'distribution',
                      'distribution_release']
            description = ', '.join('{0}: {1}'.format(
                field, managed_plugin.get(field))
                for field in fields if managed_plugin.get(field))

            if matching_existing_installation:
                self.logger.info(
                    'Skipping installation of managed plugin: {0} '
                    'as it is already installed [{1}]'
                    .format(managed_plugin.id, description))
            else:
                self.logger.info('Installing managed plugin: {0} [{1}]'
                                 .format(managed_plugin.id, description))
                try:
                    self._wagon_install(plugin=managed_plugin, args=args)
                    shutil.move(tmp_plugin_dir, dst_dir)
                    with open(os.path.join(dst_dir, 'plugin.id'), 'w') as f:
                        f.write(managed_plugin.id)
                except Exception as e:
                    tpe, value, tb = sys.exc_info()
                    raise NonRecoverableError('Failed installing managed '
                                              'plugin: {0} [{1}][{2}]'
                                              .format(managed_plugin.id,
                                                      plugin, e)), None, tb
        finally:
            if lock:
                lock.release()

    def _wagon_install(self, plugin, args):
        client = get_rest_client()
        wagon_dir = tempfile.mkdtemp(prefix='{0}-'.format(plugin.id))
        wagon_path = os.path.join(wagon_dir, 'wagon.tar.gz')
        try:
            self.logger.debug('Downloading plugin {0} from manager into {1}'
                              .format(plugin.id, wagon_path))
            client.plugins.download(plugin_id=plugin.id,
                                    output_file=wagon_path)
            self.logger.debug('Installing plugin {0} using wagon'
                              .format(plugin.id))
            w = wagon.Wagon(source=wagon_path)
            w.install(ignore_platform=True,
                      install_args=args,
                      virtualenv=VIRTUALENV)
        finally:
            self.logger.debug('Removing directory: {0}'
                              .format(wagon_dir))
            self._rmtree(wagon_dir)

    def _install_source_plugin(self, deployment_id, plugin, source, args,
                               tmp_plugin_dir):
        dst_dir = '{0}-{1}'.format(deployment_id, plugin['name'])
        dst_dir = self._full_dst_dir(dst_dir)
        if os.path.exists(dst_dir):
            self.logger.warning(
                'Source plugin {0} already exists for deployment {1}. '
                'This probably means a previous deployment with the '
                'same name was not cleaned properly. Removing existing'
                ' directory'.format(plugin['name'], deployment_id))
            self._rmtree(dst_dir)
        self.logger.info('Installing plugin from source')
        self._pip_install(source=source, args=args)
        shutil.move(tmp_plugin_dir, dst_dir)

    def _pip_install(self, source, args):
        plugin_dir = None
        try:
            if os.path.isabs(source):
                plugin_dir = source
            else:
                self.logger.debug('Extracting archive: {0}'.format(source))
                plugin_dir = extract_package_to_dir(source)
            self.logger.debug('Installing from directory: {0} '
                              '[args={1}]'.format(plugin_dir, args))
            command = '{0} install {1} {2}'.format(
                get_pip_path(), plugin_dir, args)
            self.runner.run(command, cwd=plugin_dir)
            package_name = extract_package_name(plugin_dir)
            self.logger.debug('Retrieved package name: {0}'
                              .format(package_name))
        finally:
            if plugin_dir and not os.path.isabs(source):
                self.logger.debug('Removing directory: {0}'
                                  .format(plugin_dir))
                self._rmtree(plugin_dir)
        return package_name

    def uninstall(self, plugin, deployment_id=None):
        """Uninstall a previously installed plugin (only supports source
        plugins) """
        deployment_id = deployment_id or SYSTEM_DEPLOYMENT
        self.logger.info('Uninstalling plugin from source')
        dst_dir = '{0}-{1}'.format(deployment_id, plugin['name'])
        dst_dir = self._full_dst_dir(dst_dir)
        if os.path.isdir(dst_dir):
            self._rmtree(dst_dir)

    def uninstall_wagon(self, package_name, package_version):
        """Only used by tests for cleanup purposes"""
        dst_dir = '{0}-{1}'.format(package_name, package_version)
        dst_dir = self._full_dst_dir(dst_dir)
        if os.path.isdir(dst_dir):
            self._rmtree(dst_dir)

    @staticmethod
    def _create_plugins_dir_if_missing():
        plugins_dir = os.path.join(VIRTUALENV, 'plugins')
        if not os.path.exists(plugins_dir):
            try:
                os.makedirs(plugins_dir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

    @staticmethod
    def _full_dst_dir(dst_dir):
        plugins_dir = os.path.join(VIRTUALENV, 'plugins')
        return os.path.join(plugins_dir, dst_dir)

    @staticmethod
    def _lock(path):
        return fasteners.InterProcessLock('{0}.lock'.format(path))

    @staticmethod
    def _rmtree(path):
        shutil.rmtree(path, ignore_errors=True)
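
# Hedged usage sketch (not part of the original example): driving the
# installer above; the plugin dict shape mirrors the blueprint plugin
# structure referenced in the docstrings, and all values are placeholders.
#
#     installer = PluginInstaller()
#     installer.install(plugin={'name': 'my-plugin',
#                               'source': 'https://example.com/my-plugin.tgz'},
#                       deployment_id='dep-1',
#                       blueprint_id='bp-1')
#     installer.uninstall(plugin={'name': 'my-plugin'}, deployment_id='dep-1')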
Example #58
0
class Daemon(object):

    """
    Base class for daemon implementations.
    Following is all the available common daemon keyword arguments. These
    will be available to any daemon without any configuration as instance
    attributes.

    ``manager_ip``:

        the ip address of the manager host. (Required)

    ``user``:

        the user this daemon will run under. defaults to the current user.

    ``name``:

        the name to give the daemon. This name will be a unique identifier of
        the daemon. meaning you will not be able to create more daemons with
        that name until a delete operation has been performed. defaults to
        a unique name generated by cloudify.

    ``queue``:

        the queue this daemon will listen to. It is possible to create
        different workers with the same queue; however, this is discouraged.
        To create more workers that process tasks from a given queue, use the
        'min_workers' and 'max_workers' keys. defaults to <name>-queue.

    ``host``:

        the ip address of the host the agent will be started on. this
        property is used only when the 'queue' or 'name' properties are omitted,
        in order to retrieve the agent name and queue from the manager. in
        such case, this property must match the 'ip' runtime property given
        to the corresponding Compute node.

    ``deployment_id``:

        the deployment id this agent will be a part of. this
        property is used only when the 'queue' or 'name' properties are omitted,
        in order to retrieve the agent name and queue from the manager.

    ``workdir``:

        working directory for runtime files (pid, log).
        defaults to the current working directory.

    ``broker_ip``:

        the ip address of the broker to connect to.
        defaults to the manager_ip value.

    ``broker_port``:

        the connection port of the broker process.
        defaults to 5672.

    ``broker_url``:

        full url to the broker. if this key is specified,
        the broker_ip and broker_port keys are ignored.

        for example:
            amqp://192.168.9.19:6786

        if this is not specified, the broker url will be constructed from the
        broker_ip and broker_port like so:
        'amqp://*****:*****@<broker_ip>:<broker_port>//'

    ``manager_port``:

        the manager REST gateway port to connect to. defaults to 80.

    ``min_workers``:

        the minimum number of worker processes this daemon will manage. all
        workers will listen on the same queue allowing for higher
        concurrency when performing tasks. defaults to 0.

    ``max_workers``:

        the maximum number of worker processes this daemon will manage.
        as tasks keep coming in, the daemon will expand its worker pool to
        handle more tasks concurrently. However, as the name
        suggests, it will never exceed this number, allowing for control
        of resource usage. defaults to 5.

    ``extra_env_path``:

        path to a file containing environment variables to be added to the
        daemon environment. the file should be in the format of
        multiple 'export A=B' lines for linux, or 'set A=B' for windows.
        defaults to None.

    ``log_level``:

        log level of the daemon process itself. defaults to debug.

    ``log_file``:

        location of the daemon log file. defaults to <workdir>/<name>.log

    ``pid_file``:

        location of the daemon pid file. defaults to <workdir>/<name>.pid

    ``includes``:

        a comma separated list of modules to include with this agent.
        if none is specified, only the built-in modules will be included.

        see `cloudify_agent.operations.CLOUDIFY_AGENT_BUILT_IN_TASK_MODULES`

        This option may also be passed as a regular JSON list.

    """

    # override this when adding implementations.
    PROCESS_MANAGEMENT = None

    # add specific mandatory parameters for different implementations.
    # they will be validated upon daemon creation
    MANDATORY_PARAMS = [
        'manager_ip'
    ]

    def __init__(self, logger=None, **params):

        """

        ####################################################################
        # When subclassing this, do not implement any logic inside the
        # constructor except for in-memory calculations and settings, as the
        # daemon may be instantiated many times for an existing agent. Also,
        # all daemon attributes must be JSON serializable, as daemons are
        # represented as dictionaries and stored as JSON files on Disk. If
        # you wish to have a non serializable attribute, mark it private by
        # naming it _<name>. Attributes starting with underscore will be
        # omitted when serializing the object.
        ####################################################################

        :param logger: a logger to be used to log various subsequent
        operations.
        :type logger: logging.Logger

        :param params: key-value pairs as stated above.
        :type params dict

        """

        # will be populated later on with runtime properties of the host
        # node instance this agent is dedicated for (if needed)
        self._runtime_properties = None

        # configure logger
        self._logger = logger or setup_logger(
            logger_name='cloudify_agent.api.pm.{0}'
            .format(self.PROCESS_MANAGEMENT))

        # save params
        self._params = params

        # configure command runner
        self._runner = LocalCommandRunner(logger=self._logger)

        # Mandatory parameters
        self.validate_mandatory()
        self.manager_ip = params['manager_ip']

        # Optional parameters
        self.validate_optional()
        self.user = params.get('user') or getpass.getuser()
        self.broker_ip = params.get(
            'broker_ip') or self.manager_ip
        self.broker_port = params.get(
            'broker_port') or defaults.BROKER_PORT
        self.host = params.get('host')
        self.deployment_id = params.get('deployment_id')
        self.manager_port = params.get(
            'manager_port') or defaults.MANAGER_PORT
        self.name = params.get(
            'name') or self._get_name_from_manager()
        self.queue = params.get(
            'queue') or self._get_queue_from_manager()
        self.broker_url = params.get(
            'broker_url') or defaults.BROKER_URL.format(
            self.broker_ip,
            self.broker_port)
        self.min_workers = params.get(
            'min_workers') or defaults.MIN_WORKERS
        self.max_workers = params.get(
            'max_workers') or defaults.MAX_WORKERS
        self.workdir = params.get(
            'workdir') or os.getcwd()
        self.extra_env_path = params.get('extra_env_path')
        self.log_level = params.get('log_level') or defaults.LOG_LEVEL
        self.log_file = params.get(
            'log_file') or os.path.join(self.workdir,
                                        '{0}.log'.format(self.name))
        self.pid_file = params.get(
            'pid_file') or os.path.join(self.workdir,
                                        '{0}.pid'.format(self.name))

        # accept the 'includes' parameter as a string as well
        # as a list. the string acceptance is important because this
        # class is instantiated by a CLI as well as an API, and it's not very
        # convenient to pass proper lists on CLI.
        includes = params.get('includes')
        if includes:
            if isinstance(includes, str):
                self.includes = includes.split(',')
            elif isinstance(includes, list):
                self.includes = includes
            else:
                raise ValueError("Unexpected type for 'includes' parameter: "
                                 "{0}. supported type are 'str' and 'list'"
                                 .format(type(includes)))
        else:
            self.includes = []
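        # e.g. a CLI-style string 'plugin_a.tasks,plugin_b.tasks' ends up as
        # the list ['plugin_a.tasks', 'plugin_b.tasks'] (illustrative names).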

        # add built-in operations. check they don't already exist to avoid
        # duplicates, which may happen when cloning daemons.
        for module in operations.CLOUDIFY_AGENT_BUILT_IN_TASK_MODULES:
            if module not in self.includes:
                self.includes.append(module)

        # create the working directory if it's missing
        if not os.path.exists(self.workdir):
            self._logger.debug('Creating directory: {0}'.format(self.workdir))
            os.makedirs(self.workdir)

        # save as attributes so that they will be persisted in the json files.
        # we will make use of these values when loading agents by name.
        self.process_management = self.PROCESS_MANAGEMENT
        self.virtualenv = VIRTUALENV

        # initialize an internal celery client
        self._celery = Celery(broker=self.broker_url,
                              backend=self.broker_url)

    def validate_mandatory(self):

        """
        Validates that all mandatory parameters are given.

        :raise DaemonMissingMandatoryPropertyError: in case one of the
        mandatory parameters is missing.
        """

        for param in self.MANDATORY_PARAMS:
            if param not in self._params:
                raise exceptions.DaemonMissingMandatoryPropertyError(param)

    def validate_optional(self):

        """
        Validates any optional parameters given to the daemon.

        :raise DaemonPropertiesError:
        in case one of the parameters is faulty.
        """

        self._validate_autoscale()
        self._validate_host()

    ########################################################################
    # the following methods must be implemented by the sub-classes as they
    # may exhibit custom logic. usually this would be related to process
    # management specific configuration files.
    ########################################################################

    def configure(self):

        """
        Creates any necessary resources for the daemon. After this method
        has completed successfully, it should be possible to start the daemon
        by running the command returned by the `start_command` method.

        """
        raise NotImplementedError('Must be implemented by a subclass')

    def delete(self, force=defaults.DAEMON_FORCE_DELETE):

        """
        Delete any resources created for the daemon in the 'configure' method.

        :param force: if the daemon is still running, stop it before
                      deleting it.
        """
        raise NotImplementedError('Must be implemented by a subclass')

    def apply_includes(self):

        """
        Apply the includes list of the agent. This method must modify the
        includes configuration used when starting the agent.
        """
        raise NotImplementedError('Must be implemented by a subclass')

    def start_command(self):

        """
        Construct a command line for starting the daemon.
        (e.g sudo service <name> start)

        :return a one liner command to start the daemon process.
        """
        raise NotImplementedError('Must be implemented by a subclass')

    def stop_command(self):

        """
        Construct a command line for stopping the daemon.
        (e.g sudo service <name> stop)

        :return a one liner command to stop the daemon process.
        """
        raise NotImplementedError('Must be implemented by a subclass')

    def status(self):

        """
        Query the daemon status. This method can usually be
        implemented by simply running the status command. However, this is
        not always the case, as different commands and process management
        tools may behave differently.

        :return: True if the service is running, False otherwise
        """
        raise NotImplementedError('Must be implemented by a subclass')

    ########################################################################
    # the following methods contain the common logic that applies to any
    # process management implementation.
    ########################################################################

    def register(self, plugin):

        """
        Register an additional plugin. This method will enable the addition
        of operations defined in the plugin.

        #####################################################################
        # Note this method changes the
        # internal 'includes' list, which means you must persist the daemon
        # after running this method (by calling DaemonFactory.save) in order
        # to be able to properly load this daemon in the future.
        #####################################################################

        :param plugin: the plugin name to register.
        """

        self._logger.debug('Listing modules of plugin: {0}'.format(plugin))
        modules = self._list_plugin_files(plugin)

        self.includes.extend(modules)

        # process management specific implementation
        self._logger.debug('Setting includes: {0}'.format(self.includes))
        self.apply_includes()

    def unregister(self, plugin):

        """
        Un-registers a plugin from the daemon. After applying this method,
        the daemon will no longer recognize operations defined in that plugin.

        #####################################################################
        # Note this method changes the
        # internal 'includes' list, which means you must persist the daemon
        # after running this method (by calling DaemonFactory.save) in order
        # to be able to properly load this daemon in the future.
        #####################################################################

        :param plugin: the plugin name to unregister.

        """
        self._logger.debug('Listing modules of plugin: {0}'.format(plugin))
        modules = self._list_plugin_files(plugin)

        for module in modules:
            self.includes.remove(module)

        # process management specific implementation
        self._logger.debug('Applying includes: {0}'.format(self.includes))
        self.apply_includes()

    def create(self):

        """
        Creates the agent. This method may serve as a hook for custom
        logic that needs to run after the instance
        has been instantiated.

        """
        self._logger.debug('Daemon created')

    def start(self,
              interval=defaults.START_INTERVAL,
              timeout=defaults.START_TIMEOUT,
              delete_amqp_queue=defaults.DELETE_AMQP_QUEUE_BEFORE_START):

        """
        Starts the daemon process.

        :param interval: the interval in seconds to sleep when waiting for
                         the daemon to be ready.
        :param timeout: the timeout in seconds to wait for the daemon to be
                        ready.
        :param delete_amqp_queue: delete any queues with the name of the
                                  current daemon queue in the broker.

        :raise DaemonStartupTimeout: in case the agent failed to start in the
        given amount of time.
        :raise DaemonException: in case an error happened during the agent
        startup.

        """

        if delete_amqp_queue:
            self._logger.debug('Deleting AMQP queues')
            self._delete_amqp_queues()
        start_command = self.start_command()
        self._logger.info('Starting daemon with command: {0}'
                          .format(start_command))
        self._runner.run(start_command)
        end_time = time.time() + timeout
        while time.time() < end_time:
            self._logger.debug('Querying daemon {0} stats'.format(self.name))
            stats = utils.get_agent_stats(self.name, self._celery)
            if stats:
                # make sure the status command recognizes the daemon is up
                status = self.status()
                if status:
                    self._logger.debug('Daemon {0} has started'
                                       .format(self.name))
                    return
            self._logger.debug('Daemon {0} has not started yet. '
                               'Sleeping for {1} seconds...'
                               .format(self.name, interval))
            time.sleep(interval)
        self._logger.debug('Verifying there were no unhandled '
                           'exceptions during startup')
        self._verify_no_celery_error()
        raise exceptions.DaemonStartupTimeout(timeout, self.name)

    def stop(self,
             interval=defaults.STOP_INTERVAL,
             timeout=defaults.STOP_TIMEOUT):

        """
        Stops the daemon process.

        :param interval: the interval in seconds to sleep when waiting for
                         the daemon to stop.
        :param timeout: the timeout in seconds to wait for the daemon to stop.

        :raise DaemonShutdownTimeout: in case the agent failed to be stopped
        in the given amount of time.
        :raise DaemonException: in case an error happened during the agent
        shutdown.

        """

        stop_command = self.stop_command()
        self._logger.info('Stopping daemon with command: {0}'
                          .format(stop_command))
        self._runner.run(stop_command)
        end_time = time.time() + timeout
        while time.time() < end_time:
            self._logger.debug('Querying daemon {0} stats'.format(self.name))
            # check that the process has shut down
            stats = utils.get_agent_stats(self.name, self._celery)
            if not stats:
                # make sure the status command also recognizes the
                # daemon is down
                status = self.status()
                if not status:
                    self._logger.debug('Daemon {0} has shut down'
                                       .format(self.name))
                    return
            self._logger.debug('Daemon {0} is still running. '
                               'Sleeping for {1} seconds...'
                               .format(self.name, interval))
            time.sleep(interval)
        self._logger.debug('Verifying there were no unhandled '
                           'exceptions during shutdown')
        self._verify_no_celery_error()
        raise exceptions.DaemonShutdownTimeout(timeout, self.name)

    def restart(self,
                start_timeout=defaults.START_TIMEOUT,
                start_interval=defaults.START_INTERVAL,
                stop_timeout=defaults.STOP_TIMEOUT,
                stop_interval=defaults.STOP_INTERVAL):

        """
        Restart the daemon process.

        :param start_interval: the interval in seconds to sleep when waiting
                               for the daemon to start.
        :param start_timeout: The timeout in seconds to wait for the daemon
                              to start.
        :param stop_interval: the interval in seconds to sleep when waiting
                              for the daemon to stop.
        :param stop_timeout: the timeout in seconds to wait for the daemon
                             to stop.

        :raise DaemonStartupTimeout: in case the agent failed to start in the
        given amount of time.
        :raise DaemonShutdownTimeout: in case the agent failed to be stopped
        in the given amount of time.
        :raise DaemonException: in case an error happened during startup or
        shutdown

        """

        self.stop(timeout=stop_timeout,
                  interval=stop_interval)
        self.start(timeout=start_timeout,
                   interval=start_interval)

    def _verify_no_celery_error(self):

        error_dump_path = os.path.join(
            utils.internal.get_storage_directory(self.user),
            '{0}.err'.format(self.name))

        # if this file exists, the celery worker had an uncaught
        # exception and its content was written to the file above
        # by our custom exception handler (see app.py)
        if os.path.exists(error_dump_path):
            with open(error_dump_path) as f:
                error = f.read()
            os.remove(error_dump_path)
            raise exceptions.DaemonError(error)

    def _delete_amqp_queues(self):
        client = amqp_client.create_client(self.broker_ip)
        try:
            channel = client.connection.channel()
            self._logger.debug('Deleting queue: {0}'.format(self.queue))
            channel.queue_delete(self.queue)
            pid_box_queue = 'celery@{0}.celery.pidbox'.format(self.queue)
            self._logger.debug('Deleting queue: {0}'.format(pid_box_queue))
            channel.queue_delete(pid_box_queue)
        finally:
            try:
                client.close()
            except Exception as e:
                self._logger.warning('Failed closing amqp client: {0}'
                                     .format(e))

    def _validate_autoscale(self):
        min_workers = self._params.get('min_workers')
        max_workers = self._params.get('max_workers')
        if min_workers:
            if not str(min_workers).isdigit():
                raise exceptions.DaemonPropertiesError(
                    'min_workers is supposed to be a number '
                    'but is: {0}'
                    .format(min_workers)
                )
            min_workers = int(min_workers)
        if max_workers:
            if not str(max_workers).isdigit():
                raise exceptions.DaemonPropertiesError(
                    'max_workers is supposed to be a number '
                    'but is: {0}'
                    .format(max_workers)
                )
            max_workers = int(max_workers)
        if min_workers and max_workers:
            if min_workers > max_workers:
                raise exceptions.DaemonPropertiesError(
                    'min_workers cannot be greater than max_workers '
                    '[min_workers={0}, max_workers={1}]'
                    .format(min_workers, max_workers))

    def _validate_host(self):
        queue = self._params.get('queue')
        host = self._params.get('host')
        if not queue and not host:
            raise exceptions.DaemonPropertiesError(
                'host must be supplied when queue is omitted'
            )

    def _validate_deployment_id(self):
        queue = self._params.get('queue')
        deployment_id = self._params.get('deployment_id')
        if not queue and not deployment_id:
            raise exceptions.DaemonPropertiesError(
                'deployment_id must be supplied when queue is omitted'
            )

    def _get_name_from_manager(self):
        if self._runtime_properties is None:
            self._get_runtime_properties()
        return self._runtime_properties['cloudify_agent']['name']

    def _get_queue_from_manager(self):
        if self._runtime_properties is None:
            self._get_runtime_properties()
        return self._runtime_properties['cloudify_agent']['queue']

    def _get_runtime_properties(self):
        client = CloudifyClient(host=self.manager_ip, port=self.manager_port)
        node_instances = client.node_instances.list(
            deployment_id=self.deployment_id)

        def match_ip(node_instance):
            host_id = node_instance.host_id
            if host_id == node_instance.id:
                # compute node instance
                return self.host == node_instance.runtime_properties['ip']
            return False

        matched = [instance for instance in node_instances
                   if match_ip(instance)]

        if len(matched) > 1:
            raise exceptions.DaemonConfigurationError(
                'Found multiple node instances with ip {0}: {1}'.format(
                    self.host,
                    ','.join(instance.id for instance in matched))
            )

        if len(matched) == 0:
            raise exceptions.DaemonConfigurationError(
                'No node instances with ip {0} were found'.format(self.host)
            )
        self._runtime_properties = matched[0].runtime_properties

    def _list_plugin_files(self, plugin_name):

        """
        Retrieves python files related to the plugin.
        __init__ files are filtered out.

        :param plugin_name: The plugin name.

        :return: A list of file paths.
        :rtype: list of str
        """

        module_paths = []
        runner = LocalCommandRunner(self._logger)

        files = runner.run(
            '{0} show -f {1}'
            .format(utils.get_pip_path(), plugin_name)
        ).std_out.splitlines()
        for module in files:
            if module.endswith('.py') and '__init__' not in module:
                # the file paths are relative to the
                # package __init__.py file.
                prefix = '../' if os.name == 'posix' else '..\\'
                module_paths.append(
                    module.replace(prefix, '')
                    .replace(os.sep, '.').replace('.py', '').strip())
        return module_paths
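The start() and stop() methods above share the same poll-until-ready pattern: issue a command, then check a readiness condition at a fixed interval until a deadline passes. Below is a minimal, standalone sketch of that pattern; wait_for and WaitTimeout are hypothetical stand-ins for the Celery-stats/status checks and the DaemonStartupTimeout/DaemonShutdownTimeout exceptions used in the real code.

import time


class WaitTimeout(Exception):
    # Hypothetical stand-in for DaemonStartupTimeout / DaemonShutdownTimeout.
    pass


def wait_for(is_ready, timeout=60, interval=1):
    # Poll `is_ready` every `interval` seconds until it returns a truthy
    # value or `timeout` seconds have elapsed.
    end_time = time.time() + timeout
    while time.time() < end_time:
        if is_ready():
            return
        time.sleep(interval)
    raise WaitTimeout('condition not met within {0} seconds'.format(timeout))

Something like wait_for(daemon.status, timeout=defaults.START_TIMEOUT, interval=defaults.START_INTERVAL) would roughly mirror the readiness loop in start(), minus the AMQP queue deletion and error-dump verification.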
Example #59
0
    def __init__(self, logger=None, **params):

        """

        ####################################################################
        # When subclassing this, do not implement any logic inside the
        # constructor except for in-memory calculations and settings, as the
        # daemon may be instantiated many times for an existing agent. Also,
        # all daemon attributes must be JSON serializable, as daemons are
        # represented as dictionaries and stored as JSON files on disk. If
        # you wish to have a non serializable attribute, mark it private by
        # naming it _<name>. Attributes starting with underscore will be
        # omitted when serializing the object.
        ####################################################################

        :param logger: a logger to be used to log various subsequent
        operations.
        :type logger: logging.Logger

        :param params: key-value pairs as stated above.
        :type params: dict

        """

        # will be populated later on with the runtime properties of the host
        # node instance this agent is dedicated to (if needed)
        self._runtime_properties = None

        # configure logger
        self._logger = logger or setup_logger(
            logger_name='cloudify_agent.api.pm.{0}'
            .format(self.PROCESS_MANAGEMENT))

        # save params
        self._params = params

        # configure command runner
        self._runner = LocalCommandRunner(logger=self._logger)

        # Mandatory parameters
        self.validate_mandatory()
        self.manager_ip = params['manager_ip']

        # Optional parameters
        self.validate_optional()
        self.user = params.get('user') or getpass.getuser()
        self.broker_ip = params.get(
            'broker_ip') or self.manager_ip
        self.broker_port = params.get(
            'broker_port') or defaults.BROKER_PORT
        self.host = params.get('host')
        self.deployment_id = params.get('deployment_id')
        self.manager_port = params.get(
            'manager_port') or defaults.MANAGER_PORT
        self.name = params.get(
            'name') or self._get_name_from_manager()
        self.queue = params.get(
            'queue') or self._get_queue_from_manager()
        self.broker_url = params.get(
            'broker_url') or defaults.BROKER_URL.format(
            self.broker_ip,
            self.broker_port)
        self.min_workers = params.get(
            'min_workers') or defaults.MIN_WORKERS
        self.max_workers = params.get(
            'max_workers') or defaults.MAX_WORKERS
        self.workdir = params.get(
            'workdir') or os.getcwd()
        self.extra_env_path = params.get('extra_env_path')
        self.log_level = params.get('log_level') or defaults.LOG_LEVEL
        self.log_file = params.get(
            'log_file') or os.path.join(self.workdir,
                                        '{0}.log'.format(self.name))
        self.pid_file = params.get(
            'pid_file') or os.path.join(self.workdir,
                                        '{0}.pid'.format(self.name))

        # accept the 'includes' parameter as a string as well
        # as a list. accepting a string is important because this
        # class is instantiated via the CLI as well as the API, and it's
        # not very convenient to pass proper lists on the CLI.
        includes = params.get('includes')
        if includes:
            if isinstance(includes, str):
                self.includes = includes.split(',')
            elif isinstance(includes, list):
                self.includes = includes
            else:
                raise ValueError("Unexpected type for 'includes' parameter: "
                                 "{0}. supported type are 'str' and 'list'"
                                 .format(type(includes)))
        else:
            self.includes = []

        # add built-in operations. check they don't already exist to avoid
        # duplicates, which may happen when cloning daemons.
        for module in operations.CLOUDIFY_AGENT_BUILT_IN_TASK_MODULES:
            if module not in self.includes:
                self.includes.append(module)

        # create the working directory if it's missing
        if not os.path.exists(self.workdir):
            self._logger.debug('Creating directory: {0}'.format(self.workdir))
            os.makedirs(self.workdir)

        # save as attributes so that they will be persisted in the json files.
        # we will make use of these values when loading agents by name.
        self.process_management = self.PROCESS_MANAGEMENT
        self.virtualenv = VIRTUALENV

        # initialize an internal celery client
        self._celery = Celery(broker=self.broker_url,
                              backend=self.broker_url)
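The constructor above accepts the 'includes' parameter either as a comma-separated string (convenient on the CLI) or as a list (convenient from the API), and then appends the built-in task modules while avoiding duplicates. A minimal sketch of that normalization is shown below; BUILT_IN_MODULES is a hypothetical stand-in for operations.CLOUDIFY_AGENT_BUILT_IN_TASK_MODULES.

# Hypothetical stand-in for operations.CLOUDIFY_AGENT_BUILT_IN_TASK_MODULES.
BUILT_IN_MODULES = ['cloudify_agent.operations']


def normalize_includes(includes):
    # Accept a comma-separated string, a list, or nothing at all.
    if not includes:
        modules = []
    elif isinstance(includes, str):
        modules = includes.split(',')
    elif isinstance(includes, list):
        modules = list(includes)
    else:
        raise ValueError("Unexpected type for 'includes' parameter: {0}. "
                         "Supported types are 'str' and 'list'"
                         .format(type(includes)))
    # Append built-in modules, skipping ones that are already present
    # (which may happen when cloning daemons).
    for module in BUILT_IN_MODULES:
        if module not in modules:
            modules.append(module)
    return modules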
Example #60
0
    def __init__(self, logger=None, **params):

        """

        ####################################################################
        # When subclassing this, do not implement any logic inside the
        # constructor except for in-memory calculations and settings, as the
        # daemon may be instantiated many times for an existing agent. Also,
        # all daemon attributes must be JSON serializable, as daemons are
        # represented as dictionaries and stored as JSON files on disk. If
        # you wish to have a non serializable attribute, mark it private by
        # naming it _<name>. Attributes starting with underscore will be
        # omitted when serializing the object.
        ####################################################################

        :param logger: a logger to be used to log various subsequent
        operations.
        :type logger: logging.Logger

        :param params: key-value pairs as stated above.
        :type params: dict

        """

        # will be populated later on with the runtime properties of the host
        # node instance this agent is dedicated to (if needed)
        self._runtime_properties = None

        # configure logger
        self._logger = logger or setup_logger(
            logger_name='cloudify_agent.api.pm.{0}'
            .format(self.PROCESS_MANAGEMENT))

        # save params
        self._params = params

        # configure command runner
        self._runner = LocalCommandRunner(logger=self._logger)

        # Mandatory parameters
        self.validate_mandatory()
        self.file_server_host = params['file_server_host']
        self.rest_host = params['rest_host']
        self.broker_ip = params['broker_ip']

        # Optional parameters - REST client
        self.validate_optional()
        self.rest_port = params.get(
            'rest_port') or defaults.REST_PORT
        self.rest_protocol = params.get(
            'rest_protocol') or defaults.REST_PROTOCOL
        self.file_server_port = params.get(
            'file_server_port') or defaults.FILE_SERVER_PORT
        self.file_server_protocol = params.get(
            'file_server_protocol') or defaults.FILE_SERVER_PROTOCOL
        self.verify_rest_certificate = params.get('verify_rest_certificate')
        self.local_rest_cert_file = params.get('local_rest_cert_file', '')
        self.rest_cert_content = params.get('rest_ssl_cert_content', '')
        self.security_enabled = params.get('security_enabled')
        # REST credentials need to be prefixed with _ so they're not stored
        # when the daemon is serialized
        self._rest_username = params.get('rest_username')
        self._rest_password = params.get('rest_password')
        self._rest_token = params.get('rest_token')

        # Optional parameters
        self.name = params.get(
            'name') or self._get_name_from_manager()
        self.user = params.get('user') or getpass.getuser()
        self.broker_ssl_enabled = params.get('broker_ssl_enabled', False)
        self.broker_ssl_cert_content = params.get('broker_ssl_cert', '')
        self.broker_ssl_cert_path = params.get('broker_ssl_cert_path', '')
        # Port must be determined after SSL enabled has been set in order for
        # intelligent port selection to work properly
        self.broker_port = self._get_broker_port()
        self.broker_user = params.get('broker_user', 'guest')
        self.broker_pass = params.get('broker_pass', 'guest')
        self.host = params.get('host')
        self.deployment_id = params.get('deployment_id')
        self.queue = params.get(
            'queue') or self._get_queue_from_manager()

        # The broker URL is no longer accepted as a parameter, since that
        # would introduce ambiguity over which values to use if its
        # components differed from the broker_user, broker_pass, etc. that
        # were passed in. These components need to be known for the
        # _delete_amqp_queues function.
        self.broker_url = defaults.BROKER_URL.format(
            host=self.broker_ip,
            port=self.broker_port,
            username=self.broker_user,
            password=self.broker_pass,
        )
        self.min_workers = params.get(
            'min_workers') or defaults.MIN_WORKERS
        self.max_workers = params.get(
            'max_workers') or defaults.MAX_WORKERS
        self.workdir = params.get(
            'workdir') or os.getcwd()
        self.extra_env_path = params.get('extra_env_path')
        self.log_level = params.get('log_level') or defaults.LOG_LEVEL
        self.log_file = params.get(
            'log_file') or os.path.join(self.workdir,
                                        '{0}.log'.format(self.name))
        self.pid_file = params.get(
            'pid_file') or os.path.join(self.workdir,
                                        '{0}.pid'.format(self.name))

        # create the working directory if it's missing
        if not os.path.exists(self.workdir):
            self._logger.debug('Creating directory: {0}'.format(self.workdir))
            os.makedirs(self.workdir)

        # save as attributes so that they will be persisted in the json files.
        # we will make use of these values when loading agents by name.
        self.process_management = self.PROCESS_MANAGEMENT
        self.virtualenv = VIRTUALENV