def _bootstrap(self,
                   blueprint_path,
                   inputs_path=None,
                   install_plugins=None,
                   keep_up_on_failure=False,
                   validate_only=False,
                   task_retries=5,
                   task_retry_interval=90,
                   subgraph_retries=2,
                   verbose=False,
                   create_rest_client_func=None):
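        """Bootstrap a manager from the given manager blueprint.

        Initializes a fresh CLI context and patches the requested
        ``subgraph_retries`` into the local provider context. Unless
        ``validate_only`` is set, the handler's ``before_bootstrap`` hook
        runs first and, after ``cfy bootstrap`` completes, plugins are
        uploaded, the running environment is set up and ``after_bootstrap``
        is invoked.
        """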

        if install_plugins is None:
            install_plugins = test_environment.install_plugins

        cfy = get_cfy()
        cfy.init(reset_context=True)

        with YamlPatcher(CLOUDIFY_CONFIG_PATH) as patch:
            prop_path = ('local_provider_context.'
                         'cloudify.workflows.subgraph_retries')
            patch.set_value(prop_path, subgraph_retries)

        if not validate_only:
            # TODO: add manager_blueprint_path and inputs_path arguments to
            # openstack_plugin
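            # Handlers that don't accept these keyword arguments raise
            # TypeError; fall back to calling before_bootstrap() without them.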
            try:
                self.before_bootstrap(
                    manager_blueprint_path=blueprint_path,
                    inputs_path=inputs_path)
            except TypeError:
                self.before_bootstrap()

        cfy.bootstrap(
            blueprint_path,
            inputs=inputs_path,
            install_plugins=install_plugins,
            keep_up_on_failure=keep_up_on_failure,
            validate_only=validate_only,
            task_retries=task_retries,
            task_retry_interval=task_retry_interval,
            verbose=verbose,
        )

        if not validate_only:
            self._upload_plugins()
            self._running_env_setup(get_profile_context().manager_ip,
                                    create_rest_client_func)
            # Hacky workaround: when a test bootstraps a manager using the
            # simple manager blueprint, the provider context does not contain
            # the 'resources' key expected by the openstack handler's
            # after_bootstrap method. This should probably be handled better.
            if 'simple' not in path(blueprint_path).basename():
                self.after_bootstrap(get_profile_context().provider_context)
    def teardown(self):
        if self._global_cleanup_context is None:
            return
        self.setup()
        cfy = get_cfy()
        try:
            cfy.use(self.management_ip)
            cfy.teardown(force=True, verbose=True, ignore_deployments=True)
        finally:
            self._global_cleanup_context.cleanup()
            self.handler.after_teardown()
            if os.path.exists(self._workdir):
                shutil.rmtree(self._workdir)
    def setUp(self):
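        """Bootstrap a second manager next to the one created by the base
        environment.

        The manager blueprint and inputs are copied, the shared OpenStack
        resources (network, subnet, router, security groups) are marked as
        external so they are reused, and the new server and port get
        distinct names before bootstrapping. A REST client pointing at the
        second manager is kept in ``self.client2``.
        """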
        super(TwoManagersTest, self).setUp()

        self.cfy2 = get_cfy()
        second_manager_blueprint_path = '{}_existing'.format(
            self.env._manager_blueprint_path)

        shutil.copy2(self.env._manager_blueprint_path,
                     second_manager_blueprint_path)

        external_resources = [
            'node_templates.management_network.properties',
            'node_templates.management_subnet.properties',
            'node_templates.router.properties',
            'node_templates.agents_security_group.properties',
            'node_templates.management_security_group.properties',
        ]

        with YamlPatcher(second_manager_blueprint_path) as patch:
            for prop in external_resources:
                patch.merge_obj(prop, {'use_external_resource': True})

        second_cloudify_config_path = '{}_existing'.format(
            self.env.cloudify_config_path)

        shutil.copy2(self.env.cloudify_config_path,
                     second_cloudify_config_path)

        new_resources = ['manager_server_name', 'manager_port_name']

        with YamlPatcher(second_cloudify_config_path) as patch:
            for prop in new_resources:
                patch.append_value(prop, '2')

        self.cfy2.bootstrap(
            second_manager_blueprint_path,
            inputs=second_cloudify_config_path,
            install_plugins=self.env.install_plugins,
            keep_up_on_failure=False,
            task_retries=5,
            verbose=False
        )

        # Bootstrap updates the active profile, so get_manager_ip returns
        # the IP of the second manager
        self.client2 = create_rest_client(self.get_manager_ip())
    def prepare_manager(self):
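        """Prepare the manager that the upgrade test will run against.

        Uses the externally provided manager from the handler
        configuration's ``upgrade_manager`` entry when available, otherwise
        bootstraps a new one, and records a REST client and the manager
        version at bootstrap time.
        """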
        # Note: we're using a separate manager checkout, so we need to create
        # our own utilities (cfy and the REST client) rather than use the
        # testenv ones.
        self.cfy_workdir = tempfile.mkdtemp(prefix='manager-upgrade-')
        self.addCleanup(shutil.rmtree, self.cfy_workdir)
        self.manager_cfy = get_cfy()
        self.manager_inputs = self._get_bootstrap_inputs()

        if self._use_external_manager:
            upgrade_config = self.env.handler_configuration['upgrade_manager']
            self.upgrade_manager_ip = upgrade_config['public_ip']
            self.manager_private_ip = upgrade_config['private_ip']
            self.manager_cfy.use(self.upgrade_manager_ip)
        else:
            self.bootstrap_manager()

        self.rest_client = create_rest_client(self.upgrade_manager_ip)

        self.bootstrap_manager_version = self.get_curr_version()
    def test_hello_world(self):
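        """Deploy a manager image, wait for its configuration workflow to
        finish, then run the hello-world blueprint against it."""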
        self._deploy_manager()

        self.cfy = get_cfy()
        self.cfy.use(self.manager_public_ip)

        self.logger.info('Waiting for config install workflow to finish...')
        self.wait_for_config_to_finish(self.client)
        self.logger.info('...workflow finished.')

        # Once we've reconnected using `cfy use`, the REST client needs to be
        # updated as well:
        self.client = create_rest_client(self.manager_public_ip)

        time.sleep(120)

        self._run(
            blueprint_file=self.hello_world_blueprint_file,
            inputs=self.hello_world_inputs,
            influx_host_ip=self.manager_public_ip,
        )
    def setUp(self):
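        """Set up the test environment, logging, a working directory, the
        REST client and a cfy profile pointing at the manager (if one is
        available)."""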
        self.env = test_environment.setup()
        self.logger = logging.getLogger(self._testMethodName)
        self.logger.setLevel(logging.INFO)
        # TODO: remove before merging
        # logger = logging.getLogger('sh.command')
        # logger.setLevel(logging.WARNING)
        # logger = logging.getLogger('sh.stream_bufferer')
        # logger.setLevel(logging.WARNING)
        # logger = logging.getLogger('cloudify.rest_client')
        # logger.setLevel(logging.WARNING)
        self.logger.info('Starting test setUp')
        self.workdir = tempfile.mkdtemp(prefix='cosmo-test-')
        management_user = getattr(self.env, 'management_user_name', None)
        management_port = getattr(self.env, 'management_port', 22)
        management_key_path = getattr(self.env, 'management_key_path', None)

        self.client = self.env.rest_client
        self.test_id = 'system-test-{0}-{1}'.format(
            self._testMethodName,
            time.strftime("%Y%m%d-%H%M"))

        self.cfy = get_cfy()
        if self.env.management_ip:
            self.cfy.use(
                self.env.management_ip,
                manager_user=management_user,
                manager_key=management_key_path,
                manager_port=management_port
            )

        self.blueprint_yaml = None
        self._test_cleanup_context = self.env.handler.CleanupContext(
            self._testMethodName, self.env)
        self.addCleanup(self._cleanup)
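        # unittest truncates long assertion diffs; raise the limit to 10 MB.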
        self.maxDiff = 1024 * 1024 * 10
    def _upload_plugins(self):
        downloaded_wagon_paths = self._download_wagons()
        for wagon in downloaded_wagon_paths:
            self.logger.info('Uploading {0}'.format(wagon))
            get_cfy().plugins.upload(wagon, verbose=True)
    def test_hello_world(self):
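        """Deploy a manager image with non-default credentials, verify that
        the configuration blueprint replaced the SSH host key and the SSL
        certificate, then run the hello-world blueprint over the secured
        REST endpoint."""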
        self._deploy_manager()

        os.environ[constants.CLOUDIFY_USERNAME_ENV] = self.conf.get(
            'new_manager_username', 'new')
        os.environ[constants.CLOUDIFY_PASSWORD_ENV] = self.conf.get(
            'new_manager_password', 'new')

        self.logger.info('Waiting for config install workflow to finish...')
        self.wait_for_config_to_finish(self.client)

        self.cfy = get_cfy()
        self.cfy.use(self.manager_public_ip, rest_port=443)

        self.logger.info('...workflow finished.')
        post_config_key = get_ssh_host_key(self.manager_public_ip)
        self.logger.info(
            'Template was reconfigured with SSH host key: {key}'.format(
                key=post_config_key,
            )
        )

        self.assertNotEqual(
            self.template_key, post_config_key,
            'SSH host key did not change when configuration blueprint ran.')

        post_config_cert = get_ssl_cert((self.manager_public_ip, 443))
        self.logger.info(
            'Template was reconfigured with SSL cert: {key}'.format(
                key=post_config_cert,
            )
        )

        self.assertNotEqual(
            self.template_ssl_cert, post_config_cert,
            'SSL certificate did not change when configuration blueprint ran.')

        self.assertIn(
            'IP Address:{}'.format(self.manager_public_ip),
            post_config_cert['subject_altnames'],
            'SSL certificate does not contain manager public IP.')

        post_config_cert_path = os.path.join(self.base_temp_dir,
                                             'new_ssl_cert.pem')
        with open(post_config_cert_path, 'w') as f:
            f.write(post_config_cert['cert'])

        # Once we've reconnected using `cfy use`, the REST client needs to be
        # updated as well:
        self.logger.debug('Recreating REST client with SSL cert')
        self.client = create_rest_client(
            self.manager_public_ip,
            secure=True,
            cert=post_config_cert_path,
            trust_all=False,
        )

        self.logger.debug('Reconnecting CLI client with SSL cert')
        del os.environ['CLOUDIFY_SSL_TRUST_ALL']
        os.environ['CLOUDIFY_SSL_CERT'] = post_config_cert_path

        self.cfy = get_cfy()
        self.cfy.use(self.manager_public_ip, rest_port=443)

        time.sleep(120)

        self._run(
            blueprint_file=self.hello_world_blueprint_file,
            inputs=self.hello_world_inputs,
            influx_host_ip=self.manager_public_ip,
        )