Example #1
0
 def _running_env_setup(self, management_ip):
     """Point the test environment at a running manager.

     Stores ``management_ip`` on the env, builds a REST client for it
     and verifies the manager reports a ``running`` status.

     :param management_ip: IP address of the bootstrapped manager.
     :raises RuntimeError: if the manager is not in ``running`` state.
     """
     self.env.management_ip = management_ip
     self.client = create_rest_client(management_ip)
     response = self.client.manager.get_status()
     # Direct inequality instead of the non-idiomatic ``not ... == ...``.
     if response['status'] != 'running':
         raise RuntimeError(
             'Manager at {0} is not running.'.format(management_ip))
 def _running_env_setup(self, management_ip):
     """Attach to the manager at ``management_ip`` and verify it runs.

     :param management_ip: IP address of the bootstrapped manager.
     :raises RuntimeError: if the manager status is not ``running``.
     """
     self.env.management_ip = management_ip
     self.client = create_rest_client(management_ip)
     response = self.client.manager.get_status()
     # Use ``!=`` rather than the non-idiomatic ``not ... == ...``.
     if response['status'] != 'running':
         raise RuntimeError('Manager at {0} is not running.'
                            .format(management_ip))
 def _update_instances_list(self, outputs):
     """Re-create every instance from the deployment outputs.

     For each instance, its public/private IPs and (optionally) its
     networks dict are read from ``outputs`` and passed to
     ``instance.create``.
     """
     for index, instance in enumerate(self.instances):
         public_ip = outputs['public_ip_address_{}'.format(index)]
         private_ip = outputs['private_ip_address_{}'.format(index)]
         # Some templates don't expose networks as outputs
         raw_networks = outputs.get('networks_{}'.format(index), {})
         # Convert unicode to strings, in order to avoid ruamel issues
         # when loading this dict into the config.yaml
         networks = dict(
             (str(key), str(value))
             for key, value in raw_networks.items())
         rest_client = None
         if hasattr(instance, 'api_version'):
             rest_client = util.create_rest_client(
                 public_ip,
                 username=self._attributes.cloudify_username,
                 password=self._attributes.cloudify_password,
                 tenant=self._attributes.cloudify_tenant,
                 api_version=instance.api_version,
             )
         instance.create(
             index,
             public_ip,
             private_ip,
             networks,
             rest_client,
             self._ssh_key,
             self._cfy,
             self._attributes,
             self._logger,
             self._tmpdir
         )
 def _running_env_setup(self, management_ip):
     # Copied from abstract single host test as this one must be used
     # rather than the one from the broker security test base
     """Attach the env and REST client to a running manager.

     :param management_ip: IP address of the bootstrapped manager.
     :raises RuntimeError: if the manager is not in ``running`` state.
     """
     self.env.management_ip = management_ip
     self.client = create_rest_client(management_ip)
     response = self.client.manager.get_status()
     # Direct inequality instead of the non-idiomatic ``not ... == ...``.
     if response['status'] != 'running':
         raise RuntimeError(
             'Manager at {0} is not running.'.format(management_ip))
 def _running_env_setup(self, management_ip):
     # Copied from abstract single host test as this one must be used
     # rather than the one from the broker security test base
     """Attach the env and REST client to a running manager.

     :param management_ip: IP address of the bootstrapped manager.
     :raises RuntimeError: if the manager is not in ``running`` state.
     """
     self.env.management_ip = management_ip
     self.client = create_rest_client(management_ip)
     response = self.client.manager.get_status()
     # Use ``!=`` rather than the non-idiomatic ``not ... == ...``.
     if response['status'] != 'running':
         raise RuntimeError('Manager at {0} is not running.'
                            .format(management_ip))
    def _bootstrap_manager_3_3(self):
        """Bootstrap a Cloudify 3.3 manager from the openstack blueprint.

        Renders the inputs file from the environment settings, runs
        ``cfy bootstrap``, points ``self.client`` at the new manager and
        registers teardown as cleanup.
        """
        self.logger.info('Bootstrapping manager 3.3')

        name = self.test_id + '-manager-33'

        # Inputs rendered into the manager inputs file.
        template_vars = {
            # OpenStack credentials / location
            'keystone_username': self.env.keystone_username,
            'keystone_password': self.env.keystone_password,
            'keystone_tenant_name': self.env.keystone_tenant_name,
            'keystone_url': self.env.keystone_url,
            'region': self.env.region,
            'flavor_id': self.env.medium_flavor_id,
            'image_id': self.env.centos_7_image_id,
            'manager_server_user': self.env.centos_7_image_user,
            'external_network_name': self.env.external_network_name,
            'resources_prefix': self.env.resources_prefix,
            'manager_server_name': name,
            # shared settings
            'manager_public_key_name': self.manager_public_key_name,
            'agent_public_key_name': self.agent_public_key_name,
            'manager_private_key_path': self.manager_key_path,
            'agent_private_key_path': self.agents_key_path,
            'management_network_name': self.management_network_name,
            'management_subnet_name': self.management_subnet_name,
            'management_router': self.management_router,
            'agents_user': self.agents_user,
            # private settings
            'manager_security_group_name': name + '-m-sg',
            'agents_security_group_name': name + '-a-sg',
            'manager_port_name': name + '-port',
        }

        self._render_script(
            NEW_MANAGER_TEMPLATE_NAME,
            template_vars,
            NEW_MANAGER_INPUTS_NAME
        )

        blueprint_path = os.path.join(
            self.repo_path, 'openstack-manager-blueprint.yaml')
        inputs_path = os.path.join(self.workdir, NEW_MANAGER_INPUTS_NAME)
        self.cfy.bootstrap(blueprint_path, inputs_path)

        self.client = create_rest_client(self.cfy.get_management_ip())
        self.addCleanup(self._teardown_manager_3_3)

        # Needed by later steps that compile python packages on the manager.
        self._run_code_on_manager_3_3('sudo yum install -y gcc python-devel')
    def _bootstrap(self):
        """Bootstrap the test manager and point the REST client at it."""
        self.bootstrap(self.test_manager_blueprint_path,
                       inputs=self.test_inputs_path,
                       install_plugins=self.env.install_plugins)

        # override the client instance to use the correct ip
        self.client = util.create_rest_client(self.get_manager_ip())

        # Always tear the manager down at the end of the test.
        self.addCleanup(self.cfy.teardown, force=True)
    def client(self):
        """Lazily build and cache a REST client for the tier-1 manager.

        :return: REST client pointed at ``self.master_ip`` over HTTPS.
        """
        # Explicit ``is None`` check: a falsy-but-valid cached client must
        # not trigger re-creation (the original used bare truthiness).
        if self._tier_1_client is None:
            self._tier_1_client = util.create_rest_client(
                manager_ip=self.master_ip,
                username=self.attributes.cloudify_username,
                password=self.attributes.cloudify_password,
                tenant=self.attributes.cloudify_tenant,
                protocol='https',
                cert=self._get_tier_1_cert())

        return self._tier_1_client
    def client(self):
        """Lazily build and cache a REST client for the tier-1 manager.

        :return: REST client pointed at ``self.master_ip`` over HTTPS.
        """
        # Explicit ``is None`` check: a falsy-but-valid cached client must
        # not trigger re-creation (the original used bare truthiness).
        if self._tier_1_client is None:
            self._tier_1_client = util.create_rest_client(
                manager_ip=self.master_ip,
                username=self.attributes.cloudify_username,
                password=self.attributes.cloudify_password,
                tenant=self.attributes.cloudify_tenant,
                protocol='https',
                cert=self._get_tier_1_cert()
            )

        return self._tier_1_client
Example #10
0
 def _create_managers_list(self, outputs):
     """Build ``self._managers`` from the deployment outputs.

     One ``_CloudifyManager`` is created per manager, using the
     public/private IPs found in ``outputs`` and a REST client bound to
     the public address.

     :param outputs: deployment outputs holding per-manager addresses.
     """
     self._managers = []
     for i in range(self._number_of_managers):
         public_ip_address = outputs['public_ip_address_{}'.format(i)]
         private_ip_address = outputs['private_ip_address_{}'.format(i)]
         # Fixed local-name typo: was misspelled ``rest_clinet``.
         rest_client = util.create_rest_client(
             public_ip_address,
             username=self._attributes.cloudify_username,
             password=self._attributes.cloudify_password,
             tenant=self._attributes.cloudify_tenant)
         self._managers.append(
             _CloudifyManager(i, public_ip_address, private_ip_address,
                              rest_client, self._ssh_key, self._cfy,
                              self._attributes, self._logger,
                              self.managers_config[i]))
Example #11
0
    def _bootstrap_manager_3_3(self):
        """Bootstrap a Cloudify 3.3 manager from the openstack blueprint.

        Assembles the blueprint inputs from the environment, renders the
        inputs file, bootstraps via cfy, and registers teardown cleanup.
        """
        self.logger.info('Bootstrapping manager 3.3')

        manager_name = self.test_id + '-manager-33'

        # OpenStack credentials / location inputs.
        openstack_vars = {
            'keystone_username': self.env.keystone_username,
            'keystone_password': self.env.keystone_password,
            'keystone_tenant_name': self.env.keystone_tenant_name,
            'keystone_url': self.env.keystone_url,
            'region': self.env.region,
            'flavor_id': self.env.medium_flavor_id,
            'image_id': self.env.centos_7_image_id,
            'manager_server_user': self.env.centos_7_image_user,
            'external_network_name': self.env.external_network_name,
            'resources_prefix': self.env.resources_prefix,
            'manager_server_name': manager_name,
        }
        # shared settings
        shared_vars = {
            'manager_public_key_name': self.manager_public_key_name,
            'agent_public_key_name': self.agent_public_key_name,
            'manager_private_key_path': self.manager_key_path,
            'agent_private_key_path': self.agents_key_path,
            'management_network_name': self.management_network_name,
            'management_subnet_name': self.management_subnet_name,
            'management_router': self.management_router,
            'agents_user': self.agents_user,
        }
        # private settings
        private_vars = {
            'manager_security_group_name': manager_name + '-m-sg',
            'agents_security_group_name': manager_name + '-a-sg',
            'manager_port_name': manager_name + '-port',
        }
        inputs_template_vars = {}
        inputs_template_vars.update(openstack_vars)
        inputs_template_vars.update(shared_vars)
        inputs_template_vars.update(private_vars)

        self._render_script(NEW_MANAGER_TEMPLATE_NAME, inputs_template_vars,
                            NEW_MANAGER_INPUTS_NAME)

        blueprint = os.path.join(self.repo_path,
                                 'openstack-manager-blueprint.yaml')
        self.cfy.bootstrap(
            blueprint,
            os.path.join(self.workdir, NEW_MANAGER_INPUTS_NAME),
        )

        self.client = create_rest_client(self.cfy.get_management_ip())
        self.addCleanup(self._teardown_manager_3_3)

        # Needed by later steps that compile python packages on the manager.
        self._run_code_on_manager_3_3('sudo yum install -y gcc python-devel')
    def prepare_manager(self):
        """Bootstrap the manager under test and record its version.

        A dedicated workdir and CfyHelper are used because this test runs
        against a separate manager checkout, so the testenv utilities
        (cfy, rest client) cannot be reused.
        """
        self.cfy_workdir = tempfile.mkdtemp(prefix='manager-upgrade-')
        self.addCleanup(shutil.rmtree, self.cfy_workdir)
        self.manager_cfy = CfyHelper(cfy_workdir=self.cfy_workdir)

        self.manager_inputs = self._get_bootstrap_inputs()
        self.bootstrap_manager()

        self.rest_client = create_rest_client(self.upgrade_manager_ip)

        # Remember the bootstrapped version for later upgrade assertions.
        version = self.rest_client.manager.get_version()['version']
        self.bootstrap_manager_version = LooseVersion(version)
    def setUp(self):
        """Bootstrap a second manager that reuses the first one's networks."""
        super(TwoManagersTest, self).setUp()

        self.cfy2 = get_cfy()

        # Clone the manager blueprint and mark the shared openstack
        # resources as external so the second bootstrap reuses them.
        blueprint2 = '{}_existing'.format(self.env._manager_blueprint_path)
        shutil.copy2(self.env._manager_blueprint_path, blueprint2)
        shared_resources = (
            'node_templates.management_network.properties',
            'node_templates.management_subnet.properties',
            'node_templates.router.properties',
            'node_templates.agents_security_group.properties',
            'node_templates.management_security_group.properties',
        )
        with YamlPatcher(blueprint2) as patch:
            for path in shared_resources:
                patch.merge_obj(path, {'use_external_resource': True})

        # Clone the inputs, suffixing the names that must stay unique.
        config2 = '{}_existing'.format(self.env.cloudify_config_path)
        shutil.copy2(self.env.cloudify_config_path, config2)
        with YamlPatcher(config2) as patch:
            for prop in ('manager_server_name', 'manager_port_name'):
                patch.append_value(prop, '2')

        self.cfy2.bootstrap(
            blueprint2,
            inputs=config2,
            install_plugins=self.env.install_plugins,
            keep_up_on_failure=False,
            task_retries=5,
            verbose=False
        )

        # Bootstrap updates the active profile, so get_manager_ip returns
        # the IP of the second manager
        self.client2 = create_rest_client(self.get_manager_ip())
    def setUp(self):
        """Bootstrap a second manager beside the primary one.

        The second manager shares the first one's openstack networking
        resources (flagged external) and gets its own workdir and cfy.
        """
        super(TwoManagersTest, self).setUp()
        self.workdir2 = tempfile.mkdtemp(prefix='cloudify-testenv-')

        self.cfy2 = CfyHelper(self.workdir2, testcase=self)

        # Clone the manager blueprint; shared resources become external.
        blueprint2 = '{}_existing'.format(self.env._manager_blueprint_path)
        shutil.copy2(self.env._manager_blueprint_path, blueprint2)
        with YamlPatcher(blueprint2) as patch:
            for prop in (
                    'node_templates.management_network.properties',
                    'node_templates.management_subnet.properties',
                    'node_templates.router.properties',
                    'node_templates.agents_security_group.properties',
                    'node_templates.management_security_group.properties'):
                patch.merge_obj(prop, {'use_external_resource': True})

        # Clone the inputs, suffixing the names that must stay unique.
        config2 = '{}_existing'.format(self.env.cloudify_config_path)
        shutil.copy2(self.env.cloudify_config_path, config2)
        with YamlPatcher(config2) as patch:
            for prop in ('manager_server_name', 'manager_port_name'):
                patch.append_value(prop, '2')

        self.cfy2.bootstrap(
            blueprint_path=blueprint2,
            inputs_file=config2,
            install_plugins=self.env.install_plugins,
            keep_up_on_failure=False,
            task_retries=5,
            verbose=False
        )

        self.client2 = create_rest_client(self.cfy2.get_management_ip())
    def setUp(self):
        """Bootstrap a second manager reusing the first one's resources."""
        super(TwoManagersTest, self).setUp()
        self.workdir2 = tempfile.mkdtemp(prefix='cloudify-testenv-')

        self.cfy2 = CfyHelper(self.workdir2, testcase=self)

        # Copy the manager blueprint and flag the shared openstack
        # resources as external so they are reused, not recreated.
        second_blueprint = '{}_existing'.format(
            self.env._manager_blueprint_path)
        shutil.copy2(self.env._manager_blueprint_path, second_blueprint)

        external_resources = [
            'node_templates.management_network.properties',
            'node_templates.management_subnet.properties',
            'node_templates.router.properties',
            'node_templates.agents_security_group.properties',
            'node_templates.management_security_group.properties',
        ]
        with YamlPatcher(second_blueprint) as patch:
            for resource in external_resources:
                patch.merge_obj(resource, {'use_external_resource': True})

        # Copy the inputs and suffix the names that must be unique.
        second_config = '{}_existing'.format(self.env.cloudify_config_path)
        shutil.copy2(self.env.cloudify_config_path, second_config)

        new_resources = ['manager_server_name', 'manager_port_name']
        with YamlPatcher(second_config) as patch:
            for resource in new_resources:
                patch.append_value(resource, '2')

        self.cfy2.bootstrap(blueprint_path=second_blueprint,
                            inputs_file=second_config,
                            install_plugins=self.env.install_plugins,
                            keep_up_on_failure=False,
                            task_retries=5,
                            verbose=False)

        self.client2 = create_rest_client(self.cfy2.get_management_ip())
    def prepare_manager(self):
        """Prepare the manager to upgrade: reuse an external one or bootstrap.

        Uses a separate manager checkout, so dedicated cfy/rest-client
        helpers are created instead of reusing the testenv ones.
        """
        self.cfy_workdir = tempfile.mkdtemp(prefix='manager-upgrade-')
        self.addCleanup(shutil.rmtree, self.cfy_workdir)
        self.manager_cfy = get_cfy()
        self.manager_inputs = self._get_bootstrap_inputs()

        if self._use_external_manager:
            # A pre-existing manager is configured; just point cfy at it.
            config = self.env.handler_configuration['upgrade_manager']
            self.upgrade_manager_ip = config['public_ip']
            self.manager_private_ip = config['private_ip']
            self.manager_cfy.use(self.upgrade_manager_ip)
        else:
            self.bootstrap_manager()

        self.rest_client = create_rest_client(self.upgrade_manager_ip)

        # Remember the starting version for later upgrade assertions.
        self.bootstrap_manager_version = self.get_curr_version()
    def test_hello_world(self):
        """Deploy the manager image, wait for its config workflow, then run
        the hello-world blueprint against it."""
        self._deploy_manager()

        self.cfy = get_cfy()
        self.cfy.use(self.manager_public_ip)

        self.logger.info('Waiting for config install workflow to finish...')
        self.wait_for_config_to_finish(self.client)
        self.logger.info('...workflow finished.')

        # once we've managed to connect again using `cfy use` we need to update
        # the rest client too:
        self.client = create_rest_client(self.manager_public_ip)

        # NOTE(review): arbitrary settle time with no readiness check —
        # confirm the manager is actually ready before shortening this.
        time.sleep(120)

        self._run(
            blueprint_file=self.hello_world_blueprint_file,
            inputs=self.hello_world_inputs,
            influx_host_ip=self.manager_public_ip,
        )
    def bootstrap_manager(self,
                          inputs,
                          inputs_is_file=False):
        """Bootstrap a Cloudify manager via the remote CLI client.

        :param inputs: bootstrap inputs — a dict, or a path to an inputs
            file when ``inputs_is_file`` is true.
        :param inputs_is_file: whether ``inputs`` is a file path.
        """
        self.logger.info('Bootstrapping Cloudify manager...')
        self.client_executor(
            'init',
            fabric_env=self.centos_client_env,
            within_cfy_env=True)

        install_plugins = ''
        if self.is_install_plugins():
            install_plugins = '--install-plugins'

        # Build the ``-i`` argument once instead of duplicating the whole
        # command template for the file / inline-dict cases.
        if inputs_is_file:
            inputs_arg = inputs
        else:
            # Inline dict: serialize compactly and quote for the shell.
            inputs_arg = '"{0}"'.format(
                json.dumps(inputs).replace('"', "'").replace(' ', ''))
        bootstrap_command = "bootstrap --skip-validations {0} -i {1} {2}".format(
            install_plugins,
            inputs_arg,
            self.manager_blueprint_path)

        out = self.client_executor(
            bootstrap_command,
            fabric_env=self.centos_client_env,
            within_cfy_env=True)

        # TODO: This is really not where specific wording checks for bootstrap
        # output belong, but left in for the moment until this test can be
        # reworked as there is a lot of cruft
        self.assertIn('Bootstrap complete', out, 'Bootstrap has failed')

        self.manager_ip = self._manager_ip()
        self.client = util.create_rest_client(self.manager_ip)
        self.addCleanup(self.teardown_manager)
        self.cfy.use(self.manager_ip)
        self.env._upload_plugins()
Example #19
0
 def _update_instances_list(self, outputs):
     """Re-create every instance from the deployment outputs."""
     for idx, inst in enumerate(self.instances):
         public_ip = outputs['public_ip_address_{}'.format(idx)]
         private_ip = outputs['private_ip_address_{}'.format(idx)]
         # Some templates don't expose networks as outputs
         networks = outputs.get('networks_{}'.format(idx), {})
         # Convert unicode to strings, in order to avoid ruamel issues
         # when loading this dict into the config.yaml
         networks = {str(key): str(val) for key, val in networks.items()}
         if hasattr(inst, 'api_version'):
             client = util.create_rest_client(
                 public_ip,
                 username=self._attributes.cloudify_username,
                 password=self._attributes.cloudify_password,
                 tenant=self._attributes.cloudify_tenant,
                 api_version=inst.api_version,
             )
         else:
             client = None
         inst.create(idx, public_ip, private_ip, networks, client,
                     self._ssh_key, self._cfy, self._attributes,
                     self._logger, self._tmpdir)
 def _assert_no_credentials_or_token_fails(self):
     """Verify that a client built with no credentials is rejected."""
     self.client = util.create_rest_client(self.env.management_ip)
     self._assert_unauthorized(self.client.manager.get_status)
 def _create_client(self, username=None, password=None, token=None):
     """Return a REST client authenticated with the given credentials.

     :param username: optional username for basic auth.
     :param password: optional password for basic auth.
     :param token: optional auth token (alternative to user/password).
     :return: REST client pointed at the env's management IP.
     """
     auth_header = util.get_auth_header(
         username=username, password=password, token=token)
     return util.create_rest_client(
         self.env.management_ip, headers=auth_header)
Example #22
0
def change_rest_client_password(manager, new_password):
    """Rebuild ``manager``'s REST client so it authenticates with
    ``new_password``."""
    manager.client = create_rest_client(manager.ip_address,
                                        password=new_password)
 def set_rest_client(self):
     """Create a REST client for the env's management IP."""
     self.client = util.create_rest_client(self.env.management_ip)
    def _deploy_manager(self):
        """Build and deploy the manager image, wait for it to come up, then
        create and install the ``config`` deployment on it.

        Long-running: polls the manager status, then retries deployment
        creation and execution start until they succeed or attempts run out.
        """
        self.build_with_packer(only=self.packer_build_only)
        self.deploy_image()

        # Default credentials baked into the image.
        os.environ[constants.CLOUDIFY_USERNAME_ENV] = 'cloudify'
        os.environ[constants.CLOUDIFY_PASSWORD_ENV] = 'cloudify'

        self.client = create_rest_client(
            self.manager_public_ip,
            secure=self.secure,
            trust_all=self.secure,
        )

        # Poll (up to 80 * 3s) until the manager reports 'running'.
        response = {'status': None}
        attempt = 0
        max_attempts = 80
        while response['status'] != 'running':
            attempt += 1
            if attempt >= max_attempts:
                raise RuntimeError('Manager did not start in time')
            else:
                time.sleep(3)
            try:
                response = self.client.manager.get_status()
            except CloudifyClientError:
                # Manager not fully ready
                pass
            except ConnectionError:
                # Timeout
                pass

        if self.secure:
            # Record the pre-config SSH/SSL identity so tests can assert
            # it changes after the configuration blueprint runs.
            self.template_key = get_ssh_host_key(self.manager_public_ip)
            self.logger.info(
                'Template was deployed with SSH host key: {key}'.format(
                    key=self.template_key,
                )
            )

            self.template_ssl_cert = get_ssl_cert(
                (self.manager_public_ip, 443))
            self.logger.info(
                'Template was deployed with SSL key: {key}'.format(
                    key=self.template_ssl_cert,
                )
            )

        self.config_inputs.update({
            'agents_security_group_name': self.agents_secgroup,
            'agents_keypair_name': self.agents_keypair,
        })
        if self.secure:
            # Need to add the external IP address to the generated cert
            self.config_inputs.update({
                'manager_names_and_ips': self.manager_public_ip,
            })

        # Arbitrary sleep to wait for manager to actually finish starting as
        # otherwise we suffer timeouts in the next section
        # TODO: This would be better if it actually had some way of checking
        # the manager was fully up and we had a reasonable upper bound on how
        # long we should expect to wait for that
        time.sleep(90)

        # We have to retry this a few times, as even after the manager is
        # accessible we still see failures trying to create deployments
        deployment_created = False
        attempt = 0
        max_attempts = 40
        while not deployment_created:
            attempt += 1
            if attempt >= max_attempts:
                raise RuntimeError('Manager not created in time')
            else:
                time.sleep(3)
            try:
                self.client.deployments.create(
                    blueprint_id='CloudifySettings',
                    deployment_id='config',
                    inputs=self.config_inputs,
                )
                self.addCleanup(self._delete_agents_secgroup)
                self.addCleanup(self._delete_agents_keypair)
                deployment_created = True
            except Exception as err:
                # NOTE(review): unreachable — the loop raises RuntimeError
                # before attempt can reach max_attempts here; confirm and
                # consider removing this dead branch.
                if attempt >= max_attempts:
                    raise err
                else:
                    self.logger.warn(
                        'Saw error {}. Retrying.'.format(str(err))
                    )

        # Same retry pattern for starting the install execution.
        attempt = 0
        max_attempts = 40
        execution_started = False
        while not execution_started:
            attempt += 1
            if attempt >= max_attempts:
                raise RuntimeError('Manager did not start in time')
            else:
                time.sleep(3)
            try:
                self.client.executions.start(
                    deployment_id='config',
                    workflow_id='install',
                )
                execution_started = True
            except Exception as err:
                # NOTE(review): unreachable for the same reason as above.
                if attempt >= max_attempts:
                    raise err
                else:
                    self.logger.warn(
                        'Saw error {}. Retrying.'.format(str(err))
                    )
 def set_rest_client(self):
     """Create an authenticated REST client and share it with the env."""
     auth_header = util.get_auth_header(username=self.TEST_CFY_USERNAME,
                                        password=self.TEST_CFY_PASSWORD)
     self.client = util.create_rest_client(self.env.management_ip,
                                           headers=auth_header)
     self.env.rest_client = self.client
def change_rest_client_password(manager, new_password):
    """Rebuild ``manager``'s REST client so it authenticates with
    ``new_password``."""
    manager.client = create_rest_client(manager.ip_address,
                                        password=new_password)
Example #27
0
def test_restore_snapshot_and_agents_upgrade(cfy, cluster, attributes, logger,
                                             tmpdir):
    """Snapshot manager1, restore it on manager2, upgrade the agents, then
    uninstall the restored deployment.

    :param cfy: cfy CLI fixture.
    :param cluster: cluster fixture exposing at least two managers.
    :param attributes: test attributes (credentials etc.).
    :param logger: test logger fixture.
    :param tmpdir: temporary directory for the downloaded snapshot.
    """
    manager1 = cluster.managers[0]
    manager2 = cluster.managers[1]

    snapshot_id = str(uuid.uuid4())

    logger.info('Creating snapshot on manager1..')
    manager1.client.snapshots.create(snapshot_id, False, False, False)
    assert_snapshot_created(manager1, snapshot_id, attributes)

    local_snapshot_path = str(tmpdir / 'snapshot.zip')

    logger.info('Downloading snapshot from old manager..')
    manager1.client.snapshots.list()
    manager1.client.snapshots.download(snapshot_id, local_snapshot_path)

    manager2.use()
    logger.info('Uploading snapshot to latest manager..')
    snapshot = manager2.client.snapshots.upload(local_snapshot_path,
                                                snapshot_id)
    logger.info('Uploaded snapshot:%s%s', os.linesep,
                json.dumps(snapshot, indent=2))

    cfy.snapshots.list()

    logger.info('Restoring snapshot on latest manager..')
    restore_execution = manager2.client.snapshots.restore(
        snapshot_id,
        tenant_name=manager1.restore_tenant_name,
    )
    logger.info('Snapshot restore execution:%s%s', os.linesep,
                json.dumps(restore_execution, indent=2))

    cfy.executions.list(['--include-system-workflows'])

    restore_execution = wait_for_execution(manager2.client, restore_execution,
                                           logger)
    assert restore_execution.status == 'terminated'

    cfy.executions.list(['--include-system-workflows'])

    manager2.use(tenant=manager1.restore_tenant_name)
    # NOTE(review): the restore above used ``restore_tenant_name`` but this
    # client uses ``tenant_name`` — confirm the mismatch is intentional.
    client = create_rest_client(
        manager2.ip_address,
        username=cluster._attributes.cloudify_username,
        password=cluster._attributes.cloudify_password,
        tenant=manager1.tenant_name,
        api_version=manager2.api_version,
    )

    cfy.deployments.list()
    deployments = client.deployments.list()
    assert 1 == len(deployments)
    # Bug fix: ``deployment_id`` was used below without ever being defined
    # (NameError). Take it from the single restored deployment.
    deployment_id = deployments[0].id

    logger.info('Upgrading agents..')
    cfy.agents.install()

    logger.info('Deleting original {version} manager..'.format(
        version=manager1.branch_name))
    manager1.delete()

    logger.info('Uninstalling deployment from latest manager..')
    cfy.executions.start.uninstall(['-d', deployment_id])
    cfy.deployments.delete(deployment_id)
    def test_hello_world(self):
        """Deploy the manager, verify the config blueprint rotated its SSH
        host key and SSL certificate, then run hello-world over the new
        certificate."""
        self._deploy_manager()

        # Credentials are changed by the configuration blueprint.
        os.environ[constants.CLOUDIFY_USERNAME_ENV
                   ] = self.conf.get('new_manager_username', 'new')
        os.environ[constants.CLOUDIFY_PASSWORD_ENV
                   ] = self.conf.get('new_manager_password', 'new')

        self.logger.info('Waiting for config install workflow to finish...')
        self.wait_for_config_to_finish(self.client)

        self.cfy = get_cfy()
        self.cfy.use(self.manager_public_ip, rest_port=443)

        self.logger.info('...workflow finished.')
        # The SSH host key must differ from the pre-config one recorded
        # in _deploy_manager.
        post_config_key = get_ssh_host_key(self.manager_public_ip)
        self.logger.info(
            'Template was reconfigured with SSH host key: {key}'.format(
                key=post_config_key,
            )
        )

        self.assertNotEqual(
            self.template_key, post_config_key,
            'SSH host key did not change when configuration blueprint ran.')

        # Likewise the SSL certificate must have been regenerated, and
        # must now include the manager's public IP.
        post_config_cert = get_ssl_cert((self.manager_public_ip, 443))
        self.logger.info(
            'Template was reconfigured with SSL cert: {key}'.format(
                key=post_config_cert,
            )
        )

        self.assertNotEqual(
            self.template_ssl_cert, post_config_cert,
            'SSL certificate did not change when configuration blueprint ran.')

        self.assertIn(
            'IP Address:{}'.format(self.manager_public_ip),
            post_config_cert['subject_altnames'],
            'SSL certificate does not contain manager public IP.')

        # Persist the new cert so both the REST client and the CLI can
        # verify against it.
        post_config_cert_path = os.path.join(self.base_temp_dir,
                                             'new_ssl_cert.pem')
        with open(post_config_cert_path, 'w') as f:
            f.write(post_config_cert['cert'])

        # once we've managed to connect again using `cfy use` we need to update
        # the rest client too:
        self.logger.debug('Recreating REST client with SSL cert')
        self.client = create_rest_client(
            self.manager_public_ip,
            secure=True,
            cert=post_config_cert_path,
            trust_all=False,
        )

        self.logger.debug('Reconnectiong cli client with SSL cert')
        # Switch the CLI from trust-all to verifying the saved cert.
        del os.environ['CLOUDIFY_SSL_TRUST_ALL']
        os.environ['CLOUDIFY_SSL_CERT'] = post_config_cert_path

        self.cfy = get_cfy()
        self.cfy.use(self.manager_public_ip, rest_port=443)

        # NOTE(review): arbitrary settle time with no readiness check —
        # confirm before shortening.
        time.sleep(120)

        self._run(
            blueprint_file=self.hello_world_blueprint_file,
            inputs=self.hello_world_inputs,
            influx_host_ip=self.manager_public_ip,
        )