Example 1
    def bootstrap(self,
                  blueprint_path,
                  inputs_file=None,
                  install_plugins=True,
                  keep_up_on_failure=False,
                  validate_only=False,
                  reset_config=False,
                  task_retries=5,
                  task_retry_interval=90,
                  subgraph_retries=2,
                  verbose=False):
        with self.workdir:
            cfy.init(reset_config=reset_config).wait()

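            # Patch the local provider context with the requested
            # number of subgraph retries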
            with YamlPatcher(get_configuration_path()) as patch:
                prop_path = ('local_provider_context.'
                             'cloudify.workflows.subgraph_retries')
                patch.set_value(prop_path, subgraph_retries)

            if not inputs_file:
                inputs_file = self._get_inputs_in_temp_file({}, 'manager')

            cfy.bootstrap(blueprint_path=blueprint_path,
                          inputs=inputs_file,
                          install_plugins=install_plugins,
                          keep_up_on_failure=keep_up_on_failure,
                          validate_only=validate_only,
                          task_retries=task_retries,
                          task_retry_interval=task_retry_interval,
                          verbose=verbose).wait()

    def _update_manager_blueprint(self):
        self._update_manager_blueprints_overrides()

        with YamlPatcher(self.test_manager_blueprint_path) as patch:
            for prop_path, new_value in \
                    self.manager_blueprint_overrides.items():
                patch.set_value(prop_path, new_value)
Example 3
    # Used as a context manager (see Example 17), so contextlib's
    # contextmanager decorator is assumed here.
    @contextmanager
    def update_cloudify_config(self):
        with YamlPatcher(self.env.cloudify_config_path) as patch:
            yield patch
        self.env.cloudify_config = yaml.load(
            self.env.cloudify_config_path.text())
        self.env._config_reader = self.CloudifyConfigReader(
            self.env.cloudify_config,
            manager_blueprint_path=self.env._manager_blueprint_path)

    def setUp(self):
        super(TwoManagersTest, self).setUp()
        self.workdir2 = tempfile.mkdtemp(prefix='cloudify-testenv-')

        self.cfy2 = CfyHelper(self.workdir2, testcase=self)
        second_manager_blueprint_path = '{}_existing'.format(
            self.env._manager_blueprint_path)

        shutil.copy2(self.env._manager_blueprint_path,
                     second_manager_blueprint_path)

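        # Mark the shared networking nodes as external resources so this
        # bootstrap reuses them instead of recreating them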
        external_resources = [
            'node_templates.management_network.properties',
            'node_templates.management_subnet.properties',
            'node_templates.router.properties',
            'node_templates.agents_security_group.properties',
            'node_templates.management_security_group.properties',
        ]

        with YamlPatcher(second_manager_blueprint_path) as patch:
            for prop in external_resources:
                patch.merge_obj(prop, {'use_external_resource': True})

        second_cloudify_config_path = '{}_existing'.format(
            self.env.cloudify_config_path)

        shutil.copy2(self.env.cloudify_config_path,
                     second_cloudify_config_path)

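        # Append '2' to the second manager's server and port names to
        # keep them distinct from the first manager's resources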
        new_resources = ['manager_server_name', 'manager_port_name']

        with YamlPatcher(second_cloudify_config_path) as patch:
            for prop in new_resources:
                patch.append_value(prop, '2')

        self.cfy2.bootstrap(blueprint_path=second_manager_blueprint_path,
                            inputs_file=second_cloudify_config_path,
                            install_plugins=self.env.install_plugins,
                            keep_up_on_failure=False,
                            task_retries=5,
                            verbose=False)

        self.client2 = create_rest_client(self.cfy2.get_management_ip())
Example 5
    def test_puppet_standalone_without_download(self):
        id_ = "{0}-puppet-standalone-{1}-{2}".format(self.test_id, 'nodl',
                                                     str(int(time.time())))
        blueprint_dir = self.copy_blueprint('blueprints', get_resources_path())

        self.blueprint_yaml = (blueprint_dir /
                               'puppet-standalone-test-blueprint.yaml')
        with YamlPatcher(self.blueprint_yaml) as blueprint:
            _, inputs = update_blueprint(self.env, blueprint,
                                         'puppet-standalone-nodl')
        self.execute_and_check(id_, inputs=inputs)

    def test_windows(self):

        blueprint_path = self.copy_blueprint('windows')
        self.blueprint_yaml = blueprint_path / 'blueprint.yaml'
        with YamlPatcher(self.blueprint_yaml) as patch:
            patch.set_value('node_templates.vm.properties.image',
                            self.env.windows_image_name)
            patch.set_value('node_templates.vm.properties.flavor',
                            self.env.medium_flavor_id)

        self.upload_deploy_and_execute_install()
        self.execute_uninstall()

    def bootstrap(self):
        with YamlPatcher(self.test_inputs_path) as inputs_patch:
            inputs_patch.set_value('image_id', UBUNTU_DOCKER_IMAGE_ID)

        self.cfy.bootstrap(blueprint_path=self.test_manager_blueprint_path,
                           inputs_file=self.test_inputs_path,
                           task_retries=10,
                           install_plugins=self.env.install_plugins)

        # override the client instance to use the correct ip
        self.client = CloudifyClient(self.cfy.get_management_ip())

        self.addCleanup(self.cfy.teardown)

    def modify_blueprint(self):
        with YamlPatcher(self.blueprint_yaml) as patch:
            vm_props_path = 'node_types.nodecellar\.nodes\.MonitoredServer' \
                            '.properties'
            vm_type_path = 'node_types.vm_host.properties'
            patch.merge_obj(
                '{0}.server.default'.format(vm_type_path), {
                    'image': self.env.ubuntu_trusty_image_name,
                    'flavor': self.env.flavor_name
                })
            # Use ubuntu trusty 14.04 as agent machine
            patch.merge_obj('{0}.server.default'.format(vm_props_path),
                            {'image': self.env.ubuntu_trusty_image_id})
Example 9
    def modify_yaml(self, blueprint_path, security_group_name):
        with YamlPatcher(blueprint_path) as patch:
            vm_properties_path = 'node_templates.vm.properties'
            patch.merge_obj('{0}.cloudify_agent'.format(vm_properties_path), {
                'user': self.env.cloudify_agent_user,
            })
            patch.merge_obj(
                '{0}'.format(vm_properties_path), {
                    'image': self.env.ubuntu_image_name,
                    'flavor': self.env.flavor_name,
                })
            sg_name_path = 'node_templates.security_group.properties' \
                           '.security_group.name'
            patch.set_value(sg_name_path, security_group_name)
Example 10
    def setUp(self, *args, **kwargs):

        super(PuppetPluginAgentTest, self).setUp(*args, **kwargs)

        blueprint_dir = self.copy_blueprint('blueprints', get_resources_path())
        self.blueprint_dir = blueprint_dir
        if 'CLOUDIFY_TEST_PUPPET_IP' in os.environ:
            self.logger.info('Using existing Puppet server at {0}'.format(
                os.environ['CLOUDIFY_TEST_PUPPET_IP']))
            self.puppet_server_ip = os.environ['CLOUDIFY_TEST_PUPPET_IP']
            self.puppet_server_id = None
            return

        self.logger.info('Setting up Puppet master')

        self.blueprint_yaml = (blueprint_dir /
                               'puppet-server-by-puppet-blueprint.yaml')

        with YamlPatcher(self.blueprint_yaml) as blueprint:
            bp_info, inputs = update_blueprint(self.env, blueprint,
                                               'puppet-master')

        self.puppet_server_hostname = bp_info['hostnames'][0]

        self.puppet_server_id = self.test_id + '-puppet-master'
        id_ = self.puppet_server_id
        before, after = self.upload_deploy_and_execute_install(
            blueprint_id=id_, deployment_id=id_, inputs=inputs)

        fip_node = find_node_state('ip', after['node_state'][id_])
        self.puppet_server_ip = \
            fip_node['runtime_properties']['floating_ip_address']

        fabric_env = fabric.api.env
        fabric_env.update({
            'timeout': 30,
            'user': bp_info['users'][0],
            'key_filename': get_actual_keypath(self.env,
                                               self.env.agent_key_path),
            'host_string': self.puppet_server_ip,
        })

        setup_puppet_server(blueprint_dir)
Example 11
    def prepare_manager_blueprint(self):
        super(TestWindowsOfflineBootstrap, self).prepare_manager_blueprint()

        mng_blueprint = self._get_remote_blueprint()
        mng_blueprint_yaml = \
            self._get_yaml_in_temp_file(mng_blueprint, 'tmp_userdata_bp')
        # disable DNS on the manager host by blanking /etc/resolv.conf
        user_data = """#!/bin/bash
    echo "" > /etc/resolv.conf
    chattr +i /etc/resolv.conf
    """
        with YamlPatcher(mng_blueprint_yaml) as patcher:
            patcher.set_value(
                'node_templates.manager_host.properties'
                '.parameters.user_data', user_data)

        self.write_file_remotely(mng_blueprint_yaml,
                                 self.manager_blueprint_path)

    def _modify_blueprint_use_external_resource(self):
        node_instances = self.client.node_instances.list(
            deployment_id=self.test_id)

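        # Map each node to the external resource id recorded in its
        # runtime properties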
        node_id_to_external_resource_id = {
            node_instance.node_id:
            node_instance.runtime_properties['external_id']
            for node_instance in node_instances
        }

        with YamlPatcher(self.blueprint_yaml) as patch:
            for node_id, resource_id in \
                    node_id_to_external_resource_id.iteritems():
                patch.merge_obj(
                    'node_templates.{0}.properties'.format(node_id), {
                        'use_external_resource': True,
                        'resource_id': resource_id
                    })
Example 13
    def test_puppet_agent(self):
        blueprint_dir = self.blueprint_dir
        self.blueprint_yaml = (blueprint_dir /
                               'puppet-agent-test-blueprint.yaml')
        with YamlPatcher(self.blueprint_yaml) as blueprint:
            bp_info, inputs = update_blueprint(
                self.env, blueprint, 'puppet-agent', {
                    'puppet_server_ip': self.puppet_server_ip,
                })

        id_ = self.test_id + '-puppet-agent-' + str(int(time.time()))
        before, after = self.upload_deploy_and_execute_install(
            blueprint_id=id_, deployment_id=id_, inputs=inputs)

        fip_node = find_node_state('ip', after['node_state'][id_])
        puppet_agent_ip = fip_node['runtime_properties']['floating_ip_address']

        fabric_env = fabric.api.env
        fabric_env.update({
            'timeout': 30,
            'user': bp_info['users'][0],
            'key_filename': get_actual_keypath(self.env,
                                               self.env.agent_key_path),
            'host_string': puppet_agent_ip,
        })

        f = '/tmp/cloudify_operation_create'

        out = fabric.api.run('[ -f {0} ]; echo $?'.format(f))
        self.assertEquals(out, '0')

        out = fabric.api.run('cat {0}'.format(f))
        self.assertEquals(out, id_)

        self.execute_uninstall(id_)
Example 14
    def test_chef_solo(self):
        agent_key_file = get_actual_keypath(self.env, self.env.agent_key_path)
        blueprint_dir = self.blueprint_dir
        self.blueprint_yaml = blueprint_dir / 'chef-solo-test-blueprint.yaml'
        with YamlPatcher(self.blueprint_yaml) as blueprint:
            bp_info, inputs = update_blueprint(self.env, blueprint,
                                               'chef-solo')

        id_ = self.test_id + '-chef-solo-' + str(int(time.time()))
        before, after = self.upload_deploy_and_execute_install(id_,
                                                               id_,
                                                               inputs=inputs)

        fip_node = find_node_state('ip', after['node_state'][id_])
        chef_solo_ip = fip_node['runtime_properties']['floating_ip_address']

        fabric_env = fabric.api.env
        fabric_env.update({
            'timeout': 30,
            'user': bp_info['users'][0],
            'key_filename': str(agent_key_file),
            'host_string': chef_solo_ip,
        })

        expected_files_contents = (
            ('/tmp/blueprint.txt', 'Great success number #2 !'),
            ('/tmp/blueprint2.txt', '/tmp/blueprint.txt'),
            ('/tmp/chef_node_env.e1.txt', 'env:e1'),
            ('/tmp/chef_node_data_bag_user.db1.i1.txt', 'db1-i1-k1'),
        )

        for file_name, expected_content in expected_files_contents:
            actual_content = fabric.api.run('cat ' + file_name)
            msg = "File '{0}' should have content '{1}' but has '{2}'".format(
                file_name, expected_content, actual_content)
            self.assertEquals(actual_content, expected_content, msg)

        self.execute_uninstall(id_)
Example 15
    def test_chef_client(self):
        blueprint_dir = self.blueprint_dir
        self.blueprint_yaml = blueprint_dir / 'chef-client-test-blueprint.yaml'
        with YamlPatcher(self.blueprint_yaml) as blueprint:
            _, inputs = update_blueprint(
                self.env, blueprint, 'chef-server', {
                    'chef_server_ip': self.chef_server_ip,
                    'chef_server_hostname': self.chef_server_hostname,
                })
            chef_node = get_nodes_of_type(blueprint,
                                          'cloudify.chef.nodes.DBMS')[0]
            chef_config = chef_node['properties']['chef_config']
            chef_config['chef_server_url'] = 'https://{0}:443'.format(
                self.chef_server_ip)
            chef_config['validation_client_name'] = 'chef-validator'
            chef_config['validation_key'] = (path(blueprint_dir) /
                                             'chef-validator.pem').text()

        id_ = self.test_id + '-chef-client-' + str(int(time.time()))
        before, after = self.upload_deploy_and_execute_install(id_,
                                                               id_,
                                                               inputs=inputs)

        fip_node = find_node_state('ip', after['node_state'][id_])
        chef_client_ip = fip_node['runtime_properties']['floating_ip_address']

        fabric_env = fabric.api.env
        fabric_env.update({
            # XXX: using the same user for the connection here only works
            #      by accident; TODO: take it from update_blueprint()'s
            #      bp_info, as in setUp()
            'host_string': chef_client_ip,
        })

        out = fabric.api.run('cat /tmp/blueprint.txt')
        self.assertEquals(out, 'Great success!')

        self.execute_uninstall(id_)
Example 16
    def modify_blueprint(self):
        with YamlPatcher(self.blueprint_yaml) as patch:
            patch.merge_obj('groups', self.AUTOHEAL_GROUP_YAML)
        print self.blueprint_yaml
Example 17
    def __init__(self):
        self._initial_cwd = os.getcwd()
        self._global_cleanup_context = None
        self._management_running = False
        self.rest_client = None
        self.management_ip = None
        self.handler = None
        self._manager_blueprint_path = None
        self._workdir = tempfile.mkdtemp(prefix='cloudify-testenv-')

        if HANDLER_CONFIGURATION not in os.environ:
            raise RuntimeError('handler configuration name must be configured '
                               'in "HANDLER_CONFIGURATION" env variable')
        handler_configuration = os.environ[HANDLER_CONFIGURATION]
        suites_yaml_path = os.environ.get(
            SUITES_YAML_PATH,
            path(__file__).dirname().dirname().dirname() / 'suites' /
            'suites' / 'suites.yaml')
        with open(suites_yaml_path) as f:
            self.suites_yaml = yaml.load(f.read())
        if os.path.exists(os.path.expanduser(handler_configuration)):
            configuration_path = os.path.expanduser(handler_configuration)
            with open(configuration_path) as f:
                self.handler_configuration = yaml.load(f.read())
        else:
            self.handler_configuration = self.suites_yaml[
                'handler_configurations'][handler_configuration]

        self.cloudify_config_path = path(os.path.expanduser(
            self.handler_configuration['inputs']))

        if not self.cloudify_config_path.isfile():
            raise RuntimeError('config file configured in handler '
                               'configuration does not seem to exist: {0}'
                               .format(self.cloudify_config_path))

        if 'manager_blueprint' not in self.handler_configuration:
            raise RuntimeError(
                'manager blueprint must be configured in handler '
                'configuration')

        manager_blueprint = self.handler_configuration['manager_blueprint']
        self._manager_blueprint_path = os.path.expanduser(
            manager_blueprint)

        # make temp config files that can be modified freely
        self._generate_unique_configurations()

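        # Apply the 'manager_blueprint_override' values from the
        # handler configuration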
        with YamlPatcher(self._manager_blueprint_path) as patch:
            manager_blueprint_override = process_variables(
                self.suites_yaml,
                self.handler_configuration.get(
                    'manager_blueprint_override', {}))
            for key, value in manager_blueprint_override.items():
                patch.set_value(key, value)

        handler = self.handler_configuration['handler']
        try:
            handler_module = importlib.import_module(
                'system_tests.{0}'.format(handler))
        except ImportError:
            handler_module = importlib.import_module(
                'suites.helpers.handlers.{0}.handler'.format(handler))
        handler_class = handler_module.handler
        self.handler = handler_class(self)

        self.cloudify_config = yaml.load(self.cloudify_config_path.text())
        self._config_reader = self.handler.CloudifyConfigReader(
            self.cloudify_config,
            manager_blueprint_path=self._manager_blueprint_path)
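
        # Apply the 'inputs_override' values from the handler configuration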
        with self.handler.update_cloudify_config() as patch:
            processed_inputs = process_variables(
                self.suites_yaml,
                self.handler_configuration.get('inputs_override', {}))
            for key, value in processed_inputs.items():
                patch.set_value(key, value)

        if 'manager_ip' in self.handler_configuration:
            self._running_env_setup(self.handler_configuration['manager_ip'])

        self.install_plugins = self.handler_configuration.get(
            'install_manager_blueprint_dependencies', True)

        if self.handler_configuration.get('clean_env_on_init', False) is True:
            logger.info('Cleaning environment on init..')
            self.handler.CleanupContext.clean_all(self)

        global test_environment
        test_environment = self
Example 18
    def setUp(self, *args, **kwargs):

        super(ChefPluginClientTest, self).setUp(*args, **kwargs)
        agent_key_file = get_actual_keypath(self.env, self.env.agent_key_path)

        blueprint_dir = self.copy_blueprint('chef-plugin')
        self.blueprint_yaml = (blueprint_dir /
                               'chef-server-by-chef-solo-blueprint.yaml')

        with YamlPatcher(self.blueprint_yaml) as blueprint:
            bp_info, inputs = update_blueprint(self.env, blueprint,
                                               'chef-server')

        self.chef_server_hostname = '{0}{1}'.format(
            self.env.resources_prefix.replace('_', '-'),
            bp_info['hostnames'][0])

        cookbooks_dir = blueprint_dir / 'cookbooks'

        def run(*args, **kwargs):
            return subprocess.check_output(*args, **kwargs)

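        # Download the chef-server cookbook, unpack it and rename it to
        # 'chef-server'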
        with cookbooks_dir:
            run([
                'wget',
                '-q',
                '-O',
                'chef-server.zip',
                CHEF_SERVER_COOKBOOK_ZIP_URL,
            ])
            ZipFile('chef-server.zip').extractall()
            chef_cookbook_dir = cookbooks_dir.glob('chef-server-*')[0]
            run(['mv', chef_cookbook_dir, 'chef-server'])
            # The next line is needed because Chef cookbooks are required
            # to declare all of their dependencies, even if they don't use
            # them. We don't need git; it's only used in the
            # chef-cookbook::dev recipe.
            run(['sed', '-i', "/depends 'git'/d", 'chef-server/metadata.rb'])

        with blueprint_dir:
            run(['tar', 'czf', 'cookbooks.tar.gz', 'cookbooks'])

        self.chef_server_id = self.test_id + '-chef-server'
        id_ = self.chef_server_id
        before, after = self.upload_deploy_and_execute_install(id_,
                                                               id_,
                                                               inputs=inputs)

        fip_node = find_node_state('ip', after['node_state'][id_])
        self.chef_server_ip = fip_node['runtime_properties'][
            'floating_ip_address']

        fabric_env = fabric.api.env
        fabric_env.update({
            'timeout': 30,
            'user': bp_info['users'][0],
            'key_filename': str(agent_key_file),
            'host_string': self.chef_server_ip,
        })

        cookbook_local_path = os.path.abspath(
            os.path.join(get_blueprint_path('chef-plugin'),
                         'cookbook-create-file.tar.gz'))
        setup_chef_server(blueprint_dir, [[
            'create-file',
            cookbook_local_path,
        ]])
        self.blueprint_dir = blueprint_dir
Example 19
    def _test_puppet_standalone_with_download(self, manifests_are_from_url):
        """ Tests standalone Puppet.
        manifests_are_from_url True ->
            puppet_config:
                download: http://....
                execute: -- removed
                manifest: site.pp
        manifests_are_from_url False ->
                download: /....
                execute:
                    configure: -- removed
        """

        mode = ['resource', 'url'][manifests_are_from_url]
        id_ = "{0}-puppet-standalone-{1}-{2}".format(self.test_id,
                                                     mode,
                                                     str(int(time.time())))
        _url = ('http://' +
                self.env.management_ip +
                '/resources/blueprints/' +
                id_ +
                '/' +
                MANIFESTS_FILE_NAME)

        download_from = ['/' + MANIFESTS_FILE_NAME, _url][
            manifests_are_from_url]

        def call(cmd):
            print("Executing: {0}".format(' '.join(cmd)))
            # subprocess.check_call(cmd, stdout=sys.stdout, stderr=sys.stderr)
            # Trying without piping since this caused the following problem:
            # Traceback (most recent call last):
            # File "/usr/lib/python2.7/subprocess.py", line 506, in check_call
            # retcode = call(*popenargs, **kwargs)
            # File "/usr/lib/python2.7/subprocess.py", line 493, in call
            # return Popen(*popenargs, **kwargs).wait()
            # File "/usr/lib/python2.7/subprocess.py", line 672, in __init__
            # errread, errwrite) = self._get_handles(stdin, stdout, stderr)
            # File "/usr/lib/python2.7/subprocess.py", line 1053, in _get_handles  # noqa
            # c2pwrite = stdout.fileno()
            # AttributeError: 'Tee' object has no attribute 'fileno'
            subprocess.check_call(cmd)

        blueprint_dir = self.copy_blueprint('puppet-plugin')

        # Download manifests
        file_name = os.path.join(tempfile.gettempdir(),
                                 self.test_id + '.manifests.tar.gz')
        temp_dir = tempfile.mkdtemp('.manifests', self.test_id + '.')
        call(['wget', '-O', file_name, MANIFESTS_URL])
        call(['tar', '-vxzf', file_name, '-C', temp_dir,
              '--xform', 's/^[^\/]\+\///'])
        call(['tar', '-vczf', os.path.join(blueprint_dir, MANIFESTS_FILE_NAME),
              '-C', temp_dir, '.'])

        self.blueprint_yaml = (
            blueprint_dir / 'puppet-standalone-test-blueprint.yaml')
        with YamlPatcher(self.blueprint_yaml) as blueprint:
            _, inputs = update_blueprint(self.env, blueprint,
                                         'puppet-standalone-' + mode)
            conf = blueprint.obj['node_templates']['puppet_node_one'][
                'properties']['puppet_config']
            conf['download'] = download_from
            if manifests_are_from_url:
                del conf['execute']
                conf['manifest'] = {'start': 'manifests/site.pp'}
            else:
                del conf['execute']['configure']
        self.execute_and_check(id_, inputs=inputs)
Example 20
    def _bootstrap_manager_3_2_1(self):
        self.logger.info('Bootstrapping manager 3.2.1')

        self.assertTrue(os.path.exists(self.manager_key_path))
        self.assertTrue(os.path.exists(self.agents_key_path))

        manager_name = self.test_id + '-manager-321'

        # generate manager inputs file
        inputs_template_vars = {
            'keystone_username': self.env.keystone_username,
            'keystone_password': self.env.keystone_password,
            'keystone_tenant_name': self.env.keystone_tenant_name,
            'keystone_url': self.env.keystone_url,
            'region': self.env.region,
            'flavor_id': self.env.medium_flavor_id,
            'image_id': self.env.ubuntu_trusty_image_id,
            'manager_server_user': '******',
            'external_network_name': self.env.external_network_name,
            'resources_prefix': self.env.resources_prefix,
            'manager_server_name': manager_name,

            # shared settings
            'manager_public_key_name': self.manager_public_key_name,
            'agent_public_key_name': self.agent_public_key_name,
            'manager_private_key_path': self.manager_key_path,
            'agent_private_key_path': self.agents_key_path,
            'management_network_name': self.management_network_name,
            'management_subnet_name': self.management_subnet_name,
            'management_router': self.management_router,
            'agents_user': self.agents_user,

            # private settings
            'manager_security_group_name': manager_name + '-m-sg',
            'agents_security_group_name': manager_name + '-a-sg',
            'manager_port_name': manager_name + '-port',
            'manager_volume_name': manager_name + '-volume'
        }

        self._render_script(OLD_MANAGER_TEMPLATE_NAME, inputs_template_vars,
                            OLD_MANAGER_INPUTS_NAME)

        checkout(self.repo_path, '3.2.1-build')

        external_resources = [
            'node_templates.management_network.properties',
            'node_templates.management_subnet.properties',
            'node_templates.router.properties',
        ]

        blueprint_path = os.path.join(self.repo_path, 'openstack',
                                      'openstack-manager-blueprint.yaml')

        with YamlPatcher(blueprint_path) as patch:
            for prop in external_resources:
                patch.merge_obj(prop, {'use_external_resource': True})

        template_vars = {
            'work_dir': self.workdir,
            'venv_name': VIRTUALENV_NAME,
            'inputs_file': OLD_MANAGER_INPUTS_NAME,
            'repo_path': self.repo_path
        }

        self._render_script(BOOTSTRAP_TEMPLATE_NAME, template_vars,
                            BOOTSTRAP_SCRIPT_NAME)

        rc = self._run_script(BOOTSTRAP_SCRIPT_NAME)
        if rc:
            self.fail('Bootstrapping manager 3.2.1 failed with exit code: {0}'.
                      format(rc))

        self.addCleanup(self._teardown_manager_3_2_1)