def get_config_holder(log_to_file=True):
    """Build a ConfigHolder wired to MTA's config file and logger.

    :param log_to_file: value stored under the 'log_to_file' key; read by
        MTA.get_logger via the holder (presumably controls file logging —
        TODO confirm against MTA).
    :return: the configured ConfigHolder.
    """
    holder = ConfigHolder(context={"empty": None}, configFile=MTA.config_file)
    holder.set("log_to_file", log_to_file)
    holder.set("log", MTA.get_logger(holder))
    return holder
class MainProgram(CommandBase):
    """A command-line program to download deployment reports."""

    def __init__(self, argv=None):
        # Initialise collaborators before delegating to CommandBase,
        # which drives the parse/doWork lifecycle.
        self.module = ''
        self.endpoint = None
        self.ss_client = None
        self.configHolder = None
        super(MainProgram, self).__init__(argv)

    def parse(self):
        """Declare the CLI options and normalise the parsed values."""
        self.parser.usage = '''usage: %prog [options] [<run-uuid>]

<run-uuid>    UUID of the run to get reports from.'''
        self.addEndpointOption()

        components_help = (
            'Comma separated list of components to download the reports for. '
            'Example: nginx,worker.1,worker.3 - will download reports for all component '
            'instances of nginx and only for instances 1 and 3 of worker. '
            'Default: all instances of all components.')
        self.parser.add_option('-c', '--components', dest='components',
                               default='', help=components_help)

        self.parser.add_option(
            '-o', '--output-dir', dest='output_dir', default=os.getcwd(),
            help='Path to the folder to store the reports. Default: <working directory>/<run-uuid>.')

        self.parser.add_option(
            '--no-orch', dest='no_orch', action='store_true', default=False,
            help='Do not download Orchestrator report.')

        self.options, self.args = self.parser.parse_args()

        # Turn the comma-separated string into a (possibly empty) list.
        raw = self.options.components
        self.options.components = raw.split(',') if raw else []

        self._checkArgs()

    def _checkArgs(self):
        """Accept at most one positional argument: the run UUID."""
        if len(self.args) > 1:
            self.usageExitTooManyArguments()
        elif len(self.args) == 1:
            self.run_uuid = self.args[0]

    def _init_client(self):
        """Build the SlipStream HTTP client from the parsed options."""
        self.configHolder = ConfigHolder(self.options)
        self.configHolder.set('serviceurl', self.options.endpoint)
        self.ss_client = SlipStreamHttpClient(self.configHolder)

    def doWork(self):
        """Entry point: download the reports for the requested run."""
        self._init_client()
        getter = ReportsGetter(self.ss_client.get_api(), self.configHolder)
        getter.get_reports(self.run_uuid,
                           components=self.options.components,
                           no_orch=self.options.no_orch)
class TestOkeanosClientCloud(unittest.TestCase):
    """Construction smoke test for the Okeanos cloud connector."""

    def setUp(self):
        os.environ['SLIPSTREAM_CONNECTOR_INSTANCE'] = 'Test'
        self.ch = ConfigHolder(config={'foo': 'bar'}, context={'foo': 'bar'})
        self.ch.set(KEY_RUN_CATEGORY, RUN_CATEGORY_DEPLOYMENT)

    def test_OkeanosClientCloudInit(self):
        """Connector builds and picks up the run category from the holder."""
        connector = OkeanosClientCloud(self.ch)
        assert connector
        assert connector.run_category == RUN_CATEGORY_DEPLOYMENT
class TestFlexiantClientCloud(unittest.TestCase):
    """Construction smoke test for the Flexiant (FCO) cloud connector."""

    def setUp(self):
        os.environ['SLIPSTREAM_CONNECTOR_INSTANCE'] = 'Test'
        self.ch = ConfigHolder(config={'foo': 'bar'}, context={'foo': 'bar'})
        self.ch.set(KEY_RUN_CATEGORY, RUN_CATEGORY_DEPLOYMENT)

    def test_FcoClientCloudInit(self):
        """Connector builds and picks up the run category from the holder."""
        connector = FlexiantClientCloud(self.ch)
        assert connector
        assert connector.run_category == RUN_CATEGORY_DEPLOYMENT
def get_rtp_all(side_effect, no_block=False):
    """Call Client.get_rtp_all('foo', 'bar') with mocked RTP lookups.

    :param side_effect: side effect installed on both mocked parameter getters.
    :param no_block: value stored under the 'noBlock' config key.
    :return: whatever Client.get_rtp_all returns for ('foo', 'bar').
    """
    holder = ConfigHolder()
    for key, value in (('noBlock', no_block),
                       ('timeout', 1),
                       ('verboseLevel', 3),
                       ('endpoint', 'https://foo.bar')):
        holder.set(key, value)
    # Patch both the class-level and the instance-level lookup paths.
    Client._getRuntimeParameter = Mock(side_effect=side_effect)
    client = Client(holder)
    client.httpClient.getRuntimeParameter = Mock(side_effect=side_effect)
    return client.get_rtp_all('foo', 'bar')
class TestCloudWrapper(TestCloudConnectorsBase):
    """Tests for CloudWrapper over the bundled cloud connector modules."""

    def setUp(self):
        self.serviceurl = 'http://example.com'
        credentials = {
            'username': base64.b64encode('user'),
            'password': base64.b64encode('pass'),
            'cookie_filename': 'cookies',
            'serviceurl': self.serviceurl,
        }
        self.configHolder = ConfigHolder(credentials,
                                         context={'foo': 'bar'},
                                         config={'foo': 'bar'})

        os.environ['SLIPSTREAM_CONNECTOR_INSTANCE'] = 'Test'

    def tearDown(self):
        os.environ.pop('SLIPSTREAM_CONNECTOR_INSTANCE')
        self.configHolder = None
        # Drop any HTTP cache the client may have left behind.
        shutil.rmtree('%s/.cache/' % os.getcwd(), ignore_errors=True)

    def test_getCloudName(self):
        """Every connector module reports the instance name from the env var."""
        for module_name in self.get_cloudconnector_modulenames():
            setattr(self.configHolder, CONFIGPARAM_CONNECTOR_MODULE_NAME, module_name)
            setattr(self.configHolder, KEY_RUN_CATEGORY, RUN_CATEGORY_DEPLOYMENT)
            wrapper = CloudWrapper(self.configHolder)
            wrapper.initCloudConnector()
            assert wrapper.getCloudInstanceName() == 'Test'

    def test_putImageId(self):
        """Updating an image id issues a PUT against the module URL."""
        self.configHolder.set(CONFIGPARAM_CONNECTOR_MODULE_NAME,
                              self.get_cloudconnector_modulename_by_cloudname('local'))
        wrapper = CloudWrapper(self.configHolder)
        wrapper.initCloudConnector()

        wrapper.clientSlipStream.httpClient._call = Mock(return_value=('', ''))

        wrapper._updateSlipStreamImage('module/Name', 'ABC')
        wrapper.clientSlipStream.httpClient._call.assert_called_with(
            '%s/module/Name/Test' % self.serviceurl,
            'PUT', 'ABC', 'application/xml', 'application/xml')
# Exemple #7 (0) — non-code separator from the example dump, kept as a comment
    def test_init_session_fail_no_creds(self):
        """Without credentials a 403 surfaces and a CIMI login is attempted."""
        holder = ConfigHolder()
        holder.context = {}
        holder.set('verboseLevel', 0)
        holder.set('cookie_filename', '/dev/null')

        client = HttpClient(holder)
        client.init_session('http://foo.bar')
        assert client.session is not None
        # No username/password/api-key configured -> empty login params.
        assert client.session.login_params == {}

        forbidden = Mock(spec=Response)
        forbidden.status_code = 403
        forbidden.cookies = None
        forbidden.headers = {}
        client.session._request = Mock(return_value=forbidden)
        client.session.cimi_login = Mock(return_value=forbidden)

        try:
            client.get('http://foo.bar', retry=False)
        except Exception as ex:
            assert ex.code == 403
        assert client.session.cimi_login.called is True
    def test_init_session_fail_no_creds(self):
        # NOTE(review): duplicate of an identically named method earlier in
        # this file; in a single class the later definition would win.
        """A 403 on GET without credentials still triggers a CIMI login."""
        holder = ConfigHolder()
        holder.context = {}
        for key, value in (('verboseLevel', 0),
                           ('cookie_filename', '/dev/null')):
            holder.set(key, value)

        client = HttpClient(holder)
        client.init_session('http://foo.bar')
        assert client.session is not None
        assert client.session.login_params == {}

        denied = Mock(spec=Response)
        denied.status_code = 403
        denied.cookies = None
        denied.headers = {}
        client.session._request = Mock(return_value=denied)
        client.session.cimi_login = Mock(return_value=denied)

        try:
            client.get('http://foo.bar', retry=False)
        except Exception as ex:
            assert ex.code == 403
        assert client.session.cimi_login.called is True
    def test_init_session_login_apikey(self):
        """API key/secret from the config end up in the session login params."""
        holder = ConfigHolder()
        holder.context = {}
        for key, value in (('verboseLevel', 0),
                           ('cookie_filename', '/dev/null'),
                           ('api_key', 'key'),
                           ('api_secret', 'secret')):
            holder.set(key, value)

        client = HttpClient(holder)
        client.init_session('http://foo.bar')
        assert client.session is not None
        assert client.session.login_params
        assert 'key' in client.session.login_params
        assert 'secret' in client.session.login_params
# Exemple #10 (0) — non-code separator from the example dump, kept as a comment
    def test_init_session_login_apikey(self):
        # NOTE(review): duplicate of an identically named method earlier in
        # this file; in a single class the later definition would win.
        """Configured api_key/api_secret are propagated to login_params."""
        holder = ConfigHolder()
        holder.context = {}
        holder.set('verboseLevel', 0)
        holder.set('cookie_filename', '/dev/null')
        holder.set('api_key', 'key')
        holder.set('api_secret', 'secret')

        client = HttpClient(holder)
        client.init_session('http://foo.bar')
        session = client.session
        assert session is not None
        assert session.login_params
        assert 'key' in session.login_params
        assert 'secret' in session.login_params
# Exemple #11 (0) — non-code separator from the example dump, kept as a comment
def ss_get(param, ignore_abort=False, timeout=30, no_block=False):
    """Fetch a runtime parameter via a freshly built Client.

    Returns None if parameter is not set.
    Raises Exceptions.NotFoundError if parameter doesn't exist.
    """
    holder = ConfigHolder(config={'foo': None})
    for key, value in (('ignoreAbort', ignore_abort),
                       ('noBlock', no_block),
                       ('timeout', timeout)):
        holder.set(key, value)
    return Client(holder).getRuntimeParameter(param)
# Exemple #12 (0) — non-code separator from the example dump, kept as a comment
    def test_post_with_data(self):
        """POST forwards the raw payload unchanged as the request body."""
        holder = ConfigHolder()
        holder.context = {}
        for key, value in (('verboseLevel', 0),
                           ('cookie_filename', '/dev/null'),
                           ('api_key', 'key'),
                           ('api_secret', 'secret')):
            holder.set(key, value)
        client = HttpClient(holder)

        response = requests.Response()
        response.status_code = 200
        response.get = Mock(return_value=None)
        response.request = Mock()
        response.request.headers = {}
        # Intercept the outgoing request at the session level.
        requests.sessions.Session.send = Mock(return_value=response)

        payload = 'a=b\nc=d'
        client.post('http://example.com', payload)

        args, kwargs = requests.sessions.Session.send.call_args
        self.assertEqual(len(args), 1)
        self.assertEqual(args[0].body, payload)
    def test_post_with_data(self):
        # NOTE(review): duplicate of an identically named method earlier in
        # this file; in a single class the later definition would win.
        """The body sent over the wire equals the data passed to post()."""
        holder = ConfigHolder()
        holder.context = {}
        holder.set('verboseLevel', 0)
        holder.set('cookie_filename', '/dev/null')
        holder.set('api_key', 'key')
        holder.set('api_secret', 'secret')
        client = HttpClient(holder)

        canned = requests.Response()
        canned.status_code = 200
        canned.get = Mock(return_value=None)
        canned.request = Mock()
        canned.request.headers = {}
        requests.sessions.Session.send = Mock(return_value=canned)

        client.post('http://example.com', 'a=b\nc=d')

        args, kwargs = requests.sessions.Session.send.call_args
        self.assertEqual(len(args), 1)
        sent_request = args[0]
        self.assertEqual(sent_request.body, 'a=b\nc=d')
# Exemple #14 (0) — non-code separator from the example dump, kept as a comment
    def test_init_session_login_internal(self):
        """Username/password from the config end up in the login params."""
        holder = ConfigHolder()
        holder.context = {}
        for key, value in (('verboseLevel', 0),
                           ('cookie_filename', '/dev/null'),
                           ('username', 'foo'),
                           ('password', 'bar')):
            holder.set(key, value)

        client = HttpClient(holder)
        client.init_session('http://foo.bar')
        assert client.session is not None
        assert client.session.login_params
        assert 'username' in client.session.login_params
        assert 'password' in client.session.login_params
    def test_init_session_login_internal(self):
        # NOTE(review): duplicate of an identically named method earlier in
        # this file; in a single class the later definition would win.
        """Internal (username/password) credentials populate login_params."""
        holder = ConfigHolder()
        holder.context = {}
        holder.set('verboseLevel', 0)
        holder.set('cookie_filename', '/dev/null')
        holder.set('username', 'foo')
        holder.set('password', 'bar')

        client = HttpClient(holder)
        client.init_session('http://foo.bar')
        session = client.session
        assert session is not None
        assert session.login_params
        assert 'username' in session.login_params
        assert 'password' in session.login_params
class TestResultArchiver(unittest.TestCase):
    """Live test: download the reports of a run into a local directory.

    Requires module-level ``endpoint``, ``username``, ``password`` and
    ``run_uuid`` to be set; otherwise the test is skipped.
    """

    def setUp(self):
        self.ch = ConfigHolder()
        self.ch.set('serviceurl', endpoint)
        self.ch.set('verboseLevel', 3)

        self.client = Client(self.ch)
        self.client.login(username, password)
        # Share the authenticated session with the ReportsGetter.
        self.ch.set('endpoint', endpoint)
        self.ch.set('session', self.client.get_session())

    def tearDown(self):
        shutil.rmtree(run_uuid, ignore_errors=True)
        # Best-effort logout; narrowed from a bare 'except' so that
        # SystemExit/KeyboardInterrupt are no longer swallowed.
        try:
            self.client.logout()
        except Exception:
            pass

    @unittest.skipIf(not all([username, password, run_uuid]),
                     "Live test. Creds not set.")
    def test_get_reports(self):
        """Reports land in a directory named after the run UUID."""
        rg = ReportsGetter(self.ch)
        rg.get_reports(run_uuid)
        self.assertTrue(os.path.isdir(run_uuid))
class TestResultArchiver(unittest.TestCase):
    """Live test: download the reports of a run into a local directory.

    NOTE(review): duplicate of an identically named class earlier in this
    file; the later definition wins at import time.
    """

    def setUp(self):
        self.ch = ConfigHolder()
        self.ch.set('serviceurl', endpoint)
        self.ch.set('verboseLevel', 3)

        self.client = Client(self.ch)
        self.client.login(username, password)
        # Share the authenticated session with the ReportsGetter.
        self.ch.set('endpoint', endpoint)
        self.ch.set('session', self.client.get_session())

    def tearDown(self):
        shutil.rmtree(run_uuid, ignore_errors=True)
        # Best-effort logout; narrowed from a bare 'except' so that
        # SystemExit/KeyboardInterrupt are no longer swallowed.
        try:
            self.client.logout()
        except Exception:
            pass

    @unittest.skipIf(not all([username, password, run_uuid]),
                     "Live test. Creds not set.")
    def test_get_reports(self):
        """Reports land in a directory named after the run UUID."""
        rg = ReportsGetter(self.ch)
        rg.get_reports(run_uuid)
        self.assertTrue(os.path.isdir(run_uuid))
 def _init_client(self):
     """Create the SlipStream Client from the parsed command-line options."""
     holder = ConfigHolder(self.options, context={'empty': None},
                           config={'empty': None})
     holder.set('serviceurl', self.options.endpoint)
     self.client = Client(holder)
class TestFlexiantClientCloud(unittest.TestCase):
    """Live tests for the Flexiant (FCO) connector.

    The ``xtest_*`` methods are disabled by naming convention; they talk to a
    real cloud endpoint configured via CONFIG_FILE.

    Fix: the body mixed Python-2-only ``print`` statements with function-call
    prints, which makes the module unimportable under Python 3.  All prints
    are single-expression, so they were normalized to the function form,
    which behaves identically on Python 2 and 3.  A stray semicolon and two
    misindented ``finally`` bodies were also cleaned up.
    """

    def setUp(self):
        cn = getConnectorClass().cloudName

        os.environ['SLIPSTREAM_CONNECTOR_INSTANCE'] = cn
        os.environ['SLIPSTREAM_BOOTSTRAP_BIN'] = 'http://example.com/bootstrap'
        os.environ['SLIPSTREAM_DIID'] = \
            '%s-1234-1234-1234-123456789012' % str(int(time.time()))[2:]

        if not os.path.exists(CONFIG_FILE):
            raise Exception('Configuration file %s not found.' % CONFIG_FILE)

        self.ch = ConfigHolder(configFile=CONFIG_FILE, context={'foo': 'bar'})
        self.ch.verboseLevel = int(self.ch.verboseLevel)

        self.user_info = UserInfo(cn)
        self.user_info['General.ssh.public.key'] = self.ch.config['General.ssh.public.key']
        self.user_info[cn + '.user.uuid'] = self.ch.config[cn + '.user.uuid']
        self.user_info[cn + '.username'] = self.ch.config[cn + '.username']
        self.user_info[cn + '.password'] = self.ch.config[cn + '.password']
        self.user_info[cn + '.endpoint'] = self.ch.config[cn + '.endpoint']

        node_name = 'test_node'

        self.multiplicity = int(self.ch.config['multiplicity'])

        # One NodeInstance per requested node multiplicity: test_node.1..N
        self.node_instances = {}
        for i in range(1, self.multiplicity + 1):
            node_instance_name = node_name + '.' + str(i)
            self.node_instances[node_instance_name] = NodeInstance({
                NodeDecorator.NODE_NAME_KEY: node_instance_name.split('.')[0],
                NodeDecorator.NODE_INSTANCE_NAME_KEY: node_instance_name,
                'cloudservice': cn,
                'image.description': 'This is a test image.',
                'image.platform': self.ch.config[cn + '.image.platform'],
                'image.id': self.ch.config[cn + '.imageid'],
                cn + '.ram': self.ch.config[cn + '.ram'],
                cn + '.cpu': self.ch.config[cn + '.cpu'],
                'network': self.ch.config['network']
            })

        # Single "machine" instance used by the image-build test.
        self.node_instance = NodeInstance({
            NodeDecorator.NODE_NAME_KEY: NodeDecorator.MACHINE_NAME,
            NodeDecorator.NODE_INSTANCE_NAME_KEY: NodeDecorator.MACHINE_NAME,
            'cloudservice': cn,
            'image.description': 'This is a test image.',
            'image.platform': self.ch.config[cn + '.image.platform'],
            'image.loginUser': self.ch.config[cn + '.image.loginuser'],
            'image.id': self.ch.config[cn + '.imageid'],
            cn + '.ram': self.ch.config[cn + '.ram'],
            cn + '.cpu': self.ch.config[cn + '.cpu'],
            'network': self.ch.config['network'],
            'image.prerecipe':
"""#!/bin/sh
set -e
set -x

ls -l /tmp
dpkg -l | egrep "nano|lvm" || true
""",
            'image.packages': ['lvm2', 'nano'],
            'image.recipe':
"""#!/bin/sh
set -e
set -x

dpkg -l | egrep "nano|lvm" || true
lvs
"""
        })

    def tearDown(self):
        os.environ.pop('SLIPSTREAM_CONNECTOR_INSTANCE')
        os.environ.pop('SLIPSTREAM_BOOTSTRAP_BIN')
        os.environ.pop('SLIPSTREAM_DIID')
        self.client = None
        self.ch = None

    def _init_connector(self, run_category=RUN_CATEGORY_DEPLOYMENT):
        """Build the connector for *run_category* with VM publishing mocked."""
        self.ch.set(KEY_RUN_CATEGORY, run_category)
        self.client = getConnector(self.ch)
        self.client._publish_vm_info = Mock()

    def xtest_1_startWaitRunningStopImage(self):
        """Disabled live test: start instances, wait for RUNNING, stop."""
        self._init_connector()
        self._start_wait_running_stop_images()

    def xtest_2_buildImage(self):
        """Disabled live test: build an image and deregister it afterwards."""
        self._init_connector(run_category=RUN_CATEGORY_IMAGE)

        try:
            instances_details = self.client.start_nodes_and_clients(
                self.user_info, {NodeDecorator.MACHINE_NAME: self.node_instance})

            assert instances_details
            assert instances_details[0][NodeDecorator.MACHINE_NAME]

            new_id = self.client.build_image(self.user_info, self.node_instance)

            assert new_id
        finally:
            self.client.stop_deployment()

        print('Deregistering image %s ... ' % new_id)
        self.client.deregister_image(new_id)
        print('Done.')

    def xtest_3_addDisk(self):
        """Disabled live test: attach a disk to each instance and verify it."""
        self._init_connector()
        try:
            print('Node instances: %s' % self.node_instances.values())
            self.client.start_nodes_and_clients(self.user_info, self.node_instances)
            for node_instance in self.node_instances.values():
                node_instance.set_parameter(NodeDecorator.SCALE_DISK_ATTACH_SIZE, 20)
                vm = self.client._get_vm(node_instance.get_name())
                vm_uuid = vm['resourceUUID']

                print('VM Created: %s' % vm)

                disk_uuid = self.client._attach_disk(node_instance)
                print('=================================================================')
                print('Disk created with uuid %s ' % disk_uuid)
            # Get the list of VMs
            vm_list = self.client.list_instances()
            for i in vm_list:
                # Get the VM that was created in the test using the UUID obtained after creation
                if i['resourceUUID'] == vm_uuid:
                    disks = i['disks']
                    print('The status of the created VM is %s' % i['status'])
                    assert i['status'] == "RUNNING"

                    for disk in disks:
                        if disk['resourceUUID'] == disk_uuid:
                            print('The status of the attached disk %s' % disk['status'])
                            assert disk['status'] == "ATTACHED_TO_SERVER"
                            assert disk['serverUUID'] == vm_uuid
                            print('Attached disk info: %s' % disk)

            print('=================================================================')

        finally:
            self.client.stop_deployment()
        print('Done.')

    def xtest_4_removeDisk(self):
        """Disabled live test: attach then detach a disk and verify removal."""
        self._init_connector()
        try:
            print('Node instances: %s' % self.node_instances.values())
            self.client.start_nodes_and_clients(self.user_info, self.node_instances)
            for node_instance in self.node_instances.values():
                vm = self.client._get_vm(node_instance.get_name())
                vm_uuid = vm['resourceUUID']
                print(node_instance)

                print('VM Created: %s' % vm)
                node_instance.set_parameter(NodeDecorator.SCALE_DISK_ATTACH_SIZE, 20)
                disk_uuid = self.client._attach_disk(node_instance)
                print('=================================================================')
                print('Disk created with uuid %s ' % disk_uuid)

            # Get the list of VMs
            vm_list = self.client.list_instances()
            for i in vm_list:
                # Get the VM that was created in the test using the UUID obtained after creation
                if i['resourceUUID'] == vm_uuid:
                    disks = i['disks']
                    print('The status of the VM is %s' % i['status'])

                    for disk in disks:
                        if disk['resourceUUID'] == disk_uuid:
                            print('The status of the attached disk %s' % disk['status'])

                            node_instance.set_parameter(NodeDecorator.SCALE_DISK_DETACH_DEVICE, disk_uuid)
                            self.client._detach_disk(node_instance)

            vm_list = self.client.list_instances()
            for i in vm_list:
                # Get the VM that was created in the test using the UUID obtained after creation
                if i['resourceUUID'] == vm_uuid:
                    disks = i['disks']
                    print("Disks on the VM are:")
                    print(disks)
                    for disk in disks:
                        assert disk['resourceUUID'] != disk_uuid
                    print('The status of the VM is %s' % i['status'])
                    assert i['status'] == "RUNNING"
            print('=================================================================')

        finally:
            self.client.stop_deployment()
        print('Done.')

    def xtest_5_resize(self):
        """Disabled live test: resize CPU/RAM of running instances."""
        self._init_connector()
        MODIFIED_CPU_COUNT = 2
        MODIFIED_RAM_AMT = 4096
        try:
            print('Node instances: %s' % self.node_instances.values())
            self.client.start_nodes_and_clients(self.user_info, self.node_instances)
            for node_instance in self.node_instances.values():
                node_instance.set_cloud_parameters({'cpu': MODIFIED_CPU_COUNT})
                node_instance.set_cloud_parameters({'ram': MODIFIED_RAM_AMT})
                vm = self.client._get_vm(node_instance.get_name())
                vm_uuid = vm['resourceUUID']
                print('VM Created: %s' % vm)
                self.client._resize(node_instance)
            print('=================================================================')

            # Get the list of VMs
            vm_list = self.client.list_instances()
            for i in vm_list:
                # Get the VM that was created in the test using the UUID obtained after creation
                if i['resourceUUID'] == vm_uuid:
                    disks = i['disks']
                    print('The status of the created VM is %s' % i['status'])
                    assert i['status'] == "RUNNING"
                    assert i['cpu'] == MODIFIED_CPU_COUNT
                    assert i['ram'] == MODIFIED_RAM_AMT
            print('=================================================================')

        finally:
            self.client.stop_deployment()

    def _start_wait_running_stop_images(self):
        """Start all node instances, wait until RUNNING, then stop them."""
        try:
            self.client.start_nodes_and_clients(self.user_info, self.node_instances)

            util.printAndFlush('Instances started\n')

            vms = self.client.get_vms()
            assert len(vms) == self.multiplicity

            for vm in vms.values():
                self.client._wait_vm_in_state_running_or_timeout(vm['id'])

            time.sleep(2)
        finally:
            self.client.stop_deployment()
class MainProgram(CommandBase):
    """A command-line program to download deployment reports."""

    def __init__(self, argv=None):
        # Set collaborators up front; CommandBase drives parse/doWork.
        self.module = ''
        self.endpoint = None
        self.ss_client = None
        self.configHolder = None
        super(MainProgram, self).__init__(argv)

    def parse(self):
        """Define the CLI options and digest the parsed values."""
        self.parser.usage = '''usage: %prog [options] [<run-uuid>]

<run-uuid>    UUID of the run to get reports from.'''
        self.addEndpointOption()

        self.parser.add_option(
            '-c', '--components', dest='components', default='',
            help='Comma separated list of components to download the reports for. '
                 'Example: nginx,worker.1,worker.3 - will download reports for all component '
                 'instances of nginx and only for instances 1 and 3 of worker. '
                 'Default: all instances of all components.')

        self.parser.add_option(
            '-o', '--output-dir', dest='output_dir', default=os.getcwd(),
            help='Path to the folder to store the reports. Default: <working directory>/<run-uuid>.')

        self.parser.add_option(
            '--no-orch', dest='no_orch', action='store_true', default=False,
            help='Do not download Orchestrator report.')

        self.options, self.args = self.parser.parse_args()

        # Empty string means "all components" -> empty list.
        components = self.options.components
        self.options.components = components.split(',') if components else []

        self._checkArgs()

    def _checkArgs(self):
        """Accept at most one positional argument: the run UUID."""
        if len(self.args) > 1:
            self.usageExitTooManyArguments()
        elif len(self.args) == 1:
            self.run_uuid = self.args[0]

    def _init_client(self):
        """Build the SlipStream HTTP client from the parsed options."""
        self.configHolder = ConfigHolder(self.options)
        self.configHolder.set('serviceurl', self.options.endpoint)
        self.ss_client = SlipStreamHttpClient(self.configHolder)

    def doWork(self):
        """Entry point: fetch the reports for the selected run."""
        self._init_client()
        reports = ReportsGetter(self.ss_client.get_api(), self.configHolder)
        reports.get_reports(self.run_uuid,
                            components=self.options.components,
                            no_orch=self.options.no_orch)
class TestBaseLive(unittest.TestCase):
    """Base class for live cloud-connector tests.

    Subclasses set ``cin`` (connector instance name) and call ``_setUp`` with
    the connector class, a config file and the config keys to copy into the
    user info.
    """

    cin = ''
    node_instances = {}  # of NodeInstance()
    multiplicity = 0
    max_iaas_workers = 1

    def construct_key(self, name):
        """Prefix *name* with the connector instance name: '<cin>.<name>'."""
        return self.cin + '.' + name

    def _conf_val(self, key, default=None):
        """Read '<cin>.<key>' from the config; raise KeyError if absent and
        no default was given.

        Fix: the original tested ``if default:`` so falsy defaults (0, '')
        were ignored and a missing key raised instead of returning them.
        """
        conf_key = self.construct_key(key)
        if default is not None:
            return self.ch.config.get(conf_key, default)
        return self.ch.config[conf_key]

    def _build_user_info(self, keys):
        """Populate self.user_info with the SSH key plus the given conf keys."""
        self.user_info = UserInfo(self.cin)
        self.user_info['General.' + UserInfo.SSH_PUBKEY_KEY] = self.ch.config[
            'General.ssh.public.key']
        for k in keys:
            self.user_info[self.construct_key(k)] = self._conf_val(k)

    def _load_config(self, conf_file):
        """Load *conf_file* into self.ch; fail fast if it does not exist."""
        if not os.path.exists(conf_file):
            raise Exception('Configuration file %s not found.' % conf_file)

        self.ch = ConfigHolder(configFile=conf_file, context={'foo': 'bar'})
        self.ch.set(KEY_RUN_CATEGORY, '')

    def _build_client(self, testedCls):
        """Instantiate the connector class with VM publishing stubbed out."""
        testedCls._publish_vm_info = publish_vm_info  # pylint: disable=protected-access
        self.client = testedCls(self.ch)

    def _get_ex_msg(self, ex):
        """Best-effort extraction of a message from an exception object."""
        if hasattr(ex, 'message'):
            return ex.message
        # NOTE(review): 'arg' is likely a typo for 'args'; left unchanged to
        # preserve behavior ('' is returned when neither attribute exists).
        if hasattr(ex, 'arg'):
            return ex.arg
        return ''

    def _setUp(self, testedCls, conf_file, conf_keys):
        """(Re-)sets the following fields
        self.ch               - ConfigHolder
        self.client           - instance of BaseCloudConnector
        self.user_info        - UserInfo
        self.multiplicity     - int
        self.max_iaas_workers - str
        """
        os.environ['SLIPSTREAM_CONNECTOR_INSTANCE'] = self.cin
        os.environ['SLIPSTREAM_BOOTSTRAP_BIN'] = 'http://example.com/bootstrap'
        os.environ['SLIPSTREAM_DIID'] = '00000000-0000-0000-0000-000000000000'

        self._load_config(conf_file)
        self._build_client(testedCls)
        self._build_user_info(conf_keys)
        pp(self.user_info)

        self.multiplicity = int(self._conf_val('multiplicity', 2))
        self.max_iaas_workers = self._conf_val('max.iaas.workers',
                                               str(self.multiplicity))

    def _test_start_stop_images(self):
        "Live test that starts and stops VMs on a cloud."
        self.client.run_category = RUN_CATEGORY_DEPLOYMENT

        success = True
        error = ''
        try:
            self.client.start_nodes_and_clients(self.user_info,
                                                self.node_instances)
            vms = self.client.get_vms()
            assert len(vms) == self.multiplicity
            util.printAction('Instances started.')
            pp(vms)
        except Exception as ex:
            success = False
            error = self._get_ex_msg(ex)
            util.printError("Exception caught while starting instances!")
            exc_type, exc_value, exc_traceback = sys.exc_info()
            traceback.print_exception(exc_type, exc_value, exc_traceback)
        finally:
            util.printAction("Stopping deployment.")
            # stop_deployment runs even when start failed, so no VMs leak.
            self.client.stop_deployment()
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(success, True, error)
class TestCloudStackClientCloud(unittest.TestCase):
    """Live tests for the CloudStack connector (xtest_* are disabled)."""

    connector_instance_name = 'cloudstack'

    def constructKey(self, name):
        """Prefix *name* with the connector instance name."""
        return self.connector_instance_name + '.' + name

    def setUp(self):
        BaseCloudConnector._publish_vm_info = Mock()  # pylint: disable=protected-access

        os.environ['SLIPSTREAM_CONNECTOR_INSTANCE'] = self.connector_instance_name
        os.environ['SLIPSTREAM_BOOTSTRAP_BIN'] = 'http://example.com/bootstrap'
        os.environ['SLIPSTREAM_DIID'] = '00000000-0000-0000-0000-%s' % time.time()

        if not os.path.exists(CONFIG_FILE):
            raise Exception('Configuration file %s not found.' % CONFIG_FILE)

        self.ch = ConfigHolder(configFile=CONFIG_FILE, context={'foo': 'bar'})
        self.ch.set(KEY_RUN_CATEGORY, '')
        self.ch.set('verboseLevel', self.ch.config['General.verbosity'])

        self.client = CloudStackClientCloud(self.ch)

        conf = self.ch.config
        self.user_info = UserInfo(self.connector_instance_name)
        self.user_info[self.constructKey('endpoint')] = conf['cloudstack.endpoint']
        self.user_info[self.constructKey('zone')] = conf['cloudstack.zone']
        self.user_info[self.constructKey('username')] = conf['cloudstack.key']
        self.user_info[self.constructKey('password')] = conf['cloudstack.secret']
        self.user_info['General.' + UserInfo.SSH_PUBKEY_KEY] = conf['General.ssh.public.key']

        security_groups = conf['cloudstack.security.groups']
        instance_type = conf['cloudstack.instance.type']
        image_id = conf[self.constructKey('template')]

        self.multiplicity = 2
        self.max_iaas_workers = conf.get('cloudstack.max.iaas.workers',
                                         str(self.multiplicity))

        self.node_name = 'test_node'
        self.node_instances = {}
        for index in range(1, self.multiplicity + 1):
            instance_name = self.node_name + '.' + str(index)
            self.node_instances[instance_name] = NodeInstance({
                NodeDecorator.NODE_NAME_KEY: self.node_name,
                NodeDecorator.NODE_INSTANCE_NAME_KEY: instance_name,
                'cloudservice': self.connector_instance_name,
                'image.platform': 'linux',
                'image.imageId': image_id,
                'image.id': image_id,
                self.constructKey('instance.type'): instance_type,
                self.constructKey('security.groups'): security_groups,
                'network': 'private'
            })

    def tearDown(self):
        os.environ.pop('SLIPSTREAM_CONNECTOR_INSTANCE')
        os.environ.pop('SLIPSTREAM_BOOTSTRAP_BIN')
        self.client = None
        self.ch = None

    def xtest_1_startStopImages(self):
        """Disabled live test: start instances, verify the count, stop them."""
        self.client.run_category = RUN_CATEGORY_DEPLOYMENT

        try:
            self.client.start_nodes_and_clients(self.user_info, self.node_instances)

            util.printAndFlush('Instances started')

            vms = self.client.get_vms()
            assert len(vms) == self.multiplicity
        finally:
            self.client.stop_deployment()

    def xtest_2_buildImage(self):
        """Disabled: image build is not implemented for this connector."""
        raise NotImplementedError()
# Exemple #23 (0) — non-code separator from the example dump, kept as a comment
class TestFlexiantClientCloud(unittest.TestCase):
    """Live (manually-run) tests against a Flexiant cloud.

    Test methods are prefixed ``xtest_`` so that unittest's default
    ``test``-prefix discovery skips them; rename one to run it by hand.
    All connection parameters are read from CONFIG_FILE.

    Fix applied: the Python-2-only ``print`` statements were normalized to
    the parenthesized call form already used elsewhere in this file; the
    call form is valid under both Python 2 and Python 3.
    """

    def setUp(self):
        cn = getConnectorClass().cloudName

        os.environ['SLIPSTREAM_CONNECTOR_INSTANCE'] = cn
        os.environ['SLIPSTREAM_BOOTSTRAP_BIN'] = 'http://example.com/bootstrap'
        # Pseudo-unique deployment id derived from the current time.
        os.environ['SLIPSTREAM_DIID'] = \
            '%s-1234-1234-1234-123456789012' % str(int(time.time()))[2:]

        if not os.path.exists(CONFIG_FILE):
            raise Exception('Configuration file %s not found.' % CONFIG_FILE)

        self.ch = ConfigHolder(configFile=CONFIG_FILE, context={'foo': 'bar'})
        self.ch.verboseLevel = int(self.ch.verboseLevel)

        # Credentials and endpoint come straight from the config file,
        # keyed by the connector's cloud name.
        self.user_info = UserInfo(cn)
        self.user_info['General.ssh.public.key'] = self.ch.config['General.ssh.public.key']
        self.user_info[cn + '.user.uuid'] = self.ch.config[cn + '.user.uuid']
        self.user_info[cn + '.username'] = self.ch.config[cn + '.username']
        self.user_info[cn + '.password'] = self.ch.config[cn + '.password']
        self.user_info[cn + '.endpoint'] = self.ch.config[cn + '.endpoint']

        node_name = 'test_node'

        self.multiplicity = int(self.ch.config['multiplicity'])

        # One NodeInstance per requested replica: test_node.1 .. test_node.N.
        self.node_instances = {}
        for i in range(1, self.multiplicity + 1):
            node_instance_name = node_name + '.' + str(i)
            self.node_instances[node_instance_name] = NodeInstance({
                NodeDecorator.NODE_NAME_KEY: node_name,
                NodeDecorator.NODE_INSTANCE_NAME_KEY: node_instance_name,
                'cloudservice': cn,
                'image.description': 'This is a test image.',
                'image.platform': self.ch.config[cn + '.image.platform'],
                'image.id': self.ch.config[cn + '.imageid'],
                cn + '.ram': self.ch.config[cn + '.ram'],
                cn + '.cpu': self.ch.config[cn + '.cpu'],
                'network': self.ch.config['network']
            })

        # Single "machine" instance used by the build-image test.
        self.node_instance = NodeInstance({
            NodeDecorator.NODE_NAME_KEY: NodeDecorator.MACHINE_NAME,
            NodeDecorator.NODE_INSTANCE_NAME_KEY: NodeDecorator.MACHINE_NAME,
            'cloudservice': cn,
            'image.description': 'This is a test image.',
            'image.platform': self.ch.config[cn + '.image.platform'],
            'image.loginUser': self.ch.config[cn + '.image.loginuser'],
            'image.id': self.ch.config[cn + '.imageid'],
            cn + '.ram': self.ch.config[cn + '.ram'],
            cn + '.cpu': self.ch.config[cn + '.cpu'],
            'network': self.ch.config['network'],
            'image.prerecipe':
            """#!/bin/sh
set -e
set -x

ls -l /tmp
dpkg -l | egrep "nano|lvm" || true
""",
            'image.packages': ['lvm2', 'nano'],
            'image.recipe':
            """#!/bin/sh
set -e
set -x

dpkg -l | egrep "nano|lvm" || true
lvs
"""
        })

    def tearDown(self):
        os.environ.pop('SLIPSTREAM_CONNECTOR_INSTANCE')
        os.environ.pop('SLIPSTREAM_BOOTSTRAP_BIN')
        os.environ.pop('SLIPSTREAM_DIID')
        self.client = None
        self.ch = None

    def _init_connector(self, run_category=RUN_CATEGORY_DEPLOYMENT):
        """Build the connector for `run_category`; VM-info publishing is
        stubbed out so the tests do not need a SlipStream server."""
        self.ch.set(KEY_RUN_CATEGORY, run_category)
        self.client = getConnector(self.ch)
        self.client._publish_vm_info = Mock()

    def xtest_1_startWaitRunningStopImage(self):
        self._init_connector()
        self._start_wait_running_stop_images()

    def xtest_2_buildImage(self):
        """Build an image from the single machine instance, then deregister it."""
        self._init_connector(run_category=RUN_CATEGORY_IMAGE)

        try:
            instances_details = self.client.start_nodes_and_clients(
                self.user_info,
                {NodeDecorator.MACHINE_NAME: self.node_instance})

            assert instances_details
            assert instances_details[0][NodeDecorator.MACHINE_NAME]

            new_id = self.client.build_image(self.user_info,
                                             self.node_instance)

            assert new_id
        finally:
            self.client.stop_deployment()

        print('Deregistering image %s ... ' % new_id)
        self.client.deregister_image(new_id)
        print('Done.')

    def xtest_3_addDisk(self):
        """Attach a 20 GB disk to each started instance and verify the
        attachment through the provider's instance listing."""
        self._init_connector()
        try:
            print('Node instances: %s' % self.node_instances.values())
            self.client.start_nodes_and_clients(self.user_info,
                                                self.node_instances)
            for node_instance in self.node_instances.values():
                node_instance.set_parameter(
                    NodeDecorator.SCALE_DISK_ATTACH_SIZE, 20)
                vm = self.client._get_vm(node_instance.get_name())
                vm_uuid = vm['resourceUUID']

                print('VM Created: %s' % vm)

                disk_uuid = self.client._attach_disk(node_instance)
                print('=================================================================')
                print('Disk created with uuid %s ' % disk_uuid)
            # Re-list the VMs and find the one created above by its UUID.
            vm_list = self.client.list_instances()
            for i in vm_list:
                if i['resourceUUID'] == vm_uuid:
                    disks = i['disks']
                    print('The status of the created VM is %s' % i['status'])
                    assert i['status'] == "RUNNING"

                    for disk in disks:
                        if disk['resourceUUID'] == disk_uuid:
                            print('The status of the attached disk %s'
                                  % disk['status'])
                            assert disk['status'] == "ATTACHED_TO_SERVER"
                            assert disk['serverUUID'] == vm_uuid
                            print('Attached disk info: %s' % disk)

            print('=================================================================')

        finally:
            self.client.stop_deployment()
        print('Done.')

    def xtest_4_removeDisk(self):
        """Attach a disk to each instance, detach it again, and verify it no
        longer appears in the VM's disk list."""
        self._init_connector()
        try:
            print('Node instances: %s' % self.node_instances.values())
            self.client.start_nodes_and_clients(self.user_info,
                                                self.node_instances)
            for node_instance in self.node_instances.values():
                vm = self.client._get_vm(node_instance.get_name())
                vm_uuid = vm['resourceUUID']
                print(node_instance)

                print('VM Created: %s' % vm)
                node_instance.set_parameter(
                    NodeDecorator.SCALE_DISK_ATTACH_SIZE, 20)
                disk_uuid = self.client._attach_disk(node_instance)
                print('=================================================================')
                print('Disk created with uuid %s ' % disk_uuid)

            # Find the created VM by UUID and detach the disk we just added.
            vm_list = self.client.list_instances()
            for i in vm_list:
                if i['resourceUUID'] == vm_uuid:
                    disks = i['disks']
                    print('The status of the VM is %s' % i['status'])

                    for disk in disks:
                        if disk['resourceUUID'] == disk_uuid:
                            print('The status of the attached disk %s'
                                  % disk['status'])

                            node_instance.set_parameter(
                                NodeDecorator.SCALE_DISK_DETACH_DEVICE,
                                disk_uuid)
                            self.client._detach_disk(node_instance)

            # List again: the detached disk must be gone and the VM running.
            vm_list = self.client.list_instances()
            for i in vm_list:
                if i['resourceUUID'] == vm_uuid:
                    disks = i['disks']
                    print("Disks on the VM are:")
                    print(disks)
                    for disk in disks:
                        assert disk['resourceUUID'] != disk_uuid
                    print('The status of the VM is %s' % i['status'])
                    assert i['status'] == "RUNNING"
            print('=================================================================')

        finally:
            self.client.stop_deployment()
        print('Done.')

    def xtest_5_resize(self):
        """Resize each instance to 2 CPUs / 4096 RAM and verify the new
        values through the provider's instance listing."""
        self._init_connector()
        MODIFIED_CPU_COUNT = 2
        MODIFIED_RAM_AMT = 4096
        try:
            print('Node instances: %s' % self.node_instances.values())
            self.client.start_nodes_and_clients(self.user_info,
                                                self.node_instances)
            for node_instance in self.node_instances.values():
                node_instance.set_cloud_parameters({'cpu': MODIFIED_CPU_COUNT})
                node_instance.set_cloud_parameters({'ram': MODIFIED_RAM_AMT})
                vm = self.client._get_vm(node_instance.get_name())
                vm_uuid = vm['resourceUUID']
                print('VM Created: %s' % vm)
                self.client._resize(node_instance)
            print('=================================================================')

            # Find the resized VM by UUID and check its new geometry.
            vm_list = self.client.list_instances()
            for i in vm_list:
                if i['resourceUUID'] == vm_uuid:
                    print('The status of the created VM is %s' % i['status'])
                    assert i['status'] == "RUNNING"
                    assert i['cpu'] == MODIFIED_CPU_COUNT
                    assert i['ram'] == MODIFIED_RAM_AMT
            print('=================================================================')

        finally:
            self.client.stop_deployment()

    def _start_wait_running_stop_images(self):
        """Start all node instances, wait for each VM to reach the running
        state, and always stop the deployment afterwards."""
        try:
            self.client.start_nodes_and_clients(self.user_info,
                                                self.node_instances)

            util.printAndFlush('Instances started\n')

            vms = self.client.get_vms()
            assert len(vms) == self.multiplicity

            for vm in vms.values():
                self.client._wait_vm_in_state_running_or_timeout(vm['id'])

            time.sleep(2)
        finally:
            self.client.stop_deployment()
class TestOpenStackClientCloud(unittest.TestCase):
    """Live (manually-run) tests against an OpenStack cloud.

    Test methods are prefixed ``xtest_`` so unittest's default discovery
    (which matches the ``test`` prefix) skips them; rename one to run it.
    Connection parameters are read from CONFIG_FILE.

    NOTE(review): this class uses camelCase connector calls
    (``startNodesAndClients``, ``getVms``, ...) while the sibling classes in
    this file use snake_case (``start_nodes_and_clients``) -- confirm this
    older API still exists.
    """

    def setUp(self):
        # NOTE(review): class-level patch that tearDown never restores, so
        # the Mock leaks into any later use of BaseCloudConnector in this
        # process -- confirm this is acceptable.
        BaseCloudConnector.publishVmInfo = Mock()

        os.environ['SLIPSTREAM_CONNECTOR_INSTANCE'] = 'openstack'
        os.environ['SLIPSTREAM_BOOTSTRAP_BIN'] = 'http://example.com/bootstrap'
        os.environ['SLIPSTREAM_DIID'] = '00000000-0000-0000-0000-000000000000'

        if not os.path.exists(CONFIG_FILE):
            raise Exception('Configuration file %s not found.' % CONFIG_FILE)

        self.ch = ConfigHolder(configFile=CONFIG_FILE, context={'foo': 'bar'})
        self.ch.set(KEY_RUN_CATEGORY, '')

        # The OpenStack service coordinates are passed via the environment.
        os.environ['OPENSTACK_SERVICE_TYPE'] = self.ch.config['OPENSTACK_SERVICE_TYPE']
        os.environ['OPENSTACK_SERVICE_NAME'] = self.ch.config['OPENSTACK_SERVICE_NAME']
        os.environ['OPENSTACK_SERVICE_REGION'] = self.ch.config['OPENSTACK_SERVICE_REGION']

        self.client = OpenStackClientCloud(self.ch)

        # Credentials and endpoint come straight from the config file.
        self.user_info = UserInfo('openstack')
        self.user_info['openstack.endpoint'] = self.ch.config['openstack.endpoint']
        self.user_info['openstack.tenant.name'] = self.ch.config['openstack.tenant.name']
        self.user_info['openstack.username'] = self.ch.config['openstack.username']
        self.user_info['openstack.password'] = self.ch.config['openstack.password']
        self.user_info['General.ssh.public.key'] = self.ch.config['General.ssh.public.key']

        security_groups = self.ch.config['openstack.security.groups']
        image_id = self.ch.config['openstack.imageid']
        # Number of instances requested for the deployment test.
        self.multiplicity = 2
        self.node_info = {
            'multiplicity': self.multiplicity,
            'nodename': 'test_node',
            'image': {
                'cloud_parameters': {
                    'openstack': {
                        'openstack.instance.type': 'm1.tiny',
                        'openstack.security.groups': security_groups
                    },
                    'Cloud': {'network': 'private'}
                },
                'attributes': {
                    'imageId': image_id,
                    'platform': 'Ubuntu'
                },
                'targets': {
                    'prerecipe':
"""#!/bin/sh
set -e
set -x

ls -l /tmp
dpkg -l | egrep "nano|lvm" || true
""",
                    'recipe':
"""#!/bin/sh
set -e
set -x

dpkg -l | egrep "nano|lvm" || true
lvs
""",
                    'packages' : ['lvm2','nano']
                }
            },
        }

    def tearDown(self):
        # Drop the bootstrap environment variables and release the fixtures.
        os.environ.pop('SLIPSTREAM_CONNECTOR_INSTANCE')
        os.environ.pop('SLIPSTREAM_BOOTSTRAP_BIN')
        self.client = None
        self.ch = None

    def xtest_1_startStopImages(self):
        """Start the node instances, check the VM count, stop the deployment."""
        self.client.run_category = RUN_CATEGORY_DEPLOYMENT

        self.client.startNodesAndClients(self.user_info, [self.node_info])

        util.printAndFlush('Instances started')

        vms = self.client.getVms()
        assert len(vms) == self.multiplicity

        self.client.stopDeployment()

    def xtest_2_buildImage(self):
        """Build a new image from node_info and check an image id is produced."""
        self.client.run_category = RUN_CATEGORY_IMAGE

        image_info = self.client._extractImageInfoFromNodeInfo(self.node_info)

        self.client.startImage(self.user_info, image_info)
        instancesDetails = self.client.getVmsDetails()

        assert instancesDetails
        assert instancesDetails[0][NodeDecorator.MACHINE_NAME]

        self.client.buildImage(self.user_info, image_info)
        assert self.client.getNewImageId()
class TestOpenNebulaClientCloudLive(unittest.TestCase):
    """Live (manually-run) tests against an OpenNebula cloud.

    Test methods are prefixed ``xtest_`` so unittest's default discovery
    skips them. Connection parameters are read from CONFIG_FILE.

    NOTE(review): a second class with this same name appears later in this
    file and, at import time, would shadow this one -- confirm which copy is
    intended to survive.
    """
    connector_instance_name = 'opennebula'

    def constructKey(self, name):
        """Return *name* qualified by the connector instance ('opennebula.<name>')."""
        return self.connector_instance_name + '.' + name

    def setUp(self):
        os.environ['SLIPSTREAM_CONNECTOR_INSTANCE'] = self.connector_instance_name
        os.environ['SLIPSTREAM_BOOTSTRAP_BIN'] = 'http://example.com/bootstrap'
        os.environ['SLIPSTREAM_DIID'] = '00000000-0000-0000-0000-000000000000'

        if not os.path.exists(CONFIG_FILE):
            raise Exception('Configuration file %s not found.' % CONFIG_FILE)

        self.ch = ConfigHolder(configFile=CONFIG_FILE, context={'foo': 'bar'})
        self.ch.set(KEY_RUN_CATEGORY, '')

        # Replace VM-info publishing so no SlipStream server is needed.
        OpenNebulaClientCloud._publish_vm_info = publish_vm_info  # pylint: disable=protected-access
        self.client = OpenNebulaClientCloud(self.ch)

        # Credentials, endpoint and networks come from the config file.
        self.user_info = UserInfo(self.connector_instance_name)
        self.user_info['General.'+ UserInfo.SSH_PUBKEY_KEY] = self.ch.config['General.ssh.public.key']
        self.user_info[self.constructKey('endpoint')] = self.ch.config['opennebula.endpoint']
        self.user_info[self.constructKey('username')] = self.ch.config['opennebula.username']
        self.user_info[self.constructKey('password')] = self.ch.config['opennebula.password']
        self.user_info[self.constructKey(UserInfo.NETWORK_PUBLIC_KEY)] = self.ch.config['opennebula.networkPublic']
        self.user_info[self.constructKey(UserInfo.NETWORK_PRIVATE_KEY)] = self.ch.config['opennebula.networkPrivate']
        self.user_info[self.constructKey('cpuRatio')] = '1.0'

        image_id = self.ch.config['opennebula.imageid']
        # NOTE(review): 'intance' looks like a typo for 'instance'; the .get()
        # default masks it -- confirm which key the config file actually uses
        # before renaming.
        instance_type = self.ch.config.get('opennebula.intance.type', 'm1.tiny')
        node_name = 'test_node'

        self.multiplicity = 1

        # One NodeInstance per requested replica: test_node.1 .. test_node.N.
        self.node_instances = {}
        for i in range(1, self.multiplicity + 1):
            node_instance_name = node_name + '.' + str(i)
            self.node_instances[node_instance_name] = NodeInstance({
                NodeDecorator.NODE_NAME_KEY: node_name,
                NodeDecorator.NODE_INSTANCE_NAME_KEY: node_instance_name,
                'cloudservice': self.connector_instance_name,
                'image.platform': 'Ubuntu',
                'image.imageId': image_id,
                'image.id': image_id,
                'network': self.ch.config['opennebula.network'],
                self.constructKey('instance.type'): instance_type,
                self.constructKey('ram'): '2',
                self.constructKey('cpu'): '1'
            })

        # Single "machine" instance used by the build-image test.
        self.node_instance = NodeInstance({
            NodeDecorator.NODE_NAME_KEY: NodeDecorator.MACHINE_NAME,
            NodeDecorator.NODE_INSTANCE_NAME_KEY: NodeDecorator.MACHINE_NAME,
            'cloudservice': self.connector_instance_name,
            'image.platform': 'Ubuntu',
            'image.imageId': image_id,
            'image.id': image_id,
            self.constructKey('instance.type'): instance_type,
            'image.prerecipe':
"""#!/bin/sh
set -e
set -x

ls -l /tmp
dpkg -l | egrep "nano|lvm" || true
""",
                'image.packages': ['lvm2', 'nano'],
                'image.recipe':
"""#!/bin/sh
set -e
set -x

dpkg -l | egrep "nano|lvm" || true
lvs
"""
        })

    def tearDown(self):
        # Drop the bootstrap environment variables and release the fixtures.
        os.environ.pop('SLIPSTREAM_CONNECTOR_INSTANCE')
        os.environ.pop('SLIPSTREAM_BOOTSTRAP_BIN')
        self.client = None
        self.ch = None

    def xtest_1_startStopImages(self):
        """Start the node instances, check the VM count, stop the deployment."""
        self.client.run_category = RUN_CATEGORY_DEPLOYMENT

        self.client.start_nodes_and_clients(self.user_info, self.node_instances)

        util.printAndFlush('Instances started')

        vms = self.client.get_vms()
        assert len(vms) == self.multiplicity

        self.client.stop_deployment()

    def xtest_2_buildImage(self):
        """Build an image from the machine instance and check a new id is returned."""
        self.client.run_category = RUN_CATEGORY_IMAGE

        self.client.start_nodes_and_clients(self.user_info, {NodeDecorator.MACHINE_NAME: self.node_instance})
        instances_details = self.client.get_vms_details()

        assert instances_details
        assert instances_details[0][NodeDecorator.MACHINE_NAME]

        new_id = self.client.build_image(self.user_info, self.node_instance)
        assert new_id
# (removed scraped-corpus artifact between classes: "Exemple #26" / "0" --
#  these lines are not Python and break parsing of this file)
class TestStratusLabLiveBase(unittest.TestCase):
    """Base fixture for live (manually-run) tests against a StratusLab cloud.

    Subclasses may override ``_get_config_file`` / ``_get_connector_class``.
    All connection parameters are read from the configuration file.
    """

    def setUp(self):

        os.environ['SLIPSTREAM_CONNECTOR_INSTANCE'] = 'stratuslab'
        os.environ['SLIPSTREAM_BOOTSTRAP_BIN'] = 'http://example.com/bootstrap'
        # Pseudo-unique deployment id derived from the current time.
        os.environ['SLIPSTREAM_DIID'] = '00000000-0000-0000-0000-00%s' % int(
            time.time())

        config_file = self._get_config_file()
        if not os.path.exists(config_file):
            raise Exception('Configuration file %s not found.' % config_file)

        self.ch = ConfigHolder(configFile=config_file, context={'foo': 'bar'})
        self.ch.set('verboseLevel', int(self.ch.config['General.verbosity']))

        os.environ['SLIPSTREAM_PDISK_ENDPOINT'] = self.ch.config[
            'SLIPSTREAM_PDISK_ENDPOINT']

        # Instantiate the connector and stub out VM-info publishing so no
        # SlipStream server is needed.
        self.client = self._get_connector_class()(self.ch)
        self.client._publish_vm_info = Mock()

        # Credentials, endpoints and user identity come from the config file.
        self.user_info = UserInfo('stratuslab')
        self.user_info['stratuslab.endpoint'] = self.ch.config[
            'stratuslab.endpoint']
        self.user_info['stratuslab.ip.type'] = self.ch.config[
            'stratuslab.ip.type']
        self.user_info['stratuslab.marketplace.endpoint'] = self.ch.config[
            'stratuslab.marketplace.endpoint']
        self.user_info['stratuslab.password'] = self.ch.config[
            'stratuslab.password']
        self.user_info['General.ssh.public.key'] = self.ch.config[
            'General.ssh.public.key']
        self.user_info['stratuslab.username'] = self.ch.config[
            'stratuslab.username']
        self.user_info['User.firstName'] = 'Foo'
        self.user_info['User.lastName'] = 'Bar'
        self.user_info['User.email'] = '*****@*****.**'

        extra_disk_volatile = self.ch.config['stratuslab.extra.disk.volatile']
        image_id = self.ch.config['stratuslab.imageid']
        self.multiplicity = int(
            self.ch.config.get('stratuslab.multiplicity', 2))
        self.max_iaas_workers = self.ch.config.get(
            'stratuslab.max.iaas.workers', 10)

        # One NodeInstance per requested replica: test_node.1 .. test_node.N.
        # NOTE(review): the key 'stratuslab.disks,bus.type' (comma) looks like
        # a typo for 'stratuslab.disks.bus.type' -- confirm against the
        # connector before renaming.
        self.node_name = 'test_node'
        self.node_instances = {}
        for i in range(1, self.multiplicity + 1):
            node_instance_name = self.node_name + '.' + str(i)
            self.node_instances[node_instance_name] = NodeInstance({
                NodeDecorator.NODE_NAME_KEY:
                self.node_name,
                NodeDecorator.NODE_INSTANCE_NAME_KEY:
                node_instance_name,
                'cloudservice':
                'stratuslab',
                'extra.disk.volatile':
                extra_disk_volatile,
                'image.resourceUri':
                '',
                'image.platform':
                'Ubuntu',
                'image.imageId':
                image_id,
                'image.id':
                image_id,
                'stratuslab.instance.type':
                'm1.small',
                'stratuslab.disks,bus.type':
                'virtio',
                'stratuslab.cpu':
                '',
                'stratuslab.ram':
                '',
                'network':
                'public'
            })

        # Single "machine" instance used by image-build style tests.
        self.node_instance = NodeInstance({
            NodeDecorator.NODE_NAME_KEY:
            NodeDecorator.MACHINE_NAME,
            NodeDecorator.NODE_INSTANCE_NAME_KEY:
            NodeDecorator.MACHINE_NAME,
            'cloudservice':
            'stratuslab',
            'extra.disk.volatile':
            extra_disk_volatile,
            'image.resourceUri':
            '',
            'image.platform':
            'Ubuntu',
            'image.imageId':
            image_id,
            'image.id':
            image_id,
            'stratuslab.instance.type':
            'm1.small',
            'stratuslab.disks,bus.type':
            'virtio',
            'stratuslab.cpu':
            '',
            'stratuslab.ram':
            '',
            'network':
            'public',
            'image.prerecipe': [{
                "name":
                "prerecipe",
                "order":
                1,
                "module":
                "component",
                "module_uri":
                "path/to/component",
                "script":
                """#!/bin/sh
set -e
set -x

ls -l /tmp
dpkg -l | egrep "nano|lvm" || true
"""
            }],
            'image.packages': ['lvm2', 'nano'],
            'image.recipe': [{
                "name":
                "prerecipe",
                "order":
                1,
                "module":
                "component",
                "module_uri":
                "path/to/component",
                "script":
                """#!/bin/sh
set -e
set -x

dpkg -l | egrep "nano|lvm" || true
lvs
"""
            }]
        })

    def tearDown(self):
        # Drop the bootstrap environment variables and release the fixtures.
        os.environ.pop('SLIPSTREAM_CONNECTOR_INSTANCE')
        os.environ.pop('SLIPSTREAM_BOOTSTRAP_BIN')
        self.client = None
        self.ch = None

    def _get_config_file(self):
        """Path of the configuration file; subclasses may override."""
        return CONFIG_FILE

    def _get_connector_class(self):
        """Connector class under test; subclasses may override."""
        return StratusLabClientCloud

    def _start_instances(self):
        """Start all node instances, wait for each VM to reach 'Running',
        and return the list of started VM ids."""

        self.client._get_max_workers = Mock(return_value=self.max_iaas_workers)

        util.printAndFlush('Starting instances\n')
        self.client.start_nodes_and_clients(self.user_info,
                                            self.node_instances)
        util.printAndFlush('Instances started\n')

        vms = self.client.get_vms()
        assert len(vms) == int(self.multiplicity)

        vm_ids = []
        runners = vms.values()
        util.printAndFlush('Waiting VMs to go into Running state\n')
        for runner in runners:
            vm_id = runner.vmIds[0]
            # Poll up to 50 times, 6 s apart; raise on timeout (throw=True).
            state = self.client._wait_vm_in_state([
                'Running',
            ],
                                                  runner,
                                                  vm_id,
                                                  counts=50,
                                                  sleep=6,
                                                  throw=True)
            assert 'Running' == state
            util.printAndFlush('VM %s is Running\n' % vm_id)
            vm_ids.append(vm_id)

        return vm_ids

    def _start_stop_instances(self):
        """Start the instances, then always stop the whole deployment."""
        try:
            self._start_instances()
        finally:
            util.printAndFlush('Stopping deployment\n')
            self.client.stop_deployment()

    def _start_stop_instances_by_ids(self):
        """Start the instances, then always stop them individually by id."""
        vm_ids = []
        try:
            vm_ids = self._start_instances()
        finally:
            util.printAndFlush('Stopping VMs by ids\n')
            self.client.stop_vms_by_ids(vm_ids)
# (removed scraped-corpus artifact between classes: "Exemple #27" / "0" --
#  these lines are not Python and break parsing of this file)
class TestOpenNebulaClientCloudLive(unittest.TestCase):
    """Live (manually-run) tests against an OpenNebula cloud.

    NOTE(review): this redefines a class of the same name that appears
    earlier in this file; at import time this later definition shadows the
    earlier one -- confirm which copy is intended to survive.
    """
    connector_instance_name = 'opennebula'

    def constructKey(self, name):
        """Return *name* qualified by the connector instance ('opennebula.<name>')."""
        return self.connector_instance_name + '.' + name

    def setUp(self):
        os.environ[
            'SLIPSTREAM_CONNECTOR_INSTANCE'] = self.connector_instance_name
        os.environ['SLIPSTREAM_BOOTSTRAP_BIN'] = 'http://example.com/bootstrap'
        os.environ['SLIPSTREAM_DIID'] = '00000000-0000-0000-0000-000000000000'

        if not os.path.exists(CONFIG_FILE):
            raise Exception('Configuration file %s not found.' % CONFIG_FILE)

        self.ch = ConfigHolder(configFile=CONFIG_FILE, context={'foo': 'bar'})
        self.ch.set(KEY_RUN_CATEGORY, '')

        # Replace VM-info publishing so no SlipStream server is needed.
        OpenNebulaClientCloud._publish_vm_info = publish_vm_info  # pylint: disable=protected-access
        self.client = OpenNebulaClientCloud(self.ch)

        # Credentials, endpoint and networks come from the config file.
        self.user_info = UserInfo(self.connector_instance_name)
        self.user_info[
            'General.' +
            UserInfo.SSH_PUBKEY_KEY] = self.ch.config['General.ssh.public.key']
        self.user_info[self.constructKey(
            'endpoint')] = self.ch.config['opennebula.endpoint']
        self.user_info[self.constructKey(
            'username')] = self.ch.config['opennebula.username']
        self.user_info[self.constructKey(
            'password')] = self.ch.config['opennebula.password']
        self.user_info[self.constructKey(
            UserInfo.NETWORK_PUBLIC_KEY
        )] = self.ch.config['opennebula.networkPublic']
        self.user_info[self.constructKey(
            UserInfo.NETWORK_PRIVATE_KEY
        )] = self.ch.config['opennebula.networkPrivate']
        self.user_info[self.constructKey('cpuRatio')] = '1.0'

        image_id = self.ch.config['opennebula.imageid']
        # NOTE(review): 'intance' looks like a typo for 'instance'; the .get()
        # default masks it -- confirm which key the config file actually uses
        # before renaming.
        instance_type = self.ch.config.get('opennebula.intance.type',
                                           'm1.tiny')
        node_name = 'test_node'

        self.multiplicity = 1

        # One NodeInstance per requested replica: test_node.1 .. test_node.N.
        self.node_instances = {}
        for i in range(1, self.multiplicity + 1):
            node_instance_name = node_name + '.' + str(i)
            self.node_instances[node_instance_name] = NodeInstance({
                NodeDecorator.NODE_NAME_KEY:
                node_name,
                NodeDecorator.NODE_INSTANCE_NAME_KEY:
                node_instance_name,
                'cloudservice':
                self.connector_instance_name,
                'image.platform':
                'Ubuntu',
                'image.imageId':
                image_id,
                'image.id':
                image_id,
                'network':
                self.ch.config['opennebula.network'],
                self.constructKey('instance.type'):
                instance_type,
                self.constructKey('ram'):
                '2',
                self.constructKey('cpu'):
                '1'
            })

        # Single "machine" instance used by the build-image test.
        self.node_instance = NodeInstance({
            NodeDecorator.NODE_NAME_KEY:
            NodeDecorator.MACHINE_NAME,
            NodeDecorator.NODE_INSTANCE_NAME_KEY:
            NodeDecorator.MACHINE_NAME,
            'cloudservice':
            self.connector_instance_name,
            'image.platform':
            'Ubuntu',
            'image.imageId':
            image_id,
            'image.id':
            image_id,
            self.constructKey('instance.type'):
            instance_type,
            'image.prerecipe':
            """#!/bin/sh
set -e
set -x

ls -l /tmp
dpkg -l | egrep "nano|lvm" || true
""",
            'image.packages': ['lvm2', 'nano'],
            'image.recipe':
            """#!/bin/sh
set -e
set -x

dpkg -l | egrep "nano|lvm" || true
lvs
"""
        })

    def tearDown(self):
        # Drop the bootstrap environment variables and release the fixtures.
        os.environ.pop('SLIPSTREAM_CONNECTOR_INSTANCE')
        os.environ.pop('SLIPSTREAM_BOOTSTRAP_BIN')
        self.client = None
        self.ch = None

    def xtest_1_startStopImages(self):
        """Start the node instances, check the VM count, stop the deployment."""
        self.client.run_category = RUN_CATEGORY_DEPLOYMENT

        self.client.start_nodes_and_clients(self.user_info,
                                            self.node_instances)

        util.printAndFlush('Instances started')

        vms = self.client.get_vms()
        assert len(vms) == self.multiplicity

        self.client.stop_deployment()

    def xtest_2_buildImage(self):
        """Build an image from the machine instance and check a new id is returned."""
        self.client.run_category = RUN_CATEGORY_IMAGE

        self.client.start_nodes_and_clients(
            self.user_info, {NodeDecorator.MACHINE_NAME: self.node_instance})
        instances_details = self.client.get_vms_details()

        assert instances_details
        assert instances_details[0][NodeDecorator.MACHINE_NAME]

        new_id = self.client.build_image(self.user_info, self.node_instance)
        assert new_id
class TestOkeanosClientCloud(unittest.TestCase):
    """Live tests for the ~Okeanos cloud connector.

    Methods are prefixed with ``xtest_`` so default unittest discovery skips
    them; rename to ``test_`` to run against the real cloud described by
    CONFIG_FILE.
    """

    CLOUD_NAME = getConnectorClass().cloudName
    FLAVOR_KEY = "%s.instance.type" % CLOUD_NAME
    RESIZE_FLAVOR_KEY = "%s.resize.instance.type" % CLOUD_NAME

    def log(self, msg=''):
        """Log *msg* prefixed with '<class>::<calling method>'."""
        who = '%s::%s' % (self.__class__.__name__, inspect.stack()[1][3])
        LOG('%s# %s' % (who, msg))

    def setUp(self):
        """Build UserInfo and NodeInstance fixtures from CONFIG_FILE."""
        cloudName = TestOkeanosClientCloud.CLOUD_NAME
        flavorKey = TestOkeanosClientCloud.FLAVOR_KEY
        resizeFlavorKey = TestOkeanosClientCloud.RESIZE_FLAVOR_KEY

        os.environ['SLIPSTREAM_CONNECTOR_INSTANCE'] = cloudName
        os.environ['SLIPSTREAM_BOOTSTRAP_BIN'] = 'http://example.com/bootstrap'
        # Unique-ish deployment id derived from the current time.
        os.environ['SLIPSTREAM_DIID'] = \
            '%s-1234-1234-1234-123456789012' % str(int(time.time()))[2:]

        if not os.path.exists(CONFIG_FILE):
            raise Exception('Configuration file %s not found.' % CONFIG_FILE)

        self.ch = ConfigHolder(configFile=CONFIG_FILE, context={'foo': 'bar'})
        self.ch.verboseLevel = int(self.ch.verboseLevel)

        flavor = self.ch.config[flavorKey]
        resizeFlavor = self.ch.config[resizeFlavorKey]
        self.log("Initial Flavor: '%s' = %s" % (flavorKey, flavor))
        self.log("Resize  Flavor: '%s' = %s" % (resizeFlavorKey, resizeFlavor))

        self.user_info = UserInfo(cloudName)
        self.user_info['General.ssh.public.key'] = self.ch.config['General.ssh.public.key']
        self.user_info[cloudName + '.endpoint'] = self.ch.config[cloudName + '.auth_url']
        self.user_info[cloudName + '.username'] = self.ch.config[cloudName + '.user.uuid']
        self.user_info[cloudName + '.password'] = self.ch.config[cloudName + '.token']
        self.user_info[cloudName + '.project.id'] = self.ch.config[cloudName + '.project.id']

        node_name = 'test_node'

        self.multiplicity = int(self.ch.config['multiplicity'])

        # One NodeInstance per requested deployment instance.
        self.node_instances = {}
        for i in range(1, self.multiplicity + 1):
            node_instance_name = node_name + '.' + str(i)
            ni = NodeInstance({
                NodeDecorator.NODE_NAME_KEY: node_name,
                NodeDecorator.NODE_INSTANCE_NAME_KEY: node_instance_name,
                'cloudservice': cloudName,
                'image.description': 'This is a test image.',
                'image.platform': self.ch.config[cloudName + '.image.platform'],
                'image.id': self.ch.config[cloudName + '.imageid'],
                flavorKey: flavor,
                resizeFlavorKey: resizeFlavor,
                'network': self.ch.config['network']
            })
            ni.set_parameter(NodeDecorator.SCALE_DISK_ATTACH_SIZE, 1)
            self.node_instances[node_instance_name] = ni

        # Separate single-machine instance used by the image-build test.
        self.node_instance = NodeInstance({
            NodeDecorator.NODE_NAME_KEY: node_name,
            NodeDecorator.NODE_INSTANCE_NAME_KEY: NodeDecorator.MACHINE_NAME,
            'cloudservice': cloudName,
            'disk.attach.size': self.ch.config[cloudName + '.disk.attach.size'],
            'image.description': 'This is a test image.',
            'image.platform': self.ch.config[cloudName + '.image.platform'],
            'image.loginUser': self.ch.config[cloudName + '.image.loginuser'],
            'image.id': self.ch.config[cloudName + '.imageid'],
            flavorKey: flavor,
            resizeFlavorKey: resizeFlavor,
            'network': self.ch.config['network'],
            'image.prerecipe':
"""#!/bin/sh
set -e
set -x

ls -l /tmp
dpkg -l | egrep "nano|lvm" || true
""",
            'image.packages': ['lvm2', 'nano'],
            'image.recipe':
"""#!/bin/sh
set -e
set -x

dpkg -l | egrep "nano|lvm" || true
lvs
"""
        })

    def tearDown(self):
        """Drop the environment variables exported by setUp and release refs."""
        os.environ.pop('SLIPSTREAM_CONNECTOR_INSTANCE')
        os.environ.pop('SLIPSTREAM_BOOTSTRAP_BIN')
        os.environ.pop('SLIPSTREAM_DIID')
        self.client = None  # type: slipstream_okeanos.OkeanosClientCloud.OkeanosClientCloud
        self.ch = None  # type: slipstream.ConfigHolder.ConfigHolder

    def _init_connector(self, run_category=RUN_CATEGORY_DEPLOYMENT):
        """Create the connector for *run_category* with VM publishing mocked."""
        self.ch.set(KEY_RUN_CATEGORY, run_category)
        self.client = getConnector(self.ch)
        self.client._publish_vm_info = Mock()

    def xtest_1_startWaitRunningStopImage(self):
        """Live test: start, wait for running state, stop."""
        self._init_connector()
        self._start_wait_running_stop_images()

    def xtest_2_buildImage(self):
        """Live test: build_image() is expected to be unsupported here."""
        self._init_connector(run_category=RUN_CATEGORY_IMAGE)

        try:
            instances_details = self.client.start_nodes_and_clients(
                self.user_info, {NodeDecorator.MACHINE_NAME: self.node_instance})

            assert instances_details
            assert instances_details[0][NodeDecorator.MACHINE_NAME]

            self.assertRaises(NotImplementedError, self.client.build_image,
                              *(self.user_info, self.node_instance))
        finally:
            self.client.stop_deployment()

    @staticmethod
    def _attached_disk_setter(node_instance, device):
        """
        Sets the attached device on the node_instance object for the later usage in detach action.
        This is called by the BaseCloudConnector when provided as `done_reporter` to BaseCloudConnector.attach_disk()
        """
        LOG("_attached_disk_setter(), node_instance=%s, device=%s" % (node_instance, device))
        node_instance.set_parameter(NodeDecorator.SCALE_DISK_DETACH_DEVICE, device)
        LOG("_attached_disk_setter(), node_instance=%s, device=%s" % (node_instance, device))

    def xtest_3_attach_detach_disk(self):
        """Live test: attach a disk to each node instance, then detach it."""
        def stopDeployment():
            self.log("Stopping deployment ...")
            self.client.stop_deployment()
            self.log("Deployment stopped")

        try:
            self._init_connector(run_category=RUN_CATEGORY_IMAGE)
            self._start_images()
            self.log("Images started")
            node_instances = self.node_instances.values()
            self.log("Attaching disk to %s" % node_instances)
            self.client.attach_disk(node_instances, done_reporter=self._attached_disk_setter)
            self.log("Disk attached to %s" % node_instances)
            self.log("Detaching disk from %s" % node_instances)
            self.client.detach_disk(node_instances)
            self.log("Disk detached")
        except BaseException:
            # Was a bare `except:` that duplicated the cleanup call; the
            # `finally` below now stops the deployment exactly once on both
            # the success and the failure path.
            self.log("An error happened, stopping deployment anyway")
            raise
        finally:
            stopDeployment()

    def xtest_4_resize(self):
        """Live test: resize every node instance to its configured resize flavor."""
        def stopDeployment():
            self.log("Stopping deployment ...")
            self.client.stop_deployment()
            self.log("Deployment stopped")

        try:
            self._init_connector(run_category=RUN_CATEGORY_IMAGE)
            self._start_images()
            self.log("Images started")

            node_instances = self.node_instances.values()
            for node_instance in node_instances:
                existingFlavor = node_instance.get_instance_type()
                resizeFlavor = node_instance.get_cloud_parameter('resize.instance.type')
                self.log("existingFlavor = %s, resizeFlavor = %s" % (existingFlavor, resizeFlavor))
                node_instance.set_cloud_parameters({'instance.type': resizeFlavor})
            self.log("Resizing %s" % node_instances)
            self.client.resize(node_instances)
            self.log("Resized %s" % node_instances)
        except BaseException:
            # Was a bare `except:` that duplicated the cleanup call; see
            # the `finally` clause below.
            self.log("An error happened, stopping deployment anyway")
            raise
        finally:
            stopDeployment()

    def _start_images(self):
        """Start all node instances and record their IaaS instance ids."""
        for node_instance in self.node_instances:
            self.log('Starting %s' % node_instance)

        self.client.start_nodes_and_clients(self.user_info, self.node_instances)
        util.printAndFlush('Instances started\n')
        vms = self.client.get_vms()
        for vm_name in vms:
            vm = vms[vm_name]
            self.log('Started %s: %s' % (vm_name, vm))
            instanceId = self.client._vm_get_id(vm)
            self.node_instances[vm_name].set_parameter(NodeDecorator.INSTANCEID_KEY, instanceId)
        assert len(vms) == self.multiplicity

    def _wait_running_images(self):
        """No-op: the ~Okeanos connector itself waits for the running state."""
        time.sleep(0)

    def _start_wait_running_stop_images(self):
        """Start all images, wait until running, always stop the deployment."""
        try:
            self._start_images()
            self._wait_running_images()
        finally:
            self.client.stop_deployment()
class TestStratusLabClientCloud(unittest.TestCase):
    def setUp(self):

        os.environ['SLIPSTREAM_CONNECTOR_INSTANCE'] = 'stratuslab'
        os.environ['SLIPSTREAM_BOOTSTRAP_BIN'] = 'http://example.com/bootstrap'
        os.environ['SLIPSTREAM_DIID'] = '00000000-0000-0000-0000-000000000000'

        if not os.path.exists(CONFIG_FILE):
            raise Exception('Configuration file %s not found.' % CONFIG_FILE)

        self.ch = ConfigHolder(configFile=CONFIG_FILE, context={'foo': 'bar'})
        self.ch.set('verboseLevel', '2')

        os.environ['SLIPSTREAM_MESSAGING_ENDPOINT'] = self.ch.config['SLIPSTREAM_MESSAGING_ENDPOINT']
        os.environ['SLIPSTREAM_MESSAGING_TYPE'] = self.ch.config['SLIPSTREAM_MESSAGING_TYPE']
        os.environ['SLIPSTREAM_MESSAGING_QUEUE'] = self.ch.config['SLIPSTREAM_MESSAGING_QUEUE']

        self.client = StratuslabClientCloud(self.ch)
        self.client.publishVmInfo = Mock()

        self.user_info = UserInfo('stratuslab')
        self.user_info['stratuslab.endpoint'] = self.ch.config['stratuslab.endpoint']
        self.user_info['stratuslab.ip.type'] = self.ch.config['stratuslab.ip.type']
        self.user_info['stratuslab.marketplace.endpoint'] = self.ch.config['stratuslab.marketplace.endpoint']
        self.user_info['stratuslab.password'] = self.ch.config['stratuslab.password']
        self.user_info['General.ssh.public.key'] = self.ch.config['General.ssh.public.key']
        self.user_info['stratuslab.username'] = self.ch.config['stratuslab.username']
        self.user_info['User.firstName'] = 'Foo'
        self.user_info['User.lastName'] = 'Bar'
        self.user_info['User.email'] = '*****@*****.**'

        extra_disk_volatile = self.ch.config['stratuslab.extra.disk.volatile']
        image_id = self.ch.config['stratuslab.imageid']
        self.multiplicity = 1
        self.node_info = {
            'multiplicity': self.multiplicity,
            'nodename': 'test_node',
            'image': {
                'extra_disks': {},
                'cloud_parameters': {
                    'stratuslab': {
                        'stratuslab.instance.type': 'm1.small',
                        'stratuslab.disks.bus.type': 'virtio',
                        'stratuslab.cpu': '',
                        'stratuslab.ram': ''
                    },
                    'Cloud': {
                        'network': 'public',
                        'extra.disk.volatile': extra_disk_volatile
                    }
                },
                'attributes': {
                    'resourceUri': '',
                    'imageId': image_id,
                    'platform': 'Ubuntu'
                },
                'targets': {
                    'prerecipe':
"""#!/bin/sh
set -e
set -x

ls -l /tmp
dpkg -l | egrep "nano|lvm" || true
""",
                    'recipe':
"""#!/bin/sh
set -e
set -x

dpkg -l | egrep "nano|lvm" || true
lvs
""",
                    'packages': ['lvm2', 'nano']
                }
            },
        }

    def tearDown(self):
        os.environ.pop('SLIPSTREAM_CONNECTOR_INSTANCE')
        os.environ.pop('SLIPSTREAM_BOOTSTRAP_BIN')
        self.client = None
        self.ch = None

    def test_1_startStopImages(self):

        self.client.startNodesAndClients(self.user_info, [self.node_info])

        util.printAndFlush('Instances started')

        vms = self.client.getVms()
        assert len(vms) == self.multiplicity

        self.client.stopDeployment()

    def test_2_buildImage(self):
        image_info = self.client._extractImageInfoFromNodeInfo(self.node_info)
        self.client._prepareMachineForBuildImage = Mock()
        self.client.buildImage(self.user_info, image_info)
        # StratusLab doesn't provide us with image ID
        assert '' == self.client.getNewImageId()
class TestCloudWrapper(TestCloudConnectorsBase):
    """Unit tests for CloudWrapper scaling-state logic (connector mocked)."""

    def setUp(self):
        # NOTE(review): base64.b64encode() is called with str arguments,
        # which only works on Python 2; Python 3 requires bytes here.
        self.serviceurl = 'http://example.com'
        self.config_holder = ConfigHolder(
            {
                'username': base64.b64encode('user'),
                'password': base64.b64encode('pass'),
                'cookie_filename': '/var/tmp/cookies',
                'serviceurl': self.serviceurl,
                'node_instance_name': 'instance-name'
            },
            context={'foo': 'bar'},
            config={'foo': 'bar'})

        os.environ[ENV_CONNECTOR_INSTANCE] = 'Test'

        BaseWrapper.is_mutable = Mock(return_value=False)

    def tearDown(self):
        """Undo setUp and wipe the on-disk cache used by the wrapper."""
        os.environ.pop(ENV_CONNECTOR_INSTANCE)
        self.config_holder = None
        shutil.rmtree('%s/.cache/' % os.getcwd(), ignore_errors=True)

    def test_get_cloud_name(self):
        """Every connector module reports the connector instance name."""
        for module_name in self.get_cloudconnector_modulenames():
            setattr(self.config_holder, CONFIGPARAM_CONNECTOR_MODULE_NAME, module_name)
            setattr(self.config_holder, KEY_RUN_CATEGORY, RUN_CATEGORY_DEPLOYMENT)
            cw = CloudWrapper(self.config_holder)
            cw.initCloudConnector()

            assert cw._get_cloud_service_name() == 'Test'

    def test_put_image_id(self):
        """Updating the image id PUTs to <serviceurl>/<module>/<cloud>."""
        # pylint: disable=protected-access

        self.config_holder.set(CONFIGPARAM_CONNECTOR_MODULE_NAME,
                               self.get_cloudconnector_modulename_by_cloudname('local'))
        cw = CloudWrapper(self.config_holder)
        cw.initCloudConnector()

        cw._ss_client.httpClient._call = Mock(return_value=Mock())

        cw._update_slipstream_image(NodeInstance({'image.resourceUri': 'module/Name'}), 'ABC')
        cw._ss_client.httpClient._call.assert_called_with(
            '%s/module/Name/Test' % self.serviceurl,
            'PUT', 'ABC', 'application/xml',
            'application/xml', retry=True)

    def test_no_scaling(self):
        """
        No scaling is happening.  Node instances are in terminal states.
        """
        node_instances = {
            'n.1': NodeInstance({NodeDecorator.NODE_INSTANCE_NAME_KEY: 'n.1',
                                 NodeDecorator.SCALE_STATE_KEY: CloudWrapper.SCALE_STATE_OPERATIONAL}),
            'm.1': NodeInstance({NodeDecorator.NODE_INSTANCE_NAME_KEY: 'm.1',
                                 NodeDecorator.SCALE_STATE_KEY: CloudWrapper.SCALE_STATE_GONE})
        }

        cw = CloudWrapper(self.config_holder)
        cw._get_nodes_instances = Mock(return_value=node_instances)

        assert {} == cw._get_effective_scale_states()
        assert None == cw._get_global_scale_state()
        assert False == cw.is_vertical_scaling()

        node_and_instances = cw.get_scaling_node_and_instance_names()
        assert '' == node_and_instances[0]
        assert [] == node_and_instances[1]

    def test_consistent_scale_state_inconsistent_scaling_nodes(self):
        """
        Consistent scale state: only one scaling action at a time on different node instances.
        In case node instance is not in a terminal state (as set on the NodeInstance object),
        we check the state directly on the Run (via CloudWrapper._get_runtime_parameter()).

        Inconsistent scaling nodes: only one node type at a time is allowed to be scaled.
        """
        def _get_runtime_parameter(key):
            if key.endswith(NodeDecorator.NODE_PROPERTY_SEPARATOR + NodeDecorator.SCALE_STATE_KEY):
                return CloudWrapper.SCALE_STATE_RESIZING
            else:
                return 'unknown'

        node_instances = {
            'n.1': NodeInstance({NodeDecorator.NODE_INSTANCE_NAME_KEY: 'n.1',
                                 NodeDecorator.NODE_NAME_KEY: 'n',
                                 NodeDecorator.SCALE_STATE_KEY: 'not terminal'}),
            'n.2': NodeInstance({NodeDecorator.NODE_INSTANCE_NAME_KEY: 'n.2',
                                 NodeDecorator.NODE_NAME_KEY: 'n',
                                 NodeDecorator.SCALE_STATE_KEY: CloudWrapper.SCALE_STATE_OPERATIONAL}),
            'm.1': NodeInstance({NodeDecorator.NODE_INSTANCE_NAME_KEY: 'm.1',
                                 NodeDecorator.NODE_NAME_KEY: 'm',
                                 NodeDecorator.SCALE_STATE_KEY: 'not terminal'}),
            'm.2': NodeInstance({NodeDecorator.NODE_INSTANCE_NAME_KEY: 'm.2',
                                 NodeDecorator.NODE_NAME_KEY: 'm',
                                 NodeDecorator.SCALE_STATE_KEY: CloudWrapper.SCALE_STATE_GONE}),
        }

        cw = CloudWrapper(self.config_holder)
        cw._get_runtime_parameter = Mock(side_effect=_get_runtime_parameter)
        cw._get_nodes_instances = Mock(return_value=node_instances)

        scale_states = cw._get_effective_scale_states()

        assert 1 == len(scale_states)
        assert CloudWrapper.SCALE_STATE_RESIZING in scale_states
        assert 2 == len(scale_states[CloudWrapper.SCALE_STATE_RESIZING])
        assert ['m.1', 'n.1'] == sorted(scale_states[CloudWrapper.SCALE_STATE_RESIZING])

        assert CloudWrapper.SCALE_STATE_RESIZING == cw._get_global_scale_state()

        assert True == cw.is_vertical_scaling()

        self.assertRaises(InconsistentScalingNodesError, cw.get_scaling_node_and_instance_names)

    def test_inconsistent_scale_state_inconsistent_scaling_nodes(self):
        """
        Inconsistent scale state: different scaling actions at a time are not allowed.
        In case node instance is not in a terminal state (as set on the NodeInstance object),
        we check the state directly on the Run (via CloudWrapper._get_runtime_parameter()).

        Inconsistent scaling nodes: only one node type at a time is allowed to be scaled.
        """
        def _get_runtime_parameter(key):
            if key.startswith('n.'):
                return CloudWrapper.SCALE_STATE_RESIZING
            elif key.startswith('m.'):
                return CloudWrapper.SCALE_STATE_DISK_ATTACHING
            else:
                return 'unknown'

        node_instances = {
            'n.1': NodeInstance({NodeDecorator.NODE_INSTANCE_NAME_KEY: 'n.1',
                                 NodeDecorator.NODE_NAME_KEY: 'n',
                                 NodeDecorator.SCALE_STATE_KEY: 'not terminal'}),
            'n.2': NodeInstance({NodeDecorator.NODE_INSTANCE_NAME_KEY: 'n.2',
                                 NodeDecorator.NODE_NAME_KEY: 'n',
                                 NodeDecorator.SCALE_STATE_KEY: CloudWrapper.SCALE_STATE_OPERATIONAL}),
            'm.1': NodeInstance({NodeDecorator.NODE_INSTANCE_NAME_KEY: 'm.1',
                                 NodeDecorator.NODE_NAME_KEY: 'm',
                                 NodeDecorator.SCALE_STATE_KEY: 'not terminal'})
        }

        cw = CloudWrapper(self.config_holder)
        cw._get_runtime_parameter = Mock(side_effect=_get_runtime_parameter)
        cw._get_nodes_instances = Mock(return_value=node_instances)

        scale_states = cw._get_effective_scale_states()

        assert 2 == len(scale_states)

        assert CloudWrapper.SCALE_STATE_RESIZING in scale_states
        assert 1 == len(scale_states[CloudWrapper.SCALE_STATE_RESIZING])
        assert ['n.1'] == scale_states[CloudWrapper.SCALE_STATE_RESIZING]

        assert CloudWrapper.SCALE_STATE_DISK_ATTACHING in scale_states
        assert 1 == len(scale_states[CloudWrapper.SCALE_STATE_DISK_ATTACHING])
        assert ['m.1'] == scale_states[CloudWrapper.SCALE_STATE_DISK_ATTACHING]

        self.assertRaises(InconsistentScaleStateError, cw._get_global_scale_state)
        self.assertRaises(InconsistentScaleStateError, cw.is_vertical_scaling)
        self.assertRaises(InconsistentScaleStateError, cw.check_scale_state_consistency)
        self.assertRaises(InconsistentScalingNodesError, cw.get_scaling_node_and_instance_names)

    def test_consistent_scale_state_consistent_scaling_nodes(self):
        """
        Consistent scale state: different scaling actions at a time are not allowed.
        In case node instance is not in a terminal state (as set on the NodeInstance object),
        we check the state directly on the Run (via CloudWrapper._get_runtime_parameter()).

        Consistent scaling nodes: only one node type at a time is allowed to be scaled.
        """
        def _get_runtime_parameter(key):
            if key.endswith(NodeDecorator.NODE_PROPERTY_SEPARATOR + NodeDecorator.SCALE_STATE_KEY):
                return CloudWrapper.SCALE_STATE_RESIZING
            else:
                return 'unknown'

        node_instances = {
            'n.1': NodeInstance({NodeDecorator.NODE_INSTANCE_NAME_KEY: 'n.1',
                                 NodeDecorator.NODE_NAME_KEY: 'n',
                                 NodeDecorator.SCALE_STATE_KEY: 'not terminal'}),
            'n.2': NodeInstance({NodeDecorator.NODE_INSTANCE_NAME_KEY: 'n.2',
                                 NodeDecorator.NODE_NAME_KEY: 'n',
                                 NodeDecorator.SCALE_STATE_KEY: 'not terminal'}),
            'm.1': NodeInstance({NodeDecorator.NODE_INSTANCE_NAME_KEY: 'm.1',
                                 NodeDecorator.NODE_NAME_KEY: 'm',
                                 NodeDecorator.SCALE_STATE_KEY: CloudWrapper.SCALE_STATE_OPERATIONAL})
        }

        cw = CloudWrapper(self.config_holder)
        cw._get_runtime_parameter = Mock(side_effect=_get_runtime_parameter)
        cw._get_nodes_instances = Mock(return_value=node_instances)

        scale_states = cw._get_effective_scale_states()

        assert 1 == len(scale_states)

        assert CloudWrapper.SCALE_STATE_RESIZING in scale_states
        assert 2 == len(scale_states[CloudWrapper.SCALE_STATE_RESIZING])
        assert ['n.1', 'n.2'] == sorted(scale_states[CloudWrapper.SCALE_STATE_RESIZING])

        assert CloudWrapper.SCALE_STATE_RESIZING == cw._get_global_scale_state()

        try:
            cw.check_scale_state_consistency()
        except InconsistentScaleStateError as ex:
            self.fail('Should not have failed with: %s' % str(ex))

        node_and_instances = cw.get_scaling_node_and_instance_names()
        assert 'n' == node_and_instances[0]
        assert ['n.1', 'n.2'] == sorted(node_and_instances[1])

    def test_vertically_scalle_instances_nowait(self):
        """Each vertical scaling action dispatches to the right connector call."""
        _scale_state = None

        def _get_runtime_parameter(key):
            if key.endswith(NodeDecorator.NODE_PROPERTY_SEPARATOR + NodeDecorator.SCALE_STATE_KEY):
                return _scale_state
            else:
                return 'unknown'

        node_instances = {
            'n.1': NodeInstance({NodeDecorator.NODE_INSTANCE_NAME_KEY: 'n.1',
                                 NodeDecorator.NODE_NAME_KEY: 'n',
                                 NodeDecorator.SCALE_STATE_KEY: CloudWrapper.SCALE_STATE_RESIZING}),
            'n.2': NodeInstance({NodeDecorator.NODE_INSTANCE_NAME_KEY: 'n.2',
                                 NodeDecorator.NODE_NAME_KEY: 'n',
                                 NodeDecorator.SCALE_STATE_KEY: CloudWrapper.SCALE_STATE_DISK_ATTACHING}),
            'n.3': NodeInstance({NodeDecorator.NODE_INSTANCE_NAME_KEY: 'n.3',
                                 NodeDecorator.NODE_NAME_KEY: 'n',
                                 NodeDecorator.SCALE_STATE_KEY: CloudWrapper.SCALE_STATE_DISK_DETACHING}),
            'm.1': NodeInstance({NodeDecorator.NODE_INSTANCE_NAME_KEY: 'm.1',
                                 NodeDecorator.NODE_NAME_KEY: 'm',
                                 NodeDecorator.SCALE_STATE_KEY: CloudWrapper.SCALE_STATE_OPERATIONAL})
        }

        self.config_holder.set('verboseLevel', 3)
        setattr(self.config_holder, 'cloud', 'local')
        setattr(self.config_holder, CONFIGPARAM_CONNECTOR_MODULE_NAME,
                'slipstream.cloudconnectors.dummy.DummyClientCloud')

        cw = CloudWrapper(self.config_holder)
        cw._get_nodes_instances = Mock(return_value=node_instances)
        cw.initCloudConnector(self.config_holder)
        cw._set_runtime_parameter = Mock()

        cw._get_user_timeout = Mock(return_value=2)

        # No waiting.
        cw._wait_pre_scale_done = Mock()
        cw._wait_scale_state = Mock()

        _scale_state = 'resizing'
        cw._get_runtime_parameter = Mock(side_effect=_get_runtime_parameter)
        cw._cloud_client.resize = Mock(wraps=cw._cloud_client.resize)
        cw.vertically_scale_instances()
        assert True == cw._cloud_client.resize.called
        node_instance = cw._cloud_client.resize.call_args[0][0][0]
        assert 'n.1' in node_instance.get_name()
        # Was `assert mock.called_with(...)`, which always passes because
        # `called_with` is an auto-created Mock attribute; use a real assertion.
        cw._set_runtime_parameter.assert_any_call('n.1:' + NodeDecorator.SCALE_IAAS_DONE, 'true')

        _scale_state = 'disk_attaching'
        cw._get_runtime_parameter = Mock(side_effect=_get_runtime_parameter)
        cw._cloud_client.attach_disk = Mock(wraps=cw._cloud_client.attach_disk)
        cw.vertically_scale_instances()
        assert True == cw._cloud_client.attach_disk.called
        node_instance = cw._cloud_client.attach_disk.call_args[0][0][0]
        assert 'n.2' in node_instance.get_name()
        cw._set_runtime_parameter.assert_any_call('n.2:' + NodeDecorator.SCALE_IAAS_DONE, 'true')

        _scale_state = 'disk_detaching'
        cw._get_runtime_parameter = Mock(side_effect=_get_runtime_parameter)
        cw._cloud_client.detach_disk = Mock(wraps=cw._cloud_client.detach_disk)
        cw.vertically_scale_instances()
        assert True == cw._cloud_client.detach_disk.called
        node_instance = cw._cloud_client.detach_disk.call_args[0][0][0]
        assert 'n.3' in node_instance.get_name()
        cw._set_runtime_parameter.assert_any_call('n.3:' + NodeDecorator.SCALE_IAAS_DONE, 'true')

    def test_wait_pre_scale_done(self):
        """_wait_pre_scale_done succeeds before the timeout, times out otherwise."""
        node_instances = [
            NodeInstance({NodeDecorator.NODE_INSTANCE_NAME_KEY: 'n.1'}),
            NodeInstance({NodeDecorator.NODE_INSTANCE_NAME_KEY: 'n.2'}),
            NodeInstance({NodeDecorator.NODE_INSTANCE_NAME_KEY: 'n.3'}),
            NodeInstance({NodeDecorator.NODE_INSTANCE_NAME_KEY: 'm.1'})
        ]
        cw = CloudWrapper(self.config_holder)

        cw.get_pre_scale_done = Mock(return_value='true')
        cw._get_state_timeout_time = Mock(return_value=(time.time() + 10))
        cw._wait_pre_scale_done(node_instances)

        cw.get_pre_scale_done = Mock(return_value='true')
        cw._get_state_timeout_time = Mock(return_value=(time.time() - 1))
        self.assertRaises(TimeoutException, cw._wait_pre_scale_done, node_instances)

        cw.get_pre_scale_done = Mock(return_value='')
        cw._get_state_timeout_time = Mock(return_value=(time.time() + 2))
        self.assertRaises(TimeoutException, cw._wait_pre_scale_done, node_instances)

    def test_wait_scale_state(self):
        """_wait_scale_state succeeds before the timeout, times out otherwise."""
        node_instances = [
            NodeInstance({NodeDecorator.NODE_INSTANCE_NAME_KEY: 'n.1'}),
            NodeInstance({NodeDecorator.NODE_INSTANCE_NAME_KEY: 'n.2'}),
            NodeInstance({NodeDecorator.NODE_INSTANCE_NAME_KEY: 'n.3'}),
            NodeInstance({NodeDecorator.NODE_INSTANCE_NAME_KEY: 'm.1'})
        ]
        cw = CloudWrapper(self.config_holder)

        # All set before timeout.
        cw._get_effective_scale_state = Mock(return_value=CloudWrapper.SCALE_STATE_RESIZED)
        cw._get_state_timeout_time = Mock(return_value=(time.time() + 5))
        cw._wait_scale_state(CloudWrapper.SCALE_STATE_RESIZED, node_instances)

        # Timeout is in the past.
        cw._get_effective_scale_state = Mock(return_value=CloudWrapper.SCALE_STATE_RESIZED)
        cw._get_state_timeout_time = Mock(return_value=(time.time() - 1))
        self.assertRaises(TimeoutException, cw._wait_scale_state, *(CloudWrapper.SCALE_STATE_RESIZED, node_instances))

        # VMs do not set proper value and we timeout.
        cw._get_effective_scale_state = Mock(return_value=CloudWrapper.SCALE_STATE_RESIZING)
        cw._get_state_timeout_time = Mock(return_value=(time.time() + 2))
        self.assertRaises(TimeoutException, cw._wait_scale_state, *(CloudWrapper.SCALE_STATE_RESIZED, node_instances))
 def _init_client(self):
     """Build the SlipStream client from the parsed command-line options."""
     holder = ConfigHolder(self.options,
                           context={'empty': None},
                           config={'empty': None})
     holder.set('serviceurl', self.options.endpoint)
     self.client = Client(holder)
Exemple #32
0
class TestBaseLive(unittest.TestCase):
    """Shared scaffolding for live cloud-connector tests.

    Subclasses set ``cin`` (connector instance name) and call ``_setUp()``
    with the tested connector class, a configuration file and the user-info
    keys to copy from it.
    """

    cin = ''
    # NOTE: class-level mutable default; _setUp()/subclasses are expected to
    # assign a fresh dict per instance rather than mutate this one in place.
    node_instances = {}  # of NodeInstance()
    multiplicity = 0
    max_iaas_workers = 1

    def construct_key(self, name):
        """Return *name* prefixed with the connector instance name."""
        return self.cin + '.' + name

    def _conf_val(self, key, default=None):
        """Read a connector-prefixed config value, with optional *default*."""
        conf_key = self.construct_key(key)
        if default is not None:
            # Was `if default:`, which silently ignored falsy defaults
            # such as 0 or '' and raised KeyError instead.
            return self.ch.config.get(conf_key, default)
        return self.ch.config[conf_key]

    def _build_user_info(self, keys):
        """Create self.user_info from the SSH key plus the given config keys."""
        self.user_info = UserInfo(self.cin)
        self.user_info['General.ssh.public.key'] = self.ch.config[
            'General.ssh.public.key']
        for k in keys:
            self.user_info[self.construct_key(k)] = self._conf_val(k)

    def _load_config(self, conf_file):
        """Load *conf_file* into self.ch; fail fast when it is missing."""
        if not os.path.exists(conf_file):
            raise Exception('Configuration file %s not found.' % conf_file)

        self.ch = ConfigHolder(configFile=conf_file, context={'foo': 'bar'})
        self.ch.set(KEY_RUN_CATEGORY, '')

    def _build_client(self, testedCls):
        """Instantiate the tested connector with VM publishing stubbed out."""
        testedCls._publish_vm_info = publish_vm_info  # pylint: disable=protected-access
        self.client = testedCls(self.ch)

    def _get_ex_msg(self, ex):
        """Best-effort extraction of a human-readable message from *ex*."""
        if hasattr(ex, 'message'):
            return ex.message
        # Was `ex.arg`, an attribute no standard exception defines (the dead
        # branch made this return '' on Python 3); fall back to str(ex).
        return str(ex)

    def _setUp(self, testedCls, conf_file, conf_keys):
        """(Re-)sets the following fields
        self.ch               - ConfigHolder
        self.client           - instance of BaseCloudConnector
        self.user_info        - UserInfo
        self.multiplicity     - int
        self.max_iaas_workers - str
        """
        os.environ['SLIPSTREAM_CONNECTOR_INSTANCE'] = self.cin
        os.environ['SLIPSTREAM_BOOTSTRAP_BIN'] = 'http://example.com/bootstrap'
        os.environ['SLIPSTREAM_DIID'] = '00000000-0000-0000-0000-000000000000'

        self._load_config(conf_file)
        self._build_client(testedCls)
        self._build_user_info(conf_keys)
        pp(self.user_info)

        self.multiplicity = int(self._conf_val('multiplicity', 2))
        self.max_iaas_workers = self._conf_val('max.iaas.workers',
                                               str(self.multiplicity))

    def _test_startStopImages(self):
        "Live test that starts and stops VMs on a cloud."
        self.client._get_max_workers = Mock(return_value=self.max_iaas_workers)
        self.client.run_category = RUN_CATEGORY_DEPLOYMENT

        success = True
        error = ''
        try:
            self.client.start_nodes_and_clients(self.user_info,
                                                self.node_instances)
            vms = self.client.get_vms()
            assert len(vms) == self.multiplicity
            util.printAction('Instances started.')
            pp(vms)
        except Exception as ex:
            success = False
            error = self._get_ex_msg(ex)
            util.printError("Exception caught while starting instances!")
            exc_type, exc_value, exc_traceback = sys.exc_info()
            traceback.print_exception(exc_type, exc_value, exc_traceback)
        finally:
            util.printAction("Stopping deployment.")
            self.client.stop_deployment()
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(success, True, error)
Exemple #33
0
class TestCloudStackClientCloud(unittest.TestCase):
    """Live tests for the CloudStack connector.

    The test methods carry an 'xtest_' prefix so they are skipped by the
    unittest discovery; rename to 'test_' to run against a real cloud.
    """

    connector_instance_name = 'cloudstack'

    def constructKey(self, name):
        """Return `name` prefixed with the connector instance name."""
        return self.connector_instance_name + '.' + name

    def setUp(self):
        """Build the client, user info and node instances from CONFIG_FILE."""
        BaseCloudConnector._publish_vm_info = Mock()  # pylint: disable=protected-access

        os.environ[
            'SLIPSTREAM_CONNECTOR_INSTANCE'] = self.connector_instance_name
        os.environ['SLIPSTREAM_BOOTSTRAP_BIN'] = 'http://example.com/bootstrap'
        # Unique deployment id per test run.
        os.environ[
            'SLIPSTREAM_DIID'] = '00000000-0000-0000-0000-%s' % time.time()

        if not os.path.exists(CONFIG_FILE):
            raise Exception('Configuration file %s not found.' % CONFIG_FILE)

        self.ch = ConfigHolder(configFile=CONFIG_FILE, context={'foo': 'bar'})
        self.ch.set(KEY_RUN_CATEGORY, '')
        self.ch.set('verboseLevel', self.ch.config['General.verbosity'])

        self.client = CloudStackClientCloud(self.ch)

        # Credentials and placement come from the configuration file.
        self.user_info = UserInfo(self.connector_instance_name)
        self.user_info[self.constructKey(
            'endpoint')] = self.ch.config['cloudstack.endpoint']
        self.user_info[self.constructKey(
            'zone')] = self.ch.config['cloudstack.zone']
        self.user_info[self.constructKey(
            'username')] = self.ch.config['cloudstack.key']
        self.user_info[self.constructKey(
            'password')] = self.ch.config['cloudstack.secret']
        security_groups = self.ch.config['cloudstack.security.groups']
        instance_type = self.ch.config['cloudstack.instance.type']
        self.user_info['General.ssh.public.key'] = self.ch.config[
            'General.ssh.public.key']
        image_id = self.ch.config[self.constructKey('template')]

        self.multiplicity = 2
        self.max_iaas_workers = self.ch.config.get(
            'cloudstack.max.iaas.workers', str(self.multiplicity))

        self.node_name = 'test_node'
        self.node_instances = {}
        for i in range(1, self.multiplicity + 1):
            node_instance_name = self.node_name + '.' + str(i)
            self.node_instances[node_instance_name] = NodeInstance({
                NodeDecorator.NODE_NAME_KEY: self.node_name,
                NodeDecorator.NODE_INSTANCE_NAME_KEY: node_instance_name,
                'cloudservice': self.connector_instance_name,
                'image.platform': 'linux',
                'image.imageId': image_id,
                'image.id': image_id,
                self.constructKey('instance.type'): instance_type,
                self.constructKey('security.groups'): security_groups,
                'network': 'private'
            })

    def tearDown(self):
        # BUG FIX: SLIPSTREAM_DIID is set in setUp() but was never removed,
        # leaking into subsequent tests.  Pop with a default so teardown
        # never raises if a variable is already gone.
        os.environ.pop('SLIPSTREAM_CONNECTOR_INSTANCE', None)
        os.environ.pop('SLIPSTREAM_BOOTSTRAP_BIN', None)
        os.environ.pop('SLIPSTREAM_DIID', None)
        self.client = None
        self.ch = None

    def xtest_1_startStopImages(self):
        """Start all node instances, check the VM count, always stop them."""
        self.client._get_max_workers = Mock(return_value=self.max_iaas_workers)
        self.client.run_category = RUN_CATEGORY_DEPLOYMENT

        try:
            self.client.start_nodes_and_clients(self.user_info,
                                                self.node_instances)

            util.printAndFlush('Instances started')

            vms = self.client.get_vms()
            assert len(vms) == self.multiplicity
        finally:
            self.client.stop_deployment()

    def xtest_2_buildImage(self):
        """Image building is not implemented for this live test."""
        raise NotImplementedError()
# Exemple #34
# 0
def ss_set(key, value, ignore_abort=False):
    """Set the SlipStream runtime parameter `key` to `value`.

    When `ignore_abort` is true, the parameter is set even if the run
    has been aborted.
    """
    config_holder = ConfigHolder(config={'foo': None})
    config_holder.set('ignoreAbort', ignore_abort)
    Client(config_holder).setRuntimeParameter(key, value)
class TestCloudWrapper(TestCloudConnectorsBase):
    def setUp(self):
        """Prepare a ConfigHolder with canned credentials and register the
        'Test' connector instance in the environment."""
        self.serviceurl = 'http://example.com'
        options = {
            'username': base64.b64encode('user'),
            'password': base64.b64encode('pass'),
            'cookie_filename': '/var/tmp/cookies',
            'serviceurl': self.serviceurl,
            'node_instance_name': 'instance-name',
        }
        self.config_holder = ConfigHolder(options,
                                          context={'foo': 'bar'},
                                          config={'foo': 'bar'})

        os.environ[ENV_CONNECTOR_INSTANCE] = 'Test'

        # Treat images as immutable for every test in this case.
        BaseWrapper.is_mutable = Mock(return_value=False)

    def tearDown(self):
        """Undo setUp(): clear the env var, drop config, purge the HTTP cache."""
        os.environ.pop(ENV_CONNECTOR_INSTANCE)
        self.config_holder = None
        cache_dir = '%s/.cache/' % os.getcwd()
        shutil.rmtree(cache_dir, ignore_errors=True)

    def test_get_cloud_name(self):
        """Every connector module reports the cloud service name 'Test'."""
        for module_name in self.get_cloudconnector_modulenames():
            setattr(self.config_holder, CONFIGPARAM_CONNECTOR_MODULE_NAME,
                    module_name)
            setattr(self.config_holder, KEY_RUN_CATEGORY,
                    RUN_CATEGORY_DEPLOYMENT)

            wrapper = CloudWrapper(self.config_holder)
            wrapper.initCloudConnector()

            # ENV_CONNECTOR_INSTANCE was set to 'Test' in setUp().
            assert wrapper._get_cloud_service_name() == 'Test'

    def test_put_image_id(self):
        """Updating a SlipStream image PUTs the new image id to the module URL."""
        # pylint: disable=protected-access

        module_name = self.get_cloudconnector_modulename_by_cloudname('local')
        self.config_holder.set(CONFIGPARAM_CONNECTOR_MODULE_NAME, module_name)
        wrapper = CloudWrapper(self.config_holder)
        wrapper.initCloudConnector()

        # Intercept the low-level HTTP call.
        wrapper._ss_client.httpClient._call = Mock(return_value=Mock())

        node_instance = NodeInstance({'image.resourceUri': 'module/Name'})
        wrapper._update_slipstream_image(node_instance, 'ABC')

        expected_url = '%s/module/Name/Test' % self.serviceurl
        wrapper._ss_client.httpClient._call.assert_called_with(
            expected_url,
            'PUT',
            'ABC',
            'application/xml',
            'application/xml',
            retry=True)

    def test_no_scaling(self):
        """
        No scaling is happening.  Node instances are in terminal states.
        """
        node_instances = {
            'n.1': NodeInstance({
                NodeDecorator.NODE_INSTANCE_NAME_KEY: 'n.1',
                NodeDecorator.SCALE_STATE_KEY:
                    CloudWrapper.SCALE_STATE_OPERATIONAL}),
            'm.1': NodeInstance({
                NodeDecorator.NODE_INSTANCE_NAME_KEY: 'm.1',
                NodeDecorator.SCALE_STATE_KEY:
                    CloudWrapper.SCALE_STATE_GONE})
        }

        cw = CloudWrapper(self.config_holder)
        cw._get_nodes_instances = Mock(return_value=node_instances)

        # All instances are in terminal states, so nothing is scaling.
        assert cw._get_effective_scale_states() == {}
        # `is None` (identity) instead of `None == x`.
        assert cw._get_global_scale_state() is None
        assert not cw.is_vertical_scaling()

        node_name, instance_names = cw.get_scaling_node_and_instance_names()
        assert node_name == ''
        assert instance_names == []

    def test_consistent_scale_state_inconsistent_scaling_nodes(self):
        """
        Consistent scale state: only one scaling action at a time on different node instances.
        In case node instance is not in a terminal state (as set on the NodeInstance object),
        we check the state directly on the Run (via CloudWrapper._get_runtime_parameter()).

        Inconsistent scaling nodes: only one node type at a time is allowed to be scaled.
        """
        def _get_runtime_parameter(key):
            # Any '<instance>:scale.state' query reports 'resizing'.
            if key.endswith(NodeDecorator.NODE_PROPERTY_SEPARATOR +
                            NodeDecorator.SCALE_STATE_KEY):
                return CloudWrapper.SCALE_STATE_RESIZING
            else:
                return 'unknown'

        node_instances = {
            'n.1': NodeInstance({
                NodeDecorator.NODE_INSTANCE_NAME_KEY: 'n.1',
                NodeDecorator.NODE_NAME_KEY: 'n',
                NodeDecorator.SCALE_STATE_KEY: 'not terminal'}),
            'n.2': NodeInstance({
                NodeDecorator.NODE_INSTANCE_NAME_KEY: 'n.2',
                NodeDecorator.NODE_NAME_KEY: 'n',
                NodeDecorator.SCALE_STATE_KEY:
                    CloudWrapper.SCALE_STATE_OPERATIONAL}),
            'm.1': NodeInstance({
                NodeDecorator.NODE_INSTANCE_NAME_KEY: 'm.1',
                NodeDecorator.NODE_NAME_KEY: 'm',
                NodeDecorator.SCALE_STATE_KEY: 'not terminal'}),
            'm.2': NodeInstance({
                NodeDecorator.NODE_INSTANCE_NAME_KEY: 'm.2',
                NodeDecorator.NODE_NAME_KEY: 'm',
                NodeDecorator.SCALE_STATE_KEY:
                    CloudWrapper.SCALE_STATE_GONE}),
        }

        cw = CloudWrapper(self.config_holder)
        cw._get_runtime_parameter = Mock(side_effect=_get_runtime_parameter)
        cw._get_nodes_instances = Mock(return_value=node_instances)

        scale_states = cw._get_effective_scale_states()

        # Only the two non-terminal instances (n.1, m.1) have an effective
        # state, and both report 'resizing'.
        assert len(scale_states) == 1
        assert CloudWrapper.SCALE_STATE_RESIZING in scale_states
        resizing = scale_states[CloudWrapper.SCALE_STATE_RESIZING]
        assert len(resizing) == 2
        assert sorted(resizing) == ['m.1', 'n.1']

        assert cw._get_global_scale_state() == \
            CloudWrapper.SCALE_STATE_RESIZING

        # `assert x` instead of `True == x` for booleans.
        assert cw.is_vertical_scaling()

        # Two different node types scaling at once is not allowed.
        # failUnlessRaises is a deprecated alias of assertRaises.
        self.assertRaises(InconsistentScalingNodesError,
                          cw.get_scaling_node_and_instance_names)

    def test_inconsistent_scale_state_inconsistent_scaling_nodes(self):
        """
        Inconsistent scale state: different scaling actions at a time are not allowed.
        In case node instance is not in a terminal state (as set on the NodeInstance object),
        we check the state directly on the Run (via CloudWrapper._get_runtime_parameter()).

        Inconsistent scaling nodes: only one node type at a time is allowed to be scaled.
        """
        def _get_runtime_parameter(key):
            # Node type 'n' reports 'resizing', node type 'm' reports
            # 'disk_attaching' - i.e. two different actions at once.
            if key.startswith('n.'):
                return CloudWrapper.SCALE_STATE_RESIZING
            elif key.startswith('m.'):
                return CloudWrapper.SCALE_STATE_DISK_ATTACHING
            else:
                return 'unknown'

        node_instances = {
            'n.1': NodeInstance({
                NodeDecorator.NODE_INSTANCE_NAME_KEY: 'n.1',
                NodeDecorator.NODE_NAME_KEY: 'n',
                NodeDecorator.SCALE_STATE_KEY: 'not terminal'}),
            'n.2': NodeInstance({
                NodeDecorator.NODE_INSTANCE_NAME_KEY: 'n.2',
                NodeDecorator.NODE_NAME_KEY: 'n',
                NodeDecorator.SCALE_STATE_KEY:
                    CloudWrapper.SCALE_STATE_OPERATIONAL}),
            'm.1': NodeInstance({
                NodeDecorator.NODE_INSTANCE_NAME_KEY: 'm.1',
                NodeDecorator.NODE_NAME_KEY: 'm',
                NodeDecorator.SCALE_STATE_KEY: 'not terminal'})
        }

        cw = CloudWrapper(self.config_holder)
        cw._get_runtime_parameter = Mock(side_effect=_get_runtime_parameter)
        cw._get_nodes_instances = Mock(return_value=node_instances)

        scale_states = cw._get_effective_scale_states()

        assert len(scale_states) == 2

        assert CloudWrapper.SCALE_STATE_RESIZING in scale_states
        assert len(scale_states[CloudWrapper.SCALE_STATE_RESIZING]) == 1
        assert scale_states[CloudWrapper.SCALE_STATE_RESIZING] == ['n.1']

        assert CloudWrapper.SCALE_STATE_DISK_ATTACHING in scale_states
        assert len(scale_states[CloudWrapper.SCALE_STATE_DISK_ATTACHING]) == 1
        assert scale_states[CloudWrapper.SCALE_STATE_DISK_ATTACHING] == ['m.1']

        # failUnlessRaises is a deprecated alias of assertRaises.
        self.assertRaises(InconsistentScaleStateError,
                          cw._get_global_scale_state)
        self.assertRaises(InconsistentScaleStateError,
                          cw.is_vertical_scaling)
        self.assertRaises(InconsistentScaleStateError,
                          cw.check_scale_state_consistency)
        self.assertRaises(InconsistentScalingNodesError,
                          cw.get_scaling_node_and_instance_names)

    def test_consistent_scale_state_consistent_scaling_nodes(self):
        """
        Consistent scale state: different scaling actions at a time are not allowed.
        In case node instance is not in a terminal state (as set on the NodeInstance object),
        we check the state directly on the Run (via CloudWrapper._get_runtime_parameter()).

        Consistent scaling nodes: only one node type at a time is allowed to be scaled.
        """
        def _get_runtime_parameter(key):
            # Any '<instance>:scale.state' query reports 'resizing'.
            scale_state_suffix = (NodeDecorator.NODE_PROPERTY_SEPARATOR +
                                  NodeDecorator.SCALE_STATE_KEY)
            if key.endswith(scale_state_suffix):
                return CloudWrapper.SCALE_STATE_RESIZING
            return 'unknown'

        node_instances = {
            'n.1': NodeInstance({
                NodeDecorator.NODE_INSTANCE_NAME_KEY: 'n.1',
                NodeDecorator.NODE_NAME_KEY: 'n',
                NodeDecorator.SCALE_STATE_KEY: 'not terminal'}),
            'n.2': NodeInstance({
                NodeDecorator.NODE_INSTANCE_NAME_KEY: 'n.2',
                NodeDecorator.NODE_NAME_KEY: 'n',
                NodeDecorator.SCALE_STATE_KEY: 'not terminal'}),
            'm.1': NodeInstance({
                NodeDecorator.NODE_INSTANCE_NAME_KEY: 'm.1',
                NodeDecorator.NODE_NAME_KEY: 'm',
                NodeDecorator.SCALE_STATE_KEY:
                    CloudWrapper.SCALE_STATE_OPERATIONAL})
        }

        cw = CloudWrapper(self.config_holder)
        cw._get_runtime_parameter = Mock(side_effect=_get_runtime_parameter)
        cw._get_nodes_instances = Mock(return_value=node_instances)

        scale_states = cw._get_effective_scale_states()

        # Both non-terminal instances belong to node 'n' and resize together.
        assert 1 == len(scale_states)

        assert CloudWrapper.SCALE_STATE_RESIZING in scale_states
        resizing = scale_states[CloudWrapper.SCALE_STATE_RESIZING]
        assert 2 == len(resizing)
        assert ['n.1', 'n.2'] == sorted(resizing)

        global_state = cw._get_global_scale_state()
        assert CloudWrapper.SCALE_STATE_RESIZING == global_state

        try:
            cw.check_scale_state_consistency()
        except InconsistentScaleStateError as ex:
            self.fail('Should not have failed with: %s' % str(ex))

        node_name, instance_names = cw.get_scaling_node_and_instance_names()
        assert 'n' == node_name
        assert ['n.1', 'n.2'] == sorted(instance_names)

    def test_vertically_scalle_instances_nowait(self):
        """Vertical scaling dispatches the matching cloud action per state.

        (The 'scalle' typo is kept to preserve the public test name.)
        """
        _scale_state = None

        def _get_runtime_parameter(key):
            # Report the currently simulated scale state for any
            # '<instance>:scale.state' runtime parameter.
            if key.endswith(NodeDecorator.NODE_PROPERTY_SEPARATOR +
                            NodeDecorator.SCALE_STATE_KEY):
                return _scale_state
            else:
                return 'unknown'

        # One instance per vertical-scaling state, plus an operational one
        # that must be left alone.
        node_instances = {
            'n.1': NodeInstance({
                NodeDecorator.NODE_INSTANCE_NAME_KEY: 'n.1',
                NodeDecorator.NODE_NAME_KEY: 'n',
                NodeDecorator.SCALE_STATE_KEY:
                    CloudWrapper.SCALE_STATE_RESIZING}),
            'n.2': NodeInstance({
                NodeDecorator.NODE_INSTANCE_NAME_KEY: 'n.2',
                NodeDecorator.NODE_NAME_KEY: 'n',
                NodeDecorator.SCALE_STATE_KEY:
                    CloudWrapper.SCALE_STATE_DISK_ATTACHING}),
            'n.3': NodeInstance({
                NodeDecorator.NODE_INSTANCE_NAME_KEY: 'n.3',
                NodeDecorator.NODE_NAME_KEY: 'n',
                NodeDecorator.SCALE_STATE_KEY:
                    CloudWrapper.SCALE_STATE_DISK_DETACHING}),
            'm.1': NodeInstance({
                NodeDecorator.NODE_INSTANCE_NAME_KEY: 'm.1',
                NodeDecorator.NODE_NAME_KEY: 'm',
                NodeDecorator.SCALE_STATE_KEY:
                    CloudWrapper.SCALE_STATE_OPERATIONAL})
        }

        self.config_holder.set('verboseLevel', 3)
        setattr(self.config_holder, 'cloud', 'local')
        setattr(self.config_holder, CONFIGPARAM_CONNECTOR_MODULE_NAME,
                'slipstream.cloudconnectors.dummy.DummyClientCloud')

        cw = CloudWrapper(self.config_holder)
        cw._get_nodes_instances = Mock(return_value=node_instances)
        cw.initCloudConnector(self.config_holder)
        cw._set_runtime_parameter = Mock()

        cw._get_user_timeout = Mock(return_value=2)

        # No waiting.
        cw._wait_pre_scale_done = Mock()
        cw._wait_scale_state = Mock()

        # Each simulated scale state must trigger the matching cloud client
        # action on the single instance that is in that state.
        for state, action, instance_name in (
                ('resizing', 'resize', 'n.1'),
                ('disk_attaching', 'attach_disk', 'n.2'),
                ('disk_detaching', 'detach_disk', 'n.3')):
            _scale_state = state
            cw._get_runtime_parameter = Mock(
                side_effect=_get_runtime_parameter)
            setattr(cw._cloud_client, action,
                    Mock(wraps=getattr(cw._cloud_client, action)))

            cw.vertically_scale_instances()

            action_mock = getattr(cw._cloud_client, action)
            assert action_mock.called
            node_instance = action_mock.call_args[0][0][0]
            assert instance_name in node_instance.get_name()
            # BUG FIX: the original used Mock().called_with(...), which is an
            # auto-created child mock and therefore always truthy - the
            # assertion was vacuous.  assert_any_call() actually verifies it.
            # NOTE(review): assumes the parameter key has the form
            # '<instance>:<SCALE_IAAS_DONE>' - confirm against CloudWrapper.
            cw._set_runtime_parameter.assert_any_call(
                instance_name + ':' + NodeDecorator.SCALE_IAAS_DONE, 'true')

    def test_wait_pre_scale_done(self):
        """_wait_pre_scale_done() returns when all instances report done and
        raises TimeoutException when the timeout passes or instances never
        report done."""
        node_instances = [
            NodeInstance({NodeDecorator.NODE_INSTANCE_NAME_KEY: name})
            for name in ('n.1', 'n.2', 'n.3', 'm.1')]
        cw = CloudWrapper(self.config_holder)

        # All instances report done before the timeout: no exception.
        cw.get_pre_scale_done = Mock(return_value='true')
        cw._get_state_timeout_time = Mock(return_value=(time.time() + 10))
        cw._wait_pre_scale_done(node_instances)

        # Timeout already in the past.
        cw.get_pre_scale_done = Mock(return_value='true')
        cw._get_state_timeout_time = Mock(return_value=(time.time() - 1))
        # failUnlessRaises is a deprecated alias of assertRaises.
        self.assertRaises(TimeoutException, cw._wait_pre_scale_done,
                          node_instances)

        # Instances never report done and we time out.
        cw.get_pre_scale_done = Mock(return_value='')
        cw._get_state_timeout_time = Mock(return_value=(time.time() + 2))
        self.assertRaises(TimeoutException, cw._wait_pre_scale_done,
                          node_instances)

    def test_wait_scale_state(self):
        """_wait_scale_state() returns when the target state is reached and
        raises TimeoutException when the timeout passes or the state never
        becomes the target one."""
        node_instances = [
            NodeInstance({NodeDecorator.NODE_INSTANCE_NAME_KEY: name})
            for name in ('n.1', 'n.2', 'n.3', 'm.1')]
        cw = CloudWrapper(self.config_holder)

        # All set before timeout.
        cw._get_effective_scale_state = Mock(
            return_value=CloudWrapper.SCALE_STATE_RESIZED)
        cw._get_state_timeout_time = Mock(return_value=(time.time() + 5))
        cw._wait_scale_state(CloudWrapper.SCALE_STATE_RESIZED, node_instances)

        # Timeout is in the past.
        cw._get_effective_scale_state = Mock(
            return_value=CloudWrapper.SCALE_STATE_RESIZED)
        cw._get_state_timeout_time = Mock(return_value=(time.time() - 1))
        # failUnlessRaises is a deprecated alias of assertRaises; the
        # pointless *(a, b) unpacking is replaced by plain arguments.
        self.assertRaises(TimeoutException, cw._wait_scale_state,
                          CloudWrapper.SCALE_STATE_RESIZED, node_instances)

        # VMs do not set proper value and we timeout.
        cw._get_effective_scale_state = Mock(
            return_value=CloudWrapper.SCALE_STATE_RESIZING)
        cw._get_state_timeout_time = Mock(return_value=(time.time() + 2))
        self.assertRaises(TimeoutException, cw._wait_scale_state,
                          CloudWrapper.SCALE_STATE_RESIZED, node_instances)