Example 1
class OkeanosConnector(AbstractConnector):
    """
    ~okeanos (GRNET) IaaS connector.

    Wraps the kamaki Cyclades compute and network clients behind the
    generic AbstractConnector interface.
    """
    def __init__(self):
        AbstractConnector.__init__(self)
        # kamaki clients, created lazily by authenticate().
        self.__cyclades = None
        self.__network_client = None
        # When True, each new VM is attached to a public (floating) IPv4.
        self.attach_public_ipv4 = False
        # -1: no private network, 0: requested but not created yet,
        # otherwise: id of the private network created by prepare().
        self.private_network = -1

    def authenticate(self, authentication=None):
        """
        Authenticate against Astakos and build the compute/network clients.

        :param authentication: dict with 'URL' and 'TOKEN' keys
        :return: True on success or if already authenticated, else False
        """
        if self.__cyclades is not None:
            return True  # already authenticated; keep the existing clients
        try:
            authcl = AstakosClient(authentication['URL'],
                                   authentication['TOKEN'])
            authcl.authenticate()
            self.__cyclades = CycladesClient(
                authcl.get_service_endpoints('compute')['publicURL'],
                authentication['TOKEN'])
            self.__network_client = CycladesNetworkClient(
                authcl.get_service_endpoints('network')['publicURL'],
                authentication['TOKEN'])
        except ClientError:
            stderr.write('Connector initialization failed')
            return False
        return True

    def configure(self, configuration):
        """
        Read connector options from *configuration* and authenticate.

        :param configuration: dict with an 'auth' sub-dict and optional
            truthy 'private_network' / 'attach_public_ipv4' flags
        """
        self.authenticate(configuration['auth'])
        # dict.get() covers both "key missing" and "key falsy" in one test
        # (was: `'key' in configuration and configuration['key']`).
        if configuration.get('private_network'):
            self.private_network = 0
        if configuration.get('attach_public_ipv4'):
            self.attach_public_ipv4 = True

    def prepare(self):
        """
        In this method, application-level IaaS related actions are executed.
        :return:
        """
        # 0 means "requested but not created yet" (see configure()).
        if self.private_network == 0:
            self.private_network = self.create_private_network()

    def create_vm(self, name, flavor_id, image_id):
        """
        Create a VM and block until it becomes ACTIVE.

        :param name: server name
        :param flavor_id: flavor id
        :param image_id: image id
        :return: dict with 'id', 'password', 'user' and 'hostname' keys
        """
        networks = []
        if self.attach_public_ipv4:
            networks.append({'uuid': self.__create_floating_ip()})
        if self.private_network != -1:
            networks.append({'uuid': self.private_network})

        response = self.__cyclades.create_server(name=name,
                                                 flavor_id=flavor_id,
                                                 image_id=image_id,
                                                 networks=networks)
        ret_value = {
            'password': response['adminPass'],
            'id': response['id'],
            'user': response['metadata']['users'],
            # ~okeanos VMs get a predictable public DNS name.
            'hostname': 'snf-' + str(response['id']) + '.vm.okeanos.grnet.gr',
        }
        self.__cyclades.wait_server(server_id=ret_value['id'],
                                    current_status='ACTIVE')
        return ret_value

    def delete_vm(self, server_id):
        """
        Delete VM method. The method is blocking until the VM goes to a
        "DELETED" state. Any floating IP attached to the VM is released
        and deleted afterwards.
        :param server_id:
        :return:
        """
        attachments = self.__cyclades.get_server_details(
            server_id)['attachments']
        port_id = None
        for a in attachments:
            if a['OS-EXT-IPS:type'] == 'floating':
                port_id = a['id']
        floating_ip_id = None
        for ip in self.__network_client.list_floatingips():
            if port_id is not None and ip['port_id'] == str(port_id):
                floating_ip_id = ip['id']
        self.__cyclades.delete_server(server_id)
        self.__cyclades.wait_server(
            server_id,
            current_status='DELETED')  # wait until server is deleted
        if floating_ip_id is not None:
            # The IP must be detached from the (now deleted) server before
            # the API accepts the delete request.
            self.__wait_until_ip_released(floating_ip_id)
            self.__network_client.delete_floatingip(floating_ip_id)

    def __wait_until_ip_released(self, floating_ip_id):
        """
        Poll until the floating IP is no longer bound to an instance.

        :return: True if released, False if the wait loop timed out
        """
        for _ in range(MAX_WAIT_FOR_LOOPS):
            for ip in self.__network_client.list_floatingips():
                if ip['id'] == floating_ip_id:
                    if ip['instance_id'] is None or ip['instance_id'] == 'None':
                        return True
            sleep(SLEEP_TIMEOUT)
        # Was an implicit None; make the timeout outcome explicit.
        return False

    def list_vms(self):
        """
        :return: the server list, as reported by Cyclades
        """
        return self.__cyclades.list_servers()

    def get_status(self, vm_id):
        """
        :param vm_id: server id
        :return: the full server-details dict
        """
        return self.__cyclades.get_server_details(vm_id)

    def get_server_addresses(self,
                             vm_id,
                             ip_version=None,
                             connection_type=None):
        """
        Returns the enabled addresses, as referenced from the IaaS.

        :param ip_version: keep only addresses with this 'version' (e.g. 4)
        :param connection_type: keep only this 'OS-EXT-IPS:type' value
        """
        addresses = self.__cyclades.get_server_details(vm_id)['addresses']
        results = []
        # popitem() consumes the throwaway dict returned by the API call.
        while len(addresses) > 0:
            key, value = addresses.popitem()
            if (ip_version is None or value[0]['version'] == ip_version) and \
                    (connection_type is None or value[0]['OS-EXT-IPS:type'] == connection_type):
                results.append(value[0]['addr'])
        return results

    def __create_floating_ip(self):
        """
        Reserve a new floating IP and return its 'floating_network_id',
        which create_vm() passes as the network uuid to create_server.
        """
        # NOTE(review): a redundant floatingips_get() call whose result was
        # discarded has been removed here (pure extra round-trip).
        response = self.__network_client.create_floatingip()
        return response['floating_network_id']

    def create_private_network(self):
        """
        Creates a new private network and returns its id
        """
        response = self.__network_client.create_network(
            type='MAC_FILTERED', name='Deployment network')
        self.__network_client.create_subnet(network_id=response['id'],
                                            enable_dhcp=True,
                                            cidr='192.168.0.0/24')
        return response['id']

    def clone(self):
        """
        Return a new connector sharing this one's clients and settings.
        """
        new_connector = OkeanosConnector()
        new_connector.attach_public_ipv4 = self.attach_public_ipv4
        new_connector.private_network = self.private_network
        new_connector.__network_client = self.__network_client
        new_connector.__cyclades = self.__cyclades
        return new_connector

    def cleanup(self):
        """
        Delete the private network created by prepare(), if any.
        """
        if self.private_network != -1 and self.private_network != 0:
            self.__wait_until_private_net_is_empty(self.private_network)
            self.__network_client.delete_network(self.private_network)

    def __wait_until_private_net_is_empty(self, private_net_id):
        """
        Poll until no port references the private network, or time out.
        """
        # range(MAX_WAIT_FOR_LOOPS) for consistency with
        # __wait_until_ip_released (was range(1, MAX), one retry short).
        for _ in range(MAX_WAIT_FOR_LOOPS):
            port_set = set()
            for p in self.__network_client.list_ports():
                port_set.add(p['network_id'])
            if private_net_id not in port_set:
                return
            sleep(SLEEP_TIMEOUT)

    def serialize(self):
        """
        :return: a plain dict holding this connector's persistent state
        """
        return {
            'attach_public_ipv4': self.attach_public_ipv4,
            'private_network': self.private_network,
        }

    def deserialize(self, state):
        """
        Restore the state produced by serialize().
        """
        self.attach_public_ipv4 = state['attach_public_ipv4']
        self.private_network = state['private_network']
Example 2
class Cyclades(livetest.Generic):
    """Set up a Cyclades test"""
    def setUp(self):
        """Load image/flavor fixtures and build the Astakos/Cyclades clients."""
        print
        # NOTE(review): eval() on the fixture files — these are trusted local
        # files, but ast.literal_eval would be the safer choice.
        with open(self['cmpimage', 'details']) as f:
            self.img_details = eval(f.read())
        self.img = self.img_details['id']
        with open(self['flavor', 'details']) as f:
            self._flavor_details = eval(f.read())
        self.PROFILES = ('ENABLED', 'DISABLED', 'PROTECTED')

        self.servers = {}
        # Timestamp makes server/network names unique per test run.
        self.now = time.mktime(time.gmtime())
        self.servname1 = 'serv' + unicode(self.now)
        self.servname2 = self.servname1 + '_v2'
        self.servname1 += '_v1'
        self.flavorid = self._flavor_details['id']
        # servers have to be created at the beginning...
        self.networks = {}
        self.netname1 = 'net' + unicode(self.now)
        self.netname2 = 'net' + unicode(self.now) + '_v2'

        # Resolve the compute endpoint for the configured test cloud.
        self.cloud = 'cloud.%s' % self['testcloud']
        aurl, self.token = self[self.cloud, 'url'], self[self.cloud, 'token']
        self.auth_base = AstakosClient(aurl, self.token)
        curl = self.auth_base.get_service_endpoints('compute')['publicURL']
        self.client = CycladesClient(curl, self.token)

    def tearDown(self):
        """Destroy the networks and servers created during the test."""
        # Iterate over a snapshot: _delete_network() pops entries from
        # self.networks while we loop (a live dict view would break).
        for net in list(self.networks):
            self._delete_network(net)
        for server in self.servers.values():
            self._delete_server(server['id'])
            print('DEL VM %s (%s)' % (server['id'], server['name']))

    def test_000(self):
        "Prepare a full Cyclades test scenario"
        # Create both fixture servers up front, then run the parent scenario.
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self.server2 = self._create_server(
            self.servname2, self.flavorid, self.img)
        super(self.__class__, self).test_000()

    def _create_server(self, servername, flavorid, imageid, personality=None):
        """Create a VM, remember it in self.servers and return it."""
        created = self.client.create_server(
            servername, flavorid, imageid, personality=personality)
        print('CREATE VM %s (%s)' % (created['id'], created['name']))
        self.servers[servername] = created
        return created

    def _delete_server(self, servid):
        """Delete a server and wait until the deletion completes.

        Best-effort: errors (e.g. server already gone) are ignored.
        """
        try:
            current_state = self.client.get_server_details(servid)
            current_state = current_state['status']
            if current_state == 'DELETED':
                return
            self.client.delete_server(servid)
            self._wait_for_status(servid, current_state)
            self.client.delete_server(servid)
        except Exception:
            # Was a bare `except:`; keep the best-effort semantics but stop
            # swallowing SystemExit / KeyboardInterrupt.
            return

    def _create_network(self, netname, **kwargs):
        """Create a network, register it in self.networks and return it."""
        created = self.client.create_network(netname, **kwargs)
        self.networks[created['id']] = created
        return created

    def _delete_network(self, netid):
        """Disconnect all nics of network *netid*, then delete it.

        :return: the network dict popped from self.networks, or None if the
            network was not created by this test
        """
        if netid not in self.networks:  # idiom fix: was `not netid in`
            return None
        print('Disconnect nics of network %s' % netid)
        self.client.disconnect_network_nics(netid)

        def netwait(wait):
            # Deletion fails while nics are still detaching; nap and retry.
            try:
                self.client.delete_network(netid)
            except ClientError:
                time.sleep(wait)
        self.do_with_progress_bar(
            netwait,
            'Delete network %s' % netid,
            self._waits[:7])
        return self.networks.pop(netid)

    def _wait_for_network(self, netid, status):
        """Poll (with a progress bar) until network *netid* reaches *status*."""

        def netwait(wait):
            r = self.client.get_network_details(netid)
            if r['status'] == status:
                return
            time.sleep(wait)
        self.do_with_progress_bar(
            netwait,
            'Wait network %s to reach status %s' % (netid, status),
            self._waits[:5])

    def _wait_for_nic(self, netid, servid, in_creation=True):
        """Wait for a nic between *servid* and *netid* to (dis)connect.

        :return: True if the server ends up with a nic on the network
        """
        self._wait_for_network(netid, 'ACTIVE')

        def nicwait(wait):
            for nic in self.client.list_server_nics(servid):
                found_nic = nic['network_id'] == netid
                # Done as soon as some nic matches the state we wait for
                # (equivalent to the original compound boolean).
                if found_nic == in_creation:
                    return
            time.sleep(wait)
        self.do_with_progress_bar(
            nicwait,
            'Wait nic-%s-%s to %sconnect' % (
                netid,
                servid,
                '' if in_creation else 'dis'),
            self._waits[:5])
        return any(
            netid == nic['network_id']
            for nic in self.client.list_server_nics(servid))

    def _has_status(self, servid, status):
        """Return True if server *servid* currently has *status*."""
        details = self.client.get_server_details(servid)
        return details['status'] == status

    def _wait_for_status(self, servid, status):
        """Block (showing a progress bar) until *servid* reaches *status*."""
        wait_bar, wait_cb = self._safe_progress_bar(
            'Server %s in %s' % (servid, status))
        self.client.wait_server(
            servid, status, wait_cb=wait_cb, delay=2, max_wait=198)
        self._safe_progress_bar_finish(wait_bar)

    def test_parallel_creation(self):
        """Create 8 servers from parallel threads.

        Do not use this in regular livetest.
        """
        from kamaki.clients import SilentEvent
        events = [
            SilentEvent(
                self._create_server,
                '%s_%s' % (self.servname1, i),
                self.flavorid,
                self.img)
            for i in range(8)]
        for event in events:
            event.start()

    def test_create_server(self):
        """Test create_server"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        # Server must reach BUILD before its fields can be checked.
        self._wait_for_status(self.server1['id'], 'BUILD')
        self._test_0010_create_server()

    def _test_0010_create_server(self):
        """Check the fields of the freshly created server1."""
        srv = self.server1
        self.assertEqual(srv["name"], self.servname1)
        self.assertEqual(srv["flavor"]["id"], self.flavorid)
        self.assertEqual(srv["image"]["id"], self.img)
        self.assertEqual(srv["status"], "BUILD")

    def test_list_servers(self):
        """Test list servers"""
        # Two servers so the listing comparison is non-trivial.
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self.server2 = self._create_server(
            self.servname2, self.flavorid, self.img)
        self._test_0020_list_servers()

    def _test_0020_list_servers(self):
        """Compare the simple and the detailed server listings."""
        servers = self.client.list_servers()
        dservers = self.client.list_servers(detail=True)

        # Same number of results in both listings.
        self.assertEqual(len(dservers), len(servers))
        detail_fields = set([
            'created', 'flavor', 'hostId', 'image', 'progress', 'status',
            'updated'])
        for i, srv in enumerate(servers):
            # Simple entries carry none of the detail fields; detailed
            # entries carry all of them.
            self.assertTrue(detail_fields.isdisjoint(srv))
            self.assertTrue(detail_fields.issubset(dservers[i]))

        # Both listings mention exactly the same server names.
        names = sorted(srv["name"] for srv in servers)
        dnames = sorted(srv["name"] for srv in dservers)
        self.assertEqual(names, dnames)

    def _test_0030_wait_test_servers_to_build(self):
        """Pseudo-test to wait for VMs to load"""
        print('')
        self._wait_for_status(self.server1['id'], 'BUILD')
        self._wait_for_status(self.server2['id'], 'BUILD')

    def test_get_server_details(self):
        """Test get_server_details"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._wait_for_status(self.server1['id'], 'BUILD')
        self._test_0040_get_server_details()

    def _test_0040_get_server_details(self):
        """Verify the get_server_details fields for server1."""
        details = self.client.get_server_details(self.server1['id'])
        self.assertEqual(details["name"], self.servname1)
        self.assertEqual(details["flavor"]["id"], self.flavorid)
        self.assertEqual(details["image"]["id"], self.img)
        self.assertEqual(details["status"], "ACTIVE")

    def test_update_server_name(self):
        """Test update_server_name"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._test_0050_update_server_name()

    def _test_0050_update_server_name(self):
        """Rename server1 and verify the change is visible."""
        new_name = self.servname1 + '_new_name'
        self.client.update_server_name(self.server1['id'], new_name)
        details = self.client.get_server_details(
            self.server1['id'], success=(200, 400))
        self.assertEqual(details['name'], new_name)
        # Keep the bookkeeping dict in sync so tearDown still finds the VM.
        renamed = self.servers.pop(self.servname1)
        renamed['name'] = new_name
        self.servers[new_name] = renamed

    def test_reboot_server(self):
        """Test reboot server"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._wait_for_status(self.server1['id'], 'BUILD')
        # server2 uses a different flavor so both reboot paths are covered.
        self.server2 = self._create_server(
            self.servname2, self.flavorid + 2, self.img)
        self._wait_for_status(self.server2['id'], 'BUILD')
        self._test_0060_reboot_server()
        self._wait_for_status(self.server1['id'], 'REBOOT')
        self._wait_for_status(self.server2['id'], 'REBOOT')

    def _test_0060_reboot_server(self):
        """Soft-reboot server1 and hard-reboot server2."""
        self.client.reboot_server(self.server1['id'])
        self.assertTrue(self._has_status(self.server1['id'], 'REBOOT'))
        self.client.reboot_server(self.server2['id'], hard=True)
        self.assertTrue(self._has_status(self.server2['id'], 'REBOOT'))

    def test_resize_server(self):
        """Modify the flavor of a server"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._test_0065_resize_server()
        # Was `self.delete_server(...)`: no such attribute on this class —
        # the helper is `_delete_server`.
        self._delete_server(self.server1['id'])
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)

    def _test_0065_resize_server(self):
        """Resize server1 and check that its flavor changed."""
        # Was resize_server(self.servname1, ...): the API expects the
        # server id, not its name.
        self.client.resize_server(self.server1['id'], self.flavorid + 2)
        # Was get_flavor_details(server id): only server details carry the
        # nested 'flavor' dict read below.
        srv = self.client.get_server_details(self.server1['id'])
        self.assertEqual(srv['flavor']['id'], self.flavorid + 2)

    def _test_0070_wait_test_servers_to_reboot(self):
        """Pseudo-test to wait for VMs to load"""
        print('')
        self._wait_for_status(self.server1['id'], 'REBOOT')
        self._wait_for_status(self.server2['id'], 'REBOOT')

    def test_create_server_metadata(self):
        """Test create_server_metadata"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._test_0080_create_server_metadata()

    def _test_0080_create_server_metadata(self):
        """Create one metadata entry and read it back."""
        created = self.client.create_server_metadata(
            self.server1['id'], 'mymeta', 'mymeta val')
        self.assertTrue('mymeta' in created)
        fetched = self.client.get_server_metadata(self.server1['id'], 'mymeta')
        self.assert_dicts_are_equal(created, fetched)

    def test_get_server_metadata(self):
        """Test get server_metadata"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._test_0090_get_server_metadata()

    def _test_0090_get_server_metadata(self):
        """Set one metadata entry and fetch it by key."""
        self.client.update_server_metadata(
            self.server1['id'], mymeta_0='val_0')
        fetched = self.client.get_server_metadata(self.server1['id'], 'mymeta_0')
        self.assertEqual(fetched['mymeta_0'], 'val_0')

    def test_update_server_metadata(self):
        """Test update_server_metadata"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._test_0100_update_server_metadata()

    def _test_0100_update_server_metadata(self):
        """Update a metadata entry twice and verify the final value."""
        r1 = self.client.update_server_metadata(
            self.server1['id'], mymeta3='val2')
        self.assertTrue('mymeta3' in r1)
        r2 = self.client.update_server_metadata(
            self.server1['id'], mymeta3='val3')
        # Was assertTrue(r2['mymeta3'], 'val3'), which used 'val3' as the
        # failure message and could never fail for a non-empty value.
        self.assertEqual(r2['mymeta3'], 'val3')

    def test_delete_server_metadata(self):
        """Test delete_server_metadata"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._test_0110_delete_server_metadata()

    def _test_0110_delete_server_metadata(self):
        """Create a metadata entry, delete it, and expect a 404 on re-read."""
        r1 = self.client.update_server_metadata(
            self.server1['id'], mymeta='val')
        self.assertTrue('mymeta' in r1)
        self.client.delete_server_metadata(self.server1['id'], 'mymeta')
        try:
            self.client.get_server_metadata(self.server1['id'], 'mymeta')
            # If the read unexpectedly succeeds, raise a sentinel error whose
            # status (100) fails the assertion below.
            raise ClientError('Wrong Error', status=100)
        except ClientError as err:
            self.assertEqual(err.status, 404)

    def test_list_flavors(self):
        """Test flavors_get"""
        self._test_0120_list_flavors()

    def _test_0120_list_flavors(self):
        """Check the simple and detailed flavor listings."""
        flavors = self.client.list_flavors()
        self.assertTrue(len(flavors) > 1)
        detailed = self.client.list_flavors(detail=True)
        self.assertTrue('SNF:disk_template' in detailed[0])

    def test_get_flavor_details(self):
        """Test test_get_flavor_details"""
        self._test_0130_get_flavor_details()

    def _test_0130_get_flavor_details(self):
        """get_flavor_details must match the stored flavor fixture."""
        details = self.client.get_flavor_details(self.flavorid)
        self.assert_dicts_are_equal(self._flavor_details, details)

    #def test_list_images(self):
    #    """Test list_images"""
    #    self._test_0140_list_images()

    def _test_0140_list_images(self):
        """Check simple/detailed image listings and our image's details."""
        images = self.client.list_images()
        self.assertTrue(len(images) > 1)
        detailed = self.client.list_images(detail=True)
        for detailed_img in detailed:
            if detailed_img['id'] == self.img:
                break
        else:
            # Previously a missing image silently compared the last list
            # entry (or raised NameError on an empty listing).
            self.fail('image %s not found in detailed listing' % self.img)
        self.assert_dicts_are_equal(detailed_img, self.img_details)

    def test_get_image_details(self):
        """Test image_details"""
        self._test_0150_get_image_details()

    def _test_0150_get_image_details(self):
        """get_image_details must match the stored image fixture."""
        details = self.client.get_image_details(self.img)
        self.assert_dicts_are_equal(details, self.img_details)

    def test_get_image_metadata(self):
        """Test get_image_metadata"""
        self._test_0160_get_image_metadata()

    def _test_0160_get_image_metadata(self):
        """Image metadata must match the fixture, in bulk and per key."""
        bulk = self.client.get_image_metadata(self.img)
        self.assert_dicts_are_equal(self.img_details['properties'], bulk)
        for key, val in self.img_details['properties'].items():
            single = self.client.get_image_metadata(self.img, key)
            self.assertEqual(single[key], val)

    def test_shutdown_server(self):
        """Test shutdown_server"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._wait_for_status(self.server1['id'], 'BUILD')
        self._test_0170_shutdown_server()

    def _test_0170_shutdown_server(self):
        """Shut server1 down and verify it reaches STOPPED."""
        srvid = self.server1['id']
        self.client.shutdown_server(srvid)
        self._wait_for_status(srvid, 'ACTIVE')
        details = self.client.get_server_details(srvid)
        self.assertEqual(details['status'], 'STOPPED')

    def test_start_server(self):
        """Test start_server"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._wait_for_status(self.server1['id'], 'BUILD')
        # The server must be stopped before start_server can be exercised.
        self.client.shutdown_server(self.server1['id'])
        self._wait_for_status(self.server1['id'], 'ACTIVE')
        self._test_0180_start_server()

    def _test_0180_start_server(self):
        """Start server1 and verify it reaches ACTIVE."""
        srvid = self.server1['id']
        self.client.start_server(srvid)
        self._wait_for_status(srvid, 'STOPPED')
        details = self.client.get_server_details(srvid)
        self.assertEqual(details['status'], 'ACTIVE')

    def test_get_server_console(self):
        """Test get_server_console"""
        self.server2 = self._create_server(
            self.servname2, self.flavorid, self.img)
        self._wait_for_status(self.server2['id'], 'BUILD')
        self._test_0190_get_server_console()

    def _test_0190_get_server_console(self):
        """Console info must carry all connection fields."""
        console = self.client.get_server_console(self.server2['id'])
        for field in ('host', 'password', 'port', 'type'):
            self.assertTrue(field in console)

    def test_get_firewall_profile(self):
        """Test get_firewall_profile"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._test_0200_get_firewall_profile()

    def _test_0200_get_firewall_profile(self):
        """The firewall profile must be one of the known profiles."""
        self._wait_for_status(self.server1['id'], 'BUILD')
        profile = self.client.get_firewall_profile(self.server1['id'])
        self.assertTrue(profile in self.PROFILES)

    def test_set_firewall_profile(self):
        """Test set_firewall_profile"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._test_0210_set_firewall_profile()

    def _test_0210_set_firewall_profile(self):
        """Cycle through firewall profiles, rebooting after each swap.

        Tolerates a single failed swap once at least one has succeeded
        (the backend may lag behind).
        """

        self._wait_for_status(self.server1['id'], 'BUILD')
        PROFILES = ['DISABLED', 'ENABLED', 'DISABLED', 'PROTECTED']
        fprofile = self.client.get_firewall_profile(self.server1['id'])
        print('')
        count_success = 0
        for counter, fprofile in enumerate(PROFILES):
            npos = counter + 1
            try:
                nprofile = PROFILES[npos]
            except IndexError:
                nprofile = PROFILES[0]  # wrap around to the first profile
            print('\tprofile swap %s: %s -> %s' % (npos, fprofile, nprofile))
            self.client.set_firewall_profile(self.server1['id'], nprofile)
            time.sleep(0.5)
            # NOTE(review): presumably a hard reboot is required for the new
            # profile to take effect — confirm against the Cyclades API.
            self.client.reboot_server(self.server1['id'], hard=True)
            time.sleep(1)
            self._wait_for_status(self.server1['id'], 'REBOOT')
            time.sleep(0.5)
            changed = self.client.get_firewall_profile(self.server1['id'])
            try:
                self.assertEqual(changed, nprofile)
            except AssertionError as err:
                if count_success:
                    print('\tFAIL in swap #%s' % npos)
                    break
                else:
                    raise err
            count_success += 1

    def test_get_server_stats(self):
        """Test get_server_stats"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._test_0220_get_server_stats()

    def _test_0220_get_server_stats(self):
        """Server stats must contain all graph/refresh fields."""
        stats = self.client.get_server_stats(self.server1['id'])
        for field in (
                'cpuBar', 'cpuTimeSeries', 'netBar', 'netTimeSeries',
                'refresh'):
            self.assertTrue(field in stats)

    def test_create_network(self):
        """Test create_network"""
        self._test_0230_create_network()

    def _test_0230_create_network(self):
        """Create networks with each supported parameter and verify them."""
        print('\twith no params')
        self.network1 = self._create_network(self.netname1)
        self._wait_for_network(self.network1['id'], 'ACTIVE')
        n1id = self.network1['id']
        self.network1 = self.client.get_network_details(n1id)
        nets = self.client.list_networks(self.network1['id'])
        chosen = [net for net in nets if net['id'] == n1id][0]
        # 'updated' is volatile; drop it before comparing the dicts.
        chosen.pop('updated')
        net1 = dict(self.network1)
        net1.pop('updated')
        self.assert_dicts_are_equal(chosen, net1)
        full_args = dict(
                cidr='192.168.1.0/24',
                gateway='192.168.1.1',
                type='MAC_FILTERED',
                dhcp=True)
        # 'all' means "create once with every parameter at the same time".
        try_args = dict(all=True)
        try_args.update(full_args)
        for param, val in try_args.items():
            # Recreate the network each round to stay under the net quota.
            print('\tdelete %s to avoid max net limit' % n1id)
            self._delete_network(n1id)
            kwargs = full_args if param == 'all' else {param: val}
            print('\twith %s=%s' % (param, val))
            self.network1 = self._create_network(self.netname1, **kwargs)
            n1id = self.network1['id']
            self._wait_for_network(n1id, 'ACTIVE')
            self.network1 = self.client.get_network_details(n1id)
            if param == 'all':
                for p, v in full_args.items():
                    self.assertEqual(self.network1[p], v)
            else:
                self.assertEqual(self.network1[param], val)

    def test_connect_server(self):
        """Test connect_server"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self.network1 = self._create_network(self.netname1)
        self._wait_for_status(self.server1['id'], 'BUILD')
        self._wait_for_network(self.network1['id'], 'ACTIVE')
        # Was _test_0240_connect_server: no such helper exists — the
        # implementation below is named _test_0250_connect_server.
        self._test_0250_connect_server()

    def _test_0250_connect_server(self):
        """Connect server1 to network1 and wait for the nic to appear."""
        self.client.connect_server(self.server1['id'], self.network1['id'])
        nic_up = self._wait_for_nic(self.network1['id'], self.server1['id'])
        self.assertTrue(nic_up)

    def test_disconnect_server(self):
        """Test disconnect_server"""
        # Reuse the connect test to set up a connected server/network pair.
        self.test_connect_server()
        self._test_0250_disconnect_server()

    def _test_0250_disconnect_server(self):
        """Disconnect server1 from network1 and wait for the nic to go."""
        self.client.disconnect_server(self.server1['id'], self.network1['id'])
        nic_gone = self._wait_for_nic(
            self.network1['id'], self.server1['id'], in_creation=False)
        self.assertTrue(nic_gone)

    def _test_0260_wait_for_second_network(self):
        """Pseudo-test: create network2 and wait until it is ACTIVE."""
        self.network2 = self._create_network(self.netname2)
        self._wait_for_network(self.network2['id'], 'ACTIVE')

    def test_list_server_nics(self):
        """Test list_server_nics"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self.network2 = self._create_network(self.netname2)
        self._wait_for_status(self.server1['id'], 'BUILD')
        self._wait_for_network(self.network2['id'], 'ACTIVE')
        self._test_0280_list_server_nics()

    def _test_0280_list_server_nics(self):
        """Connecting a network must grow the server's nic list."""
        before = len(self.client.list_server_nics(self.server1['id']))
        self.client.connect_server(self.server1['id'], self.network2['id'])
        self.assertTrue(
            self._wait_for_nic(self.network2['id'], self.server1['id']))
        after = len(self.client.list_server_nics(self.server1['id']))
        self.assertTrue(after > before)

    def test_list_networks(self):
        """Test list_network"""
        self.network1 = self._create_network(self.netname1)
        self._wait_for_network(self.network1['id'], 'ACTIVE')
        self._test_0290_list_networks()

    def _test_0290_list_networks(self):
        """Check the simple and detailed network listings."""
        r = self.client.list_networks()
        self.assertTrue(len(r) > 1)
        ids = [net['id'] for net in r]
        names = [net['name'] for net in r]
        self.assertTrue('1' in ids)  # the public network has id '1'
        #self.assertTrue('public' in names)
        self.assertTrue(self.network1['id'] in ids)
        self.assertTrue(self.network1['name'] in names)

        r = self.client.list_networks(detail=True)
        ids = [net['id'] for net in r]
        names = [net['name'] for net in r]
        # NOTE(review): the id/name membership checks below are tautological
        # (ids/names are built from the same list) — only the field checks
        # in the inner loop actually test anything.
        for net in r:
            self.assertTrue(net['id'] in ids)
            self.assertTrue(net['name'] in names)
            for term in ('status', 'updated', 'created'):
                self.assertTrue(term in net.keys())

    def test_list_network_nics(self):
        """Test list_server_nics"""
        # One server connected to two networks.
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self.network1 = self._create_network(self.netname1)
        self.network2 = self._create_network(self.netname2)
        self._wait_for_status(self.server1['id'], 'BUILD')
        self._wait_for_network(self.network1['id'], 'ACTIVE')
        self._wait_for_network(self.network2['id'], 'ACTIVE')
        self.client.connect_server(self.server1['id'], self.network1['id'])
        self.client.connect_server(self.server1['id'], self.network2['id'])
        self._wait_for_nic(self.network1['id'], self.server1['id'])
        self._wait_for_nic(self.network2['id'], self.server1['id'])
        self._test_0293_list_network_nics()

    def _test_0293_list_network_nics(self):
        """Each network must expose exactly one nic towards server1."""
        servid = self.server1['id']
        # nic names follow the 'nic-<server>-<index>' convention.
        self.assertEqual(
            self.client.list_network_nics(self.network1['id']),
            ['nic-%s-1' % servid])
        self.assertEqual(
            self.client.list_network_nics(self.network2['id']),
            ['nic-%s-2' % servid])

    def test_get_network_details(self):
        """Test get_network_details"""
        self.network1 = self._create_network(self.netname1)
        self._test_0300_get_network_details()

    def _test_0300_get_network_details(self):
        """Network details must match our record, modulo volatile fields."""
        fetched = self.client.get_network_details(self.network1['id'])
        recorded = dict(self.network1)
        # status/updated/attachments change over time; drop them both sides.
        for volatile in ('status', 'attachments'):
            recorded.pop(volatile)
            fetched.pop(volatile)
        recorded.pop('updated', None)
        fetched.pop('updated', None)
        self.assert_dicts_are_equal(recorded, fetched)

    def test_update_network_name(self):
        """Test update_network_name"""
        self.network2 = self._create_network(self.netname2)
        self._test_0310_update_network_name()

    def _test_0310_update_network_name(self):
        """Rename network2 and wait for the new name to be visible."""
        updated_name = self.netname2 + '_upd'
        self.client.update_network_name(self.network2['id'], updated_name)

        def netwait(wait):
            # Retry until the backend reports the new name.
            r = self.client.get_network_details(self.network2['id'])
            if r['name'] == updated_name:
                return
            time.sleep(wait)
        self.do_with_progress_bar(
            netwait,
            'Network %s name is changing:' % self.network2['id'],
            self._waits[:5])

        r = self.client.get_network_details(self.network2['id'])
        self.assertEqual(r['name'], updated_name)

    """ Don't have auth to test this
Example 3
class OkeanosConnector(AbstractConnector):
    """
    Okeanos connector.

    Drives the ~okeanos IaaS through the kamaki clients: creates and
    deletes VMs, optionally attaching a public IPv4 and/or one private
    network shared by every VM this connector creates.
    """

    def __init__(self):
        AbstractConnector.__init__(self)
        self.__cyclades = None        # CycladesClient, set by authenticate()
        self.__network_client = None  # CycladesNetworkClient, set by authenticate()
        self.attach_public_ipv4 = False
        # Sentinel values: -1 = no private network, 0 = requested but not
        # yet created (see configure()/prepare()), otherwise the network id.
        self.private_network = -1

    def authenticate(self, authentication=None):
        """
        Build the Cyclades compute and network clients (idempotent).

        :param authentication: dict carrying 'URL' (Astakos auth URL) and
            'TOKEN' entries
        :return: True on success or if already authenticated; False if the
            kamaki clients could not be initialized
        """
        if self.__cyclades is not None:
            return True
        try:
            authcl = AstakosClient(authentication['URL'], authentication['TOKEN'])
            authcl.authenticate()
            self.__cyclades = CycladesClient(authcl.get_service_endpoints('compute')['publicURL'],
                                             authentication['TOKEN'])
            self.__network_client = CycladesNetworkClient(authcl.get_service_endpoints('network')['publicURL'],
                                                          authentication['TOKEN'])
        except ClientError:
            stderr.write('Connector initialization failed')
            return False
        return True

    def configure(self, configuration):
        """Authenticate and read the optional 'private_network' and
        'attach_public_ipv4' flags from the configuration dict."""
        self.authenticate(configuration['auth'])
        if 'private_network' in configuration and configuration['private_network']:
            self.private_network = 0
        if 'attach_public_ipv4' in configuration and configuration['attach_public_ipv4']:
            self.attach_public_ipv4 = True

    def prepare(self):
        """
        In this method, application-level IaaS related actions are executed.
        :return:
        """
        # 0 means "private network requested": create it now and remember
        # its id for subsequent create_vm() calls.
        if self.private_network == 0:
            self.private_network = self.create_private_network()

    def create_vm(self, name, flavor_id, image_id):
        """
        Create a VM and block until it becomes ACTIVE.

        :param name: server name
        :param flavor_id: flavor id to deploy with
        :param image_id: image id to deploy from
        :return: dict with 'password', 'id', 'user' and 'hostname' keys
        """
        networks = []
        if self.attach_public_ipv4:
            networks.append({'uuid': self.__create_floating_ip()})
        if self.private_network != -1:
            networks.append({'uuid': self.private_network})

        response = self.__cyclades.create_server(name=name, flavor_id=flavor_id, image_id=image_id, networks=networks)
        ret_value = dict()
        ret_value['password'] = response['adminPass']
        ret_value['id'] = response['id']
        ret_value['user'] = response['metadata']['users']
        ret_value['hostname'] = 'snf-' + str(response['id']) + '.vm.okeanos.grnet.gr'
        # NOTE(review): the semantics of wait_server's current_status
        # argument (wait-while vs wait-until) differ across kamaki
        # versions -- confirm 'ACTIVE' blocks until the server is up here.
        self.__cyclades.wait_server(server_id=ret_value['id'], current_status='ACTIVE')
        return ret_value

    def delete_vm(self, server_id):
        """
        Delete VM method. The method is blocking until the VM goes to a "DELETED" state
        :param server_id: id of the server to delete
        :return:
        """
        # Locate the port carrying the server's floating IP (if any), so the
        # address can be released after the server is gone.
        attachments = self.__cyclades.get_server_details(server_id)['attachments']
        port_id = None
        for a in attachments:
            if a['OS-EXT-IPS:type'] == 'floating':
                port_id = a['id']
        floating_ip_id = None
        for ip in self.__network_client.list_floatingips():
            if port_id is not None and ip['port_id'] == str(port_id):
                floating_ip_id = ip['id']
        self.__cyclades.delete_server(server_id)
        self.__cyclades.wait_server(server_id, current_status='DELETED')    # wait until server is deleted
        if floating_ip_id is not None:
            self.__wait_until_ip_released(floating_ip_id)
            self.__network_client.delete_floatingip(floating_ip_id)

    def __wait_until_ip_released(self, floating_ip_id):
        # Poll until the floating IP is no longer bound to an instance.
        # Returns True when released; falls through (None) after
        # MAX_WAIT_FOR_LOOPS unsuccessful polls.
        for i in range(1, MAX_WAIT_FOR_LOOPS+1):
            for ip in self.__network_client.list_floatingips():
                if ip['id'] == floating_ip_id:
                    if ip['instance_id'] is None or ip['instance_id'] == 'None':
                        return True
            sleep(SLEEP_TIMEOUT)

    def list_vms(self):
        """
        List all servers visible to the authenticated account.

        :return:
        """
        return self.__cyclades.list_servers()

    def get_status(self, vm_id):
        """
        Fetch the full server-details dict for the given VM.

        :param vm_id:
        :return:
        """
        return self.__cyclades.get_server_details(vm_id)

    def get_server_addresses(self, vm_id, ip_version=None, connection_type=None):
        """
        Returns the enabled addresses, as referenced from the IaaS.

        Only the first address of each attached network is considered; the
        optional filters match on IP version and on 'OS-EXT-IPS:type'.
        """
        addresses = self.__cyclades.get_server_details(vm_id)['addresses']
        results = []
        # popitem() drains the (locally held) dict one network at a time.
        while len(addresses) > 0:
            key, value = addresses.popitem()
            if (ip_version is None or value[0]['version'] == ip_version) and \
                    (connection_type is None or value[0]['OS-EXT-IPS:type'] == connection_type):
                results.append(value[0]['addr'])
        return results

    def __create_floating_ip(self):
        # NOTE(review): the result of floatingips_get() is discarded --
        # presumably only a connectivity/authorization check; confirm.
        self.__network_client.floatingips_get()
        response = self.__network_client.create_floatingip()
        # The *network* id (not the floating-IP id) is returned, which is
        # what create_server's networks=[{'uuid': ...}] argument expects.
        return response['floating_network_id']

    def create_private_network(self):
        """
        Creates a new private network and returns its id
        """
        response = self.__network_client.create_network(type='MAC_FILTERED', name='Deployment network')
        self.__network_client.create_subnet(
            network_id=response['id'],
            enable_dhcp=True,
            cidr='192.168.0.0/24'
        )
        return response['id']

    def clone(self):
        """Return a shallow copy that shares the authenticated kamaki clients."""
        new_connector = OkeanosConnector()
        new_connector.attach_public_ipv4 = self.attach_public_ipv4
        new_connector.private_network = self.private_network
        # Name mangling resolves inside the defining class, so the private
        # client attributes can be assigned directly here.
        new_connector.__network_client = self.__network_client
        new_connector.__cyclades = self.__cyclades
        return new_connector

    def cleanup(self):
        """Tear down the private network, if one was actually created."""
        if self.private_network != -1 and self.private_network != 0:
            self.__wait_until_private_net_is_empty(self.private_network)
            self.__network_client.delete_network(self.private_network)

    def __wait_until_private_net_is_empty(self, private_net_id):
        # Poll until no port references the network; gives up silently after
        # the loop expires.
        # NOTE(review): range(1, MAX_WAIT_FOR_LOOPS) runs one iteration
        # fewer than __wait_until_ip_released's range(1, N+1) -- confirm
        # whether this asymmetry is intentional.
        for i in range(1, MAX_WAIT_FOR_LOOPS):
            port_set = set()
            for p in self.__network_client.list_ports():
                port_set.add(p['network_id'])
            if private_net_id not in port_set:
                return
            else:
                sleep(SLEEP_TIMEOUT)

    def serialize(self):
        """Dump the re-creatable configuration (not the live clients)."""
        d = dict()
        d['attach_public_ipv4'] = self.attach_public_ipv4
        d['private_network'] = self.private_network
        return d

    def deserialize(self, state):
        """Restore configuration previously produced by serialize()."""
        self.attach_public_ipv4 = state['attach_public_ipv4']
        self.private_network = state['private_network']
Exemplo n.º 4
0
class OkeanosNativeClient(object):
    """
    Convenience wrapper over the kamaki Astakos/Cyclades/BlockStorage
    clients: node lifecycle (create/shutdown/delete/resize), extra
    volumes, and SSH-based contextualization helpers.
    """

    # Upper-cased volume states as reported by the block-storage API.
    VOLUME_STATUS_CREATING = 'CREATING'
    VOLUME_STATUS_IN_USE = 'IN_USE'
    VOLUME_STATUS_DELETING = 'DELETING'
    VOLUME_STATUS_DELETED = 'DELETED'

    def __init__(self, token, authURL='https://accounts.okeanos.grnet.gr/identity/v2.0'):
        """
        Authenticate and build the compute and block-storage clients.

        :type authURL: str
        :type token: str
        """
        from kamaki.clients.utils import https
        # NOTE(review): this disables SSL certificate verification globally
        # for kamaki -- confirm that is intended for the target deployment.
        https.patch_ignore_ssl()

        self.authURL = authURL
        self.token = token
        self.cycladesServiceType = CycladesClient.service_type
        self.blockStorageServiceType = CycladesBlockStorageClient.service_type
        self.astakosClient = AstakosClient(self.authURL, self.token)
        endpointF = self.astakosClient.get_service_endpoints
        self.cycladesEndpoint = endpointF(self.cycladesServiceType)[u'publicURL']
        self.cycladesClient = CycladesClient(self.cycladesEndpoint, self.token)
        self.blockStorageEndpoint = endpointF(self.blockStorageServiceType)[u'publicURL']
        self.blockStorageClient = CycladesBlockStorageClient(self.blockStorageEndpoint, token)

        # Cache both directions of the flavor id/name mapping once, so
        # getFlavorId() never needs another API round-trip.
        flavorsById = {}
        flavorsByName = {}
        for flavor in self.cycladesClient.list_flavors():
            _id = flavor[u'id']
            name = flavor[u'name']
            flavorsById[_id] = name
            flavorsByName[name] = _id
        self.flavorsById = flavorsById
        self.flavorsByName = flavorsByName

    def getFlavorId(self, idOrName):
        """
        Resolve a flavor name to its id; ids and unknown values are
        returned unchanged.

        :rtype : str
        :type idOrName: str
        """
        if idOrName in self.flavorsById:
            return idOrName
        elif idOrName in self.flavorsByName:
            return self.flavorsByName[idOrName]
        else:
            return idOrName  # caller's responsibility

    def listNodes(self):
        """
        Fetch full details for every server visible to the token.

        :rtype : list(ListNodeResult)
        """
        instanceInfoList = []
        servers = self.cycladesClient.list_servers()
        for server in servers:
            serverId = str(server[u'id'])  # It is a number in the result
            serverDetails = self.cycladesClient.get_server_details(serverId)
            serverStatusS = serverDetails[u'status']
            serverStatus = NodeStatus(serverStatusS)
            # serverFlavourId = serverDetails[u'flavor'][u'id']
            # serverImageId = serverDetails[u'image'][u'id']
            instanceInfo = ListNodeResult(serverId, serverStatus, serverDetails)
            instanceInfoList.append(instanceInfo)
        return instanceInfoList

    def createVolume(self, serverId, sizeGB, projectId, sleepWaitSeconds=5):
        """
        Create a volume attached to `serverId` and block (polling every
        `sleepWaitSeconds`) until its status leaves 'creating'.

        :param serverId: str
        :param sizeGB: Union[str, int]
        :param projectId: str
        :rtype str
        """
        self.log("> serverId=%s, sizeGB=%s, projectId=%s" % (serverId, sizeGB, projectId))

        response = self.blockStorageClient.create_volume(sizeGB,
                                                         serverId,
                                                         '%s-vol-%s' % (serverId, sizeGB),
                                                         project=projectId)
        # The volume is being created asynchronously, status is 'creating'
        # we wait until it changes (to 'in_use')
        volumeId = response[u'id']

        def getVolumeDetails():
            # Poll helper: returns (details, upper-cased status).
            _volumeDetails = self.blockStorageClient.get_volume_details(volumeId)
            _volumeStatus = _volumeDetails[u'status'].upper()
            self.log("volumeDetails = %s" % _volumeDetails)
            return _volumeDetails, _volumeStatus

        volumeDetails, volumeStatus = getVolumeDetails()
        while volumeStatus == OkeanosNativeClient.VOLUME_STATUS_CREATING:
            time.sleep(sleepWaitSeconds)
            volumeDetails, volumeStatus = getVolumeDetails()

        # response is something like this
        # {
        #     u'display_name': u'foo',
        #     u'id': u'46974',
        #     u'links': [
        #         {
        #             u'href': u'https://cyclades.okeanos.grnet.gr/volume/v2.0/volumes/46974',
        #             u'rel': u'self'
        #         }, {
        #             u'href': u'https://cyclades.okeanos.grnet.gr/volume/v2.0/volumes/46974',
        #             u'rel': u'bookmark'
        #         }
        #     ]
        # }

        self.log("< %s" % response)

        # The original creation response (not the polled details) is returned.
        return response

    def attachVolume(self, serverId, sizeGB, projectId):
        """Create and attach an extra volume to the VM, returning the new volume's id"""
        self.log("> serverId = %s, sizeGB = %s, projectId = %s" % (serverId, sizeGB, projectId))
        result = self.createVolume(serverId, sizeGB, projectId)
        volumeId = result['id']
        return volumeId

    def deleteVolume(self, volumeId, sleepWaitSeconds=5):
        """
        Deletes the volume identified by the given `volumeId`, blocking
        while the volume is still 'in_use' but not until full deletion.
        :param volumeId: str
        :return: the delete-call response
        """

        def getVolumeDetails():
            # Poll helper: returns (details, upper-cased status).
            _volumeDetails = self.blockStorageClient.get_volume_details(volumeId)
            _volumeStatus = _volumeDetails[u'status'].upper()
            self.log("volumeDetails = %s" % _volumeDetails)
            return _volumeDetails, _volumeStatus

        volumeDetails, volumeStatus = getVolumeDetails()
        response = self.blockStorageClient.delete_volume(volumeId)

        # Normal status transition is:
        #   OkeanosNativeClient.VOLUME_STATUS_IN_USE    =>
        #   OkeanosNativeClient.VOLUME_STATUS_DELETING  =>
        #   OkeanosNativeClient.VOLUME_STATUS_DELETED

        while volumeStatus == OkeanosNativeClient.VOLUME_STATUS_IN_USE:
            time.sleep(sleepWaitSeconds)
            volumeDetails, volumeStatus = getVolumeDetails()

        # Now it should be in status:
        #   OkeanosNativeClient.VOLUME_STATUS_DELETING
        #
        # Note that real deletion means status:
        #   OkeanosNativeClient.VOLUME_STATUS_DELETED
        #
        # ... But let's not wait that long

        return response

    def createNode(self, nodeName, flavorIdOrName, imageId,
                   sshPubKey=None,
                   initScriptPathAndData=None,
                   remoteUsername="******",
                   initScriptPathAndData=None,
                   remoteUsergroup=None,
                   localPubKeyData=None,
                   createAsyncInitScript=True,
                   projectId=None):
        """
        Create a node, injecting SSH keys and (optionally) an init script
        via the server 'personality'.  Does not wait for the node to
        become ACTIVE.

        :rtype : NodeDetails
        :type localPubKeyData: str
        :type sshPubKey: str
        :type imageId: str
        :type flavorIdOrName: str
        :type nodeName: str
        """
        self.log("Creating node '%s', %s, %s" % (nodeName, flavorIdOrName, imageId))

        sshPubKey = sshPubKey or None
        if sshPubKey is not None:
            self.log("User SSH public key to be injected in %s: %s" % (nodeName, sshPubKey))
        remoteUsergroup = remoteUsergroup or remoteUsername
        flavorId = self.getFlavorId(flavorIdOrName)

        # We make sure:
        # a) The orchestrator can do password-less SSH on the newly created machine (via ~/.ssh/id_rsa.pub)
        # b) The SlipStream user can do password-less SSH on the newly created machine (via the provided userPubKey)
        # c) The provided init script is injected

        localPubKeyData = localPubKeyData or loadPubRsaKeyData()
        self.log("Local SSH public key to be injected in %s: %s" % (nodeName, localPubKeyData))

        if sshPubKey is None:
            authorized_keys = localPubKeyData
        else:
            if not localPubKeyData.endswith('\n'):
                localPubKeyData += '\n'
            authorized_keys = "%s%s" % (localPubKeyData, sshPubKey)

        # See https://www.synnefo.org/docs/kamaki/latest/developers/showcase.html#inject-ssh-keys
        import base64
        personality = [
            dict(
                contents=base64.b64encode(authorized_keys),
                path="/%s/.ssh/authorized_keys" % remoteUsername,
                owner=remoteUsername,
                group=remoteUsergroup,
                mode=0600  # Python 2 octal literal
            )
        ]

        if initScriptPathAndData is not None:
            initScriptPath, initScriptData = initScriptPathAndData

            personality.append(
                dict(
                    contents=base64.b64encode(initScriptData),
                    path=initScriptPath,
                    owner=remoteUsername,
                    group=remoteUsergroup,
                    mode=0777  # Python 2 octal literal
                )
            )

            # In order for the contextualization script to run asynchronously,
            # we create another script that launches the original via nohup
            if createAsyncInitScript:
                asyncInitScriptPath = "%s.async" % initScriptPath
                asyncInitScriptData = "#!/bin/sh -e\nexec nohup %s &\n" % initScriptPath

                personality.append(
                    dict(
                        contents=base64.b64encode(asyncInitScriptData),
                        path=asyncInitScriptPath,
                        owner=remoteUsername,
                        group=remoteUsergroup,
                        mode=0777  # Python 2 octal literal
                    )
                )
            else:
                asyncInitScriptPath = None
        else:
            initScriptPath = None
            initScriptData = None
            asyncInitScriptPath = None
            asyncInitScriptData = None

        self.log(">> Personalities")
        for _p in personality:
            self.log(">>>> %s" % _p)

        resultDict = self.cycladesClient.create_server(nodeName,
                                                       flavorId,
                                                       imageId,
                                                       personality=personality,
                                                       project_id=projectId)
        # No IP is included in this result
        nodeDetails = NodeDetails(resultDict,
                                  sshPubKey=sshPubKey,
                                  initScriptPath=initScriptPath,
                                  initScriptData=initScriptData,
                                  asyncInitScriptPath=asyncInitScriptPath)
        self.log("Created node %s status %s, adminPass = %s, ip4s = %s" % (nodeDetails.id, nodeDetails.status.okeanosStatus, nodeDetails.adminPass, nodeDetails.ipv4s))
        return nodeDetails

    def runCommandOnNode(self, nodeDetails, command,
                         username='******',
                         localPrivKey=None,
                         timeout=None,
                         runSynchronously=True):
        """
        Run a shell command on the node's first IPv4 over SSH.

        NOTE(review): the '******' defaults in this class look like
        redacted placeholders from the original source -- confirm the
        intended usernames before relying on them.

        :type timeout: int
        :type localPrivKey: str
        :type nodeDetails: NodeDetails
        :type command: str
        """
        hostname = nodeDetails.ipv4s[0]
        return runCommandOnHost(hostname, command,
                                username=username,
                                localPrivKey=localPrivKey,
                                timeout=timeout,
                                runSynchronously=runSynchronously)

    def checkSshOnNode(self, nodeDetails, username="******", localPrivKey=None, timeout=None):
        """Return whether SSH is reachable on the node's first IPv4."""
        hostname = nodeDetails.ipv4s[0]
        return checkSshOnHost(hostname, username=username, localPrivKey=localPrivKey, timeout=timeout)

    def waitSshOnHost(self, hostname, username="******", localPrivKey=None, timeout=None, sleepSeconds=10):
        """Block until SSH answers on `hostname`, retrying every `sleepSeconds`."""
        t0 = time.time()
        while True:
            if checkSshOnHost(hostname, username=username, localPrivKey=localPrivKey, timeout=timeout):
                t1 = time.time()
                dtsec = t1 - t0
                self.log("SSH good for %s@%s after %s sec" % (username, hostname, dtsec))
                break
            else:
                time.sleep(sleepSeconds)

    def waitSshOnNode(self, nodeDetails, username="******", localPrivKey=None, timeout=None):
        """Block until SSH answers on the node's first IPv4."""
        hostname = nodeDetails.ipv4s[0]
        self.waitSshOnHost(hostname, username=username, localPrivKey=localPrivKey, timeout=timeout)

    def getNodeDetails(self, nodeId):
        """
        Fetch fresh server details and wrap them in a NodeDetails.

        :type nodeId: str
        :rtype : NodeDetails
        """
        # from kamaki.cli import logger
        # logger.add_file_logger('kamaki.clients.sent', filename='get_server_details.log')
        # logger.add_file_logger('kamaki.clients.recv', filename='get_server_details.log')

        resultDict = self.cycladesClient.get_server_details(nodeId)
        nodeDetails = NodeDetails(resultDict)
        return nodeDetails

    def waitNodeStatus(self, nodeId, expectedOkeanosStatus, sleepSeconds=5):
        """
        Poll every `sleepSeconds` until the node reaches the expected status.

        :type expectedOkeanosStatus: str
        :type nodeId: str
        """
        t0 = time.time()
        nodeDetails = self.getNodeDetails(nodeId)
        while nodeDetails.status.okeanosStatus != expectedOkeanosStatus:
            time.sleep(sleepSeconds)
            nodeDetails = self.getNodeDetails(nodeId)
        t1 = time.time()
        dtsec = t1 - t0
        self.log("Node %s status %s after %s sec" % (nodeId, expectedOkeanosStatus, dtsec))
        return nodeDetails

    def waitCurrentStatus(self, nodeId, currentOkeanosStatus, sleepSeconds=5, maxSleepSeconds=400):
        """ Wait until the node leaves the given current status
        :type nodeId: str
        :type currentOkeanosStatus: str
        :type sleepSeconds: float
        """
        t0 = time.time()
        self.cycladesClient.wait_server(nodeId,
                                        current_status=currentOkeanosStatus,
                                        delay=sleepSeconds,
                                        max_wait=maxSleepSeconds)
        nodeDetails = self.getNodeDetails(nodeId)
        newOkeanosStatus = nodeDetails.status.okeanosStatus
        t1 = time.time()
        dtsec = t1 - t0
        self.log("Node %s status %s -> %s after %s sec" % (nodeId, currentOkeanosStatus, newOkeanosStatus, dtsec))
        return nodeDetails

    def createNodeAndWait(self, nodeName, flavorIdOrName, imageId, sshPubKey, initScriptPathAndData=None,
                          remoteUsername="******", remoteUsergroup=None, localPubKeyData=None, localPrivKey=None,
                          sshTimeout=None, runInitScriptSynchronously=False,
                          extraVolatileDiskGB=0, projectId=None):
        """
        Create a node, wait for it to finish building, optionally attach an
        extra volatile disk, wait for SSH, then run the init script.

        :type extraVolatileDiskGB: int
        :type runInitScriptSynchronously: bool
        :type sshPubKey: str
        :type imageId: str
        :type flavorIdOrName: str
        :type nodeName: str
        :type sshTimeout: float
        :rtype : NodeDetails
        """
        localPrivKey = localPrivKey or loadRsaPrivKey()

        # Note that this returned value (NodeDetails) contains the adminPass
        nodeDetails = self.createNode(nodeName, flavorIdOrName, imageId, sshPubKey,
                                      initScriptPathAndData=initScriptPathAndData,
                                      remoteUsername=remoteUsername,
                                      remoteUsergroup=remoteUsergroup,
                                      localPubKeyData=localPubKeyData,
                                      projectId=projectId)
        nodeId = nodeDetails.id
        # Wait while the node is still in BUILD, then refresh IPs/status.
        nodeDetailsActive = self.waitCurrentStatus(nodeId, NodeStatus.BUILD)
        nodeDetails.updateIPsAndStatusFrom(nodeDetailsActive)

        # attach any additional disk
        hostIP = nodeDetails.ipv4s[0]
        if extraVolatileDiskGB:
            self.log("Creating volatile disk of size %s GB for machine IP=%s, id=%s" % (extraVolatileDiskGB, hostIP, nodeId))
            volumeId = self.createVolume(nodeId, extraVolatileDiskGB, projectId)
            self.log("Created volumeId=%s of size %s GB for machine IP=%s, id=%s" % (volumeId, extraVolatileDiskGB, hostIP, nodeId))
            # We do nothing more with the volumeId.
            # When the VM is destroyed by the IaaS, the extra disk is automatically destroyed as well.
        else:
            self.log("No need for extra volatile disk for machine IP=%s, id=%s" % (hostIP, nodeId))

        # Some times, right after node is reported ACTIVE, network is unreachable or SSH is not immediately ready.
        # We have to cope with that by waiting.
        sshTimeout = sshTimeout or 7.0
        self.waitSshOnNode(nodeDetails, username=remoteUsername, localPrivKey=localPrivKey, timeout=sshTimeout)

        initScriptPath = nodeDetails.initScriptPath

        runResult = self.runCommandOnNode(nodeDetails, initScriptPath,
                                          username=remoteUsername,
                                          localPrivKey=localPrivKey,
                                          runSynchronously=runInitScriptSynchronously)
        return nodeDetails, runResult

    def shutdownNode(self, nodeId):
        """
        Request shutdown unless the node is already stopped (non-blocking).

        :rtype : NodeDetails
        :type nodeId: str
        """
        self.log("Shutting down node %s" % nodeId)
        nodeDetails = self.getNodeDetails(nodeId)
        if not nodeDetails.status.isStopped():
            self.cycladesClient.shutdown_server(nodeId)
            self.log("Shutdown node %s status %s" % (nodeId, nodeDetails.status.okeanosStatus))
        else:
            self.log("Node %s already shut down" % nodeId)
        return nodeDetails

    def shutdownNodeAndWait(self, nodeId):
        """
        Request shutdown and block until the node reports STOPPED.

        :rtype : NodeDetails
        :type nodeId: str
        """
        nodeDetails = self.shutdownNode(nodeId)
        if not nodeDetails.status.isStopped():
            nodeDetailsWait = self.waitNodeStatus(nodeId, NodeStatus.STOPPED)
            nodeDetails.updateStatusFrom(nodeDetailsWait)
            self.log("Shutdown node %s status %s" % (nodeId, nodeDetails.status.okeanosStatus))
        return nodeDetails

    def deleteNode(self, nodeId):
        """
        Request deletion unless the node is already deleted (non-blocking).

        :rtype : NodeDetails
        :type nodeId: str
        """
        self.log("Deleting nodeId %s" % nodeId)
        nodeDetails = self.getNodeDetails(nodeId)
        if not nodeDetails.status.isDeleted():
            self.cycladesClient.delete_server(nodeId)
            self.log("Deleted node %s status %s" % (nodeId, nodeDetails.status.okeanosStatus))
        return nodeDetails

    def deleteNodeAndWait(self, nodeId):
        """
        Request deletion and block until the node reports DELETED.

        :rtype : NodeDetails
        :type nodeId: str
        """
        nodeDetails = self.deleteNode(nodeId)
        if not nodeDetails.status.isDeleted():
            nodeDetailsWait = self.waitNodeStatus(nodeId, NodeStatus.DELETED)
            nodeDetails.updateStatusFrom(nodeDetailsWait)
            self.log("Deleted node %s status %s" % (nodeId, nodeDetails.status.okeanosStatus))
        return nodeDetails

    def log(self, msg=''):
        # Prefix each message with "ClassName::caller" via stack inspection.
        who = '%s::%s' % (self.__class__.__name__, inspect.stack()[1][3])
        LOG('%s# %s' % (who, msg))

    def getNodeIPv4(self, nodeId):
        """Return the node's first IPv4 address."""
        nodeDetails = self.getNodeDetails(nodeId)
        ipv4 = nodeDetails.ipv4s[0]
        LOG("< for nodeId = %s, IPv4 = %s" % (nodeId, ipv4))
        return ipv4

    def getNodePartitions(self, nodeId,
                          username='******',
                          localPrivKey=None,
                          timeout=None,
                          ssh=None):
        """Return (ssh status, partition set) for the node, via SSH."""
        self.log("> nodeId = %s" % nodeId)
        ipv4 = self.getNodeIPv4(nodeId)
        status, partitions = getHostPartitions(ipv4,
                                               username=username,
                                               localPrivKey=localPrivKey,
                                               timeout=timeout,
                                               ssh=ssh)
        self.log("< status = %s, partitions = %s" % (status, partitions))
        return status, partitions

    def waitForExtraNodePartition(self, serverId, partitions,
                                  username='******',
                                  localPrivKey=None,
                                  timeout=None,
                                  ssh=None):
        """
        Given the set of pre-existing partitions, we wait until a new one appears and then we return it.
        Returns None if fetching the partitions fails at any point.
        NOTE(review): the returned value is a (possibly multi-element) set
        difference, not a single partition string -- confirm callers expect that.
        :param serverId: str
        :param partitions: set[str]
        :return: the extra partition. prepend '/dev/' to get the device name
        """
        def getem():
            # Poll helper bound to the fixed SSH parameters.
            return self.getNodePartitions(serverId,
                                          username=username,
                                          localPrivKey=localPrivKey,
                                          timeout=timeout,
                                          ssh=ssh)

        self.log("Waiting, current partitions: %s" % partitions)
        status1, partitions1 = getem()
        if status1 != 0:
            return None

        while partitions == partitions1:
            self.log("Looping, new partitions: %s" % partitions1)
            status1, partitions1 = getem()
            if status1 != 0:
                return None

        # We assume one more is added ...
        newPartition = partitions1.difference(partitions)
        self.log("< For serverId = %s, new partition = %s" % (serverId, newPartition))
        return newPartition

    def resizeNode(self, serverId, flavorIdOrName):
        """Shut the node down, resize it to the given flavor, restart it.
        Returns the new flavor id, or None if no resize was needed."""
        flavorId = self.getFlavorId(flavorIdOrName)
        nodeDetails = self.getNodeDetails(serverId)
        self.log("Node %s status is %s" % (serverId, nodeDetails.status.okeanosStatus))
        existingFlavorId = nodeDetails.flavorId
        self.log("Requested flavorId = %s [given: %s]" % (flavorId, flavorIdOrName))

        if existingFlavorId == flavorId:
            self.log("FlavorId already is %s, no resizing action is needed !" % flavorId)
            return

        t0 = time.time()

        self.log("Resizing from %s -> %s" % (existingFlavorId, flavorId))
        # Hot resizing is not supported, so we must shut the server down first
        self.log("Shutting down node %s" % serverId)
        nodeDetails = self.shutdownNodeAndWait(serverId)
        self.log("Node %s status is %s" % (serverId, nodeDetails.status.okeanosStatus))

        # This takes the server to status 'RESIZE'
        self.log("Resizing node %s ..." % serverId)
        resizeResponse = self.cycladesClient.resize_server(serverId, flavorId)
        self.log("resizeResponse = %s" % resizeResponse)

        # wait until server acquires the new flavor
        # NOTE(review): tight polling loop with no sleep between API calls
        # -- consider adding a delay.
        nodeDetails = self.getNodeDetails(serverId)
        while nodeDetails.flavorId != flavorId:
            nodeDetails = self.getNodeDetails(serverId)
        self.log("Node %s resized" % serverId)

        # And we are now ready to restart with the new flavor
        self.log("Restarting node %s" % serverId)
        self.cycladesClient.start_server(serverId)
        self.waitNodeStatus(serverId, NodeStatus.ACTIVE)

        t1 = time.time()
        dtsec = t1 - t0
        self.log("Node %s restarted with new flavor %s in %s sec" % (serverId, flavorId, dtsec))
        return flavorId
Exemplo n.º 5
0
class OkeanosNativeClient(object):
    """
    Client for the ~okeanos IaaS, built on top of the kamaki clients:
    Astakos for authentication, Cyclades for compute, and the Cyclades
    block-storage client for volumes.
    """

    # Volume lifecycle statuses as reported (upper-cased) by the
    # block-storage API; used by createVolume/deleteVolume polling.
    VOLUME_STATUS_CREATING = 'CREATING'
    VOLUME_STATUS_IN_USE = 'IN_USE'
    VOLUME_STATUS_DELETING = 'DELETING'
    VOLUME_STATUS_DELETED = 'DELETED'

    def __init__(self,
                 token,
                 authURL='https://accounts.okeanos.grnet.gr/identity/v2.0'):
        """
        Authenticate against Astakos, build the Cyclades compute and
        block-storage clients, and cache the flavor id<->name mappings.

        :type authURL: str
        :type token: str
        """
        from kamaki.clients.utils import https
        https.patch_ignore_ssl()

        self.authURL = authURL
        self.token = token
        self.cycladesServiceType = CycladesClient.service_type
        self.blockStorageServiceType = CycladesBlockStorageClient.service_type
        self.astakosClient = AstakosClient(self.authURL, self.token)

        def publicEndpointOf(serviceType):
            # Resolve the public URL of the given service via Astakos
            return self.astakosClient.get_service_endpoints(
                serviceType)[u'publicURL']

        self.cycladesEndpoint = publicEndpointOf(self.cycladesServiceType)
        self.cycladesClient = CycladesClient(self.cycladesEndpoint, self.token)
        self.blockStorageEndpoint = publicEndpointOf(
            self.blockStorageServiceType)
        self.blockStorageClient = CycladesBlockStorageClient(
            self.blockStorageEndpoint, token)

        # Cache the flavor catalogue in both directions for getFlavorId()
        self.flavorsById = {}
        self.flavorsByName = {}
        for flavor in self.cycladesClient.list_flavors():
            flavorId = flavor[u'id']
            flavorName = flavor[u'name']
            self.flavorsById[flavorId] = flavorName
            self.flavorsByName[flavorName] = flavorId

    def getFlavorId(self, idOrName):
        """
        :rtype : str
        :type idOrName: str
        """
        if idOrName in self.flavorsById:
            return idOrName
        elif idOrName in self.flavorsByName:
            return self.flavorsByName[idOrName]
        else:
            return idOrName  # caller's responsibility

    def listNodes(self):
        """
        Fetch full details for every server owned by this account.

        :rtype : list(ListNodeResult)
        """
        results = []
        for server in self.cycladesClient.list_servers():
            # The listing reports the id as a number; normalize to str
            serverId = str(server[u'id'])
            details = self.cycladesClient.get_server_details(serverId)
            status = NodeStatus(details[u'status'])
            results.append(ListNodeResult(serverId, status, details))
        return results

    def createVolume(self, serverId, sizeGB, projectId, sleepWaitSeconds=5):
        """
        Create an extra volume attached to the given server and block
        until it leaves the 'creating' state (normally into 'in_use').

        :param serverId: str
        :param sizeGB: Union[str, int]
        :param projectId: str
        :param sleepWaitSeconds: seconds to sleep between status polls
        :rtype: dict -- the creation response; u'id' holds the volume id
        """
        self.log("> serverId=%s, sizeGB=%s, projectId=%s" %
                 (serverId, sizeGB, projectId))

        response = self.blockStorageClient.create_volume(sizeGB,
                                                         serverId,
                                                         '%s-vol-%s' %
                                                         (serverId, sizeGB),
                                                         project=projectId)
        # The volume is being created asynchronously, status is 'creating'
        # we wait until it changes (to 'in_use')
        volumeId = response[u'id']

        def getVolumeDetails():
            # One poll: fetch details; status is upper-cased to match
            # the VOLUME_STATUS_* class constants
            _volumeDetails = self.blockStorageClient.get_volume_details(
                volumeId)
            _volumeStatus = _volumeDetails[u'status'].upper()
            self.log("volumeDetails = %s" % _volumeDetails)
            return _volumeDetails, _volumeStatus

        volumeDetails, volumeStatus = getVolumeDetails()
        while volumeStatus == OkeanosNativeClient.VOLUME_STATUS_CREATING:
            time.sleep(sleepWaitSeconds)
            volumeDetails, volumeStatus = getVolumeDetails()

        # response is something like this
        # {
        #     u'display_name': u'foo',
        #     u'id': u'46974',
        #     u'links': [
        #         {
        #             u'href': u'https://cyclades.okeanos.grnet.gr/volume/v2.0/volumes/46974',
        #             u'rel': u'self'
        #         }, {
        #             u'href': u'https://cyclades.okeanos.grnet.gr/volume/v2.0/volumes/46974',
        #             u'rel': u'bookmark'
        #         }
        #     ]
        # }

        self.log("< %s" % response)

        return response

    def attachVolume(self, serverId, sizeGB, projectId):
        """
        Create and attach an extra volume to the VM.

        Note: despite the historical wording, only the volume id is
        returned (not the volume name or device name).

        :param serverId: str
        :param sizeGB: Union[str, int]
        :param projectId: str
        :return: the id of the newly created volume
        """
        self.log("> serverId = %s, sizeGB = %s, projectId = %s" %
                 (serverId, sizeGB, projectId))
        result = self.createVolume(serverId, sizeGB, projectId)
        volumeId = result['id']
        return volumeId

    def deleteVolume(self, volumeId, sleepWaitSeconds=5):
        """
        Delete the volume identified by `volumeId`, waiting until it is
        no longer reported as in use.

        :param volumeId: str
        :param sleepWaitSeconds: seconds to sleep between status polls
        :return: the deletion response
        """
        def currentStatus():
            # One poll: fetch details, log them, return the upper-cased
            # status so it matches the VOLUME_STATUS_* class constants
            details = self.blockStorageClient.get_volume_details(volumeId)
            self.log("volumeDetails = %s" % details)
            return details[u'status'].upper()

        volumeStatus = currentStatus()
        response = self.blockStorageClient.delete_volume(volumeId)

        # Normal status transition is:
        #   IN_USE => DELETING => DELETED
        # We only wait for the volume to leave IN_USE; waiting for the
        # final DELETED state would take too long.
        while volumeStatus == OkeanosNativeClient.VOLUME_STATUS_IN_USE:
            time.sleep(sleepWaitSeconds)
            volumeStatus = currentStatus()

        return response

    def createNode(self,
                   nodeName,
                   flavorIdOrName,
                   imageId,
                   sshPubKey=None,
                   initScriptPathAndData=None,
                   remoteUsername="******",
                   remoteUsergroup=None,
                   localPubKeyData=None,
                   createAsyncInitScript=True,
                   projectId=None):
        """
        Create a new server, injecting SSH keys and an optional init
        script via the server 'personality' mechanism.

        The returned NodeDetails carries no IP address yet (the create
        call does not include one); callers must wait for the node to
        become active and refresh its details.

        :rtype : NodeDetails
        :type localPubKeyData: str
        :type sshPubKey: str
        :type imageId: str
        :type flavorIdOrName: str
        :type nodeName: str
        """
        self.log("Creating node '%s', %s, %s" %
                 (nodeName, flavorIdOrName, imageId))

        # Normalize falsy values (e.g. '') to None
        sshPubKey = sshPubKey or None
        if sshPubKey is not None:
            self.log("User SSH public key to be injected in %s: %s" %
                     (nodeName, sshPubKey))
        remoteUsergroup = remoteUsergroup or remoteUsername
        flavorId = self.getFlavorId(flavorIdOrName)

        # We make sure:
        # a) The orchestrator can do password-less SSH on the newly created machine (via ~/.ssh/id_rsa.pub)
        # b) The SlipStream user can do password-less SSH on the newly created machine (via the provided userPubKey)
        # c) The provided init script is injected

        localPubKeyData = localPubKeyData or loadPubRsaKeyData()
        self.log("Local SSH public key to be injected in %s: %s" %
                 (nodeName, localPubKeyData))

        # authorized_keys = local key (newline-terminated) + user key
        if sshPubKey is None:
            authorized_keys = localPubKeyData
        else:
            if not localPubKeyData.endswith('\n'):
                localPubKeyData += '\n'
            authorized_keys = "%s%s" % (localPubKeyData, sshPubKey)

        # See https://www.synnefo.org/docs/kamaki/latest/developers/showcase.html#inject-ssh-keys
        import base64
        personality = [
            dict(contents=base64.b64encode(authorized_keys),
                 path="/%s/.ssh/authorized_keys" % remoteUsername,
                 owner=remoteUsername,
                 group=remoteUsergroup,
                 mode=0600)
        ]

        if initScriptPathAndData is not None:
            initScriptPath, initScriptData = initScriptPathAndData

            # Inject the init script itself, executable by the user
            personality.append(
                dict(contents=base64.b64encode(initScriptData),
                     path=initScriptPath,
                     owner=remoteUsername,
                     group=remoteUsergroup,
                     mode=0777))

            # In order for the contextualization script to run asynchronously,
            # we create another script that launches the original via nohup
            if createAsyncInitScript:
                asyncInitScriptPath = "%s.async" % initScriptPath
                asyncInitScriptData = "#!/bin/sh -e\nexec nohup %s &\n" % initScriptPath

                personality.append(
                    dict(contents=base64.b64encode(asyncInitScriptData),
                         path=asyncInitScriptPath,
                         owner=remoteUsername,
                         group=remoteUsergroup,
                         mode=0777))
            else:
                asyncInitScriptPath = None
        else:
            # No init script at all
            initScriptPath = None
            initScriptData = None
            asyncInitScriptPath = None
            asyncInitScriptData = None

        self.log(">> Personalities")
        for _p in personality:
            self.log(">>>> %s" % _p)

        resultDict = self.cycladesClient.create_server(nodeName,
                                                       flavorId,
                                                       imageId,
                                                       personality=personality,
                                                       project_id=projectId)
        # No IP is included in this result
        nodeDetails = NodeDetails(resultDict,
                                  sshPubKey=sshPubKey,
                                  initScriptPath=initScriptPath,
                                  initScriptData=initScriptData,
                                  asyncInitScriptPath=asyncInitScriptPath)
        self.log("Created node %s status %s, adminPass = %s, ip4s = %s" %
                 (nodeDetails.id, nodeDetails.status.okeanosStatus,
                  nodeDetails.adminPass, nodeDetails.ipv4s))
        return nodeDetails

    def runCommandOnNode(self,
                         nodeDetails,
                         command,
                         username='******',
                         localPrivKey=None,
                         timeout=None,
                         runSynchronously=True):
        """
        Run `command` over SSH on the node's first IPv4 address.

        :type nodeDetails: NodeDetails
        :type command: str
        :type localPrivKey: str
        :type timeout: int
        """
        return runCommandOnHost(nodeDetails.ipv4s[0],
                                command,
                                username=username,
                                localPrivKey=localPrivKey,
                                timeout=timeout,
                                runSynchronously=runSynchronously)

    def checkSshOnNode(self,
                       nodeDetails,
                       username="******",
                       localPrivKey=None,
                       timeout=None):
        """One-shot SSH reachability check on the node's first IPv4."""
        return checkSshOnHost(nodeDetails.ipv4s[0],
                              username=username,
                              localPrivKey=localPrivKey,
                              timeout=timeout)

    def waitSshOnHost(self,
                      hostname,
                      username="******",
                      localPrivKey=None,
                      timeout=None,
                      sleepSeconds=10):
        """
        Block until an SSH connection to `hostname` succeeds, retrying
        every `sleepSeconds` seconds.
        """
        t0 = time.time()
        while not checkSshOnHost(hostname,
                                 username=username,
                                 localPrivKey=localPrivKey,
                                 timeout=timeout):
            time.sleep(sleepSeconds)
        dtsec = time.time() - t0
        self.log("SSH good for %s@%s after %s sec" %
                 (username, hostname, dtsec))

    def waitSshOnNode(self,
                      nodeDetails,
                      username="******",
                      localPrivKey=None,
                      timeout=None):
        """Block until SSH to the node's first IPv4 address succeeds."""
        self.waitSshOnHost(nodeDetails.ipv4s[0],
                           username=username,
                           localPrivKey=localPrivKey,
                           timeout=timeout)

    def getNodeDetails(self, nodeId):
        """
        Fetch fresh details for the given server.

        :type nodeId: str
        :rtype : NodeDetails
        """
        return NodeDetails(self.cycladesClient.get_server_details(nodeId))

    def waitNodeStatus(self, nodeId, expectedOkeanosStatus, sleepSeconds=5):
        """
        Poll the node until it reaches `expectedOkeanosStatus`.

        :type nodeId: str
        :type expectedOkeanosStatus: str
        :rtype : NodeDetails
        """
        t0 = time.time()
        while True:
            nodeDetails = self.getNodeDetails(nodeId)
            if nodeDetails.status.okeanosStatus == expectedOkeanosStatus:
                break
            time.sleep(sleepSeconds)
        dtsec = time.time() - t0
        self.log("Node %s status %s after %s sec" %
                 (nodeId, expectedOkeanosStatus, dtsec))
        return nodeDetails

    def waitCurrentStatus(self,
                          nodeId,
                          currentOkeanosStatus,
                          sleepSeconds=5,
                          maxSleepSeconds=400):
        """
        Wait until the node leaves `currentOkeanosStatus`, then return
        its fresh details.

        :type nodeId: str
        :type currentOkeanosStatus: str
        :type sleepSeconds: float
        :rtype : NodeDetails
        """
        t0 = time.time()
        # Delegate the polling to kamaki's wait_server
        self.cycladesClient.wait_server(nodeId,
                                        current_status=currentOkeanosStatus,
                                        delay=sleepSeconds,
                                        max_wait=maxSleepSeconds)
        nodeDetails = self.getNodeDetails(nodeId)
        dtsec = time.time() - t0
        self.log("Node %s status %s -> %s after %s sec" %
                 (nodeId, currentOkeanosStatus,
                  nodeDetails.status.okeanosStatus, dtsec))
        return nodeDetails

    def createNodeAndWait(self,
                          nodeName,
                          flavorIdOrName,
                          imageId,
                          sshPubKey,
                          initScriptPathAndData=None,
                          remoteUsername="******",
                          remoteUsergroup=None,
                          localPubKeyData=None,
                          localPrivKey=None,
                          sshTimeout=None,
                          runInitScriptSynchronously=False,
                          extraVolatileDiskGB=0,
                          projectId=None):
        """
        Create a node, wait until it leaves BUILD and is reachable via
        SSH, optionally attach an extra volatile disk, then run the
        injected init script on it.

        :type extraVolatileDiskGB: int
        :type runInitScriptSynchronously: bool
        :type sshPubKey: str
        :type imageId: str
        :type flavorIdOrName: str
        :type nodeName: str
        :type sshTimeout: float
        :rtype : NodeDetails
        :return: (nodeDetails, runResult) -- node details (including
            adminPass) and the result of running the init script
        """
        localPrivKey = localPrivKey or loadRsaPrivKey()

        # Note that this returned value (NodeDetails) contains the adminPass
        nodeDetails = self.createNode(
            nodeName,
            flavorIdOrName,
            imageId,
            sshPubKey,
            initScriptPathAndData=initScriptPathAndData,
            remoteUsername=remoteUsername,
            remoteUsergroup=remoteUsergroup,
            localPubKeyData=localPubKeyData,
            projectId=projectId)
        nodeId = nodeDetails.id
        # Wait for the node to leave BUILD, then pick up its IPs/status
        nodeDetailsActive = self.waitCurrentStatus(nodeId, NodeStatus.BUILD)
        nodeDetails.updateIPsAndStatusFrom(nodeDetailsActive)

        # attach any additional disk
        hostIP = nodeDetails.ipv4s[0]
        if extraVolatileDiskGB:
            self.log(
                "Creating volatile disk of size %s GB for machine IP=%s, id=%s"
                % (extraVolatileDiskGB, hostIP, nodeId))
            # NOTE(review): createVolume returns the full creation
            # response dict, so the log below prints the dict rather
            # than a bare volume id -- confirm intended.
            volumeId = self.createVolume(nodeId, extraVolatileDiskGB,
                                         projectId)
            self.log(
                "Created volumeId=%s of size %s GB for machine IP=%s, id=%s" %
                (volumeId, extraVolatileDiskGB, hostIP, nodeId))
            # We do nothing more with the volumeId.
            # When the VM is destroyed by the IaaS, the extra disk is automatically destroyed as well.
        else:
            self.log(
                "No need for extra volatile disk for machine IP=%s, id=%s" %
                (hostIP, nodeId))

        # Some times, right after node is reported ACTIVE, network is unreachable or SSH is not immediately ready.
        # We have to cope with that by waiting.
        sshTimeout = sshTimeout or 7.0
        self.waitSshOnNode(nodeDetails,
                           username=remoteUsername,
                           localPrivKey=localPrivKey,
                           timeout=sshTimeout)

        initScriptPath = nodeDetails.initScriptPath

        runResult = self.runCommandOnNode(
            nodeDetails,
            initScriptPath,
            username=remoteUsername,
            localPrivKey=localPrivKey,
            runSynchronously=runInitScriptSynchronously)
        return nodeDetails, runResult

    def shutdownNode(self, nodeId):
        """
        Request a shutdown of the node unless it is already stopped.
        Does not wait for the shutdown to complete.

        :type nodeId: str
        :rtype : NodeDetails
        """
        self.log("Shutting down node %s" % nodeId)
        nodeDetails = self.getNodeDetails(nodeId)
        if nodeDetails.status.isStopped():
            self.log("Node %s already shut down" % nodeId)
        else:
            self.cycladesClient.shutdown_server(nodeId)
            self.log("Shutdown node %s status %s" %
                     (nodeId, nodeDetails.status.okeanosStatus))
        return nodeDetails

    def shutdownNodeAndWait(self, nodeId):
        """
        Shut the node down and block until its status is STOPPED.

        :type nodeId: str
        :rtype : NodeDetails
        """
        nodeDetails = self.shutdownNode(nodeId)
        if nodeDetails.status.isStopped():
            return nodeDetails
        waited = self.waitNodeStatus(nodeId, NodeStatus.STOPPED)
        nodeDetails.updateStatusFrom(waited)
        self.log("Shutdown node %s status %s" %
                 (nodeId, nodeDetails.status.okeanosStatus))
        return nodeDetails

    def deleteNode(self, nodeId):
        """
        Request deletion of the node unless it is already deleted.
        Does not wait for the deletion to complete.

        :type nodeId: str
        :rtype : NodeDetails
        """
        self.log("Deleting nodeId %s" % nodeId)
        nodeDetails = self.getNodeDetails(nodeId)
        if nodeDetails.status.isDeleted():
            return nodeDetails
        self.cycladesClient.delete_server(nodeId)
        self.log("Deleted node %s status %s" %
                 (nodeId, nodeDetails.status.okeanosStatus))
        return nodeDetails

    def deleteNodeAndWait(self, nodeId):
        """
        Delete the node and block until its status is DELETED.

        :type nodeId: str
        :rtype : NodeDetails
        """
        nodeDetails = self.deleteNode(nodeId)
        if nodeDetails.status.isDeleted():
            return nodeDetails
        waited = self.waitNodeStatus(nodeId, NodeStatus.DELETED)
        nodeDetails.updateStatusFrom(waited)
        self.log("Deleted node %s status %s" %
                 (nodeId, nodeDetails.status.okeanosStatus))
        return nodeDetails

    def log(self, msg=''):
        """Log `msg` prefixed with 'ClassName::callingMethod'."""
        # inspect.stack()[1][3] is the caller's function name, so this
        # only labels correctly when called directly from that method.
        who = '%s::%s' % (self.__class__.__name__, inspect.stack()[1][3])
        LOG('%s# %s' % (who, msg))

    def getNodeIPv4(self, nodeId):
        """Return the first IPv4 address of the given node."""
        ipv4 = self.getNodeDetails(nodeId).ipv4s[0]
        LOG("< for nodeId = %s, IPv4 = %s" % (nodeId, ipv4))
        return ipv4

    def getNodePartitions(self,
                          nodeId,
                          username='******',
                          localPrivKey=None,
                          timeout=None,
                          ssh=None):
        """
        SSH into the node and return the (status, partitions) pair
        reported by getHostPartitions().
        """
        self.log("> nodeId = %s" % nodeId)
        ipv4 = self.getNodeIPv4(nodeId)
        outcome = getHostPartitions(ipv4,
                                    username=username,
                                    localPrivKey=localPrivKey,
                                    timeout=timeout,
                                    ssh=ssh)
        status, partitions = outcome
        self.log("< status = %s, partitions = %s" % (status, partitions))
        return status, partitions

    def waitForExtraNodePartition(self,
                                  serverId,
                                  partitions,
                                  username='******',
                                  localPrivKey=None,
                                  timeout=None,
                                  ssh=None):
        """
        Given the set of pre-existing partitions, we wait until a new one appears and then we return it.
        :param serverId: str
        :param partitions: set[str]
        :return: the extra partition(s) as a set difference, or None if
            listing the remote partitions ever fails (non-zero status).
            prepend '/dev/' to get the device name
        """
        def getem():
            # One (status, partitions) probe over SSH
            return self.getNodePartitions(serverId,
                                          username=username,
                                          localPrivKey=localPrivKey,
                                          timeout=timeout,
                                          ssh=ssh)

        self.log("Waiting, current partitions: %s" % partitions)
        status1, partitions1 = getem()
        if status1 != 0:
            # Non-zero status means the remote listing failed
            return None

        # NOTE(review): no sleep between probes; each getem() is an SSH
        # round-trip, which presumably paces the loop -- confirm.
        while partitions == partitions1:
            self.log("Looping, new partitions: %s" % partitions1)
            status1, partitions1 = getem()
            if status1 != 0:
                return None

        # We assume one more is added ...
        newPartition = partitions1.difference(partitions)
        self.log("< For serverId = %s, new partition = %s" %
                 (serverId, newPartition))
        return newPartition

    def resizeNode(self, serverId, flavorIdOrName, sleepSeconds=5):
        """
        Resize the server to a new flavor. Hot resizing is not supported,
        so the server is shut down first and restarted afterwards.

        :type serverId: str
        :type flavorIdOrName: str
        :param sleepSeconds: delay between polls while waiting for the
            new flavor to be reported (avoids a busy-wait on the API)
        :return: the new flavorId, or None if the node already had it
        """
        flavorId = self.getFlavorId(flavorIdOrName)
        nodeDetails = self.getNodeDetails(serverId)
        self.log("Node %s status is %s" %
                 (serverId, nodeDetails.status.okeanosStatus))
        existingFlavorId = nodeDetails.flavorId
        self.log("Requested flavorId = %s [given: %s]" %
                 (flavorId, flavorIdOrName))

        if existingFlavorId == flavorId:
            self.log("FlavorId already is %s, no resizing action is needed !" %
                     flavorId)
            return

        t0 = time.time()

        self.log("Resizing from %s -> %s" % (existingFlavorId, flavorId))
        # Hot resizing is not supported, so we must shut the server down first
        self.log("Shutting down node %s" % serverId)
        nodeDetails = self.shutdownNodeAndWait(serverId)
        self.log("Node %s status is %s" %
                 (serverId, nodeDetails.status.okeanosStatus))

        # This takes the server to status 'RESIZE'
        self.log("Resizing node %s ..." % serverId)
        resizeResponse = self.cycladesClient.resize_server(serverId, flavorId)
        self.log("resizeResponse = %s" % resizeResponse)

        # Wait until the server acquires the new flavor.
        # Sleep between polls so we do not hammer the API in a busy loop.
        nodeDetails = self.getNodeDetails(serverId)
        while nodeDetails.flavorId != flavorId:
            time.sleep(sleepSeconds)
            nodeDetails = self.getNodeDetails(serverId)
        self.log("Node %s resized" % serverId)

        # And we are now ready to restart with the new flavor
        self.log("Restarting node %s" % serverId)
        self.cycladesClient.start_server(serverId)
        self.waitNodeStatus(serverId, NodeStatus.ACTIVE)

        t1 = time.time()
        dtsec = t1 - t0
        self.log("Node %s restarted with new flavor %s in %s sec" %
                 (serverId, flavorId, dtsec))
        return flavorId