Example 1
    def __init__(self,
                 token,
                 authURL='https://accounts.okeanos.grnet.gr/identity/v2.0'):
        """
        :type authURL: str
        :type token: str
        """
        from kamaki.clients.utils import https
        https.patch_ignore_ssl()

        self.authURL = authURL
        self.token = token
        self.cycladesServiceType = CycladesClient.service_type
        self.blockStorageServiceType = CycladesBlockStorageClient.service_type
        self.astakosClient = AstakosClient(self.authURL, self.token)
        endpointF = self.astakosClient.get_service_endpoints
        self.cycladesEndpoint = endpointF(
            self.cycladesServiceType)[u'publicURL']
        self.cycladesClient = CycladesClient(self.cycladesEndpoint, self.token)
        self.blockStorageEndpoint = endpointF(
            self.blockStorageServiceType)[u'publicURL']
        self.blockStorageClient = CycladesBlockStorageClient(
            self.blockStorageEndpoint, token)

        flavorsById = {}
        flavorsByName = {}
        for flavor in self.cycladesClient.list_flavors():
            _id = flavor[u'id']
            name = flavor[u'name']
            flavorsById[_id] = name
            flavorsByName[name] = _id
        self.flavorsById = flavorsById
        self.flavorsByName = flavorsByName
Example 2
    def __init__(self, token, authURL='https://accounts.okeanos.grnet.gr/identity/v2.0'):
        """
        :type authURL: str
        :type token: str
        """
        from kamaki.clients.utils import https
        https.patch_ignore_ssl()

        self.authURL = authURL
        self.token = token
        self.cycladesServiceType = CycladesClient.service_type
        self.blockStorageServiceType = CycladesBlockStorageClient.service_type
        self.astakosClient = AstakosClient(self.authURL, self.token)
        endpointF = self.astakosClient.get_service_endpoints
        self.cycladesEndpoint = endpointF(self.cycladesServiceType)[u'publicURL']
        self.cycladesClient = CycladesClient(self.cycladesEndpoint, self.token)
        self.blockStorageEndpoint = endpointF(self.blockStorageServiceType)[u'publicURL']
        self.blockStorageClient = CycladesBlockStorageClient(self.blockStorageEndpoint, token)

        flavorsById = {}
        flavorsByName = {}
        for flavor in self.cycladesClient.list_flavors():
            _id = flavor[u'id']
            name = flavor[u'name']
            flavorsById[_id] = name
            flavorsByName[name] = _id
        self.flavorsById = flavorsById
        self.flavorsByName = flavorsByName
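
A minimal usage sketch for this constructor, assuming it belongs to the OkeanosNativeClient class shown near the end of this page; the token value is a placeholder and imports are omitted, as in the surrounding examples.

token = 'user-api-token'  # placeholder, not a real ~okeanos token
client = OkeanosNativeClient(token)  # uses the default identity URL
print(client.flavorsById)    # flavor id -> name, built in __init__
print(client.flavorsByName)  # flavor name -> id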
Example 3
    def setUp(self):
        print
        with open(self['cmpimage', 'details']) as f:
            self.img_details = eval(f.read())
        self.img = self.img_details['id']
        with open(self['flavor', 'details']) as f:
            self._flavor_details = eval(f.read())
        self.PROFILES = ('ENABLED', 'DISABLED', 'PROTECTED')

        self.servers = {}
        self.now = time.mktime(time.gmtime())
        self.servname1 = 'serv' + unicode(self.now)
        self.servname2 = self.servname1 + '_v2'
        self.servname1 += '_v1'
        self.flavorid = self._flavor_details['id']
        #servers have to be created at the beginning...
        self.networks = {}
        self.netname1 = 'net' + unicode(self.now)
        self.netname2 = 'net' + unicode(self.now) + '_v2'

        self.cloud = 'cloud.%s' % self['testcloud']
        aurl, self.token = self[self.cloud, 'url'], self[self.cloud, 'token']
        self.auth_base = AstakosClient(aurl, self.token)
        curl = self.auth_base.get_service_endpoints('compute')['publicURL']
        self.client = CycladesClient(curl, self.token)
Example 4
    def initialize_clients(self):
        """Initialize all the Kamaki Clients"""
        self.astakos = AstakosClient(self.auth_url, self.token)
        self.astakos.CONNECTION_RETRY_LIMIT = self.retry

        endpoints = self.astakos.authenticate()

        self.compute_url = _get_endpoint_url(endpoints, "compute")
        self.compute = ComputeClient(self.compute_url, self.token)
        self.compute.CONNECTION_RETRY_LIMIT = self.retry

        self.cyclades = CycladesClient(self.compute_url, self.token)
        self.cyclades.CONNECTION_RETRY_LIMIT = self.retry

        self.network_url = _get_endpoint_url(endpoints, "network")
        self.network = CycladesNetworkClient(self.network_url, self.token)
        self.network.CONNECTION_RETRY_LIMIT = self.retry

        self.pithos_url = _get_endpoint_url(endpoints, "object-store")
        self.pithos = PithosClient(self.pithos_url, self.token)
        self.pithos.CONNECTION_RETRY_LIMIT = self.retry

        self.image_url = _get_endpoint_url(endpoints, "image")
        self.image = ImageClient(self.image_url, self.token)
        self.image.CONNECTION_RETRY_LIMIT = self.retry
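
The _get_endpoint_url helper used above is not shown in this example. A minimal sketch of what it might look like, assuming endpoints is the Keystone-v2 style catalog returned by AstakosClient.authenticate():

def _get_endpoint_url(endpoints, service_type):
    """Hypothetical helper: return the publicURL of the first endpoint
    whose service type matches service_type."""
    catalog = endpoints['access']['serviceCatalog']
    matching = [e for e in catalog if e['type'] == service_type]
    return matching[0]['endpoints'][0]['publicURL']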
Example 5
    def setup_kamaki(self):
        """Initialize kamaki

        Setup cyclades_client, image_client and compute_client
        """

        # Patch kamaki for SSL verification
        _kamaki_ssl(ignore_ssl=IGNORE_SSL)

        config = kamaki_config.Config()
        if self.kamaki_cloud is None:
            try:
                self.kamaki_cloud = config.get("global", "default_cloud")
            except AttributeError:
                # Compatibility with kamaki version <=0.10
                self.kamaki_cloud = config.get("global", "default_cloud")

        self.logger.info("Setup kamaki client, using cloud '%s'.." %
                         self.kamaki_cloud)
        auth_url = config.get_cloud(self.kamaki_cloud, "url")
        self.logger.debug("Authentication URL is %s" % _green(auth_url))
        token = config.get_cloud(self.kamaki_cloud, "token")
        # self.logger.debug("Token is %s" % _green(token))

        self.astakos_client = AstakosClient(auth_url, token)
        endpoints = self.astakos_client.authenticate()

        cyclades_url = get_endpoint_url(endpoints, "compute")
        self.logger.debug("Cyclades API url is %s" % _green(cyclades_url))
        self.cyclades_client = CycladesClient(cyclades_url, token)
        self.cyclades_client.CONNECTION_RETRY_LIMIT = 2

        network_url = get_endpoint_url(endpoints, "network")
        self.logger.debug("Network API url is %s" % _green(network_url))
        self.network_client = CycladesNetworkClient(network_url, token)
        self.network_client.CONNECTION_RETRY_LIMIT = 2

        image_url = get_endpoint_url(endpoints, "image")
        self.logger.debug("Images API url is %s" % _green(image_url))
        self.image_client = ImageClient(image_url, token)
        self.image_client.CONNECTION_RETRY_LIMIT = 2

        compute_url = get_endpoint_url(endpoints, "compute")
        self.logger.debug("Compute API url is %s" % _green(compute_url))
        self.compute_client = ComputeClient(compute_url, token)
        self.compute_client.CONNECTION_RETRY_LIMIT = 2
Example 6
def init_cyclades(endpoint, token):
    """
    Compute / Initialize Cyclades client. CycladesClient is used
    to create virtual machines.
    """
    logging.log(REPORT, ' Initialize a cyclades client')
    try:
        return CycladesClient(endpoint, token)
    except ClientError:
        msg = ' Failed to initialize cyclades client'
        raise ClientError(msg)
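
A possible way to wire this helper into the endpoint-discovery pattern used by the other examples; AUTH_URL and TOKEN are placeholders and imports are omitted.

AUTH_URL = 'https://accounts.okeanos.grnet.gr/identity/v2.0'
TOKEN = 'user-api-token'  # placeholder
astakos = AstakosClient(AUTH_URL, TOKEN)
compute_endpoint = astakos.get_service_endpoints('compute')['publicURL']
cyclades = init_cyclades(compute_endpoint, TOKEN)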
Example 7
    def authenticate(self, authentication=None):
        """

        :param authentication:
        :return:
        """
        if self.__cyclades is not None:
            return True
        try:
            authcl = AstakosClient(authentication['URL'],
                                   authentication['TOKEN'])
            authcl.authenticate()
            self.__cyclades = CycladesClient(
                authcl.get_service_endpoints('compute')['publicURL'],
                authentication['TOKEN'])
            self.__network_client = CycladesNetworkClient(
                authcl.get_service_endpoints('network')['publicURL'],
                authentication['TOKEN'])
        except ClientError:
            stderr.write('Connector initialization failed')
            return False
        return True
Example 8
    def setUp(self):
        self.now = time.mktime(time.gmtime())
        self.cloud = 'cloud.%s' % self['testcloud']
        aurl, self.token = self[self.cloud, 'url'], self[self.cloud, 'token']
        self.auth_base = AstakosCachedClient(aurl, self.token)
        self.imgname = 'img_%s' % self.now
        url = self.auth_base.get_service_endpoints('image')['publicURL']
        self.token = self.auth_base.token
        self.client = ImageClient(url, self.token)
        cyclades_url = self.auth_base.get_service_endpoints(
            'compute')['publicURL']
        self.cyclades = CycladesClient(cyclades_url, self.token)
        self._imglist = {}
        self._imgdetails = {}
Example 9
    def setup_kamaki(self):
        """Initialize kamaki

        Setup cyclades_client, image_client and compute_client
        """

        # Patch kamaki for SSL verification
        _kamaki_ssl(ignore_ssl=IGNORE_SSL)

        config = kamaki_config.Config()
        if self.kamaki_cloud is None:
            try:
                self.kamaki_cloud = config.get("global", "default_cloud")
            except AttributeError:
                # Compatibility with kamaki version <=0.10
                self.kamaki_cloud = config.get("global", "default_cloud")

        self.logger.info("Setup kamaki client, using cloud '%s'.." %
                         self.kamaki_cloud)
        auth_url = config.get_cloud(self.kamaki_cloud, "url")
        self.logger.debug("Authentication URL is %s" % _green(auth_url))
        token = config.get_cloud(self.kamaki_cloud, "token")
        # self.logger.debug("Token is %s" % _green(token))

        self.astakos_client = AstakosClient(auth_url, token)
        endpoints = self.astakos_client.authenticate()

        cyclades_url = get_endpoint_url(endpoints, "compute")
        self.logger.debug("Cyclades API url is %s" % _green(cyclades_url))
        self.cyclades_client = CycladesClient(cyclades_url, token)
        self.cyclades_client.CONNECTION_RETRY_LIMIT = 2

        network_url = get_endpoint_url(endpoints, "network")
        self.logger.debug("Network API url is %s" % _green(network_url))
        self.network_client = CycladesNetworkClient(network_url, token)
        self.network_client.CONNECTION_RETRY_LIMIT = 2

        image_url = get_endpoint_url(endpoints, "image")
        self.logger.debug("Images API url is %s" % _green(image_url))
        self.image_client = ImageClient(image_url, token)
        self.image_client.CONNECTION_RETRY_LIMIT = 2

        compute_url = get_endpoint_url(endpoints, "compute")
        self.logger.debug("Compute API url is %s" % _green(compute_url))
        self.compute_client = ComputeClient(compute_url, token)
        self.compute_client.CONNECTION_RETRY_LIMIT = 2
Example 10
    def authenticate(self, authentication=None):
        """

        :param authentication:
        :return:
        """
        if self.__cyclades is not None:
            return True
        try:
            authcl = AstakosClient(authentication['URL'], authentication['TOKEN'])
            authcl.authenticate()
            self.__cyclades = CycladesClient(authcl.get_service_endpoints('compute')['publicURL'],
                                             authentication['TOKEN'])
            self.__network_client = CycladesNetworkClient(authcl.get_service_endpoints('network')['publicURL'],
                                                          authentication['TOKEN'])
        except ClientError:
            stderr.write('Connector initialization failed')
            return False
        return True
Example 11
    def initialize_clients(self, ignore_ssl=False):
        """Initialize all the Kamaki Clients"""

        # Patch kamaki for SSL verification
        self._kamaki_ssl(ignore_ssl=ignore_ssl)

        # Initialize kamaki Clients
        self.astakos = AstakosClient(self.auth_url, self.token)
        self.astakos.CONNECTION_RETRY_LIMIT = self.retry

        self.compute_url = self.astakos.get_endpoint_url(
            ComputeClient.service_type)
        self.compute = ComputeClient(self.compute_url, self.token)
        self.compute.CONNECTION_RETRY_LIMIT = self.retry

        self.cyclades_url = self.astakos.get_endpoint_url(
            CycladesClient.service_type)
        self.cyclades = CycladesClient(self.cyclades_url, self.token)
        self.cyclades.CONNECTION_RETRY_LIMIT = self.retry

        self.block_storage_url = self.astakos.get_endpoint_url(
            CycladesBlockStorageClient.service_type)
        self.block_storage = CycladesBlockStorageClient(
            self.block_storage_url, self.token)
        self.block_storage.CONNECTION_RETRY_LIMIT = self.retry

        self.network_url = self.astakos.get_endpoint_url(
            CycladesNetworkClient.service_type)
        self.network = CycladesNetworkClient(self.network_url, self.token)
        self.network.CONNECTION_RETRY_LIMIT = self.retry

        self.pithos_url = self.astakos.get_endpoint_url(
            PithosClient.service_type)
        self.pithos = PithosClient(self.pithos_url, self.token)
        self.pithos.CONNECTION_RETRY_LIMIT = self.retry

        self.image_url = self.astakos.get_endpoint_url(
            ImageClient.service_type)
        self.image = ImageClient(self.image_url, self.token)
        self.image.CONNECTION_RETRY_LIMIT = self.retry
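
The same discovery can be done standalone with the newer AstakosClient.get_endpoint_url() call used above; a minimal sketch with placeholder credentials, imports omitted.

AUTH_URL = 'https://accounts.okeanos.grnet.gr/identity/v2.0'  # as in the other examples
TOKEN = 'user-api-token'  # placeholder
astakos = AstakosClient(AUTH_URL, TOKEN)
cyclades_url = astakos.get_endpoint_url(CycladesClient.service_type)
cyclades = CycladesClient(cyclades_url, TOKEN)
network_url = astakos.get_endpoint_url(CycladesNetworkClient.service_type)
network = CycladesNetworkClient(network_url, TOKEN)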
Example 12
    def setup_kamaki(self):
        """Initialize kamaki

        Setup cyclades_client, image_client and compute_client
        """

        config = kamaki_config.Config()
        if self.kamaki_cloud is None:
            self.kamaki_cloud = config.get_global("default_cloud")

        self.logger.info("Setup kamaki client, using cloud '%s'.." %
                         self.kamaki_cloud)
        auth_url = config.get_cloud(self.kamaki_cloud, "url")
        self.logger.debug("Authentication URL is %s" % _green(auth_url))
        token = config.get_cloud(self.kamaki_cloud, "token")
        #self.logger.debug("Token is %s" % _green(token))

        self.astakos_client = AstakosClient(auth_url, token)

        cyclades_url = \
            self.astakos_client.get_service_endpoints('compute')['publicURL']
        self.logger.debug("Cyclades API url is %s" % _green(cyclades_url))
        self.cyclades_client = CycladesClient(cyclades_url, token)
        self.cyclades_client.CONNECTION_RETRY_LIMIT = 2

        image_url = \
            self.astakos_client.get_service_endpoints('image')['publicURL']
        self.logger.debug("Images API url is %s" % _green(image_url))
        self.image_client = ImageClient(image_url, token)
        self.image_client.CONNECTION_RETRY_LIMIT = 2

        compute_url = \
            self.astakos_client.get_service_endpoints('compute')['publicURL']
        self.logger.debug("Compute API url is %s" % _green(compute_url))
        self.compute_client = ComputeClient(compute_url, token)
        self.compute_client.CONNECTION_RETRY_LIMIT = 2
Example 13
class Cyclades(livetest.Generic):
    """Set up a Cyclades test"""
    def setUp(self):
        print
        with open(self['cmpimage', 'details']) as f:
            self.img_details = eval(f.read())
        self.img = self.img_details['id']
        with open(self['flavor', 'details']) as f:
            self._flavor_details = eval(f.read())
        self.PROFILES = ('ENABLED', 'DISABLED', 'PROTECTED')

        self.servers = {}
        self.now = time.mktime(time.gmtime())
        self.servname1 = 'serv' + unicode(self.now)
        self.servname2 = self.servname1 + '_v2'
        self.servname1 += '_v1'
        self.flavorid = self._flavor_details['id']
        #servers have to be created at the beginning...
        self.networks = {}
        self.netname1 = 'net' + unicode(self.now)
        self.netname2 = 'net' + unicode(self.now) + '_v2'

        self.cloud = 'cloud.%s' % self['testcloud']
        aurl, self.token = self[self.cloud, 'url'], self[self.cloud, 'token']
        self.auth_base = AstakosClient(aurl, self.token)
        curl = self.auth_base.get_service_endpoints('compute')['publicURL']
        self.client = CycladesClient(curl, self.token)

    def tearDown(self):
        """Destoy servers used in testing"""
        for net in self.networks.keys():
            self._delete_network(net)
        for server in self.servers.values():
            self._delete_server(server['id'])
            print('DEL VM %s (%s)' % (server['id'], server['name']))

    def test_000(self):
        "Prepare a full Cyclades test scenario"
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self.server2 = self._create_server(
            self.servname2, self.flavorid, self.img)
        super(self.__class__, self).test_000()

    def _create_server(self, servername, flavorid, imageid, personality=None):
        server = self.client.create_server(
            servername, flavorid, imageid, personality=personality)
        print('CREATE VM %s (%s)' % (server['id'], server['name']))
        self.servers[servername] = server
        return server

    def _delete_server(self, servid):
        try:
            current_state = self.client.get_server_details(servid)
            current_state = current_state['status']
            if current_state == 'DELETED':
                return
            self.client.delete_server(servid)
            self._wait_for_status(servid, current_state)
            self.client.delete_server(servid)
        except:
            return

    def _create_network(self, netname, **kwargs):
        net = self.client.create_network(netname, **kwargs)
        self.networks[net['id']] = net
        return net

    def _delete_network(self, netid):
        if netid not in self.networks:
            return None
        print('Disconnect nics of network %s' % netid)
        self.client.disconnect_network_nics(netid)

        def netwait(wait):
            try:
                self.client.delete_network(netid)
            except ClientError:
                time.sleep(wait)
        self.do_with_progress_bar(
            netwait,
            'Delete network %s' % netid,
            self._waits[:7])
        return self.networks.pop(netid)

    def _wait_for_network(self, netid, status):

        def netwait(wait):
            r = self.client.get_network_details(netid)
            if r['status'] == status:
                return
            time.sleep(wait)
        self.do_with_progress_bar(
            netwait,
            'Wait network %s to reach status %s' % (netid, status),
            self._waits[:5])

    def _wait_for_nic(self, netid, servid, in_creation=True):
        self._wait_for_network(netid, 'ACTIVE')

        def nicwait(wait):
            nics = self.client.list_server_nics(servid)
            for net in nics:
                found_nic = net['network_id'] == netid
                if (in_creation and found_nic) or not (
                        in_creation or found_nic):
                    return
            time.sleep(wait)
        self.do_with_progress_bar(
            nicwait,
            'Wait nic-%s-%s to %sconnect' % (
                netid,
                servid,
                '' if in_creation else 'dis'),
            self._waits[:5])
        for net in self.client.list_server_nics(servid):
            if netid == net['network_id']:
                return True
        return False

    def _has_status(self, servid, status):
        r = self.client.get_server_details(servid)
        #print 'MY ', servid, ' STATUS IS ', r['status']
        return r['status'] == status

    def _wait_for_status(self, servid, status):
        (wait_bar, wait_cb) = self._safe_progress_bar(
            'Server %s in %s' % (servid, status))
        self.client.wait_server(
            servid, status, wait_cb=wait_cb, delay=2, max_wait=198)
        self._safe_progress_bar_finish(wait_bar)

    def test_parallel_creation(self):
        """test create with multiple threads
        Do not use this in regular livetest
        """
        from kamaki.clients import SilentEvent
        c = []
        for i in range(8):
            sname = '%s_%s' % (self.servname1, i)
            c.append(SilentEvent(
                self._create_server, sname, self.flavorid, self.img))
        for i in range(8):
            c[i].start()

    def test_create_server(self):
        """Test create_server"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._wait_for_status(self.server1['id'], 'BUILD')
        self._test_0010_create_server()

    def _test_0010_create_server(self):
        self.assertEqual(self.server1["name"], self.servname1)
        self.assertEqual(self.server1["flavor"]["id"], self.flavorid)
        self.assertEqual(self.server1["image"]["id"], self.img)
        self.assertEqual(self.server1["status"], "BUILD")

    def test_list_servers(self):
        """Test list servers"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self.server2 = self._create_server(
            self.servname2, self.flavorid, self.img)
        self._test_0020_list_servers()

    def _test_0020_list_servers(self):
        servers = self.client.list_servers()
        dservers = self.client.list_servers(detail=True)

        """detailed and simple are same size"""
        self.assertEqual(len(dservers), len(servers))
        fields = set([
            'created', 'flavor', 'hostId', 'image', 'progress', 'status',
            'updated'])
        for i, srv in enumerate(servers):
            self.assertTrue(fields.isdisjoint(srv))
            self.assertTrue(fields.issubset(dservers[i]))

        """detailed and simple contain same names"""
        names = sorted(map(lambda x: x["name"], servers))
        dnames = sorted(map(lambda x: x["name"], dservers))
        self.assertEqual(names, dnames)

    def _test_0030_wait_test_servers_to_build(self):
        """Pseudo-test to wait for VMs to load"""
        print('')
        self._wait_for_status(self.server1['id'], 'BUILD')
        self._wait_for_status(self.server2['id'], 'BUILD')

    def test_get_server_details(self):
        """Test get_server_details"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._wait_for_status(self.server1['id'], 'BUILD')
        self._test_0040_get_server_details()

    def _test_0040_get_server_details(self):
        r = self.client.get_server_details(self.server1['id'])
        self.assertEqual(r["name"], self.servname1)
        self.assertEqual(r["flavor"]["id"], self.flavorid)
        self.assertEqual(r["image"]["id"], self.img)
        self.assertEqual(r["status"], "ACTIVE")

    def test_update_server_name(self):
        """Test update_server_name"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._test_0050_update_server_name()

    def _test_0050_update_server_name(self):
        new_name = self.servname1 + '_new_name'
        self.client.update_server_name(self.server1['id'], new_name)
        r = self.client.get_server_details(
            self.server1['id'], success=(200, 400))
        self.assertEqual(r['name'], new_name)
        changed = self.servers.pop(self.servname1)
        changed['name'] = new_name
        self.servers[new_name] = changed

    def test_reboot_server(self):
        """Test reboot server"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._wait_for_status(self.server1['id'], 'BUILD')
        self.server2 = self._create_server(
            self.servname2, self.flavorid + 2, self.img)
        self._wait_for_status(self.server2['id'], 'BUILD')
        self._test_0060_reboot_server()
        self._wait_for_status(self.server1['id'], 'REBOOT')
        self._wait_for_status(self.server2['id'], 'REBOOT')

    def _test_0060_reboot_server(self):
        self.client.reboot_server(self.server1['id'])
        self.assertTrue(self._has_status(self.server1['id'], 'REBOOT'))
        self.client.reboot_server(self.server2['id'], hard=True)
        self.assertTrue(self._has_status(self.server2['id'], 'REBOOT'))

    def test_resize_server(self):
        """Modify the flavor of a server"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._test_0065_resize_server()
        self._delete_server(self.server1['id'])
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)

    def _test_0065_resize_server(self):
        self.client.resize_server(self.server1['id'], self.flavorid + 2)
        srv = self.client.get_server_details(self.server1['id'])
        self.assertEqual(srv['flavor']['id'], self.flavorid + 2)

    def _test_0070_wait_test_servers_to_reboot(self):
        """Pseudo-test to wait for VMs to load"""
        print('')
        self._wait_for_status(self.server1['id'], 'REBOOT')
        self._wait_for_status(self.server2['id'], 'REBOOT')

    def test_create_server_metadata(self):
        """Test create_server_metadata"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._test_0080_create_server_metadata()

    def _test_0080_create_server_metadata(self):
        r1 = self.client.create_server_metadata(
            self.server1['id'], 'mymeta', 'mymeta val')
        self.assertTrue('mymeta' in r1)
        r2 = self.client.get_server_metadata(self.server1['id'], 'mymeta')
        self.assert_dicts_are_equal(r1, r2)

    def test_get_server_metadata(self):
        """Test get server_metadata"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._test_0090_get_server_metadata()

    def _test_0090_get_server_metadata(self):
        self.client.update_server_metadata(
            self.server1['id'], mymeta_0='val_0')
        r = self.client.get_server_metadata(self.server1['id'], 'mymeta_0')
        self.assertEqual(r['mymeta_0'], 'val_0')

    def test_update_server_metadata(self):
        """Test update_server_metadata"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._test_0100_update_server_metadata()

    def _test_0100_update_server_metadata(self):
        r1 = self.client.update_server_metadata(
            self.server1['id'], mymeta3='val2')
        self.assertTrue('mymeta3' in r1)
        r2 = self.client.update_server_metadata(
            self.server1['id'], mymeta3='val3')
        self.assertEqual(r2['mymeta3'], 'val3')

    def test_delete_server_metadata(self):
        """Test delete_server_metadata"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._test_0110_delete_server_metadata()

    def _test_0110_delete_server_metadata(self):
        r1 = self.client.update_server_metadata(
            self.server1['id'], mymeta='val')
        self.assertTrue('mymeta' in r1)
        self.client.delete_server_metadata(self.server1['id'], 'mymeta')
        try:
            self.client.get_server_metadata(self.server1['id'], 'mymeta')
            raise ClientError('Wrong Error', status=100)
        except ClientError as err:
            self.assertEqual(err.status, 404)

    def test_list_flavors(self):
        """Test flavors_get"""
        self._test_0120_list_flavors()

    def _test_0120_list_flavors(self):
        r = self.client.list_flavors()
        self.assertTrue(len(r) > 1)
        r = self.client.list_flavors(detail=True)
        self.assertTrue('SNF:disk_template' in r[0])

    def test_get_flavor_details(self):
        """Test test_get_flavor_details"""
        self._test_0130_get_flavor_details()

    def _test_0130_get_flavor_details(self):
        r = self.client.get_flavor_details(self.flavorid)
        self.assert_dicts_are_equal(self._flavor_details, r)

    #def test_list_images(self):
    #    """Test list_images"""
    #    self._test_0140_list_images()

    def _test_0140_list_images(self):
        r = self.client.list_images()
        self.assertTrue(len(r) > 1)
        r = self.client.list_images(detail=True)
        for detailed_img in r:
            if detailed_img['id'] == self.img:
                break
        self.assert_dicts_are_equal(detailed_img, self.img_details)

    def test_get_image_details(self):
        """Test image_details"""
        self._test_0150_get_image_details()

    def _test_0150_get_image_details(self):
        r = self.client.get_image_details(self.img)
        self.assert_dicts_are_equal(r, self.img_details)

    def test_get_image_metadata(self):
        """Test get_image_metadata"""
        self._test_0160_get_image_metadata()

    def _test_0160_get_image_metadata(self):
        r = self.client.get_image_metadata(self.img)
        self.assert_dicts_are_equal(
            self.img_details['properties'], r)
        for key, val in self.img_details['properties'].items():
            r = self.client.get_image_metadata(self.img, key)
            self.assertEqual(r[key], val)

    def test_shutdown_server(self):
        """Test shutdown_server"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._wait_for_status(self.server1['id'], 'BUILD')
        self._test_0170_shutdown_server()

    def _test_0170_shutdown_server(self):
        self.client.shutdown_server(self.server1['id'])
        self._wait_for_status(self.server1['id'], 'ACTIVE')
        r = self.client.get_server_details(self.server1['id'])
        self.assertEqual(r['status'], 'STOPPED')

    def test_start_server(self):
        """Test start_server"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._wait_for_status(self.server1['id'], 'BUILD')
        self.client.shutdown_server(self.server1['id'])
        self._wait_for_status(self.server1['id'], 'ACTIVE')
        self._test_0180_start_server()

    def _test_0180_start_server(self):
        self.client.start_server(self.server1['id'])
        self._wait_for_status(self.server1['id'], 'STOPPED')
        r = self.client.get_server_details(self.server1['id'])
        self.assertEqual(r['status'], 'ACTIVE')

    def test_get_server_console(self):
        """Test get_server_console"""
        self.server2 = self._create_server(
            self.servname2, self.flavorid, self.img)
        self._wait_for_status(self.server2['id'], 'BUILD')
        self._test_0190_get_server_console()

    def _test_0190_get_server_console(self):
        r = self.client.get_server_console(self.server2['id'])
        self.assertTrue('host' in r)
        self.assertTrue('password' in r)
        self.assertTrue('port' in r)
        self.assertTrue('type' in r)

    def test_get_firewall_profile(self):
        """Test get_firewall_profile"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._test_0200_get_firewall_profile()

    def _test_0200_get_firewall_profile(self):
        self._wait_for_status(self.server1['id'], 'BUILD')
        fprofile = self.client.get_firewall_profile(self.server1['id'])
        self.assertTrue(fprofile in self.PROFILES)

    def test_set_firewall_profile(self):
        """Test set_firewall_profile"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._test_0210_set_firewall_profile()

    def _test_0210_set_firewall_profile(self):

        self._wait_for_status(self.server1['id'], 'BUILD')
        PROFILES = ['DISABLED', 'ENABLED', 'DISABLED', 'PROTECTED']
        fprofile = self.client.get_firewall_profile(self.server1['id'])
        print('')
        count_success = 0
        for counter, fprofile in enumerate(PROFILES):
            npos = counter + 1
            try:
                nprofile = PROFILES[npos]
            except IndexError:
                nprofile = PROFILES[0]
            print('\tprofile swap %s: %s -> %s' % (npos, fprofile, nprofile))
            self.client.set_firewall_profile(self.server1['id'], nprofile)
            time.sleep(0.5)
            self.client.reboot_server(self.server1['id'], hard=True)
            time.sleep(1)
            self._wait_for_status(self.server1['id'], 'REBOOT')
            time.sleep(0.5)
            changed = self.client.get_firewall_profile(self.server1['id'])
            try:
                self.assertEqual(changed, nprofile)
            except AssertionError as err:
                if count_success:
                    print('\tFAIL in swap #%s' % npos)
                    break
                else:
                    raise err
            count_success += 1

    def test_get_server_stats(self):
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._test_0220_get_server_stats()

    def _test_0220_get_server_stats(self):
        r = self.client.get_server_stats(self.server1['id'])
        it = ('cpuBar', 'cpuTimeSeries', 'netBar', 'netTimeSeries', 'refresh')
        for term in it:
            self.assertTrue(term in r)

    def test_create_network(self):
        """Test create_network"""
        self._test_0230_create_network()

    def _test_0230_create_network(self):
        print('\twith no params')
        self.network1 = self._create_network(self.netname1)
        self._wait_for_network(self.network1['id'], 'ACTIVE')
        n1id = self.network1['id']
        self.network1 = self.client.get_network_details(n1id)
        nets = self.client.list_networks(self.network1['id'])
        chosen = [net for net in nets if net['id'] == n1id][0]
        chosen.pop('updated')
        net1 = dict(self.network1)
        net1.pop('updated')
        self.assert_dicts_are_equal(chosen, net1)
        full_args = dict(
                cidr='192.168.1.0/24',
                gateway='192.168.1.1',
                type='MAC_FILTERED',
                dhcp=True)
        try_args = dict(all=True)
        try_args.update(full_args)
        for param, val in try_args.items():
            print('\tdelete %s to avoid max net limit' % n1id)
            self._delete_network(n1id)
            kwargs = full_args if param == 'all' else {param: val}
            print('\twith %s=%s' % (param, val))
            self.network1 = self._create_network(self.netname1, **kwargs)
            n1id = self.network1['id']
            self._wait_for_network(n1id, 'ACTIVE')
            self.network1 = self.client.get_network_details(n1id)
            if param == 'all':
                for p, v in full_args.items():
                    self.assertEqual(self.network1[p], v)
            else:
                self.assertEqual(self.network1[param], val)

    def test_connect_server(self):
        """Test connect_server"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self.network1 = self._create_network(self.netname1)
        self._wait_for_status(self.server1['id'], 'BUILD')
        self._wait_for_network(self.network1['id'], 'ACTIVE')
        self._test_0250_connect_server()

    def _test_0250_connect_server(self):
        self.client.connect_server(self.server1['id'], self.network1['id'])
        self.assertTrue(
            self._wait_for_nic(self.network1['id'], self.server1['id']))

    def test_disconnect_server(self):
        """Test disconnect_server"""
        self.test_connect_server()
        self._test_0250_disconnect_server()

    def _test_0250_disconnect_server(self):
        self.client.disconnect_server(self.server1['id'], self.network1['id'])
        self.assertTrue(self._wait_for_nic(
            self.network1['id'], self.server1['id'], in_creation=False))

    def _test_0260_wait_for_second_network(self):
        self.network2 = self._create_network(self.netname2)
        self._wait_for_network(self.network2['id'], 'ACTIVE')

    def test_list_server_nics(self):
        """Test list_server_nics"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self.network2 = self._create_network(self.netname2)
        self._wait_for_status(self.server1['id'], 'BUILD')
        self._wait_for_network(self.network2['id'], 'ACTIVE')
        self._test_0280_list_server_nics()

    def _test_0280_list_server_nics(self):
        r = self.client.list_server_nics(self.server1['id'])
        len0 = len(r)
        self.client.connect_server(self.server1['id'], self.network2['id'])
        self.assertTrue(
            self._wait_for_nic(self.network2['id'], self.server1['id']))
        r = self.client.list_server_nics(self.server1['id'])
        self.assertTrue(len(r) > len0)

    def test_list_networks(self):
        """Test list_network"""
        self.network1 = self._create_network(self.netname1)
        self._wait_for_network(self.network1['id'], 'ACTIVE')
        self._test_0290_list_networks()

    def _test_0290_list_networks(self):
        r = self.client.list_networks()
        self.assertTrue(len(r) > 1)
        ids = [net['id'] for net in r]
        names = [net['name'] for net in r]
        self.assertTrue('1' in ids)
        #self.assertTrue('public' in names)
        self.assertTrue(self.network1['id'] in ids)
        self.assertTrue(self.network1['name'] in names)

        r = self.client.list_networks(detail=True)
        ids = [net['id'] for net in r]
        names = [net['name'] for net in r]
        for net in r:
            self.assertTrue(net['id'] in ids)
            self.assertTrue(net['name'] in names)
            for term in ('status', 'updated', 'created'):
                self.assertTrue(term in net.keys())

    def test_list_network_nics(self):
        """Test list_server_nics"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self.network1 = self._create_network(self.netname1)
        self.network2 = self._create_network(self.netname2)
        self._wait_for_status(self.server1['id'], 'BUILD')
        self._wait_for_network(self.network1['id'], 'ACTIVE')
        self._wait_for_network(self.network2['id'], 'ACTIVE')
        self.client.connect_server(self.server1['id'], self.network1['id'])
        self.client.connect_server(self.server1['id'], self.network2['id'])
        self._wait_for_nic(self.network1['id'], self.server1['id'])
        self._wait_for_nic(self.network2['id'], self.server1['id'])
        self._test_0293_list_network_nics()

    def _test_0293_list_network_nics(self):
        netid1, netid2 = self.network1['id'], self.network2['id']
        r = self.client.list_network_nics(netid1)
        expected = ['nic-%s-1' % self.server1['id']]
        self.assertEqual(r, expected)
        r = self.client.list_network_nics(netid2)
        expected = ['nic-%s-2' % self.server1['id']]
        self.assertEqual(r, expected)

    def test_get_network_details(self):
        """Test get_network_details"""
        self.network1 = self._create_network(self.netname1)
        self._test_0300_get_network_details()

    def _test_0300_get_network_details(self):
        r = self.client.get_network_details(self.network1['id'])
        net1 = dict(self.network1)
        net1.pop('status')
        net1.pop('updated', None)
        net1.pop('attachments')
        r.pop('status')
        r.pop('updated', None)
        r.pop('attachments')
        self.assert_dicts_are_equal(net1, r)

    def test_update_network_name(self):
        self.network2 = self._create_network(self.netname2)
        self._test_0310_update_network_name()

    def _test_0310_update_network_name(self):
        updated_name = self.netname2 + '_upd'
        self.client.update_network_name(self.network2['id'], updated_name)

        def netwait(wait):
            r = self.client.get_network_details(self.network2['id'])
            if r['name'] == updated_name:
                return
            time.sleep(wait)
        self.do_with_progress_bar(
            netwait,
            'Network %s name is changing:' % self.network2['id'],
            self._waits[:5])

        r = self.client.get_network_details(self.network2['id'])
        self.assertEqual(r['name'], updated_name)

    """ Don't have auth to test this
Example 14
class OkeanosConnector(AbstractConnector):
    """
    Okeanos connector.
    """
    def __init__(self):
        AbstractConnector.__init__(self)
        self.__cyclades = None
        self.__network_client = None
        self.attach_public_ipv4 = False
        self.private_network = -1

    def authenticate(self, authentication=None):
        """

        :param authentication:
        :return:
        """
        if self.__cyclades is not None:
            return True
        try:
            authcl = AstakosClient(authentication['URL'],
                                   authentication['TOKEN'])
            authcl.authenticate()
            self.__cyclades = CycladesClient(
                authcl.get_service_endpoints('compute')['publicURL'],
                authentication['TOKEN'])
            self.__network_client = CycladesNetworkClient(
                authcl.get_service_endpoints('network')['publicURL'],
                authentication['TOKEN'])
        except ClientError:
            stderr.write('Connector initialization failed')
            return False
        return True

    def configure(self, configuration):
        self.authenticate(configuration['auth'])
        if 'private_network' in configuration and configuration[
                'private_network']:
            self.private_network = 0
        if 'attach_public_ipv4' in configuration and configuration[
                'attach_public_ipv4']:
            self.attach_public_ipv4 = True

    def prepare(self):
        """
        In this method, application-level IaaS related actions are executed.
        :return:
        """
        if self.private_network == 0:
            self.private_network = self.create_private_network()

    def create_vm(self, name, flavor_id, image_id):
        """

        :param name:
        :param flavor_id:
        :param image_id:
        :return:
        """
        networks = []
        if self.attach_public_ipv4:
            networks.append({'uuid': self.__create_floating_ip()})
        if self.private_network != -1:
            networks.append({'uuid': self.private_network})

        response = self.__cyclades.create_server(name=name,
                                                 flavor_id=flavor_id,
                                                 image_id=image_id,
                                                 networks=networks)
        ret_value = dict()
        ret_value['password'] = response['adminPass']
        ret_value['id'] = response['id']
        ret_value['user'] = response['metadata']['users']
        ret_value['hostname'] = 'snf-' + str(
            response['id']) + '.vm.okeanos.grnet.gr'
        self.__cyclades.wait_server(server_id=ret_value['id'],
                                    current_status='ACTIVE')
        return ret_value

    def delete_vm(self, server_id):
        """
        Delete VM method. The method blocks until the VM reaches the "DELETED" state.
        :param server_id:
        :return:
        """
        attachments = self.__cyclades.get_server_details(
            server_id)['attachments']
        port_id = None
        for a in attachments:
            if a['OS-EXT-IPS:type'] == 'floating':
                port_id = a['id']
        floating_ip_id = None
        for ip in self.__network_client.list_floatingips():
            if port_id is not None and ip['port_id'] == str(port_id):
                floating_ip_id = ip['id']
        self.__cyclades.delete_server(server_id)
        self.__cyclades.wait_server(
            server_id,
            current_status='DELETED')  # wait until server is deleted
        if floating_ip_id is not None:
            self.__wait_until_ip_released(floating_ip_id)
            self.__network_client.delete_floatingip(floating_ip_id)

    def __wait_until_ip_released(self, floating_ip_id):
        for i in range(1, MAX_WAIT_FOR_LOOPS + 1):
            for ip in self.__network_client.list_floatingips():
                if ip['id'] == floating_ip_id:
                    if ip['instance_id'] is None or ip['instance_id'] == 'None':
                        return True
            sleep(SLEEP_TIMEOUT)

    def list_vms(self):
        """


        :return:
        """
        return self.__cyclades.list_servers()

    def get_status(self, vm_id):
        """

        :param vm_id:
        :return:
        """
        return self.__cyclades.get_server_details(vm_id)

    def get_server_addresses(self,
                             vm_id,
                             ip_version=None,
                             connection_type=None):
        """
        Returns the enabled addresses, as referenced from the IaaS.
        """
        addresses = self.__cyclades.get_server_details(vm_id)['addresses']
        results = []
        while len(addresses) > 0:
            key, value = addresses.popitem()
            if (ip_version is None or value[0]['version'] == ip_version) and \
                    (connection_type is None or value[0]['OS-EXT-IPS:type'] == connection_type):
                results.append(value[0]['addr'])
        return results

    def __create_floating_ip(self):
        self.__network_client.floatingips_get()
        response = self.__network_client.create_floatingip()
        return response['floating_network_id']

    def create_private_network(self):
        """
        Creates a new private network and returns its id
        """
        response = self.__network_client.create_network(
            type='MAC_FILTERED', name='Deployment network')
        self.__network_client.create_subnet(network_id=response['id'],
                                            enable_dhcp=True,
                                            cidr='192.168.0.0/24')
        return response['id']

    def clone(self):
        new_connector = OkeanosConnector()
        new_connector.attach_public_ipv4 = self.attach_public_ipv4
        new_connector.private_network = self.private_network
        new_connector.__network_client = self.__network_client
        new_connector.__cyclades = self.__cyclades
        return new_connector

    def cleanup(self):
        if self.private_network != -1 and self.private_network != 0:
            self.__wait_until_private_net_is_empty(self.private_network)
            self.__network_client.delete_network(self.private_network)

    def __wait_until_private_net_is_empty(self, private_net_id):

        for i in range(1, MAX_WAIT_FOR_LOOPS):
            port_set = set()
            for p in self.__network_client.list_ports():
                port_set.add(p['network_id'])
            if private_net_id not in port_set:
                return
            else:
                sleep(SLEEP_TIMEOUT)

    def serialize(self):
        d = dict()
        d['attach_public_ipv4'] = self.attach_public_ipv4
        d['private_network'] = self.private_network
        return d

    def deserialize(self, state):
        self.attach_public_ipv4 = state['attach_public_ipv4']
        self.private_network = state['private_network']
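
A hypothetical end-to-end use of the connector above; the token, flavor id and image id are placeholders.

connector = OkeanosConnector()
connector.configure({
    'auth': {'URL': 'https://accounts.okeanos.grnet.gr/identity/v2.0',
             'TOKEN': 'user-api-token'},  # placeholder token
    'private_network': True,      # prepare() will create a MAC_FILTERED network
    'attach_public_ipv4': True,   # new VMs get a floating IP
})
connector.prepare()
vm = connector.create_vm('test-vm', 'flavor-id', 'image-id')  # placeholder ids
print(vm['hostname'])
connector.delete_vm(vm['id'])
connector.cleanup()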
Example 15
def main():
    """Parse arguments, use kamaki to create cluster, setup using ssh"""

    (opts, args) = parse_arguments(sys.argv[1:])

    global CYCLADES, TOKEN

    AUTHENTICATION_URL = opts.cyclades
    TOKEN = opts.token

    # Cleanup stale servers from previous runs
    if opts.show_stale:
        cleanup_servers(prefix=opts.prefix, delete_stale=opts.delete_stale)
        return 0

    # Initialize a kamaki instance, get endpoints
    user = AstakosClient(AUTHENTICATION_URL, TOKEN)
    my_accountData = user.authenticate()
    endpoints = user.get_endpoints() 
    cyclades_endpoints = user.get_endpoints('compute')
    cyclades_base_url = parseAstakosEndpoints(endpoints,'cyclades_compute')
    cyclades_network_base_url = parseAstakosEndpoints(endpoints,'cyclades_network')
    my_cyclades_client = CycladesClient(cyclades_base_url, TOKEN)
    my_compute_client = ComputeClient(cyclades_base_url, TOKEN)
    my_network_client = CycladesNetworkClient(cyclades_network_base_url, TOKEN) 

    cnt = int(opts.clustersize)	# calculate size of cluster into 'cnt'
    # Initialize
    nodes = []
    masterName = ''
    # Create a file to store the root password for later use
    pass_fname = opts.hadoop_dir+'/bak/adminPass'+str(datetime.now())[:19].replace(' ', '')
    adminPass_f = open(pass_fname, 'w')

    myNetworks = my_network_client.list_networks();  
    NetWork_free = parseNetwork(myNetworks,'public');
    myIp = my_network_client.create_floatingip(NetWork_free);  
    LastIp = myIp.get("floating_ip_address")

    initialClusterSize = 0
    server = {}
    if opts.extend == False:
        # Create master node (0th node)
        server = create_machine(opts, my_cyclades_client, 0)
        if server == {}:
            return
    else:
        servers = my_cyclades_client.list_servers(detail=True)
        cluster = [s for s in servers if s["name"].startswith(opts.prefix)]
        initialClusterSize = len(cluster)
        if initialClusterSize==0:
            log.info("Cluster cannot be expanded: it does not exist.")
            return

    servername = "%s-0" % (opts.prefix)
    masterName = servername
    nodes.append(server)

    # Create slave (worker) nodes
    if cnt>1 or opts.extend:
        startingOffset = 1
        if opts.extend: startingOffset = initialClusterSize
        for i in xrange(startingOffset, initialClusterSize+cnt):
            server = {}
            server = create_machine(opts, my_cyclades_client, i)
            if server == {}:
                return;
            nodes.append(server)
            servername = "%s-%d" % (opts.prefix, i)
            # Write the root password to a file
            adminPass_f.write('machine = %s, password = %s\n' % (servername, server['adminPass']))

    adminPass_f.close()

    # Setup Hadoop files and settings on all cluster nodes
    # Create the 'cluster' dictionary out of servers, with only Hadoop-relevant keys (name, ip, integer key)
    servers = my_cyclades_client.list_servers(detail=True)
    cluster = [s for s in my_cyclades_client.list_servers(detail=True) if s["name"].startswith(opts.prefix)]
    cluster = [(s["name"], s["attachments"][1]["ipv4"], int(s["name"][s["name"].find('-')+1:])) for s in cluster]
    cluster = sorted(cluster, key=lambda cluster: cluster[2])

    # Prepare Ansible-Hadoop config files (hosts, conf/slaves)
    hosts = open(opts.hadoop_dir+'/hosts', 'w')
    hosts.write('[master]\n')
    for i in xrange(0, initialClusterSize+cnt):
        for s in cluster:
            if s[0] == opts.prefix+"-"+str(i):
                if s[0] == masterName:
                    hosts.write(s[1]+'\n\n'+'[slaves]\n')
                else:
                    hosts.write(s[1]+'\n')
    hosts.close()

    slaves = open(opts.hadoop_dir+'/conf/slaves', 'w')
    for s in cluster[1:]:
        slaves.write(s[1]+'\n')
    slaves.close()

    # Execute respective ansible playbook
    if (opts.extend==False):
        cmd = "ansible-playbook hadoop.yml -i hosts -vv --extra-vars \""+"master_ip="+cluster[0][1]+"\""+" -l master"
        retval = os.system(cmd)
        cmd = "ansible-playbook hadoop.yml -i hosts -vv --extra-vars \""+"master_ip="+cluster[0][1]+"\""+" -l slaves"
        retval = os.system(cmd)
        slave_ip_list = []
        for i in xrange(1, cnt):
            slave_ip_list.append(cluster[i][1]) 
        enable_ssh_login(cluster[0][1], [cluster[0][1]])
        enable_ssh_login(cluster[0][1], slave_ip_list)
    else:
        hosts_latest = open(opts.hadoop_dir+'/hosts.latest', 'w')
        hosts_latest.write('[master]\n')
        hosts_latest.write(cluster[0][1]+'\n\n'+'[slaves]\n')
        for i in xrange(initialClusterSize, initialClusterSize+cnt):
            hosts_latest.write(cluster[i][1]+'\n')
        hosts_latest.close()
        cmd = "ansible-playbook hadoop.yml -i hosts.latest -vv --extra-vars \""+"master_ip="+cluster[0][1]+"\""+" -l slaves"
        retval = os.system(cmd) 
        slave_ip_list = []
        for i in xrange(initialClusterSize, initialClusterSize+cnt):
            slave_ip_list.append(cluster[i][1]) 
        enable_ssh_login(cluster[0][1], slave_ip_list)

    # Update conf/slaves in master
    cmd = "ansible-playbook hadoop.yml -i hosts -vv --extra-vars \""+"master_ip="+cluster[0][1]+"\""+" -l master -t slaves"
    retval = os.system(cmd)

    log.info("Done.")
Example 16
class OkeanosNativeClient(object):
    VOLUME_STATUS_CREATING = 'CREATING'
    VOLUME_STATUS_IN_USE = 'IN_USE'
    VOLUME_STATUS_DELETING = 'DELETING'
    VOLUME_STATUS_DELETED = 'DELETED'

    def __init__(self, token, authURL='https://accounts.okeanos.grnet.gr/identity/v2.0'):
        """
        :type authURL: str
        :type token: str
        """
        from kamaki.clients.utils import https
        https.patch_ignore_ssl()

        self.authURL = authURL
        self.token = token
        self.cycladesServiceType = CycladesClient.service_type
        self.blockStorageServiceType = CycladesBlockStorageClient.service_type
        self.astakosClient = AstakosClient(self.authURL, self.token)
        endpointF = self.astakosClient.get_service_endpoints
        self.cycladesEndpoint = endpointF(self.cycladesServiceType)[u'publicURL']
        self.cycladesClient = CycladesClient(self.cycladesEndpoint, self.token)
        self.blockStorageEndpoint = endpointF(self.blockStorageServiceType)[u'publicURL']
        self.blockStorageClient = CycladesBlockStorageClient(self.blockStorageEndpoint, token)

        flavorsById = {}
        flavorsByName = {}
        for flavor in self.cycladesClient.list_flavors():
            _id = flavor[u'id']
            name = flavor[u'name']
            flavorsById[_id] = name
            flavorsByName[name] = _id
        self.flavorsById = flavorsById
        self.flavorsByName = flavorsByName

    def getFlavorId(self, idOrName):
        """
        :rtype : str
        :type idOrName: str
        """
        if idOrName in self.flavorsById:
            return idOrName
        elif idOrName in self.flavorsByName:
            return self.flavorsByName[idOrName]
        else:
            return idOrName  # caller's responsibility

    def listNodes(self):
        """
        :rtype : list(ListNodeResult)
        """
        instanceInfoList = []
        servers = self.cycladesClient.list_servers()
        for server in servers:
            serverId = str(server[u'id'])  # It is a number in the result
            serverDetails = self.cycladesClient.get_server_details(serverId)
            serverStatusS = serverDetails[u'status']
            serverStatus = NodeStatus(serverStatusS)
            # serverFlavourId = serverDetails[u'flavor'][u'id']
            # serverImageId = serverDetails[u'image'][u'id']
            instanceInfo = ListNodeResult(serverId, serverStatus, serverDetails)
            instanceInfoList.append(instanceInfo)
        return instanceInfoList

    def createVolume(self, serverId, sizeGB, projectId, sleepWaitSeconds=5):
        """
        :param serverId: str
        :param sizeGB: Union[str, int]
        :param projectId: str
        :rtype str
        """
        self.log("> serverId=%s, sizeGB=%s, projectId=%s" % (serverId, sizeGB, projectId))

        response = self.blockStorageClient.create_volume(sizeGB,
                                                         serverId,
                                                         '%s-vol-%s' % (serverId, sizeGB),
                                                         project=projectId)
        # The volume is being created asynchronously, status is 'creating'
        # we wait until it changes (to 'in_use')
        volumeId = response[u'id']

        def getVolumeDetails():
            _volumeDetails = self.blockStorageClient.get_volume_details(volumeId)
            _volumeStatus = _volumeDetails[u'status'].upper()
            self.log("volumeDetails = %s" % _volumeDetails)
            return _volumeDetails, _volumeStatus

        volumeDetails, volumeStatus = getVolumeDetails()
        while volumeStatus == OkeanosNativeClient.VOLUME_STATUS_CREATING:
            time.sleep(sleepWaitSeconds)
            volumeDetails, volumeStatus = getVolumeDetails()

        # response is something like this
        # {
        #     u'display_name': u'foo',
        #     u'id': u'46974',
        #     u'links': [
        #         {
        #             u'href': u'https://cyclades.okeanos.grnet.gr/volume/v2.0/volumes/46974',
        #             u'rel': u'self'
        #         }, {
        #             u'href': u'https://cyclades.okeanos.grnet.gr/volume/v2.0/volumes/46974',
        #             u'rel': u'bookmark'
        #         }
        #     ]
        # }

        self.log("< %s" % response)

        return response

    def attachVolume(self, serverId, sizeGB, projectId):
        """Create and attach an extra volume to the VM, returning the volume name, the volume id and the device name"""
        self.log("> serverId = %s, sizeGB = %s, projectId = %s" % (serverId, sizeGB, projectId))
        result = self.createVolume(serverId, sizeGB, projectId)
        volumeId = result['id']
        return volumeId

    def deleteVolume(self, volumeId, sleepWaitSeconds=5):
        """
        Deletes the volume identified by the given `volumeId`.
        :param volumeId: str
        :return:
        """

        def getVolumeDetails():
            _volumeDetails = self.blockStorageClient.get_volume_details(volumeId)
            _volumeStatus = _volumeDetails[u'status'].upper()
            self.log("volumeDetails = %s" % _volumeDetails)
            return _volumeDetails, _volumeStatus

        volumeDetails, volumeStatus = getVolumeDetails()
        response = self.blockStorageClient.delete_volume(volumeId)

        # Normal status transition is:
        #   OkeanosNativeClient.VOLUME_STATUS_IN_USE    =>
        #   OkeanosNativeClient.VOLUME_STATUS_DELETING  =>
        #   OkeanosNativeClient.VOLUME_STATUS_DELETED

        while volumeStatus == OkeanosNativeClient.VOLUME_STATUS_IN_USE:
            time.sleep(sleepWaitSeconds)
            volumeDetails, volumeStatus = getVolumeDetails()

        # Now it should be in status:
        #   OkeanosNativeClient.VOLUME_STATUS_DELETING
        #
        # Note that real deletion means status:
        #   OkeanosNativeClient.VOLUME_STATUS_DELETED
        #
        # ... But let's not wait that long

        return response

    def createNode(self, nodeName, flavorIdOrName, imageId,
                   sshPubKey=None,
                   initScriptPathAndData=None,
                   remoteUsername="******",
                   remoteUsergroup=None,
                   localPubKeyData=None,
                   createAsyncInitScript=True,
                   projectId=None):
        """

        :rtype : NodeDetails
        :type localPubKeyData: str
        :type sshPubKey: str
        :type imageId: str
        :type flavorIdOrName: str
        :type nodeName: str
        """
        self.log("Creating node '%s', %s, %s" % (nodeName, flavorIdOrName, imageId))

        sshPubKey = sshPubKey or None
        if sshPubKey is not None:
            self.log("User SSH public key to be injected in %s: %s" % (nodeName, sshPubKey))
        remoteUsergroup = remoteUsergroup or remoteUsername
        flavorId = self.getFlavorId(flavorIdOrName)

        # We make sure:
        # a) The orchestrator can do password-less SSH on the newly created machine (via ~/.ssh/id_rsa.pub)
        # b) The SlipStream user can do password-less SSH on the newly created machine (via the provided userPubKey)
        # c) The provided init script is injected

        localPubKeyData = localPubKeyData or loadPubRsaKeyData()
        self.log("Local SSH public key to be injected in %s: %s" % (nodeName, localPubKeyData))

        if sshPubKey is None:
            authorized_keys = localPubKeyData
        else:
            if not localPubKeyData.endswith('\n'):
                localPubKeyData += '\n'
            authorized_keys = "%s%s" % (localPubKeyData, sshPubKey)

        # See https://www.synnefo.org/docs/kamaki/latest/developers/showcase.html#inject-ssh-keys
        import base64
        personality = [
            dict(
                contents=base64.b64encode(authorized_keys),
                path="/%s/.ssh/authorized_keys" % remoteUsername,
                owner=remoteUsername,
                group=remoteUsergroup,
                mode=0600
            )
        ]

        if initScriptPathAndData is not None:
            initScriptPath, initScriptData = initScriptPathAndData

            personality.append(
                dict(
                    contents=base64.b64encode(initScriptData),
                    path=initScriptPath,
                    owner=remoteUsername,
                    group=remoteUsergroup,
                    mode=0777
                )
            )

            # In order for the contextualization script to run asynchronously,
            # we create another script that launches the original via nohup
            if createAsyncInitScript:
                asyncInitScriptPath = "%s.async" % initScriptPath
                asyncInitScriptData = "#!/bin/sh -e\nexec nohup %s &\n" % initScriptPath

                personality.append(
                    dict(
                        contents=base64.b64encode(asyncInitScriptData),
                        path=asyncInitScriptPath,
                        owner=remoteUsername,
                        group=remoteUsergroup,
                        mode=0777
                    )
                )
            else:
                asyncInitScriptPath = None
        else:
            initScriptPath = None
            initScriptData = None
            asyncInitScriptPath = None
            asyncInitScriptData = None

        self.log(">> Personalities")
        for _p in personality:
            self.log(">>>> %s" % _p)

        resultDict = self.cycladesClient.create_server(nodeName,
                                                       flavorId,
                                                       imageId,
                                                       personality=personality,
                                                       project_id=projectId)
        # No IP is included in this result
        nodeDetails = NodeDetails(resultDict,
                                  sshPubKey=sshPubKey,
                                  initScriptPath=initScriptPath,
                                  initScriptData=initScriptData,
                                  asyncInitScriptPath=asyncInitScriptPath)
        self.log("Created node %s status %s, adminPass = %s, ip4s = %s" % (nodeDetails.id, nodeDetails.status.okeanosStatus, nodeDetails.adminPass, nodeDetails.ipv4s))
        return nodeDetails

    def runCommandOnNode(self, nodeDetails, command,
                         username='******',
                         localPrivKey=None,
                         timeout=None,
                         runSynchronously=True):
        """
        :type timeout: int
        :type localPrivKey: str
        :type nodeDetails: NodeDetails
        :type command: str
        """
        hostname = nodeDetails.ipv4s[0]
        return runCommandOnHost(hostname, command,
                                username=username,
                                localPrivKey=localPrivKey,
                                timeout=timeout,
                                runSynchronously=runSynchronously)

    def checkSshOnNode(self, nodeDetails, username="******", localPrivKey=None, timeout=None):
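        """Check whether SSH currently answers on the node's first IPv4 address (delegates to checkSshOnHost)."""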
        hostname = nodeDetails.ipv4s[0]
        return checkSshOnHost(hostname, username=username, localPrivKey=localPrivKey, timeout=timeout)

    def waitSshOnHost(self, hostname, username="******", localPrivKey=None, timeout=None, sleepSeconds=10):
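        """Poll checkSshOnHost every `sleepSeconds` seconds until SSH answers on `hostname`, logging how long it took."""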
        t0 = time.time()
        while True:
            if checkSshOnHost(hostname, username=username, localPrivKey=localPrivKey, timeout=timeout):
                t1 = time.time()
                dtsec = t1 - t0
                self.log("SSH good for %s@%s after %s sec" % (username, hostname, dtsec))
                break
            else:
                time.sleep(sleepSeconds)

    def waitSshOnNode(self, nodeDetails, username="******", localPrivKey=None, timeout=None):
        hostname = nodeDetails.ipv4s[0]
        self.waitSshOnHost(hostname, username=username, localPrivKey=localPrivKey, timeout=timeout)

    def getNodeDetails(self, nodeId):
        """
        :type nodeId: str
        :rtype : NodeDetails
        """
        # from kamaki.cli import logger
        # logger.add_file_logger('kamaki.clients.sent', filename='get_server_details.log')
        # logger.add_file_logger('kamaki.clients.recv', filename='get_server_details.log')

        resultDict = self.cycladesClient.get_server_details(nodeId)
        nodeDetails = NodeDetails(resultDict)
        return nodeDetails

    def waitNodeStatus(self, nodeId, expectedOkeanosStatus, sleepSeconds=5):
        """
        :type expectedOkeanosStatus: str
        :type nodeId: str
        """
        t0 = time.time()
        nodeDetails = self.getNodeDetails(nodeId)
        while nodeDetails.status.okeanosStatus != expectedOkeanosStatus:
            time.sleep(sleepSeconds)
            nodeDetails = self.getNodeDetails(nodeId)
        t1 = time.time()
        dtsec = t1 - t0
        self.log("Node %s status %s after %s sec" % (nodeId, expectedOkeanosStatus, dtsec))
        return nodeDetails

    def waitCurrentStatus(self, nodeId, currentOkeanosStatus, sleepSeconds=5, maxSleepSeconds=400):
        """ Wait untile the current status changes
        :type nodeId: str
        :type currentOkeanosStatus: str
        :type sleepSeconds: float
        """
        t0 = time.time()
        self.cycladesClient.wait_server(nodeId,
                                        current_status=currentOkeanosStatus,
                                        delay=sleepSeconds,
                                        max_wait=maxSleepSeconds)
        nodeDetails = self.getNodeDetails(nodeId)
        newOkeanosStatus = nodeDetails.status.okeanosStatus
        t1 = time.time()
        dtsec = t1 - t0
        self.log("Node %s status %s -> %s after %s sec" % (nodeId, currentOkeanosStatus, newOkeanosStatus, dtsec))
        return nodeDetails

    def createNodeAndWait(self, nodeName, flavorIdOrName, imageId, sshPubKey, initScriptPathAndData=None,
                          remoteUsername="******", remoteUsergroup=None, localPubKeyData=None, localPrivKey=None,
                          sshTimeout=None, runInitScriptSynchronously=False,
                          extraVolatileDiskGB=0, projectId=None):
        """

        :type extraVolatileDiskGB: int
        :type runInitScriptSynchronously: bool
        :type sshPubKey: str
        :type imageId: str
        :type flavorIdOrName: str
        :type nodeName: str
        :type sshTimeout: float
        :rtype : NodeDetails
        """
        localPrivKey = localPrivKey or loadRsaPrivKey()

        # Note that this returned value (NodeDetails) contains the adminPass
        nodeDetails = self.createNode(nodeName, flavorIdOrName, imageId, sshPubKey,
                                      initScriptPathAndData=initScriptPathAndData,
                                      remoteUsername=remoteUsername,
                                      remoteUsergroup=remoteUsergroup,
                                      localPubKeyData=localPubKeyData,
                                      projectId=projectId)
        nodeId = nodeDetails.id
        nodeDetailsActive = self.waitCurrentStatus(nodeId, NodeStatus.BUILD)
        nodeDetails.updateIPsAndStatusFrom(nodeDetailsActive)

        # attach any additional disk
        hostIP = nodeDetails.ipv4s[0]
        if extraVolatileDiskGB:
            self.log("Creating volatile disk of size %s GB for machine IP=%s, id=%s" % (extraVolatileDiskGB, hostIP, nodeId))
            volumeId = self.createVolume(nodeId, extraVolatileDiskGB, projectId)[u'id']
            self.log("Created volumeId=%s of size %s GB for machine IP=%s, id=%s" % (volumeId, extraVolatileDiskGB, hostIP, nodeId))
            # We do nothing more with the volumeId.
            # When the VM is destroyed by the IaaS, the extra disk is automatically destroyed as well.
        else:
            self.log("No need for extra volatile disk for machine IP=%s, id=%s" % (hostIP, nodeId))

        # Sometimes, right after the node is reported ACTIVE, the network is unreachable or SSH is not immediately ready.
        # We have to cope with that by waiting.
        sshTimeout = sshTimeout or 7.0
        self.waitSshOnNode(nodeDetails, username=remoteUsername, localPrivKey=localPrivKey, timeout=sshTimeout)

        initScriptPath = nodeDetails.initScriptPath

        runResult = self.runCommandOnNode(nodeDetails, initScriptPath,
                                          username=remoteUsername,
                                          localPrivKey=localPrivKey,
                                          runSynchronously=runInitScriptSynchronously)
        return nodeDetails, runResult
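
    # Illustrative end-to-end provisioning sketch (not part of the original code);
    # the node name, flavor, image id, key material and project id below are all
    # hypothetical placeholders:
    #
    #     nodeDetails, runResult = client.createNodeAndWait(
    #         'worker-1', 'C4R4096D40drbd', 'some-image-uuid',
    #         sshPubKey=userPubKey,
    #         initScriptPathAndData=('/root/init.sh', initScriptContents),
    #         localPubKeyData=orchestratorPubKey,
    #         localPrivKey=orchestratorPrivKey,
    #         extraVolatileDiskGB=20,
    #         projectId='some-project-uuid')
    #     print(nodeDetails.ipv4s[0])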

    def shutdownNode(self, nodeId):
        """
        :rtype : NodeDetails
        :type nodeId: str
        """
        self.log("Shutting down node %s" % nodeId)
        nodeDetails = self.getNodeDetails(nodeId)
        if not nodeDetails.status.isStopped():
            self.cycladesClient.shutdown_server(nodeId)
            self.log("Shutdown node %s status %s" % (nodeId, nodeDetails.status.okeanosStatus))
        else:
            self.log("Node %s already shut down" % nodeId)
        return nodeDetails

    def shutdownNodeAndWait(self, nodeId):
        """
        :rtype : NodeDetails
        :type nodeId: str
        """
        nodeDetails = self.shutdownNode(nodeId)
        if not nodeDetails.status.isStopped():
            nodeDetailsWait = self.waitNodeStatus(nodeId, NodeStatus.STOPPED)
            nodeDetails.updateStatusFrom(nodeDetailsWait)
            self.log("Shutdown node %s status %s" % (nodeId, nodeDetails.status.okeanosStatus))
        return nodeDetails

    def deleteNode(self, nodeId):
        """
        :rtype : NodeDetails
        :type nodeId: str
        """
        self.log("Deleting nodeId %s" % nodeId)
        nodeDetails = self.getNodeDetails(nodeId)
        if not nodeDetails.status.isDeleted():
            self.cycladesClient.delete_server(nodeId)
            self.log("Deleted node %s status %s" % (nodeId, nodeDetails.status.okeanosStatus))
        return nodeDetails

    def deleteNodeAndWait(self, nodeId):
        """
        :rtype : NodeDetails
        :type nodeId: str
        """
        nodeDetails = self.deleteNode(nodeId)
        if not nodeDetails.status.isDeleted():
            nodeDetailsWait = self.waitNodeStatus(nodeId, NodeStatus.DELETED)
            nodeDetails.updateStatusFrom(nodeDetailsWait)
            self.log("Deleted node %s status %s" % (nodeId, nodeDetails.status.okeanosStatus))
        return nodeDetails

    def log(self, msg=''):
        who = '%s::%s' % (self.__class__.__name__, inspect.stack()[1][3])
        LOG('%s# %s' % (who, msg))

    def getNodeIPv4(self, nodeId):
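        """Return the first IPv4 address reported for the given node.
        :type nodeId: str
        :rtype : str
        """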
        nodeDetails = self.getNodeDetails(nodeId)
        ipv4 = nodeDetails.ipv4s[0]
        LOG("< for nodeId = %s, IPv4 = %s" % (nodeId, ipv4))
        return ipv4

    def getNodePartitions(self, nodeId,
                          username='******',
                          localPrivKey=None,
                          timeout=None,
                          ssh=None):
        self.log("> nodeId = %s" % nodeId)
        ipv4 = self.getNodeIPv4(nodeId)
        status, partitions = getHostPartitions(ipv4,
                                               username=username,
                                               localPrivKey=localPrivKey,
                                               timeout=timeout,
                                               ssh=ssh)
        self.log("< status = %s, partitions = %s" % (status, partitions))
        return status, partitions

    def waitForExtraNodePartition(self, serverId, partitions,
                                  username='******',
                                  localPrivKey=None,
                                  timeout=None,
                                  ssh=None):
        """
        Given the set of pre-existing partitions, wait until a new one appears and return it.
        :param serverId: str
        :param partitions: set[str]
        :return: the set of newly appeared partitions (normally one); prepend '/dev/' to get the device name
        """
        def getem():
            return self.getNodePartitions(serverId,
                                          username=username,
                                          localPrivKey=localPrivKey,
                                          timeout=timeout,
                                          ssh=ssh)

        self.log("Waiting, current partitions: %s" % partitions)
        status1, partitions1 = getem()
        if status1 != 0:
            return None

        while partitions == partitions1:
            self.log("Looping, new partitions: %s" % partitions1)
            status1, partitions1 = getem()
            if status1 != 0:
                return None

        # We assume one more is added ...
        newPartition = partitions1.difference(partitions)
        self.log("< For serverId = %s, new partition = %s" % (serverId, newPartition))
        return newPartition
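
    # Illustrative sketch of the extra-disk workflow supported by the methods
    # above (assumes `client`, `serverId` and `projectId` are already known; the
    # SSH defaults of getNodePartitions are used as-is):
    #
    #     status, before = client.getNodePartitions(serverId)
    #     client.attachVolume(serverId, 20, projectId)
    #     newPartitions = client.waitForExtraNodePartition(serverId, before)
    #     # device name would then be '/dev/' + next(iter(newPartitions))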

    def resizeNode(self, serverId, flavorIdOrName):
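        """Resize a node to a new flavor: shut it down, apply the new flavor, then start it again.
        :type serverId: str
        :type flavorIdOrName: str
        """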
        flavorId = self.getFlavorId(flavorIdOrName)
        nodeDetails = self.getNodeDetails(serverId)
        self.log("Node %s status is %s" % (serverId, nodeDetails.status.okeanosStatus))
        existingFlavorId = nodeDetails.flavorId
        self.log("Requested flavorId = %s [given: %s]" % (flavorId, flavorIdOrName))

        if existingFlavorId == flavorId:
            self.log("FlavorId already is %s, no resizing action is needed !" % flavorId)
            return

        t0 = time.time()

        self.log("Resizing from %s -> %s" % (existingFlavorId, flavorId))
        # Hot resizing is not supported, so we must shut the server down first
        self.log("Shutting down node %s" % serverId)
        nodeDetails = self.shutdownNodeAndWait(serverId)
        self.log("Node %s status is %s" % (serverId, nodeDetails.status.okeanosStatus))

        # This takes the server to status 'RESIZE'
        self.log("Resizing node %s ..." % serverId)
        resizeResponse = self.cycladesClient.resize_server(serverId, flavorId)
        self.log("resizeResponse = %s" % resizeResponse)

        # Wait until the server acquires the new flavor
        nodeDetails = self.getNodeDetails(serverId)
        while nodeDetails.flavorId != flavorId:
            nodeDetails = self.getNodeDetails(serverId)
        self.log("Node %s resized" % serverId)

        # And we are now ready to restart with the new flavor
        self.log("Restarting node %s" % serverId)
        self.cycladesClient.start_server(serverId)
        self.waitNodeStatus(serverId, NodeStatus.ACTIVE)

        t1 = time.time()
        dtsec = t1 - t0
        self.log("Node %s restarted with new flavor %s in %s sec" % (serverId, flavorId, dtsec))
        return flavorId
Esempio n. 17
0
class Image(livetest.Generic):
    def setUp(self):
        self.now = time.mktime(time.gmtime())
        self.cloud = 'cloud.%s' % self['testcloud']
        aurl, self.token = self[self.cloud, 'url'], self[self.cloud, 'token']
        self.auth_base = AstakosCachedClient(aurl, self.token)
        self.imgname = 'img_%s' % self.now
        url = self.auth_base.get_service_endpoints('image')['publicURL']
        self.token = self.auth_base.token
        self.client = ImageClient(url, self.token)
        cyclades_url = self.auth_base.get_service_endpoints(
            'compute')['publicURL']
        self.cyclades = CycladesClient(cyclades_url, self.token)
        self._imglist = {}
        self._imgdetails = {}

    def test_000(self):
        self._prepare_img()
        super(self.__class__, self).test_000()

    def _prepare_img(self):
        f = open(self['image', 'local_path'], 'rb')
        (token, uuid) = (self.token, self.auth_base.user_term('id'))
        purl = self.auth_base.get_service_endpoints(
            'object-store')['publicURL']
        from kamaki.clients.pithos import PithosClient
        self.pithcli = PithosClient(purl, token, uuid)
        cont = 'cont_%s' % self.now
        self.pithcli.container = cont
        self.obj = 'obj_%s' % self.now
        print('\t- Create container %s on Pithos server' % cont)
        self.pithcli.container_put()
        self.location = 'pithos://%s/%s/%s' % (uuid, cont, self.obj)
        print('\t- Upload an image at %s...\n' % self.location)
        self.pithcli.upload_object(self.obj, f)
        print('\t- ok')
        f.close()

        r = self.client.register(
            self.imgname, self.location, params=dict(is_public=True))
        self._imglist[self.imgname] = dict(
            name=r['name'], id=r['id'])
        self._imgdetails[self.imgname] = r

    def tearDown(self):
        for img in self._imglist.values():
            print('\tDeleting image %s' % img['id'])
            self.cyclades.delete_image(img['id'])
        if hasattr(self, 'pithcli'):
            print('\tDeleting container %s' % self.pithcli.container)
            try:
                self.pithcli.del_container(delimiter='/')
                self.pithcli.purge_container()
            except ClientError:
                pass

    def _get_img_by_name(self, name):
        r = self.cyclades.list_images()
        for img in r:
            if img['name'] == name:
                return img
        return None

    def test_list_public(self):
        """Test list_public"""
        self._test_list_public()

    def _test_list_public(self):
        r = self.client.list_public()
        r0 = self.client.list_public(order='-')
        self.assertTrue(len(r) > 0)
        for img in r:
            for term in (
                    'status',
                    'name',
                    'container_format',
                    'disk_format',
                    'id',
                    'size'):
                self.assertTrue(term in img)
        self.assertTrue(r, r0)
        r0.reverse()
        for i, img in enumerate(r):
            self.assert_dicts_are_equal(img, r0[i])
        r1 = self.client.list_public(detail=True)
        for img in r1:
            for term in (
                    'status',
                    'name',
                    'checksum',
                    'created_at',
                    'disk_format',
                    'updated_at',
                    'id',
                    'location',
                    'container_format',
                    'owner',
                    'is_public',
                    'deleted_at',
                    'properties',
                    'size'):
                self.assertTrue(term in img)
                if len(img['properties']):
                    for interm in ('osfamily', 'root_partition'):
                        self.assertTrue(interm in img['properties'])
        size_max = 1000000000000
        r2 = self.client.list_public(filters=dict(size_max=size_max))
        self.assertTrue(len(r2) <= len(r))
        for img in r2:
            self.assertTrue(int(img['size']) <= size_max)

    def test_get_meta(self):
        """Test get_meta"""
        self._test_get_meta()

    def _test_get_meta(self):
        r = self.client.get_meta(self['image', 'id'])
        self.assertEqual(r['id'], self['image', 'id'])
        for term in (
                'status',
                'name',
                'checksum',
                'updated-at',
                'created-at',
                'deleted-at',
                'location',
                'is-public',
                'owner',
                'disk-format',
                'size',
                'container-format'):
            self.assertTrue(term in r)
            for interm in (
                    'OSFAMILY',
                    'USERS',
                    'ROOT_PARTITION',
                    'OS',
                    'DESCRIPTION'):
                self.assertTrue(interm in r['properties'])

    def test_register(self):
        """Test register"""
        self._prepare_img()
        self._test_register()

    def _test_register(self):
        self.assertTrue(self._imglist)
        for img in self._imglist.values():
            self.assertTrue(img is not None)
            r = set(self._imgdetails[img['name']].keys())
            self.assertTrue(r.issubset(IMGMETA.union(['properties'])))

    def test_unregister(self):
        """Test unregister"""
        self._prepare_img()
        self._test_unregister()

    def _test_unregister(self):
        try:
            for img in self._imglist.values():
                self.client.unregister(img['id'])
                self._prepare_img()
                break
        except ClientError as ce:
            if ce.status in (405,):
                print('IMAGE UNREGISTER is not supported by server: %s' % ce)
            else:
                raise

    def test_set_members(self):
        """Test set_members"""
        self._prepare_img()
        self._test_set_members()

    def _test_set_members(self):
        members = ['*****@*****.**' % self.now]
        for img in self._imglist.values():
            self.client.set_members(img['id'], members)
            r = self.client.list_members(img['id'])
            self.assertEqual(r[0]['member_id'], members[0])

    def test_list_members(self):
        """Test list_members"""
        self._test_list_members()

    def _test_list_members(self):
        self._test_set_members()

    def test_remove_members(self):
        """Test remove_members - NO CHECK"""
        self._prepare_img()
        self._test_remove_members()

    def _test_remove_members(self):
        return
        members = ['*****@*****.**' % self.now, '*****@*****.**' % self.now]
        for img in self._imglist.values():
            self.client.set_members(img['id'], members)
            r = self.client.list_members(img['id'])
            self.assertTrue(len(r) > 1)
            self.client.remove_member(img['id'], members[0])
            r0 = self.client.list_members(img['id'])
            self.assertEqual(len(r), 1 + len(r0))
            self.assertEqual(r0[0]['member_id'], members[1])

    def test_list_shared(self):
        """Test list_shared - NOT CHECKED"""
        self._test_list_shared()

    def _test_list_shared(self):
        # No way to test this if I don't have member images
        pass
Esempio n. 18
0
class SynnefoCI(object):
    """SynnefoCI python class"""

    def __init__(self, config_file=None, build_id=None, cloud=None):
        """ Initialize SynnefoCI python class

        Setup logger, local_dir, config and kamaki
        """
        # Setup logger
        self.logger = logging.getLogger('synnefo-ci')
        self.logger.setLevel(logging.DEBUG)

        handler1 = logging.StreamHandler(sys.stdout)
        handler1.setLevel(logging.DEBUG)
        handler1.addFilter(_InfoFilter())
        handler1.setFormatter(_MyFormatter())
        handler2 = logging.StreamHandler(sys.stderr)
        handler2.setLevel(logging.WARNING)
        handler2.setFormatter(_MyFormatter())

        self.logger.addHandler(handler1)
        self.logger.addHandler(handler2)

        # Get our local dir
        self.ci_dir = os.path.dirname(os.path.abspath(__file__))
        self.repo_dir = os.path.dirname(self.ci_dir)

        # Read config file
        if config_file is None:
            config_file = os.path.join(self.ci_dir, DEFAULT_CONFIG_FILE)
        config_file = os.path.abspath(config_file)
        self.config = ConfigParser()
        self.config.optionxform = str
        self.config.read(config_file)

        # Read temporary_config file
        self.temp_config_file = \
            os.path.expanduser(self.config.get('Global', 'temporary_config'))
        self.temp_config = ConfigParser()
        self.temp_config.optionxform = str
        self.temp_config.read(self.temp_config_file)
        self.build_id = build_id
        if build_id is not None:
            self.logger.info("Will use \"%s\" as build id" %
                             _green(self.build_id))

        # Set kamaki cloud
        if cloud is not None:
            self.kamaki_cloud = cloud
        elif self.config.has_option("Deployment", "kamaki_cloud"):
            kamaki_cloud = self.config.get("Deployment", "kamaki_cloud")
            if kamaki_cloud == "":
                self.kamaki_cloud = None
        else:
            self.kamaki_cloud = None

        # Initialize variables
        self.fabric_installed = False
        self.kamaki_installed = False
        self.cyclades_client = None
        self.network_client = None
        self.compute_client = None
        self.image_client = None
        self.astakos_client = None

    def setup_kamaki(self):
        """Initialize kamaki

        Setup cyclades_client, image_client and compute_client
        """

        config = kamaki_config.Config()
        if self.kamaki_cloud is None:
            try:
                self.kamaki_cloud = config.get("global", "default_cloud")
            except AttributeError:
                # Compatibility with kamaki version <=0.10
                self.kamaki_cloud = config.get("global", "default_cloud")

        self.logger.info("Setup kamaki client, using cloud '%s'.." %
                         self.kamaki_cloud)
        auth_url = config.get_cloud(self.kamaki_cloud, "url")
        self.logger.debug("Authentication URL is %s" % _green(auth_url))
        token = config.get_cloud(self.kamaki_cloud, "token")
        #self.logger.debug("Token is %s" % _green(token))

        self.astakos_client = AstakosClient(auth_url, token)
        endpoints = self.astakos_client.authenticate()

        cyclades_url = get_endpoint_url(endpoints, "compute")
        self.logger.debug("Cyclades API url is %s" % _green(cyclades_url))
        self.cyclades_client = CycladesClient(cyclades_url, token)
        self.cyclades_client.CONNECTION_RETRY_LIMIT = 2

        network_url = get_endpoint_url(endpoints, "network")
        self.logger.debug("Network API url is %s" % _green(network_url))
        self.network_client = CycladesNetworkClient(network_url, token)
        self.network_client.CONNECTION_RETRY_LIMIT = 2

        image_url = get_endpoint_url(endpoints, "image")
        self.logger.debug("Images API url is %s" % _green(image_url))
        self.image_client = ImageClient(image_url, token)
        self.image_client.CONNECTION_RETRY_LIMIT = 2

        compute_url = get_endpoint_url(endpoints, "compute")
        self.logger.debug("Compute API url is %s" % _green(compute_url))
        self.compute_client = ComputeClient(compute_url, token)
        self.compute_client.CONNECTION_RETRY_LIMIT = 2
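
    # Illustrative driving sequence (a sketch, not part of the original code; it
    # assumes a kamaki cloud named "my_cloud" is configured and that the config
    # file provides the image/flavor defaults used by create_server):
    #
    #     ci = SynnefoCI(cloud="my_cloud")
    #     ci.setup_kamaki()
    #     ci.create_server()
    #     ci.clone_repo()
    #     ci.build_packages()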

    def _wait_transition(self, server_id, current_status, new_status):
        """Wait for server to go from current_status to new_status"""
        self.logger.debug("Waiting for server to become %s" % new_status)
        timeout = self.config.getint('Global', 'build_timeout')
        sleep_time = 5
        while True:
            server = self.cyclades_client.get_server_details(server_id)
            if server['status'] == new_status:
                return server
            elif timeout < 0:
                self.logger.error(
                    "Waiting for server to become %s timed out" % new_status)
                self.destroy_server(False)
                sys.exit(1)
            elif server['status'] == current_status:
                # Sleep for #n secs and continue
                timeout = timeout - sleep_time
                time.sleep(sleep_time)
            else:
                self.logger.error(
                    "Server failed with status %s" % server['status'])
                self.destroy_server(False)
                sys.exit(1)

    @_check_kamaki
    def destroy_server(self, wait=True):
        """Destroy slave server"""
        server_id = int(self.read_temp_config('server_id'))
        fips = [f for f in self.network_client.list_floatingips()
                if str(f['instance_id']) == str(server_id)]
        self.logger.info("Destoying server with id %s " % server_id)
        self.cyclades_client.delete_server(server_id)
        if wait:
            self._wait_transition(server_id, "ACTIVE", "DELETED")
        for fip in fips:
            self.logger.info("Destroying floating ip %s",
                             fip['floating_ip_address'])
            self.network_client.delete_floatingip(fip['id'])

    def _create_floating_ip(self):
        """Create a new floating ip"""
        networks = self.network_client.list_networks(detail=True)
        pub_nets = [n for n in networks
                    if n['SNF:floating_ip_pool'] and n['public']]
        for pub_net in pub_nets:
            # Try until we find a public network that is not full
            try:
                fip = self.network_client.create_floatingip(pub_net['id'])
            except ClientError as err:
                self.logger.warning("%s: %s", err.message, err.details)
                continue
            self.logger.debug("Floating IP %s with id %s created",
                              fip['floating_ip_address'], fip['id'])
            return fip
        self.logger.error("No mor IP addresses available")
        sys.exit(1)

    def _create_port(self, floating_ip):
        """Create a new port for our floating IP"""
        net_id = floating_ip['floating_network_id']
        self.logger.debug("Creating a new port to network with id %s", net_id)
        fixed_ips = [{'ip_address': floating_ip['floating_ip_address']}]
        port = self.network_client.create_port(
            net_id, device_id=None, fixed_ips=fixed_ips)
        return port

    @_check_kamaki
    # Too many local variables. pylint: disable-msg=R0914
    def create_server(self, image=None, flavor=None, ssh_keys=None,
                      server_name=None):
        """Create slave server"""
        self.logger.info("Create a new server..")

        # Find a build_id to use
        self._create_new_build_id()

        # Find an image to use
        image_id = self._find_image(image)
        # Find a flavor to use
        flavor_id = self._find_flavor(flavor)

        # Create Server
        networks = []
        if self.config.get("Deployment", "allocate_floating_ip") == "True":
            fip = self._create_floating_ip()
            port = self._create_port(fip)
            networks.append({'port': port['id']})
        private_networks = self.config.get('Deployment', 'private_networks')
        if private_networks:
            private_networks = [p.strip() for p in private_networks.split(",")]
            networks.extend([{"uuid": uuid} for uuid in private_networks])
        if server_name is None:
            server_name = self.config.get("Deployment", "server_name")
            server_name = "%s(BID: %s)" % (server_name, self.build_id)
        server = self.cyclades_client.create_server(
            server_name, flavor_id, image_id, networks=networks)
        server_id = server['id']
        self.write_temp_config('server_id', server_id)
        self.logger.debug("Server got id %s" % _green(server_id))
        server_user = server['metadata']['users']
        self.write_temp_config('server_user', server_user)
        self.logger.debug("Server's admin user is %s" % _green(server_user))
        server_passwd = server['adminPass']
        self.write_temp_config('server_passwd', server_passwd)

        server = self._wait_transition(server_id, "BUILD", "ACTIVE")
        self._get_server_ip_and_port(server, private_networks)
        self._copy_ssh_keys(ssh_keys)

        # Setup Firewall
        self.setup_fabric()
        self.logger.info("Setup firewall")
        accept_ssh_from = self.config.get('Global', 'accept_ssh_from')
        if accept_ssh_from != "":
            self.logger.debug("Block ssh except from %s" % accept_ssh_from)
            cmd = """
            local_ip=$(/sbin/ifconfig eth0 | grep 'inet addr:' | \
                cut -d':' -f2 | cut -d' ' -f1)
            iptables -A INPUT -s localhost -j ACCEPT
            iptables -A INPUT -s $local_ip -j ACCEPT
            iptables -A INPUT -s {0} -p tcp --dport 22 -j ACCEPT
            iptables -A INPUT -p tcp --dport 22 -j DROP
            """.format(accept_ssh_from)
            _run(cmd, False)

        # Setup apt, download packages
        self.logger.debug("Setup apt. Install x2goserver and firefox")
        cmd = """
        echo 'APT::Install-Suggests "false";' >> /etc/apt/apt.conf
        echo 'precedence ::ffff:0:0/96  100' >> /etc/gai.conf
        apt-get update
        apt-get install curl --yes --force-yes
        echo -e "\n\n{0}" >> /etc/apt/sources.list
        # Synnefo repo's key
        curl https://dev.grnet.gr/files/apt-grnetdev.pub | apt-key add -

        # X2GO Key
        apt-key adv --recv-keys --keyserver keys.gnupg.net E1F958385BFE2B6E
        apt-get install x2go-keyring --yes --force-yes
        apt-get update
        apt-get install x2goserver x2goserver-xsession \
                iceweasel --yes --force-yes

        # xterm published application
        echo '[Desktop Entry]' > /usr/share/applications/xterm.desktop
        echo 'Name=XTerm' >> /usr/share/applications/xterm.desktop
        echo 'Comment=standard terminal emulator for the X window system' >> \
            /usr/share/applications/xterm.desktop
        echo 'Exec=xterm' >> /usr/share/applications/xterm.desktop
        echo 'Terminal=false' >> /usr/share/applications/xterm.desktop
        echo 'Type=Application' >> /usr/share/applications/xterm.desktop
        echo 'Encoding=UTF-8' >> /usr/share/applications/xterm.desktop
        echo 'Icon=xterm-color_48x48' >> /usr/share/applications/xterm.desktop
        echo 'Categories=System;TerminalEmulator;' >> \
                /usr/share/applications/xterm.desktop
        """.format(self.config.get('Global', 'apt_repo'))
        _run(cmd, False)

    def _find_flavor(self, flavor=None):
        """Find a suitable flavor to use

        Search by name (regular expression) or by id
        """
        # Get a list of flavors from config file
        flavors = self.config.get('Deployment', 'flavors').split(",")
        if flavor is not None:
            # If we have a flavor_name to use, add it to our list
            flavors.insert(0, flavor)

        list_flavors = self.compute_client.list_flavors()
        for flv in flavors:
            flv_type, flv_value = parse_typed_option(option="flavor",
                                                     value=flv)
            if flv_type == "name":
                # Filter flavors by name
                self.logger.debug(
                    "Trying to find a flavor with name \"%s\"" % flv_value)
                list_flvs = \
                    [f for f in list_flavors
                     if re.search(flv_value, f['name'], flags=re.I)
                     is not None]
            elif flv_type == "id":
                # Filter flavors by id
                self.logger.debug(
                    "Trying to find a flavor with id \"%s\"" % flv_value)
                list_flvs = \
                    [f for f in list_flavors
                     if str(f['id']) == flv_value]
            else:
                self.logger.error("Unrecognized flavor type %s" % flv_type)

            # Check if we found one
            if list_flvs:
                self.logger.debug("Will use \"%s\" with id \"%s\""
                                  % (_green(list_flvs[0]['name']),
                                     _green(list_flvs[0]['id'])))
                return list_flvs[0]['id']

        self.logger.error("No matching flavor found.. aborting")
        sys.exit(1)
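
    # For illustration only: the 'flavors' config option is a comma-separated list
    # of typed options, so a value roughly like
    #     flavors = name:C8R8.*, id:1
    # would first try flavors whose name matches the regular expression and then
    # fall back to the flavor with id 1 (the exact syntax is whatever
    # parse_typed_option accepts; ':' is used here purely as an example).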

    def _find_image(self, image=None):
        """Find a suitable image to use

        In case of search by name, the image has to belong to one
        of the `DEFAULT_SYSTEM_IMAGES_UUID' users.
        In case of search by id it only has to exist.
        """
        # Get a list of images from config file
        images = self.config.get('Deployment', 'images').split(",")
        if image is not None:
            # If we have an image from command line, add it to our list
            images.insert(0, image)

        auth = self.astakos_client.authenticate()
        user_uuid = auth["access"]["token"]["tenant"]["id"]
        list_images = self.image_client.list_public(detail=True)['images']
        for img in images:
            img_type, img_value = parse_typed_option(option="image", value=img)
            if img_type == "name":
                # Filter images by name
                self.logger.debug(
                    "Trying to find an image with name \"%s\"" % img_value)
                accepted_uuids = DEFAULT_SYSTEM_IMAGES_UUID + [user_uuid]
                list_imgs = \
                    [i for i in list_images if i['user_id'] in accepted_uuids
                     and
                     re.search(img_value, i['name'], flags=re.I) is not None]
            elif img_type == "id":
                # Filter images by id
                self.logger.debug(
                    "Trying to find an image with id \"%s\"" % img_value)
                list_imgs = \
                    [i for i in list_images
                     if i['id'].lower() == img_value.lower()]
            else:
                self.logger.error("Unrecognized image type %s" % img_type)
                sys.exit(1)

            # Check if we found one
            if list_imgs:
                self.logger.debug("Will use \"%s\" with id \"%s\""
                                  % (_green(list_imgs[0]['name']),
                                     _green(list_imgs[0]['id'])))
                return list_imgs[0]['id']

        # We didn't find one
        self.logger.error("No matching image found.. aborting")
        sys.exit(1)

    def _get_server_ip_and_port(self, server, private_networks):
        """Compute server's IPv4 and ssh port number"""
        self.logger.info("Get server connection details..")
        if private_networks:
            # Choose the networks that belong to private_networks
            networks = [n for n in server['attachments']
                        if n['network_id'] in private_networks]
        else:
            # Choose the networks that are public
            networks = [n for n in server['attachments']
                        if self.network_client.
                        get_network_details(n['network_id'])['public']]
        # Choose the networks with IPv4
        networks = [n for n in networks if n['ipv4']]
        # Use the first network as IPv4
        server_ip = networks[0]['ipv4']

        if (".okeanos.io" in self.cyclades_client.base_url or
           ".demo.synnefo.org" in self.cyclades_client.base_url):
            tmp1 = int(server_ip.split(".")[2])
            tmp2 = int(server_ip.split(".")[3])
            server_ip = "gate.okeanos.io"
            server_port = 10000 + tmp1 * 256 + tmp2
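            # e.g. a (hypothetical) internal address of 10.0.1.2 would map to
            # port 10000 + 1 * 256 + 2 = 10258 behind gate.okeanos.io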
        else:
            server_port = 22
        self.write_temp_config('server_ip', server_ip)
        self.logger.debug("Server's IPv4 is %s" % _green(server_ip))
        self.write_temp_config('server_port', server_port)
        self.logger.debug("Server's ssh port is %s" % _green(server_port))
        ssh_command = "ssh -p %s %s@%s" \
            % (server_port, server['metadata']['users'], server_ip)
        self.logger.debug("Access server using \"%s\"" %
                          (_green(ssh_command)))

    @_check_fabric
    def _copy_ssh_keys(self, ssh_keys):
        """Upload/Install ssh keys to server"""
        self.logger.debug("Check for authentication keys to use")
        if ssh_keys is None:
            ssh_keys = self.config.get("Deployment", "ssh_keys")

        if ssh_keys != "":
            ssh_keys = os.path.expanduser(ssh_keys)
            self.logger.debug("Will use \"%s\" authentication keys file" %
                              _green(ssh_keys))
            keyfile = '/tmp/%s.pub' % fabric.env.user
            _run('mkdir -p ~/.ssh && chmod 700 ~/.ssh', False)
            if ssh_keys.startswith("http://") or \
                    ssh_keys.startswith("https://") or \
                    ssh_keys.startswith("ftp://"):
                cmd = """
                apt-get update
                apt-get install wget --yes --force-yes
                wget {0} -O {1} --no-check-certificate
                """.format(ssh_keys, keyfile)
                _run(cmd, False)
            elif os.path.exists(ssh_keys):
                _put(ssh_keys, keyfile)
            else:
                self.logger.debug("No ssh keys found")
                return
            _run('cat %s >> ~/.ssh/authorized_keys' % keyfile, False)
            _run('rm %s' % keyfile, False)
            self.logger.debug("Uploaded ssh authorized keys")
        else:
            self.logger.debug("No ssh keys found")

    def _create_new_build_id(self):
        """Find a uniq build_id to use"""
        with filelocker.lock("%s.lock" % self.temp_config_file,
                             filelocker.LOCK_EX):
            # Read temp_config again to get any new entries
            self.temp_config.read(self.temp_config_file)

            # Find a unique build_id to use
            if self.build_id is None:
                ids = self.temp_config.sections()
                if ids:
                    max_id = int(max(self.temp_config.sections(), key=int))
                    self.build_id = max_id + 1
                else:
                    self.build_id = 1
            self.logger.debug("Will use \"%s\" as build id"
                              % _green(self.build_id))

            # Create a new section
            try:
                self.temp_config.add_section(str(self.build_id))
            except DuplicateSectionError:
                msg = ("Build id \"%s\" already in use. " +
                       "Please use a uniq one or cleanup \"%s\" file.\n") \
                    % (self.build_id, self.temp_config_file)
                self.logger.error(msg)
                sys.exit(1)
            creation_time = \
                time.strftime("%a, %d %b %Y %X", time.localtime())
            self.temp_config.set(str(self.build_id),
                                 "created", str(creation_time))

            # Write changes back to temp config file
            with open(self.temp_config_file, 'wb') as tcf:
                self.temp_config.write(tcf)

    def write_temp_config(self, option, value):
        """Write changes back to config file"""
        # Acquire the lock to write to temp_config_file
        with filelocker.lock("%s.lock" % self.temp_config_file,
                             filelocker.LOCK_EX):

            # Read temp_config again to get any new entries
            self.temp_config.read(self.temp_config_file)

            self.temp_config.set(str(self.build_id), option, str(value))
            curr_time = time.strftime("%a, %d %b %Y %X", time.localtime())
            self.temp_config.set(str(self.build_id), "modified", curr_time)

            # Write changes back to temp config file
            with open(self.temp_config_file, 'wb') as tcf:
                self.temp_config.write(tcf)

    def read_temp_config(self, option):
        """Read from temporary_config file"""
        # If build_id is None use the latest one
        if self.build_id is None:
            ids = self.temp_config.sections()
            if ids:
                self.build_id = int(ids[-1])
            else:
                self.logger.error("No sections in temporary config file")
                sys.exit(1)
            self.logger.debug("Will use \"%s\" as build id"
                              % _green(self.build_id))
        # Read specified option
        return self.temp_config.get(str(self.build_id), option)
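
    # For illustration, after a successful create_server() the build section in
    # the temporary config file holds roughly the keys written above (all values
    # below are made up):
    #
    #     [1]
    #     created = Mon, 01 Jan 2018 12:00:00
    #     server_id = 12345
    #     server_user = user
    #     server_passwd = s3cr3t
    #     server_ip = 10.0.1.2
    #     server_port = 22
    #     modified = Mon, 01 Jan 2018 12:05:00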

    def setup_fabric(self):
        """Setup fabric environment"""
        self.logger.info("Setup fabric parameters..")
        fabric.env.user = self.read_temp_config('server_user')
        fabric.env.host_string = self.read_temp_config('server_ip')
        fabric.env.port = int(self.read_temp_config('server_port'))
        fabric.env.password = self.read_temp_config('server_passwd')
        fabric.env.connection_attempts = 10
        fabric.env.shell = "/bin/bash -c"
        fabric.env.disable_known_hosts = True
        fabric.env.output_prefix = None

    def _check_hash_sum(self, localfile, remotefile):
        """Check hash sums of two files"""
        self.logger.debug("Check hash sum for local file %s" % localfile)
        hash1 = os.popen("sha256sum %s" % localfile).read().split(' ')[0]
        self.logger.debug("Local file has sha256 hash %s" % hash1)
        self.logger.debug("Check hash sum for remote file %s" % remotefile)
        hash2 = _run("sha256sum %s" % remotefile, False)
        hash2 = hash2.split(' ')[0]
        self.logger.debug("Remote file has sha256 hash %s" % hash2)
        if hash1 != hash2:
            self.logger.error("Hashes differ.. aborting")
            sys.exit(1)

    @_check_fabric
    def clone_repo(self, local_repo=False):
        """Clone Synnefo repo from slave server"""
        self.logger.info("Configure repositories on remote server..")
        self.logger.debug("Install/Setup git")
        cmd = """
        apt-get install git --yes --force-yes
        git config --global user.name {0}
        git config --global user.email {1}
        """.format(self.config.get('Global', 'git_config_name'),
                   self.config.get('Global', 'git_config_mail'))
        _run(cmd, False)

        # Clone synnefo_repo
        synnefo_branch = self.clone_synnefo_repo(local_repo=local_repo)
        # Clone pithos-web-client
        self.clone_pithos_webclient_repo(synnefo_branch)

    @_check_fabric
    def clone_synnefo_repo(self, local_repo=False):
        """Clone Synnefo repo to remote server"""
        # Find synnefo_repo and synnefo_branch to use
        synnefo_repo = self.config.get('Global', 'synnefo_repo')
        synnefo_branch = self.config.get("Global", "synnefo_branch")
        if synnefo_branch == "":
            synnefo_branch = \
                subprocess.Popen(
                    ["git", "rev-parse", "--abbrev-ref", "HEAD"],
                    stdout=subprocess.PIPE).communicate()[0].strip()
            if synnefo_branch == "HEAD":
                synnefo_branch = \
                    subprocess.Popen(
                        ["git", "rev-parse", "--short", "HEAD"],
                        stdout=subprocess.PIPE).communicate()[0].strip()
        self.logger.debug("Will use branch \"%s\"" % _green(synnefo_branch))

        if local_repo or synnefo_repo == "":
            # Use local_repo
            self.logger.debug("Push local repo to server")
            # Firstly create the remote repo
            _run("git init synnefo", False)
            # Then push our local repo over ssh
            # We have to pass some arguments to ssh command
            # namely to disable host checking.
            (temp_ssh_file_handle, temp_ssh_file) = tempfile.mkstemp()
            os.close(temp_ssh_file_handle)
            # XXX: git push doesn't read the password
            cmd = """
            echo 'exec ssh -o "StrictHostKeyChecking no" \
                           -o "UserKnownHostsFile /dev/null" \
                           -q "$@"' > {4}
            chmod u+x {4}
            export GIT_SSH="{4}"
            echo "{0}" | git push --quiet --mirror ssh://{1}@{2}:{3}/~/synnefo
            rm -f {4}
            """.format(fabric.env.password,
                       fabric.env.user,
                       fabric.env.host_string,
                       fabric.env.port,
                       temp_ssh_file)
            os.system(cmd)
        else:
            # Clone Synnefo from remote repo
            self.logger.debug("Clone synnefo from %s" % synnefo_repo)
            self._git_clone(synnefo_repo)

        # Checkout the desired synnefo_branch
        self.logger.debug("Checkout \"%s\" branch/commit" % synnefo_branch)
        cmd = """
        cd synnefo
        for branch in `git branch -a | grep remotes | grep -v HEAD`; do
            git branch --track ${branch##*/} $branch
        done
        git checkout %s
        """ % (synnefo_branch)
        _run(cmd, False)

        return synnefo_branch

    @_check_fabric
    def clone_pithos_webclient_repo(self, synnefo_branch):
        """Clone Pithos WebClient repo to remote server"""
        # Find pithos_webclient_repo and pithos_webclient_branch to use
        pithos_webclient_repo = \
            self.config.get('Global', 'pithos_webclient_repo')
        pithos_webclient_branch = \
            self.config.get('Global', 'pithos_webclient_branch')

        # Clone pithos-webclient from remote repo
        self.logger.debug("Clone pithos-webclient from %s" %
                          pithos_webclient_repo)
        self._git_clone(pithos_webclient_repo)

        # Track all pithos-webclient branches
        cmd = """
        cd pithos-web-client
        for branch in `git branch -a | grep remotes | grep -v HEAD`; do
            git branch --track ${branch##*/} $branch > /dev/null 2>&1
        done
        git --no-pager branch --no-color
        """
        webclient_branches = _run(cmd, False)
        webclient_branches = webclient_branches.split()

        # If we have pithos_webclient_branch in config file use this one
        # else try to use the same branch as synnefo_branch
        # else use an appropriate one.
        if pithos_webclient_branch == "":
            if synnefo_branch in webclient_branches:
                pithos_webclient_branch = synnefo_branch
            else:
                # If synnefo_branch starts with one of
                # 'master', 'hotfix'; use the master branch
                if synnefo_branch.startswith('master') or \
                        synnefo_branch.startswith('hotfix'):
                    pithos_webclient_branch = "master"
                # If synnefo_branch starts with one of
                # 'develop', 'feature'; use the develop branch
                elif synnefo_branch.startswith('develop') or \
                        synnefo_branch.startswith('feature'):
                    pithos_webclient_branch = "develop"
                else:
                    self.logger.warning(
                        "Cannot determine which pithos-web-client branch to "
                        "use based on \"%s\" synnefo branch. "
                        "Will use develop." % synnefo_branch)
                    pithos_webclient_branch = "develop"
        # Checkout branch
        self.logger.debug("Checkout \"%s\" branch" %
                          _green(pithos_webclient_branch))
        cmd = """
        cd pithos-web-client
        git checkout {0}
        """.format(pithos_webclient_branch)
        _run(cmd, False)

    def _git_clone(self, repo):
        """Clone repo to remote server

        Currently, cloning from code.grnet.gr can fail unexpectedly.
        So retry!!

        """
        cloned = False
        for i in range(1, 11):
            try:
                _run("git clone %s" % repo, False)
                cloned = True
                break
            except BaseException:
                self.logger.warning("Clonning failed.. retrying %s/10" % i)
        if not cloned:
            self.logger.error("Can not clone repo.")
            sys.exit(1)

    @_check_fabric
    def build_packages(self):
        """Build packages needed by Synnefo software"""
        self.logger.info("Install development packages")
        cmd = """
        apt-get update
        apt-get install zlib1g-dev dpkg-dev debhelper git-buildpackage \
                python-dev python-all python-pip ant --yes --force-yes
        pip install -U devflow
        """
        _run(cmd, False)

        # Patch pydist bug
        if self.config.get('Global', 'patch_pydist') == "True":
            self.logger.debug("Patch pydist.py module")
            cmd = r"""
            sed -r -i 's/(\(\?P<name>\[A-Za-z\]\[A-Za-z0-9_\.)/\1\\\-/' \
                /usr/share/python/debpython/pydist.py
            """
            _run(cmd, False)

        # Build synnefo packages
        self.build_synnefo()
        # Build pithos-web-client packages
        self.build_pithos_webclient()

    @_check_fabric
    def build_synnefo(self):
        """Build Synnefo packages"""
        self.logger.info("Build Synnefo packages..")

        cmd = """
        devflow-autopkg snapshot -b ~/synnefo_build-area --no-sign
        """
        with fabric.cd("synnefo"):
            _run(cmd, True)

        # Install snf-deploy package
        self.logger.debug("Install snf-deploy package")
        cmd = """
        dpkg -i snf-deploy*.deb
        apt-get -f install --yes --force-yes
        """
        with fabric.cd("synnefo_build-area"):
            with fabric.settings(warn_only=True):
                _run(cmd, True)

        # Setup synnefo packages for snf-deploy
        self.logger.debug("Copy synnefo debs to snf-deploy packages dir")
        cmd = """
        cp ~/synnefo_build-area/*.deb /var/lib/snf-deploy/packages/
        """
        _run(cmd, False)

    @_check_fabric
    def build_pithos_webclient(self):
        """Build pithos-web-client packages"""
        self.logger.info("Build pithos-web-client packages..")

        cmd = """
        devflow-autopkg snapshot -b ~/webclient_build-area --no-sign
        """
        with fabric.cd("pithos-web-client"):
            _run(cmd, True)

        # Setup pithos-web-client packages for snf-deploy
        self.logger.debug("Copy webclient debs to snf-deploy packages dir")
        cmd = """
        cp ~/webclient_build-area/*.deb /var/lib/snf-deploy/packages/
        """
        _run(cmd, False)

    @_check_fabric
    def build_documentation(self):
        """Build Synnefo documentation"""
        self.logger.info("Build Synnefo documentation..")
        _run("pip install -U Sphinx", False)
        with fabric.cd("synnefo"):
            _run("devflow-update-version; "
                 "./ci/make_docs.sh synnefo_documentation", False)

    def fetch_documentation(self, dest=None):
        """Fetch Synnefo documentation"""
        self.logger.info("Fetch Synnefo documentation..")
        if dest is None:
            dest = "synnefo_documentation"
        dest = os.path.abspath(dest)
        if not os.path.exists(dest):
            os.makedirs(dest)
        self.fetch_compressed("synnefo/synnefo_documentation", dest)
        self.logger.info("Downloaded documentation to %s" %
                         _green(dest))

    @_check_fabric
    def deploy_synnefo(self, schema=None):
        """Deploy Synnefo using snf-deploy"""
        self.logger.info("Deploy Synnefo..")
        if schema is None:
            schema = self.config.get('Global', 'schema')
        self.logger.debug("Will use \"%s\" schema" % _green(schema))

        schema_dir = os.path.join(self.ci_dir, "schemas/%s" % schema)
        if not (os.path.exists(schema_dir) and os.path.isdir(schema_dir)):
            raise ValueError("Unknown schema: %s" % schema)

        self.logger.debug("Upload schema files to server")
        _put(os.path.join(schema_dir, "*"), "/etc/snf-deploy/")

        self.logger.debug("Change password in nodes.conf file")
        cmd = """
        sed -i 's/^password =.*/password = {0}/' /etc/snf-deploy/nodes.conf
        """.format(fabric.env.password)
        _run(cmd, False)

        self.logger.debug("Run snf-deploy")
        cmd = """
        snf-deploy keygen --force
        snf-deploy --disable-colors --autoconf all
        """
        _run(cmd, True)

    @_check_fabric
    def unit_test(self):
        """Run Synnefo unit test suite"""
        self.logger.info("Run Synnefo unit test suite")
        component = self.config.get('Unit Tests', 'component')

        self.logger.debug("Install needed packages")
        cmd = """
        pip install -U mock
        pip install -U factory_boy
        pip install -U nose
        """
        _run(cmd, False)

        self.logger.debug("Upload tests.sh file")
        unit_tests_file = os.path.join(self.ci_dir, "tests.sh")
        _put(unit_tests_file, ".")

        self.logger.debug("Run unit tests")
        cmd = """
        bash tests.sh {0}
        """.format(component)
        _run(cmd, True)

    @_check_fabric
    def run_burnin(self):
        """Run burnin functional test suite"""
        self.logger.info("Run Burnin functional test suite")
        cmd = """
        auth_url=$(grep -e '^url =' .kamakirc | cut -d' ' -f3)
        token=$(grep -e '^token =' .kamakirc | cut -d' ' -f3)
        images_user=$(kamaki image list -l | grep owner | \
                      cut -d':' -f2 | tr -d ' ')
        snf-burnin --auth-url=$auth_url --token=$token {0}
        BurninExitStatus=$?
        exit $BurninExitStatus
        """.format(self.config.get('Burnin', 'cmd_options'))
        _run(cmd, True)

    @_check_fabric
    def fetch_compressed(self, src, dest=None):
        """Create a tarball and fetch it locally"""
        self.logger.debug("Creating tarball of %s" % src)
        basename = os.path.basename(src)
        tar_file = basename + ".tgz"
        cmd = "tar czf %s %s" % (tar_file, src)
        _run(cmd, False)
        if not os.path.exists(dest):
            os.makedirs(dest)

        tmp_dir = tempfile.mkdtemp()
        fabric.get(tar_file, tmp_dir)

        dest_file = os.path.join(tmp_dir, tar_file)
        self._check_hash_sum(dest_file, tar_file)
        self.logger.debug("Untar packages file %s" % dest_file)
        cmd = """
        cd %s
        tar xzf %s
        cp -r %s/* %s
        rm -r %s
        """ % (tmp_dir, tar_file, src, dest, tmp_dir)
        os.system(cmd)
        self.logger.info("Downloaded %s to %s" %
                         (src, _green(dest)))

    @_check_fabric
    def fetch_packages(self, dest=None):
        """Fetch Synnefo packages"""
        if dest is None:
            dest = self.config.get('Global', 'pkgs_dir')
        dest = os.path.abspath(os.path.expanduser(dest))
        if not os.path.exists(dest):
            os.makedirs(dest)
        self.fetch_compressed("synnefo_build-area", dest)
        self.fetch_compressed("webclient_build-area", dest)
        self.logger.info("Downloaded debian packages to %s" %
                         _green(dest))

    def x2go_plugin(self, dest=None):
        """Produce an html page which will use the x2goplugin

        Arguments:
          dest  -- The file where to save the page (String)

        """
        output_str = """
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
        <html>
        <head>
        <title>X2Go SynnefoCI Service</title>
        </head>
        <body onload="checkPlugin()">
        <div id="x2goplugin">
            <object
                src="location"
                type="application/x2go"
                name="x2goplugin"
                palette="background"
                height="100%"
                hspace="0"
                vspace="0"
                width="100%"
                x2goconfig="
                    session=X2Go-SynnefoCI-Session
                    server={0}
                    user={1}
                    sshport={2}
                    published=true
                    autologin=true
                ">
            </object>
        </div>
        </body>
        </html>
        """.format(self.read_temp_config('server_ip'),
                   self.read_temp_config('server_user'),
                   self.read_temp_config('server_port'))
        if dest is None:
            dest = self.config.get('Global', 'x2go_plugin_file')

        self.logger.info("Writting x2go plugin html file to %s" % dest)
        fid = open(dest, 'w')
        fid.write(output_str)
        fid.close()
Esempio n. 19
0
class OkeanosConnector(AbstractConnector):
    """
    Okeanos connector.
    """

    def __init__(self):
        AbstractConnector.__init__(self)
        self.__cyclades = None
        self.__network_client = None
        self.attach_public_ipv4 = False
        self.private_network = -1
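        # private_network: -1 = no private network, 0 = requested (created
        # lazily in prepare()), any other value = id of the created network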

    def authenticate(self, authentication=None):
        """

        :param authentication:
        :return:
        """
        if self.__cyclades is not None:
            return True
        try:
            authcl = AstakosClient(authentication['URL'], authentication['TOKEN'])
            authcl.authenticate()
            self.__cyclades = CycladesClient(authcl.get_service_endpoints('compute')['publicURL'],
                                             authentication['TOKEN'])
            self.__network_client = CycladesNetworkClient(authcl.get_service_endpoints('network')['publicURL'],
                                                          authentication['TOKEN'])
        except ClientError:
            stderr.write('Connector initialization failed')
            return False
        return True

    def configure(self, configuration):
        self.authenticate(configuration['auth'])
        if 'private_network' in configuration and configuration['private_network']:
            self.private_network = 0
        if 'attach_public_ipv4' in configuration and configuration['attach_public_ipv4']:
            self.attach_public_ipv4 = True

    def prepare(self):
        """
        In this method, application-level IaaS related actions are executed.
        :return:
        """
        if self.private_network == 0:
            self.private_network = self.create_private_network()

    def create_vm(self, name, flavor_id, image_id):
        """

        :param name:
        :param flavor_id:
        :param image_id:
        :return:
        """
        networks = []
        if self.attach_public_ipv4:
            networks.append({'uuid': self.__create_floating_ip()})
        if self.private_network != -1:
            networks.append({'uuid': self.private_network})

        response = self.__cyclades.create_server(name=name, flavor_id=flavor_id, image_id=image_id, networks=networks)
        ret_value = dict()
        ret_value['password'] = response['adminPass']
        ret_value['id'] = response['id']
        ret_value['user'] = response['metadata']['users']
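        # ~okeanos exposes every VM under a DNS name of the form snf-<id>.vm.okeanos.grnet.gr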
        ret_value['hostname'] = 'snf-' + str(response['id']) + '.vm.okeanos.grnet.gr'
        self.__cyclades.wait_server(server_id=ret_value['id'], current_status='ACTIVE')
        return ret_value

    def delete_vm(self, server_id):
        """
        Delete VM method. The method is blocking until the VM goes to a "DELETED" state
        :param server_id:
        :return:
        """
        attachments = self.__cyclades.get_server_details(server_id)['attachments']
        port_id = None
        for a in attachments:
            if a['OS-EXT-IPS:type'] == 'floating':
                port_id = a['id']
        floating_ip_id = None
        for ip in self.__network_client.list_floatingips():
            if port_id is not None and ip['port_id'] == str(port_id):
                floating_ip_id = ip['id']
        self.__cyclades.delete_server(server_id)
        self.__cyclades.wait_server(server_id, current_status='DELETED')    # wait until server is deleted
        if floating_ip_id is not None:
            self.__wait_until_ip_released(floating_ip_id)
            self.__network_client.delete_floatingip(floating_ip_id)

    def __wait_until_ip_released(self, floating_ip_id):
        for i in range(1, MAX_WAIT_FOR_LOOPS+1):
            for ip in self.__network_client.list_floatingips():
                if ip['id'] == floating_ip_id:
                    if ip['instance_id'] is None or ip['instance_id'] == 'None':
                        return True
            sleep(SLEEP_TIMEOUT)
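        # If the IP is still attached after MAX_WAIT_FOR_LOOPS iterations,
        # fall through and return None (the caller ignores the result).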

    def list_vms(self):
        """


        :return:
        """
        return self.__cyclades.list_servers()

    def get_status(self, vm_id):
        """

        :param vm_id:
        :return:
        """
        return self.__cyclades.get_server_details(vm_id)

    def get_server_addresses(self, vm_id, ip_version=None, connection_type=None):
        """
        Returns the enabled addresses, as referenced from the IaaS.
        """
        addresses = self.__cyclades.get_server_details(vm_id)['addresses']
        results = []
        while len(addresses) > 0:
            key, value = addresses.popitem()
            if (ip_version is None or value[0]['version'] == ip_version) and \
                    (connection_type is None or value[0]['OS-EXT-IPS:type'] == connection_type):
                results.append(value[0]['addr'])
        return results

    def __create_floating_ip(self):
        self.__network_client.floatingips_get()
        response = self.__network_client.create_floatingip()
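        # The id of the public network the new IP belongs to is what create_vm()
        # attaches to the server as a network 'uuid'.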
        return response['floating_network_id']

    def create_private_network(self):
        """
        Creates a new private network and returns its id
        """
        response = self.__network_client.create_network(type='MAC_FILTERED', name='Deployment network')
        self.__network_client.create_subnet(
            network_id=response['id'],
            enable_dhcp=True,
            cidr='192.168.0.0/24'
        )
        return response['id']

    def clone(self):
        new_connector = OkeanosConnector()
        new_connector.attach_public_ipv4 = self.attach_public_ipv4
        new_connector.private_network = self.private_network
        new_connector.__network_client = self.__network_client
        new_connector.__cyclades = self.__cyclades
        return new_connector

    def cleanup(self):
        if self.private_network != -1 and self.private_network != 0:
            self.__wait_until_private_net_is_empty(self.private_network)
            self.__network_client.delete_network(self.private_network)

    def __wait_until_private_net_is_empty(self, private_net_id):

        for i in range(1, MAX_WAIT_FOR_LOOPS):
            port_set = set()
            for p in self.__network_client.list_ports():
                port_set.add(p['network_id'])
            if private_net_id not in port_set:
                return
            else:
                sleep(SLEEP_TIMEOUT)

    def serialize(self):
        d = dict()
        d['attach_public_ipv4'] = self.attach_public_ipv4
        d['private_network'] = self.private_network
        return d

    def deserialize(self, state):
        self.attach_public_ipv4 = state['attach_public_ipv4']
        self.private_network = state['private_network']
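

# A minimal usage sketch for OkeanosConnector, assuming valid ~okeanos
# credentials; the URL, token, flavor and image values below are hypothetical
# placeholders:
#
#     connector = OkeanosConnector()
#     connector.configure({
#         'auth': {'URL': 'https://accounts.okeanos.grnet.gr/identity/v2.0',
#                  'TOKEN': '<user token>'},
#         'private_network': True,
#         'attach_public_ipv4': True,
#     })
#     connector.prepare()
#     vm = connector.create_vm('demo-vm', '<flavor id>', '<image id>')
#     connector.delete_vm(vm['id'])
#     connector.cleanup()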
Esempio n. 20
0
class OkeanosNativeClient(object):
    VOLUME_STATUS_CREATING = 'CREATING'
    VOLUME_STATUS_IN_USE = 'IN_USE'
    VOLUME_STATUS_DELETING = 'DELETING'
    VOLUME_STATUS_DELETED = 'DELETED'

    def __init__(self,
                 token,
                 authURL='https://accounts.okeanos.grnet.gr/identity/v2.0'):
        """
        :type authURL: str
        :type token: str
        """
        from kamaki.clients.utils import https
        https.patch_ignore_ssl()

        self.authURL = authURL
        self.token = token
        self.cycladesServiceType = CycladesClient.service_type
        self.blockStorageServiceType = CycladesBlockStorageClient.service_type
        self.astakosClient = AstakosClient(self.authURL, self.token)
        endpointF = self.astakosClient.get_service_endpoints
        self.cycladesEndpoint = endpointF(
            self.cycladesServiceType)[u'publicURL']
        self.cycladesClient = CycladesClient(self.cycladesEndpoint, self.token)
        self.blockStorageEndpoint = endpointF(
            self.blockStorageServiceType)[u'publicURL']
        self.blockStorageClient = CycladesBlockStorageClient(
            self.blockStorageEndpoint, token)

        flavorsById = {}
        flavorsByName = {}
        for flavor in self.cycladesClient.list_flavors():
            _id = flavor[u'id']
            name = flavor[u'name']
            flavorsById[_id] = name
            flavorsByName[name] = _id
        self.flavorsById = flavorsById
        self.flavorsByName = flavorsByName

    def getFlavorId(self, idOrName):
        """
        :rtype : str
        :type idOrName: str
        """
        if idOrName in self.flavorsById:
            return idOrName
        elif idOrName in self.flavorsByName:
            return self.flavorsByName[idOrName]
        else:
            return idOrName  # caller's responsibility

    def listNodes(self):
        """
        :rtype : list(ListNodeResult)
        """
        instanceInfoList = []
        servers = self.cycladesClient.list_servers()
        for server in servers:
            serverId = str(server[u'id'])  # It is a number in the result
            serverDetails = self.cycladesClient.get_server_details(serverId)
            serverStatusS = serverDetails[u'status']
            serverStatus = NodeStatus(serverStatusS)
            # serverFlavourId = serverDetails[u'flavor'][u'id']
            # serverImageId = serverDetails[u'image'][u'id']
            instanceInfo = ListNodeResult(serverId, serverStatus,
                                          serverDetails)
            instanceInfoList.append(instanceInfo)
        return instanceInfoList

    def createVolume(self, serverId, sizeGB, projectId, sleepWaitSeconds=5):
        """
        :param serverId: str
        :param sizeGB: Union[str, int]
        :param projectId: str
        :rtype : dict
        """
        self.log("> serverId=%s, sizeGB=%s, projectId=%s" %
                 (serverId, sizeGB, projectId))

        response = self.blockStorageClient.create_volume(sizeGB,
                                                         serverId,
                                                         '%s-vol-%s' %
                                                         (serverId, sizeGB),
                                                         project=projectId)
        # The volume is being created asynchronously, status is 'creating'
        # we wait until it changes (to 'in_use')
        volumeId = response[u'id']

        def getVolumeDetails():
            _volumeDetails = self.blockStorageClient.get_volume_details(
                volumeId)
            _volumeStatus = _volumeDetails[u'status'].upper()
            self.log("volumeDetails = %s" % _volumeDetails)
            return _volumeDetails, _volumeStatus

        volumeDetails, volumeStatus = getVolumeDetails()
        while volumeStatus == OkeanosNativeClient.VOLUME_STATUS_CREATING:
            time.sleep(sleepWaitSeconds)
            volumeDetails, volumeStatus = getVolumeDetails()

        # response is something like this
        # {
        #     u'display_name': u'foo',
        #     u'id': u'46974',
        #     u'links': [
        #         {
        #             u'href': u'https://cyclades.okeanos.grnet.gr/volume/v2.0/volumes/46974',
        #             u'rel': u'self'
        #         }, {
        #             u'href': u'https://cyclades.okeanos.grnet.gr/volume/v2.0/volumes/46974',
        #             u'rel': u'bookmark'
        #         }
        #     ]
        # }

        self.log("< %s" % response)

        return response

    def attachVolume(self, serverId, sizeGB, projectId):
        """Create and attach an extra volume to the VM, returning the volume name, the volume id and the device name"""
        self.log("> serverId = %s, sizeGB = %s, projectId = %s" %
                 (serverId, sizeGB, projectId))
        result = self.createVolume(serverId, sizeGB, projectId)
        volumeId = result['id']
        return volumeId

    def deleteVolume(self, volumeId, sleepWaitSeconds=5):
        """
        Deletes the volume identified by the given `volumeId`.
        :param volumeId: str
        :return:
        """
        def getVolumeDetails():
            _volumeDetails = self.blockStorageClient.get_volume_details(
                volumeId)
            _volumeStatus = _volumeDetails[u'status'].upper()
            self.log("volumeDetails = %s" % _volumeDetails)
            return _volumeDetails, _volumeStatus

        volumeDetails, volumeStatus = getVolumeDetails()
        response = self.blockStorageClient.delete_volume(volumeId)

        # Normal status transition is:
        #   OkeanosNativeClient.VOLUME_STATUS_IN_USE    =>
        #   OkeanosNativeClient.VOLUME_STATUS_DELETING  =>
        #   OkeanosNativeClient.VOLUME_STATUS_DELETED

        while volumeStatus == OkeanosNativeClient.VOLUME_STATUS_IN_USE:
            time.sleep(sleepWaitSeconds)
            volumeDetails, volumeStatus = getVolumeDetails()

        # Now it should be in status:
        #   OkeanosNativeClient.VOLUME_STATUS_DELETING
        #
        # Note that real deletion means status:
        #   OkeanosNativeClient.VOLUME_STATUS_DELETED
        #
        # ... But let's not wait that long

        return response

    def createNode(self,
                   nodeName,
                   flavorIdOrName,
                   imageId,
                   sshPubKey=None,
                   initScriptPathAndData=None,
                   remoteUsername="******",
                   remoteUsergroup=None,
                   localPubKeyData=None,
                   createAsyncInitScript=True,
                   projectId=None):
        """

        :rtype : NodeDetails
        :type localPubKeyData: str
        :type sshPubKey: str
        :type imageId: str
        :type flavorIdOrName: str
        :type nodeName: str
        """
        self.log("Creating node '%s', %s, %s" %
                 (nodeName, flavorIdOrName, imageId))

        sshPubKey = sshPubKey or None
        if sshPubKey is not None:
            self.log("User SSH public key to be injected in %s: %s" %
                     (nodeName, sshPubKey))
        remoteUsergroup = remoteUsergroup or remoteUsername
        flavorId = self.getFlavorId(flavorIdOrName)

        # We make sure:
        # a) The orchestrator can do password-less SSH on the newly created machine (via ~/.ssh/id_rsa.pub)
        # b) The SlipStream user can do password-less SSH on the newly created machine (via the provided userPubKey)
        # c) The provided init script is injected

        localPubKeyData = localPubKeyData or loadPubRsaKeyData()
        self.log("Local SSH public key to be injected in %s: %s" %
                 (nodeName, localPubKeyData))

        if sshPubKey is None:
            authorized_keys = localPubKeyData
        else:
            if not localPubKeyData.endswith('\n'):
                localPubKeyData += '\n'
            authorized_keys = "%s%s" % (localPubKeyData, sshPubKey)

        # See https://www.synnefo.org/docs/kamaki/latest/developers/showcase.html#inject-ssh-keys
        import base64
        personality = [
            dict(contents=base64.b64encode(authorized_keys),
                 path="/%s/.ssh/authorized_keys" % remoteUsername,
                 owner=remoteUsername,
                 group=remoteUsergroup,
                 mode=0600)
        ]

        if initScriptPathAndData is not None:
            initScriptPath, initScriptData = initScriptPathAndData

            personality.append(
                dict(contents=base64.b64encode(initScriptData),
                     path=initScriptPath,
                     owner=remoteUsername,
                     group=remoteUsergroup,
                     mode=0777))

            # In order for the contextualization script to run asynchronously,
            # we create another script that launches the original via nohup
            if createAsyncInitScript:
                asyncInitScriptPath = "%s.async" % initScriptPath
                asyncInitScriptData = "#!/bin/sh -e\nexec nohup %s &\n" % initScriptPath

                personality.append(
                    dict(contents=base64.b64encode(asyncInitScriptData),
                         path=asyncInitScriptPath,
                         owner=remoteUsername,
                         group=remoteUsergroup,
                         mode=0777))
            else:
                asyncInitScriptPath = None
        else:
            initScriptPath = None
            initScriptData = None
            asyncInitScriptPath = None
            asyncInitScriptData = None

        self.log(">> Personalities")
        for _p in personality:
            self.log(">>>> %s" % _p)

        resultDict = self.cycladesClient.create_server(nodeName,
                                                       flavorId,
                                                       imageId,
                                                       personality=personality,
                                                       project_id=projectId)
        # No IP is included in this result
        nodeDetails = NodeDetails(resultDict,
                                  sshPubKey=sshPubKey,
                                  initScriptPath=initScriptPath,
                                  initScriptData=initScriptData,
                                  asyncInitScriptPath=asyncInitScriptPath)
        self.log("Created node %s status %s, adminPass = %s, ip4s = %s" %
                 (nodeDetails.id, nodeDetails.status.okeanosStatus,
                  nodeDetails.adminPass, nodeDetails.ipv4s))
        return nodeDetails

    def runCommandOnNode(self,
                         nodeDetails,
                         command,
                         username='******',
                         localPrivKey=None,
                         timeout=None,
                         runSynchronously=True):
        """
        :type timeout: int
        :type localPrivKey: str
        :type nodeDetails: NodeDetails
        :type command: str
        """
        hostname = nodeDetails.ipv4s[0]
        return runCommandOnHost(hostname,
                                command,
                                username=username,
                                localPrivKey=localPrivKey,
                                timeout=timeout,
                                runSynchronously=runSynchronously)

    def checkSshOnNode(self,
                       nodeDetails,
                       username="******",
                       localPrivKey=None,
                       timeout=None):
        hostname = nodeDetails.ipv4s[0]
        return checkSshOnHost(hostname,
                              username=username,
                              localPrivKey=localPrivKey,
                              timeout=timeout)

    def waitSshOnHost(self,
                      hostname,
                      username="******",
                      localPrivKey=None,
                      timeout=None,
                      sleepSeconds=10):
        t0 = time.time()
        while True:
            if checkSshOnHost(hostname,
                              username=username,
                              localPrivKey=localPrivKey,
                              timeout=timeout):
                t1 = time.time()
                dtsec = t1 - t0
                self.log("SSH good for %s@%s after %s sec" %
                         (username, hostname, dtsec))
                break
            else:
                time.sleep(sleepSeconds)

    def waitSshOnNode(self,
                      nodeDetails,
                      username="******",
                      localPrivKey=None,
                      timeout=None):
        hostname = nodeDetails.ipv4s[0]
        self.waitSshOnHost(hostname,
                           username=username,
                           localPrivKey=localPrivKey,
                           timeout=timeout)

    def getNodeDetails(self, nodeId):
        """
        :type nodeId: str
        :rtype : NodeDetails
        """
        # from kamaki.cli import logger
        # logger.add_file_logger('kamaki.clients.sent', filename='get_server_details.log')
        # logger.add_file_logger('kamaki.clients.recv', filename='get_server_details.log')

        resultDict = self.cycladesClient.get_server_details(nodeId)
        nodeDetails = NodeDetails(resultDict)
        return nodeDetails

    def waitNodeStatus(self, nodeId, expectedOkeanosStatus, sleepSeconds=5):
        """
        :type expectedOkeanosStatus: str
        :type nodeId: str
        """
        t0 = time.time()
        nodeDetails = self.getNodeDetails(nodeId)
        while nodeDetails.status.okeanosStatus != expectedOkeanosStatus:
            time.sleep(sleepSeconds)
            nodeDetails = self.getNodeDetails(nodeId)
        t1 = time.time()
        dtsec = t1 - t0
        self.log("Node %s status %s after %s sec" %
                 (nodeId, expectedOkeanosStatus, dtsec))
        return nodeDetails

    def waitCurrentStatus(self,
                          nodeId,
                          currentOkeanosStatus,
                          sleepSeconds=5,
                          maxSleepSeconds=400):
        """ Wait untile the current status changes
        :type nodeId: str
        :type currentOkeanosStatus: str
        :type sleepSeconds: float
        """
        t0 = time.time()
        self.cycladesClient.wait_server(nodeId,
                                        current_status=currentOkeanosStatus,
                                        delay=sleepSeconds,
                                        max_wait=maxSleepSeconds)
        nodeDetails = self.getNodeDetails(nodeId)
        newOkeanosStatus = nodeDetails.status.okeanosStatus
        t1 = time.time()
        dtsec = t1 - t0
        self.log("Node %s status %s -> %s after %s sec" %
                 (nodeId, currentOkeanosStatus, newOkeanosStatus, dtsec))
        return nodeDetails

    def createNodeAndWait(self,
                          nodeName,
                          flavorIdOrName,
                          imageId,
                          sshPubKey,
                          initScriptPathAndData=None,
                          remoteUsername="******",
                          remoteUsergroup=None,
                          localPubKeyData=None,
                          localPrivKey=None,
                          sshTimeout=None,
                          runInitScriptSynchronously=False,
                          extraVolatileDiskGB=0,
                          projectId=None):
        """

        :type extraVolatileDiskGB: int
        :type runInitScriptSynchronously: bool
        :type sshPubKey: str
        :type imageId: str
        :type flavorIdOrName: str
        :type nodeName: str
        :type sshTimeout: float
        :rtype : NodeDetails
        """
        localPrivKey = localPrivKey or loadRsaPrivKey()

        # Note that this returned value (NodeDetails) contains the adminPass
        nodeDetails = self.createNode(
            nodeName,
            flavorIdOrName,
            imageId,
            sshPubKey,
            initScriptPathAndData=initScriptPathAndData,
            remoteUsername=remoteUsername,
            remoteUsergroup=remoteUsergroup,
            localPubKeyData=localPubKeyData,
            projectId=projectId)
        nodeId = nodeDetails.id
        nodeDetailsActive = self.waitCurrentStatus(nodeId, NodeStatus.BUILD)
        nodeDetails.updateIPsAndStatusFrom(nodeDetailsActive)

        # attach any additional disk
        hostIP = nodeDetails.ipv4s[0]
        if extraVolatileDiskGB:
            self.log(
                "Creating volatile disk of size %s GB for machine IP=%s, id=%s"
                % (extraVolatileDiskGB, hostIP, nodeId))
            volumeId = self.createVolume(nodeId, extraVolatileDiskGB,
                                         projectId)
            self.log(
                "Created volumeId=%s of size %s GB for machine IP=%s, id=%s" %
                (volumeId, extraVolatileDiskGB, hostIP, nodeId))
            # We do nothing more with the volumeId.
            # When the VM is destroyed by the IaaS, the extra disk is automatically destroyed as well.
        else:
            self.log(
                "No need for extra volatile disk for machine IP=%s, id=%s" %
                (hostIP, nodeId))

        # Sometimes, right after the node is reported ACTIVE, the network is unreachable or SSH is not immediately ready.
        # We have to cope with that by waiting.
        sshTimeout = sshTimeout or 7.0
        self.waitSshOnNode(nodeDetails,
                           username=remoteUsername,
                           localPrivKey=localPrivKey,
                           timeout=sshTimeout)

        initScriptPath = nodeDetails.initScriptPath

        runResult = self.runCommandOnNode(
            nodeDetails,
            initScriptPath,
            username=remoteUsername,
            localPrivKey=localPrivKey,
            runSynchronously=runInitScriptSynchronously)
        return nodeDetails, runResult

    def shutdownNode(self, nodeId):
        """
        :rtype : NodeDetails
        :type nodeId: str
        """
        self.log("Shutting down node %s" % nodeId)
        nodeDetails = self.getNodeDetails(nodeId)
        if not nodeDetails.status.isStopped():
            self.cycladesClient.shutdown_server(nodeId)
            self.log("Shutdown node %s status %s" %
                     (nodeId, nodeDetails.status.okeanosStatus))
        else:
            self.log("Node %s already shut down" % nodeId)
        return nodeDetails

    def shutdownNodeAndWait(self, nodeId):
        """
        :rtype : NodeDetails
        :type nodeId: str
        """
        nodeDetails = self.shutdownNode(nodeId)
        if not nodeDetails.status.isStopped():
            nodeDetailsWait = self.waitNodeStatus(nodeId, NodeStatus.STOPPED)
            nodeDetails.updateStatusFrom(nodeDetailsWait)
            self.log("Shutdown node %s status %s" %
                     (nodeId, nodeDetails.status.okeanosStatus))
        return nodeDetails

    def deleteNode(self, nodeId):
        """
        :rtype : NodeDetails
        :type nodeId: str
        """
        self.log("Deleting nodeId %s" % nodeId)
        nodeDetails = self.getNodeDetails(nodeId)
        if not nodeDetails.status.isDeleted():
            self.cycladesClient.delete_server(nodeId)
            self.log("Deleted node %s status %s" %
                     (nodeId, nodeDetails.status.okeanosStatus))
        return nodeDetails

    def deleteNodeAndWait(self, nodeId):
        """
        :rtype : NodeDetails
        :type nodeId: str
        """
        nodeDetails = self.deleteNode(nodeId)
        if not nodeDetails.status.isDeleted():
            nodeDetailsWait = self.waitNodeStatus(nodeId, NodeStatus.DELETED)
            nodeDetails.updateStatusFrom(nodeDetailsWait)
            self.log("Deleted node %s status %s" %
                     (nodeId, nodeDetails.status.okeanosStatus))
        return nodeDetails

    def log(self, msg=''):
        who = '%s::%s' % (self.__class__.__name__, inspect.stack()[1][3])
        LOG('%s# %s' % (who, msg))

    def getNodeIPv4(self, nodeId):
        nodeDetails = self.getNodeDetails(nodeId)
        ipv4 = nodeDetails.ipv4s[0]
        LOG("< for nodeId = %s, IPv4 = %s" % (nodeId, ipv4))
        return ipv4

    def getNodePartitions(self,
                          nodeId,
                          username='******',
                          localPrivKey=None,
                          timeout=None,
                          ssh=None):
        self.log("> nodeId = %s" % nodeId)
        ipv4 = self.getNodeIPv4(nodeId)
        status, partitions = getHostPartitions(ipv4,
                                               username=username,
                                               localPrivKey=localPrivKey,
                                               timeout=timeout,
                                               ssh=ssh)
        self.log("< status = %s, partitions = %s" % (status, partitions))
        return status, partitions

    def waitForExtraNodePartition(self,
                                  serverId,
                                  partitions,
                                  username='******',
                                  localPrivKey=None,
                                  timeout=None,
                                  ssh=None):
        """
        Given the set of pre-existing partitions, we wait until a new one appears and then we return it.
        :param serverId: str
        :param partitions: set[str]
        :return: the extra partition. prepend '/dev/' to get the device name
        """
        def getem():
            return self.getNodePartitions(serverId,
                                          username=username,
                                          localPrivKey=localPrivKey,
                                          timeout=timeout,
                                          ssh=ssh)

        self.log("Waiting, current partitions: %s" % partitions)
        status1, partitions1 = getem()
        if status1 != 0:
            return None

        while partitions == partitions1:
            self.log("Looping, new partitions: %s" % partitions1)
            status1, partitions1 = getem()
            if status1 != 0:
                return None

        # We assume one more is added ...
        newPartition = partitions1.difference(partitions)
        self.log("< For serverId = %s, new partition = %s" %
                 (serverId, newPartition))
        return newPartition

    def resizeNode(self, serverId, flavorIdOrName):
        flavorId = self.getFlavorId(flavorIdOrName)
        nodeDetails = self.getNodeDetails(serverId)
        self.log("Node %s status is %s" %
                 (serverId, nodeDetails.status.okeanosStatus))
        existingFlavorId = nodeDetails.flavorId
        self.log("Requested flavorId = %s [given: %s]" %
                 (flavorId, flavorIdOrName))

        if existingFlavorId == flavorId:
            self.log("FlavorId already is %s, no resizing action is needed !" %
                     flavorId)
            return

        t0 = time.time()

        self.log("Resizing from %s -> %s" % (existingFlavorId, flavorId))
        # Hot resizing is not supported, so we must shut the server down first
        self.log("Shutting down node %s" % serverId)
        nodeDetails = self.shutdownNodeAndWait(serverId)
        self.log("Node %s status is %s" %
                 (serverId, nodeDetails.status.okeanosStatus))

        # This takes the server to status 'RESIZE'
        self.log("Resizing node %s ..." % serverId)
        resizeResponse = self.cycladesClient.resize_server(serverId, flavorId)
        self.log("resizeResponse = %s" % resizeResponse)

        # wait until the server acquires the new flavor
        nodeDetails = self.getNodeDetails(serverId)
        while nodeDetails.flavorId != flavorId:
            nodeDetails = self.getNodeDetails(serverId)
        self.log("Node %s resized" % serverId)

        # And we are now ready to restart with the new flavor
        self.log("Restarting node %s" % serverId)
        self.cycladesClient.start_server(serverId)
        self.waitNodeStatus(serverId, NodeStatus.ACTIVE)

        t1 = time.time()
        dtsec = t1 - t0
        self.log("Node %s restarted with new flavor %s in %s sec" %
                 (serverId, flavorId, dtsec))
        return flavorId
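
# A minimal usage sketch for OkeanosNativeClient, assuming a valid ~okeanos API
# token; the token and server id below are hypothetical placeholders:
#
#     client = OkeanosNativeClient('<user token>')
#     details = client.getNodeDetails('<server id>')
#     print details.status.okeanosStatus
#     client.shutdownNodeAndWait('<server id>')
#     client.deleteNodeAndWait('<server id>')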
Esempio n. 21
0
from base64 import b64encode
from lib.persistance_module import *
from logging import ERROR

USER = "******"

# retrieve the credentials for the specified user
AUTHENTICATION_URL, TOKEN = get_credentials(USER)

synnefo_user = AstakosClient(AUTHENTICATION_URL, TOKEN)
synnefo_user.logger.setLevel(ERROR)
getLogger().setLevel(ERROR)

cyclades_endpoints = synnefo_user.get_service_endpoints("compute")
CYCLADES_URL = cyclades_endpoints['publicURL']
cyclades_client = CycladesClient(CYCLADES_URL, TOKEN)
cyclades_net_client = CycladesNetworkClient(CYCLADES_URL, TOKEN)


pub_keys_path = 'keys/just_a_key.pub'
priv_keys_path = 'keys/just_a_key'


#creates a "personality"
def personality(username):
    """
    :param pub_keys_path: a path to the public key(s) to be used for this personality
    :param ssh_keys_path: a path to the private key(s) to be used for this personality
    """
    personality = []
    with open(abspath(pub_keys_path)) as f:
Esempio n. 22
0
import psycopg2
from kamaki.clients.astakos import AstakosClient
from kamaki.clients.cyclades import CycladesClient
from logging import getLogger, ERROR


# init synnefo stuff

AUTHENTICATION_URL="https://accounts.okeanos.grnet.gr/identity/v2.0"
TOKEN="hYDRO-FEV5d8wFxpOID-DF3_FWhsuD8dvTdbEX2qQRQ"
synnefo_user = AstakosClient(AUTHENTICATION_URL, TOKEN)
synnefo_user.logger.setLevel(ERROR)
getLogger().setLevel(ERROR)
cyclades_endpoints = synnefo_user.get_service_endpoints('compute')
CYCLADES_URL = cyclades_endpoints['publicURL']
cyclades_client = CycladesClient(CYCLADES_URL, TOKEN)

# connect to db

db = psycopg2.connect(host="127.0.0.1", user="******", password="******", database="celardb")
cursor = db.cursor()


#clear all old values from the DB
cursor.execute("DELETE FROM \"RESOURCE_TYPE\" WHERE TRUE")
cursor.execute("DELETE FROM \"SPECS\" WHERE TRUE")
cursor.execute("DELETE FROM \"PROVIDED_RESOURCE\" WHERE TRUE")


# add 'VM_FLAVOR' entry on the RESOURCE_TYPE table
cursor.execute("INSERT INTO \"RESOURCE_TYPE\" VALUES (1, 'VM_FLAVOR')"  )
Esempio n. 23
0
class SynnefoCI(object):
    """SynnefoCI python class"""
    def __init__(self, config_file=None, build_id=None, cloud=None):
        """ Initialize SynnefoCI python class

        Setup logger, local_dir, config and kamaki
        """
        # Setup logger
        self.logger = logging.getLogger('synnefo-ci')
        self.logger.setLevel(logging.DEBUG)

        handler1 = logging.StreamHandler(sys.stdout)
        handler1.setLevel(logging.DEBUG)
        handler1.addFilter(_InfoFilter())
        handler1.setFormatter(_MyFormatter())
        handler2 = logging.StreamHandler(sys.stderr)
        handler2.setLevel(logging.WARNING)
        handler2.setFormatter(_MyFormatter())

        self.logger.addHandler(handler1)
        self.logger.addHandler(handler2)

        # Get our local dir
        self.ci_dir = os.path.dirname(os.path.abspath(__file__))
        self.repo_dir = os.path.dirname(self.ci_dir)

        # Read config file
        if config_file is None:
            config_file = os.path.join(self.ci_dir, DEFAULT_CONFIG_FILE)
        config_file = os.path.abspath(config_file)
        self.config = ConfigParser()
        self.config.optionxform = str
        self.config.read(config_file)

        # Read temporary_config file
        self.temp_config_file = \
            os.path.expanduser(self.config.get('Global', 'temporary_config'))
        self.temp_config = ConfigParser()
        self.temp_config.optionxform = str
        self.temp_config.read(self.temp_config_file)
        self.build_id = build_id
        if build_id is not None:
            self.logger.info("Will use \"%s\" as build id" %
                             _green(self.build_id))

        # Set kamaki cloud
        if cloud is not None:
            self.kamaki_cloud = cloud
        elif self.config.has_option("Deployment", "kamaki_cloud"):
            kamaki_cloud = self.config.get("Deployment", "kamaki_cloud")
            if kamaki_cloud == "":
                self.kamaki_cloud = None
            else:
                self.kamaki_cloud = kamaki_cloud
        else:
            self.kamaki_cloud = None

        # Initialize variables
        self.fabric_installed = False
        self.kamaki_installed = False
        self.cyclades_client = None
        self.network_client = None
        self.compute_client = None
        self.image_client = None
        self.astakos_client = None

    def setup_kamaki(self):
        """Initialize kamaki

        Setup cyclades_client, image_client and compute_client
        """

        config = kamaki_config.Config()
        if self.kamaki_cloud is None:
            try:
                self.kamaki_cloud = config.get("global", "default_cloud")
            except AttributeError:
                # Compatibility with kamaki version <=0.10
                self.kamaki_cloud = config.get("global", "default_cloud")

        self.logger.info("Setup kamaki client, using cloud '%s'.." %
                         self.kamaki_cloud)
        auth_url = config.get_cloud(self.kamaki_cloud, "url")
        self.logger.debug("Authentication URL is %s" % _green(auth_url))
        token = config.get_cloud(self.kamaki_cloud, "token")
        #self.logger.debug("Token is %s" % _green(token))

        self.astakos_client = AstakosClient(auth_url, token)
        endpoints = self.astakos_client.authenticate()

        cyclades_url = get_endpoint_url(endpoints, "compute")
        self.logger.debug("Cyclades API url is %s" % _green(cyclades_url))
        self.cyclades_client = CycladesClient(cyclades_url, token)
        self.cyclades_client.CONNECTION_RETRY_LIMIT = 2

        network_url = get_endpoint_url(endpoints, "network")
        self.logger.debug("Network API url is %s" % _green(network_url))
        self.network_client = CycladesNetworkClient(network_url, token)
        self.network_client.CONNECTION_RETRY_LIMIT = 2

        image_url = get_endpoint_url(endpoints, "image")
        self.logger.debug("Images API url is %s" % _green(image_url))
        self.image_client = ImageClient(image_url, token)
        self.image_client.CONNECTION_RETRY_LIMIT = 2

        compute_url = get_endpoint_url(endpoints, "compute")
        self.logger.debug("Compute API url is %s" % _green(compute_url))
        self.compute_client = ComputeClient(compute_url, token)
        self.compute_client.CONNECTION_RETRY_LIMIT = 2

    def _wait_transition(self, server_id, current_status, new_status):
        """Wait for server to go from current_status to new_status"""
        self.logger.debug("Waiting for server to become %s" % new_status)
        timeout = self.config.getint('Global', 'build_timeout')
        sleep_time = 5
        while True:
            server = self.cyclades_client.get_server_details(server_id)
            if server['status'] == new_status:
                return server
            elif timeout < 0:
                self.logger.error("Waiting for server to become %s timed out" %
                                  new_status)
                self.destroy_server(False)
                sys.exit(1)
            elif server['status'] == current_status:
                # Sleep for #n secs and continue
                timeout = timeout - sleep_time
                time.sleep(sleep_time)
            else:
                self.logger.error("Server failed with status %s" %
                                  server['status'])
                self.destroy_server(False)
                sys.exit(1)

    @_check_kamaki
    def destroy_server(self, wait=True):
        """Destroy slave server"""
        server_id = int(self.read_temp_config('server_id'))
        fips = [
            f for f in self.network_client.list_floatingips()
            if str(f['instance_id']) == str(server_id)
        ]
        self.logger.info("Destoying server with id %s " % server_id)
        self.cyclades_client.delete_server(server_id)
        if wait:
            self._wait_transition(server_id, "ACTIVE", "DELETED")
        for fip in fips:
            self.logger.info("Destroying floating ip %s",
                             fip['floating_ip_address'])
            self.network_client.delete_floatingip(fip['id'])

    def _create_floating_ip(self):
        """Create a new floating ip"""
        networks = self.network_client.list_networks(detail=True)
        pub_nets = [
            n for n in networks if n['SNF:floating_ip_pool'] and n['public']
        ]
        for pub_net in pub_nets:
            # Try until we find a public network that is not full
            try:
                fip = self.network_client.create_floatingip(pub_net['id'])
            except ClientError as err:
                self.logger.warning("%s: %s", err.message, err.details)
                continue
            self.logger.debug("Floating IP %s with id %s created",
                              fip['floating_ip_address'], fip['id'])
            return fip
        self.logger.error("No mor IP addresses available")
        sys.exit(1)

    def _create_port(self, floating_ip):
        """Create a new port for our floating IP"""
        net_id = floating_ip['floating_network_id']
        self.logger.debug("Creating a new port to network with id %s", net_id)
        fixed_ips = [{'ip_address': floating_ip['floating_ip_address']}]
        port = self.network_client.create_port(net_id,
                                               device_id=None,
                                               fixed_ips=fixed_ips)
        return port

    @_check_kamaki
    # Too many local variables. pylint: disable-msg=R0914
    def create_server(self,
                      image=None,
                      flavor=None,
                      ssh_keys=None,
                      server_name=None):
        """Create slave server"""
        self.logger.info("Create a new server..")

        # Find a build_id to use
        self._create_new_build_id()

        # Find an image to use
        image_id = self._find_image(image)
        # Find a flavor to use
        flavor_id = self._find_flavor(flavor)

        # Create Server
        networks = []
        if self.config.get("Deployment", "allocate_floating_ip") == "True":
            fip = self._create_floating_ip()
            port = self._create_port(fip)
            networks.append({'port': port['id']})
        private_networks = self.config.get('Deployment', 'private_networks')
        if private_networks:
            private_networks = [p.strip() for p in private_networks.split(",")]
            networks.extend([{"uuid": uuid} for uuid in private_networks])
        if server_name is None:
            server_name = self.config.get("Deployment", "server_name")
            server_name = "%s(BID: %s)" % (server_name, self.build_id)
        server = self.cyclades_client.create_server(server_name,
                                                    flavor_id,
                                                    image_id,
                                                    networks=networks)
        server_id = server['id']
        self.write_temp_config('server_id', server_id)
        self.logger.debug("Server got id %s" % _green(server_id))
        server_user = server['metadata']['users']
        self.write_temp_config('server_user', server_user)
        self.logger.debug("Server's admin user is %s" % _green(server_user))
        server_passwd = server['adminPass']
        self.write_temp_config('server_passwd', server_passwd)

        server = self._wait_transition(server_id, "BUILD", "ACTIVE")
        self._get_server_ip_and_port(server, private_networks)
        self._copy_ssh_keys(ssh_keys)

        # Setup Firewall
        self.setup_fabric()
        self.logger.info("Setup firewall")
        accept_ssh_from = self.config.get('Global', 'accept_ssh_from')
        if accept_ssh_from != "":
            self.logger.debug("Block ssh except from %s" % accept_ssh_from)
            cmd = """
            local_ip=$(/sbin/ifconfig eth0 | grep 'inet addr:' | \
                cut -d':' -f2 | cut -d' ' -f1)
            iptables -A INPUT -s localhost -j ACCEPT
            iptables -A INPUT -s $local_ip -j ACCEPT
            iptables -A INPUT -s {0} -p tcp --dport 22 -j ACCEPT
            iptables -A INPUT -p tcp --dport 22 -j DROP
            """.format(accept_ssh_from)
            _run(cmd, False)

        # Setup apt, download packages
        self.logger.debug("Setup apt. Install x2goserver and firefox")
        cmd = """
        echo 'APT::Install-Suggests "false";' >> /etc/apt/apt.conf
        echo 'precedence ::ffff:0:0/96  100' >> /etc/gai.conf
        apt-get update
        apt-get install curl --yes --force-yes
        echo -e "\n\n{0}" >> /etc/apt/sources.list
        # Synnefo repo's key
        curl https://dev.grnet.gr/files/apt-grnetdev.pub | apt-key add -

        # X2GO Key
        apt-key adv --recv-keys --keyserver keys.gnupg.net E1F958385BFE2B6E
        apt-get install x2go-keyring --yes --force-yes
        apt-get update
        apt-get install x2goserver x2goserver-xsession \
                iceweasel --yes --force-yes

        # xterm published application
        echo '[Desktop Entry]' > /usr/share/applications/xterm.desktop
        echo 'Name=XTerm' >> /usr/share/applications/xterm.desktop
        echo 'Comment=standard terminal emulator for the X window system' >> \
            /usr/share/applications/xterm.desktop
        echo 'Exec=xterm' >> /usr/share/applications/xterm.desktop
        echo 'Terminal=false' >> /usr/share/applications/xterm.desktop
        echo 'Type=Application' >> /usr/share/applications/xterm.desktop
        echo 'Encoding=UTF-8' >> /usr/share/applications/xterm.desktop
        echo 'Icon=xterm-color_48x48' >> /usr/share/applications/xterm.desktop
        echo 'Categories=System;TerminalEmulator;' >> \
                /usr/share/applications/xterm.desktop
        """.format(self.config.get('Global', 'apt_repo'))
        _run(cmd, False)

    def _find_flavor(self, flavor=None):
        """Find a suitable flavor to use

        Search by name (reg expression) or by id
        """
        # Get a list of flavors from config file
        flavors = self.config.get('Deployment', 'flavors').split(",")
        if flavor is not None:
            # If we have a flavor_name to use, add it to our list
            flavors.insert(0, flavor)

        list_flavors = self.compute_client.list_flavors()
        for flv in flavors:
            flv_type, flv_value = parse_typed_option(option="flavor",
                                                     value=flv)
            if flv_type == "name":
                # Filter flavors by name
                self.logger.debug("Trying to find a flavor with name \"%s\"" %
                                  flv_value)
                list_flvs = \
                    [f for f in list_flavors
                     if re.search(flv_value, f['name'], flags=re.I)
                     is not None]
            elif flv_type == "id":
                # Filter flavors by id
                self.logger.debug("Trying to find a flavor with id \"%s\"" %
                                  flv_value)
                list_flvs = \
                    [f for f in list_flavors
                     if str(f['id']) == flv_value]
            else:
                self.logger.error("Unrecognized flavor type %s" % flv_type)

            # Check if we found one
            if list_flvs:
                self.logger.debug(
                    "Will use \"%s\" with id \"%s\"" %
                    (_green(list_flvs[0]['name']), _green(list_flvs[0]['id'])))
                return list_flvs[0]['id']

        self.logger.error("No matching flavor found.. aborting")
        sys.exit(1)

    def _find_image(self, image=None):
        """Find a suitable image to use

        In case of search by name, the image has to belong to one
        of the `DEFAULT_SYSTEM_IMAGES_UUID' users.
        In case of search by id it only has to exist.
        """
        # Get a list of images from config file
        images = self.config.get('Deployment', 'images').split(",")
        if image is not None:
            # If we have an image from command line, add it to our list
            images.insert(0, image)

        auth = self.astakos_client.authenticate()
        user_uuid = auth["access"]["token"]["tenant"]["id"]
        list_images = self.image_client.list_public(detail=True)['images']
        for img in images:
            img_type, img_value = parse_typed_option(option="image", value=img)
            if img_type == "name":
                # Filter images by name
                self.logger.debug("Trying to find an image with name \"%s\"" %
                                  img_value)
                accepted_uuids = DEFAULT_SYSTEM_IMAGES_UUID + [user_uuid]
                list_imgs = \
                    [i for i in list_images if i['user_id'] in accepted_uuids
                     and
                     re.search(img_value, i['name'], flags=re.I) is not None]
            elif img_type == "id":
                # Filter images by id
                self.logger.debug("Trying to find an image with id \"%s\"" %
                                  img_value)
                list_imgs = \
                    [i for i in list_images
                     if i['id'].lower() == img_value.lower()]
            else:
                self.logger.error("Unrecognized image type %s" % img_type)
                sys.exit(1)

            # Check if we found one
            if list_imgs:
                self.logger.debug(
                    "Will use \"%s\" with id \"%s\"" %
                    (_green(list_imgs[0]['name']), _green(list_imgs[0]['id'])))
                return list_imgs[0]['id']

        # We didn't find one
        self.logger.error("No matching image found.. aborting")
        sys.exit(1)

    def _get_server_ip_and_port(self, server, private_networks):
        """Compute server's IPv4 and ssh port number"""
        self.logger.info("Get server connection details..")
        if private_networks:
            # Choose the networks that belong to private_networks
            networks = [
                n for n in server['attachments']
                if n['network_id'] in private_networks
            ]
        else:
            # Choose the networks that are public
            networks = [
                n for n in server['attachments']
                if self.network_client.get_network_details(n['network_id'])
                ['public']
            ]
        # Choose the networks with IPv4
        networks = [n for n in networks if n['ipv4']]
        # Use the first network as IPv4
        server_ip = networks[0]['ipv4']

        if (".okeanos.io" in self.cyclades_client.base_url
                or ".demo.synnefo.org" in self.cyclades_client.base_url):
            tmp1 = int(server_ip.split(".")[2])
            tmp2 = int(server_ip.split(".")[3])
            server_ip = "gate.okeanos.io"
            server_port = 10000 + tmp1 * 256 + tmp2
        else:
            server_port = 22
        self.write_temp_config('server_ip', server_ip)
        self.logger.debug("Server's IPv4 is %s" % _green(server_ip))
        self.write_temp_config('server_port', server_port)
        self.logger.debug("Server's ssh port is %s" % _green(server_port))
        ssh_command = "ssh -p %s %s@%s" \
            % (server_port, server['metadata']['users'], server_ip)
        self.logger.debug("Access server using \"%s\"" % (_green(ssh_command)))

    @_check_fabric
    def _copy_ssh_keys(self, ssh_keys):
        """Upload/Install ssh keys to server"""
        self.logger.debug("Check for authentication keys to use")
        if ssh_keys is None:
            ssh_keys = self.config.get("Deployment", "ssh_keys")

        if ssh_keys != "":
            ssh_keys = os.path.expanduser(ssh_keys)
            self.logger.debug("Will use \"%s\" authentication keys file" %
                              _green(ssh_keys))
            keyfile = '/tmp/%s.pub' % fabric.env.user
            _run('mkdir -p ~/.ssh && chmod 700 ~/.ssh', False)
            if ssh_keys.startswith("http://") or \
                    ssh_keys.startswith("https://") or \
                    ssh_keys.startswith("ftp://"):
                cmd = """
                apt-get update
                apt-get install wget --yes --force-yes
                wget {0} -O {1} --no-check-certificate
                """.format(ssh_keys, keyfile)
                _run(cmd, False)
            elif os.path.exists(ssh_keys):
                _put(ssh_keys, keyfile)
            else:
                self.logger.debug("No ssh keys found")
                return
            _run('cat %s >> ~/.ssh/authorized_keys' % keyfile, False)
            _run('rm %s' % keyfile, False)
            self.logger.debug("Uploaded ssh authorized keys")
        else:
            self.logger.debug("No ssh keys found")

    def _create_new_build_id(self):
        """Find a uniq build_id to use"""
        with filelocker.lock("%s.lock" % self.temp_config_file,
                             filelocker.LOCK_EX):
            # Read temp_config again to get any new entries
            self.temp_config.read(self.temp_config_file)

            # Find a unique build_id to use
            if self.build_id is None:
                ids = self.temp_config.sections()
                if ids:
                    max_id = int(max(self.temp_config.sections(), key=int))
                    self.build_id = max_id + 1
                else:
                    self.build_id = 1
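            # Example: if the temp config already holds sections "1", "2" and
            # "7", the next build id becomes max(1, 2, 7) + 1 = 8.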
            self.logger.debug("Will use \"%s\" as build id" %
                              _green(self.build_id))

            # Create a new section
            try:
                self.temp_config.add_section(str(self.build_id))
            except DuplicateSectionError:
                msg = ("Build id \"%s\" already in use. " +
                       "Please use a uniq one or cleanup \"%s\" file.\n") \
                    % (self.build_id, self.temp_config_file)
                self.logger.error(msg)
                sys.exit(1)
            creation_time = \
                time.strftime("%a, %d %b %Y %X", time.localtime())
            self.temp_config.set(str(self.build_id), "created",
                                 str(creation_time))

            # Write changes back to temp config file
            with open(self.temp_config_file, 'wb') as tcf:
                self.temp_config.write(tcf)

    def write_temp_config(self, option, value):
        """Write changes back to config file"""
        # Acquire the lock to write to temp_config_file
        with filelocker.lock("%s.lock" % self.temp_config_file,
                             filelocker.LOCK_EX):

            # Read temp_config again to get any new entries
            self.temp_config.read(self.temp_config_file)

            self.temp_config.set(str(self.build_id), option, str(value))
            curr_time = time.strftime("%a, %d %b %Y %X", time.localtime())
            self.temp_config.set(str(self.build_id), "modified", curr_time)

            # Write changes back to temp config file
            with open(self.temp_config_file, 'wb') as tcf:
                self.temp_config.write(tcf)

    def read_temp_config(self, option):
        """Read from temporary_config file"""
        # If build_id is None use the latest one
        if self.build_id is None:
            ids = self.temp_config.sections()
            if ids:
                self.build_id = int(ids[-1])
            else:
                self.logger.error("No sections in temporary config file")
                sys.exit(1)
            self.logger.debug("Will use \"%s\" as build id" %
                              _green(self.build_id))
        # Read specified option
        return self.temp_config.get(str(self.build_id), option)

    def setup_fabric(self):
        """Setup fabric environment"""
        self.logger.info("Setup fabric parameters..")
        fabric.env.user = self.read_temp_config('server_user')
        fabric.env.host_string = self.read_temp_config('server_ip')
        fabric.env.port = int(self.read_temp_config('server_port'))
        fabric.env.password = self.read_temp_config('server_passwd')
        fabric.env.connection_attempts = 10
        fabric.env.shell = "/bin/bash -c"
        fabric.env.disable_known_hosts = True
        fabric.env.output_prefix = None

    def _check_hash_sum(self, localfile, remotefile):
        """Check hash sums of two files"""
        self.logger.debug("Check hash sum for local file %s" % localfile)
        hash1 = os.popen("sha256sum %s" % localfile).read().split(' ')[0]
        self.logger.debug("Local file has sha256 hash %s" % hash1)
        self.logger.debug("Check hash sum for remote file %s" % remotefile)
        hash2 = _run("sha256sum %s" % remotefile, False)
        hash2 = hash2.split(' ')[0]
        self.logger.debug("Remote file has sha256 hash %s" % hash2)
        if hash1 != hash2:
            self.logger.error("Hashes differ.. aborting")
            sys.exit(1)

    @_check_fabric
    def clone_repo(self, local_repo=False):
        """Clone Synnefo repo from slave server"""
        self.logger.info("Configure repositories on remote server..")
        self.logger.debug("Install/Setup git")
        cmd = """
        apt-get install git --yes --force-yes
        git config --global user.name {0}
        git config --global user.email {1}
        """.format(self.config.get('Global', 'git_config_name'),
                   self.config.get('Global', 'git_config_mail'))
        _run(cmd, False)

        # Clone synnefo_repo
        synnefo_branch = self.clone_synnefo_repo(local_repo=local_repo)
        # Clone pithos-web-client
        self.clone_pithos_webclient_repo(synnefo_branch)

    @_check_fabric
    def clone_synnefo_repo(self, local_repo=False):
        """Clone Synnefo repo to remote server"""
        # Find synnefo_repo and synnefo_branch to use
        synnefo_repo = self.config.get('Global', 'synnefo_repo')
        synnefo_branch = self.config.get("Global", "synnefo_branch")
        if synnefo_branch == "":
            synnefo_branch = \
                subprocess.Popen(
                    ["git", "rev-parse", "--abbrev-ref", "HEAD"],
                    stdout=subprocess.PIPE).communicate()[0].strip()
            if synnefo_branch == "HEAD":
                synnefo_branch = \
                    subprocess.Popen(
                        ["git", "rev-parse", "--short", "HEAD"],
                        stdout=subprocess.PIPE).communicate()[0].strip()
        self.logger.debug("Will use branch \"%s\"" % _green(synnefo_branch))

        if local_repo or synnefo_repo == "":
            # Use local_repo
            self.logger.debug("Push local repo to server")
            # Firstly create the remote repo
            _run("git init synnefo", False)
            # Then push our local repo over ssh
            # We have to pass some arguments to ssh command
            # namely to disable host checking.
            (temp_ssh_file_handle, temp_ssh_file) = tempfile.mkstemp()
            os.close(temp_ssh_file_handle)
            # XXX: git push doesn't read the password
            cmd = """
            echo 'exec ssh -o "StrictHostKeyChecking no" \
                           -o "UserKnownHostsFile /dev/null" \
                           -q "$@"' > {4}
            chmod u+x {4}
            export GIT_SSH="{4}"
            echo "{0}" | git push --quiet --mirror ssh://{1}@{2}:{3}/~/synnefo
            rm -f {4}
            """.format(fabric.env.password, fabric.env.user,
                       fabric.env.host_string, fabric.env.port, temp_ssh_file)
            os.system(cmd)
        else:
            # Clone Synnefo from remote repo
            self.logger.debug("Clone synnefo from %s" % synnefo_repo)
            self._git_clone(synnefo_repo)

        # Checkout the desired synnefo_branch
        self.logger.debug("Checkout \"%s\" branch/commit" % synnefo_branch)
        cmd = """
        cd synnefo
        for branch in `git branch -a | grep remotes | grep -v HEAD`; do
            git branch --track ${branch##*/} $branch
        done
        git checkout %s
        """ % (synnefo_branch)
        _run(cmd, False)

        return synnefo_branch

    @_check_fabric
    def clone_pithos_webclient_repo(self, synnefo_branch):
        """Clone Pithos WebClient repo to remote server"""
        # Find pithos_webclient_repo and pithos_webclient_branch to use
        pithos_webclient_repo = \
            self.config.get('Global', 'pithos_webclient_repo')
        pithos_webclient_branch = \
            self.config.get('Global', 'pithos_webclient_branch')

        # Clone pithos-webclient from remote repo
        self.logger.debug("Clone pithos-webclient from %s" %
                          pithos_webclient_repo)
        self._git_clone(pithos_webclient_repo)

        # Track all pithos-webclient branches
        cmd = """
        cd pithos-web-client
        for branch in `git branch -a | grep remotes | grep -v HEAD`; do
            git branch --track ${branch##*/} $branch > /dev/null 2>&1
        done
        git --no-pager branch --no-color
        """
        webclient_branches = _run(cmd, False)
        webclient_branches = webclient_branches.split()

        # If we have pithos_webclient_branch in config file use this one
        # else try to use the same branch as synnefo_branch
        # else use an appropriate one.
        if pithos_webclient_branch == "":
            if synnefo_branch in webclient_branches:
                pithos_webclient_branch = synnefo_branch
            else:
                # If synnefo_branch starts with one of
                # 'master', 'hotfix'; use the master branch
                if synnefo_branch.startswith('master') or \
                        synnefo_branch.startswith('hotfix'):
                    pithos_webclient_branch = "master"
                # If synnefo_branch starts with one of
                # 'develop', 'feature'; use the develop branch
                elif synnefo_branch.startswith('develop') or \
                        synnefo_branch.startswith('feature'):
                    pithos_webclient_branch = "develop"
                else:
                    self.logger.warning(
                        "Cannot determine which pithos-web-client branch to "
                        "use based on \"%s\" synnefo branch. "
                        "Will use develop." % synnefo_branch)
                    pithos_webclient_branch = "develop"
        # Checkout branch
        self.logger.debug("Checkout \"%s\" branch" %
                          _green(pithos_webclient_branch))
        cmd = """
        cd pithos-web-client
        git checkout {0}
        """.format(pithos_webclient_branch)
        _run(cmd, False)

    def _git_clone(self, repo):
        """Clone repo to remote server

        Cloning from code.grnet.gr can currently fail unexpectedly,
        so retry a few times.

        """
        cloned = False
        for i in range(1, 11):
            try:
                _run("git clone %s" % repo, False)
                cloned = True
                break
            except BaseException:
                self.logger.warning("Clonning failed.. retrying %s/10" % i)
        if not cloned:
            self.logger.error("Can not clone repo.")
            sys.exit(1)

    @_check_fabric
    def build_packages(self):
        """Build packages needed by Synnefo software"""
        self.logger.info("Install development packages")
        cmd = """
        apt-get update
        apt-get install zlib1g-dev dpkg-dev debhelper git-buildpackage \
                python-dev python-all python-pip ant --yes --force-yes
        pip install -U devflow
        """
        _run(cmd, False)

        # Patch pydist bug
        if self.config.get('Global', 'patch_pydist') == "True":
            self.logger.debug("Patch pydist.py module")
            cmd = r"""
            sed -r -i 's/(\(\?P<name>\[A-Za-z\]\[A-Za-z0-9_\.)/\1\\\-/' \
                /usr/share/python/debpython/pydist.py
            """
            _run(cmd, False)

        # Build synnefo packages
        self.build_synnefo()
        # Build pithos-web-client packages
        self.build_pithos_webclient()

    @_check_fabric
    def build_synnefo(self):
        """Build Synnefo packages"""
        self.logger.info("Build Synnefo packages..")

        cmd = """
        devflow-autopkg snapshot -b ~/synnefo_build-area --no-sign
        """
        with fabric.cd("synnefo"):
            _run(cmd, True)

        # Install snf-deploy package
        self.logger.debug("Install snf-deploy package")
        cmd = """
        dpkg -i snf-deploy*.deb
        apt-get -f install --yes --force-yes
        """
        with fabric.cd("synnefo_build-area"):
            with fabric.settings(warn_only=True):
                _run(cmd, True)

        # Setup synnefo packages for snf-deploy
        self.logger.debug("Copy synnefo debs to snf-deploy packages dir")
        cmd = """
        cp ~/synnefo_build-area/*.deb /var/lib/snf-deploy/packages/
        """
        _run(cmd, False)

    @_check_fabric
    def build_pithos_webclient(self):
        """Build pithos-web-client packages"""
        self.logger.info("Build pithos-web-client packages..")

        cmd = """
        devflow-autopkg snapshot -b ~/webclient_build-area --no-sign
        """
        with fabric.cd("pithos-web-client"):
            _run(cmd, True)

        # Setup pithos-web-client packages for snf-deploy
        self.logger.debug("Copy webclient debs to snf-deploy packages dir")
        cmd = """
        cp ~/webclient_build-area/*.deb /var/lib/snf-deploy/packages/
        """
        _run(cmd, False)

    @_check_fabric
    def build_documentation(self):
        """Build Synnefo documentation"""
        self.logger.info("Build Synnefo documentation..")
        _run("pip install -U Sphinx", False)
        with fabric.cd("synnefo"):
            _run(
                "devflow-update-version; "
                "./ci/make_docs.sh synnefo_documentation", False)

    def fetch_documentation(self, dest=None):
        """Fetch Synnefo documentation"""
        self.logger.info("Fetch Synnefo documentation..")
        if dest is None:
            dest = "synnefo_documentation"
        dest = os.path.abspath(dest)
        if not os.path.exists(dest):
            os.makedirs(dest)
        self.fetch_compressed("synnefo/synnefo_documentation", dest)
        self.logger.info("Downloaded documentation to %s" % _green(dest))

    @_check_fabric
    def deploy_synnefo(self, schema=None):
        """Deploy Synnefo using snf-deploy"""
        self.logger.info("Deploy Synnefo..")
        if schema is None:
            schema = self.config.get('Global', 'schema')
        self.logger.debug("Will use \"%s\" schema" % _green(schema))

        schema_dir = os.path.join(self.ci_dir, "schemas/%s" % schema)
        if not (os.path.exists(schema_dir) and os.path.isdir(schema_dir)):
            raise ValueError("Unknown schema: %s" % schema)

        self.logger.debug("Upload schema files to server")
        _put(os.path.join(schema_dir, "*"), "/etc/snf-deploy/")

        self.logger.debug("Change password in nodes.conf file")
        cmd = """
        sed -i 's/^password =.*/password = {0}/' /etc/snf-deploy/nodes.conf
        """.format(fabric.env.password)
        _run(cmd, False)

        self.logger.debug("Run snf-deploy")
        cmd = """
        snf-deploy keygen --force
        snf-deploy --disable-colors --autoconf all
        """
        _run(cmd, True)

    @_check_fabric
    def unit_test(self):
        """Run Synnefo unit test suite"""
        self.logger.info("Run Synnefo unit test suite")
        component = self.config.get('Unit Tests', 'component')

        self.logger.debug("Install needed packages")
        cmd = """
        pip install -U mock
        pip install -U factory_boy
        pip install -U nose
        """
        _run(cmd, False)

        self.logger.debug("Upload tests.sh file")
        unit_tests_file = os.path.join(self.ci_dir, "tests.sh")
        _put(unit_tests_file, ".")

        self.logger.debug("Run unit tests")
        cmd = """
        bash tests.sh {0}
        """.format(component)
        _run(cmd, True)

    @_check_fabric
    def run_burnin(self):
        """Run burnin functional test suite"""
        self.logger.info("Run Burnin functional test suite")
        cmd = """
        auth_url=$(grep -e '^url =' .kamakirc | cut -d' ' -f3)
        token=$(grep -e '^token =' .kamakirc | cut -d' ' -f3)
        images_user=$(kamaki image list -l | grep owner | \
                      cut -d':' -f2 | tr -d ' ')
        snf-burnin --auth-url=$auth_url --token=$token {0}
        BurninExitStatus=$?
        exit $BurninExitStatus
        """.format(self.config.get('Burnin', 'cmd_options'))
        _run(cmd, True)

    @_check_fabric
    def fetch_compressed(self, src, dest=None):
        """Create a tarball and fetch it locally"""
        self.logger.debug("Creating tarball of %s" % src)
        basename = os.path.basename(src)
        tar_file = basename + ".tgz"
        cmd = "tar czf %s %s" % (tar_file, src)
        _run(cmd, False)
        if not os.path.exists(dest):
            os.makedirs(dest)

        tmp_dir = tempfile.mkdtemp()
        fabric.get(tar_file, tmp_dir)

        dest_file = os.path.join(tmp_dir, tar_file)
        self._check_hash_sum(dest_file, tar_file)
        self.logger.debug("Untar packages file %s" % dest_file)
        cmd = """
        cd %s
        tar xzf %s
        cp -r %s/* %s
        rm -r %s
        """ % (tmp_dir, tar_file, src, dest, tmp_dir)
        os.system(cmd)
        self.logger.info("Downloaded %s to %s" % (src, _green(dest)))

    @_check_fabric
    def fetch_packages(self, dest=None):
        """Fetch Synnefo packages"""
        if dest is None:
            dest = self.config.get('Global', 'pkgs_dir')
        dest = os.path.abspath(os.path.expanduser(dest))
        if not os.path.exists(dest):
            os.makedirs(dest)
        self.fetch_compressed("synnefo_build-area", dest)
        self.fetch_compressed("webclient_build-area", dest)
        self.logger.info("Downloaded debian packages to %s" % _green(dest))

    def x2go_plugin(self, dest=None):
        """Produce an html page which will use the x2goplugin

        Arguments:
          dest  -- The file where to save the page (String)

        """
        output_str = """
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
        <html>
        <head>
        <title>X2Go SynnefoCI Service</title>
        </head>
        <body onload="checkPlugin()">
        <div id="x2goplugin">
            <object
                src="location"
                type="application/x2go"
                name="x2goplugin"
                palette="background"
                height="100%"
                hspace="0"
                vspace="0"
                width="100%"
                x2goconfig="
                    session=X2Go-SynnefoCI-Session
                    server={0}
                    user={1}
                    sshport={2}
                    published=true
                    autologin=true
                ">
            </object>
        </div>
        </body>
        </html>
        """.format(self.read_temp_config('server_ip'),
                   self.read_temp_config('server_user'),
                   self.read_temp_config('server_port'))
        if dest is None:
            dest = self.config.get('Global', 'x2go_plugin_file')

        self.logger.info("Writting x2go plugin html file to %s" % dest)
        fid = open(dest, 'w')
        fid.write(output_str)
        fid.close()
Example no. 24
0
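# Imports assumed by this snippet (not shown in the original): the kamaki
# clients and SSL helper used below, and the process environment that holds
# the auth token. OkeanosNativeClient is the wrapper class defined in an
# earlier example of this document.
# from os import environ as ENV
# from kamaki.clients.utils import https
# from kamaki.clients.astakos import AstakosClient
# from kamaki.clients.cyclades import CycladesClient, CycladesBlockStorageClient
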
https.patch_ignore_ssl()

authURL = "https://accounts.okeanos.grnet.gr/identity/v2.0"
X_AUTH_TOKEN_NAME = 'X_AUTH_TOKEN'
token = ENV[X_AUTH_TOKEN_NAME]

projectId = '464eb0e7-b556-4fc7-8afb-d590feebaad8'
serverId = '660580'

cycladesServiceType = CycladesClient.service_type
blockStorageServiceType = CycladesBlockStorageClient.service_type

ac = AstakosClient(authURL, token)

cycladesURL = ac.get_endpoint_url(cycladesServiceType)
cc = CycladesClient(cycladesURL, token)

blockStorageURL = ac.get_endpoint_url(blockStorageServiceType)
bsc = CycladesBlockStorageClient(blockStorageURL, token)

onc = OkeanosNativeClient(token, authURL)

print "cycladesURL = %s" % cycladesURL
print "blockStorageURL = %s" % blockStorageURL
print "ac = %s" % ac
print "cc = %s" % cc
print "bsc = %s" % bsc
print "onc = %s" % onc

# servers = cc.list_servers()
# flavors = cc.list_flavors()
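
# A minimal follow-up sketch (hedged): with the clients above, the ids defined
# earlier could be used to inspect a single server, e.g.:
# details = cc.get_server_details(serverId)
# print "server %s is %s" % (serverId, details[u'status'])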
Example no. 25
0
def main():
    """Parse arguments, use kamaki to create cluster, setup using ansible playbooks"""

    (opts, args) = parse_arguments(sys.argv[1:])

    global CYCLADES, TOKEN, my_vnat_network, my_network_client

    AUTHENTICATION_URL = opts.cyclades
    TOKEN = opts.token

    # Cleanup stale servers from previous runs
    if opts.show_stale:
        cleanup_servers(prefix=opts.prefix, delete_stale=opts.delete_stale)
        return 0

    # Initialize a kamaki instance, get endpoints
    user = AstakosClient(AUTHENTICATION_URL, TOKEN)
    my_accountData = user.authenticate()
    endpoints = user.get_endpoints() 
    cyclades_endpoints = user.get_endpoints('compute')
    cyclades_base_url = parseAstakosEndpoints(endpoints, 'cyclades_compute')
    cyclades_network_base_url = parseAstakosEndpoints(endpoints, 'cyclades_network')
    my_cyclades_client = CycladesClient(cyclades_base_url, TOKEN)
    my_compute_client = ComputeClient(cyclades_base_url, TOKEN)
    my_network_client = CycladesNetworkClient(cyclades_network_base_url, TOKEN) 

    my_vnat_network = {}

    # check if 'Hadoop' vnat is created...
    hadoop_vnat_created = False
    my_network_dict = my_network_client.list_networks()
    for n in my_network_dict:
        if n['name'] == 'Hadoop': 
            hadoop_vnat_created = True
            my_vnat_network = n

    # ...else create it
    if not hadoop_vnat_created:
        log.info("Creating vNAT")
        my_vnat_network = my_network_client.create_network(type='MAC_FILTERED', name='Hadoop')
        my_subnet = my_network_client.create_subnet(network_id=my_vnat_network['id'], cidr='192.168.0.0/24')

    cnt = int(opts.clustersize)	# calculate size of cluster into 'cnt'
    # Initialize
    nodes = []
    masterName = ''

    # Create a file to store the root password for later use
    if not os.path.exists(opts.hadoop_dir+'/bak'):
        os.makedirs(opts.hadoop_dir+'/bak')
    pass_fname = opts.hadoop_dir+'/bak/adminPass'+str(datetime.now())[:19].replace(' ', '')
    adminPass_f = open(pass_fname, 'w')

    initialClusterSize = 0
    server = {}
    if not opts.extend:
        # Create master node (0th node)
        server = create_machine(opts, my_cyclades_client, 0)
        if server == {}:
            return
    else:
        servers = my_cyclades_client.list_servers(detail=True)
        cluster = [s for s in servers if s["name"].startswith(opts.prefix)]
        initialClusterSize = len(cluster)
        if initialClusterSize == 0:
            log.info("Cluster cannot be expanded: it does not exist.")
            return

    servername = "%s-0" % (opts.prefix)
    masterName = servername
    nodes.append(server)

    # Create slave (worker) nodes
    if cnt > 1 or opts.extend:
        startingOffset = 1
        if opts.extend:
            startingOffset = initialClusterSize
        for i in xrange(startingOffset, initialClusterSize+cnt):
            server = {}
            server = create_machine(opts, my_cyclades_client, i)
            if server == {}:
                return
            nodes.append(server)
            servername = "%s-%d" % (opts.prefix, i)
            # Write the root password to a file
            adminPass_f.write('machine = %s, password = %s\n' % (servername, server['adminPass']))

    adminPass_f.close()

    # Setup Hadoop files and settings on all cluster nodes
    # Create the 'cluster' dictionary out of servers, with only Hadoop-relevant keys (name, ip, integer key)
    servers = my_cyclades_client.list_servers(detail=True)
    cluster = [s for s in my_cyclades_client.list_servers(detail=True) if s["name"].startswith(opts.prefix)]
    cluster0 = [(s["name"], s["attachments"], int(s["name"][s["name"].find('-')+1:])) for s in cluster]
    cluster0 = sorted(cluster0, key=lambda cluster0: cluster0[2])
    cluster = [(cluster0[0][0], cluster0[0][1][2]["ipv4"], cluster0[0][2])]  # master node: IPv4 at attachments[2]
    cluster2 = [(s[0], s[1][1]['ipv4'], int(s[2])) for s in cluster0[1:]]  # slave nodes: IPv4 at attachments[1]
    cluster += cluster2
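    # cluster is now a list of (server name, IPv4 address, numeric suffix)
    # tuples ordered by the suffix, with the master node (prefix "-0") first.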

    # Prepare Ansible-Hadoop config files (hosts, conf/slaves, vnat/etchosts)
    hosts = open(opts.hadoop_dir+'/hosts', 'w')
    hosts.write('[master]\n')
    etchosts = open(opts.hadoop_dir+'/vnat/etchosts', 'w')
    for i in xrange(0, initialClusterSize+cnt):
        for s in cluster:
            if s[0] == opts.prefix+"-"+str(i):
                if s[0] == masterName:
                    hosts.write(s[1]+'\n\n'+'[slaves]\n')
                else:
                    hosts.write(s[1]+'\n')
                etchosts.write(s[1]+'\t'+s[0]+'\n')
    hosts.close()
    etchosts.close()

    slaves = open(opts.hadoop_dir+'/vnat/slaves', 'w')
    for s in cluster[1:]:
        slaves.write(s[0]+'\n')
    slaves.close()

    # Execute respective ansible playbook
    if not opts.extend:
        cmd = "ansible-playbook hadoop_vnat.yml -i hosts -vv --extra-vars \""+"is_master=True, master_node="+cluster[0][0]+" master_ip="+cluster[0][1]+"\""+" -l master"
        print cmd
        retval = os.system(cmd)
        cmd = "ansible-playbook hadoop_vnat.yml -i hosts -vv --extra-vars \""+"is_slave=True, master_node="+cluster[0][0]+" master_ip="+cluster[0][1]+"\""+" -l slaves"
        print cmd
        retval = os.system(cmd)
        slave_ip_list = []
        for i in xrange(1, cnt):
            slave_ip_list.append(cluster[i][0])
        enable_ssh_login(cluster[0][1], [cluster[0][0]])
        enable_ssh_login(cluster[0][1], slave_ip_list)
    else:
        hosts_latest = open(opts.hadoop_dir+'/hosts.latest', 'w')
        hosts_latest.write('[master]\n')
        hosts_latest.write(cluster[0][1]+'\n\n'+'[slaves]\n')
        for i in xrange(initialClusterSize, initialClusterSize+cnt):
            hosts_latest.write(cluster[i][1]+'\n')
        hosts_latest.close()
        # update etc/hosts in all nodes - TODO: de-duplicate entries
        cmd = "ansible-playbook hadoop_vnat.yml -i hosts -vv --extra-vars \""+"is_master=True, master_ip="+cluster[0][1]+"\""+" -t etchosts"
        print cmd
        retval = os.system(cmd) 
        cmd = "ansible-playbook hadoop_vnat.yml -i hosts.latest -vv --extra-vars \""+"is_slave=True, master_node="+cluster[0][0]+" master_ip="+cluster[0][1]+"\""+" -l slaves"
        print cmd
        retval = os.system(cmd) 
        slave_ip_list = []
        for i in xrange(initialClusterSize, initialClusterSize+cnt):
            slave_ip_list.append(cluster[i][0])
        print "slave_ip_list=", slave_ip_list 
        enable_ssh_login(cluster[0][1], slave_ip_list)

    # Update conf/slaves in master
    cmd = "ansible-playbook hadoop_vnat.yml -i hosts -vv --extra-vars \""+"is_master=True, master_ip="+cluster[0][1]+"\""+" -l master -t slaves"
    print cmd
    retval = os.system(cmd)

    log.info("Done.")
Example no. 26
0
    def set_cyclades_client(cls, cyclades_url, token):
        """Initialize the class-level Cyclades client"""
        cls.cyclades_client = CycladesClient(cyclades_url, token)
        cls.cyclades_client.CONNECTION_RETRY_LIMIT = 2
Example no. 27
0
class SynnefoCI(object):
    """SynnefoCI python class"""

    def __init__(self, config_file=None, build_id=None, cloud=None):
        """ Initialize SynnefoCI python class

        Setup logger, local_dir, config and kamaki
        """
        # Setup logger
        self.logger = logging.getLogger("synnefo-ci")
        self.logger.setLevel(logging.DEBUG)

        handler1 = logging.StreamHandler(sys.stdout)
        handler1.setLevel(logging.DEBUG)
        handler1.addFilter(_InfoFilter())
        handler1.setFormatter(_MyFormatter())
        handler2 = logging.StreamHandler(sys.stderr)
        handler2.setLevel(logging.WARNING)
        handler2.setFormatter(_MyFormatter())

        self.logger.addHandler(handler1)
        self.logger.addHandler(handler2)

        # Get our local dir
        self.ci_dir = os.path.dirname(os.path.abspath(__file__))
        self.repo_dir = os.path.dirname(self.ci_dir)

        # Read config file
        if config_file is None:
            config_file = os.path.join(self.ci_dir, DEFAULT_CONFIG_FILE)
        config_file = os.path.abspath(config_file)
        self.config = ConfigParser()
        self.config.optionxform = str
        self.config.read(config_file)

        # Read temporary_config file
        self.temp_config_file = os.path.expanduser(self.config.get("Global", "temporary_config"))
        self.temp_config = ConfigParser()
        self.temp_config.optionxform = str
        self.temp_config.read(self.temp_config_file)
        self.build_id = build_id
        if build_id is not None:
            self.logger.info('Will use "%s" as build id' % _green(self.build_id))

        # Set kamaki cloud
        if cloud is not None:
            self.kamaki_cloud = cloud
        elif self.config.has_option("Deployment", "kamaki_cloud"):
            self.kamaki_cloud = self.config.get("Deployment", "kamaki_cloud")
            if self.kamaki_cloud == "":
                self.kamaki_cloud = None
        else:
            self.kamaki_cloud = None

        # Initialize variables
        self.fabric_installed = False
        self.kamaki_installed = False
        self.cyclades_client = None
        self.network_client = None
        self.compute_client = None
        self.image_client = None
        self.astakos_client = None

    def setup_kamaki(self):
        """Initialize kamaki

        Setup cyclades_client, image_client and compute_client
        """

        # Patch kamaki for SSL verification
        _kamaki_ssl(ignore_ssl=IGNORE_SSL)

        config = kamaki_config.Config()
        if self.kamaki_cloud is None:
            try:
                self.kamaki_cloud = config.get("global", "default_cloud")
            except AttributeError:
                # Compatibility with kamaki version <=0.10
                self.kamaki_cloud = config.get("global", "default_cloud")

        self.logger.info("Setup kamaki client, using cloud '%s'.." % self.kamaki_cloud)
        auth_url = config.get_cloud(self.kamaki_cloud, "url")
        self.logger.debug("Authentication URL is %s" % _green(auth_url))
        token = config.get_cloud(self.kamaki_cloud, "token")
        # self.logger.debug("Token is %s" % _green(token))

        self.astakos_client = AstakosClient(auth_url, token)
        endpoints = self.astakos_client.authenticate()

        cyclades_url = get_endpoint_url(endpoints, "compute")
        self.logger.debug("Cyclades API url is %s" % _green(cyclades_url))
        self.cyclades_client = CycladesClient(cyclades_url, token)
        self.cyclades_client.CONNECTION_RETRY_LIMIT = 2

        network_url = get_endpoint_url(endpoints, "network")
        self.logger.debug("Network API url is %s" % _green(network_url))
        self.network_client = CycladesNetworkClient(network_url, token)
        self.network_client.CONNECTION_RETRY_LIMIT = 2

        image_url = get_endpoint_url(endpoints, "image")
        self.logger.debug("Images API url is %s" % _green(image_url))
        self.image_client = ImageClient(image_url, token)
        self.image_client.CONNECTION_RETRY_LIMIT = 2

        compute_url = get_endpoint_url(endpoints, "compute")
        self.logger.debug("Compute API url is %s" % _green(compute_url))
        self.compute_client = ComputeClient(compute_url, token)
        self.compute_client.CONNECTION_RETRY_LIMIT = 2

    __quota_cache = None

    def _get_available_project(self, skip_config=False, **resources):
        self.project_uuid = None
        if self.config.has_option("Deployment", "project"):
            self.project_uuid = self.config.get("Deployment", "project").strip() or None

        # user requested explicit project
        if self.project_uuid and not skip_config:
            return self.project_uuid

        def _filter_projects(_project):
            uuid, project_quota = _project
            can_fit = False
            for resource, required in resources.iteritems():
                # transform dots in order to permit direct keyword
                # arguments to be used.
                # (cyclades__disk=1) -> 'cyclades.disk': 1
                resource = resource.replace("__", ".")
                project_resource = project_quota.get(resource)
                if not project_resource:
                    raise Exception("Requested resource does not exist %s" % resource)

                plimit, ppending, pusage, musage, mlimit, mpending = project_resource.values()

                pavailable = plimit - ppending - pusage
                mavailable = mlimit - mpending - musage

                can_fit = (pavailable - required) >= 0 and (mavailable - required) >= 0
                if not can_fit:
                    return None
            return uuid

        self.__quota_cache = quota = self.__quota_cache or self.astakos_client.get_quotas()
        projects = filter(bool, map(_filter_projects, quota.iteritems()))
        if not len(projects):
            raise Exception("No project available for %r" % resources)
        return projects[0]

    def _wait_transition(self, server_id, current_status, new_status):
        """Wait for server to go from current_status to new_status"""
        self.logger.debug("Waiting for server to become %s" % new_status)
        timeout = self.config.getint("Global", "build_timeout")
        sleep_time = 5
        while True:
            server = self.cyclades_client.get_server_details(server_id)
            if server["status"] == new_status:
                return server
            elif timeout < 0:
                self.logger.error("Waiting for server to become %s timed out" % new_status)
                self.destroy_server(False)
                sys.exit(1)
            elif server["status"] == current_status:
                # Sleep for #n secs and continue
                timeout = timeout - sleep_time
                time.sleep(sleep_time)
            else:
                self.logger.error("Server failed with status %s" % server["status"])
                self.destroy_server(False)
                sys.exit(1)

    @_check_kamaki
    def destroy_server(self, wait=True):
        """Destroy slave server"""
        server_id = int(self.read_temp_config("server_id"))
        fips = [f for f in self.network_client.list_floatingips() if str(f["instance_id"]) == str(server_id)]
        self.logger.info("Destoying server with id %s " % server_id)
        self.cyclades_client.delete_server(server_id)
        if wait:
            self._wait_transition(server_id, "ACTIVE", "DELETED")
        for fip in fips:
            self.logger.info("Destroying floating ip %s", fip["floating_ip_address"])
            self.network_client.delete_floatingip(fip["id"])

    # pylint: disable= no-self-use
    @_check_fabric
    def shell_connect(self):
        """Open shell to remote server"""
        fabric.open_shell("export TERM=xterm")

    def _create_floating_ip(self):
        """Create a new floating ip"""
        project_id = self._get_available_project(cyclades__floating_ip=1)
        networks = self.network_client.list_networks(detail=True)
        pub_nets = [n for n in networks if n["SNF:floating_ip_pool"] and n["public"]]
        for pub_net in pub_nets:
            # Try until we find a public network that is not full
            try:
                fip = self.network_client.create_floatingip(pub_net["id"], project_id=project_id)
            except ClientError as err:
                self.logger.warning("%s", str(err.message).strip())
                continue
            self.logger.debug("Floating IP %s with id %s created", fip["floating_ip_address"], fip["id"])
            return fip
        self.logger.error("No more IP addresses available")
        sys.exit(1)

    def _create_port(self, floating_ip):
        """Create a new port for our floating IP"""
        net_id = floating_ip["floating_network_id"]
        self.logger.debug("Creating a new port to network with id %s", net_id)
        fixed_ips = [{"ip_address": floating_ip["floating_ip_address"]}]
        port = self.network_client.create_port(net_id, device_id=None, fixed_ips=fixed_ips)
        return port

    @_check_kamaki
    # Too many local variables. pylint: disable-msg=R0914
    def create_server(self, image=None, flavor=None, ssh_keys=None, server_name=None):
        """Create slave server"""
        self.logger.info("Create a new server..")

        # Find a build_id to use
        self._create_new_build_id()

        # Find an image to use
        image_id = self._find_image(image)
        # Find a flavor to use
        flavor_id = self._find_flavor(flavor)

        # get available project
        flavor = self.cyclades_client.get_flavor_details(flavor_id)
        quota = {
            "cyclades.disk": flavor["disk"] * 1024 ** 3,
            "cyclades.ram": flavor["ram"] * 1024 ** 2,
            "cyclades.cpu": flavor["vcpus"],
            "cyclades.vm": 1,
        }
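        # Worked example (assuming the flavor reports disk in GB and ram in
        # MB): a flavor with disk=40, ram=2048 and vcpus=2 requests
        # 40 * 1024**3 bytes of cyclades.disk and 2048 * 1024**2 bytes of
        # cyclades.ram.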
        project_id = self._get_available_project(**quota)

        # Create Server
        networks = []
        if self.config.get("Deployment", "allocate_floating_ip") == "True":
            fip = self._create_floating_ip()
            port = self._create_port(fip)
            networks.append({"port": port["id"]})
        private_networks = self.config.get("Deployment", "private_networks")
        if private_networks:
            private_networks = [p.strip() for p in private_networks.split(",")]
            networks.extend([{"uuid": uuid} for uuid in private_networks])
        if server_name is None:
            server_name = self.config.get("Deployment", "server_name")
            server_name = "%s(BID: %s)" % (server_name, self.build_id)
        server = self.cyclades_client.create_server(
            server_name, flavor_id, image_id, networks=networks, project_id=project_id
        )
        server_id = server["id"]
        self.write_temp_config("server_id", server_id)
        self.logger.debug("Server got id %s" % _green(server_id))

        # An image may have more than one user. Choose the first one.
        server_user = server["metadata"]["users"].split(" ")[0]

        self.write_temp_config("server_user", server_user)
        self.logger.debug("Server's admin user is %s" % _green(server_user))
        server_passwd = server["adminPass"]
        self.write_temp_config("server_passwd", server_passwd)

        server = self._wait_transition(server_id, "BUILD", "ACTIVE")
        self._get_server_ip_and_port(server, private_networks)
        self._copy_ssh_keys(ssh_keys)

        # Setup Firewall
        self.setup_fabric()
        self.logger.info("Setup firewall")
        accept_ssh_from = self.config.get("Global", "accept_ssh_from")
        if accept_ssh_from != "":
            self.logger.debug("Block ssh except from %s" % accept_ssh_from)
            cmd = """
            local_ip=$(/sbin/ifconfig eth0 | grep 'inet addr:' | \
                cut -d':' -f2 | cut -d' ' -f1)
            iptables -A INPUT -s localhost -j ACCEPT
            iptables -A INPUT -s $local_ip -j ACCEPT
            iptables -A INPUT -s {0} -p tcp --dport 22 -j ACCEPT
            iptables -A INPUT -p tcp --dport 22 -j DROP
            """.format(
                accept_ssh_from
            )
            _run(cmd, False)

        # Setup apt, download packages
        self.logger.debug("Setup apt")
        cmd = """
        echo 'APT::Install-Suggests "false";' >> /etc/apt/apt.conf
        echo 'Package: python-gevent' >> /etc/apt/preferences.d/00-gevent
        echo 'Pin: release o=Debian' >> /etc/apt/preferences.d/00-gevent
        echo 'Pin-Priority: 990' >> /etc/apt/preferences.d/00-gevent
        echo 'precedence ::ffff:0:0/96  100' >> /etc/gai.conf
        apt-get update
        apt-get install -q=2 curl --yes --force-yes
        echo -e "{0}" >> /etc/apt/sources.list.d/synnefo.wheezy.list
        # Synnefo repo's key
        curl https://dev.grnet.gr/files/apt-grnetdev.pub | apt-key add -
        """.format(
            self.config.get("Global", "apt_repo")
        )
        _run(cmd, False)

        cmd = """
        # X2GO Key
        apt-key adv --recv-keys --keyserver keys.gnupg.net E1F958385BFE2B6E
        apt-get install x2go-keyring --yes --force-yes
        apt-get update
        apt-get install x2goserver x2goserver-xsession \
                iceweasel --yes --force-yes

        # xterm published application
        echo '[Desktop Entry]' > /usr/share/applications/xterm.desktop
        echo 'Name=XTerm' >> /usr/share/applications/xterm.desktop
        echo 'Comment=standard terminal emulator for the X window system' >> \
            /usr/share/applications/xterm.desktop
        echo 'Exec=xterm' >> /usr/share/applications/xterm.desktop
        echo 'Terminal=false' >> /usr/share/applications/xterm.desktop
        echo 'Type=Application' >> /usr/share/applications/xterm.desktop
        echo 'Encoding=UTF-8' >> /usr/share/applications/xterm.desktop
        echo 'Icon=xterm-color_48x48' >> /usr/share/applications/xterm.desktop
        echo 'Categories=System;TerminalEmulator;' >> \
                /usr/share/applications/xterm.desktop
        """
        if self.config.get("Global", "setup_x2go") == "True":
            self.logger.debug("Install x2goserver and firefox")
            _run(cmd, False)

    def _find_flavor(self, flavor=None):
        """Find a suitable flavor to use

        Search by name (reg expression) or by id
        """

        def _is_true(value):
            """Boolean or string value that represents a bool"""
            if isinstance(value, bool):
                return value
            elif isinstance(value, str):
                return value in ["True", "true"]
            else:
                self.logger.error("Unrecognized boolean value %s" % value)
                return False

        # Get a list of flavors from config file
        flavors = self.config.get("Deployment", "flavors").split(",")
        if flavor is not None:
            # If we have a flavor_name to use, add it to our list
            flavors.insert(0, flavor)

        list_flavors = self.compute_client.list_flavors(detail=True)
        for flv in flavors:
            flv_type, flv_value = parse_typed_option(option="flavor", value=flv)
            if flv_type == "name":
                # Filter flavors by name
                self.logger.debug('Trying to find a flavor with name "%s"' % flv_value)
                list_flvs = [f for f in list_flavors if re.search(flv_value, f["name"], flags=re.I) is not None]
            elif flv_type == "id":
                # Filter flavors by id
                self.logger.debug('Trying to find a flavor with id "%s"' % flv_value)
                list_flvs = [f for f in list_flavors if str(f["id"]) == flv_value]
            else:
                self.logger.error("Unrecognized flavor type %s" % flv_type)

            # Check if we found one
            list_flvs = [f for f in list_flvs if _is_true(f["SNF:allow_create"])]
            if list_flvs:
                self.logger.debug(
                    'Will use "%s" with id "%s"' % (_green(list_flvs[0]["name"]), _green(list_flvs[0]["id"]))
                )
                return list_flvs[0]["id"]

        self.logger.error("No matching flavor found.. aborting")
        sys.exit(1)

    def _find_image(self, image=None):
        """Find a suitable image to use

        In case of search by name, the image has to belong to one
        of the `DEFAULT_SYSTEM_IMAGES_UUID' users.
        In case of search by id it only has to exist.
        """
        # Get a list of images from config file
        images = self.config.get("Deployment", "images").split(",")
        if image is not None:
            # If we have an image from command line, add it to our list
            images.insert(0, image)

        auth = self.astakos_client.authenticate()
        user_uuid = auth["access"]["token"]["tenant"]["id"]
        list_images = self.image_client.list_public(detail=True)["images"]
        for img in images:
            img_type, img_value = parse_typed_option(option="image", value=img)
            if img_type == "name":
                # Filter images by name
                self.logger.debug('Trying to find an image with name "%s"' % img_value)
                accepted_uuids = DEFAULT_SYSTEM_IMAGES_UUID + [user_uuid]
                list_imgs = [
                    i
                    for i in list_images
                    if i["user_id"] in accepted_uuids and re.search(img_value, i["name"], flags=re.I) is not None
                ]
            elif img_type == "id":
                # Filter images by id
                self.logger.debug('Trying to find an image with id "%s"' % img_value)
                list_imgs = [i for i in list_images if i["id"].lower() == img_value.lower()]
            else:
                self.logger.error("Unrecognized image type %s" % img_type)
                sys.exit(1)

            # Check if we found one
            if list_imgs:
                self.logger.debug(
                    'Will use "%s" with id "%s"' % (_green(list_imgs[0]["name"]), _green(list_imgs[0]["id"]))
                )
                return list_imgs[0]["id"]

        # We didn't find one
        self.logger.error("No matching image found.. aborting")
        sys.exit(1)

    def _get_server_ip_and_port(self, server, private_networks):
        """Compute server's IPv4 and ssh port number"""
        self.logger.info("Get server connection details..")
        if private_networks:
            # Choose the networks that belong to private_networks
            networks = [n for n in server["attachments"] if n["network_id"] in private_networks]
        else:
            # Choose the networks that are public
            networks = [
                n for n in server["attachments"] if self.network_client.get_network_details(n["network_id"])["public"]
            ]
        # Choose the networks with IPv4
        networks = [n for n in networks if n["ipv4"]]
        # Use the first network as IPv4
        server_ip = networks[0]["ipv4"]

        # Check if config has ssh_port option and if so, use that port.
        server_port = self.config.get("Deployment", "ssh_port")
        if not server_port:
            # No ssh port given. Get it from API (SNF:port_forwarding)
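            # Hypothetical example: SNF:port_forwarding could map "22" to
            # {"host": "gate.example.org", "port": "10022"}, in which case ssh
            # goes through that gateway instead of the server's own address.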
            if "22" in server["SNF:port_forwarding"]:
                server_ip = server["SNF:port_forwarding"]["22"]["host"]
                server_port = int(server["SNF:port_forwarding"]["22"]["port"])
            else:
                server_port = 22

        self.write_temp_config("server_ip", server_ip)
        self.logger.debug("Server's IPv4 is %s" % _green(server_ip))
        self.write_temp_config("server_port", server_port)
        self.logger.debug("Server's ssh port is %s" % _green(server_port))
        ssh_command = "ssh -p %s %s@%s" % (server_port, server["metadata"]["users"], server_ip)
        self.logger.debug('Access server using "%s"' % (_green(ssh_command)))

    @_check_fabric
    def _copy_ssh_keys(self, ssh_keys):
        """Upload/Install ssh keys to server"""
        self.logger.debug("Check for authentication keys to use")
        if ssh_keys is None:
            ssh_keys = self.config.get("Deployment", "ssh_keys")

        if ssh_keys != "":
            ssh_keys = os.path.expanduser(ssh_keys)
            self.logger.debug('Will use "%s" authentication keys file' % _green(ssh_keys))
            keyfile = "/tmp/%s.pub" % fabric.env.user
            _run("mkdir -p ~/.ssh && chmod 700 ~/.ssh", False)
            if ssh_keys.startswith("http://") or ssh_keys.startswith("https://") or ssh_keys.startswith("ftp://"):
                cmd = """
                apt-get update
                apt-get install wget --yes --force-yes
                wget {0} -O {1} --no-check-certificate
                """.format(
                    ssh_keys, keyfile
                )
                _run(cmd, False)
            elif os.path.exists(ssh_keys):
                _put(ssh_keys, keyfile)
            else:
                self.logger.debug("No ssh keys found")
                return
            _run("cat %s >> ~/.ssh/authorized_keys" % keyfile, False)
            _run("rm %s" % keyfile, False)
            self.logger.debug("Uploaded ssh authorized keys")
        else:
            self.logger.debug("No ssh keys found")

    def _create_new_build_id(self):
        """Find a uniq build_id to use"""
        with filelocker.lock("%s.lock" % self.temp_config_file, filelocker.LOCK_EX):
            # Read temp_config again to get any new entries
            self.temp_config.read(self.temp_config_file)

            # Find a unique build_id to use
            if self.build_id is None:
                ids = self.temp_config.sections()
                if ids:
                    max_id = int(max(self.temp_config.sections(), key=int))
                    self.build_id = max_id + 1
                else:
                    self.build_id = 1
            self.logger.debug('Will use "%s" as build id' % _green(self.build_id))

            # Create a new section
            try:
                self.temp_config.add_section(str(self.build_id))
            except DuplicateSectionError:
                msg = ('Build id "%s" already in use. ' + 'Please use a uniq one or cleanup "%s" file.\n') % (
                    self.build_id,
                    self.temp_config_file,
                )
                self.logger.error(msg)
                sys.exit(1)
            creation_time = time.strftime("%a, %d %b %Y %X", time.localtime())
            self.temp_config.set(str(self.build_id), "created", str(creation_time))

            # Write changes back to temp config file
            with open(self.temp_config_file, "wb") as tcf:
                self.temp_config.write(tcf)

    def write_temp_config(self, option, value):
        """Write changes back to config file"""
        # Acquire the lock to write to temp_config_file
        with filelocker.lock("%s.lock" % self.temp_config_file, filelocker.LOCK_EX):

            # Read temp_config again to get any new entries
            self.temp_config.read(self.temp_config_file)

            self.temp_config.set(str(self.build_id), option, str(value))
            curr_time = time.strftime("%a, %d %b %Y %X", time.localtime())
            self.temp_config.set(str(self.build_id), "modified", curr_time)

            # Write changes back to temp config file
            with open(self.temp_config_file, "wb") as tcf:
                self.temp_config.write(tcf)

    def read_temp_config(self, option):
        """Read from temporary_config file"""
        # If build_id is None use the latest one
        if self.build_id is None:
            ids = self.temp_config.sections()
            if ids:
                self.build_id = int(ids[-1])
            else:
                self.logger.error("No sections in temporary config file")
                sys.exit(1)
            self.logger.debug('Will use "%s" as build id' % _green(self.build_id))
        # Read specified option
        return self.temp_config.get(str(self.build_id), option)

    def setup_fabric(self):
        """Setup fabric environment"""
        self.logger.info("Setup fabric parameters..")
        fabric.env.user = self.read_temp_config("server_user")
        fabric.env.host_string = self.read_temp_config("server_ip")
        fabric.env.port = int(self.read_temp_config("server_port"))
        fabric.env.password = self.read_temp_config("server_passwd")
        fabric.env.connection_attempts = 10
        fabric.env.shell = "/bin/bash -c"
        fabric.env.disable_known_hosts = True
        fabric.env.output_prefix = None

    def _check_hash_sum(self, localfile, remotefile):
        """Check hash sums of two files"""
        self.logger.debug("Check hash sum for local file %s" % localfile)
        hash1 = os.popen("sha256sum %s" % localfile).read().split(" ")[0]
        self.logger.debug("Local file has sha256 hash %s" % hash1)
        self.logger.debug("Check hash sum for remote file %s" % remotefile)
        hash2 = _run("sha256sum %s" % remotefile, False)
        hash2 = hash2.split(" ")[0]
        self.logger.debug("Remote file has sha256 hash %s" % hash2)
        if hash1 != hash2:
            self.logger.error("Hashes differ.. aborting")
            sys.exit(1)

    @_check_fabric
    def clone_repo(self, synnefo_repo=None, synnefo_branch=None, local_repo=False, pull_request=None):
        """Clone Synnefo repo from slave server"""
        self.logger.info("Configure repositories on remote server..")
        self.logger.debug("Install/Setup git")
        cmd = """
        apt-get install git --yes --force-yes
        git config --global user.name {0}
        git config --global user.email {1}
        """.format(
            self.config.get("Global", "git_config_name"), self.config.get("Global", "git_config_mail")
        )
        _run(cmd, False)

        # Clone synnefo_repo
        synnefo_branch = self.clone_synnefo_repo(
            synnefo_repo=synnefo_repo, synnefo_branch=synnefo_branch, local_repo=local_repo, pull_request=pull_request
        )
        # Clone pithos-web-client
        if self.config.get("Global", "build_pithos_webclient") == "True":
            # Clone pithos-web-client
            self.clone_pithos_webclient_repo(synnefo_branch)

    @_check_fabric
    def clone_synnefo_repo(self, synnefo_repo=None, synnefo_branch=None, local_repo=False, pull_request=None):
        """Clone Synnefo repo to remote server"""

        assert pull_request is None or (synnefo_branch is None and synnefo_repo is None)

        pull_repo = None
        if pull_request is not None:
            # Get a Github pull request and run the testsuite in
            # a sophisticated way.
            # Sophisticated means that it will not just check the remote branch
            # from which the pull request originated. Instead it will checkout
            # the branch for which the pull request is indented (e.g.
            # grnet:develop) and apply the pull request over it. This way it
            # checks the pull request against the branch this pull request
            # targets.
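            # Example (hypothetical number): a URL such as
            # https://github.com/grnet/synnefo/pull/1234 would yield
            # group "grnet", repo "synnefo" and pull_number "1234" below.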
            m = re.search("github.com/([^/]+)/([^/]+)/pull/(\d+)", pull_request)
            if m is None:
                self.logger.error("Couldn't find a valid GitHub pull request" " URL")
                sys.exit(1)

            group = m.group(1)
            repo = m.group(2)
            pull_number = m.group(3)

            # Construct api url
            api_url = "/repos/%s/%s/pulls/%s" % (group, repo, pull_number)
            headers = {"User-Agent": "snf-ci"}
            # Get pull request info
            conn = httplib.HTTPSConnection("api.github.com")
            try:
                conn.request("GET", api_url, headers=headers)
                response = conn.getresponse()
                payload = json.load(response)
                synnefo_repo = payload["base"]["repo"]["html_url"]
                synnefo_branch = payload["base"]["ref"]
                pull_repo = (payload["head"]["repo"]["html_url"], payload["head"]["ref"])
            finally:
                conn.close()
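            # The GitHub pulls API payload used above exposes the pull
            # request's target ("base") and source ("head"); only the repo
            # html_url and branch ref of each are needed here:
            #   base.repo.html_url / base.ref -> branch the request targets
            #   head.repo.html_url / head.ref -> branch to pull on top of it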

        # Find synnefo_repo and synnefo_branch to use
        if synnefo_repo is None:
            synnefo_repo = self.config.get("Global", "synnefo_repo")
        if synnefo_branch is None:
            synnefo_branch = self.config.get("Global", "synnefo_branch")
        if synnefo_branch == "":
            synnefo_branch = (
                subprocess.Popen(["git", "rev-parse", "--abbrev-ref", "HEAD"], stdout=subprocess.PIPE)
                .communicate()[0]
                .strip()
            )
            if synnefo_branch == "HEAD":
                synnefo_branch = (
                    subprocess.Popen(["git", "rev-parse", "--short", "HEAD"], stdout=subprocess.PIPE)
                    .communicate()[0]
                    .strip()
                )
        self.logger.debug('Will use branch "%s"' % _green(synnefo_branch))

        if local_repo or synnefo_repo == "":
            # Use local_repo
            self.logger.debug("Push local repo to server")
            # Firstly create the remote repo
            _run("git init %s/synnefo" % work_dir, False)
            # Create a symlink to the userdir
            _run("ln -s %s/synnefo ~/synnefo" % work_dir, False)
            # Then push our local repo over ssh.
            # We have to pass some arguments to the ssh command,
            # namely to disable host key checking.
            (temp_ssh_file_handle, temp_ssh_file) = tempfile.mkstemp()
            os.close(temp_ssh_file_handle)
            # XXX: git push doesn't read the password
            cmd = """
            echo 'exec ssh -o "StrictHostKeyChecking no" \
                           -o "UserKnownHostsFile /dev/null" \
                           -q "$@"' > {5}
            chmod u+x {5}
            export GIT_SSH="{5}"
            echo "{0}" | git push -q --mirror ssh://{1}@{2}:{3}{4}/synnefo
            #echo "{0}" | git push -q --mirror ssh://{1}@{2}:{3}/~/synnefo
            rm -f {5}
            """.format(
                fabric.env.password, fabric.env.user, fabric.env.host_string, fabric.env.port, work_dir, temp_ssh_file
            )
            os.system(cmd)
        else:
            # Clone Synnefo from remote repo
            self.logger.debug("Clone synnefo from %s" % synnefo_repo)
            self._git_clone(synnefo_repo, directory="%s/synnefo" % work_dir)

        # Checkout the desired synnefo_branch
        self.logger.debug('Checkout "%s" branch/commit' % synnefo_branch)
        cmd = """
        cd %s/synnefo
        # Squelch the error message about pushing to master.
        # Keep default behaviour but hide the error message.
        git config receive.denyCurrentBranch refuse
        #cd synnefo
        for branch in `git branch -a | grep remotes | grep -v HEAD`; do
            git branch --track ${branch##*/} $branch
        done
        git checkout %s
        """ % (
            work_dir,
            synnefo_branch,
        )
        _run(cmd, False)

        # Apply a Github pull request
        if pull_repo is not None:
            self.logger.debug("Apply patches from pull request %s", pull_number)
            cmd = """
            cd {0}/synnefo
            git pull --no-edit --no-rebase {1} {2}
            """.format(
                work_dir, pull_repo[0], pull_repo[1]
            )
            _run(cmd, False)

        return synnefo_branch

    @_check_fabric
    def clone_pithos_webclient_repo(self, synnefo_branch):
        """Clone Pithos WebClient repo to remote server"""
        # Find pithos_webclient_repo and pithos_webclient_branch to use
        pithos_webclient_repo = self.config.get("Global", "pithos_webclient_repo")
        pithos_webclient_branch = self.config.get("Global", "pithos_webclient_branch")

        # Clone pithos-webclient from remote repo
        self.logger.debug("Clone pithos-webclient from %s" % pithos_webclient_repo)
        self._git_clone(pithos_webclient_repo, directory="%s/pithos-web-client" % work_dir)

        # Track all pithos-webclient branches
        cmd = (
            """
        cd %s/pithos-web-client
        for branch in `git branch -a | grep remotes | grep -v HEAD`; do
            git branch --track ${branch##*/} $branch > /dev/null 2>&1
        done
        git --no-pager branch --no-color
        """
            % work_dir
        )
        webclient_branches = _run(cmd, False)
        webclient_branches = webclient_branches.split()

        # If pithos_webclient_branch is set in the config file, use it.
        # Otherwise try to use the same branch as synnefo_branch,
        # or fall back to an appropriate default.
        if pithos_webclient_branch == "":
            if synnefo_branch in webclient_branches:
                pithos_webclient_branch = synnefo_branch
            else:
                # If synnefo_branch starts with one of
                # 'master', 'hotfix'; use the master branch
                if synnefo_branch.startswith("master") or synnefo_branch.startswith("hotfix"):
                    pithos_webclient_branch = "master"
                # If synnefo_branch starts with one of
                # 'develop', 'feature'; use the develop branch
                elif synnefo_branch.startswith("develop") or synnefo_branch.startswith("feature"):
                    pithos_webclient_branch = "develop"
                else:
                    self.logger.warning(
                        "Cannot determine which pithos-web-client branch to "
                        'use based on "%s" synnefo branch. '
                        "Will use develop." % synnefo_branch
                    )
                    pithos_webclient_branch = "develop"
        # Checkout branch
        self.logger.debug('Checkout "%s" branch' % _green(pithos_webclient_branch))
        cmd = """
        cd {0}/pithos-web-client
        git checkout {1}
        """.format(
            work_dir, pithos_webclient_branch
        )
        _run(cmd, False)

    def _git_clone(self, repo, directory=""):
        """Clone repo to remote server

        Cloning from code.grnet.gr can currently fail unexpectedly,
        so retry a few times.

        """
        cloned = False
        for i in range(1, 11):
            try:
                _run("git clone %s %s" % (repo, directory), False)
                cloned = True
                break
            except BaseException:
                self.logger.warning("Clonning failed.. retrying %s/10" % i)
        if not cloned:
            self.logger.error("Can not clone repo.")
            sys.exit(1)
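
    # Hypothetical variant (not part of the original example): the retry loop
    # above re-runs the clone immediately; a minimal sketch that pauses
    # between attempts, assuming the same module-level _run() helper.
    def _git_clone_with_pause(self, repo, directory="", attempts=10, pause=5):
        """Clone repo, pausing between failed attempts (illustrative sketch)"""
        import time
        for i in range(1, attempts + 1):
            try:
                _run("git clone %s %s" % (repo, directory), False)
                return
            except BaseException:
                self.logger.warning("Cloning failed.. retrying %s/%s" % (i, attempts))
                time.sleep(pause)
        self.logger.error("Cannot clone repo.")
        sys.exit(1)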

    @_check_fabric
    def build_packages(self):
        """Build packages needed by Synnefo software"""
        self.logger.info("Install development packages")
        cmd = """
        apt-get update
        apt-get install zlib1g-dev dpkg-dev debhelper git-buildpackage \
                python-dev python-all python-pip ant --yes --force-yes
        pip install -U devflow
        """
        _run(cmd, False)

        # Patch pydist bug
        if self.config.get("Global", "patch_pydist") == "True":
            self.logger.debug("Patch pydist.py module")
            cmd = r"""
            sed -r -i 's/(\(\?P<name>\[A-Za-z\]\[A-Za-z0-9_\.)/\1\\\-/' \
                /usr/share/python/debpython/pydist.py
            """
            _run(cmd, False)

        # Build synnefo packages
        self.build_synnefo()
        # Build pithos-web-client packages
        if self.config.get("Global", "build_pithos_webclient") == "True":
            self.build_pithos_webclient()

    @_check_fabric
    def build_synnefo(self):
        """Build Synnefo packages"""
        self.logger.info("Build Synnefo packages..")

        cmd = (
            """
        devflow-autopkg snapshot -b %s/synnefo_build-area --no-sign
        """
            % work_dir
        )
        with fabric.cd("%s/synnefo" % work_dir):
            _run(cmd, True)

        # Install snf-deploy package
        self.logger.debug("Install snf-deploy package")
        cmd = """
        dpkg -i snf-deploy*.deb
        apt-get -f install --yes --force-yes
        snf-deploy keygen
        """
        with fabric.cd("%s/synnefo_build-area" % work_dir):
            with fabric.settings(warn_only=True):
                _run(cmd, True)

        # Setup synnefo packages for snf-deploy
        self.logger.debug("Copy synnefo debs to snf-deploy packages dir")
        cmd = (
            """
        cp %s/synnefo_build-area/*.deb /var/lib/snf-deploy/packages/
        """
            % work_dir
        )
        _run(cmd, False)

    @_check_fabric
    def build_pithos_webclient(self):
        """Build pithos-web-client packages"""
        self.logger.info("Build pithos-web-client packages..")

        cmd = (
            """
        devflow-autopkg snapshot -b %s/webclient_build-area --no-sign
        """
            % work_dir
        )
        with fabric.cd("%s/pithos-web-client" % work_dir):
            _run(cmd, True)

        # Setup pithos-web-client packages for snf-deploy
        self.logger.debug("Copy webclient debs to snf-deploy packages dir")
        cmd = (
            """
        cp %s/webclient_build-area/*.deb /var/lib/snf-deploy/packages/
        """
            % work_dir
        )
        _run(cmd, False)

    @_check_fabric
    def build_documentation(self):
        """Build Synnefo documentation"""
        self.logger.info("Build Synnefo documentation..")
        _run("pip install -U Sphinx", False)
        with fabric.cd("%s/synnefo" % work_dir):
            _run("devflow-update-version; " "./ci/make_docs.sh synnefo_documentation", False)

    def fetch_documentation(self, dest=None):
        """Fetch Synnefo documentation"""
        self.logger.info("Fetch Synnefo documentation..")
        if dest is None:
            dest = "synnefo_documentation"
        dest = os.path.abspath(dest)
        if not os.path.exists(dest):
            os.makedirs(dest)
        self.fetch_compressed("%s/synnefo/synnefo_documentation" % work_dir, dest)
        self.logger.info("Downloaded documentation to %s" % _green(dest))

    @_check_fabric
    def deploy_synnefo(self, schema=None):
        """Deploy Synnefo using snf-deploy"""
        self.logger.info("Deploy Synnefo..")
        if schema is None:
            schema = self.config.get("Global", "schema")
        self.logger.debug('Will use "%s" schema' % _green(schema))

        self.logger.debug("Update schema files to server")
        cmd = """
        schema_dir="{0}/synnefo/ci/schemas/{1}"
        if [ -d "$schema_dir" ]; then
            cp "$schema_dir"/* /etc/snf-deploy/
        else
            echo "$schema_dir" does not exist
            exit 1
        fi
        """.format(
            work_dir, schema
        )
        _run(cmd, False)

        self.logger.debug("Change password in nodes.conf file")
        cmd = """
        sed -i 's/^password =.*/password = {0}/' /etc/snf-deploy/nodes.conf
        """.format(
            fabric.env.password
        )
        _run(cmd, False)

        self.logger.debug("Run snf-deploy")
        cmd = """
        snf-deploy --disable-colors --autoconf synnefo
        """
        _run(cmd, True)

    @_check_fabric
    def unit_test(self):
        """Run Synnefo unit test suite"""
        self.logger.info("Run Synnefo unit test suite")
        component = self.config.get("Unit Tests", "component")

        self.logger.debug("Install needed packages")
        cmd = """
        pip install -U funcsigs mock==1.1.2 factory_boy==2.4.1 nose coverage
        """
        _run(cmd, False)

        self.logger.debug("Upload tests.sh file")
        unit_tests_file = os.path.join(self.ci_dir, "tests.sh")
        _put(unit_tests_file, ".")

        self.logger.debug("Run unit tests")
        cmd = """
        bash tests.sh {0}
        """.format(
            component
        )
        _run(cmd, True)

    @_check_fabric
    def run_burnin(self):
        """Run burnin functional test suite"""
        self.logger.info("Run Burnin functional test suite")
        cmd = """
        auth_url=$(grep -e '^url =' .kamakirc | cut -d' ' -f3)
        token=$(grep -e '^token =' .kamakirc | cut -d' ' -f3)
        images_user=$(kamaki image list -l | grep owner | \
                      cut -d':' -f2 | tr -d ' ')
        snf-burnin --auth-url=$auth_url --token=$token {0}
        BurninExitStatus=$?
        exit $BurninExitStatus
        """.format(
            self.config.get("Burnin", "cmd_options")
        )
        _run(cmd, True)

    @_check_fabric
    def fetch_compressed(self, src, dest=None):
        """Create a tarball and fetch it locally"""
        self.logger.debug("Creating tarball of %s" % src)
        basename = os.path.basename(src)
        tar_file = basename + ".tgz"
        cmd = "tar czf %s %s" % (tar_file, src)
        _run(cmd, False)
        if not os.path.exists(dest):
            os.makedirs(dest)

        tmp_dir = tempfile.mkdtemp()
        fabric.get(tar_file, tmp_dir)

        dest_file = os.path.join(tmp_dir, tar_file)
        self._check_hash_sum(dest_file, tar_file)
        self.logger.debug("Untar packages file %s" % dest_file)
        cmd = """
        cd %s
        tar xzf %s
        cp -r %s/%s/* %s
        rm -r %s
        """ % (
            tmp_dir,
            tar_file,
            tmp_dir,
            src,
            dest,
            tmp_dir,
        )
        os.system(cmd)
        self.logger.info("Downloaded %s to %s" % (src, _green(dest)))

    @_check_fabric
    def fetch_packages(self, dest=None):
        """Fetch Synnefo packages"""
        if dest is None:
            dest = self.config.get("Global", "pkgs_dir")
        dest = os.path.abspath(os.path.expanduser(dest))
        if not os.path.exists(dest):
            os.makedirs(dest)
        self.fetch_compressed("%s/synnefo_build-area" % work_dir, dest)
        if self.config.get("Global", "build_pithos_webclient") == "True":
            self.fetch_compressed("%s/webclient_build-area" % work_dir, dest)
        self.logger.info("Downloaded debian packages to %s" % _green(dest))

    def x2go_plugin(self, dest=None):
        """Produce an html page which will use the x2goplugin

        Arguments:
          dest  -- The file where to save the page (String)

        """
        output_str = """
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
        <html>
        <head>
        <title>X2Go SynnefoCI Service</title>
        </head>
        <body onload="checkPlugin()">
        <div id="x2goplugin">
            <object
                src="location"
                type="application/x2go"
                name="x2goplugin"
                palette="background"
                height="100%"
                hspace="0"
                vspace="0"
                width="100%"
                x2goconfig="
                    session=X2Go-SynnefoCI-Session
                    server={0}
                    user={1}
                    sshport={2}
                    published=true
                    autologin=true
                ">
            </object>
        </div>
        </body>
        </html>
        """.format(
            self.read_temp_config("server_ip"),
            self.read_temp_config("server_user"),
            self.read_temp_config("server_port"),
        )
        if dest is None:
            dest = self.config.get("Global", "x2go_plugin_file")

        self.logger.info("Writting x2go plugin html file to %s" % dest)
        fid = open(dest, "w")
        fid.write(output_str)
        fid.close()