def authenticate_clients():
    """Instantiate the kamaki clients (cyclades, compute, network, astakos).

    Returns a (cyclades_client, compute_client, network_client,
    astakos_client) tuple on success.

    BUG FIX: the original fell through after a failed authentication or
    endpoint lookup and then crashed with a NameError on the unbound
    client variables; each failure now re-raises ClientError after
    reporting it.  print statements were also unified to print().
    """
    try:
        astakos_client = AstakosClient(AUTHENTICATION_URL, TOKEN)
        astakos_client.authenticate()
        logging.info('Successful authentication')
    except ClientError:
        logging.info('\n Failed to authenticate user token')
        print('Failed to authenticate user token')
        raise
    try:
        endpoints = astakos_client.get_endpoints()
        cyclades_base_url = parse_astakos_endpoints(endpoints, 'cyclades_compute')
        cyclades_network_base_url = parse_astakos_endpoints(endpoints, 'cyclades_network')
    except ClientError:
        print('Failed to get endpoints for cyclades')
        raise
    try:
        cyclades_client = CycladesClient(cyclades_base_url, TOKEN)
        compute_client = ComputeClient(cyclades_base_url, TOKEN)
        network_client = CycladesNetworkClient(cyclades_network_base_url, TOKEN)
        return cyclades_client, compute_client, network_client, astakos_client
    except ClientError:
        print('Failed to initialize Cyclades client')
        raise
def ADD_VM(url, token, name, project):
    """Create a new VM named *name* under *project* and wait for ACTIVE.

    BUG FIX: AstakosClient takes (url, token) only -- the stray third
    ``name`` argument was removed.  The ``<name>.info`` file now records
    key/value pairs; the original iterated the server dict and wrote only
    its keys.
    """
    astakos = AstakosClient(url, token)
    service_type = CycladesComputeClient.service_type
    endpoint = astakos.get_endpoint_url(service_type)
    compute = CycladesComputeClient(endpoint, token)
    vm_name = name
    flavor = '260'  # hard-coded flavor id -- TODO: consider parameterizing
    image = 'eca2f4ef-b428-4096-a47d-29ddf5ed68d9'  # hard-coded image id
    # server = compute.create_server(name=vm_name, flavor_id=flavor, image_id=image)
    server = compute.create_server(name=vm_name, key_name='cluster key', flavor_id=flavor, image_id=image, project_id=project)
    print(server['status'])
    with open(vm_name + '.info', "w") as f:
        for key in server:
            f.write('%s = %s\n' % (key, server[key]))
    active = compute.wait_server_until(server['id'], 'ACTIVE')
    # NOTE(review): wait_server_until returns server details, not a bare
    # status string, so this comparison looks always-true -- confirm
    # against the kamaki API before changing it.
    if active != 'ACTIVE':
        print('Waiting for server to build...')
def initialize_clients(self):
    """Initialize all the Kamaki Clients"""
    self.astakos = AstakosClient(self.auth_url, self.token)
    self.astakos.CONNECTION_RETRY_LIMIT = self.retry
    # authenticate() returns the endpoint catalog used for every lookup below
    endpoints = self.astakos.authenticate()

    # Resolve all service URLs first.
    self.compute_url = _get_endpoint_url(endpoints, "compute")
    self.network_url = _get_endpoint_url(endpoints, "network")
    self.pithos_url = _get_endpoint_url(endpoints, "object-store")
    self.image_url = _get_endpoint_url(endpoints, "image")

    # Cyclades shares the compute endpoint.
    self.compute = ComputeClient(self.compute_url, self.token)
    self.cyclades = CycladesClient(self.compute_url, self.token)
    self.network = CycladesNetworkClient(self.network_url, self.token)
    self.pithos = PithosClient(self.pithos_url, self.token)
    self.image = ImageClient(self.image_url, self.token)

    # All clients share one retry limit.
    for client in (self.compute, self.cyclades, self.network,
                   self.pithos, self.image):
        client.CONNECTION_RETRY_LIMIT = self.retry
def check_credentials(token, auth_url=auth_url):
    """Identity,Account/Astakos. Test authentication credentials"""
    logging.log(REPORT, ' Test the credentials')
    try:
        auth = AstakosClient(auth_url, token)
        auth.authenticate()
    except ClientError:
        raise ClientError(
            ' Authentication failed with url %s and token %s' % (auth_url, token),
            error_authentication)
    return auth
def check_credentials(auth_url, token):
    """Test authentication credentials against Astakos.

    Returns the authenticated AstakosClient; re-raises ClientError on
    failure.  CONSISTENCY FIX: the body mixed a Python-2 print statement
    with print() calls -- unified to print() (identical output for a
    single argument).
    """
    print(' Test the credentials')
    try:
        auth = AstakosClient(auth_url, token)
        auth.authenticate()
    except ClientError:
        print('Authentication failed with url %s and token %s' % (
            auth_url, token))
        raise
    print('Authentication verified')
    return auth
def check_user_credentials(token, auth_url='https://accounts.okeanos.grnet.gr'
                           '/identity/v2.0'):
    """Identity,Account/Astakos. Test ~okeanos authentication credentials"""
    logging.info(' Test the credentials')
    try:
        auth = AstakosClient(auth_url, token)
        auth.authenticate()
    except ClientError:
        # Report the failing url/token pair and signal failure.
        logging.error('Authentication failed with url %s and token %s' % (
            auth_url, token))
        return NOT_AUTHENTICATED
    logging.info(' Authentication verified')
    return AUTHENTICATED
def check_credentials(token, auth_url='https://accounts.okeanos.grnet.gr'
                      '/identity/v2.0'):
    '''Identity,Account/Astakos. Test authentication credentials'''
    logging.log(REPORT, ' Test the credentials')
    try:
        auth = AstakosClient(auth_url, token)
        auth.authenticate()
    except ClientError:
        logging.error('Authentication failed with url %s and token %s' % (
            auth_url, token))
        # Authentication is mandatory: abort the whole run.
        sys.exit(error_authentication)
    else:
        logging.log(REPORT, ' Authentication verified')
        return auth
def check_user_credentials(token, auth_url='https://accounts.okeanos.grnet.gr'
                           '/identity/v2.0'):
    '''Identity,Account/Astakos. Test ~okeanos authentication credentials'''
    logging.info(' Test the credentials')
    try:
        client = AstakosClient(auth_url, token)
        client.authenticate()
    except ClientError:
        logging.error('Authentication failed with url %s and token %s' % (
            auth_url, token))
        return NOT_AUTHENTICATED
    else:
        logging.info(' Authentication verified')
        return AUTHENTICATED
def REMOVE_VM(url, token, server):
    """Delete the VM whose name equals *server*.

    BUG FIX: the original shadowed the ``id`` builtin and raised a
    NameError when no server matched; an unmatched name is now reported
    instead of crashing.
    """
    astakos = AstakosClient(url, token)
    service_type = CycladesComputeClient.service_type
    endpoint = astakos.get_endpoint_url(service_type)
    compute = CycladesComputeClient(endpoint, token)
    # find server id (last match wins, as before)
    server_id = None
    for srv in compute.list_servers(name=server):
        if srv['name'] == server:
            server_id = srv['id']
            print(server_id)
    if server_id is None:
        print('No server named %s found' % server)
        return
    compute.delete_server(server_id)
def __init__(self, config):
    """Read the default cloud's credentials and build the endpoint map."""
    self.config = config
    cloud_name = self.config.get('global', 'default_cloud')
    self.auth_token = self.config.get_cloud(cloud_name, 'token')
    # Trust the configured CA bundle for all HTTPS connections.
    cacerts_path = self.config.get('global', 'ca_certs')
    https.patch_with_certs(cacerts_path)
    auth_url = self.config.get_cloud(cloud_name, 'url')
    auth = AstakosClient(auth_url, self.auth_token)
    self.endpoints = {
        'astakos': auth_url,
        'cyclades': auth.get_endpoint_url(CycladesComputeClient.service_type),
        'network': auth.get_endpoint_url(CycladesNetworkClient.service_type),
        'plankton': auth.get_endpoint_url(ImageClient.service_type),
    }
    self.user_id = auth.user_info['id']
def setUp(self):
    """Prepare fixtures: image/flavor details from disk, unique names,
    and an authenticated Cyclades client for the configured test cloud."""
    print
    # NOTE(review): eval() on file contents -- acceptable only because the
    # .details files are produced by this test suite itself; never do this
    # on untrusted input.
    with open(self['cmpimage', 'details']) as f:
        self.img_details = eval(f.read())
    self.img = self.img_details['id']
    with open(self['flavor', 'details']) as f:
        self._flavor_details = eval(f.read())
    self.PROFILES = ('ENABLED', 'DISABLED', 'PROTECTED')
    self.servers = {}
    # Timestamp makes the generated server/network names unique per run.
    self.now = time.mktime(time.gmtime())
    self.servname1 = 'serv' + unicode(self.now)
    self.servname2 = self.servname1 + '_v2'
    self.servname1 += '_v1'
    self.flavorid = self._flavor_details['id']
    #servers have to be created at the begining...
    self.networks = {}
    self.netname1 = 'net' + unicode(self.now)
    self.netname2 = 'net' + unicode(self.now) + '_v2'
    self.cloud = 'cloud.%s' % self['testcloud']
    aurl, self.token = self[self.cloud, 'url'], self[self.cloud, 'token']
    self.auth_base = AstakosClient(aurl, self.token)
    curl = self.auth_base.get_service_endpoints('compute')['publicURL']
    self.client = CycladesClient(curl, self.token)
def __init__(self, token, authURL='https://accounts.okeanos.grnet.gr/identity/v2.0'):
    """Authenticate against Astakos and build Cyclades compute and
    block-storage clients, plus flavor id<->name lookup tables.

    :type authURL: str
    :type token: str
    """
    from kamaki.clients.utils import https
    https.patch_ignore_ssl()  # disable SSL verification (as before)
    self.authURL = authURL
    self.token = token
    self.cycladesServiceType = CycladesClient.service_type
    self.blockStorageServiceType = CycladesBlockStorageClient.service_type
    self.astakosClient = AstakosClient(self.authURL, self.token)
    endpointF = self.astakosClient.get_service_endpoints
    self.cycladesEndpoint = endpointF(self.cycladesServiceType)[u'publicURL']
    self.cycladesClient = CycladesClient(self.cycladesEndpoint, self.token)
    self.blockStorageEndpoint = endpointF(self.blockStorageServiceType)[u'publicURL']
    self.blockStorageClient = CycladesBlockStorageClient(
        self.blockStorageEndpoint, token)
    # Two-way flavor lookup tables, filled in a single pass.
    self.flavorsById = {}
    self.flavorsByName = {}
    for flavor in self.cycladesClient.list_flavors():
        self.flavorsById[flavor[u'id']] = flavor[u'name']
        self.flavorsByName[flavor[u'name']] = flavor[u'id']
class SynnefoClient(object):
    """Synnefo Client

    Wrapper class around clients of kamaki's clients for various
    Synnefo services:

    * astakos: Astakos client
    * compute: Cyclades Compute client
    * network: Cyclades Network client
    * image: Cyclades Plankton client
    """

    def __init__(self, cloud=None, auth_url=None, token=None):
        # A named cloud overrides explicitly supplied credentials.
        if cloud is not None:
            auth_url, token = utils.get_cloud_credentials(cloud)
        self.auth_url, self.token = auth_url, token
        self.astakos = AstakosClient(self.auth_url, self.token)
        self.endpoints = self.get_api_endpoints()
        eps = self.endpoints
        self.volume = BlockStorageClient(eps["cyclades_volume"], token)
        self.compute = CycladesClient(eps["cyclades_compute"], token)
        self.cyclades_networks = CycladesNetworkClient(eps["cyclades_network"], token)
        self.network = NetworkClient(eps["cyclades_network"], token)
        self.image = ImageClient(eps["cyclades_plankton"], token)

    def get_api_endpoints(self):
        """Get service endpoints from Astakos"""
        catalog = self.astakos.get_endpoints()["access"]["serviceCatalog"]
        # Map each service name to its first public URL.
        return dict((svc["name"], svc["endpoints"][0]["publicURL"])
                    for svc in catalog)
def setup_kamaki(self):
    """Initialize kamaki

    Setup cyclades_client, image_client and compute_client
    """
    # Patch kamaki for SSL verification
    _kamaki_ssl(ignore_ssl=IGNORE_SSL)

    config = kamaki_config.Config()
    if self.kamaki_cloud is None:
        # NOTE(review): both branches were identical in the original; the
        # except branch presumably should call the old kamaki<=0.10 API --
        # confirm the intended legacy call before relying on it.
        try:
            self.kamaki_cloud = config.get("global", "default_cloud")
        except AttributeError:
            # Compatibility with kamaki version <=0.10
            self.kamaki_cloud = config.get("global", "default_cloud")

    self.logger.info("Setup kamaki client, using cloud '%s'.." %
                     self.kamaki_cloud)
    auth_url = config.get_cloud(self.kamaki_cloud, "url")
    self.logger.debug("Authentication URL is %s" % _green(auth_url))
    token = config.get_cloud(self.kamaki_cloud, "token")
    # self.logger.debug("Token is %s" % _green(token))

    self.astakos_client = AstakosClient(auth_url, token)
    endpoints = self.astakos_client.authenticate()

    cyclades_url = get_endpoint_url(endpoints, "compute")
    self.logger.debug("Cyclades API url is %s" % _green(cyclades_url))
    self.cyclades_client = CycladesClient(cyclades_url, token)
    self.cyclades_client.CONNECTION_RETRY_LIMIT = 2

    network_url = get_endpoint_url(endpoints, "network")
    self.logger.debug("Network API url is %s" % _green(network_url))
    self.network_client = CycladesNetworkClient(network_url, token)
    self.network_client.CONNECTION_RETRY_LIMIT = 2

    image_url = get_endpoint_url(endpoints, "image")
    self.logger.debug("Images API url is %s" % _green(image_url))
    # BUG FIX: the image client was built with cyclades_url although
    # image_url had just been resolved (and was otherwise unused).
    self.image_client = ImageClient(image_url, token)
    self.image_client.CONNECTION_RETRY_LIMIT = 2

    compute_url = get_endpoint_url(endpoints, "compute")
    self.logger.debug("Compute API url is %s" % _green(compute_url))
    self.compute_client = ComputeClient(compute_url, token)
    self.compute_client.CONNECTION_RETRY_LIMIT = 2
def _get_pithos_client(self, auth_url, token, container):
    """Authenticate and return an AgkyraPithosClient bound to *container*.

    Each failure is logged before the ClientError is re-raised.
    """
    try:
        astakos = AstakosClient(auth_url, token)
    except ClientError:
        logger.error("Failed to authenticate user token")
        raise
    try:
        pithos_url = astakos.get_endpoint_url(
            AgkyraPithosClient.service_type)
    except ClientError:
        logger.error("Failed to get endpoints for Pithos")
        raise
    try:
        account = astakos.user_info['id']
        return AgkyraPithosClient(pithos_url, token, account, container)
    except ClientError:
        logger.error("Failed to initialize Pithos client")
        raise
def get_user_id(token):
    """Check kamaki and returns user uuid from matching ~okeanos token"""
    auth = AstakosClient(auth_url, token)
    try:
        logging.info(' Get the uuid')
        return auth.user_info['id']
    except ClientError:
        # Re-raise with a clearer message for the caller.
        raise ClientError('Failed to get uuid from identity server')
def authenticate(self, authentication=None):
    """Create the cyclades and network clients from *authentication*.

    :param authentication: mapping with 'URL' and 'TOKEN' keys
    :return: True when clients are (or become) available, False on error
    """
    if self.__cyclades is not None:
        return True  # already initialized
    try:
        token = authentication['TOKEN']
        authcl = AstakosClient(authentication['URL'], token)
        authcl.authenticate()
        compute_url = authcl.get_service_endpoints('compute')['publicURL']
        self.__cyclades = CycladesClient(compute_url, token)
        network_url = authcl.get_service_endpoints('network')['publicURL']
        self.__network_client = CycladesNetworkClient(network_url, token)
    except ClientError:
        stderr.write('Connector initialization failed')
        return False
    return True
def create_account(url, token):
    """Given a valid (URL, tokens) pair this method returns an Astakos
    client instance
    """
    client = AstakosClient(url, token)
    try:
        client.authenticate()
    except ClientError:
        # Invalid credentials: signal with None rather than raising.
        return None
    else:
        return client
def list_pithos_files(self): """ Method for listing pithos+ files available to the user """ auth_url = self.opts['auth_url'] token = self.opts['token'] try: auth = AstakosClient(auth_url, token) auth.authenticate() except ClientError: msg = ' Authentication error: Invalid Token' logging.error(msg) exit(error_fatal) pithos_endpoint = auth.get_endpoint_url('object-store') pithos_container = self.opts.get('pithos_container','pithos') user_id = auth.user_info['id'] pithos_client = PithosClient(pithos_endpoint,self.opts['token'], user_id, pithos_container) objects = pithos_client.list_objects() for object in objects: is_dir = 'application/directory' in object.get('content_type', object.get('content-type', '')) if not is_dir: print u"{:>12s} \"pithos:/{:s}/{:s}\"".format(bytes_to_shorthand(object['bytes']), pithos_container,object['name'])
def check_auth_token(auth_token, auth_url=None):
    """
    Checks the validity of a user authentication token.

    :param auth_token: User authentication token
    :param auth_url: Authentication url
    :return: tuple(Success status, details)
    """
    auth_url = auth_url or "https://accounts.okeanos.grnet.gr/identity/v2.0"
    patch_certs()
    client = AstakosClient(auth_url, auth_token)
    try:
        return True, client.authenticate()
    except ClientError as ex:
        # Only an UNAUTHORIZED response means a bad token; anything else
        # is unexpected and propagates.
        if ex.message != 'UNAUTHORIZED':
            raise
        return False, ex.details
def initialize_clients(self, ignore_ssl=False):
    """Initialize all the Kamaki Clients"""
    # Patch kamaki for SSL verification
    self._kamaki_ssl(ignore_ssl=ignore_ssl)

    self.astakos = AstakosClient(self.auth_url, self.token)
    self.astakos.CONNECTION_RETRY_LIMIT = self.retry

    def make_client(cls):
        # Resolve the service endpoint from the client class's service_type
        # and build a retry-limited client instance.
        url = self.astakos.get_endpoint_url(cls.service_type)
        client = cls(url, self.token)
        client.CONNECTION_RETRY_LIMIT = self.retry
        return url, client

    self.compute_url, self.compute = make_client(ComputeClient)
    self.cyclades_url, self.cyclades = make_client(CycladesClient)
    self.block_storage_url, self.block_storage = make_client(
        CycladesBlockStorageClient)
    self.network_url, self.network = make_client(CycladesNetworkClient)
    self.pithos_url, self.pithos = make_client(PithosClient)
    self.image_url, self.image = make_client(ImageClient)
def list_pithos_files(self): """ Method for listing pithos+ files available to the user """ auth_url = self.opts['auth_url'] token = self.opts['token'] try: auth = AstakosClient(auth_url, token) auth.authenticate() except ClientError: msg = 'Authentication error: Invalid Token' logging.error(msg) exit(error_fatal) pithos_endpoint = auth.get_endpoint_url('object-store') pithos_container = self.opts.get('pithos_container','pithos') user_id = auth.user_info['id'] pithos_client = PithosClient(pithos_endpoint,self.opts['token'], user_id, pithos_container) objects = pithos_client.list_objects() for object in objects: is_dir = 'application/directory' in object.get('content_type', object.get('content-type', '')) is_dir = 'application/folder' in object.get('content_type', object.get('content-type', '')) if not is_dir: print u"{:>12s} \"pithos:/{:s}/{:s}\"".format(bytes_to_shorthand(object['bytes']), pithos_container,object['name'])
def __init__(self, cloud=None, auth_url=None, token=None):
    """Resolve credentials (a named cloud wins) and build service clients."""
    if cloud is not None:
        auth_url, token = utils.get_cloud_credentials(cloud)
    self.auth_url = auth_url
    self.token = token
    self.astakos = AstakosClient(self.auth_url, self.token)
    self.endpoints = self.get_api_endpoints()
    eps = self.endpoints
    self.volume = BlockStorageClient(eps["cyclades_volume"], token)
    self.compute = CycladesClient(eps["cyclades_compute"], token)
    self.cyclades_networks = CycladesNetworkClient(eps["cyclades_network"], token)
    self.network = NetworkClient(eps["cyclades_network"], token)
    self.image = ImageClient(eps["cyclades_plankton"], token)
def setUp(self):
    """Create Image and Cyclades clients for the configured test cloud."""
    self.now = time.mktime(time.gmtime())  # timestamp for unique image names
    self.cloud = 'cloud.%s' % self['testcloud']
    aurl, self.token = self[self.cloud, 'url'], self[self.cloud, 'token']
    # Cached astakos client: avoids re-authenticating on each lookup.
    self.auth_base = AstakosCachedClient(aurl, self.token)
    self.imgname = 'img_%s' % self.now
    url = self.auth_base.get_service_endpoints('image')['publicURL']
    self.token = self.auth_base.token
    self.client = ImageClient(url, self.token)
    cyclades_url = self.auth_base.get_service_endpoints(
        'compute')['publicURL']
    self.cyclades = CycladesClient(cyclades_url, self.token)
    # Registries of images created by the tests, used for teardown.
    self._imglist = {}
    self._imgdetails = {}
class Clients(object):
    """Our kamaki clients"""
    # Astakos authentication URL and user token (set by the caller before
    # initialize_clients() is invoked).
    auth_url = None
    token = None
    # Astakos
    astakos = None
    # Shared retry limit applied to every client below.
    retry = CONNECTION_RETRY_LIMIT
    # Compute
    compute = None
    compute_url = None
    # Cyclades
    cyclades = None
    # Network
    network = None
    network_url = None
    # Pithos
    pithos = None
    pithos_url = None
    # Image
    image = None
    image_url = None

    def initialize_clients(self):
        """Initialize all the Kamaki Clients"""
        self.astakos = AstakosClient(self.auth_url, self.token)
        self.astakos.CONNECTION_RETRY_LIMIT = self.retry
        # authenticate() returns the endpoint catalog used below.
        endpoints = self.astakos.authenticate()
        self.compute_url = _get_endpoint_url(endpoints, "compute")
        self.compute = ComputeClient(self.compute_url, self.token)
        self.compute.CONNECTION_RETRY_LIMIT = self.retry
        # Cyclades shares the compute endpoint.
        self.cyclades = CycladesClient(self.compute_url, self.token)
        self.cyclades.CONNECTION_RETRY_LIMIT = self.retry
        self.network_url = _get_endpoint_url(endpoints, "network")
        self.network = CycladesNetworkClient(self.network_url, self.token)
        self.network.CONNECTION_RETRY_LIMIT = self.retry
        self.pithos_url = _get_endpoint_url(endpoints, "object-store")
        self.pithos = PithosClient(self.pithos_url, self.token)
        self.pithos.CONNECTION_RETRY_LIMIT = self.retry
        self.image_url = _get_endpoint_url(endpoints, "image")
        self.image = ImageClient(self.image_url, self.token)
        self.image.CONNECTION_RETRY_LIMIT = self.retry
def __init__(self, config, cloud=None, debug=False):
    """Load kamaki config, apply SSL settings, create the astakos client."""
    _setup_logging(debug, debug)
    self.config = kamaki_config.Config(config)
    # Fall back to the configured default cloud when none is given.
    self.cloud = cloud or self.config.get("global", "default_cloud")
    self.ignore_ssl = self.config.get("global", "ignore_ssl").lower() == "on"
    self.ca_certs = self.config.get("global", "ca_certs")
    https.patch_ignore_ssl(self.ignore_ssl)
    if self.ca_certs is not None:
        https.patch_with_certs(self.ca_certs)
    self.auth_url = self.config.get_cloud(self.cloud, "url")
    self.token = self.config.get_cloud(self.cloud, "token")
    self.auth_client = AstakosClient(self.auth_url, self.token)
    self.endpoints = None  # lazyness
def setUp(self):
    """Build a Pithos client and share one test object for permission tests."""
    self.cloud = 'cloud.%s' % self['testcloud']
    aurl, self.token = self[self.cloud, 'url'], self[self.cloud, 'token']
    self.auth_base = AstakosClient(aurl, self.token)
    purl = self.auth_base.get_service_endpoints(
        'object-store')['publicURL']
    self.uuid = self.auth_base.user_term('id')
    self.client = PithosClient(purl, self.token, self.uuid)
    self.now = time.mktime(time.gmtime())
    self.now_unformated = datetime.datetime.utcnow()
    self._init_data()
    """Prepare an object to be shared - also its container"""
    self.client.container = self.c1
    # Grant the account read permission on 'test' (the sharing fixture).
    self.client.object_post(
        'test', update=True,
        permissions={'read': [self.client.account]})
    self.create_remote_object(self.c1, 'another.test')
def initialize_clients(self, ignore_ssl=False): """Initialize all the Kamaki Clients""" # Path kamaki for SSL verification self._kamaki_ssl(ignore_ssl=ignore_ssl) # Initialize kamaki Clients self.astakos = AstakosClient(self.auth_url, self.token) self.astakos.CONNECTION_RETRY_LIMIT = self.retry self.compute_url = self.astakos.get_endpoint_url( ComputeClient.service_type) self.compute = ComputeClient(self.compute_url, self.token) self.compute.CONNECTION_RETRY_LIMIT = self.retry self.cyclades_url = self.astakos.get_endpoint_url( CycladesClient.service_type) self.cyclades = CycladesClient(self.cyclades_url, self.token) self.cyclades.CONNECTION_RETRY_LIMIT = self.retry self.block_storage_url = self.astakos.get_endpoint_url( BlockStorageClient.service_type) self.block_storage = BlockStorageClient(self.block_storage_url, self.token) self.block_storage.CONNECTION_RETRY_LIMIT = self.retry self.network_url = self.astakos.get_endpoint_url( CycladesNetworkClient.service_type) self.network = CycladesNetworkClient(self.network_url, self.token) self.network.CONNECTION_RETRY_LIMIT = self.retry self.pithos_url = self.astakos.get_endpoint_url( PithosClient.service_type) self.pithos = PithosClient(self.pithos_url, self.token) self.pithos.CONNECTION_RETRY_LIMIT = self.retry self.image_url = self.astakos.get_endpoint_url( ImageClient.service_type) self.image = ImageClient(self.image_url, self.token) self.image.CONNECTION_RETRY_LIMIT = self.retry
def setup_kamaki(self):
    """Initialize kamaki

    Setup cyclades_client, image_client and compute_client
    """
    config = kamaki_config.Config()
    if self.kamaki_cloud is None:
        self.kamaki_cloud = config.get_global("default_cloud")

    self.logger.info("Setup kamaki client, using cloud '%s'.." %
                     self.kamaki_cloud)
    auth_url = config.get_cloud(self.kamaki_cloud, "url")
    self.logger.debug("Authentication URL is %s" % _green(auth_url))
    token = config.get_cloud(self.kamaki_cloud, "token")
    #self.logger.debug("Token is %s" % _green(token))

    self.astakos_client = AstakosClient(auth_url, token)

    cyclades_url = \
        self.astakos_client.get_service_endpoints('compute')['publicURL']
    self.logger.debug("Cyclades API url is %s" % _green(cyclades_url))
    self.cyclades_client = CycladesClient(cyclades_url, token)
    self.cyclades_client.CONNECTION_RETRY_LIMIT = 2

    image_url = \
        self.astakos_client.get_service_endpoints('image')['publicURL']
    self.logger.debug("Images API url is %s" % _green(image_url))
    # BUG FIX: the image client was constructed with cyclades_url even
    # though image_url was resolved right above (and otherwise unused).
    self.image_client = ImageClient(image_url, token)
    self.image_client.CONNECTION_RETRY_LIMIT = 2

    compute_url = \
        self.astakos_client.get_service_endpoints('compute')['publicURL']
    self.logger.debug("Compute API url is %s" % _green(compute_url))
    self.compute_client = ComputeClient(compute_url, token)
    self.compute_client.CONNECTION_RETRY_LIMIT = 2
def authenticate(self, authentication=None):
    """Initialize the cyclades/network clients from *authentication*.

    :param authentication: mapping with 'URL' and 'TOKEN'
    :return: True if clients are (or become) available, else False
    """
    if self.__cyclades is not None:
        return True
    try:
        url = authentication['URL']
        tok = authentication['TOKEN']
        authcl = AstakosClient(url, tok)
        authcl.authenticate()
        self.__cyclades = CycladesClient(
            authcl.get_service_endpoints('compute')['publicURL'], tok)
        self.__network_client = CycladesNetworkClient(
            authcl.get_service_endpoints('network')['publicURL'], tok)
    except ClientError:
        stderr.write('Connector initialization failed')
        return False
    return True
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # The views and conclusions contained in the software and # documentation are those of the authors and should not be # interpreted as representing official policies, either expressed # or implied, of GRNET S.A. from kamaki.clients.astakos import AstakosClient from kamaki.clients.cyclades import CycladesComputeClient AUTHENTICATION_URL = "https://astakos.example.com/identity/v2.0" TOKEN = "User-Token" astakos = AstakosClient(AUTHENTICATION_URL, TOKEN) service_type = CycladesComputeClient.service_type endpoint = astakos.get_endpoint_url(service_type) compute = CycladesComputeClient(endpoint, TOKEN) # Create a server (automatic IP) name = "My public server" flavor = "420" image = "image-id-for-debian" project = "a1234567-a890-1234-56ae-78f90bb1c2db" server = compute.create_server(name, flavor, image, project=project) print "Server status is {0}".server["status"] new_status = compute.wait_server_until(server["id"], "ACTIVE")
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # The views and conclusions contained in the software and # documentation are those of the authors and should not be # interpreted as representing official policies, either expressed # or implied, of GRNET S.A. from kamaki.clients.astakos import AstakosClient from kamaki.clients.image import ImageClient AUTHENTICATION_URL = "https://astakos.example.com/identity/v2.0" TOKEN = "User-Token" astakos = AstakosClient(AUTHENTICATION_URL, TOKEN) user = astakos.authenticate() uuid = user["access"]["user"]["id"] service_type = ImageClient.service_type endpoint = astakos.get_endpoint_url(service_type) image = ImageClient(endpoint, TOKEN) # Get all images owned/registered by me images = filter(lambda img: img["owner"] == uuid, image.list_public()) print "My images:\n" for i in images: print "\t{name} ({location})".format(name=i["name"], location=i["location"])
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # The views and conclusions contained in the software and # documentation are those of the authors and should not be # interpreted as representing official policies, either expressed # or implied, of GRNET S.A. from kamaki.clients.astakos import AstakosClient from kamaki.clients.pithos import PithosClient AUTHENTICATION_URL = "https://astakos.example.com/identity/v2.0" TOKEN = "User-Token" astakos = AstakosClient(AUTHENTICATION_URL, TOKEN) service_type = PithosClient.service_type endpoint = astakos.get_endpoint_url(service_type) pithos = PithosClient(endpoint, TOKEN) user = astakos.authenticate() uuid = user["access"]["user"]["id"] pithos.account = uuid # Get the project containers we care for containers = filter( lambda c: c["name"] in ("pithos", "images"), pithos.list_containers()) # Construct dict of the form {CONTAINER_NAME: PROJECT_ID, ...}
# Module-level setup: authenticate one user and expose ready-made cyclades
# compute/network clients plus the SSH key paths used by machine creation.
from kamaki.clients.astakos import AstakosClient
from kamaki.clients.astakos import getLogger
from kamaki.clients.cyclades import CycladesClient, CycladesNetworkClient
#http://www.synnefo.org/docs/kamaki/latest/developers/code.html#the-client-api-ref
from sys import stderr
from os.path import abspath
from base64 import b64encode
# NOTE(review): wildcard import -- get_credentials (used below) presumably
# comes from here; an explicit import list would be safer.
from lib.persistance_module import *
from logging import ERROR

USER = "******"

#retrieve the credentials for the specified users
AUTHENTICATION_URL, TOKEN = get_credentials(USER)
synnefo_user = AstakosClient(AUTHENTICATION_URL, TOKEN)
# Silence kamaki's loggers down to ERROR.
synnefo_user.logger.setLevel(ERROR)
getLogger().setLevel(ERROR)
cyclades_endpoints = synnefo_user.get_service_endpoints("compute")
CYCLADES_URL = cyclades_endpoints['publicURL']
cyclades_client = CycladesClient(CYCLADES_URL, TOKEN)
cyclades_net_client = CycladesNetworkClient(CYCLADES_URL, TOKEN)

# SSH keypair injected into created machines.
pub_keys_path = 'keys/just_a_key.pub'
priv_keys_path = 'keys/just_a_key'

#creates a "personality"
def personality(username):
# NOTE(review): this function was collapsed onto a single source line; the
# indentation below is reconstructed from the statement order -- verify the
# branch boundaries against the original layout.
def main():
    """Parse arguments, use kamaki to create cluster, setup using ssh"""
    (opts, args) = parse_arguments(sys.argv[1:])
    global CYCLADES, TOKEN
    AUTHENTICATION_URL = opts.cyclades
    TOKEN = opts.token
    # Cleanup stale servers from previous runs
    if opts.show_stale:
        cleanup_servers(prefix=opts.prefix, delete_stale=opts.delete_stale)
        return 0
    # Initialize a kamaki instance, get endpoints
    user = AstakosClient(AUTHENTICATION_URL, TOKEN)
    my_accountData = user.authenticate()
    endpoints = user.get_endpoints()
    cyclades_endpoints = user.get_endpoints('compute')
    cyclades_base_url = parseAstakosEndpoints(endpoints,'cyclades_compute')
    cyclades_network_base_url = parseAstakosEndpoints(endpoints,'cyclades_network')
    my_cyclades_client = CycladesClient(cyclades_base_url, TOKEN)
    my_compute_client = ComputeClient(cyclades_base_url, TOKEN)
    my_network_client = CycladesNetworkClient(cyclades_network_base_url, TOKEN)
    cnt = int(opts.clustersize)  # calculate size of cluster into 'cnt'
    # Initialize
    nodes = []
    masterName = ''
    # Create a file to store the root password for later use
    pass_fname = opts.hadoop_dir+'/bak/adminPass'+str(datetime.now())[:19].replace(' ', '')
    adminPass_f = open(pass_fname, 'w')
    # Reserve a floating IP on the public network.
    myNetworks = my_network_client.list_networks();
    NetWork_free = parseNetwork(myNetworks,'public');
    myIp = my_network_client.create_floatingip(NetWork_free);
    LastIp = myIp.get("floating_ip_address")
    initialClusterSize = 0
    server = {}
    if opts.extend == False:
        # Create master node (0th node)
        server = create_machine(opts, my_cyclades_client, 0)
        if server == {}:
            return
    else:
        # Extending: count the existing prefix-matching servers first.
        servers = my_cyclades_client.list_servers(detail=True)
        cluster = [s for s in servers if s["name"].startswith(opts.prefix)]
        initialClusterSize = len(cluster)
        if initialClusterSize==0:
            log.info("Cluster cannot be expanded: it does not exist.")
            return
    servername = "%s-0" % (opts.prefix)
    masterName = servername
    nodes.append(server)
    # Create slave (worker) nodes
    if cnt>1 or opts.extend:
        startingOffset = 1
        if opts.extend:
            startingOffset = initialClusterSize
        for i in xrange(startingOffset, initialClusterSize+cnt):
            server = {}
            server = create_machine(opts, my_cyclades_client, i)
            if server == {}:
                return;
            nodes.append(server)
            servername = "%s-%d" % (opts.prefix, i)
            # Write the root password to a file
            adminPass_f.write('machine = %s, password = %s\n' % (servername, server['adminPass']))
    adminPass_f.close()
    # Setup Hadoop files and settings on all cluster nodes
    # Create the 'cluster' dictionary out of servers, with only Hadoop-relevant keys (name, ip, integer key)
    servers = my_cyclades_client.list_servers(detail=True)
    cluster = [s for s in my_cyclades_client.list_servers(detail=True) if s["name"].startswith(opts.prefix)]
    cluster = [(s["name"], s["attachments"][1]["ipv4"], int(s["name"][s["name"].find('-')+1:])) for s in cluster]
    # Sort by the numeric suffix so node 0 (the master) comes first.
    cluster = sorted(cluster, key=lambda cluster: cluster[2])
    # Prepare Ansible-Hadoop config files (hosts, conf/slaves)
    hosts = open(opts.hadoop_dir+'/hosts', 'w')
    hosts.write('[master]\n')
    for i in xrange(0, initialClusterSize+cnt):
        for s in cluster:
            if s[0] == opts.prefix+"-"+str(i):
                if s[0] == masterName:
                    hosts.write(s[1]+'\n\n'+'[slaves]\n')
                else:
                    hosts.write(s[1]+'\n')
    hosts.close()
    slaves = open(opts.hadoop_dir+'/conf/slaves', 'w')
    for s in cluster[1:]:
        slaves.write(s[1]+'\n')
    slaves.close()
    # Execute respective ansible playbook
    if (opts.extend==False):
        cmd = "ansible-playbook hadoop.yml -i hosts -vv --extra-vars \""+"master_ip="+cluster[0][1]+"\""+" -l master"
        retval = os.system(cmd)
        cmd = "ansible-playbook hadoop.yml -i hosts -vv --extra-vars \""+"master_ip="+cluster[0][1]+"\""+" -l slaves"
        retval = os.system(cmd)
        slave_ip_list = []
        for i in xrange(1, cnt):
            slave_ip_list.append(cluster[i][1])
        enable_ssh_login(cluster[0][1], [cluster[0][1]])
        enable_ssh_login(cluster[0][1], slave_ip_list)
    else:
        # Only the newly added nodes go to hosts.latest.
        hosts_latest = open(opts.hadoop_dir+'/hosts.latest', 'w')
        hosts_latest.write('[master]\n')
        hosts_latest.write(cluster[0][1]+'\n\n'+'[slaves]\n')
        for i in xrange(initialClusterSize, initialClusterSize+cnt):
            hosts_latest.write(cluster[i][1]+'\n')
        hosts_latest.close()
        cmd = "ansible-playbook hadoop.yml -i hosts.latest -vv --extra-vars \""+"master_ip="+cluster[0][1]+"\""+" -l slaves"
        retval = os.system(cmd)
        slave_ip_list = []
        for i in xrange(initialClusterSize, initialClusterSize+cnt):
            slave_ip_list.append(cluster[i][1])
        enable_ssh_login(cluster[0][1], slave_ip_list)
        # Update conf/slaves in master
        cmd = "ansible-playbook hadoop.yml -i hosts -vv --extra-vars \""+"master_ip="+cluster[0][1]+"\""+" -l master -t slaves"
        retval = os.system(cmd)
    log.info("Done.")
class SynnefoCI(object):
    """Drive a full Synnefo CI run on a disposable cloud server.

    Responsibilities: create/destroy a slave VM via kamaki, push or clone
    the Synnefo and pithos-web-client repos onto it, build packages and
    docs with devflow, deploy with snf-deploy, and run the unit/burnin
    test suites over fabric.  Per-build state is persisted in a shared,
    file-locked temporary config file keyed by ``build_id``.
    """

    def __init__(self, config_file=None, build_id=None, cloud=None):
        """ Initialize SynnefoCI python class

        Setup logger, local_dir, config and kamaki

        :param config_file: path to the CI config (defaults to
            DEFAULT_CONFIG_FILE next to this script)
        :param build_id: build to (re)use; if None a fresh one is
            allocated lazily
        :param cloud: kamaki cloud name; overrides the config file
        """
        # Setup logger: INFO and below to stdout, WARNING and above to
        # stderr, so CI output streams can be separated.
        self.logger = logging.getLogger('synnefo-ci')
        self.logger.setLevel(logging.DEBUG)

        handler1 = logging.StreamHandler(sys.stdout)
        handler1.setLevel(logging.DEBUG)
        handler1.addFilter(_InfoFilter())
        handler1.setFormatter(_MyFormatter())
        handler2 = logging.StreamHandler(sys.stderr)
        handler2.setLevel(logging.WARNING)
        handler2.setFormatter(_MyFormatter())
        self.logger.addHandler(handler1)
        self.logger.addHandler(handler2)

        # Get our local dir
        self.ci_dir = os.path.dirname(os.path.abspath(__file__))
        self.repo_dir = os.path.dirname(self.ci_dir)

        # Read config file
        if config_file is None:
            config_file = os.path.join(self.ci_dir, DEFAULT_CONFIG_FILE)
        config_file = os.path.abspath(config_file)
        self.config = ConfigParser()
        # Keep option names case-sensitive (default lower-cases them).
        self.config.optionxform = str
        self.config.read(config_file)

        # Read temporary_config file (shared mutable state between runs)
        self.temp_config_file = \
            os.path.expanduser(self.config.get('Global', 'temporary_config'))
        self.temp_config = ConfigParser()
        self.temp_config.optionxform = str
        self.temp_config.read(self.temp_config_file)
        self.build_id = build_id
        if build_id is not None:
            self.logger.info("Will use \"%s\" as build id"
                             % _green(self.build_id))

        # Set kamaki cloud: CLI argument wins, then config file, else
        # None (resolved later from kamaki's own default_cloud).
        if cloud is not None:
            self.kamaki_cloud = cloud
        elif self.config.has_option("Deployment", "kamaki_cloud"):
            kamaki_cloud = self.config.get("Deployment", "kamaki_cloud")
            if kamaki_cloud == "":
                self.kamaki_cloud = None
            else:
                # BUGFIX: a non-empty config value previously left
                # self.kamaki_cloud unset, crashing setup_kamaki() with
                # AttributeError.
                self.kamaki_cloud = kamaki_cloud
        else:
            self.kamaki_cloud = None

        # Initialize variables
        self.fabric_installed = False
        self.kamaki_installed = False
        self.cyclades_client = None
        self.network_client = None
        self.compute_client = None
        self.image_client = None
        self.astakos_client = None

    def setup_kamaki(self):
        """Initialize kamaki

        Setup cyclades_client, image_client and compute_client
        using the endpoints advertised by astakos.
        """
        config = kamaki_config.Config()
        if self.kamaki_cloud is None:
            try:
                self.kamaki_cloud = config.get("global", "default_cloud")
            except AttributeError:
                # Compatibility with kamaki version <=0.10
                # NOTE(review): both branches issue the identical call, so
                # this fallback can never behave differently -- the legacy
                # accessor was probably lost in an edit; confirm against
                # kamaki <=0.10 API.
                self.kamaki_cloud = config.get("global", "default_cloud")

        self.logger.info("Setup kamaki client, using cloud '%s'.." %
                         self.kamaki_cloud)
        auth_url = config.get_cloud(self.kamaki_cloud, "url")
        self.logger.debug("Authentication URL is %s" % _green(auth_url))
        token = config.get_cloud(self.kamaki_cloud, "token")
        #self.logger.debug("Token is %s" % _green(token))

        self.astakos_client = AstakosClient(auth_url, token)
        endpoints = self.astakos_client.authenticate()

        cyclades_url = get_endpoint_url(endpoints, "compute")
        self.logger.debug("Cyclades API url is %s" % _green(cyclades_url))
        self.cyclades_client = CycladesClient(cyclades_url, token)
        self.cyclades_client.CONNECTION_RETRY_LIMIT = 2

        network_url = get_endpoint_url(endpoints, "network")
        self.logger.debug("Network API url is %s" % _green(network_url))
        self.network_client = CycladesNetworkClient(network_url, token)
        self.network_client.CONNECTION_RETRY_LIMIT = 2

        image_url = get_endpoint_url(endpoints, "image")
        self.logger.debug("Images API url is %s" % _green(image_url))
        # BUGFIX: was constructed with cyclades_url, pointing the image
        # client at the compute endpoint; use the image endpoint that was
        # just looked up.
        self.image_client = ImageClient(image_url, token)
        self.image_client.CONNECTION_RETRY_LIMIT = 2

        compute_url = get_endpoint_url(endpoints, "compute")
        self.logger.debug("Compute API url is %s" % _green(compute_url))
        self.compute_client = ComputeClient(compute_url, token)
        self.compute_client.CONNECTION_RETRY_LIMIT = 2

    def _wait_transition(self, server_id, current_status, new_status):
        """Wait for server to go from current_status to new_status.

        Polls every 5s up to Global/build_timeout seconds.  On timeout or
        on any unexpected status the slave server is destroyed and the
        process exits with status 1.  Returns the server details dict on
        success.
        """
        self.logger.debug("Waiting for server to become %s" % new_status)
        timeout = self.config.getint('Global', 'build_timeout')
        sleep_time = 5
        while True:
            server = self.cyclades_client.get_server_details(server_id)
            if server['status'] == new_status:
                return server
            elif timeout < 0:
                self.logger.error(
                    "Waiting for server to become %s timed out" % new_status)
                self.destroy_server(False)
                sys.exit(1)
            elif server['status'] == current_status:
                # Sleep for #n secs and continue
                timeout = timeout - sleep_time
                time.sleep(sleep_time)
            else:
                # Any third status (e.g. ERROR) means the transition failed.
                self.logger.error(
                    "Server failed with status %s" % server['status'])
                self.destroy_server(False)
                sys.exit(1)

    @_check_kamaki
    def destroy_server(self, wait=True):
        """Destroy slave server and release its floating IPs."""
        server_id = int(self.read_temp_config('server_id'))
        # Snapshot the server's floating IPs *before* deletion so they can
        # be released afterwards.
        fips = [f for f in self.network_client.list_floatingips()
                if str(f['instance_id']) == str(server_id)]
        self.logger.info("Destroying server with id %s " % server_id)
        self.cyclades_client.delete_server(server_id)
        if wait:
            self._wait_transition(server_id, "ACTIVE", "DELETED")
        for fip in fips:
            self.logger.info("Destroying floating ip %s",
                             fip['floating_ip_address'])
            self.network_client.delete_floatingip(fip['id'])

    def _create_floating_ip(self):
        """Create a new floating ip.

        Tries each public floating-IP pool in turn; exits the process if
        every pool is exhausted.
        """
        networks = self.network_client.list_networks(detail=True)
        pub_nets = [n for n in networks
                    if n['SNF:floating_ip_pool'] and n['public']]
        for pub_net in pub_nets:
            # Try until we find a public network that is not full
            try:
                fip = self.network_client.create_floatingip(pub_net['id'])
            except ClientError as err:
                self.logger.warning("%s: %s", err.message, err.details)
                continue
            self.logger.debug("Floating IP %s with id %s created",
                              fip['floating_ip_address'], fip['id'])
            return fip
        self.logger.error("No more IP addresses available")
        sys.exit(1)

    def _create_port(self, floating_ip):
        """Create a new port for our floating IP and return it."""
        net_id = floating_ip['floating_network_id']
        self.logger.debug("Creating a new port to network with id %s", net_id)
        fixed_ips = [{'ip_address': floating_ip['floating_ip_address']}]
        port = self.network_client.create_port(
            net_id, device_id=None, fixed_ips=fixed_ips)
        return port

    @_check_kamaki
    # Too many local variables. pylint: disable-msg=R0914
    def create_server(self, image=None, flavor=None, ssh_keys=None,
                      server_name=None):
        """Create slave server

        Allocates a build id, picks image/flavor, boots the server with
        the configured networking, waits for it to become ACTIVE, installs
        ssh keys, locks down the firewall and pre-installs x2go packages.
        """
        self.logger.info("Create a new server..")

        # Find a build_id to use
        self._create_new_build_id()

        # Find an image to use
        image_id = self._find_image(image)
        # Find a flavor to use
        flavor_id = self._find_flavor(flavor)

        # Create Server
        networks = []
        if self.config.get("Deployment", "allocate_floating_ip") == "True":
            fip = self._create_floating_ip()
            port = self._create_port(fip)
            networks.append({'port': port['id']})
        private_networks = self.config.get('Deployment', 'private_networks')
        if private_networks:
            private_networks = [p.strip() for p in private_networks.split(",")]
            networks.extend([{"uuid": uuid} for uuid in private_networks])
        if server_name is None:
            server_name = self.config.get("Deployment", "server_name")
            server_name = "%s(BID: %s)" % (server_name, self.build_id)
        server = self.cyclades_client.create_server(
            server_name, flavor_id, image_id, networks=networks)
        server_id = server['id']
        self.write_temp_config('server_id', server_id)
        self.logger.debug("Server got id %s" % _green(server_id))
        server_user = server['metadata']['users']
        self.write_temp_config('server_user', server_user)
        self.logger.debug("Server's admin user is %s" % _green(server_user))
        server_passwd = server['adminPass']
        # adminPass is only returned at creation time, so persist it now.
        self.write_temp_config('server_passwd', server_passwd)

        server = self._wait_transition(server_id, "BUILD", "ACTIVE")
        self._get_server_ip_and_port(server, private_networks)
        self._copy_ssh_keys(ssh_keys)

        # Setup Firewall: drop ssh from everywhere except accept_ssh_from
        self.setup_fabric()
        self.logger.info("Setup firewall")
        accept_ssh_from = self.config.get('Global', 'accept_ssh_from')
        if accept_ssh_from != "":
            self.logger.debug("Block ssh except from %s" % accept_ssh_from)
            cmd = """
            local_ip=$(/sbin/ifconfig eth0 | grep 'inet addr:' | \
                cut -d':' -f2 | cut -d' ' -f1)
            iptables -A INPUT -s localhost -j ACCEPT
            iptables -A INPUT -s $local_ip -j ACCEPT
            iptables -A INPUT -s {0} -p tcp --dport 22 -j ACCEPT
            iptables -A INPUT -p tcp --dport 22 -j DROP
            """.format(accept_ssh_from)
            _run(cmd, False)

        # Setup apt, download packages
        self.logger.debug("Setup apt. Install x2goserver and firefox")
        cmd = """
        echo 'APT::Install-Suggests "false";' >> /etc/apt/apt.conf
        echo 'precedence ::ffff:0:0/96 100' >> /etc/gai.conf
        apt-get update
        apt-get install curl --yes --force-yes
        echo -e "\n\n{0}" >> /etc/apt/sources.list
        # Synnefo repo's key
        curl https://dev.grnet.gr/files/apt-grnetdev.pub | apt-key add -

        # X2GO Key
        apt-key adv --recv-keys --keyserver keys.gnupg.net E1F958385BFE2B6E
        apt-get install x2go-keyring --yes --force-yes
        apt-get update
        apt-get install x2goserver x2goserver-xsession \
                iceweasel --yes --force-yes

        # xterm published application
        echo '[Desktop Entry]' > /usr/share/applications/xterm.desktop
        echo 'Name=XTerm' >> /usr/share/applications/xterm.desktop
        echo 'Comment=standard terminal emulator for the X window system' >> \
            /usr/share/applications/xterm.desktop
        echo 'Exec=xterm' >> /usr/share/applications/xterm.desktop
        echo 'Terminal=false' >> /usr/share/applications/xterm.desktop
        echo 'Type=Application' >> /usr/share/applications/xterm.desktop
        echo 'Encoding=UTF-8' >> /usr/share/applications/xterm.desktop
        echo 'Icon=xterm-color_48x48' >> /usr/share/applications/xterm.desktop
        echo 'Categories=System;TerminalEmulator;' >> \
            /usr/share/applications/xterm.desktop
        """.format(self.config.get('Global', 'apt_repo'))
        _run(cmd, False)

    def _find_flavor(self, flavor=None):
        """Find a suitable flavor to use

        Search by name (reg expression) or by id.  Candidates from the
        config file are tried in order; an explicit `flavor` argument is
        tried first.  Exits the process if nothing matches.
        """
        # Get a list of flavors from config file
        flavors = self.config.get('Deployment', 'flavors').split(",")
        if flavor is not None:
            # If we have a flavor_name to use, add it to our list
            flavors.insert(0, flavor)

        list_flavors = self.compute_client.list_flavors()
        for flv in flavors:
            flv_type, flv_value = parse_typed_option(option="flavor",
                                                     value=flv)
            if flv_type == "name":
                # Filter flavors by name
                self.logger.debug(
                    "Trying to find a flavor with name \"%s\"" % flv_value)
                list_flvs = \
                    [f for f in list_flavors
                     if re.search(flv_value, f['name'], flags=re.I)
                     is not None]
            elif flv_type == "id":
                # Filter flavors by id
                self.logger.debug(
                    "Trying to find a flavor with id \"%s\"" % flv_value)
                list_flvs = \
                    [f for f in list_flavors
                     if str(f['id']) == flv_value]
            else:
                self.logger.error("Unrecognized flavor type %s" % flv_type)
                # BUGFIX: previously fell through with list_flvs unbound
                # (NameError on first iteration); exit like _find_image does.
                sys.exit(1)

            # Check if we found one
            if list_flvs:
                self.logger.debug("Will use \"%s\" with id \"%s\""
                                  % (_green(list_flvs[0]['name']),
                                     _green(list_flvs[0]['id'])))
                return list_flvs[0]['id']

        self.logger.error("No matching flavor found.. aborting")
        sys.exit(1)

    def _find_image(self, image=None):
        """Find a suitable image to use

        In case of search by name, the image has to belong to one
        of the `DEFAULT_SYSTEM_IMAGES_UUID' users.
        In case of search by id it only has to exist.
        Exits the process if nothing matches.
        """
        # Get a list of images from config file
        images = self.config.get('Deployment', 'images').split(",")
        if image is not None:
            # If we have an image from command line, add it to our list
            images.insert(0, image)

        auth = self.astakos_client.authenticate()
        user_uuid = auth["access"]["token"]["tenant"]["id"]
        list_images = self.image_client.list_public(detail=True)['images']
        for img in images:
            img_type, img_value = parse_typed_option(option="image",
                                                     value=img)
            if img_type == "name":
                # Filter images by name
                self.logger.debug(
                    "Trying to find an image with name \"%s\"" % img_value)
                accepted_uuids = DEFAULT_SYSTEM_IMAGES_UUID + [user_uuid]
                list_imgs = \
                    [i for i in list_images if i['user_id'] in accepted_uuids
                     and
                     re.search(img_value, i['name'], flags=re.I) is not None]
            elif img_type == "id":
                # Filter images by id
                self.logger.debug(
                    "Trying to find an image with id \"%s\"" % img_value)
                list_imgs = \
                    [i for i in list_images
                     if i['id'].lower() == img_value.lower()]
            else:
                self.logger.error("Unrecognized image type %s" % img_type)
                sys.exit(1)

            # Check if we found one
            if list_imgs:
                self.logger.debug("Will use \"%s\" with id \"%s\""
                                  % (_green(list_imgs[0]['name']),
                                     _green(list_imgs[0]['id'])))
                return list_imgs[0]['id']

        # We didn't found one
        self.logger.error("No matching image found.. aborting")
        sys.exit(1)

    def _get_server_ip_and_port(self, server, private_networks):
        """Compute server's IPv4 and ssh port number.

        The result is persisted in the temporary config
        (server_ip / server_port).
        """
        self.logger.info("Get server connection details..")
        if private_networks:
            # Choose the networks that belong to private_networks
            networks = [n for n in server['attachments']
                        if n['network_id'] in private_networks]
        else:
            # Choose the networks that are public
            networks = [n for n in server['attachments']
                        if self.network_client.
                        get_network_details(n['network_id'])['public']]
        # Choose the networks with IPv4
        networks = [n for n in networks if n['ipv4']]
        # Use the first network as IPv4
        server_ip = networks[0]['ipv4']

        # okeanos.io / demo deployments are reachable only through a ssh
        # gateway whose port encodes the server's last two IPv4 octets.
        if (".okeanos.io" in self.cyclades_client.base_url or
                ".demo.synnefo.org" in self.cyclades_client.base_url):
            tmp1 = int(server_ip.split(".")[2])
            tmp2 = int(server_ip.split(".")[3])
            server_ip = "gate.okeanos.io"
            server_port = 10000 + tmp1 * 256 + tmp2
        else:
            server_port = 22

        self.write_temp_config('server_ip', server_ip)
        self.logger.debug("Server's IPv4 is %s" % _green(server_ip))
        self.write_temp_config('server_port', server_port)
        self.logger.debug("Server's ssh port is %s" % _green(server_port))
        ssh_command = "ssh -p %s %s@%s" \
            % (server_port, server['metadata']['users'], server_ip)
        self.logger.debug("Access server using \"%s\"" %
                          (_green(ssh_command)))

    @_check_fabric
    def _copy_ssh_keys(self, ssh_keys):
        """Upload/Install ssh keys to server.

        `ssh_keys` may be a URL, a local file path, or None (falls back
        to the Deployment/ssh_keys config option).
        """
        self.logger.debug("Check for authentication keys to use")
        if ssh_keys is None:
            ssh_keys = self.config.get("Deployment", "ssh_keys")

        if ssh_keys != "":
            ssh_keys = os.path.expanduser(ssh_keys)
            self.logger.debug("Will use \"%s\" authentication keys file" %
                              _green(ssh_keys))
            keyfile = '/tmp/%s.pub' % fabric.env.user
            _run('mkdir -p ~/.ssh && chmod 700 ~/.ssh', False)
            if ssh_keys.startswith("http://") or \
                    ssh_keys.startswith("https://") or \
                    ssh_keys.startswith("ftp://"):
                cmd = """
                apt-get update
                apt-get install wget --yes --force-yes
                wget {0} -O {1} --no-check-certificate
                """.format(ssh_keys, keyfile)
                _run(cmd, False)
            elif os.path.exists(ssh_keys):
                _put(ssh_keys, keyfile)
            else:
                self.logger.debug("No ssh keys found")
                return
            _run('cat %s >> ~/.ssh/authorized_keys' % keyfile, False)
            _run('rm %s' % keyfile, False)
            self.logger.debug("Uploaded ssh authorized keys")
        else:
            self.logger.debug("No ssh keys found")

    def _create_new_build_id(self):
        """Find a uniq build_id to use.

        Takes the file lock on the temporary config so concurrent CI runs
        never grab the same id, then records the creation timestamp under
        the new section.
        """
        with filelocker.lock("%s.lock" % self.temp_config_file,
                             filelocker.LOCK_EX):
            # Read temp_config again to get any new entries
            self.temp_config.read(self.temp_config_file)

            # Find a uniq build_id to use
            if self.build_id is None:
                ids = self.temp_config.sections()
                if ids:
                    max_id = int(max(self.temp_config.sections(), key=int))
                    self.build_id = max_id + 1
                else:
                    self.build_id = 1
            self.logger.debug("Will use \"%s\" as build id"
                              % _green(self.build_id))

            # Create a new section
            try:
                self.temp_config.add_section(str(self.build_id))
            except DuplicateSectionError:
                msg = ("Build id \"%s\" already in use. " +
                       "Please use a unique one or cleanup \"%s\" file.\n") \
                    % (self.build_id, self.temp_config_file)
                self.logger.error(msg)
                sys.exit(1)
            creation_time = \
                time.strftime("%a, %d %b %Y %X", time.localtime())
            self.temp_config.set(str(self.build_id),
                                 "created", str(creation_time))

            # Write changes back to temp config file
            with open(self.temp_config_file, 'wb') as tcf:
                self.temp_config.write(tcf)

    def write_temp_config(self, option, value):
        """Write changes back to config file (under the file lock)."""
        # Acquire the lock to write to temp_config_file
        with filelocker.lock("%s.lock" % self.temp_config_file,
                             filelocker.LOCK_EX):
            # Read temp_config again to get any new entries
            self.temp_config.read(self.temp_config_file)

            self.temp_config.set(str(self.build_id), option, str(value))
            curr_time = time.strftime("%a, %d %b %Y %X", time.localtime())
            self.temp_config.set(str(self.build_id), "modified", curr_time)

            # Write changes back to temp config file
            with open(self.temp_config_file, 'wb') as tcf:
                self.temp_config.write(tcf)

    def read_temp_config(self, option):
        """Read from temporary_config file.

        If no build_id was given, the newest section in the file is used.
        """
        # If build_id is None use the latest one
        if self.build_id is None:
            ids = self.temp_config.sections()
            if ids:
                self.build_id = int(ids[-1])
            else:
                self.logger.error("No sections in temporary config file")
                sys.exit(1)
            self.logger.debug("Will use \"%s\" as build id"
                              % _green(self.build_id))
        # Read specified option
        return self.temp_config.get(str(self.build_id), option)

    def setup_fabric(self):
        """Setup fabric environment from persisted server details."""
        self.logger.info("Setup fabric parameters..")
        fabric.env.user = self.read_temp_config('server_user')
        fabric.env.host_string = self.read_temp_config('server_ip')
        fabric.env.port = int(self.read_temp_config('server_port'))
        fabric.env.password = self.read_temp_config('server_passwd')
        fabric.env.connection_attempts = 10
        fabric.env.shell = "/bin/bash -c"
        fabric.env.disable_known_hosts = True
        fabric.env.output_prefix = None

    def _check_hash_sum(self, localfile, remotefile):
        """Check hash sums of two files; exit on mismatch."""
        self.logger.debug("Check hash sum for local file %s" % localfile)
        hash1 = os.popen("sha256sum %s" % localfile).read().split(' ')[0]
        self.logger.debug("Local file has sha256 hash %s" % hash1)
        self.logger.debug("Check hash sum for remote file %s" % remotefile)
        hash2 = _run("sha256sum %s" % remotefile, False)
        hash2 = hash2.split(' ')[0]
        self.logger.debug("Remote file has sha256 hash %s" % hash2)
        if hash1 != hash2:
            self.logger.error("Hashes differ.. aborting")
            sys.exit(1)

    @_check_fabric
    def clone_repo(self, local_repo=False):
        """Clone Synnefo repo from slave server"""
        self.logger.info("Configure repositories on remote server..")
        self.logger.debug("Install/Setup git")
        cmd = """
        apt-get install git --yes --force-yes
        git config --global user.name {0}
        git config --global user.email {1}
        """.format(self.config.get('Global', 'git_config_name'),
                   self.config.get('Global', 'git_config_mail'))
        _run(cmd, False)

        # Clone synnefo_repo
        synnefo_branch = self.clone_synnefo_repo(local_repo=local_repo)
        # Clone pithos-web-client
        self.clone_pithos_webclient_repo(synnefo_branch)

    @_check_fabric
    def clone_synnefo_repo(self, local_repo=False):
        """Clone Synnefo repo to remote server.

        Returns the branch/commit that was checked out, so the
        pithos-web-client clone can follow it.
        """
        # Find synnefo_repo and synnefo_branch to use
        synnefo_repo = self.config.get('Global', 'synnefo_repo')
        synnefo_branch = self.config.get("Global", "synnefo_branch")
        if synnefo_branch == "":
            # Default to the branch the local checkout is on; fall back to
            # the short commit hash for a detached HEAD.
            synnefo_branch = \
                subprocess.Popen(
                    ["git", "rev-parse", "--abbrev-ref", "HEAD"],
                    stdout=subprocess.PIPE).communicate()[0].strip()
            if synnefo_branch == "HEAD":
                synnefo_branch = \
                    subprocess.Popen(
                        ["git", "rev-parse", "--short", "HEAD"],
                        stdout=subprocess.PIPE).communicate()[0].strip()
        self.logger.debug("Will use branch \"%s\"" % _green(synnefo_branch))

        if local_repo or synnefo_repo == "":
            # Use local_repo
            self.logger.debug("Push local repo to server")
            # Firstly create the remote repo
            _run("git init synnefo", False)
            # Then push our local repo over ssh
            # We have to pass some arguments to ssh command
            # namely to disable host checking.
            (temp_ssh_file_handle, temp_ssh_file) = tempfile.mkstemp()
            os.close(temp_ssh_file_handle)
            # XXX: git push doesn't read the password
            cmd = """
            echo 'exec ssh -o "StrictHostKeyChecking no" \
                -o "UserKnownHostsFile /dev/null" \
                -q "$@"' > {4}
            chmod u+x {4}
            export GIT_SSH="{4}"
            echo "{0}" | git push --quiet --mirror ssh://{1}@{2}:{3}/~/synnefo
            rm -f {4}
            """.format(fabric.env.password,
                       fabric.env.user,
                       fabric.env.host_string,
                       fabric.env.port,
                       temp_ssh_file)
            os.system(cmd)
        else:
            # Clone Synnefo from remote repo
            self.logger.debug("Clone synnefo from %s" % synnefo_repo)
            self._git_clone(synnefo_repo)

        # Checkout the desired synnefo_branch
        self.logger.debug("Checkout \"%s\" branch/commit" % synnefo_branch)
        cmd = """
        cd synnefo
        for branch in `git branch -a | grep remotes | grep -v HEAD`; do
            git branch --track ${branch##*/} $branch
        done
        git checkout %s
        """ % (synnefo_branch)
        _run(cmd, False)

        return synnefo_branch

    @_check_fabric
    def clone_pithos_webclient_repo(self, synnefo_branch):
        """Clone Pithos WebClient repo to remote server"""
        # Find pithos_webclient_repo and pithos_webclient_branch to use
        pithos_webclient_repo = \
            self.config.get('Global', 'pithos_webclient_repo')
        pithos_webclient_branch = \
            self.config.get('Global', 'pithos_webclient_branch')

        # Clone pithos-webclient from remote repo
        self.logger.debug("Clone pithos-webclient from %s" %
                          pithos_webclient_repo)
        self._git_clone(pithos_webclient_repo)

        # Track all pithos-webclient branches
        cmd = """
        cd pithos-web-client
        for branch in `git branch -a | grep remotes | grep -v HEAD`; do
            git branch --track ${branch##*/} $branch > /dev/null 2>&1
        done
        git --no-pager branch --no-color
        """
        webclient_branches = _run(cmd, False)
        webclient_branches = webclient_branches.split()

        # If we have pithos_webclient_branch in config file use this one
        # else try to use the same branch as synnefo_branch
        # else use an appropriate one.
        if pithos_webclient_branch == "":
            if synnefo_branch in webclient_branches:
                pithos_webclient_branch = synnefo_branch
            else:
                # If synnefo_branch starts with one of
                # 'master', 'hotfix'; use the master branch
                if synnefo_branch.startswith('master') or \
                        synnefo_branch.startswith('hotfix'):
                    pithos_webclient_branch = "master"
                # If synnefo_branch starts with one of
                # 'develop', 'feature'; use the develop branch
                elif synnefo_branch.startswith('develop') or \
                        synnefo_branch.startswith('feature'):
                    pithos_webclient_branch = "develop"
                else:
                    self.logger.warning(
                        "Cannot determine which pithos-web-client branch to "
                        "use based on \"%s\" synnefo branch. "
                        "Will use develop." % synnefo_branch)
                    pithos_webclient_branch = "develop"
        # Checkout branch
        self.logger.debug("Checkout \"%s\" branch" %
                          _green(pithos_webclient_branch))
        cmd = """
        cd pithos-web-client
        git checkout {0}
        """.format(pithos_webclient_branch)
        _run(cmd, False)

    def _git_clone(self, repo):
        """Clone repo to remote server

        Currently clonning from code.grnet.gr can fail unexpectedly.
        So retry!!
        """
        cloned = False
        for i in range(1, 11):
            try:
                _run("git clone %s" % repo, False)
                cloned = True
                break
            except BaseException:
                self.logger.warning("Cloning failed.. retrying %s/10" % i)
        if not cloned:
            self.logger.error("Can not clone repo.")
            sys.exit(1)

    @_check_fabric
    def build_packages(self):
        """Build packages needed by Synnefo software"""
        self.logger.info("Install development packages")
        cmd = """
        apt-get update
        apt-get install zlib1g-dev dpkg-dev debhelper git-buildpackage \
                python-dev python-all python-pip ant --yes --force-yes
        pip install -U devflow
        """
        _run(cmd, False)

        # Patch pydist bug
        if self.config.get('Global', 'patch_pydist') == "True":
            self.logger.debug("Patch pydist.py module")
            cmd = r"""
            sed -r -i 's/(\(\?P<name>\[A-Za-z\]\[A-Za-z0-9_\.)/\1\\\-/' \
                /usr/share/python/debpython/pydist.py
            """
            _run(cmd, False)

        # Build synnefo packages
        self.build_synnefo()
        # Build pithos-web-client packages
        self.build_pithos_webclient()

    @_check_fabric
    def build_synnefo(self):
        """Build Synnefo packages"""
        self.logger.info("Build Synnefo packages..")

        cmd = """
        devflow-autopkg snapshot -b ~/synnefo_build-area --no-sign
        """
        with fabric.cd("synnefo"):
            _run(cmd, True)

        # Install snf-deploy package
        self.logger.debug("Install snf-deploy package")
        cmd = """
        dpkg -i snf-deploy*.deb
        apt-get -f install --yes --force-yes
        """
        with fabric.cd("synnefo_build-area"):
            with fabric.settings(warn_only=True):
                _run(cmd, True)

        # Setup synnefo packages for snf-deploy
        self.logger.debug("Copy synnefo debs to snf-deploy packages dir")
        cmd = """
        cp ~/synnefo_build-area/*.deb /var/lib/snf-deploy/packages/
        """
        _run(cmd, False)

    @_check_fabric
    def build_pithos_webclient(self):
        """Build pithos-web-client packages"""
        self.logger.info("Build pithos-web-client packages..")

        cmd = """
        devflow-autopkg snapshot -b ~/webclient_build-area --no-sign
        """
        with fabric.cd("pithos-web-client"):
            _run(cmd, True)

        # Setup pithos-web-client packages for snf-deploy
        self.logger.debug("Copy webclient debs to snf-deploy packages dir")
        cmd = """
        cp ~/webclient_build-area/*.deb /var/lib/snf-deploy/packages/
        """
        _run(cmd, False)

    @_check_fabric
    def build_documentation(self):
        """Build Synnefo documentation"""
        self.logger.info("Build Synnefo documentation..")
        _run("pip install -U Sphinx", False)
        with fabric.cd("synnefo"):
            _run("devflow-update-version; "
                 "./ci/make_docs.sh synnefo_documentation", False)

    def fetch_documentation(self, dest=None):
        """Fetch Synnefo documentation"""
        self.logger.info("Fetch Synnefo documentation..")
        if dest is None:
            dest = "synnefo_documentation"
        dest = os.path.abspath(dest)
        if not os.path.exists(dest):
            os.makedirs(dest)
        self.fetch_compressed("synnefo/synnefo_documentation", dest)
        self.logger.info("Downloaded documentation to %s" % _green(dest))

    @_check_fabric
    def deploy_synnefo(self, schema=None):
        """Deploy Synnefo using snf-deploy"""
        self.logger.info("Deploy Synnefo..")
        if schema is None:
            schema = self.config.get('Global', 'schema')
        self.logger.debug("Will use \"%s\" schema" % _green(schema))

        schema_dir = os.path.join(self.ci_dir, "schemas/%s" % schema)
        if not (os.path.exists(schema_dir) and os.path.isdir(schema_dir)):
            raise ValueError("Unknown schema: %s" % schema)

        self.logger.debug("Upload schema files to server")
        _put(os.path.join(schema_dir, "*"), "/etc/snf-deploy/")

        self.logger.debug("Change password in nodes.conf file")
        cmd = """
        sed -i 's/^password =.*/password = {0}/' /etc/snf-deploy/nodes.conf
        """.format(fabric.env.password)
        _run(cmd, False)

        self.logger.debug("Run snf-deploy")
        cmd = """
        snf-deploy keygen --force
        snf-deploy --disable-colors --autoconf all
        """
        _run(cmd, True)

    @_check_fabric
    def unit_test(self):
        """Run Synnefo unit test suite"""
        self.logger.info("Run Synnefo unit test suite")
        component = self.config.get('Unit Tests', 'component')

        self.logger.debug("Install needed packages")
        cmd = """
        pip install -U mock
        pip install -U factory_boy
        pip install -U nose
        """
        _run(cmd, False)

        self.logger.debug("Upload tests.sh file")
        unit_tests_file = os.path.join(self.ci_dir, "tests.sh")
        _put(unit_tests_file, ".")

        self.logger.debug("Run unit tests")
        cmd = """
        bash tests.sh {0}
        """.format(component)
        _run(cmd, True)

    @_check_fabric
    def run_burnin(self):
        """Run burnin functional test suite"""
        self.logger.info("Run Burnin functional test suite")
        cmd = """
        auth_url=$(grep -e '^url =' .kamakirc | cut -d' ' -f3)
        token=$(grep -e '^token =' .kamakirc | cut -d' ' -f3)
        images_user=$(kamaki image list -l | grep owner | \
                      cut -d':' -f2 | tr -d ' ')
        snf-burnin --auth-url=$auth_url --token=$token {0}
        BurninExitStatus=$?
        exit $BurninExitStatus
        """.format(self.config.get('Burnin', 'cmd_options'))
        _run(cmd, True)

    @_check_fabric
    def fetch_compressed(self, src, dest=None):
        """Create a tarball on the server and fetch/unpack it locally."""
        self.logger.debug("Creating tarball of %s" % src)
        basename = os.path.basename(src)
        tar_file = basename + ".tgz"
        cmd = "tar czf %s %s" % (tar_file, src)
        _run(cmd, False)
        if not os.path.exists(dest):
            os.makedirs(dest)

        tmp_dir = tempfile.mkdtemp()
        fabric.get(tar_file, tmp_dir)

        dest_file = os.path.join(tmp_dir, tar_file)
        self._check_hash_sum(dest_file, tar_file)
        self.logger.debug("Untar packages file %s" % dest_file)
        cmd = """
        cd %s
        tar xzf %s
        cp -r %s/* %s
        rm -r %s
        """ % (tmp_dir, tar_file, src, dest, tmp_dir)
        os.system(cmd)
        self.logger.info("Downloaded %s to %s" % (src, _green(dest)))

    @_check_fabric
    def fetch_packages(self, dest=None):
        """Fetch Synnefo packages"""
        if dest is None:
            dest = self.config.get('Global', 'pkgs_dir')
        dest = os.path.abspath(os.path.expanduser(dest))
        if not os.path.exists(dest):
            os.makedirs(dest)

        self.fetch_compressed("synnefo_build-area", dest)
        self.fetch_compressed("webclient_build-area", dest)
        self.logger.info("Downloaded debian packages to %s" % _green(dest))

    def x2go_plugin(self, dest=None):
        """Produce an html page which will use the x2goplugin

        Arguments:
          dest  -- The file where to save the page (String)
        """
        output_str = """
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
        <html>
        <head>
        <title>X2Go SynnefoCI Service</title>
        </head>
        <body onload="checkPlugin()">
        <div id="x2goplugin">
            <object
                src="location"
                type="application/x2go"
                name="x2goplugin"
                palette="background"
                height="100%"
                hspace="0"
                vspace="0"
                width="100%"
                x2goconfig="
                    session=X2Go-SynnefoCI-Session
                    server={0}
                    user={1}
                    sshport={2}
                    published=true
                    autologin=true
                ">
            </object>
        </div>
        </body>
        </html>
        """.format(self.read_temp_config('server_ip'),
                   self.read_temp_config('server_user'),
                   self.read_temp_config('server_port'))

        if dest is None:
            dest = self.config.get('Global', 'x2go_plugin_file')

        self.logger.info("Writing x2go plugin html file to %s" % dest)
        # Use a context manager so the file is closed even on write errors.
        with open(dest, 'w') as fid:
            fid.write(output_str)
class Image(livetest.Generic):
    """Live tests for the image (Plankton) client against a real cloud.

    Credentials and fixture data (test cloud, local image path, image id)
    come from the livetest configuration via `self[...]` lookups.
    """

    def setUp(self):
        # Timestamp used to derive unique names for containers/objects/images
        self.now = time.mktime(time.gmtime())
        self.cloud = 'cloud.%s' % self['testcloud']
        aurl, self.token = self[self.cloud, 'url'], self[self.cloud, 'token']
        self.auth_base = AstakosCachedClient(aurl, self.token)
        self.imgname = 'img_%s' % self.now
        url = self.auth_base.get_service_endpoints('image')['publicURL']
        self.token = self.auth_base.token
        self.client = ImageClient(url, self.token)
        cyclades_url = self.auth_base.get_service_endpoints(
            'compute')['publicURL']
        self.cyclades = CycladesClient(cyclades_url, self.token)
        self._imglist = {}     # name -> {name, id} of images we registered
        self._imgdetails = {}  # name -> full register() response

    def test_000(self):
        self._prepare_img()
        super(self.__class__, self).test_000()

    def _prepare_img(self):
        """Upload the configured local image to Pithos and register it."""
        f = open(self['image', 'local_path'], 'rb')
        (token, uuid) = (self.token, self.auth_base.user_term('id'))
        purl = self.auth_base.get_service_endpoints(
            'object-store')['publicURL']
        from kamaki.clients.pithos import PithosClient
        self.pithcli = PithosClient(purl, token, uuid)
        cont = 'cont_%s' % self.now
        self.pithcli.container = cont
        self.obj = 'obj_%s' % self.now
        print('\t- Create container %s on Pithos server' % cont)
        self.pithcli.container_put()
        self.location = 'pithos://%s/%s/%s' % (uuid, cont, self.obj)
        print('\t- Upload an image at %s...\n' % self.location)
        self.pithcli.upload_object(self.obj, f)
        print('\t- ok')
        f.close()
        r = self.client.register(
            self.imgname, self.location, params=dict(is_public=True))
        self._imglist[self.imgname] = dict(name=r['name'], id=r['id'])
        self._imgdetails[self.imgname] = r

    def tearDown(self):
        # Remove registered images and the temporary Pithos container
        for img in self._imglist.values():
            print('\tDeleting image %s' % img['id'])
            self.cyclades.delete_image(img['id'])
        if hasattr(self, 'pithcli'):
            print('\tDeleting container %s' % self.pithcli.container)
            try:
                self.pithcli.del_container(delimiter='/')
                self.pithcli.purge_container()
            except ClientError:
                # best-effort cleanup; the container may already be gone
                pass

    def _get_img_by_name(self, name):
        """Return the cyclades image dict with the given name, or None."""
        r = self.cyclades.list_images()
        for img in r:
            if img['name'] == name:
                return img
        return None

    def test_list_public(self):
        """Test list_public"""
        self._test_list_public()

    def _test_list_public(self):
        r = self.client.list_public()
        r0 = self.client.list_public(order='-')
        self.assertTrue(len(r) > 0)
        for img in r:
            for term in (
                    'status', 'name', 'container_format',
                    'disk_format', 'id', 'size'):
                self.assertTrue(term in img)
        self.assertTrue(r, r0)
        # reversed ordering must yield the same images
        r0.reverse()
        for i, img in enumerate(r):
            self.assert_dicts_are_equal(img, r0[i])
        r1 = self.client.list_public(detail=True)
        for img in r1:
            for term in (
                    'status', 'name', 'checksum', 'created_at',
                    'disk_format', 'updated_at', 'id', 'location',
                    'container_format', 'owner', 'is_public',
                    'deleted_at', 'properties', 'size'):
                self.assertTrue(term in img)
            if len(img['properties']):
                for interm in ('osfamily', 'root_partition'):
                    self.assertTrue(interm in img['properties'])
        size_max = 1000000000000
        r2 = self.client.list_public(filters=dict(size_max=size_max))
        self.assertTrue(len(r2) <= len(r))
        for img in r2:
            self.assertTrue(int(img['size']) <= size_max)

    def test_get_meta(self):
        """Test get_meta"""
        self._test_get_meta()

    def _test_get_meta(self):
        r = self.client.get_meta(self['image', 'id'])
        self.assertEqual(r['id'], self['image', 'id'])
        for term in (
                'status', 'name', 'checksum', 'updated-at', 'created-at',
                'deleted-at', 'location', 'is-public', 'owner',
                'disk-format', 'size', 'container-format'):
            self.assertTrue(term in r)
        for interm in (
                'OSFAMILY', 'USERS', 'ROOT_PARTITION',
                'OS', 'DESCRIPTION'):
            self.assertTrue(interm in r['properties'])

    def test_register(self):
        """Test register"""
        self._prepare_img()
        self._test_register()

    def _test_register(self):
        self.assertTrue(self._imglist)
        for img in self._imglist.values():
            self.assertTrue(img is not None)
            r = set(self._imgdetails[img['name']].keys())
            self.assertTrue(r.issubset(IMGMETA.union(['properties'])))

    def test_unregister(self):
        """Test unregister"""
        self._prepare_img()
        self._test_unregister()

    def _test_unregister(self):
        try:
            for img in self._imglist.values():
                self.client.unregister(img['id'])
                self._prepare_img()
                break
        except ClientError as ce:
            # 405: server does not support unregister -- report, don't fail
            if ce.status in (405,):
                print 'IMAGE UNREGISTER is not supported by server: %s' % ce
            else:
                raise

    def test_set_members(self):
        """Test set_members"""
        self._prepare_img()
        self._test_set_members()

    def _test_set_members(self):
        members = ['*****@*****.**' % self.now]
        for img in self._imglist.values():
            self.client.set_members(img['id'], members)
            r = self.client.list_members(img['id'])
            self.assertEqual(r[0]['member_id'], members[0])

    def test_list_members(self):
        """Test list_members"""
        self._test_list_members()

    def _test_list_members(self):
        self._test_set_members()

    def test_remove_members(self):
        """Test remove_members - NO CHECK"""
        self._prepare_img()
        self._test_remove_members()

    def _test_remove_members(self):
        # NOTE(review): this early return disables everything below,
        # matching the "NO CHECK" in the docstring; kept for reference.
        return
        members = ['*****@*****.**' % self.now, '*****@*****.**' % self.now]
        for img in self._imglist.values():
            self.client.set_members(img['id'], members)
            r = self.client.list_members(img['id'])
            self.assertTrue(len(r) > 1)
            self.client.remove_member(img['id'], members[0])
            r0 = self.client.list_members(img['id'])
            self.assertEqual(len(r), 1 + len(r0))
            self.assertEqual(r0[0]['member_id'], members[1])

    def test_list_shared(self):
        """Test list_shared - NOT CHECKED"""
        self._test_list_shared()

    def _test_list_shared(self):
        # No way to test this, if I dont have member images
        pass
__author__ = 'cmantas'
import psycopg2
from kamaki.clients.astakos import AstakosClient
from kamaki.clients.cyclades import CycladesClient
from logging import getLogger, ERROR

# init synnefo stuff
# NOTE(review): the authentication token is hardcoded in source; it should
# be loaded from configuration or the environment, not committed here.
AUTHENTICATION_URL = "https://accounts.okeanos.grnet.gr/identity/v2.0"
TOKEN = "hYDRO-FEV5d8wFxpOID-DF3_FWhsuD8dvTdbEX2qQRQ"
synnefo_user = AstakosClient(AUTHENTICATION_URL, TOKEN)
# silence kamaki's client logger and the root logger below ERROR
synnefo_user.logger.setLevel(ERROR)
getLogger().setLevel(ERROR)
# resolve the cyclades (compute) public endpoint via Astakos
cyclades_endpoints = synnefo_user.get_service_endpoints('compute')
CYCLADES_URL = cyclades_endpoints['publicURL']
cyclades_client = CycladesClient(CYCLADES_URL, TOKEN)

# connect to db
db = psycopg2.connect(host="127.0.0.1", user="******", password="******",
                      database="celardb")
cursor = db.cursor()

# clear all old values from the DB before re-populating
cursor.execute("DELETE FROM \"RESOURCE_TYPE\" WHERE TRUE")
cursor.execute("DELETE FROM \"SPECS\" WHERE TRUE")
cursor.execute("DELETE FROM \"PROVIDED_RESOURCE\" WHERE TRUE")

# add 'VM_FLAVOR' entry on the RESOURCE_TYPE table
from slipstream_okeanos import OkeanosNativeClient
from slipstream_okeanos import runCommandOnHost

# NOTE(review): several names used below (https, ENV, AstakosClient,
# CycladesClient, CycladesBlockStorageClient) are not imported in this
# chunk -- confirm they are brought into scope elsewhere in the file.
https.patch_ignore_ssl()  # disable SSL certificate verification

authURL = "https://accounts.okeanos.grnet.gr/identity/v2.0"
X_AUTH_TOKEN_NAME = 'X_AUTH_TOKEN'
token = ENV[X_AUTH_TOKEN_NAME]  # the auth token comes from the ENV mapping
projectId = '464eb0e7-b556-4fc7-8afb-d590feebaad8'
serverId = '660580'
cycladesServiceType = CycladesClient.service_type
blockStorageServiceType = CycladesBlockStorageClient.service_type

# resolve service endpoints through Astakos and build the clients
ac = AstakosClient(authURL, token)
cycladesURL = ac.get_endpoint_url(cycladesServiceType)
cc = CycladesClient(cycladesURL, token)
blockStorageURL = ac.get_endpoint_url(blockStorageServiceType)
bsc = CycladesBlockStorageClient(blockStorageURL, token)
onc = OkeanosNativeClient(token, authURL)

print "cycladesURL = %s" % cycladesURL
print "blockStorageURL = %s" % blockStorageURL
print "ac = %s" % ac
print "cc = %s" % cc
print "bsc = %s" % bsc
print "onc = %s" % onc
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED # AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # The views and conclusions contained in the software and # documentation are those of the authors and should not be # interpreted as representing official policies, either expressed # or implied, of GRNET S.A. from kamaki.clients.astakos import AstakosClient AUTHENTICATION_URL = "https://astakos.example.com/identity/v2.0" TOKEN = "User-Token" astakos = AstakosClient(AUTHENTICATION_URL, TOKEN) # Get user uuid user = astakos.authenticate() uuid = user['access']['user']['id'] # Get resources assigned on my personal (system) project all_resources = astakos.get_quotas() my_resources = all_resources[uuid] # Print usage and limits for VMs and IPs vms = my_resources["cyclades.vm"] ips = my_resources["cyclades.floating_ip"] print "You are using {vms}/{vm_limit} VMs and {ips}/{ip_limit} IPs".format( vms=vms["usage"],
def set_astakos_client(cls, auth_url, token):
    """Create an AstakosClient and store it on the class.

    Arguments:
      auth_url -- Astakos authentication URL (identity API endpoint)
      token    -- user authentication token
    """
    cls.astakos_client = AstakosClient(auth_url, token)
def main():
    """Parse arguments, use kamaki to create cluster, setup using ansible
    playbooks.

    Flow: authenticate -> ensure the 'Hadoop' vNAT exists -> create (or
    extend) the cluster VMs -> write Ansible inventory/config files ->
    run the hadoop_vnat.yml playbook for master and slaves.
    """
    (opts, args) = parse_arguments(sys.argv[1:])
    global CYCLADES, TOKEN, my_vnat_network, my_network_client
    AUTHENTICATION_URL = opts.cyclades
    TOKEN = opts.token

    # Cleanup stale servers from previous runs
    if opts.show_stale:
        cleanup_servers(prefix=opts.prefix, delete_stale=opts.delete_stale)
        return 0

    # Initialize a kamaki instance, get endpoints
    user = AstakosClient(AUTHENTICATION_URL, TOKEN)
    my_accountData = user.authenticate()
    endpoints = user.get_endpoints()
    cyclades_endpoints = user.get_endpoints('compute')  # NOTE(review): unused
    cyclades_base_url = parseAstakosEndpoints(endpoints, 'cyclades_compute')
    cyclades_network_base_url = parseAstakosEndpoints(endpoints,
                                                     'cyclades_network')
    my_cyclades_client = CycladesClient(cyclades_base_url, TOKEN)
    my_compute_client = ComputeClient(cyclades_base_url, TOKEN)
    my_network_client = CycladesNetworkClient(cyclades_network_base_url, TOKEN)
    my_vnat_network = {}

    # check if 'Hadoop' vnat is created...
    hadoop_vnat_created = False
    my_network_dict = my_network_client.list_networks()
    for n in my_network_dict:
        if n['name'] == 'Hadoop':
            hadoop_vnat_created = True
            my_vnat_network = n
    # ...else create it
    if hadoop_vnat_created == False:
        log.info("Creating vNAT")
        my_vnat_network = my_network_client.create_network(
            type='MAC_FILTERED', name='Hadoop')
        my_subnet = my_network_client.create_subnet(
            network_id=my_vnat_network['id'], cidr='192.168.0.0/24')

    cnt = int(opts.clustersize)  # calculate size of cluster into 'cnt'

    # Initialize
    nodes = []
    masterName = ''

    # Create a file to store the root password for later use
    if not os.path.exists(opts.hadoop_dir+'/bak'):
        os.makedirs(opts.hadoop_dir+'/bak')
    pass_fname = opts.hadoop_dir+'/bak/adminPass' + \
        str(datetime.now())[:19].replace(' ', '')
    adminPass_f = open(pass_fname, 'w')

    initialClusterSize = 0
    server = {}
    if opts.extend == False:
        # Create master node (0th node)
        server = create_machine(opts, my_cyclades_client, 0)
        if server == {}:
            return
    else:
        # Extending: count the existing nodes that carry our prefix
        servers = my_cyclades_client.list_servers(detail=True)
        cluster = [s for s in servers if s["name"].startswith(opts.prefix)]
        initialClusterSize = len(cluster)
        if initialClusterSize == 0:
            log.info("Cluster cannot be expanded: it does not exist.")
            return
    # NOTE(review): the next three lines run in both branches; when
    # extending, `server` is still {} when appended -- confirm intended.
    servername = "%s-0" % (opts.prefix)
    masterName = servername
    nodes.append(server)

    # Create slave (worker) nodes
    if cnt > 1 or opts.extend:
        startingOffset = 1
        if opts.extend:
            startingOffset = initialClusterSize
        for i in xrange(startingOffset, initialClusterSize+cnt):
            server = {}
            server = create_machine(opts, my_cyclades_client, i)
            if server == {}:
                return
            nodes.append(server)
            servername = "%s-%d" % (opts.prefix, i)
            # Write the root password to a file
            adminPass_f.write('machine = %s, password = %s\n'
                              % (servername, server['adminPass']))
    adminPass_f.close()

    # Setup Hadoop files and settings on all cluster nodes
    # Create the 'cluster' dictionary out of servers, with only
    # Hadoop-relevant keys (name, ip, integer key)
    servers = my_cyclades_client.list_servers(detail=True)
    cluster = [s for s in my_cyclades_client.list_servers(detail=True)
               if s["name"].startswith(opts.prefix)]
    # (name, attachments, index-parsed-from-name), sorted by index
    cluster0 = [(s["name"], s["attachments"],
                 int(s["name"][s["name"].find('-')+1:])) for s in cluster]
    cluster0 = sorted(cluster0, key=lambda cluster0: cluster0[2])
    # master IP, different index
    cluster = [(cluster0[0][0], cluster0[0][1][2]["ipv4"], cluster0[0][2])]
    # slave IPs
    cluster2 = [(s[0], s[1][1]['ipv4'], int(s[2])) for s in cluster0[1:]]
    cluster += cluster2

    # Prepare Ansible-Hadoop config files (hosts, conf/slaves, vnat/etchosts)
    hosts = open(opts.hadoop_dir+'/hosts', 'w')
    hosts.write('[master]\n')
    etchosts = open(opts.hadoop_dir+'/vnat/etchosts', 'w')
    for i in xrange(0, initialClusterSize+cnt):
        for s in cluster:
            if s[0] == opts.prefix+"-"+str(i):
                if s[0] == masterName:
                    hosts.write(s[1]+'\n\n'+'[slaves]\n')
                else:
                    hosts.write(s[1]+'\n')
                etchosts.write(s[1]+'\t'+s[0]+'\n')
    hosts.close()
    etchosts.close()
    slaves = open(opts.hadoop_dir+'/vnat/slaves', 'w')
    for s in cluster[1:]:
        slaves.write(s[0]+'\n')
    slaves.close()

    # Execute respective ansible playbook
    if (opts.extend == False):
        cmd = "ansible-playbook hadoop_vnat.yml -i hosts -vv --extra-vars \"" \
            + "is_master=True, master_node="+cluster[0][0] \
            + " master_ip="+cluster[0][1]+"\""+" -l master"
        print cmd
        retval = os.system(cmd)
        cmd = "ansible-playbook hadoop_vnat.yml -i hosts -vv --extra-vars \"" \
            + "is_slave=True, master_node="+cluster[0][0] \
            + " master_ip="+cluster[0][1]+"\""+" -l slaves"
        print cmd
        retval = os.system(cmd)
        slave_ip_list = []
        for i in xrange(1, cnt):
            slave_ip_list.append(cluster[i][0])
        enable_ssh_login(cluster[0][1], [cluster[0][0]])
        enable_ssh_login(cluster[0][1], slave_ip_list)
    else:
        # Extending: only the newly added nodes go to hosts.latest
        hosts_latest = open(opts.hadoop_dir+'/hosts.latest', 'w')
        hosts_latest.write('[master]\n')
        hosts_latest.write(cluster[0][1]+'\n\n'+'[slaves]\n')
        for i in xrange(initialClusterSize, initialClusterSize+cnt):
            hosts_latest.write(cluster[i][1]+'\n')
        hosts_latest.close()
        # update etc/hosts in all nodes - TODO: de-duplicate entries
        cmd = "ansible-playbook hadoop_vnat.yml -i hosts -vv --extra-vars \"" \
            + "is_master=True, master_ip="+cluster[0][1]+"\""+" -t etchosts"
        print cmd
        retval = os.system(cmd)
        cmd = "ansible-playbook hadoop_vnat.yml -i hosts.latest -vv " \
            + "--extra-vars \""+"is_slave=True, master_node="+cluster[0][0] \
            + " master_ip="+cluster[0][1]+"\""+" -l slaves"
        print cmd
        retval = os.system(cmd)
        slave_ip_list = []
        for i in xrange(initialClusterSize, initialClusterSize+cnt):
            slave_ip_list.append(cluster[i][0])
        print "slave_ip_list=", slave_ip_list
        enable_ssh_login(cluster[0][1], slave_ip_list)
        # Update conf/slaves in master
        cmd = "ansible-playbook hadoop_vnat.yml -i hosts -vv --extra-vars \"" \
            + "is_master=True, master_ip="+cluster[0][1]+"\""+" -l master -t slaves"
        print cmd
        retval = os.system(cmd)
    log.info("Done.")
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF # USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED # AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # The views and conclusions contained in the software and # documentation are those of the authors and should not be # interpreted as representing official policies, either expressed # or implied, of GRNET S.A. from kamaki.clients.astakos import AstakosClient, ClientError AUTHENTICATION_URL = "https://astakos.example.com/identity/v2.0" TOKEN = "User-Token" astakos = AstakosClient(AUTHENTICATION_URL, TOKEN) try: user = astakos.authenticate() except ClientError as ce: if ce in (401, ): print "Authentication failed, {0}".format(ce) exit(0) raise user_info = user['access']['user'] print "Authentication was successful for {name}, with uuid {uuid}".format( name=user_info['name'], uuid=user_info['id'])
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED # AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # The views and conclusions contained in the software and # documentation are those of the authors and should not be # interpreted as representing official policies, either expressed # or implied, of GRNET S.A. from kamaki.clients.astakos import AstakosClient AUTHENTICATION_URL = "https://astakos.example.com/identity/v2.0" TOKEN = "User-Token" astakos = AstakosClient(AUTHENTICATION_URL, TOKEN) # Get user uuid user = astakos.authenticate() uuid = user['access']['user']['id'] # Get all projects information projects = astakos.get_projects() for project in projects: print "Project {name} ({uuid})\n\t{description}\n\t{url}".format( name=project["name"], uuid=project["id"], description=project["description"], url=project["homepage"])
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # The views and conclusions contained in the software and # documentation are those of the authors and should not be # interpreted as representing official policies, either expressed # or implied, of GRNET S.A. from kamaki.clients.astakos import AstakosClient # Initialize Astakos AUTHENTICATION_URL = "https://astakos.example.com/identity/v2.0" TOKEN = "User-Token" astakos = AstakosClient(AUTHENTICATION_URL, TOKEN) # Check quotas total_quotas = astakos.get_quotas() resources = ("cyclades.vm", "cyclades.cpu", "cyclades.ram", "cyclades.disk", "cyclades.network.private", "cyclades.floating_ip") for project, quotas in total_quotas.items(): print "Project {0}".format(project) for r in resources: usage, limit = quotas[r]["usage"], quotas[r]["limit"] if usage < limit: print "\t{0} ... OK".format(r) else: print "\t{0}: ... EXCEEDED".format(r)
class Clients(object):
    """Our kamaki clients

    Holds one client per Synnefo service; each endpoint URL is resolved
    through Astakos using the client class's declared `service_type`.
    """
    auth_url = None  # Astakos authentication URL
    token = None     # user authentication token
    # Astakos
    astakos = None
    retry = CONNECTION_RETRY_LIMIT  # retry limit applied to every client
    # Compute
    compute = None
    compute_url = None
    # Cyclades
    cyclades = None
    # Network
    network = None
    network_url = None
    # Pithos
    pithos = None
    pithos_url = None
    # Image
    image = None
    image_url = None

    def _kamaki_ssl(self, ignore_ssl=None):
        """Patch kamaki to use the correct CA certificates

        Read kamaki's config file and decide if we are going to use
        CA certificates and patch kamaki clients accordingly.

        Arguments:
          ignore_ssl -- override; if None the 'ignore_ssl' global kamaki
                        option decides
        """
        config = kamaki_config.Config()
        if ignore_ssl is None:
            ignore_ssl = config.get("global", "ignore_ssl").lower() == "on"
        ca_file = config.get("global", "ca_certs")
        if ignore_ssl:
            # Skip SSL verification
            https.patch_ignore_ssl()
        else:
            # Use ca_certs path found in kamakirc
            https.patch_with_certs(ca_file)

    def initialize_clients(self, ignore_ssl=False):
        """Initialize all the Kamaki Clients"""
        # Patch kamaki for SSL verification
        self._kamaki_ssl(ignore_ssl=ignore_ssl)

        # Initialize kamaki Clients
        self.astakos = AstakosClient(self.auth_url, self.token)
        self.astakos.CONNECTION_RETRY_LIMIT = self.retry

        self.compute_url = self.astakos.get_endpoint_url(
            ComputeClient.service_type)
        self.compute = ComputeClient(self.compute_url, self.token)
        self.compute.CONNECTION_RETRY_LIMIT = self.retry

        self.cyclades_url = self.astakos.get_endpoint_url(
            CycladesClient.service_type)
        self.cyclades = CycladesClient(self.cyclades_url, self.token)
        self.cyclades.CONNECTION_RETRY_LIMIT = self.retry

        self.block_storage_url = self.astakos.get_endpoint_url(
            CycladesBlockStorageClient.service_type)
        self.block_storage = CycladesBlockStorageClient(
            self.block_storage_url, self.token)
        self.block_storage.CONNECTION_RETRY_LIMIT = self.retry

        self.network_url = self.astakos.get_endpoint_url(
            CycladesNetworkClient.service_type)
        self.network = CycladesNetworkClient(self.network_url, self.token)
        self.network.CONNECTION_RETRY_LIMIT = self.retry

        self.pithos_url = self.astakos.get_endpoint_url(
            PithosClient.service_type)
        self.pithos = PithosClient(self.pithos_url, self.token)
        self.pithos.CONNECTION_RETRY_LIMIT = self.retry

        self.image_url = self.astakos.get_endpoint_url(
            ImageClient.service_type)
        self.image = ImageClient(self.image_url, self.token)
        self.image.CONNECTION_RETRY_LIMIT = self.retry
class SynnefoCI(object):
    """SynnefoCI python class

    Drives the CI slave lifecycle: create/destroy a server via kamaki,
    configure it over fabric, and track per-build state in a temporary
    config file.
    """

    def __init__(self, config_file=None, build_id=None, cloud=None):
        """ Initialize SynnefoCI python class

        Setup logger, local_dir, config and kamaki

        Arguments:
          config_file -- path to the CI config file (default:
                         DEFAULT_CONFIG_FILE next to this module)
          build_id    -- build id to use; if None one is allocated later
          cloud       -- kamaki cloud name to use (overrides config)
        """
        # Setup logger: INFO-and-below to stdout, WARNING+ to stderr
        self.logger = logging.getLogger('synnefo-ci')
        self.logger.setLevel(logging.DEBUG)

        handler1 = logging.StreamHandler(sys.stdout)
        handler1.setLevel(logging.DEBUG)
        handler1.addFilter(_InfoFilter())
        handler1.setFormatter(_MyFormatter())
        handler2 = logging.StreamHandler(sys.stderr)
        handler2.setLevel(logging.WARNING)
        handler2.setFormatter(_MyFormatter())

        self.logger.addHandler(handler1)
        self.logger.addHandler(handler2)

        # Get our local dir
        self.ci_dir = os.path.dirname(os.path.abspath(__file__))
        self.repo_dir = os.path.dirname(self.ci_dir)

        # Read config file
        if config_file is None:
            config_file = os.path.join(self.ci_dir, DEFAULT_CONFIG_FILE)
        config_file = os.path.abspath(config_file)
        self.config = ConfigParser()
        self.config.optionxform = str
        self.config.read(config_file)

        # Read temporary_config file
        self.temp_config_file = \
            os.path.expanduser(self.config.get('Global', 'temporary_config'))
        self.temp_config = ConfigParser()
        self.temp_config.optionxform = str
        self.temp_config.read(self.temp_config_file)
        self.build_id = build_id
        if build_id is not None:
            self.logger.info("Will use \"%s\" as build id"
                             % _green(self.build_id))

        # Set kamaki cloud
        if cloud is not None:
            self.kamaki_cloud = cloud
        elif self.config.has_option("Deployment", "kamaki_cloud"):
            kamaki_cloud = self.config.get("Deployment", "kamaki_cloud")
            if kamaki_cloud == "":
                self.kamaki_cloud = None
            else:
                # Bugfix: store the configured cloud name. Previously a
                # non-empty value was read but never assigned, leaving
                # self.kamaki_cloud unset for setup_kamaki().
                self.kamaki_cloud = kamaki_cloud
        else:
            self.kamaki_cloud = None

        # Initialize variables
        self.fabric_installed = False
        self.kamaki_installed = False
        self.cyclades_client = None
        self.network_client = None
        self.compute_client = None
        self.image_client = None
        self.astakos_client = None

    def setup_kamaki(self):
        """Initialize kamaki

        Setup cyclades_client, image_client and compute_client
        """
        config = kamaki_config.Config()
        if self.kamaki_cloud is None:
            try:
                self.kamaki_cloud = config.get("global", "default_cloud")
            except AttributeError:
                # Compatibility with kamaki version <=0.10
                # NOTE(review): both branches are identical in the source;
                # confirm whether the fallback should use a different call.
                self.kamaki_cloud = config.get("global", "default_cloud")

        self.logger.info("Setup kamaki client, using cloud '%s'.." %
                         self.kamaki_cloud)
        auth_url = config.get_cloud(self.kamaki_cloud, "url")
        self.logger.debug("Authentication URL is %s" % _green(auth_url))
        token = config.get_cloud(self.kamaki_cloud, "token")
        #self.logger.debug("Token is %s" % _green(token))

        self.astakos_client = AstakosClient(auth_url, token)
        endpoints = self.astakos_client.authenticate()

        cyclades_url = get_endpoint_url(endpoints, "compute")
        self.logger.debug("Cyclades API url is %s" % _green(cyclades_url))
        self.cyclades_client = CycladesClient(cyclades_url, token)
        self.cyclades_client.CONNECTION_RETRY_LIMIT = 2

        network_url = get_endpoint_url(endpoints, "network")
        self.logger.debug("Network API url is %s" % _green(network_url))
        self.network_client = CycladesNetworkClient(network_url, token)
        self.network_client.CONNECTION_RETRY_LIMIT = 2

        image_url = get_endpoint_url(endpoints, "image")
        self.logger.debug("Images API url is %s" % _green(image_url))
        # Bugfix: the image client must talk to the image endpoint; it was
        # mistakenly constructed with cyclades_url (image_url was unused).
        self.image_client = ImageClient(image_url, token)
        self.image_client.CONNECTION_RETRY_LIMIT = 2

        compute_url = get_endpoint_url(endpoints, "compute")
        self.logger.debug("Compute API url is %s" % _green(compute_url))
        self.compute_client = ComputeClient(compute_url, token)
        self.compute_client.CONNECTION_RETRY_LIMIT = 2

    def _wait_transition(self, server_id, current_status, new_status):
        """Wait for server to go from current_status to new_status

        Polls every `sleep_time` seconds up to the 'build_timeout' config
        option; any other status is treated as failure.  On timeout or
        failure the server is destroyed and the process exits.
        """
        self.logger.debug("Waiting for server to become %s" % new_status)
        timeout = self.config.getint('Global', 'build_timeout')
        sleep_time = 5
        while True:
            server = self.cyclades_client.get_server_details(server_id)
            if server['status'] == new_status:
                return server
            elif timeout < 0:
                self.logger.error(
                    "Waiting for server to become %s timed out" % new_status)
                self.destroy_server(False)
                sys.exit(1)
            elif server['status'] == current_status:
                # Sleep for #n secs and continue
                timeout = timeout - sleep_time
                time.sleep(sleep_time)
            else:
                self.logger.error("Server failed with status %s" %
                                  server['status'])
                self.destroy_server(False)
                sys.exit(1)

    @_check_kamaki
    def destroy_server(self, wait=True):
        """Destroy slave server

        Also releases any floating IPs that were attached to it.
        """
        server_id = int(self.read_temp_config('server_id'))
        # Collect attached floating IPs before deleting the server
        fips = [f for f in self.network_client.list_floatingips()
                if str(f['instance_id']) == str(server_id)]
        self.logger.info("Destroying server with id %s " % server_id)
        self.cyclades_client.delete_server(server_id)
        if wait:
            self._wait_transition(server_id, "ACTIVE", "DELETED")
        for fip in fips:
            self.logger.info("Destroying floating ip %s",
                             fip['floating_ip_address'])
            self.network_client.delete_floatingip(fip['id'])

    def _create_floating_ip(self):
        """Create a new floating ip

        Tries each public floating-ip pool until allocation succeeds;
        exits the process if every pool is exhausted.
        """
        networks = self.network_client.list_networks(detail=True)
        pub_nets = [n for n in networks
                    if n['SNF:floating_ip_pool'] and n['public']]
        for pub_net in pub_nets:
            # Try until we find a public network that is not full
            try:
                fip = self.network_client.create_floatingip(pub_net['id'])
            except ClientError as err:
                self.logger.warning("%s: %s", err.message, err.details)
                continue
            self.logger.debug("Floating IP %s with id %s created",
                              fip['floating_ip_address'], fip['id'])
            return fip
        self.logger.error("No more IP addresses available")
        sys.exit(1)

    def _create_port(self, floating_ip):
        """Create a new port for our floating IP"""
        net_id = floating_ip['floating_network_id']
        self.logger.debug("Creating a new port to network with id %s", net_id)
        fixed_ips = [{'ip_address': floating_ip['floating_ip_address']}]
        port = self.network_client.create_port(
            net_id, device_id=None, fixed_ips=fixed_ips)
        return port

    @_check_kamaki
    # Too many local variables. pylint: disable-msg=R0914
    def create_server(self, image=None, flavor=None, ssh_keys=None,
                      server_name=None):
        """Create slave server

        Arguments:
          image       -- image to use ("name:<regex>" or "id:<uuid>")
          flavor      -- flavor to use ("name:<regex>" or "id:<id>")
          ssh_keys    -- authorized-keys file/URL to install on the server
          server_name -- server name; defaults to config 'server_name'
        """
        self.logger.info("Create a new server..")

        # Find a build_id to use
        self._create_new_build_id()

        # Find an image to use
        image_id = self._find_image(image)
        # Find a flavor to use
        flavor_id = self._find_flavor(flavor)

        # Create Server
        networks = []
        if self.config.get("Deployment", "allocate_floating_ip") == "True":
            fip = self._create_floating_ip()
            port = self._create_port(fip)
            networks.append({'port': port['id']})
        private_networks = self.config.get('Deployment', 'private_networks')
        if private_networks:
            private_networks = [p.strip() for p in private_networks.split(",")]
            networks.extend([{"uuid": uuid} for uuid in private_networks])
        if server_name is None:
            server_name = self.config.get("Deployment", "server_name")
            server_name = "%s(BID: %s)" % (server_name, self.build_id)
        server = self.cyclades_client.create_server(
            server_name, flavor_id, image_id, networks=networks)
        server_id = server['id']
        self.write_temp_config('server_id', server_id)
        self.logger.debug("Server got id %s" % _green(server_id))
        server_user = server['metadata']['users']
        self.write_temp_config('server_user', server_user)
        self.logger.debug("Server's admin user is %s" % _green(server_user))
        server_passwd = server['adminPass']
        self.write_temp_config('server_passwd', server_passwd)

        server = self._wait_transition(server_id, "BUILD", "ACTIVE")
        self._get_server_ip_and_port(server, private_networks)
        self._copy_ssh_keys(ssh_keys)

        # Setup Firewall
        self.setup_fabric()
        self.logger.info("Setup firewall")
        accept_ssh_from = self.config.get('Global', 'accept_ssh_from')
        if accept_ssh_from != "":
            self.logger.debug("Block ssh except from %s" % accept_ssh_from)
            cmd = """
            local_ip=$(/sbin/ifconfig eth0 | grep 'inet addr:' | \
                cut -d':' -f2 | cut -d' ' -f1)
            iptables -A INPUT -s localhost -j ACCEPT
            iptables -A INPUT -s $local_ip -j ACCEPT
            iptables -A INPUT -s {0} -p tcp --dport 22 -j ACCEPT
            iptables -A INPUT -p tcp --dport 22 -j DROP
            """.format(accept_ssh_from)
            _run(cmd, False)

        # Setup apt, download packages
        self.logger.debug("Setup apt. Install x2goserver and firefox")
        cmd = """
        echo 'APT::Install-Suggests "false";' >> /etc/apt/apt.conf
        echo 'precedence ::ffff:0:0/96 100' >> /etc/gai.conf
        apt-get update
        apt-get install curl --yes --force-yes
        echo -e "\n\n{0}" >> /etc/apt/sources.list
        # Synnefo repo's key
        curl https://dev.grnet.gr/files/apt-grnetdev.pub | apt-key add -
        # X2GO Key
        apt-key adv --recv-keys --keyserver keys.gnupg.net E1F958385BFE2B6E
        apt-get install x2go-keyring --yes --force-yes
        apt-get update
        apt-get install x2goserver x2goserver-xsession \
            iceweasel --yes --force-yes
        # xterm published application
        echo '[Desktop Entry]' > /usr/share/applications/xterm.desktop
        echo 'Name=XTerm' >> /usr/share/applications/xterm.desktop
        echo 'Comment=standard terminal emulator for the X window system' >> \
            /usr/share/applications/xterm.desktop
        echo 'Exec=xterm' >> /usr/share/applications/xterm.desktop
        echo 'Terminal=false' >> /usr/share/applications/xterm.desktop
        echo 'Type=Application' >> /usr/share/applications/xterm.desktop
        echo 'Encoding=UTF-8' >> /usr/share/applications/xterm.desktop
        echo 'Icon=xterm-color_48x48' >> /usr/share/applications/xterm.desktop
        echo 'Categories=System;TerminalEmulator;' >> \
            /usr/share/applications/xterm.desktop
        """.format(self.config.get('Global', 'apt_repo'))
        _run(cmd, False)

    def _find_flavor(self, flavor=None):
        """Find a suitable flavor to use

        Search by name (reg expression) or by id
        """
        # Get a list of flavors from config file
        flavors = self.config.get('Deployment', 'flavors').split(",")
        if flavor is not None:
            # If we have a flavor_name to use, add it to our list
            flavors.insert(0, flavor)

        list_flavors = self.compute_client.list_flavors()
        for flv in flavors:
            flv_type, flv_value = parse_typed_option(option="flavor",
                                                     value=flv)
            if flv_type == "name":
                # Filter flavors by name
                self.logger.debug(
                    "Trying to find a flavor with name \"%s\"" % flv_value)
                list_flvs = \
                    [f for f in list_flavors
                     if re.search(flv_value, f['name'], flags=re.I)
                     is not None]
            elif flv_type == "id":
                # Filter flavors by id
                self.logger.debug(
                    "Trying to find a flavor with id \"%s\"" % flv_value)
                list_flvs = \
                    [f for f in list_flavors if str(f['id']) == flv_value]
            else:
                self.logger.error("Unrecognized flavor type %s" % flv_type)
                # Bugfix: abort on unknown flavor type (mirrors _find_image);
                # previously execution fell through with list_flvs unbound
                # (or stale from the previous iteration).
                sys.exit(1)

            # Check if we found one
            if list_flvs:
                self.logger.debug(
                    "Will use \"%s\" with id \"%s\""
                    % (_green(list_flvs[0]['name']),
                       _green(list_flvs[0]['id'])))
                return list_flvs[0]['id']

        self.logger.error("No matching flavor found.. aborting")
        sys.exit(1)

    def _find_image(self, image=None):
        """Find a suitable image to use

        In case of search by name, the image has to belong to one
        of the `DEFAULT_SYSTEM_IMAGES_UUID' users.
        In case of search by id it only has to exist.
        """
        # Get a list of images from config file
        images = self.config.get('Deployment', 'images').split(",")
        if image is not None:
            # If we have an image from command line, add it to our list
            images.insert(0, image)

        auth = self.astakos_client.authenticate()
        user_uuid = auth["access"]["token"]["tenant"]["id"]
        list_images = self.image_client.list_public(detail=True)['images']
        for img in images:
            img_type, img_value = parse_typed_option(option="image", value=img)
            if img_type == "name":
                # Filter images by name
                self.logger.debug(
                    "Trying to find an image with name \"%s\"" % img_value)
                accepted_uuids = DEFAULT_SYSTEM_IMAGES_UUID + [user_uuid]
                list_imgs = \
                    [i for i in list_images if i['user_id'] in accepted_uuids
                     and
                     re.search(img_value, i['name'], flags=re.I) is not None]
            elif img_type == "id":
                # Filter images by id
                self.logger.debug(
                    "Trying to find an image with id \"%s\"" % img_value)
                list_imgs = \
                    [i for i in list_images
                     if i['id'].lower() == img_value.lower()]
            else:
                self.logger.error("Unrecognized image type %s" % img_type)
                sys.exit(1)

            # Check if we found one
            if list_imgs:
                self.logger.debug(
                    "Will use \"%s\" with id \"%s\""
                    % (_green(list_imgs[0]['name']),
                       _green(list_imgs[0]['id'])))
                return list_imgs[0]['id']

        # We didn't found one
        self.logger.error("No matching image found.. aborting")
        sys.exit(1)

    def _get_server_ip_and_port(self, server, private_networks):
        """Compute server's IPv4 and ssh port number

        For okeanos.io / demo.synnefo.org deployments the server is
        reached through the "gate" host, so the ssh port is derived from
        the last two octets of the server's IP.
        """
        self.logger.info("Get server connection details..")
        if private_networks:
            # Choose the networks that belong to private_networks
            networks = [n for n in server['attachments']
                        if n['network_id'] in private_networks]
        else:
            # Choose the networks that are public
            networks = [
                n for n in server['attachments']
                if self.network_client.get_network_details(
                    n['network_id'])['public']
            ]
        # Choose the networks with IPv4
        networks = [n for n in networks if n['ipv4']]
        # Use the first network as IPv4
        server_ip = networks[0]['ipv4']

        if (".okeanos.io" in self.cyclades_client.base_url or
                ".demo.synnefo.org" in self.cyclades_client.base_url):
            tmp1 = int(server_ip.split(".")[2])
            tmp2 = int(server_ip.split(".")[3])
            server_ip = "gate.okeanos.io"
            server_port = 10000 + tmp1 * 256 + tmp2
        else:
            server_port = 22
        self.write_temp_config('server_ip', server_ip)
        self.logger.debug("Server's IPv4 is %s" % _green(server_ip))
        self.write_temp_config('server_port', server_port)
        self.logger.debug("Server's ssh port is %s" % _green(server_port))
        ssh_command = "ssh -p %s %s@%s" \
            % (server_port, server['metadata']['users'], server_ip)
        self.logger.debug("Access server using \"%s\"" % (_green(ssh_command)))

    @_check_fabric
    def _copy_ssh_keys(self, ssh_keys):
        """Upload/Install ssh keys to server

        `ssh_keys` may be a local path or an http(s)/ftp URL; defaults to
        the 'ssh_keys' option of the [Deployment] config section.
        """
        self.logger.debug("Check for authentication keys to use")
        if ssh_keys is None:
            ssh_keys = self.config.get("Deployment", "ssh_keys")

        if ssh_keys != "":
            ssh_keys = os.path.expanduser(ssh_keys)
            self.logger.debug("Will use \"%s\" authentication keys file"
                              % _green(ssh_keys))
            keyfile = '/tmp/%s.pub' % fabric.env.user
            _run('mkdir -p ~/.ssh && chmod 700 ~/.ssh', False)
            if ssh_keys.startswith("http://") or \
                    ssh_keys.startswith("https://") or \
                    ssh_keys.startswith("ftp://"):
                cmd = """
                apt-get update
                apt-get install wget --yes --force-yes
                wget {0} -O {1} --no-check-certificate
                """.format(ssh_keys, keyfile)
                _run(cmd, False)
            elif os.path.exists(ssh_keys):
                _put(ssh_keys, keyfile)
            else:
                self.logger.debug("No ssh keys found")
                return
            _run('cat %s >> ~/.ssh/authorized_keys' % keyfile, False)
            _run('rm %s' % keyfile, False)
            self.logger.debug("Uploaded ssh authorized keys")
        else:
            self.logger.debug("No ssh keys found")

    def _create_new_build_id(self):
        """Find a uniq build_id to use

        The whole allocation happens under a file lock so concurrent CI
        runs cannot claim the same id.
        """
        with filelocker.lock("%s.lock" % self.temp_config_file,
                             filelocker.LOCK_EX):
            # Read temp_config again to get any new entries
            self.temp_config.read(self.temp_config_file)

            # Find a uniq build_id to use
            if self.build_id is None:
                ids = self.temp_config.sections()
                if ids:
                    max_id = int(max(self.temp_config.sections(), key=int))
                    self.build_id = max_id + 1
                else:
                    self.build_id = 1
            self.logger.debug("Will use \"%s\" as build id"
                              % _green(self.build_id))

            # Create a new section
            try:
                self.temp_config.add_section(str(self.build_id))
            except DuplicateSectionError:
                msg = ("Build id \"%s\" already in use. " +
                       "Please use a uniq one or cleanup \"%s\" file.\n") \
                    % (self.build_id, self.temp_config_file)
                self.logger.error(msg)
                sys.exit(1)
            creation_time = \
                time.strftime("%a, %d %b %Y %X", time.localtime())
            self.temp_config.set(str(self.build_id),
                                 "created", str(creation_time))

            # Write changes back to temp config file
            with open(self.temp_config_file, 'wb') as tcf:
                self.temp_config.write(tcf)

    def write_temp_config(self, option, value):
        """Write changes back to config file"""
        # Acquire the lock to write to temp_config_file
        with filelocker.lock("%s.lock" % self.temp_config_file,
                             filelocker.LOCK_EX):
            # Read temp_config again to get any new entries
            self.temp_config.read(self.temp_config_file)

            self.temp_config.set(str(self.build_id), option, str(value))
            curr_time = time.strftime("%a, %d %b %Y %X", time.localtime())
            self.temp_config.set(str(self.build_id), "modified", curr_time)

            # Write changes back to temp config file
            with open(self.temp_config_file, 'wb') as tcf:
                self.temp_config.write(tcf)

    def read_temp_config(self, option):
        """Read from temporary_config file"""
        # If build_id is None use the latest one
        if self.build_id is None:
            ids = self.temp_config.sections()
            if ids:
                self.build_id = int(ids[-1])
            else:
                self.logger.error("No sections in temporary config file")
                sys.exit(1)
            self.logger.debug("Will use \"%s\" as build id"
                              % _green(self.build_id))
        # Read specified option
        return self.temp_config.get(str(self.build_id), option)

    def setup_fabric(self):
        """Setup fabric environment"""
        self.logger.info("Setup fabric parameters..")
        fabric.env.user = self.read_temp_config('server_user')
        fabric.env.host_string = self.read_temp_config('server_ip')
        fabric.env.port = int(self.read_temp_config('server_port'))
        fabric.env.password = self.read_temp_config('server_passwd')
        fabric.env.connection_attempts = 10
        fabric.env.shell = "/bin/bash -c"
        fabric.env.disable_known_hosts = True
        fabric.env.output_prefix = None

    def _check_hash_sum(self, localfile, remotefile):
        """Check hash sums of two files"""
self.logger.debug("Check hash sum for local file %s" % localfile) hash1 = os.popen("sha256sum %s" % localfile).read().split(' ')[0] self.logger.debug("Local file has sha256 hash %s" % hash1) self.logger.debug("Check hash sum for remote file %s" % remotefile) hash2 = _run("sha256sum %s" % remotefile, False) hash2 = hash2.split(' ')[0] self.logger.debug("Remote file has sha256 hash %s" % hash2) if hash1 != hash2: self.logger.error("Hashes differ.. aborting") sys.exit(1) @_check_fabric def clone_repo(self, local_repo=False): """Clone Synnefo repo from slave server""" self.logger.info("Configure repositories on remote server..") self.logger.debug("Install/Setup git") cmd = """ apt-get install git --yes --force-yes git config --global user.name {0} git config --global user.email {1} """.format(self.config.get('Global', 'git_config_name'), self.config.get('Global', 'git_config_mail')) _run(cmd, False) # Clone synnefo_repo synnefo_branch = self.clone_synnefo_repo(local_repo=local_repo) # Clone pithos-web-client self.clone_pithos_webclient_repo(synnefo_branch) @_check_fabric def clone_synnefo_repo(self, local_repo=False): """Clone Synnefo repo to remote server""" # Find synnefo_repo and synnefo_branch to use synnefo_repo = self.config.get('Global', 'synnefo_repo') synnefo_branch = self.config.get("Global", "synnefo_branch") if synnefo_branch == "": synnefo_branch = \ subprocess.Popen( ["git", "rev-parse", "--abbrev-ref", "HEAD"], stdout=subprocess.PIPE).communicate()[0].strip() if synnefo_branch == "HEAD": synnefo_branch = \ subprocess.Popen( ["git", "rev-parse", "--short", "HEAD"], stdout=subprocess.PIPE).communicate()[0].strip() self.logger.debug("Will use branch \"%s\"" % _green(synnefo_branch)) if local_repo or synnefo_repo == "": # Use local_repo self.logger.debug("Push local repo to server") # Firstly create the remote repo _run("git init synnefo", False) # Then push our local repo over ssh # We have to pass some arguments to ssh command # namely to disable host 
checking. (temp_ssh_file_handle, temp_ssh_file) = tempfile.mkstemp() os.close(temp_ssh_file_handle) # XXX: git push doesn't read the password cmd = """ echo 'exec ssh -o "StrictHostKeyChecking no" \ -o "UserKnownHostsFile /dev/null" \ -q "$@"' > {4} chmod u+x {4} export GIT_SSH="{4}" echo "{0}" | git push --quiet --mirror ssh://{1}@{2}:{3}/~/synnefo rm -f {4} """.format(fabric.env.password, fabric.env.user, fabric.env.host_string, fabric.env.port, temp_ssh_file) os.system(cmd) else: # Clone Synnefo from remote repo self.logger.debug("Clone synnefo from %s" % synnefo_repo) self._git_clone(synnefo_repo) # Checkout the desired synnefo_branch self.logger.debug("Checkout \"%s\" branch/commit" % synnefo_branch) cmd = """ cd synnefo for branch in `git branch -a | grep remotes | grep -v HEAD`; do git branch --track ${branch##*/} $branch done git checkout %s """ % (synnefo_branch) _run(cmd, False) return synnefo_branch @_check_fabric def clone_pithos_webclient_repo(self, synnefo_branch): """Clone Pithos WebClient repo to remote server""" # Find pithos_webclient_repo and pithos_webclient_branch to use pithos_webclient_repo = \ self.config.get('Global', 'pithos_webclient_repo') pithos_webclient_branch = \ self.config.get('Global', 'pithos_webclient_branch') # Clone pithos-webclient from remote repo self.logger.debug("Clone pithos-webclient from %s" % pithos_webclient_repo) self._git_clone(pithos_webclient_repo) # Track all pithos-webclient branches cmd = """ cd pithos-web-client for branch in `git branch -a | grep remotes | grep -v HEAD`; do git branch --track ${branch##*/} $branch > /dev/null 2>&1 done git --no-pager branch --no-color """ webclient_branches = _run(cmd, False) webclient_branches = webclient_branches.split() # If we have pithos_webclient_branch in config file use this one # else try to use the same branch as synnefo_branch # else use an appropriate one. 
if pithos_webclient_branch == "": if synnefo_branch in webclient_branches: pithos_webclient_branch = synnefo_branch else: # If synnefo_branch starts with one of # 'master', 'hotfix'; use the master branch if synnefo_branch.startswith('master') or \ synnefo_branch.startswith('hotfix'): pithos_webclient_branch = "master" # If synnefo_branch starts with one of # 'develop', 'feature'; use the develop branch elif synnefo_branch.startswith('develop') or \ synnefo_branch.startswith('feature'): pithos_webclient_branch = "develop" else: self.logger.warning( "Cannot determine which pithos-web-client branch to " "use based on \"%s\" synnefo branch. " "Will use develop." % synnefo_branch) pithos_webclient_branch = "develop" # Checkout branch self.logger.debug("Checkout \"%s\" branch" % _green(pithos_webclient_branch)) cmd = """ cd pithos-web-client git checkout {0} """.format(pithos_webclient_branch) _run(cmd, False) def _git_clone(self, repo): """Clone repo to remote server Currently clonning from code.grnet.gr can fail unexpectedly. So retry!! """ cloned = False for i in range(1, 11): try: _run("git clone %s" % repo, False) cloned = True break except BaseException: self.logger.warning("Clonning failed.. 
retrying %s/10" % i) if not cloned: self.logger.error("Can not clone repo.") sys.exit(1) @_check_fabric def build_packages(self): """Build packages needed by Synnefo software""" self.logger.info("Install development packages") cmd = """ apt-get update apt-get install zlib1g-dev dpkg-dev debhelper git-buildpackage \ python-dev python-all python-pip ant --yes --force-yes pip install -U devflow """ _run(cmd, False) # Patch pydist bug if self.config.get('Global', 'patch_pydist') == "True": self.logger.debug("Patch pydist.py module") cmd = r""" sed -r -i 's/(\(\?P<name>\[A-Za-z\]\[A-Za-z0-9_\.)/\1\\\-/' \ /usr/share/python/debpython/pydist.py """ _run(cmd, False) # Build synnefo packages self.build_synnefo() # Build pithos-web-client packages self.build_pithos_webclient() @_check_fabric def build_synnefo(self): """Build Synnefo packages""" self.logger.info("Build Synnefo packages..") cmd = """ devflow-autopkg snapshot -b ~/synnefo_build-area --no-sign """ with fabric.cd("synnefo"): _run(cmd, True) # Install snf-deploy package self.logger.debug("Install snf-deploy package") cmd = """ dpkg -i snf-deploy*.deb apt-get -f install --yes --force-yes """ with fabric.cd("synnefo_build-area"): with fabric.settings(warn_only=True): _run(cmd, True) # Setup synnefo packages for snf-deploy self.logger.debug("Copy synnefo debs to snf-deploy packages dir") cmd = """ cp ~/synnefo_build-area/*.deb /var/lib/snf-deploy/packages/ """ _run(cmd, False) @_check_fabric def build_pithos_webclient(self): """Build pithos-web-client packages""" self.logger.info("Build pithos-web-client packages..") cmd = """ devflow-autopkg snapshot -b ~/webclient_build-area --no-sign """ with fabric.cd("pithos-web-client"): _run(cmd, True) # Setup pithos-web-client packages for snf-deploy self.logger.debug("Copy webclient debs to snf-deploy packages dir") cmd = """ cp ~/webclient_build-area/*.deb /var/lib/snf-deploy/packages/ """ _run(cmd, False) @_check_fabric def build_documentation(self): """Build Synnefo 
documentation""" self.logger.info("Build Synnefo documentation..") _run("pip install -U Sphinx", False) with fabric.cd("synnefo"): _run( "devflow-update-version; " "./ci/make_docs.sh synnefo_documentation", False) def fetch_documentation(self, dest=None): """Fetch Synnefo documentation""" self.logger.info("Fetch Synnefo documentation..") if dest is None: dest = "synnefo_documentation" dest = os.path.abspath(dest) if not os.path.exists(dest): os.makedirs(dest) self.fetch_compressed("synnefo/synnefo_documentation", dest) self.logger.info("Downloaded documentation to %s" % _green(dest)) @_check_fabric def deploy_synnefo(self, schema=None): """Deploy Synnefo using snf-deploy""" self.logger.info("Deploy Synnefo..") if schema is None: schema = self.config.get('Global', 'schema') self.logger.debug("Will use \"%s\" schema" % _green(schema)) schema_dir = os.path.join(self.ci_dir, "schemas/%s" % schema) if not (os.path.exists(schema_dir) and os.path.isdir(schema_dir)): raise ValueError("Unknown schema: %s" % schema) self.logger.debug("Upload schema files to server") _put(os.path.join(schema_dir, "*"), "/etc/snf-deploy/") self.logger.debug("Change password in nodes.conf file") cmd = """ sed -i 's/^password =.*/password = {0}/' /etc/snf-deploy/nodes.conf """.format(fabric.env.password) _run(cmd, False) self.logger.debug("Run snf-deploy") cmd = """ snf-deploy keygen --force snf-deploy --disable-colors --autoconf all """ _run(cmd, True) @_check_fabric def unit_test(self): """Run Synnefo unit test suite""" self.logger.info("Run Synnefo unit test suite") component = self.config.get('Unit Tests', 'component') self.logger.debug("Install needed packages") cmd = """ pip install -U mock pip install -U factory_boy pip install -U nose """ _run(cmd, False) self.logger.debug("Upload tests.sh file") unit_tests_file = os.path.join(self.ci_dir, "tests.sh") _put(unit_tests_file, ".") self.logger.debug("Run unit tests") cmd = """ bash tests.sh {0} """.format(component) _run(cmd, True) 
@_check_fabric def run_burnin(self): """Run burnin functional test suite""" self.logger.info("Run Burnin functional test suite") cmd = """ auth_url=$(grep -e '^url =' .kamakirc | cut -d' ' -f3) token=$(grep -e '^token =' .kamakirc | cut -d' ' -f3) images_user=$(kamaki image list -l | grep owner | \ cut -d':' -f2 | tr -d ' ') snf-burnin --auth-url=$auth_url --token=$token {0} BurninExitStatus=$? exit $BurninExitStatus """.format(self.config.get('Burnin', 'cmd_options')) _run(cmd, True) @_check_fabric def fetch_compressed(self, src, dest=None): """Create a tarball and fetch it locally""" self.logger.debug("Creating tarball of %s" % src) basename = os.path.basename(src) tar_file = basename + ".tgz" cmd = "tar czf %s %s" % (tar_file, src) _run(cmd, False) if not os.path.exists(dest): os.makedirs(dest) tmp_dir = tempfile.mkdtemp() fabric.get(tar_file, tmp_dir) dest_file = os.path.join(tmp_dir, tar_file) self._check_hash_sum(dest_file, tar_file) self.logger.debug("Untar packages file %s" % dest_file) cmd = """ cd %s tar xzf %s cp -r %s/* %s rm -r %s """ % (tmp_dir, tar_file, src, dest, tmp_dir) os.system(cmd) self.logger.info("Downloaded %s to %s" % (src, _green(dest))) @_check_fabric def fetch_packages(self, dest=None): """Fetch Synnefo packages""" if dest is None: dest = self.config.get('Global', 'pkgs_dir') dest = os.path.abspath(os.path.expanduser(dest)) if not os.path.exists(dest): os.makedirs(dest) self.fetch_compressed("synnefo_build-area", dest) self.fetch_compressed("webclient_build-area", dest) self.logger.info("Downloaded debian packages to %s" % _green(dest)) def x2go_plugin(self, dest=None): """Produce an html page which will use the x2goplugin Arguments: dest -- The file where to save the page (String) """ output_str = """ <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"> <html> <head> <title>X2Go SynnefoCI Service</title> </head> <body onload="checkPlugin()"> <div id="x2goplugin"> <object src="location" type="application/x2go" 
name="x2goplugin" palette="background" height="100%" hspace="0" vspace="0" width="100%" x2goconfig=" session=X2Go-SynnefoCI-Session server={0} user={1} sshport={2} published=true autologin=true "> </object> </div> </body> </html> """.format(self.read_temp_config('server_ip'), self.read_temp_config('server_user'), self.read_temp_config('server_port')) if dest is None: dest = self.config.get('Global', 'x2go_plugin_file') self.logger.info("Writting x2go plugin html file to %s" % dest) fid = open(dest, 'w') fid.write(output_str) fid.close()
from kamaki.clients.utils import https
from soi.config import AUTH_URL, CA_CERTS

# Trust the deployment's CA bundle for all kamaki HTTPS connections
https.patch_with_certs(CA_CERTS)

# IGNORE_SSL is an optional config flag; when present and true, disable
# certificate verification entirely (e.g. for test deployments)
try:
    from soi.config import IGNORE_SSL
    if IGNORE_SSL:
        https.patch_ignore_ssl()
except ImportError:
    pass

import webob.exc

# endpoints are offered auth-free, so no need for an admin token
ADMIN_TOKEN = ''
auth = AstakosClient(AUTH_URL, ADMIN_TOKEN)

# Map each synnefo service type to its endpoint URL and kamaki client class;
# identity is known up front, the rest are discovered via Astakos
endpoints = {'identity': AUTH_URL}
client_classes = {'identity': AstakosClient}
for cls in (
        CycladesComputeClient, CycladesNetworkClient,
        CycladesBlockStorageClient):
    service_type = cls.service_type
    endpoints[service_type] = auth.get_endpoint_url(service_type)
    client_classes[service_type] = cls


def handle_exceptions(f):
    """Run a method, raise Synnefo errors as snf-occi exceptions"""
    def wrapper(*args, **kwargs):
        try:
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF # USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED # AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # The views and conclusions contained in the software and # documentation are those of the authors and should not be # interpreted as representing official policies, either expressed # or implied, of GRNET S.A. from kamaki.clients.astakos import AstakosClient AUTHENTICATION_URL = "https://astakos.example.com/identity/v2.0" TOKEN = "User-Token" astakos = AstakosClient(AUTHENTICATION_URL, TOKEN) # Get user uuid user = astakos.authenticate() uuid = user['access']['user']['id'] # Get resources assigned on my personal (system) project all_resources = astakos.get_quotas() for project, resources in all_resources.items(): print "For project with id {project_id}".format(project_id=project) for resource, values in resources.items(): print " {resource}: {used}/{limit}".format( resource=resource, used=values["usage"], limit=values["limit"])
class Cyclades(livetest.Generic):
    """Set up a Cyclades test"""

    def setUp(self):
        print
        # NOTE(review): eval() on fixture files assumes they are trusted;
        # never point these settings at untrusted input
        with open(self['cmpimage', 'details']) as f:
            self.img_details = eval(f.read())
        self.img = self.img_details['id']
        with open(self['flavor', 'details']) as f:
            self._flavor_details = eval(f.read())
        self.PROFILES = ('ENABLED', 'DISABLED', 'PROTECTED')

        self.servers = {}
        # Timestamp-based names keep test resources unique per run
        self.now = time.mktime(time.gmtime())
        self.servname1 = 'serv' + unicode(self.now)
        self.servname2 = self.servname1 + '_v2'
        self.servname1 += '_v1'
        self.flavorid = self._flavor_details['id']
        #servers have to be created at the begining...
        self.networks = {}
        self.netname1 = 'net' + unicode(self.now)
        self.netname2 = 'net' + unicode(self.now) + '_v2'

        self.cloud = 'cloud.%s' % self['testcloud']
        aurl, self.token = self[self.cloud, 'url'], self[self.cloud, 'token']
        self.auth_base = AstakosClient(aurl, self.token)
        curl = self.auth_base.get_service_endpoints('compute')['publicURL']
        self.client = CycladesClient(curl, self.token)

    def tearDown(self):
        """Destoy servers used in testing"""
        for net in self.networks.keys():
            self._delete_network(net)
        for server in self.servers.values():
            self._delete_server(server['id'])
            print('DEL VM %s (%s)' % (server['id'], server['name']))

    def test_000(self):
        "Prepare a full Cyclades test scenario"
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self.server2 = self._create_server(
            self.servname2, self.flavorid, self.img)
        super(self.__class__, self).test_000()

    def _create_server(self, servername, flavorid, imageid, personality=None):
        # Creates a server and registers it for tearDown cleanup
        server = self.client.create_server(
            servername, flavorid, imageid, personality=personality)
        print('CREATE VM %s (%s)' % (server['id'], server['name']))
        self.servers[servername] = server
        return server

    def _delete_server(self, servid):
        # Best-effort delete: the bare except makes cleanup silent even if
        # the server is already gone or the API call fails
        try:
            current_state = self.client.get_server_details(servid)
            current_state = current_state['status']
            if current_state == 'DELETED':
                return
            self.client.delete_server(servid)
            self._wait_for_status(servid, current_state)
            self.client.delete_server(servid)
        except:
            return

    def _create_network(self, netname, **kwargs):
        # Creates a network and registers it for tearDown cleanup
        net = self.client.create_network(netname, **kwargs)
        self.networks[net['id']] = net
        return net

    def _delete_network(self, netid):
        if not netid in self.networks:
            return None
        print('Disconnect nics of network %s' % netid)
        self.client.disconnect_network_nics(netid)

        def netwait(wait):
            # Retried by do_with_progress_bar until delete succeeds
            try:
                self.client.delete_network(netid)
            except ClientError:
                time.sleep(wait)
        self.do_with_progress_bar(
            netwait,
            'Delete network %s' % netid,
            self._waits[:7])
        return self.networks.pop(netid)

    def _wait_for_network(self, netid, status):
        def netwait(wait):
            r = self.client.get_network_details(netid)
            if r['status'] == status:
                return
            time.sleep(wait)
        self.do_with_progress_bar(
            netwait,
            'Wait network %s to reach status %s' % (netid, status),
            self._waits[:5])

    def _wait_for_nic(self, netid, servid, in_creation=True):
        # Wait for the nic to (dis)appear on servid; returns True if the
        # nic is finally present, False otherwise
        self._wait_for_network(netid, 'ACTIVE')

        def nicwait(wait):
            nics = self.client.list_server_nics(servid)
            for net in nics:
                found_nic = net['network_id'] == netid
                if (in_creation and found_nic) or not (
                        in_creation or found_nic):
                    return
            time.sleep(wait)
        self.do_with_progress_bar(
            nicwait,
            'Wait nic-%s-%s to %sconnect' % (
                netid,
                servid,
                '' if in_creation else 'dis'),
            self._waits[:5])
        for net in self.client.list_server_nics(servid):
            if netid == net['network_id']:
                return True
        return False

    def _has_status(self, servid, status):
        r = self.client.get_server_details(servid)
        #print 'MY ', servid, ' STATUS IS ', r['status']
        return r['status'] == status

    def _wait_for_status(self, servid, status):
        # Blocks until servid *leaves* the given status (wait_server
        # semantics), showing a progress bar meanwhile
        (wait_bar, wait_cb) = self._safe_progress_bar(
            'Server %s in %s' % (servid, status))
        self.client.wait_server(
            servid, status, wait_cb=wait_cb, delay=2, max_wait=198)
        self._safe_progress_bar_finish(wait_bar)

    def test_parallel_creation(self):
        """test create with multiple threads

        Do not use this in regular livetest
        """
        from kamaki.clients import SilentEvent
        c = []
        for i in range(8):
            sname = '%s_%s' % (self.servname1, i)
            c.append(SilentEvent(
                self._create_server,
                sname,
                self.flavorid,
                self.img))
        for i in range(8):
            c[i].start()

    def test_create_server(self):
        """Test create_server"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._wait_for_status(self.server1['id'], 'BUILD')
        self._test_0010_create_server()

    def _test_0010_create_server(self):
        self.assertEqual(self.server1["name"], self.servname1)
        self.assertEqual(self.server1["flavor"]["id"], self.flavorid)
        self.assertEqual(self.server1["image"]["id"], self.img)
        self.assertEqual(self.server1["status"], "BUILD")

    def test_list_servers(self):
        """Test list servers"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self.server2 = self._create_server(
            self.servname2, self.flavorid, self.img)
        self._test_0020_list_servers()

    def _test_0020_list_servers(self):
        servers = self.client.list_servers()
        dservers = self.client.list_servers(detail=True)

        """detailed and simple are same size"""
        self.assertEqual(len(dservers), len(servers))

        # These fields appear only in detailed listings
        fields = set([
            'created', 'flavor', 'hostId', 'image', 'progress', 'status',
            'updated'])
        for i, srv in enumerate(servers):
            self.assertTrue(fields.isdisjoint(srv))
            self.assertTrue(fields.issubset(dservers[i]))

        """detailed and simple contain same names"""
        names = sorted(map(lambda x: x["name"], servers))
        dnames = sorted(map(lambda x: x["name"], dservers))
        self.assertEqual(names, dnames)

    def _test_0030_wait_test_servers_to_build(self):
        """Pseudo-test to wait for VMs to load"""
        print('')
        self._wait_for_status(self.server1['id'], 'BUILD')
        self._wait_for_status(self.server2['id'], 'BUILD')

    def test_get_server_details(self):
        """Test get_server_details"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._wait_for_status(self.server1['id'], 'BUILD')
        self._test_0040_get_server_details()

    def _test_0040_get_server_details(self):
        r = self.client.get_server_details(self.server1['id'])
        self.assertEqual(r["name"], self.servname1)
        self.assertEqual(r["flavor"]["id"], self.flavorid)
        self.assertEqual(r["image"]["id"], self.img)
        self.assertEqual(r["status"], "ACTIVE")

    def test_update_server_name(self):
        """Test update_server_name"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._test_0050_update_server_name()

    def _test_0050_update_server_name(self):
        new_name = self.servname1 + '_new_name'
        self.client.update_server_name(self.server1['id'], new_name)
        r = self.client.get_server_details(
            self.server1['id'], success=(200, 400))
        self.assertEqual(r['name'], new_name)
        # Keep the bookkeeping dict in sync so tearDown still finds it
        changed = self.servers.pop(self.servname1)
        changed['name'] = new_name
        self.servers[new_name] = changed

    def test_reboot_server(self):
        """Test reboot server"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._wait_for_status(self.server1['id'], 'BUILD')
        self.server2 = self._create_server(
            self.servname2, self.flavorid + 2, self.img)
        self._wait_for_status(self.server2['id'], 'BUILD')
        self._test_0060_reboot_server()
        self._wait_for_status(self.server1['id'], 'REBOOT')
        self._wait_for_status(self.server2['id'], 'REBOOT')

    def _test_0060_reboot_server(self):
        self.client.reboot_server(self.server1['id'])
        self.assertTrue(self._has_status(self.server1['id'], 'REBOOT'))
        self.client.reboot_server(self.server2['id'], hard=True)
        self.assertTrue(self._has_status(self.server2['id'], 'REBOOT'))

    def test_resize_server(self):
        """Modify the flavor of a server"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._test_0065_resize_server()
        # NOTE(review): no `delete_server' method is visible on this class;
        # `_delete_server' was probably intended — confirm before relying
        # on this test
        self.delete_server(self.server1['id'])
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)

    def _test_0065_resize_server(self):
        # NOTE(review): passes the server *name* where resize_server
        # presumably expects an id, and queries get_flavor_details with a
        # server id — verify against the client API
        self.client.resize_server(self.servname1, self.flavorid + 2)
        srv = self.client.get_flavor_details(self.server1['id'])
        self.assertEqual(srv['flavor']['id'], self.flavorid + 2)

    def _test_0070_wait_test_servers_to_reboot(self):
        """Pseudo-test to wait for VMs to load"""
        print('')
        self._wait_for_status(self.server1['id'], 'REBOOT')
        self._wait_for_status(self.server2['id'], 'REBOOT')

    def test_create_server_metadata(self):
        """Test create_server_metadata"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._test_0080_create_server_metadata()

    def _test_0080_create_server_metadata(self):
        r1 = self.client.create_server_metadata(
            self.server1['id'], 'mymeta', 'mymeta val')
        self.assertTrue('mymeta' in r1)
        r2 = self.client.get_server_metadata(self.server1['id'], 'mymeta')
        self.assert_dicts_are_equal(r1, r2)

    def test_get_server_metadata(self):
        """Test get server_metadata"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._test_0090_get_server_metadata()

    def _test_0090_get_server_metadata(self):
        self.client.update_server_metadata(
            self.server1['id'], mymeta_0='val_0')
        r = self.client.get_server_metadata(self.server1['id'], 'mymeta_0')
        self.assertEqual(r['mymeta_0'], 'val_0')

    def test_update_server_metadata(self):
        """Test update_server_metadata"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._test_0100_update_server_metadata()

    def _test_0100_update_server_metadata(self):
        r1 = self.client.update_server_metadata(
            self.server1['id'], mymeta3='val2')
        self.assertTrue('mymeta3'in r1)
        r2 = self.client.update_server_metadata(
            self.server1['id'], mymeta3='val3')
        # NOTE(review): assertTrue with two args is not assertEqual — the
        # second argument is just the failure message here
        self.assertTrue(r2['mymeta3'], 'val3')

    def test_delete_server_metadata(self):
        """Test delete_server_metadata"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._test_0110_delete_server_metadata()

    def _test_0110_delete_server_metadata(self):
        r1 = self.client.update_server_metadata(
            self.server1['id'], mymeta='val')
        self.assertTrue('mymeta' in r1)
        self.client.delete_server_metadata(self.server1['id'], 'mymeta')
        # After deletion, fetching the key must raise a 404 ClientError
        try:
            self.client.get_server_metadata(self.server1['id'], 'mymeta')
            raise ClientError('Wrong Error', status=100)
        except ClientError as err:
            self.assertEqual(err.status, 404)

    def test_list_flavors(self):
        """Test flavors_get"""
        self._test_0120_list_flavors()

    def _test_0120_list_flavors(self):
        r = self.client.list_flavors()
        self.assertTrue(len(r) > 1)
        r = self.client.list_flavors(detail=True)
        self.assertTrue('SNF:disk_template' in r[0])

    def test_get_flavor_details(self):
        """Test test_get_flavor_details"""
        self._test_0130_get_flavor_details()

    def _test_0130_get_flavor_details(self):
        r = self.client.get_flavor_details(self.flavorid)
        self.assert_dicts_are_equal(self._flavor_details, r)

    #def test_list_images(self):
    #    """Test list_images"""
    #    self._test_0140_list_images()

    def _test_0140_list_images(self):
        r = self.client.list_images()
        self.assertTrue(len(r) > 1)
        r = self.client.list_images(detail=True)
        for detailed_img in r:
            if detailed_img['id'] == self.img:
                break
        self.assert_dicts_are_equal(detailed_img, self.img_details)

    def test_get_image_details(self):
        """Test image_details"""
        self._test_0150_get_image_details()

    def _test_0150_get_image_details(self):
        r = self.client.get_image_details(self.img)
        self.assert_dicts_are_equal(r, self.img_details)

    def test_get_image_metadata(self):
        """Test get_image_metadata"""
        self._test_0160_get_image_metadata()

    def _test_0160_get_image_metadata(self):
        r = self.client.get_image_metadata(self.img)
        self.assert_dicts_are_equal(
            self.img_details['properties'], r)
        for key, val in self.img_details['properties'].items():
            r = self.client.get_image_metadata(self.img, key)
            self.assertEqual(r[key], val)

    def test_shutdown_server(self):
        """Test shutdown_server"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._wait_for_status(self.server1['id'], 'BUILD')
        self._test_0170_shutdown_server()

    def _test_0170_shutdown_server(self):
        self.client.shutdown_server(self.server1['id'])
        self._wait_for_status(self.server1['id'], 'ACTIVE')
        r = self.client.get_server_details(self.server1['id'])
        self.assertEqual(r['status'], 'STOPPED')

    def test_start_server(self):
        """Test start_server"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._wait_for_status(self.server1['id'], 'BUILD')
        self.client.shutdown_server(self.server1['id'])
        self._wait_for_status(self.server1['id'], 'ACTIVE')
        self._test_0180_start_server()

    def _test_0180_start_server(self):
        self.client.start_server(self.server1['id'])
        self._wait_for_status(self.server1['id'], 'STOPPED')
        r = self.client.get_server_details(self.server1['id'])
        self.assertEqual(r['status'], 'ACTIVE')

    def test_get_server_console(self):
        """Test get_server_console"""
        self.server2 = self._create_server(
            self.servname2, self.flavorid, self.img)
        self._wait_for_status(self.server2['id'], 'BUILD')
        self._test_0190_get_server_console()

    def _test_0190_get_server_console(self):
        r = self.client.get_server_console(self.server2['id'])
        self.assertTrue('host' in r)
        self.assertTrue('password' in r)
        self.assertTrue('port' in r)
        self.assertTrue('type' in r)

    def test_get_firewall_profile(self):
        """Test get_firewall_profile"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._test_0200_get_firewall_profile()

    def _test_0200_get_firewall_profile(self):
        self._wait_for_status(self.server1['id'], 'BUILD')
        fprofile = self.client.get_firewall_profile(self.server1['id'])
        self.assertTrue(fprofile in self.PROFILES)

    def test_set_firewall_profile(self):
        """Test set_firewall_profile"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._test_0210_set_firewall_profile()

    def _test_0210_set_firewall_profile(self):
        self._wait_for_status(self.server1['id'], 'BUILD')
        PROFILES = ['DISABLED', 'ENABLED', 'DISABLED', 'PROTECTED']
        fprofile = self.client.get_firewall_profile(self.server1['id'])
        print('')
        count_success = 0
        # Cycle through the profiles; after the first successful swap,
        # later failures are reported but do not fail the test
        for counter, fprofile in enumerate(PROFILES):
            npos = counter + 1
            try:
                nprofile = PROFILES[npos]
            except IndexError:
                nprofile = PROFILES[0]
            print('\tprofile swap %s: %s -> %s' % (npos, fprofile, nprofile))
            self.client.set_firewall_profile(self.server1['id'], nprofile)
            time.sleep(0.5)
            # Profile changes take effect after a (hard) reboot
            self.client.reboot_server(self.server1['id'], hard=True)
            time.sleep(1)
            self._wait_for_status(self.server1['id'], 'REBOOT')
            time.sleep(0.5)
            changed = self.client.get_firewall_profile(self.server1['id'])
            try:
                self.assertEqual(changed, nprofile)
            except AssertionError as err:
                if count_success:
                    print('\tFAIL in swap #%s' % npos)
                    break
                else:
                    raise err
            count_success += 1

    def test_get_server_stats(self):
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self._test_0220_get_server_stats()

    def _test_0220_get_server_stats(self):
        r = self.client.get_server_stats(self.server1['id'])
        it = ('cpuBar', 'cpuTimeSeries', 'netBar', 'netTimeSeries', 'refresh')
        for term in it:
            self.assertTrue(term in r)

    def test_create_network(self):
        """Test create_network"""
        self._test_0230_create_network()

    def _test_0230_create_network(self):
        print('\twith no params')
        self.network1 = self._create_network(self.netname1)
        self._wait_for_network(self.network1['id'], 'ACTIVE')
        n1id = self.network1['id']
        self.network1 = self.client.get_network_details(n1id)
        nets = self.client.list_networks(self.network1['id'])
        chosen = [net for net in nets if net['id'] == n1id][0]
        # 'updated' timestamps may differ between the two calls
        chosen.pop('updated')
        net1 = dict(self.network1)
        net1.pop('updated')
        self.assert_dicts_are_equal(chosen, net1)

        full_args = dict(
            cidr='192.168.1.0/24',
            gateway='192.168.1.1',
            type='MAC_FILTERED',
            dhcp=True)
        try_args = dict(all=True)
        try_args.update(full_args)
        # Recreate the network once per parameter (and once with all of
        # them), deleting first to stay under the network quota
        for param, val in try_args.items():
            print('\tdelete %s to avoid max net limit' % n1id)
            self._delete_network(n1id)
            kwargs = full_args if param == 'all' else {param: val}
            print('\twith %s=%s' % (param, val))
            self.network1 = self._create_network(self.netname1, **kwargs)
            n1id = self.network1['id']
            self._wait_for_network(n1id, 'ACTIVE')
            self.network1 = self.client.get_network_details(n1id)
            if param == 'all':
                for p, v in full_args.items():
                    self.assertEqual(self.network1[p], v)
            else:
                self.assertEqual(self.network1[param], val)

    def test_connect_server(self):
        """Test connect_server"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self.network1 = self._create_network(self.netname1)
        self._wait_for_status(self.server1['id'], 'BUILD')
        self._wait_for_network(self.network1['id'], 'ACTIVE')
        # NOTE(review): calls _test_0240_connect_server but the method below
        # is named _test_0250_connect_server — likely a typo; confirm
        self._test_0240_connect_server()

    def _test_0250_connect_server(self):
        self.client.connect_server(self.server1['id'], self.network1['id'])
        self.assertTrue(
            self._wait_for_nic(self.network1['id'], self.server1['id']))

    def test_disconnect_server(self):
        """Test disconnect_server"""
        self.test_connect_server()
        self._test_0250_disconnect_server()

    def _test_0250_disconnect_server(self):
        self.client.disconnect_server(self.server1['id'], self.network1['id'])
        self.assertTrue(self._wait_for_nic(
            self.network1['id'], self.server1['id'], in_creation=False))

    def _test_0260_wait_for_second_network(self):
        self.network2 = self._create_network(self.netname2)
        self._wait_for_network(self.network2['id'], 'ACTIVE')

    def test_list_server_nics(self):
        """Test list_server_nics"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self.network2 = self._create_network(self.netname2)
        self._wait_for_status(self.server1['id'], 'BUILD')
        self._wait_for_network(self.network2['id'], 'ACTIVE')
        self._test_0280_list_server_nics()

    def _test_0280_list_server_nics(self):
        r = self.client.list_server_nics(self.server1['id'])
        len0 = len(r)
        self.client.connect_server(self.server1['id'], self.network2['id'])
        self.assertTrue(
            self._wait_for_nic(self.network2['id'], self.server1['id']))
        r = self.client.list_server_nics(self.server1['id'])
        self.assertTrue(len(r) > len0)

    def test_list_networks(self):
        """Test list_network"""
        self.network1 = self._create_network(self.netname1)
        self._wait_for_network(self.network1['id'], 'ACTIVE')
        self._test_0290_list_networks()

    def _test_0290_list_networks(self):
        r = self.client.list_networks()
        self.assertTrue(len(r) > 1)
        ids = [net['id'] for net in r]
        names = [net['name'] for net in r]
        self.assertTrue('1' in ids)
        #self.assertTrue('public' in names)
        self.assertTrue(self.network1['id'] in ids)
        self.assertTrue(self.network1['name'] in names)

        r = self.client.list_networks(detail=True)
        ids = [net['id'] for net in r]
        names = [net['name'] for net in r]
        for net in r:
            self.assertTrue(net['id'] in ids)
            self.assertTrue(net['name'] in names)
            for term in ('status', 'updated', 'created'):
                self.assertTrue(term in net.keys())

    def test_list_network_nics(self):
        """Test list_server_nics"""
        self.server1 = self._create_server(
            self.servname1, self.flavorid, self.img)
        self.network1 = self._create_network(self.netname1)
        self.network2 = self._create_network(self.netname2)
        self._wait_for_status(self.server1['id'], 'BUILD')
        self._wait_for_network(self.network1['id'], 'ACTIVE')
        self._wait_for_network(self.network2['id'], 'ACTIVE')
        self.client.connect_server(self.server1['id'], self.network1['id'])
        self.client.connect_server(self.server1['id'], self.network2['id'])
        self._wait_for_nic(self.network1['id'], self.server1['id'])
        self._wait_for_nic(self.network2['id'], self.server1['id'])
        self._test_0293_list_network_nics()

    def _test_0293_list_network_nics(self):
        netid1, netid2 = self.network1['id'], self.network2['id']
        # nic names follow the 'nic-<server id>-<index>' convention
        r = self.client.list_network_nics(netid1)
        expected = ['nic-%s-1' % self.server1['id']]
        self.assertEqual(r, expected)
        r = self.client.list_network_nics(netid2)
        expected = ['nic-%s-2' % self.server1['id']]
        self.assertEqual(r, expected)

    def test_get_network_details(self):
        """Test get_network_details"""
        self.network1 = self._create_network(self.netname1)
        self._test_0300_get_network_details()

    def _test_0300_get_network_details(self):
        r = self.client.get_network_details(self.network1['id'])
        net1 = dict(self.network1)
        # Drop volatile fields before comparing the two snapshots
        net1.pop('status')
        net1.pop('updated', None)
        net1.pop('attachments')
        r.pop('status')
        r.pop('updated', None)
        r.pop('attachments')
        self.assert_dicts_are_equal(net1, r)

    def 
test_update_network_name(self): self.network2 = self._create_network(self.netname2) self._test_0310_update_network_name() def _test_0310_update_network_name(self): updated_name = self.netname2 + '_upd' self.client.update_network_name(self.network2['id'], updated_name) def netwait(wait): r = self.client.get_network_details(self.network2['id']) if r['name'] == updated_name: return time.sleep(wait) self.do_with_progress_bar( netwait, 'Network %s name is changing:' % self.network2['id'], self._waits[:5]) r = self.client.get_network_details(self.network2['id']) self.assertEqual(r['name'], updated_name) """ Don't have auth to test this