def get_api(config):
    """Build an authenticated SlipStream Api client from *config*.

    Applies patch_api() first, then logs in with the configured internal
    credentials and returns the ready-to-use client.
    """
    patch_api()
    client = Api(endpoint=config.endpoint, insecure=config.insecure)
    client.login_internal(config.username, config.password)
    return client
class CIMIClient:
    """Minimal client for the CIMI API of a SlipStream/Nuvla server.

    Wraps an ``Api`` instance for authenticated calls and also supports
    direct HTTP GETs with an internal-ADMIN authentication header.
    """

    # Headers for direct requests; the authn header impersonates the
    # internal ADMIN role on the server side.
    HEADERS = {
        "content-type": "application/json",
        "slipstream-authn-info": "internal ADMIN"
    }

    @staticmethod
    def _normalize_endpoint(cimi_api_url):
        """Return *cimi_api_url* as a full endpoint URL.

        Prepends ``https://`` only when no scheme is already present.
        The previous unconditional ``'https://{}'.format(...)`` turned the
        default URL into the invalid ``https://https://nuv.la/api``.
        """
        if cimi_api_url.startswith(('http://', 'https://')):
            return cimi_api_url
        return 'https://{}'.format(cimi_api_url)

    def __init__(self, cimi_api_url="https://nuv.la/api"):
        # Raw URL is kept as-is for direct requests in local_get().
        self.url = cimi_api_url
        self.api = Api(endpoint=self._normalize_endpoint(cimi_api_url),
                       insecure=True,
                       reauthenticate=True)

    @staticmethod
    def logger(log_level=logging.INFO, log_file="/var/log/cimiclient.log"):
        """Configure and return the root logger, adding a file handler.

        :param log_level: logging level for basicConfig
        :param log_file: path of the log file to append to
        """
        logging.basicConfig(level=log_level)
        root_logger = logging.getLogger()
        file_handler = logging.FileHandler(log_file)
        root_logger.addHandler(file_handler)
        return root_logger

    def authenticate(self, username, password):
        """Log in on the wrapped Api with internal credentials."""
        self.api.login_internal(username, password)

    def local_get(self, resource_name, query=None):
        """GET ``<url>/<resource_name>[?query]`` and return the parsed JSON.

        :param resource_name: CIMI resource collection or id
        :param query: optional raw query string (without leading '?')
        """
        full_url = "{}/{}".format(self.url, resource_name)
        if query:
            full_url += "?{}".format(query)
        return requests.get(full_url, headers=self.HEADERS).json()
def get_dest_api(config):
    """Build an authenticated Api client for the *destination* server.

    Each ``dest_*`` attribute of *config*, when present, overrides the
    corresponding source attribute (endpoint, insecure, username, password).
    """
    patch_api()
    opts = vars(config)
    client = Api(endpoint=opts.get("dest_endpoint", config.endpoint),
                 insecure=opts.get("dest_insecure", config.insecure))
    client.login_internal(opts.get("dest_username", config.username),
                          opts.get("dest_password", config.password))
    return client
def cli(ctx, password, batch_mode, quiet, verbose):
    """ SlipStream command line tool. """
    # Logging verbosity: start at 3 (Notify); each -v lowers it, each -q
    # raises it. Negative levels additionally enable HTTP tracing.
    log_level = 3 - verbose + quiet
    logger.set_level(log_level)
    if log_level < 0:
        logger.enable_http_logging()

    # Attach the Config object to the context for subsequent use.
    cfg = ctx.obj
    cfg.batch_mode = batch_mode

    invoked = ctx.args + ctx.protected_args + [ctx.invoked_subcommand]
    if 'aliases' in invoked:
        return

    # Prompt for credentials unless a cookie jar already exists or the
    # user is explicitly logging in or out.
    needs_login = password or (
        not os.path.isfile(cfg.settings['cookie_file'])
        and 'logout' not in invoked
        and 'login' not in invoked)
    if needs_login:
        ctx.invoke(login, password=password)

    # Subsequent subcommands receive the Api client as ctx.obj.
    ctx.obj = Api(cfg.settings['endpoint'],
                  cfg.settings['cookie_file'],
                  cfg.settings['insecure'])
def execute(self):
    """Authenticate, connect to ZooKeeper and run the worker loop forever."""
    if self.args.name is not None:
        self.name = self.args.name
    else:
        # Pick a pseudo-random entry from the predefined name list.
        self.name = names[int(random.uniform(1, len(names) - 1))]

    self.ss_api = Api(endpoint=self.args.ss_url,
                      insecure=self.args.ss_insecure,
                      reauthenticate=True)
    self.ss_api.login_internal(self.args.ss_user, self.args.ss_pass)

    # Retry forever (max_tries=-1) on both connection loss and commands.
    retries = dict(connection_retry=KazooRetry(max_tries=-1),
                   command_retry=KazooRetry(max_tries=-1))
    self._kz = KazooClient(self.args.zk_hosts, timeout=30.0, **retries)
    self._kz.start()

    self.do_work()
    # Keep the process alive; further work is driven by signals/watchers.
    while True:
        signal.pause()
def init_session(self, url):
    """Lazily create the SlipStream Api session for *url*.

    No-op when a session already exists. Only the scheme and host of
    *url* are used as the endpoint.
    """
    if self.session is not None:
        return
    scheme, netloc = urlparse(url)[:2]
    endpoint = '%s://%s' % (scheme, netloc)
    login_creds = self._get_login_creds()
    if not login_creds:
        self._log_normal(
            'WARNING: No login credentials provided. '
            'Assuming cookies from a persisted cookie-jar %s will be used.'
            % self.cookie_filename)
    self._ss_api = Api(endpoint=endpoint,
                       cookie_file=self.cookie_filename,
                       reauthenticate=True,
                       login_creds=login_creds,
                       insecure=True)
    self.session = self._ss_api.session
def login(cfg, password):
    """ Log in with your slipstream credentials. """
    # Interactive prompting is only allowed outside batch mode.
    should_prompt = not cfg.batch_mode
    api = Api(cfg.settings['endpoint'],
              cfg.settings['cookie_file'],
              cfg.settings['insecure'])
    username = cfg.settings.get('username')

    # First try whatever credentials are already available; in batch mode
    # this is the only attempt and a 401 aborts the process.
    if (username and password) or cfg.batch_mode:
        try:
            api.login_internal(username, password)
        except HTTPError as err:
            if err.response.status_code != 401:
                raise
            logger.warning("Invalid credentials provided.")
            if cfg.batch_mode:
                sys.exit(3)
        else:
            should_prompt = False

    # Interactive loop: keep asking until authentication succeeds.
    while should_prompt:
        logger.notify("Enter your SlipStream credentials.")
        if username is None:
            username = click.prompt("Username")
        password = click.prompt("Password for '{}'".format(username),
                                hide_input=True)
        try:
            api.login_internal(username, password)
        except HTTPError as err:
            if err.response.status_code != 401:
                raise
            logger.error("Authentication failed.")
        else:
            cfg.settings['username'] = username
            logger.notify("Authentication successful.")
            should_prompt = False

    cfg.write_config()
    logger.info("Local credentials saved.")
def do_work(self):
    """Generate service offers for this connector and sync them to SlipStream.

    Flow: initialize the cloud connector, read command options, generate
    offer documents, then add/update them on the server and delete stale
    ones. With --dry-run the offers are only printed.
    """
    # Minimal ConfigHolder; context content appears unused by the connector.
    ch = ConfigHolder(options={
        'verboseLevel': 0,
        'retry': False,
        KEY_RUN_CATEGORY: ''
    }, context={'foo': 'bar'})
    self.cc = self.get_connector_class()(ch)
    self.cc._initialization(self.user_info,
                            **self.get_initialization_extra_kwargs())

    self.base_currency = self.get_option(self.BASE_CURRENCY_KEY)
    verbose = self.get_option('verbose')
    dry_run = self.get_option(self.DRY_RUN_KEY)
    ss_endpoint = self.get_option(self.SS_ENDPOINT_KEY)
    ss_username = self.get_option(self.SS_USERNAME_KEY)
    ss_password = self.get_option(self.SS_PASSWORD_KEY)
    connector_instance_name = self.get_option(self.CONNECTOR_NAME_KEY)

    # CIMI filter matching every VM offer belonging to this connector.
    filter_connector_vm = ' and '.join([
        'connector/href="{0}"'.format(connector_instance_name),
        'resource:type="VM"'
    ])

    self.ssapi = Api(endpoint=ss_endpoint, cookie_file=None, insecure=True)
    if not dry_run:
        self.ssapi.login_internal(ss_username, ss_password)
    self._initialize()

    service_offers = self._generate_service_offers(connector_instance_name)
    if not service_offers:
        raise RuntimeError("No service offer found")

    # Make sure the attribute namespaces used by the offers exist.
    if not dry_run and service_offers:
        self._add_service_attribute_namespace_if_not_exist('resource')
        self._add_service_attribute_namespace_if_not_exist('price')
        prefix = self._get_prefix()
        if prefix:
            self._add_service_attribute_namespace_if_not_exist(prefix)

    # Ids of offers created/updated in this run; everything else matching
    # filter_connector_vm is considered stale and deleted at the end.
    service_offers_ids = set()
    for service_offer in service_offers:
        if dry_run:
            print('\nService offer {0}:\n{1}'.format(
                service_offer['name'], service_offer))
        else:
            # Look for an existing offer with identical key attributes.
            cimi_filter = \
                ' and '.join([filter_connector_vm,
                              'resource:class="{0}"'.format(service_offer['resource:class']),
                              'resource:vcpu={0}'.format(service_offer['resource:vcpu']),
                              'resource:ram={0}'.format(service_offer['resource:ram']),
                              'resource:disk={0}'.format(service_offer['resource:disk']),
                              'resource:operatingSystem="{0}"'.format(service_offer['resource:operatingSystem']),
                              'resource:country="{0}"'.format(service_offer['resource:country']),
                              'resource:instanceType="{0}"'.format(service_offer['resource:instanceType'])])
            search_result = self.ssapi.cimi_search('serviceOffers',
                                                   filter=cimi_filter)
            result_list = search_result.resources_list
            result_count = len(result_list)
            if result_count == 0:
                # No match: create a new offer.
                if verbose:
                    print(
                        '\nAddinging the following service offer {0} to {1}...\n{2}'
                        .format(service_offer['name'], ss_endpoint,
                                service_offer))
                response = self.ssapi.cimi_add('serviceOffers', service_offer)
                service_offers_ids.add(response.json['resource-id'])
            elif result_count == 1:
                # Exactly one match: update it in place.
                if verbose:
                    print(
                        '\nUpdating the following service offer {0} to {1}...\n{2}'
                        .format(service_offer['name'], ss_endpoint,
                                service_offer))
                response = self.ssapi.cimi_edit(result_list[0].id,
                                                service_offer)
                service_offers_ids.add(response.id)
            else:
                # Duplicates on the server: keep them all (mark as live) and warn.
                print(
                    '\n!!! Warning duplicates found of following service offer on {0} !!!\n{1}'
                    .format(ss_endpoint, service_offer['name']))
                for result in result_list:
                    service_offers_ids.add(result.id)

    if not dry_run:
        # Delete connector offers that were not (re)generated in this run.
        response = self.ssapi.cimi_search('serviceOffers',
                                          filter=filter_connector_vm)
        old_service_offers_ids = set(r.id for r in response.resources())
        service_offers_ids_to_delete = old_service_offers_ids - service_offers_ids
        for id in service_offers_ids_to_delete:
            if verbose:
                offer = self.ssapi.cimi_get(id)
                print(
                    '\nDeleting the following service offer with id {0}...\n{1}'
                    .format(id, offer.json))
            self.ssapi.cimi_delete(id)

    print('\n\nCongratulation, executon completed.')
class ServiceOffersCommand(CloudClientCommand):
    """Command that generates service-offer documents for a connector's VM
    sizes and synchronizes them with a SlipStream server.

    Subclasses (one per cloud) override the ``_get_*`` hooks to describe
    the cloud's sizes, prices and metadata; ``do_work`` drives the sync.
    """

    # Overall command timeout, in seconds.
    DEFAULT_TIMEOUT = 600

    # Public service used to convert prices into the base currency.
    EXCHANGE_RATES_SERVICE_URL = 'https://api.exchangeratesapi.io/latest'

    # CIMI collection holding attribute-namespace declarations.
    RESOURCE_SERVICE_ATTRIBUTE_NAMESPACES = 'serviceAttributeNamespaces'

    # Command-line option names.
    DRY_RUN_KEY = 'dry-run'
    COUNTRY_KEY = 'country'
    SS_ENDPOINT_KEY = 'ss-url'
    SS_USERNAME_KEY = 'ss-user'
    SS_PASSWORD_KEY = 'ss-pass'
    BASE_CURRENCY_KEY = 'currency'
    CONNECTOR_NAME_KEY = 'connector-name'

    def __init__(self):
        """Initialize empty state; real setup happens in do_work()."""
        super(ServiceOffersCommand, self).__init__()
        self.cc = None               # cloud connector instance (set in do_work)
        self.ssapi = None            # SlipStream Api client (set in do_work)
        self.base_currency = 'EUR'   # target currency for converted prices
        self._exchange_rates = {}    # cache: base currency -> rates dict

    def _initialize(self):
        """
        This method is called once command arguments have been parsed and
        self.cc and self.ssapi are available
        """
        pass

    def _get_default_timeout(self):
        # Subclass hook of CloudClientCommand.
        return self.DEFAULT_TIMEOUT

    def _list_vm_sizes(self):
        """
        Return a list of available VM sizes.
        """
        return self.cc._list_vm_sizes() if self.cc else None

    def _get_cpu(self, vm_size):
        """
        Extract and return the amount of vCPU from the specified vm_size.
        :param vm_size: A 'size' object as in the list returned by _list_vm_sizes().
        :rtype int
        """
        return self.cc._size_get_cpu(vm_size) if self.cc else None

    def _get_ram(self, vm_size):
        """
        Extract and return the size of the RAM memory in MB from the specified vm_size.
        :param vm_size: A 'size' object as in the list returned by _list_vm_sizes().
        :rtype int
        """
        return self.cc._size_get_ram(vm_size) if self.cc else None

    def _get_disk(self, vm_size):
        """
        Extract and return the size of the root disk in GB from the specified vm_size.
        :param vm_size: A 'size' object as in the list returned by _list_vm_sizes().
        :rtype float
        """
        return self.cc._size_get_disk(vm_size) if self.cc else None

    def _get_instance_type(self, vm_size):
        """
        Extract and return the instance type from the specified vm_size.
        :param vm_size: A 'size' object as in the list returned by _list_vm_sizes().
        :rtype int
        """
        return self.cc._size_get_instance_type(vm_size) if self.cc else None

    def _get_country(self):
        """
        Return the 2-letters symbol of the country where the Cloud reside.
        """
        return self.get_option(self.COUNTRY_KEY)

    def _get_supported_os(self, vm_size):
        """
        Return a list of supported OS for the specified vm_size
        :param vm_size: A vm_size object as returned by the method _list_vm_sizes() of the connector
        """
        return ['linux', 'windows']

    def _get_price(self, vm_size, os, root_disk_size=None):
        """
        Get the price for a given vm_size, OS and optional root disk size
        :param vm_size: A vm_size object as returned by the method _list_vm_sizes() of the connector
        :param os: The name of the operating system type (eg: 'linux', 'suse', 'windows')
        :param root_disk_size: The size of the root disk in GB
        :return: A tuple containing the price per hour and the currency. eg:(0.24, 'USD')
        """
        # Default: no pricing information available.
        return None, None

    def _get_root_disk_sizes(self, vm_size, os):
        """
        Return a list of available root disk sizes for the given vm_size
        :param vm_size: A vm_size object as returned by the method _list_vm_sizes() of the connector
        :param os: The name of the operating system type (eg: 'linux', 'suse', 'windows')
        :return: A list of available disk sizes
        """
        # A fixed size reported by the cloud wins; otherwise offer a
        # generic ladder of disk sizes.
        disk_size = self._get_disk(vm_size)
        if disk_size is not None and disk_size > 0:
            return [disk_size]
        return [
            10, 25, 50, 100, 200, 400, 600, 800, 1000, 1600, 2000, 4000,
            6000, 8000, 10000
        ]

    def _get_root_disk_type(self, vm_size):
        """
        Return the type of the root disk (eg: HDD, SSD, EBS, ...)
        :param vm_size: A vm_size object as returned by the method _list_vm_sizes() of the connector
        """
        return 'Unknown'

    def _get_billing_unit(self, vm_size):
        """
        Return the billing period
        :param vm_size: A vm_size object as returned by the method _list_vm_sizes() of the connector
        """
        return 'MIN'

    def _get_platform(self, vm_size):
        """
        Return the name of platform
        :param vm_size: A vm_size object as returned by the method _list_vm_sizes() of the connector
        """
        pass

    def _get_prefix(self):
        """
        Return the prefix (namespace) to use for extra attributes
        :rtype: str
        """
        pass

    def _get_extra_attributes(self, vm_size):
        """
        Return extra attributes to add to the offer under _get_prefix()
        :param vm_size: A vm_size object as returned by the method _list_vm_sizes() of the connector
        """
        pass

    def get_exchange_rate(self, src_currency, dst_currency):
        """Return the multiplier converting src_currency into dst_currency.

        Rates are fetched once per dst_currency (base) and cached.
        NOTE(review): if src_currency is absent from the fetched rates,
        the .get() returns None and the division raises TypeError —
        confirm callers only pass currencies known to the service.
        """
        if dst_currency not in self._exchange_rates:
            self._exchange_rates[dst_currency] = requests.get(
                self.EXCHANGE_RATES_SERVICE_URL,
                params={
                    'base': dst_currency
                }).json().get('rates', {})
        # Service returns dst->src rates; invert to get src->dst.
        return 1.0 / self._exchange_rates.get(dst_currency, {}).get(src_currency)

    def convert_currency(self, src_currency, dst_currency, amount):
        """Convert *amount* from src_currency to dst_currency (identity if equal)."""
        return amount * self.get_exchange_rate(
            src_currency, dst_currency) if src_currency != dst_currency else amount

    @staticmethod
    def _generate_service_attribute_namespace(prefix, description=None, acl=None):
        """Build a serviceAttributeNamespace document for *prefix*.

        :param prefix: namespace prefix (also used in the resource id/uri)
        :param description: optional human-readable description
        :param acl: optional ACL; defaults to ADMIN-owned, USER-viewable
        """
        if acl is None:
            acl = {
                "owner": {
                    "principal": "ADMIN",
                    "type": "ROLE"
                },
                "rules": [{
                    "principal": "USER",
                    "type": "ROLE",
                    "right": "VIEW"
                }, {
                    "type": "ROLE",
                    "principal": "ADMIN",
                    "right": "ALL"
                }]
            }
        san = {
            "prefix": prefix,
            "id": "service-attribute-namespace/" + prefix,
            "acl": acl,
            "resourceURI": "http://sixsq.com/slipstream/1/ServiceAttributeNamespace",
            "uri": "http://sixsq.com/slipstream/schema/1/" + prefix
        }
        if description is not None:
            san['description'] = description
        return san

    def _add_service_attribute_namespace_if_not_exist(self, prefix, description=None, acl=None):
        """Create the attribute namespace for *prefix* on the server unless present."""
        verbose = self.get_option('verbose')
        cimi_resp = self.ssapi.cimi_search(
            self.RESOURCE_SERVICE_ATTRIBUTE_NAMESPACES,
            filter='prefix="{0}"'.format(prefix))
        if cimi_resp.count == 0:
            service_attribute_namespace = self._generate_service_attribute_namespace(
                prefix, description, acl)
            if verbose:
                print(
                    '\nAddinging the following service attribute namespace {0} ...\n{1}'
                    .format(prefix, service_attribute_namespace))
            self.ssapi.cimi_add(self.RESOURCE_SERVICE_ATTRIBUTE_NAMESPACES,
                                service_attribute_namespace)

    @staticmethod
    def _generate_service_offer(connector_instance_name,
                                cpu,
                                ram,
                                root_disk,
                                root_disk_type,
                                os,
                                price,
                                instance_type=None,
                                base_currency='EUR',
                                billing_unit='MIN',
                                country=None,
                                platform=None,
                                prefix=None,
                                extra_attributes=None):
        """Build one serviceOffer document from the given size attributes.

        NOTE(review): the '{:d}' format specs require cpu, ram and
        root_disk to be ints — _get_disk is documented to return float,
        so confirm callers coerce disk sizes before reaching here.
        :raises ValueError: if extra_attributes is given without a prefix
        """
        resource_type = 'VM'
        resource_class = 'standard'
        instance_type = instance_type or ''
        instance_type_in_name = ' {0}'.format(
            instance_type) if instance_type else ''
        instance_type_in_description = ' ({0})'.format(
            instance_type) if instance_type else ''
        service_offer = {
            "name":
            "({0:d}/{1:d}/{2:d}{3} {4}) [{5}]".format(cpu, ram, root_disk,
                                                      instance_type_in_name,
                                                      os, country),
            "description":
            "{0} ({1}) with {2:d} vCPU, {3:d} MiB RAM, {4:d} GiB root disk, {5} [{6}]{7}"
            .format(resource_type, resource_class, cpu, ram, root_disk, os,
                    country, instance_type_in_description),
            "resource:vcpu": cpu,
            "resource:ram": ram,
            "resource:disk": root_disk,
            "resource:diskType": root_disk_type,
            "resource:type": resource_type,
            "resource:class": resource_class,
            "resource:country": country,
            "resource:platform": platform,
            "resource:operatingSystem": os,
            "resource:instanceType": instance_type,
            "price:unitCost": price,  # price price:currency/price:unitcode
            "price:unitCode": "HUR",
            "price:freeUnits": 0,
            "price:currency": base_currency,
            "price:billingUnitCode": billing_unit,  # Minimum time quantum for a resource
            "price:billingPeriodCode": "MON",  # A bill is sent every billingPeriodCode
            "connector": {
                "href": connector_instance_name
            },
            "acl": {
                "owner": {
                    "type": "ROLE",
                    "principal": "ADMIN"
                },
                "rules": [{
                    "principal": "USER",
                    "right": "VIEW",
                    "type": "ROLE"
                }, {
                    "principal": "ADMIN",
                    "right": "ALL",
                    "type": "ROLE"
                }]
            },
        }
        if extra_attributes:
            if not prefix:
                raise ValueError(
                    'A prefix has to be defined with _get_prefix() to specify extra_attributes'
                )
            # Namespace each extra attribute under the connector prefix.
            for k, v in extra_attributes.items():
                service_offer['{0}:{1}'.format(prefix, k)] = v
        return service_offer

    def _generate_service_offers(self, connector_instance_name):
        """Generate one offer per (vm_size, os, root_disk) combination."""
        service_offers = []
        for vm_size in self._list_vm_sizes():
            cpu = int(self._get_cpu(vm_size))
            ram = int(self._get_ram(vm_size))
            root_disk_type = self._get_root_disk_type(vm_size)
            instance_type = self._get_instance_type(vm_size)
            billing_unit = self._get_billing_unit(vm_size)
            platform = self._get_platform(vm_size)
            country = self._get_country()
            prefix = self._get_prefix()
            extra_attributes = self._get_extra_attributes(vm_size)
            # Fall back to the connector's cloud name as the platform.
            if not platform and self.cc:
                platform = self.cc.cloudName
            for os in self._get_supported_os(vm_size):
                for root_disk in self._get_root_disk_sizes(vm_size, os):
                    # Convert the raw price (if any) into the base currency.
                    price = None
                    raw_price, currency = self._get_price(
                        vm_size, os, root_disk)
                    if raw_price is not None:
                        price = self.convert_currency(currency,
                                                      self.base_currency,
                                                      raw_price)
                    service_offers.append(
                        self._generate_service_offer(
                            connector_instance_name, cpu, ram, root_disk,
                            root_disk_type, os, price, instance_type,
                            self.base_currency, billing_unit, country,
                            platform, prefix, extra_attributes))
        return service_offers

    def do_work(self):
        """Generate offers and sync them with the server (add/update/delete).

        With --dry-run the offers are only printed and no login or server
        mutation happens.
        """
        # Minimal ConfigHolder; context content appears unused by the connector.
        ch = ConfigHolder(options={
            'verboseLevel': 0,
            'retry': False,
            KEY_RUN_CATEGORY: ''
        }, context={'foo': 'bar'})
        self.cc = self.get_connector_class()(ch)
        self.cc._initialization(self.user_info,
                                **self.get_initialization_extra_kwargs())

        self.base_currency = self.get_option(self.BASE_CURRENCY_KEY)
        verbose = self.get_option('verbose')
        dry_run = self.get_option(self.DRY_RUN_KEY)
        ss_endpoint = self.get_option(self.SS_ENDPOINT_KEY)
        ss_username = self.get_option(self.SS_USERNAME_KEY)
        ss_password = self.get_option(self.SS_PASSWORD_KEY)
        connector_instance_name = self.get_option(self.CONNECTOR_NAME_KEY)

        # CIMI filter matching every VM offer belonging to this connector.
        filter_connector_vm = ' and '.join([
            'connector/href="{0}"'.format(connector_instance_name),
            'resource:type="VM"'
        ])

        self.ssapi = Api(endpoint=ss_endpoint, cookie_file=None, insecure=True)
        if not dry_run:
            self.ssapi.login_internal(ss_username, ss_password)
        self._initialize()

        service_offers = self._generate_service_offers(connector_instance_name)
        if not service_offers:
            raise RuntimeError("No service offer found")

        # Make sure the attribute namespaces used by the offers exist.
        if not dry_run and service_offers:
            self._add_service_attribute_namespace_if_not_exist('resource')
            self._add_service_attribute_namespace_if_not_exist('price')
            prefix = self._get_prefix()
            if prefix:
                self._add_service_attribute_namespace_if_not_exist(prefix)

        # Ids touched in this run; anything else matching the connector
        # filter is stale and deleted afterwards.
        service_offers_ids = set()
        for service_offer in service_offers:
            if dry_run:
                print('\nService offer {0}:\n{1}'.format(
                    service_offer['name'], service_offer))
            else:
                # Look for an existing offer with identical key attributes.
                cimi_filter = \
                    ' and '.join([filter_connector_vm,
                                  'resource:class="{0}"'.format(service_offer['resource:class']),
                                  'resource:vcpu={0}'.format(service_offer['resource:vcpu']),
                                  'resource:ram={0}'.format(service_offer['resource:ram']),
                                  'resource:disk={0}'.format(service_offer['resource:disk']),
                                  'resource:operatingSystem="{0}"'.format(service_offer['resource:operatingSystem']),
                                  'resource:country="{0}"'.format(service_offer['resource:country']),
                                  'resource:instanceType="{0}"'.format(service_offer['resource:instanceType'])])
                search_result = self.ssapi.cimi_search('serviceOffers',
                                                       filter=cimi_filter)
                result_list = search_result.resources_list
                result_count = len(result_list)
                if result_count == 0:
                    # No match: create a new offer.
                    if verbose:
                        print(
                            '\nAddinging the following service offer {0} to {1}...\n{2}'
                            .format(service_offer['name'], ss_endpoint,
                                    service_offer))
                    response = self.ssapi.cimi_add('serviceOffers',
                                                   service_offer)
                    service_offers_ids.add(response.json['resource-id'])
                elif result_count == 1:
                    # Exactly one match: update it in place.
                    if verbose:
                        print(
                            '\nUpdating the following service offer {0} to {1}...\n{2}'
                            .format(service_offer['name'], ss_endpoint,
                                    service_offer))
                    response = self.ssapi.cimi_edit(result_list[0].id,
                                                    service_offer)
                    service_offers_ids.add(response.id)
                else:
                    # Duplicates on the server: keep all of them and warn.
                    print(
                        '\n!!! Warning duplicates found of following service offer on {0} !!!\n{1}'
                        .format(ss_endpoint, service_offer['name']))
                    for result in result_list:
                        service_offers_ids.add(result.id)

        if not dry_run:
            # Delete connector offers that were not (re)generated above.
            response = self.ssapi.cimi_search('serviceOffers',
                                              filter=filter_connector_vm)
            old_service_offers_ids = set(r.id for r in response.resources())
            service_offers_ids_to_delete = old_service_offers_ids - service_offers_ids
            for id in service_offers_ids_to_delete:
                if verbose:
                    offer = self.ssapi.cimi_get(id)
                    print(
                        '\nDeleting the following service offer with id {0}...\n{1}'
                        .format(id, offer.json))
                self.ssapi.cimi_delete(id)

        print('\n\nCongratulation, executon completed.')

    def _set_command_specific_options(self, parser):
        """Register this command's CLI options on *parser*."""
        parser.add_option('--' + self.BASE_CURRENCY_KEY,
                          dest=self.BASE_CURRENCY_KEY,
                          help='Currency to use',
                          default='EUR',
                          metavar='CURRENCY')
        parser.add_option('--' + self.COUNTRY_KEY,
                          dest=self.COUNTRY_KEY,
                          help='Country where the Cloud reside',
                          default='Unknown',
                          metavar='COUNTRY')
        parser.add_option(
            '--' + self.CONNECTOR_NAME_KEY,
            dest=self.CONNECTOR_NAME_KEY,
            help=
            'Connector instance name to be used as a connector href for service offers',
            default=None,
            metavar='CONNECTOR_NAME')
        parser.add_option(
            '--' + self.SS_ENDPOINT_KEY,
            dest=self.SS_ENDPOINT_KEY,
            help=
            'SlipStream endpoint used where the service offers are pushed to. ' +
            '(default: https://nuv.la)',
            default='https://nuv.la',
            metavar='URL')
        parser.add_option('--' + self.SS_USERNAME_KEY,
                          dest=self.SS_USERNAME_KEY,
                          help='Username to be used on SlipStream Endpoint',
                          default=None,
                          metavar='USERNAME')
        parser.add_option('--' + self.SS_PASSWORD_KEY,
                          dest=self.SS_PASSWORD_KEY,
                          help='Password to be used on SlipStream Endpoint',
                          default=None,
                          metavar='PASSWORD')
        parser.add_option('--' + self.DRY_RUN_KEY,
                          dest=self.DRY_RUN_KEY,
                          help='Just print service offers to stdout and exit',
                          action='store_true')

    def _get_command_mandatory_options(self):
        """Options that must be supplied on the command line."""
        return [self.CONNECTOR_NAME_KEY]
def do_work(self):
    """Generate service offers for this connector and sync them to SlipStream.

    Flow: initialize the cloud connector, read command options, generate
    offer documents, then add/update them on the server and delete stale
    ones. With --dry-run the offers are only printed.
    """
    # Minimal ConfigHolder; context content appears unused by the connector.
    ch = ConfigHolder(options={'verboseLevel': 0,
                               'retry': False,
                               KEY_RUN_CATEGORY: ''},
                      context={'foo': 'bar'})
    self.cc = self.get_connector_class()(ch)
    self.cc._initialization(self.user_info,
                            **self.get_initialization_extra_kwargs())

    self.base_currency = self.get_option(self.BASE_CURRENCY_KEY)
    verbose = self.get_option('verbose')
    dry_run = self.get_option(self.DRY_RUN_KEY)
    ss_endpoint = self.get_option(self.SS_ENDPOINT_KEY)
    ss_username = self.get_option(self.SS_USERNAME_KEY)
    ss_password = self.get_option(self.SS_PASSWORD_KEY)
    connector_instance_name = self.get_option(self.CONNECTOR_NAME_KEY)

    # CIMI filter matching every VM offer belonging to this connector.
    filter_connector_vm = ' and '.join(['connector/href="{0}"'.format(connector_instance_name),
                                        'resource:type="VM"'])

    self.ssapi = Api(endpoint=ss_endpoint, cookie_file=None, insecure=True)
    if not dry_run:
        self.ssapi.login_internal(ss_username, ss_password)
    self._initialize()

    service_offers = self._generate_service_offers(connector_instance_name)
    if not service_offers:
        raise RuntimeError("No service offer found")

    # Make sure the attribute namespaces used by the offers exist.
    if not dry_run and service_offers:
        self._add_service_attribute_namespace_if_not_exist('resource')
        self._add_service_attribute_namespace_if_not_exist('price')
        prefix = self._get_prefix()
        if prefix:
            self._add_service_attribute_namespace_if_not_exist(prefix)

    # Ids of offers created/updated in this run; everything else matching
    # filter_connector_vm is considered stale and deleted at the end.
    service_offers_ids = set()
    for service_offer in service_offers:
        if dry_run:
            print('\nService offer {0}:\n{1}'.format(service_offer['name'],
                                                     service_offer))
        else:
            # Look for an existing offer with identical key attributes.
            cimi_filter = \
                ' and '.join([filter_connector_vm,
                              'resource:class="{0}"'.format(service_offer['resource:class']),
                              'resource:vcpu={0}'.format(service_offer['resource:vcpu']),
                              'resource:ram={0}'.format(service_offer['resource:ram']),
                              'resource:disk={0}'.format(service_offer['resource:disk']),
                              'resource:operatingSystem="{0}"'.format(service_offer['resource:operatingSystem']),
                              'resource:country="{0}"'.format(service_offer['resource:country']),
                              'resource:instanceType="{0}"'.format(service_offer['resource:instanceType'])])
            search_result = self.ssapi.cimi_search('serviceOffers',
                                                   filter=cimi_filter)
            result_list = search_result.resources_list
            result_count = len(result_list)
            if result_count == 0:
                # No match: create a new offer.
                if verbose:
                    print('\nAddinging the following service offer {0} to {1}...\n{2}'.format(service_offer['name'],
                                                                                              ss_endpoint,
                                                                                              service_offer))
                response = self.ssapi.cimi_add('serviceOffers', service_offer)
                service_offers_ids.add(response.json['resource-id'])
            elif result_count == 1:
                # Exactly one match: update it in place.
                if verbose:
                    print('\nUpdating the following service offer {0} to {1}...\n{2}'.format(service_offer['name'],
                                                                                             ss_endpoint,
                                                                                             service_offer))
                response = self.ssapi.cimi_edit(result_list[0].id,
                                                service_offer)
                service_offers_ids.add(response.id)
            else:
                # Duplicates on the server: keep them all (mark as live) and warn.
                print('\n!!! Warning duplicates found of following service offer on {0} !!!\n{1}'
                      .format(ss_endpoint, service_offer['name']))
                for result in result_list:
                    service_offers_ids.add(result.id)

    if not dry_run:
        # Delete connector offers that were not (re)generated in this run.
        response = self.ssapi.cimi_search('serviceOffers',
                                          filter=filter_connector_vm)
        old_service_offers_ids = set(r.id for r in response.resources())
        service_offers_ids_to_delete = old_service_offers_ids - service_offers_ids
        for id in service_offers_ids_to_delete:
            if verbose:
                offer = self.ssapi.cimi_get(id)
                print('\nDeleting the following service offer with id {0}...\n{1}'.format(id, offer.json))
            self.ssapi.cimi_delete(id)

    print('\n\nCongratulation, executon completed.')
class ServiceOffersCommand(CloudClientCommand): DEFAULT_TIMEOUT = 600 EXCHANGE_RATES_SERVICE_URL = 'https://api.exchangeratesapi.io/latest' RESOURCE_SERVICE_ATTRIBUTE_NAMESPACES = 'serviceAttributeNamespaces' DRY_RUN_KEY = 'dry-run' COUNTRY_KEY = 'country' SS_ENDPOINT_KEY = 'ss-url' SS_USERNAME_KEY = 'ss-user' SS_PASSWORD_KEY = 'ss-pass' BASE_CURRENCY_KEY = 'currency' CONNECTOR_NAME_KEY = 'connector-name' def __init__(self): super(ServiceOffersCommand, self).__init__() self.cc = None self.ssapi = None self.base_currency = 'EUR' self._exchange_rates = {} def _initialize(self): """ This method is called once command arguments have been parsed and self.cc and self.ssapi are available """ pass def _get_default_timeout(self): return self.DEFAULT_TIMEOUT def _list_vm_sizes(self): """ Return a list of available VM sizes. """ return self.cc._list_vm_sizes() if self.cc else None def _get_cpu(self, vm_size): """ Extract and return the amount of vCPU from the specified vm_size. :param vm_size: A 'size' object as in the list returned by _list_vm_sizes(). :rtype int """ return self.cc._size_get_cpu(vm_size) if self.cc else None def _get_ram(self, vm_size): """ Extract and return the size of the RAM memory in MB from the specified vm_size. :param vm_size: A 'size' object as in the list returned by _list_vm_sizes(). :rtype int """ return self.cc._size_get_ram(vm_size) if self.cc else None def _get_disk(self, vm_size): """ Extract and return the size of the root disk in GB from the specified vm_size. :param vm_size: A 'size' object as in the list returned by _list_vm_sizes(). :rtype float """ return self.cc._size_get_disk(vm_size) if self.cc else None def _get_instance_type(self, vm_size): """ Extract and return the instance type from the specified vm_size. :param vm_size: A 'size' object as in the list returned by _list_vm_sizes(). 
:rtype int """ return self.cc._size_get_instance_type(vm_size) if self.cc else None def _get_country(self): """ Return the 2-letters symbol of the country where the Cloud reside. """ return self.get_option(self.COUNTRY_KEY) def _get_supported_os(self, vm_size): """ Return a list of supported OS for the specified vm_size :param vm_size: A vm_size object as returned by the method _list_vm_sizes() of the connector """ return ['linux', 'windows'] def _get_price(self, vm_size, os, root_disk_size=None): """ Get the price for a give vm_size, OS and optionnal root disk size :param vm_size: A vm_size object as returned by the method _list_vm_sizes() of the connector :param os: The name of the operating system type (eg: 'linux', 'suse', 'windows') :param root_disk_size: The size of the root disk in GB :return: A tuple containing the price per hour and the currency. eg:(0.24, 'USD') ) """ return None, None def _get_root_disk_sizes(self, vm_size, os): """ Return a list of available root disk sizes for the given vm_size :param vm_size: A vm_size object as returned by the method _list_vm_sizes() of the connector :param os: The name of the operating system type (eg: 'linux', 'suse', 'windows') :return: A list of available disk sizes """ disk_size = self._get_disk(vm_size) if disk_size is not None and disk_size > 0: return [disk_size] return [10, 25, 50, 100, 200, 400, 600, 800, 1000, 1600, 2000, 4000, 6000, 8000, 10000] def _get_root_disk_type(self, vm_size): """ Return the type of the root disk (eg: HDD, SSD, EBS, ...) 
:param vm_size: A vm_size object as returned by the method _list_vm_sizes() of the connector """ return 'Unknown' def _get_billing_unit(self, vm_size): """ Return the billing period :param vm_size: A vm_size object as returned by the method _list_vm_sizes() of the connector """ return 'MIN' def _get_platform(self, vm_size): """ Return the name of platform :param vm_size: A vm_size object as returned by the method _list_vm_sizes() of the connector """ pass def _get_prefix(self): """ Return the prefix (namespace) to use for extra attributes :rtype: str """ pass def _get_extra_attributes(self, vm_size): """ Return the billing period :param vm_size: A vm_size object as returned by the method _list_vm_sizes() of the connector """ pass def get_exchange_rate(self, src_currency, dst_currency): if dst_currency not in self._exchange_rates: self._exchange_rates[dst_currency] = requests.get(self.EXCHANGE_RATES_SERVICE_URL, params={'base': dst_currency}).json().get('rates', {}) return 1.0 / self._exchange_rates.get(dst_currency, {}).get(src_currency) def convert_currency(self, src_currency, dst_currency, amount): return amount * self.get_exchange_rate(src_currency, dst_currency) if src_currency != dst_currency else amount @staticmethod def _generate_service_attribute_namespace(prefix, description=None, acl=None): if acl is None: acl = { "owner": { "principal": "ADMIN", "type": "ROLE" }, "rules": [{ "principal": "USER", "type": "ROLE", "right": "VIEW" }, { "type": "ROLE", "principal": "ADMIN", "right": "ALL" }] } san = { "prefix": prefix, "id": "service-attribute-namespace/" + prefix, "acl": acl, "resourceURI": "http://sixsq.com/slipstream/1/ServiceAttributeNamespace", "uri": "http://sixsq.com/slipstream/schema/1/" + prefix } if description is not None: san['description'] = description return san def _add_service_attribute_namespace_if_not_exist(self, prefix, description=None, acl=None): verbose = self.get_option('verbose') cimi_resp = 
            self.ssapi.cimi_search(self.RESOURCE_SERVICE_ATTRIBUTE_NAMESPACES,
                                   filter='prefix="{0}"'.format(prefix))
        # Only create the namespace document when none exists for this prefix.
        if cimi_resp.count == 0:
            service_attribute_namespace = self._generate_service_attribute_namespace(prefix, description, acl)
            if verbose:
                print('\nAddinging the following service attribute namespace {0} ...\n{1}'
                      .format(prefix, service_attribute_namespace))
            self.ssapi.cimi_add(self.RESOURCE_SERVICE_ATTRIBUTE_NAMESPACES, service_attribute_namespace)

    @staticmethod
    def _generate_service_offer(connector_instance_name, cpu, ram, root_disk, root_disk_type, os, price,
                                instance_type=None, base_currency='EUR', billing_unit='MIN', country=None,
                                platform=None, prefix=None, extra_attributes=None):
        # Build a single CIMI service-offer document for one VM size.
        # NOTE: the `os` parameter (operating system name) shadows the `os`
        # module inside this method; kept as-is to preserve the interface.
        # Units implied by the description format: ram in MiB, root_disk in GiB
        # -- TODO confirm against the connector implementations.
        resource_type = 'VM'
        resource_class = 'standard'
        # Normalize a missing instance type to '' so the format fragments below
        # collapse to empty strings.
        instance_type = instance_type or ''
        instance_type_in_name = ' {0}'.format(instance_type) if instance_type else ''
        instance_type_in_description = ' ({0})'.format(instance_type) if instance_type else ''
        service_offer = {
            "name": "({0:d}/{1:d}/{2:d}{3} {4}) [{5}]".format(cpu, ram, root_disk, instance_type_in_name,
                                                              os, country),
            "description": "{0} ({1}) with {2:d} vCPU, {3:d} MiB RAM, {4:d} GiB root disk, {5} [{6}]{7}"
                .format(resource_type, resource_class, cpu, ram, root_disk, os, country,
                        instance_type_in_description),
            "resource:vcpu": cpu,
            "resource:ram": ram,
            "resource:disk": root_disk,
            "resource:diskType": root_disk_type,
            "resource:type": resource_type,
            "resource:class": resource_class,
            "resource:country": country,
            "resource:platform": platform,
            "resource:operatingSystem": os,
            "resource:instanceType": instance_type,
            "price:unitCost": price,  # price price:currency/price:unitcode
            "price:unitCode": "HUR",
            "price:freeUnits": 0,
            "price:currency": base_currency,
            "price:billingUnitCode": billing_unit,  # Minimum time quantum for a resource
            "price:billingPeriodCode": "MON",  # A bill is sent every billingPeriodCode
            "connector": {"href": connector_instance_name},
            "acl": {
                "owner": {"type": "ROLE",
                          "principal": "ADMIN"},
                "rules": [{"principal": "USER",
                           "right": "VIEW",
                           "type": "ROLE"},
                          {"principal": "ADMIN",
                           "right": "ALL",
                           "type": "ROLE"}]},
        }
        # Connector-specific attributes are namespaced with the connector prefix.
        if extra_attributes:
            if not prefix:
                raise ValueError('A prefix has to be defined with _get_prefix() to specify extra_attributes')
            for k, v in extra_attributes.items():
                service_offer['{0}:{1}'.format(prefix, k)] = v
        return service_offer

    def _generate_service_offers(self, connector_instance_name):
        """Build one service-offer document per (vm_size, os, root_disk) combination."""
        service_offers = []
        for vm_size in self._list_vm_sizes():
            cpu = int(self._get_cpu(vm_size))
            ram = int(self._get_ram(vm_size))
            root_disk_type = self._get_root_disk_type(vm_size)
            instance_type = self._get_instance_type(vm_size)
            billing_unit = self._get_billing_unit(vm_size)
            platform = self._get_platform(vm_size)
            country = self._get_country()
            prefix = self._get_prefix()
            extra_attributes = self._get_extra_attributes(vm_size)
            # Fall back to the connector's cloud name when no platform is given.
            if not platform and self.cc:
                platform = self.cc.cloudName
            for os in self._get_supported_os(vm_size):
                for root_disk in self._get_root_disk_sizes(vm_size, os):
                    # Price stays None when the connector exposes no pricing.
                    price = None
                    raw_price, currency = self._get_price(vm_size, os, root_disk)
                    if raw_price is not None:
                        price = self.convert_currency(currency, self.base_currency, raw_price)
                    service_offers.append(self._generate_service_offer(connector_instance_name, cpu, ram,
                                                                       root_disk, root_disk_type, os, price,
                                                                       instance_type, self.base_currency,
                                                                       billing_unit, country, platform, prefix,
                                                                       extra_attributes))
        return service_offers

    def do_work(self):
        """Generate service offers from the connector and sync them to SlipStream.

        Adds missing offers, edits offers that already exist (exactly one
        match), warns on duplicates, and finally deletes stale offers for
        this connector. With --dry-run, only prints what would be pushed.
        """
        ch = ConfigHolder(options={'verboseLevel': 0, 'retry': False, KEY_RUN_CATEGORY: ''},
                          context={'foo': 'bar'})
        self.cc = self.get_connector_class()(ch)
        self.cc._initialization(self.user_info, **self.get_initialization_extra_kwargs())
        self.base_currency = self.get_option(self.BASE_CURRENCY_KEY)
        verbose = self.get_option('verbose')
        dry_run = self.get_option(self.DRY_RUN_KEY)
        ss_endpoint = self.get_option(self.SS_ENDPOINT_KEY)
        ss_username = self.get_option(self.SS_USERNAME_KEY)
        ss_password = self.get_option(self.SS_PASSWORD_KEY)
        connector_instance_name = \
            self.get_option(self.CONNECTOR_NAME_KEY)
        # Base filter matching every VM offer belonging to this connector.
        filter_connector_vm = ' and '.join(['connector/href="{0}"'.format(connector_instance_name),
                                            'resource:type="VM"'])
        self.ssapi = Api(endpoint=ss_endpoint, cookie_file=None, insecure=True)
        if not dry_run:
            self.ssapi.login_internal(ss_username, ss_password)
        self._initialize()
        service_offers = self._generate_service_offers(connector_instance_name)
        if not service_offers:
            raise RuntimeError("No service offer found")
        # Make sure the attribute namespaces referenced by the offers exist.
        if not dry_run and service_offers:
            self._add_service_attribute_namespace_if_not_exist('resource')
            self._add_service_attribute_namespace_if_not_exist('price')
            prefix = self._get_prefix()
            if prefix:
                self._add_service_attribute_namespace_if_not_exist(prefix)
        # Ids of offers added/updated in this run; everything else is stale.
        service_offers_ids = set()
        for service_offer in service_offers:
            if dry_run:
                print('\nService offer {0}:\n{1}'.format(service_offer['name'], service_offer))
            else:
                # Identity of an offer = connector + class + cpu + ram + disk
                # + OS + country + instance type.
                cimi_filter = \
                    ' and '.join([filter_connector_vm,
                                  'resource:class="{0}"'.format(service_offer['resource:class']),
                                  'resource:vcpu={0}'.format(service_offer['resource:vcpu']),
                                  'resource:ram={0}'.format(service_offer['resource:ram']),
                                  'resource:disk={0}'.format(service_offer['resource:disk']),
                                  'resource:operatingSystem="{0}"'.format(service_offer['resource:operatingSystem']),
                                  'resource:country="{0}"'.format(service_offer['resource:country']),
                                  'resource:instanceType="{0}"'.format(service_offer['resource:instanceType'])])
                search_result = self.ssapi.cimi_search('serviceOffers', filter=cimi_filter)
                result_list = search_result.resources_list
                result_count = len(result_list)
                if result_count == 0:
                    if verbose:
                        print('\nAddinging the following service offer {0} to {1}...\n{2}'
                              .format(service_offer['name'], ss_endpoint, service_offer))
                    response = self.ssapi.cimi_add('serviceOffers', service_offer)
                    service_offers_ids.add(response.json['resource-id'])
                elif result_count == 1:
                    if verbose:
                        print('\nUpdating the following service offer {0} to {1}...\n{2}'
                              .format(service_offer['name'], ss_endpoint, service_offer))
                    response = \
                        self.ssapi.cimi_edit(result_list[0].id, service_offer)
                    service_offers_ids.add(response.id)
                else:
                    # More than one match: keep all duplicates (do not delete
                    # them below) but warn the operator.
                    print('\n!!! Warning duplicates found of following service offer on {0} !!!\n{1}'
                          .format(ss_endpoint, service_offer['name']))
                    for result in result_list:
                        service_offers_ids.add(result.id)
        if not dry_run:
            # Remove offers of this connector that were not seen in this run.
            response = self.ssapi.cimi_search('serviceOffers', filter=filter_connector_vm)
            old_service_offers_ids = set(r.id for r in response.resources())
            service_offers_ids_to_delete = old_service_offers_ids - service_offers_ids
            for id in service_offers_ids_to_delete:
                if verbose:
                    offer = self.ssapi.cimi_get(id)
                    print('\nDeleting the following service offer with id {0}...\n{1}'.format(id, offer.json))
                self.ssapi.cimi_delete(id)
        print('\n\nCongratulation, executon completed.')

    def _set_command_specific_options(self, parser):
        """Register the CLI options specific to this command on *parser*."""
        parser.add_option('--' + self.BASE_CURRENCY_KEY, dest=self.BASE_CURRENCY_KEY,
                          help='Currency to use', default='EUR', metavar='CURRENCY')
        parser.add_option('--' + self.COUNTRY_KEY, dest=self.COUNTRY_KEY,
                          help='Country where the Cloud reside', default='Unknown', metavar='COUNTRY')
        parser.add_option('--' + self.CONNECTOR_NAME_KEY, dest=self.CONNECTOR_NAME_KEY,
                          help='Connector instance name to be used as a connector href for service offers',
                          default=None, metavar='CONNECTOR_NAME')
        parser.add_option('--' + self.SS_ENDPOINT_KEY, dest=self.SS_ENDPOINT_KEY,
                          help='SlipStream endpoint used where the service offers are pushed to. ' +
                               '(default: https://nuv.la)',
                          default='https://nuv.la', metavar='URL')
        parser.add_option('--' + self.SS_USERNAME_KEY, dest=self.SS_USERNAME_KEY,
                          help='Username to be used on SlipStream Endpoint', default=None, metavar='USERNAME')
        parser.add_option('--' + self.SS_PASSWORD_KEY, dest=self.SS_PASSWORD_KEY,
                          help='Password to be used on SlipStream Endpoint', default=None, metavar='PASSWORD')
        parser.add_option('--' + self.DRY_RUN_KEY, dest=self.DRY_RUN_KEY,
                          help='Just print service offers to stdout and exit', action='store_true')

    def _get_command_mandatory_options(self):
        """Options that must be provided on the command line."""
        return [self.CONNECTOR_NAME_KEY]
from slipstream.api import Api from pprint import pprint as pp api = Api() api.login('simon1992', '12mc0v2ee64o9') ''' This module provides methods to access and request SlipStream Service-Offers and also S3 buckets. The API and library used are respectively 'CIMI' and Boto. ''' url = api.endpoint + "/api/service-offer?$filter=" def _check_str_list(data): if isinstance(data, unicode) or isinstance(data, str): data = [data] return data def _join_attributes(attr, operator): attr = _check_str_list(attr) return (' ' + operator + ' ').join(attr) def _format_data_resource(data): #data = _check_str_list(data) return (["resource:class='%s.SAFE'" % prod.strip() for prod in data]) def request_data(specs, data): """ :param specs: Specs used as filter to narrpw specifically the 'DATA' service-offer :type
def __init__(self, key, secret=None, secure=True, host='nuv.la', port=None, api_version=None, **kwargs): """ Instanciate a SlipStream node driver. :param key: Username or API key :type key: ``str`` :param secret: Password or Secret key :type secret: ``str`` :param secure: Use secure (HTTPS) connection :type secure: ``bool`` :param host: Hostname of the SlipStream endpoint (default: nuv.la) :type host: ``str`` :param port: Port of the SlipStream endpoint (default: 443 if secure else 80) :type port: ``int`` :param api_version: [Unused] :type api_version: ``str`` :keyword ex_endpoint: The SlipStream endpoint (example: https://nuv.la) :type ex_endpoint: ``str`` :keyword ex_cookie_file: Path to a existing cookie file to use instead of key and secret :type ex_cookie_file: ``str`` :keyword ex_login_method: Login method (internal for username/password and api-key for key/secret) :type ex_login_method: ``str`` :keyword ex_login_parameters: Extra parameters to provide to the login method :type ex_login_parameters: ``dict`` """ insecure = not secure endpoint = kwargs.get('ex_endpoint') cookie_file = kwargs.get('ex_cookie_file') login_method = kwargs.get('ex_login_method', 'internal') login_parameters = kwargs.get('ex_login_parameters', {}) if not endpoint: scheme = 'https' if secure else 'http' port = ':{}'.format(port) if port else '' endpoint = '{}://{}{}'.format(scheme, host, port) self.ss_api = Api(endpoint=endpoint, cookie_file=cookie_file, insecure=insecure) if not cookie_file: login_params = {} if login_parameters: login_params.update(login_parameters) if login_method: login_params['href'] = 'session-template/{}'.format(login_method) if login_method == 'internal': if key: login_params['username'] = key if secret: login_params['password'] = secret elif login_method == 'api-key': if key: login_params['key'] = key if secret: login_params['secret'] = secret self.ss_api.login(login_params)
class MainProgram(CommandBase):
    """A command-line program to migrate reports for SS v3.45->3.46.

    Walks /var/tmp/slipstream/reports, selects archives modified within the
    last N months, creates an externalObject resource for each one and uploads
    the archive to the returned URL.
    """

    def __init__(self, argv=None):
        # Local server API client and direct JDBC access to the HSQLDB run table.
        self.api = Api('https://localhost', insecure=True)
        self.conn = jaydebeapi.connect("org.hsqldb.jdbcDriver",
                                       "jdbc:hsqldb:hsql://localhost/slipstream",
                                       ["SA", ""],
                                       "/opt/slipstream/cimi/lib/hsqldb-2.3.4.jar")
        super(MainProgram, self).__init__(argv)

    def parse(self):
        """Define and parse the command-line options."""
        usage = '''usage: %prog [options]. This script should be run directly on slipstream server host.
Please authenticate with ss-login before executing this script with endpoint=https://localhost'.
'''
        self.parser.usage = usage
        self.parser.add_option('--months', dest='months',
                               help='Number of months in past from modification time of a report. '
                                    '(default 12 months)',
                               default=12, type=int)
        self.options, self.args = self.parser.parse_args()

    @staticmethod
    def get_all_existing_reports():
        """Return the paths of all *.tgz report archives on disk."""
        return [y for x in os.walk('/var/tmp/slipstream/reports')
                for y in glob(os.path.join(x[0], '*.tgz'))]

    @staticmethod
    def filter_reports_updated_since_less_than(reports, months):
        """Keep only reports whose mtime is within the last *months* months."""
        now = int(time.time())
        # 2628002.88 s = average month (365.0004 days / 12).
        months_to_seconds = int(months * 2628002.88)
        after_time = now - months_to_seconds
        return [report for report in reports if os.path.getmtime(report) > after_time]

    def create_external_object_report(self, report):
        """Create an externalObject resource for *report* and return its resource id.

        :raises Exception: when the run owner cannot be found in the database.
        """
        # Report paths look like /var/tmp/slipstream/reports/<run-uuid>/<name>.tgz
        report_path_split = report.split('/')
        uuid = report_path_split[5]
        report_name = report_path_split[6]
        node_name = report_name.split('_')[0]
        self.db = self.conn.cursor()
        # Parameterized query (PEP 249 qmark style) instead of string
        # interpolation -- avoids quoting/injection issues.
        self.db.execute("select USER_ from RUN where UUID=?", [uuid])
        db_res = self.db.fetchone()
        if db_res is None:
            raise Exception('Warning: owner not found for following report: {}'.format(uuid))
        owner = db_res[0]
        report_object = {'externalObjectTemplate':
                         {'href': 'external-object-template/report',
                          'runUUID': uuid,
                          'component': node_name,
                          'name': report_name,
                          'acl': {'owner': {'principal': owner,
                                            'type': 'USER'},
                                  'rules': [{'principal': owner,
                                             'right': 'MODIFY',
                                             'type': 'USER'},
                                            {'principal': 'ADMIN',
                                             'right': 'ALL',
                                             'type': 'ROLE'}]}}}
        resp = self.api.cimi_add('externalObjects', report_object)
        return resp.json['resource-id']

    def generate_upload_url_external_object_report(self, resource_id):
        """Ask the server for a pre-signed upload URL for the external object."""
        resp = self.api.cimi_operation(resource_id,
                                       "http://sixsq.com/slipstream/1/action/upload")
        return resp.json['uri']

    def upload_report(self, url, report):
        """PUT the report archive to *url*; raise on HTTP errors."""
        # 'with' closes the file handle even on failure (was leaked before).
        with open(report, 'rb') as report_file:
            report_file_data = report_file.read()
        response = put(url, data=report_file_data)
        response.raise_for_status()

    def migrate_report(self, report):
        """Migrate a single report: create resource, get URL, upload."""
        print('Migrating {}'.format(report))
        resource_id = self.create_external_object_report(report)
        upload_url = self.generate_upload_url_external_object_report(resource_id)
        self.upload_report(upload_url, report)

    def doWork(self):
        """Migrate all recent reports, best-effort, and print a summary."""
        print(SEPARATOR + 'Starting migration of reports...' + SEPARATOR)
        all_reports = self.get_all_existing_reports()
        print('All reports count: {}'.format(len(all_reports)))
        reports_to_migrate = self.filter_reports_updated_since_less_than(all_reports,
                                                                         self.options.months)
        print('Number of reports updated in last {} months: {}'
              .format(self.options.months, len(reports_to_migrate)) + SEPARATOR)
        success = 0
        for report in reports_to_migrate:
            try:
                self.migrate_report(report)
                success += 1
            except Exception as e:
                # Was `e.message`: removed in Python 3 (deprecated by PEP 352);
                # formatting the exception itself is portable.
                print("Failed to migrate this report: {} with error => {}".format(report, e))
        print(SEPARATOR + "Migration completed, {}/{} reports successfully migrated!"
              .format(success, len(reports_to_migrate)))
        exit(0)
def __init__(self, cimi_api_url="https://nuv.la/api"): self.url = cimi_api_url self.api = Api(endpoint='https://{}'.format(cimi_api_url), insecure=True, reauthenticate=True)
''' This script runs a process which populates Slipstream service catalog with SENTINEL-1 data from S3 buckets. ''' from slipstream.api import Api from pprint import pprint as pp from xml.etree import ElementTree import os, re, sys, math, requests, string # Connect to Nuvla account api = Api() api.login('<nuvla_login>', '<nuvla_password>') # INPUT ARGS FORMAT : ( "host_url", "bucket_name") # MANUAL INPUTS connectors = { 'sos.exo.io' : 'exoscale-ch-gva', 's3-eu-west-1.amazonaws.com' : 'ec2-eu-west'} def ls_bucket(host, bucket): """ :param host: URL of S3 storage :type host: str :param bucket: bucket unique name :type bucket: str """ response = requests.get(host + '/' + bucket) tree = ElementTree.fromstring(response.content) regex = re.compile('S1(.+?)SAFE') host_name = re.match(r"https://(.*)", host).group(0)[8:]
from __future__ import division from elasticsearch import Elasticsearch from pprint import pprint as pp import requests import sys from slipstream.api import Api import lib_access as la import server_dmm as srv_dmm import summarizer as summ import math api = Api() index = 'sar' type = 'eo-proc' server_host = 'localhost' res = Elasticsearch([{'host': 'localhost', 'port': 9200}]) def query_db(cloud, time, offer): query = { "query": { "range": { "%s.execution_time" % offer: { "gte": 0, } } } } return (res.get(index=index, doc_type=type, id=cloud))
def __init__(self, argv=None): self.api = Api('https://localhost', insecure=True) self.conn = jaydebeapi.connect("org.hsqldb.jdbcDriver", "jdbc:hsqldb:hsql://localhost/slipstream", ["SA", ""], "/opt/slipstream/cimi/lib/hsqldb-2.3.4.jar") super(MainProgram, self).__init__(argv)
class Base(object):
    """Common scaffolding for SlipStream job-processing daemons.

    Parses CLI arguments, configures logging, installs SIGTERM/SIGINT
    handlers, and (in execute()) logs in to SlipStream and connects to
    ZooKeeper before delegating to the subclass's do_work().
    """

    def __init__(self):
        self.args = None
        self._init_args_parser()
        self._kz = None          # KazooClient, created in execute()
        self.ss_api = None       # SlipStream Api, created in execute()
        self.name = None
        self.stop_event = threading.Event()
        self._init_logger()
        # Both signals set the stop event and exit via on_exit().
        signal.signal(signal.SIGTERM, partial(Base.on_exit, self.stop_event))
        signal.signal(signal.SIGINT, partial(Base.on_exit, self.stop_event))

    def _init_args_parser(self):
        """Build the argument parser, let subclasses extend it, and parse argv."""
        parser = argparse.ArgumentParser(description='Process SlipStream jobs')
        required_args = parser.add_argument_group('required named arguments')
        parser.add_argument(
            '--zk-hosts', dest='zk_hosts', metavar='HOSTS',
            default='127.0.0.1:2181',
            help='Comma separated list of ZooKeeper hosts to connect (default: 127.0.0.1:2181)')
        parser.add_argument(
            '--ss-url', dest='ss_url',
            help='SlipStream endpoint to connect to (default: https://nuv.la)',
            default='https://nuv.la', metavar='URL')
        required_args.add_argument('--ss-user', dest='ss_user',
                                   help='SlipStream username',
                                   metavar='USERNAME', required=True)
        required_args.add_argument('--ss-pass', dest='ss_pass',
                                   help='SlipStream Password',
                                   metavar='PASSWORD', required=True)
        parser.add_argument('--ss-insecure', dest='ss_insecure', default=False,
                            action='store_true',
                            help='Do not check SlipStream certificate')
        parser.add_argument('--name', dest='name', metavar='NAME', default=None,
                            help='Base name for this process')
        self._set_command_specific_options(parser)
        self.args = parser.parse_args()

    def _set_command_specific_options(self, parser):
        """Hook for subclasses to register extra CLI options."""
        pass

    @staticmethod
    def _init_logger():
        """Configure root and third-party loggers."""
        format_log = logging.Formatter(
            '%(asctime)s - %(levelname)s - %(threadName)s - '
            '%(filename)s:%(lineno)s - %(message)s')
        logger = logging.getLogger()
        # Bug fix: handlers[0] raised IndexError when logging had not been
        # configured yet (root logger starts with no handlers); make sure a
        # handler exists before re-formatting it.
        if not logger.handlers:
            logging.basicConfig()
        logger.handlers[0].setFormatter(format_log)
        logger.setLevel(logging.INFO)
        logging.getLogger('kazoo').setLevel(logging.WARN)
        logging.getLogger('elasticsearch').setLevel(logging.WARN)
        logging.getLogger('slipstream').setLevel(logging.INFO)
        logging.getLogger('urllib3').setLevel(logging.WARN)

    @staticmethod
    def on_exit(stop_event, signum, frame):
        """Signal handler: flag the stop event and terminate the process."""
        print('\n\nExecution interrupted by the user!')
        stop_event.set()
        sys.exit(0)

    def do_work(self):
        """Subclasses implement the actual job processing here."""
        raise NotImplementedError()

    def execute(self):
        """Login to SlipStream, connect to ZooKeeper, then run do_work()."""
        # Pick a human-friendly process name when none was given.
        # NOTE(review): int(random.uniform(1, len(names) - 1)) never selects
        # names[0] and is biased at the edges -- random.choice(names) was
        # probably intended; confirm before changing.
        self.name = self.args.name if self.args.name is not None else names[
            int(random.uniform(1, len(names) - 1))]
        self.ss_api = Api(endpoint=self.args.ss_url,
                          insecure=self.args.ss_insecure,
                          reauthenticate=True)
        self.ss_api.login_internal(self.args.ss_user, self.args.ss_pass)
        # Retry forever: the daemon must survive ZooKeeper outages.
        self._kz = KazooClient(self.args.zk_hosts,
                               connection_retry=KazooRetry(max_tries=-1),
                               command_retry=KazooRetry(max_tries=-1),
                               timeout=30.0)
        self._kz.start()
        self.do_work()
        # Workers run in background threads; keep the main thread alive so
        # the signal handlers can fire.
        while True:
            signal.pause()
# Read the configuration. # config = ConfigParser.RawConfigParser() config.read(context_file) api_key = config.get('contextualization', 'api_key') api_secret = config.get('contextualization', 'api_secret') service_url = config.get('contextualization', 'serviceurl') deployment_id = config.get('contextualization', 'diid') # # Setup the SlipStream API. # api = Api(endpoint=service_url) api.login_apikey(api_key, api_secret) # Recover deployment information. deployment = api.cimi_get(deployment_id) try: service_offers = deployment.json['serviceOffers'] except KeyError: service_offers = [] # Recover credential for mounting buckets. depl_params = api.cimi_search('deploymentParameters', filter=deployment_params_filter.format(
class SlipStreamNodeDriver(NodeDriver):
    """
    SlipStream node driver

    Note: This driver manages KeyPair in a slightly different way than others.
          All configured key pairs are added to VMs at the creation of VMs.
    """

    name = 'SlipStream'
    type = 'slipstream'
    website = 'https://sixsq.com/slipstream'
    features = {'create_node': []}

    # Maps SlipStream deployment and virtual-machine states to libcloud states.
    NODE_STATE_MAP = {
        # Deployment states
        'initializing': NodeState.PENDING,
        'provisioning': NodeState.REBOOTING,  # STARTING
        'executing': NodeState.RUNNING,  # RECONFIGURING
        'sendingReports': NodeState.RUNNING,  # RECONFIGURING
        'ready': NodeState.RUNNING,
        'finalizing': NodeState.RUNNING,  # STOPPING
        'done': NodeState.TERMINATED,
        'aborted': NodeState.ERROR,
        'cancelled': NodeState.TERMINATED,
        # VirtualMachine states
        'rebooting': NodeState.REBOOTING,
        'poweroff': NodeState.STOPPED,
        'running': NodeState.RUNNING,
        'stopped': NodeState.STOPPED,
        'deleted': NodeState.TERMINATED,
        'terminated': NodeState.TERMINATED,
        'error': NodeState.ERROR,
        'stopping': NodeState.RUNNING,
        'failed': NodeState.ERROR,
        'pending': NodeState.PENDING,
        'paused': NodeState.PAUSED,
        'suspended': NodeState.PAUSED,
    }

    def __init__(self, key, secret=None, secure=True, host='nuv.la', port=None,
                 api_version=None, **kwargs):
        """
        Instantiate a SlipStream node driver.

        :param key: Username or API key
        :type key: ``str``
        :param secret: Password or Secret key
        :type secret: ``str``
        :param secure: Use secure (HTTPS) connection
        :type secure: ``bool``
        :param host: Hostname of the SlipStream endpoint (default: nuv.la)
        :type host: ``str``
        :param port: Port of the SlipStream endpoint (default: 443 if secure else 80)
        :type port: ``int``
        :param api_version: [Unused]
        :type api_version: ``str``

        :keyword ex_endpoint: The SlipStream endpoint (example: https://nuv.la)
        :type ex_endpoint: ``str``
        :keyword ex_cookie_file: Path to an existing cookie file to use instead
                                 of key and secret
        :type ex_cookie_file: ``str``
        :keyword ex_login_method: Login method (internal for username/password
                                  and api-key for key/secret)
        :type ex_login_method: ``str``
        :keyword ex_login_parameters: Extra parameters to provide to the login
                                      method
        :type ex_login_parameters: ``dict``
        """
        insecure = not secure
        endpoint = kwargs.get('ex_endpoint')
        cookie_file = kwargs.get('ex_cookie_file')
        login_method = kwargs.get('ex_login_method', 'internal')
        login_parameters = kwargs.get('ex_login_parameters', {})
        if not endpoint:
            scheme = 'https' if secure else 'http'
            port = ':{}'.format(port) if port else ''
            endpoint = '{}://{}{}'.format(scheme, host, port)
        self.ss_api = Api(endpoint=endpoint, cookie_file=cookie_file,
                          insecure=insecure)
        # A cookie file replaces credential-based login.
        if not cookie_file:
            login_params = {}
            if login_parameters:
                login_params.update(login_parameters)
            if login_method:
                login_params['href'] = 'session-template/{}'.format(login_method)
            if login_method == 'internal':
                if key:
                    login_params['username'] = key
                if secret:
                    login_params['password'] = secret
            elif login_method == 'api-key':
                if key:
                    login_params['key'] = key
                if secret:
                    login_params['secret'] = secret
            self.ss_api.login(login_params)

    def list_nodes(self):
        """
        List Nodes (SlipStream deployments)

        :return: List of node objects
        :rtype: ``list`` of :class:`Node`
        """
        deployments = self.ss_api.list_deployments(limit=500)
        return [self._deployment_to_node(depl) for depl in deployments]

    def list_sizes(self, location=None):
        """
        List Sizes (SlipStream service offers)

        :param location: Return only sizes for the specified location
        :type location: :class:`NodeLocation`

        :return: List of node size objects
        :rtype: ``list`` of :class:`.NodeSize`
        """
        # Renamed local 'filter' -> 'cimi_filter': it shadowed the builtin.
        cimi_filter = 'resource:type="VM"'
        if location:
            cimi_filter += ' and connector/href = "{}"'.format(location.name)
        service_offers = self.ss_api.cimi_search('serviceOffers', filter=cimi_filter)
        return [self._service_offer_to_size(so)
                for so in service_offers.json.get('serviceOffers', [])]

    def list_locations(self):
        """
        List Locations (SlipStream cloud connectors)

        :return: List of node location objects
        :rtype: ``list`` of :class:`NodeLocation`
        """
        return [self._cloud_to_location(cloud)
                for cloud in self.ss_api.get_user().configured_clouds]

    def create_node(self, **kwargs):
        """Create a new Node (deploy an application or a component)

        :keyword name: Name of the node (set as a SlipStream Tag). (optional)
        :type name: ``str``

        :keyword size: Size of Cloud resources (SlipStream service offer). (optional)
                       If not provided the default of each VM will be used.
                       If provided the size will be applied to all VM.
        :type size: :class:`NodeSize`

        :keyword image: Image to deploy (SlipStream application or component). (required)
        :type image: :class:`NodeImage`

        :keyword location: Location where to create the node (SlipStream cloud). (optional)
                           If provided all VM will be started in the specified location.
                           If not provided the default location will be used.
        :type location: :class:`NodeLocation`

        :keyword ex_tags: List of tags that can be used to identify or annotate a node.
        :type ex_tags: ``str`` or ``list``

        :keyword ex_cloud: To be used instead of location to specify the Cloud name on
                           which to start VMs. To deploy a component simply specify the
                           Cloud name as a string. To deploy a deployment specify a dict
                           with the nodenames as keys and Cloud names as values.
        :type ex_cloud: ``str`` or ``dict``

        :keyword ex_parameters: Parameters to (re)define for this image.
                                To redefine a parameter of a SlipStream application's
                                node use "<nodename>" as keys and dict of parameters as
                                values. To redefine a parameter of a SlipStream component
                                or a global parameter use "<parametername>" as the key.
        :type ex_parameters: ``dict``

        :keyword ex_keep_running: [Only applies to SlipStream applications] Define when
                                  to terminate or not a deployment when it reaches the
                                  'Ready' state. If scalable is set to True, this value
                                  is ignored and it will behave as if it was set to
                                  'always'.
        :type ex_keep_running: 'always' or 'never' or 'on-success' or 'on-error'

        :keyword ex_multiplicity: [Only applies to SlipStream applications] A dict to
                                  specify how many instances to start per application's
                                  node. Application's nodenames as keys and number of
                                  instances to start as values.
        :type ex_multiplicity: ``dict``

        :keyword ex_tolerate_failures: [Only applies to SlipStream applications] A dict
                                       to specify how many failures to tolerate per
                                       application's node. Nodenames as keys and number
                                       of failures to tolerate as values.
        :type ex_tolerate_failures: ``dict``

        :keyword ex_check_ssh_key: Set it to True if you want the SlipStream server to
                                   check if you have a public ssh key defined. Useful if
                                   you want to ensure you will have access to VMs.
        :type ex_check_ssh_key: ``bool``

        :keyword ex_scalable: [Only applies to SlipStream applications]
        :type ex_scalable: True to start a scalable deployment. (default: False)

        :return: The newly created node.
        :rtype: :class:`Node`
        """
        name = kwargs.get('name')
        size = kwargs.get('size')
        image = kwargs.get('image')
        location = kwargs.get('location')
        tags = kwargs.get('ex_tags', [])
        cloud = kwargs.get('ex_cloud')
        parameters = kwargs.get('ex_parameters', {})
        keep_running = kwargs.get('ex_keep_running')
        multiplicity = kwargs.get('ex_multiplicity')
        tolerate_failures = kwargs.get('ex_tolerate_failures')
        check_ssh_key = kwargs.get('ex_check_ssh_key', False)
        scalable = kwargs.get('ex_scalable', False)

        path = image.id
        element = self.ss_api.get_element(path)

        # Translate a libcloud location into SlipStream's cloud mapping:
        # applications need one entry per node, components a plain string.
        if not cloud and location:
            if element.type == 'application':
                cloud = {}
                for element_node in self.ss_api.get_application_nodes(path):
                    cloud[element_node.name] = location.name
            else:
                cloud = location.name

        # Apply the requested size as a 'service-offer' parameter, without
        # overriding one the caller pinned explicitly.
        if size:
            if element.type == 'application':
                for app_node in self.ss_api.get_application_nodes(path):
                    node_params = parameters.setdefault(app_node.name, {})
                    if 'service-offer' not in node_params:
                        node_params['service-offer'] = size.id
            else:
                if 'service-offer' not in parameters:
                    parameters['service-offer'] = size.id

        tags = [tags] if isinstance(tags, basestring) else tags
        if name:
            tags = [name] + tags

        node_id = self.ss_api.deploy(path=path,
                                     cloud=cloud,
                                     parameters=parameters,
                                     tags=tags,
                                     keep_running=keep_running,
                                     scalable=scalable,
                                     multiplicity=multiplicity,
                                     tolerate_failures=tolerate_failures,
                                     check_ssh_key=check_ssh_key)
        return self.ex_get_node(node_id)

    def destroy_node(self, node):
        """
        Destroy a node.

        :param node: The node to be destroyed
        :type node: :class:`Node`

        :return: True if the destroy was successful, False otherwise.
        :rtype: ``bool``
        """
        try:
            return self.ss_api.terminate(node.id)
        except Exception:
            # Bug fix: RuntimeWarning was previously passed as a third
            # argument to str.format() (silently ignored) instead of being the
            # warnings.warn() category.
            warnings.warn('Exception while destroying node "{}": {}'
                          .format(node.name, traceback.format_exc()),
                          RuntimeWarning)
            return False

    def list_images(self, location=None, ex_path=None, ex_recurse=False):
        """
        List images (SlipStream components and applications)

        :param location: [NOT IMPLEMENTED] Return only images for the
                         specified location
        :type location: :class:`NodeLocation`

        :param ex_path: Path on which to search for images. (optional)
                        If not provided it will list the content of the App Store.
        :type ex_path: ``str``

        :param ex_recurse: Recurse into subprojects. (default: False)
                           Setting this value to True can be expensive.

        :return: list of node image objects.
        :rtype: ``list`` of :class:`NodeImage`
        """
        if ex_path is None:
            elements = self.ss_api.list_applications()
        else:
            ex_path = ex_path.lstrip('/')
            elements = self.ss_api.list_project_content(path=ex_path, recurse=ex_recurse)
        return [self._element_to_image(el) for el in elements
                if el.type in ['component', 'application']]

    def delete_image(self, node_image):
        """
        Deletes a node image from a provider.

        :param node_image: Node image object.
        :type node_image: :class:`NodeImage`

        :return: ``True`` if delete_image was successful, ``False`` otherwise.
        :rtype: ``bool``
        """
        try:
            return self.ss_api.delete_element(path=node_image.id)
        except Exception:
            # Bug fixes: the message referenced undefined 'node' instead of
            # 'node_image', and RuntimeWarning was passed to str.format()
            # instead of warnings.warn().
            warnings.warn('Exception while deleting image "{}": {}'
                          .format(node_image.name, traceback.format_exc()),
                          RuntimeWarning)
            return False

    def get_image(self, image_id):
        """
        Get an image from its image_id

        :param image_id: Image ID (SlipStream path)
        :type image_id: ``str``

        :return: NodeImage instance on success.
        :rtype: :class:`NodeImage`:
        """
        return self._element_to_image(self.ss_api.get_element(path=image_id))

    def list_key_pairs(self):
        """
        List all the available key pair objects.

        :return: List of configured key pairs
        :rtype: ``list`` of :class:`KeyPair` objects
        """
        return [self._ssh_public_key_to_key_pair(kp)
                for kp in self.ss_api.get_user().ssh_public_keys if kp]

    def get_key_pair(self, name):
        """
        Retrieve a single key pair.

        :param name: Name of the key pair to retrieve.
        :type name: ``str``

        :return: A key pair
        :rtype: :class:`KeyPair`
        """
        return self._list_key_pairs_by_names().get(name)

    def create_key_pair(self, name):
        """
        Create a new key pair object.
        This operation requires a working PyCrypto installation with RSA object

        :param name: Key pair name.
        :type name: ``str``
        """
        if not have_pycrypto:
            raise RuntimeError('create_key_pair require pyCrypto')
        rsa_keypair = RSA.generate(2048)
        private_key_pem = rsa_keypair.exportKey()
        public_key_openssh = rsa_keypair.publickey().exportKey(format='OpenSSH')
        key_pair = self._ssh_public_key_to_key_pair(public_key_openssh, name)
        key_pair.private_key = private_key_pem
        self._add_ssh_public_key(key_pair.public_key)
        return key_pair

    def import_key_pair_from_string(self, name, key_material):
        """
        Import a new public key from string.

        :param name: Key pair name.
        :type name: ``str``

        :param key_material: Public key material.
        :type key_material: ``str``

        :return: The key pair
        :rtype: :class:`KeyPair` object
        """
        key_pair = self._ssh_public_key_to_key_pair(key_material, name)
        self._add_ssh_public_key(key_pair.public_key)
        return key_pair

    def import_key_pair_from_file(self, name, key_file_path):
        """
        Import a new public key from a file.

        :param name: Key pair name.
        :type name: ``str``

        :param key_file_path: Path to the public key file.
        :type key_file_path: ``str``

        :return: The key pair
        :rtype: :class:`KeyPair` object
        """
        with open(key_file_path, 'r') as f:
            ssh_public_key = f.read()
        return self.import_key_pair_from_string(name, ssh_public_key)

    def delete_key_pair(self, key_pair):
        """
        Delete an existing key pair.

        :param key_pair: Key pair object.
        :type key_pair: :class:`KeyPair`
        """
        key_pairs = self._list_key_pairs_by_names()
        del key_pairs[key_pair.name]
        # The user's keys are stored as one newline-separated string.
        ssh_public_keys = '\n'.join([kp.public_key for kp in key_pairs.values()])
        return self.ss_api.update_user(ssh_public_keys=ssh_public_keys)

    def ex_get_node(self, node_id):
        """
        Get a node from its ID

        :param node_id: ID of the node to retrieve
        :type node_id: ``str`` or :class:`UUID`

        :return: The requested node
        :rtype: :class:`Node`
        """
        return self._deployment_to_node(self.ss_api.get_deployment(node_id))

    def ex_wait_node_in_state(self, node, states='Ready', wait_period=10,
                              timeout=600, ignore_abort=False):
        """
        Wait for a node to be in one of the specified states (default: Ready)

        :param node: The node to watch
        :type node: :class:`Node`

        :param states: The names of the states to wait for. (default: Ready)
        :type states: ``str`` or ``list``

        :param wait_period: How many seconds to wait between each loop
                            iteration. (default: 10)
        :type wait_period: ``int``

        :param timeout: How many seconds to wait before giving up. (default: 600)
        :type timeout: ``int``

        :param ignore_abort: If False, raise an exception if the node has
                             failed (default: False)
        :type ignore_abort: ``bool``

        :return: The state that was reached or raise a LibcloudError if timeout
        :rtype: ``str``
        """
        _states = [states] if isinstance(states, basestring) else states
        deadline = time.time() + timeout
        while time.time() < deadline:
            state = self.ss_api.get_deployment_parameter(node.id, 'ss:state',
                                                         ignore_abort)
            if state in _states:
                return state
            time.sleep(wait_period)
        raise LibcloudError(value='Timed out after %s seconds' % (timeout),
                            driver=self)

    def ex_list_virtual_machines(self, location=None, node=None):
        """
        List Virtual Machines (SlipStream virtual machines)

        :param location: Return only virtual machines for the specified location
        :type location: :class:`NodeLocation`

        :param node: List VM belonging to the specified node
        :type node: :class:`Node`

        :return: List of virtualmachine objects
        :rtype: ``list`` of :class:`VirtualMachine`
        """
        filters = []
        if location:
            filters.append('connector/href = "connector/{}"'.format(location.name))
        if node:
            filters.append('deployment/href = "run/{}"'.format(node.id))
        # Renamed local 'filter' -> 'cimi_filter': it shadowed the builtin.
        cimi_filter = ' and '.join(filters) or None
        virtual_machines = self.ss_api.cimi_search('virtualMachines', filter=cimi_filter)
        return [self._virtual_machine_to_node(vm)
                for vm in virtual_machines.json.get('virtualMachines', [])]

    def ex_get_node_parameter(self, node, parameter_name, ignore_abort=True):
        """
        Get the value of a parameter for a node

        :param node: The node from which to get the parameter
        :type node: :class:`Node`

        :param parameter_name: The name of the parameter to retrieve
        :type parameter_name: `str`

        :param ignore_abort: If False, raise an exception if the node has
                             failed (default: True)
        :type ignore_abort: ``bool``
        """
        return self.ss_api.get_deployment_parameter(node.id, parameter_name,
                                                    ignore_abort)

    def _state_to_node_state(self, state):
        # Unknown states deliberately map to NodeState.UNKNOWN.
        return self.NODE_STATE_MAP.get(state.lower(), NodeState.UNKNOWN)

    def _cloud_to_location(self, cloud):
        """Build a NodeLocation for a cloud connector, best-effort country lookup."""
        country = None
        try:
            cimi_filter = 'resource:type="VM" and connector/href = "{}"'.format(cloud)
            service_offer = self.ss_api.cimi_search('serviceOffers',
                                                    filter=cimi_filter, end=1)
            country = service_offer.json['serviceOffers'][0]['resource:country']
        except Exception:
            # Best effort: country stays None when no offer describes the cloud.
            pass
        return NodeLocation(id='connector/{}'.format(cloud), name=cloud,
                            country=country, driver=self)

    def _deployment_to_node(self, deployment):
        """Convert a SlipStream deployment into a libcloud Node."""
        return Node(id=str(deployment.id),
                    name=str(deployment.id),
                    state=self._state_to_node_state(deployment.status),
                    public_ips=None,
                    private_ips=None,
                    driver=self,
                    size=None,
                    image=deployment.module,
                    #created_at=deployment.started_at,
                    extra=dict(deployment._asdict()))

    def _service_offer_to_size(self, service_offer):
        """Convert a CIMI service-offer document into a libcloud NodeSize."""
        return NodeSize(id=service_offer.get('id'),
                        name=service_offer.get('name'),
                        ram=service_offer.get('resource:ram'),
                        disk=service_offer.get('resource:disk'),
                        bandwidth=None,
                        price=service_offer.get('price:unitCost'),
                        driver=self,
                        extra=service_offer)

    def _element_to_image(self, element):
        """Convert a SlipStream module element into a libcloud NodeImage."""
        return NodeImage(id='{}/{}'.format(element.path, element.version),
                         name=element.name,
                         driver=self,
                         extra=dict(element._asdict()))

    def _virtual_machine_to_node(self, virtual_machine):
        """Convert a CIMI virtualMachine document into a VirtualMachine object."""
        ip = virtual_machine.get('ip')
        state = virtual_machine.get('state', 'unknown')
        public_ips = None
        private_ips = None
        try:
            if is_public_subnet(ip):
                public_ips = [ip]
            else:
                private_ips = [ip]
        except Exception:
            # Was a bare 'except:'; narrowed so SystemExit/KeyboardInterrupt
            # propagate. Missing/invalid ip leaves both lists as None.
            pass
        return VirtualMachine(id=virtual_machine.get('id'),
                              name=virtual_machine.get('instanceID'),
                              state=self._state_to_node_state(state),
                              public_ips=public_ips,
                              private_ips=private_ips,
                              driver=self,
                              size=virtual_machine.get('serviceOffer', {}).get('href'),
                              image=None,
                              #created_at=virtual_machine.get('created'),
                              extra=dict(virtual_machine))

    def _list_key_pairs_by_names(self):
        """Return {name: KeyPair} for every configured key pair."""
        return dict([(kp.name, kp) for kp in self.list_key_pairs()
                     if kp and kp.name])

    def _ssh_public_key_to_key_pair(self, ssh_public_key, name=None):
        """Build a KeyPair from an OpenSSH public-key line, optionally renaming it."""
        key_type, key_content, key_name = self._parse_ssh_public_key(ssh_public_key)
        public_key_name = name if name else key_name
        public_key = '{} {} {}'.format(key_type, key_content, public_key_name)
        return KeyPair(name=public_key_name,
                       public_key=public_key,
                       fingerprint=None,
                       driver=self,
                       extra={'public_key_type': key_type,
                              'public_key_content': key_content})

    def _parse_ssh_public_key(self, ssh_public_key):
        """Split an OpenSSH public key into (type, content, name).

        :raises ValueError: when the key does not have at least type and content.
        """
        try:
            key = ssh_public_key.strip('\t\r\n ').split(' ', 2)
            key_type = key[0]
            key_content = key[1]
            key_name = key[2] if len(key) > 2 else ''
        except Exception:
            raise ValueError('Invalid OpenSSH key format for key: {}'
                             .format(ssh_public_key))
        return key_type, key_content, key_name

    def _add_ssh_public_key(self, ssh_public_key):
        """Append a public key to the user's newline-separated key list."""
        user_public_keys = self.ss_api.get_user().ssh_public_keys
        user_public_keys.append(ssh_public_key)
        ssh_public_keys = '\n'.join(user_public_keys)
        return self.ss_api.update_user(ssh_public_keys=ssh_public_keys)
from flask import Flask, url_for, request, Response, render_template from elasticsearch import Elasticsearch import sys, os, time, json, boto, boto.s3.connection, operator import requests from pprint import pprint as pp from slipstream.api import Api from datetime import datetime from threading import Thread import lib_access as la import decision_making_module as dmm import summarizer as summarizer # -*- coding: utf-8 -*- app = Flask(__name__) api = Api() elastic_host = 'http://*****:*****@app.route('/') def form(): return render_template('form_submit.html') def connect_s3(): access_key = s3_credentials[2] secret_key = s3_credentials[3] host = s3_credentials[0] conn = boto.connect_s3(