def monitor_sizes_for(provider_id, print_logs=False):
    """
    Run the set of tasks related to monitoring sizes for a provider.

    End-dates DB sizes no longer reported by the cloud, then re-resolves
    any 'Unknown Size' placeholders via a forced driver lookup.
    While debugging, print_logs=True can be very helpful.
    Returns the list of core sizes seen on the cloud (with .esh cleared).
    """
    from service.driver import get_admin_driver
    if print_logs:
        console_handler = _init_stdout_logging()
    provider = Provider.objects.get(id=provider_id)
    admin_driver = get_admin_driver(provider)
    # Non-End dated sizes on this provider
    db_sizes = Size.objects.filter(only_current(), provider=provider)
    all_sizes = admin_driver.list_sizes()
    seen_sizes = []
    for cloud_size in all_sizes:
        core_size = convert_esh_size(cloud_size, provider.uuid)
        seen_sizes.append(core_size)
    now_time = timezone.now()
    needs_end_date = [size for size in db_sizes if size not in seen_sizes]
    for size in needs_end_date:
        celery_logger.debug("End dating inactive size: %s" % size)
        size.end_date = now_time
        size.save()
    # Find home for 'Unknown Size'
    unknown_sizes = Size.objects.filter(
        provider=provider, name__contains='Unknown Size'
    )
    for size in unknown_sizes:
        # Lookup sizes may not show up in 'list_sizes'
        if size.alias == 'N/A':
            continue  # This is a sentinal value added for a separate purpose.
        try:
            libcloud_size = admin_driver.get_size(
                size.alias, forced_lookup=True
            )
        except BaseHTTPError as error:
            if error.code == 404:
                # The size may have been truly deleted
                continue
            # BUGFIX: previously a non-404 error fell through to the
            # 'if not libcloud_size' check with 'libcloud_size' unbound,
            # raising NameError. Propagate the real error instead.
            raise
        if not libcloud_size:
            continue
        cloud_size = OSSize(libcloud_size)
        # convert_esh_size updates the DB record from the fresh lookup.
        core_size = convert_esh_size(cloud_size, provider.uuid)
    if print_logs:
        _exit_stdout_logging(console_handler)
    for size in seen_sizes:
        size.esh = None
    return seen_sizes
def get(self, request, provider_uuid, identity_uuid, size_alias):
    """
    Lookup the size information
    (Lookup using the given provider/identity)
    Update on server DB (If applicable)
    """
    user = request.user
    try:
        driver = prepare_driver(request, provider_uuid, identity_uuid)
    except ProviderNotActive as pna:
        return inactive_provider(pna)
    except Exception as e:
        return failure_response(
            status.HTTP_409_CONFLICT,
            e.message)
    if not driver:
        return invalid_creds(provider_uuid, identity_uuid)
    size = driver.get_size(size_alias)
    if not size:
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            'Size %s not found' % (size_alias,))
    # Convert to the core representation before serializing.
    serialized = ProviderSizeSerializer(
        convert_esh_size(size, provider_uuid)).data
    return Response(serialized)
def get(self, request, provider_uuid, identity_uuid):
    """
    Using provider and identity, getlist of machines
    TODO: Cache this request
    """
    # TODO: Decide how we should pass this in (I.E. GET query string?)
    active = False
    user = request.user
    driver = prepare_driver(request, provider_uuid, identity_uuid)
    if not driver:
        return invalid_creds(provider_uuid, identity_uuid)
    try:
        size_list = driver.list_sizes()
    except MalformedResponseError:
        return malformed_response(provider_uuid, identity_uuid)
    except InvalidCredsError:
        return invalid_creds(provider_uuid, identity_uuid)
    except (socket_error, ConnectionFailure):
        return connection_failure(provider_uuid, identity_uuid)
    sizes = [convert_esh_size(size, provider_uuid) for size in size_list]
    if active:
        sizes = [size for size in sizes if size.active()]
    return Response(ProviderSizeSerializer(sizes, many=True).data)
def monitor_sizes_for(provider_id, print_logs=False):
    """
    Run the set of tasks related to monitoring sizes for a provider.

    End-dates DB sizes that are no longer reported by the cloud.
    While debugging, print_logs=True mirrors celery_logger output to stdout.
    """
    from service.driver import get_admin_driver
    console_handler = None
    if print_logs:
        import logging
        import sys
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setLevel(logging.DEBUG)
        celery_logger.addHandler(console_handler)
    try:
        provider = Provider.objects.get(id=provider_id)
        admin_driver = get_admin_driver(provider)
        # Non-End dated sizes on this provider
        db_sizes = Size.objects.filter(only_current(), provider=provider)
        all_sizes = admin_driver.list_sizes()
        seen_sizes = []
        for cloud_size in all_sizes:
            core_size = convert_esh_size(cloud_size, provider.uuid)
            seen_sizes.append(core_size)
        now_time = timezone.now()
        needs_end_date = [size for size in db_sizes
                          if size not in seen_sizes]
        for size in needs_end_date:
            celery_logger.debug("End dating inactive size: %s" % size)
            size.end_date = now_time
            size.save()
    finally:
        # BUGFIX: always detach the console handler, even when monitoring
        # raises; otherwise repeated failed runs accumulate duplicate
        # handlers on celery_logger.
        if console_handler:
            celery_logger.removeHandler(console_handler)
def update_instances(driver, identity, esh_list, core_list):
    """
    End-date core instances that don't show up in esh_list
    && Update the values of instances that do
    """
    esh_ids = [instance.id for instance in esh_list]
    #logger.info('%s Instances for Identity %s: %s'
    #            % (len(esh_ids), identity, esh_ids))
    # PERF: map provider_alias -> esh instance once, instead of an O(n)
    # list.index() scan per core instance. setdefault preserves the
    # original 'first match wins' semantics of .index().
    esh_by_id = {}
    for esh_instance in esh_list:
        esh_by_id.setdefault(esh_instance.id, esh_instance)
    for core_instance in core_list:
        esh_instance = esh_by_id.get(core_instance.provider_alias)
        if esh_instance is None:
            logger.info("Did not find instance %s in ID List: %s" %
                        (core_instance.provider_alias, esh_ids))
            core_instance.end_date_all()
            continue
        esh_size = driver.get_size(esh_instance.size.id)
        core_size = convert_esh_size(esh_size, identity.provider.id)
        core_instance.update_history(
            esh_instance.extra['status'],
            core_size,
            # activity: task name, falling back to metadata tmp_status
            esh_instance.extra.get('task') or
            esh_instance.extra.get('metadata', {}).get('tmp_status'))
def update_instances(driver, identity, esh_list, core_list):
    """
    End-date core instances that don't show up in esh_list
    && Update the values of instances that do
    """
    esh_ids = [instance.id for instance in esh_list]
    # logger.info('%s Instances for Identity %s: %s'
    #             % (len(esh_ids), identity, esh_ids))
    # PERF: map provider_alias -> esh instance once, instead of an O(n)
    # list.index() scan per core instance. setdefault preserves the
    # original 'first match wins' semantics of .index().
    esh_by_id = {}
    for esh_instance in esh_list:
        esh_by_id.setdefault(esh_instance.id, esh_instance)
    for core_instance in core_list:
        esh_instance = esh_by_id.get(core_instance.provider_alias)
        if esh_instance is None:
            logger.info("Did not find instance %s in ID List: %s" %
                        (core_instance.provider_alias, esh_ids))
            core_instance.end_date_all()
            continue
        esh_size = driver.get_size(esh_instance.size.id)
        core_size = convert_esh_size(esh_size, identity.provider.uuid)
        core_instance.update_history(
            esh_instance.extra['status'],
            core_size,
            esh_instance.extra.get('task'),
            esh_instance.extra.get('metadata', {}).get('tmp_status',
                                                       'MISSING'))
def monitor_sizes_for(provider_id, print_logs=False):
    """
    Run the set of tasks related to monitoring sizes for a provider.

    End-dates DB sizes no longer reported by the cloud.
    While debugging, print_logs=True can be very helpful.
    """
    from service.driver import get_admin_driver
    if print_logs:
        console_handler = _init_stdout_logging()
    provider = Provider.objects.get(id=provider_id)
    admin_driver = get_admin_driver(provider)
    # Sizes on this provider that have not been end-dated yet.
    db_sizes = Size.objects.filter(only_current(), provider=provider)
    seen_sizes = [convert_esh_size(cloud_size, provider.uuid)
                  for cloud_size in admin_driver.list_sizes()]
    now_time = timezone.now()
    stale_sizes = [size for size in db_sizes if size not in seen_sizes]
    for stale_size in stale_sizes:
        celery_logger.debug("End dating inactive size: %s" % stale_size)
        stale_size.end_date = now_time
        stale_size.save()
    if print_logs:
        _exit_stdout_logging(console_handler)
def get(self, request, provider_uuid, identity_uuid):
    """
    Using provider and identity, getlist of machines
    TODO: Cache this request
    """
    # TODO: Decide how we should pass this in (I.E. GET query string?)
    active = False
    try:
        driver = prepare_driver(request, provider_uuid, identity_uuid)
    except ProviderNotActive as pna:
        return inactive_provider(pna)
    except Exception as e:
        return failure_response(status.HTTP_409_CONFLICT, e.message)
    if not driver:
        return invalid_creds(provider_uuid, identity_uuid)
    try:
        size_list = driver.list_sizes()
    except LibcloudBadResponseError:
        return malformed_response(provider_uuid, identity_uuid)
    except LibcloudInvalidCredsError:
        return invalid_creds(provider_uuid, identity_uuid)
    except (socket_error, ConnectionFailure):
        return connection_failure(provider_uuid, identity_uuid)
    sizes = [convert_esh_size(size, provider_uuid) for size in size_list]
    if active:
        sizes = [size for size in sizes if size.active()]
    return Response(ProviderSizeSerializer(sizes, many=True).data)
def check_size(esh_driver, size_alias, provider_uuid):
    """
    Verify the size exists on the cloud and is active.

    Returns the esh size on success; raises SizeNotAvailable when the
    lookup fails, the size is missing, or the size is inactive.
    """
    try:
        esh_size = esh_driver.get_size(size_alias)
        if not convert_esh_size(esh_size, provider_uuid).active():
            raise SizeNotAvailable()
        return esh_size
    except SizeNotAvailable:
        raise
    except Exception:
        # BUGFIX: was a bare 'except:', which also swallowed SystemExit
        # and KeyboardInterrupt. Any driver/conversion failure still
        # surfaces as SizeNotAvailable to callers.
        raise SizeNotAvailable()
def convert_esh_instance(esh_driver, esh_instance, provider_id, identity_id,
                         user, token=None, password=None):
    """
    Convert an esh (driver-level) instance into a core (DB) Instance.

    Finds or creates the matching core Instance row, attaches the live
    esh object as `.esh`, refreshes its status history, and syncs fields
    driven by instance metadata. Returns the core Instance.
    """
    instance_id = esh_instance.id
    ip_address = _find_esh_ip(esh_instance)
    esh_machine = esh_instance.machine
    core_instance = find_instance(instance_id)
    if core_instance:
        # Existing row: only refresh mutable fields (IP, password).
        _update_core_instance(core_instance, ip_address, password)
    else:
        start_date = _find_esh_start_date(esh_instance)
        logger.debug("Instance: %s" % instance_id)
        if type(esh_machine) == MockMachine:
            # MockMachine includes only the Alias/ID information
            # so a lookup on the machine is required to get accurate
            # information.
            esh_machine = esh_driver.get_machine(esh_machine.id)
        # Ensure that core Machine exists
        coreMachine = convert_esh_machine(
            esh_driver, esh_machine, provider_id,
            user, image_id=esh_instance.image_id)
        # Use New/Existing core Machine to create core Instance
        core_instance = create_instance(
            provider_id,
            identity_id,
            instance_id,
            coreMachine,
            ip_address,
            esh_instance.name,
            user,
            start_date,
            token,
            password,
        )
    # Add 'esh' object
    core_instance.esh = esh_instance
    # Confirm instance exists in a project
    _check_project(core_instance, user)
    # Update the InstanceStatusHistory
    # NOTE: Querying for esh_size because esh_instance
    # Only holds the alias, not all the values.
    # As a bonus this is a cached-call
    esh_size = esh_instance.size
    if type(esh_size) == MockSize:
        # MockSize includes only the Alias/ID information
        # so a lookup on the size is required to get accurate
        # information.
        esh_size = esh_driver.get_size(esh_size.id)
    core_size = convert_esh_size(esh_size, provider_id)
    # TODO: You are the mole!
    core_instance.update_history(
        esh_instance.extra["status"],
        core_size,
        # Activity: a task name, falling back to the tmp_status stored
        # in instance metadata (may be None).
        esh_instance.extra.get("task")
        or esh_instance.extra.get("metadata", {}).get("tmp_status"),
    )
    # Update values in core with those found in metadata.
    core_instance = set_instance_from_metadata(esh_driver, core_instance)
    return core_instance
def get_occupancy(core_provider):
    """
    Return the core size list (with occupancy data) for a provider.

    Raises Exception when no admin driver can be built for the provider.
    """
    admin_driver = get_admin_driver(core_provider)
    if not admin_driver:
        raise Exception(
            "The driver cannot be retrieved for this provider.")
    meta_driver = admin_driver.meta(admin_driver=admin_driver)
    return [convert_esh_size(esh_size, core_provider.uuid)
            for esh_size in meta_driver.occupancy()]
def _first_update(driver, identity, core_instance, esh_instance):
    """Create the first history entry based on 'core_instance' size."""
    esh_size = _get_size(driver, esh_instance)
    core_size = convert_esh_size(esh_size, identity.provider.uuid)
    extra = core_instance.esh.extra
    return core_instance.update_history(
        extra['status'],
        core_size,
        extra.get('task'),
        extra.get('metadata', {}).get('tmp_status'),
        first_update=True)
def _esh_instance_size_to_core(esh_driver, esh_instance, provider_uuid):
    """
    Resolve the full size record for an instance and convert it to core.

    NOTE: Querying for esh_size because esh_instance
    only holds the alias, not all the values.
    As a bonus this is a cached-call.
    """
    esh_size = esh_instance.size
    # IDIOM: isinstance instead of 'type(x) == MockSize' (also matches
    # the other _esh_instance_size_to_core variant in this codebase).
    if isinstance(esh_size, MockSize):
        # MockSize includes only the Alias/ID information,
        # so a lookup on the size is required to get accurate information.
        esh_size = esh_driver.get_size(esh_size.id)
    core_size = convert_esh_size(esh_size, provider_uuid)
    return core_size
def _first_update(driver, identity, core_instance, esh_instance):
    """Create the first history entry based on 'core_instance' size."""
    esh_size = _get_size(driver, esh_instance)
    core_size = convert_esh_size(esh_size, identity.provider.uuid)
    extra = core_instance.esh.extra
    # 3rd arg is task OR tmp_status
    activity = (extra.get('task') or
                extra.get('metadata', {}).get('tmp_status'))
    return core_instance.update_history(
        extra['status'],
        core_size,
        activity,
        first_update=True)
def get(self, request, provider_id, identity_id, size_id):
    """
    Lookup the size information
    (Lookup using the given provider/identity)
    Update on server DB (If applicable)
    """
    user = request.user
    driver = prepare_driver(request, identity_id)
    # NOTE(review): driver is assumed non-None here; a failed
    # prepare_driver would raise on the next line — confirm upstream.
    core_size = convert_esh_size(driver.get_size(size_id), provider_id)
    serialized = ProviderSizeSerializer(core_size).data
    return Response(serialized)
def get(self, request, provider_id, identity_id, size_id):
    """
    Lookup the size information
    (Lookup using the given provider/identity)
    Update on server DB (If applicable)
    """
    user = request.user
    driver = prepare_driver(request, provider_id, identity_id)
    if not driver:
        return invalid_creds(provider_id, identity_id)
    esh_size = driver.get_size(size_id)
    core_size = convert_esh_size(esh_size, provider_id)
    return Response(ProviderSizeSerializer(core_size).data)
def _esh_instance_size_to_core(esh_driver, esh_instance, provider_uuid):
    """
    Resolve the full size record for an instance and convert it to core.

    esh_instance only carries the size alias, so a (cached) driver
    lookup is needed to obtain the complete size values.
    """
    size = esh_instance.size
    if isinstance(size, MockSize):
        # MockSize carries only the Alias/ID; fetch the real record.
        # TODO: Switch to 'get_cached_size!'
        size = esh_driver.get_size(size.id)
    return convert_esh_size(size, provider_uuid)
def launch_instance(user, provider_id, identity_id,
                    size_alias, machine_alias, **kwargs):
    """
    Required arguments will launch the instance, extras will do
    provider-specific modifications.

    Test the quota, Launch the instance,
    creates a core repr and updates status.

    returns a core_instance object after updating core DB.

    Raises: Exception (missing machine/volume alias), SizeNotAvailable,
    OverQuotaError, OverAllocationError, InvalidCredsError.
    """
    # Timestamp used only for the status_logger audit line below.
    now_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    if machine_alias:
        alias = "machine,%s" % machine_alias
    elif 'volume_alias' in kwargs:
        # Boot-from-volume launch path.
        alias = "boot_volume,%s" % kwargs['volume_alias']
    else:
        raise Exception("Not enough data to launch: "
                        "volume_alias/machine_alias is missing")
    status_logger.debug("%s,%s,%s,%s,%s,%s"
                        % (now_time, user, "No Instance",
                           alias, size_alias, "Request Received"))
    core_identity = CoreIdentity.objects.get(id=identity_id)
    esh_driver = get_esh_driver(core_identity, user)
    size = esh_driver.get_size(size_alias)
    #May raise SizeNotAvailable
    check_size(size, provider_id)
    #May raise OverQuotaError or OverAllocationError
    check_quota(user.username, identity_id, size)
    #May raise InvalidCredsError
    (esh_instance, token, password) = launch_esh_instance(
        esh_driver, machine_alias, size_alias,
        core_identity, **kwargs)
    #Convert esh --> core
    core_instance = convert_esh_instance(
        esh_driver, esh_instance, provider_id,
        identity_id, user, token, password)
    esh_size = esh_driver.get_size(esh_instance.size.id)
    core_size = convert_esh_size(esh_size, provider_id)
    core_instance.update_history(
        core_instance.esh.extra['status'],
        core_size,
        #3rd arg is task OR tmp_status
        core_instance.esh.extra.get('task') or
        core_instance.esh.extra.get('metadata', {}).get('tmp_status'),
        first_update=True)
    return core_instance
def launch_instance(user, provider_id, identity_id,
                    size_alias, machine_alias, **kwargs):
    """
    Required arguments will launch the instance, extras will do
    provider-specific modifications.

    Test the quota, Launch the instance,
    creates a core repr and updates status.

    returns a core_instance object after updating core DB.

    Raises: Exception (missing machine/volume alias), SizeNotAvailable,
    OverQuotaError, OverAllocationError, InvalidCredsError,
    SecurityGroupNotCreated.
    """
    # Timestamp used only for the status_logger audit line below.
    now_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    if machine_alias:
        alias = "machine,%s" % machine_alias
    elif 'volume_alias' in kwargs:
        # Boot-from-volume launch path.
        alias = "boot_volume,%s" % kwargs['volume_alias']
    else:
        raise Exception("Not enough data to launch: "
                        "volume_alias/machine_alias is missing")
    status_logger.debug("%s,%s,%s,%s,%s,%s"
                        % (now_time, user, "No Instance",
                           alias, size_alias, "Request Received"))
    core_identity = CoreIdentity.objects.get(id=identity_id)
    esh_driver = get_esh_driver(core_identity, user)
    size = esh_driver.get_size(size_alias)
    #May raise SizeNotAvailable
    check_size(size, provider_id)
    #May raise OverQuotaError or OverAllocationError
    check_quota(user.username, identity_id, size)
    #May raise InvalidCredsError, SecurityGroupNotCreated
    (esh_instance, token, password) = launch_esh_instance(
        esh_driver, machine_alias, size_alias,
        core_identity, **kwargs)
    #Convert esh --> core
    core_instance = convert_esh_instance(
        esh_driver, esh_instance, provider_id,
        identity_id, user, token, password)
    esh_size = esh_driver.get_size(esh_instance.size.id)
    core_size = convert_esh_size(esh_size, provider_id)
    core_instance.update_history(
        core_instance.esh.extra['status'],
        core_size,
        #3rd arg is task OR tmp_status
        core_instance.esh.extra.get('task') or
        core_instance.esh.extra.get('metadata', {}).get('tmp_status'),
        first_update=True)
    return core_instance
def convert_esh_instance(esh_driver, esh_instance, provider_id, identity_id,
                         user, token=None, password=None):
    """
    Convert an esh (driver-level) instance into a core (DB) Instance.

    Finds or creates the matching core Instance row, attaches the live
    esh object as `.esh`, refreshes its status history, and syncs fields
    driven by instance metadata. Returns the core Instance.
    """
    instance_id = esh_instance.id
    ip_address = _find_esh_ip(esh_instance)
    esh_machine = esh_instance.machine
    core_instance = find_instance(instance_id)
    if core_instance:
        # Existing row: only refresh mutable fields (IP, password).
        _update_core_instance(core_instance, ip_address, password)
    else:
        start_date = _find_esh_start_date(esh_instance)
        logger.debug("Instance: %s" % instance_id)
        if type(esh_machine) == MockMachine:
            #MockMachine includes only the Alias/ID information
            #so a lookup on the machine is required to get accurate
            #information.
            esh_machine = esh_driver.get_machine(esh_machine.id)
        #Ensure that core Machine exists
        coreMachine = convert_esh_machine(esh_driver, esh_machine,
                                          provider_id, user,
                                          image_id=esh_instance.image_id)
        #Use New/Existing core Machine to create core Instance
        core_instance = create_instance(provider_id, identity_id,
                                        instance_id, coreMachine,
                                        ip_address, esh_instance.name,
                                        user, start_date, token, password)
    #Add 'esh' object
    core_instance.esh = esh_instance
    #Confirm instance exists in a project
    _check_project(core_instance, user)
    #Update the InstanceStatusHistory
    #NOTE: Querying for esh_size because esh_instance
    #Only holds the alias, not all the values.
    #As a bonus this is a cached-call
    esh_size = esh_instance.size
    if type(esh_size) == MockSize:
        #MockSize includes only the Alias/ID information
        #so a lookup on the size is required to get accurate
        #information.
        esh_size = esh_driver.get_size(esh_size.id)
    core_size = convert_esh_size(esh_size, provider_id)
    core_instance.update_history(
        esh_instance.extra['status'],
        core_size,
        esh_instance.extra.get('task'))
    #Update values in core with those found in metadata.
    core_instance = set_instance_from_metadata(esh_driver, core_instance)
    return core_instance
def _esh_instance_size_to_core(esh_driver, esh_instance, provider_uuid):
    """
    Resolve the full size record for an instance and convert it to core.

    esh_instance only carries the size alias, so a forced driver lookup
    is needed to obtain the complete size values.
    """
    size = esh_instance.size
    if isinstance(size, MockSize):
        # MockSize carries only the Alias/ID; force a fresh lookup.
        # TODO: Switch to 'get_cached_size!'
        looked_up = OSSize(
            esh_driver.get_size(size.id, forced_lookup=True))
        if looked_up:
            size = looked_up
    return convert_esh_size(size, provider_uuid)
def get(self, request, provider_id, identity_id):
    """
    Using provider and identity, getlist of machines
    TODO: Cache this request
    """
    #TODO: Decide how we should pass this in (I.E. GET query string?)
    active = False
    user = request.user
    driver = prepare_driver(request, identity_id)
    sizes = [convert_esh_size(size, provider_id)
             for size in driver.list_sizes()]
    if active:
        sizes = [size for size in sizes if size.active()]
    return Response(ProviderSizeSerializer(sizes, many=True).data)
def get(self, request, provider_id, identity_id):
    """
    Using provider and identity, getlist of machines
    TODO: Cache this request
    """
    #TODO: Decide how we should pass this in (I.E. GET query string?)
    active = False
    user = request.user
    driver = prepare_driver(request, provider_id, identity_id)
    if not driver:
        return invalid_creds(provider_id, identity_id)
    sizes = [convert_esh_size(size, provider_id)
             for size in driver.list_sizes()]
    if active:
        sizes = [size for size in sizes if size.active()]
    return Response(ProviderSizeSerializer(sizes, many=True).data)
def get(self, request, provider_id):
    """
    Returns occupancy data for the specific provider.
    """
    #Get meta for provider to call occupancy
    try:
        provider = Provider.objects.get(id=provider_id)
    except Provider.DoesNotExist:
        errorObj = failureJSON([{
            'code': 404,
            'message': 'The provider does not exist.'}])
        return Response(errorObj, status=status.HTTP_404_NOT_FOUND)
    admin_driver = get_admin_driver(provider)
    # NOTE(review): admin_driver is assumed non-None here; other
    # occupancy views guard against a missing driver — confirm.
    meta_driver = admin_driver.meta(admin_driver=admin_driver)
    core_sizes = [convert_esh_size(size, provider_id)
                  for size in meta_driver.occupancy()]
    return Response(ProviderSizeSerializer(core_sizes, many=True).data)
def get(self, request, provider_uuid):
    """
    Returns occupancy data for the specific provider.
    """
    try:
        provider = Provider.get_active(provider_uuid)
    except Provider.DoesNotExist:
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            "The provider does not exist.")
    admin_driver = get_admin_driver(provider)
    if not admin_driver:
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            "The driver cannot be retrieved for this provider.")
    meta_driver = admin_driver.meta(admin_driver=admin_driver)
    core_sizes = [convert_esh_size(size, provider_uuid)
                  for size in meta_driver.occupancy()]
    return Response(ProviderSizeSerializer(core_sizes, many=True).data)
def check_size(esh_size, provider_id):
    """
    Verify the given esh size is active on the provider.

    Raises SizeNotAvailable when the size is inactive or conversion fails.
    """
    try:
        if not convert_esh_size(esh_size, provider_id).active():
            raise SizeNotAvailable()
    except SizeNotAvailable:
        raise
    except Exception:
        # BUGFIX: was a bare 'except:', which also swallowed SystemExit
        # and KeyboardInterrupt. Conversion failures still surface as
        # SizeNotAvailable to callers.
        raise SizeNotAvailable()