def main():
    for prov in Provider.objects.filter(type__name__icontains='openstack'):
        if not prov.is_active():
            continue
        print "Importing machine membership for %s" % prov
        accounts = OSAccounts(prov)
        if not accounts:
            print "Aborting import: Could not retrieve OSAccounts driver "\
                  "for Provider %s" % prov
            continue
        admin_driver = get_admin_driver(prov)
        if not admin_driver:
            print "Aborting import: Could not retrieve admin_driver "\
                  "for Provider %s" % prov
            continue
        private_images = admin_driver.filter_machines(
            accounts.list_all_images(is_public=False),
            black_list=["eki-", "eri-", "ChromoSnapShot"])
        public_images = admin_driver.filter_machines(
            accounts.list_all_images(is_public=True),
            black_list=["eki-", "eri-", "ChromoSnapShot"])
        fix_public_images(public_images, prov, accounts)
        fix_private_images(private_images, prov, accounts)
        fix_private_images_without_repr(private_images, prov, accounts)
def get(self, request, provider_uuid, identity_uuid):
    """
    Using provider and identity, get the list of hypervisors
    TODO: Cache this request
    """
    provider = Provider.objects.filter(uuid=provider_uuid)
    if not provider:
        return invalid_creds(provider_uuid, identity_uuid)
    esh_driver = get_admin_driver(provider[0])
    esh_hypervisor_list = []
    if not hasattr(esh_driver._connection, 'ex_list_hypervisor_nodes'):
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            "The Hypervisor List cannot be retrieved for this provider.")
    try:
        esh_hypervisor_list =\
            esh_driver._connection.ex_list_hypervisor_nodes()
        region_name = esh_driver._connection._ex_force_service_region
        for obj in esh_hypervisor_list:
            obj['service_region'] = region_name
        return Response(esh_hypervisor_list)
    except (socket_error, ConnectionFailure):
        return connection_failure(provider_uuid, identity_uuid)
    except Exception as exc:
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            "Error encountered retrieving hypervisor list: %s" % exc)
def get(self, request, provider_uuid):
    try:
        provider = Provider.get_active(provider_uuid)
    except Provider.DoesNotExist:
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            "The provider does not exist.")
    admin_driver = get_admin_driver(provider)
    if not admin_driver:
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            "The driver cannot be retrieved for this provider.")
    if not hasattr(admin_driver._connection, "ex_hypervisor_statistics"):
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            "Occupancy statistics cannot be retrieved for this provider.")
    try:
        stats = admin_driver._connection.ex_hypervisor_statistics()
        return Response(stats)
    except (socket_error, ConnectionFailure):
        return connection_failure(provider_uuid)
    except Exception as exc:
        return failure_response(
            status.HTTP_503_SERVICE_UNAVAILABLE,
            "Error occurred while retrieving statistics: %s" % exc)
def get(self, request, provider_uuid, identity_uuid):
    """
    Using provider and identity, get the list of hypervisors
    TODO: Cache this request
    """
    # TODO: Decide how we should pass this in (I.E. GET query string?)
    active = False
    user = request.user
    provider = Provider.objects.filter(uuid=provider_uuid)
    if not provider:
        return invalid_creds(provider_uuid, identity_uuid)
    esh_driver = get_admin_driver(provider[0])
    esh_hypervisor_list = []
    if not hasattr(esh_driver._connection, 'ex_list_hypervisor_nodes'):
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            "The Hypervisor List cannot be retrieved for this provider.")
    try:
        esh_hypervisor_list =\
            esh_driver._connection.ex_list_hypervisor_nodes()
        region_name = esh_driver._connection._ex_force_service_region
        for obj in esh_hypervisor_list:
            obj['service_region'] = region_name
        return Response(esh_hypervisor_list)
    except (socket_error, ConnectionFailure):
        return connection_failure(provider_uuid, identity_uuid)
    except Exception as exc:
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            "Error encountered retrieving hypervisor list: %s" % exc)
def suspend_all_instances(provider, action, sleep_time=None, dry_run=False):
    admin_driver = get_admin_driver(provider)
    all_insts = admin_driver.meta(admin_driver=admin_driver).all_instances()
    users = []
    bad_instances = []
    for i in all_insts:
        if 'creator' in i.extra['metadata']:
            users.append(i.extra['metadata']['creator'])
        else:
            bad_instances.append(i)
    if bad_instances:
        print "WARN: These instances are MISSING because they have "\
              "incomplete metadata:\n%s" % (bad_instances,)
    all_users = sorted(list(OrderedDict.fromkeys(users)))
    for count, user in enumerate(all_users):
        # NOTE: Filter by the provider passed in (the original hardcoded
        # provider__id=4, ignoring the 'provider' argument).
        ident = Identity.objects.filter(created_by__username=user,
                                        provider=provider)
        if len(ident) > 1:
            print "WARN: User %s has >1 identity!" % user
        ident = ident[0]
        driver = get_esh_driver(ident)
        instances = driver.list_instances()
        print "Found %s instances for %s" % (len(instances), user)
        for inst in instances:
            if not sleep_time:
                sleep_for = random.uniform(SLEEP_MIN, SLEEP_MAX)
            else:
                sleep_for = sleep_time
            _execute_action(ident, inst, action, sleep_for, dry_run)
def enable_image_validation(machine_request, init_task, final_task,
                            original_status="", error_handler_task=None):
    if not error_handler_task:
        error_handler_task = machine_request_error.s(machine_request.id)
    # Task 3 = Validate the new image by launching an instance
    admin_ident = machine_request.new_admin_identity()
    admin_driver = get_admin_driver(machine_request.new_machine_provider)
    if 'validating' == original_status:
        image_id = machine_request.new_machine.identifier
        celery_logger.info("Start with validating:%s" % image_id)
        # If validating, seed the image_id and start here..
        validate_task = validate_new_image.s(image_id, machine_request.id)
        init_task = validate_task
    else:
        validate_task = validate_new_image.s(machine_request.id)
        init_task.link(validate_task)
    # Validate task returns an instance_id
    # Task 4 = Wait for new instance to be 'active'
    wait_for_task = wait_for_instance.s(
        # NOTE: 1st arg, instance_id, passed from last task.
        admin_driver.__class__,
        admin_driver.provider,
        admin_driver.identity,
        "active",
        test_tmp_status=True,
        return_id=True)
    validate_task.link(wait_for_task)
    validate_task.link_error(error_handler_task)
    # Task 5 = Terminate the new instance on completion
    destroy_task = destroy_instance.s(
        admin_ident.created_by, admin_ident.uuid)
    wait_for_task.link(destroy_task)
    wait_for_task.link_error(error_handler_task)
    destroy_task.link_error(error_handler_task)
    destroy_task.link(final_task)
    return init_task
def suspend_all_instances():
    admin_driver = get_admin_driver(Provider.objects.get(id=4))
    all_insts = admin_driver.meta(admin_driver=admin_driver).all_instances()
    users = []
    bad_instances = []
    for i in all_insts:
        if "creator" in i.extra["metadata"]:
            users.append(i.extra["metadata"]["creator"])
        else:
            bad_instances.append(i)
    if bad_instances:
        print "WARN: These instances are MISSING because they have "\
              "incomplete metadata:\n%s" % (bad_instances,)
    all_users = sorted(list(OrderedDict.fromkeys(users)))
    for count, user in enumerate(all_users):
        ident = Identity.objects.filter(created_by__username=user,
                                        provider__id=4)
        if len(ident) > 1:
            print "WARN: User %s has >1 identity!" % user
        ident = ident[0]
        driver = get_esh_driver(ident)
        instances = driver.list_instances()
        print "Found %s instances for %s" % (len(instances), user)
        for inst in instances:
            if inst._node.extra["status"] == "active":
                print "Attempt to suspend Instance %s in state %s" % (
                    inst.id, inst._node.extra["status"])
                try:
                    suspend_instance(driver, inst, ident.provider.id,
                                     ident.id, ident.created_by)
                    print "Suspended Instance %s.. Sleep 2min" % (inst.id,)
                    time.sleep(2 * 60)
                except Exception as err:
                    print "WARN: Could not suspend instance %s. Error: %s" % (
                        inst.id, err)
def get(self, request, provider_uuid, identity_uuid, hypervisor_id):
    """
    Look up the Hypervisor information (using the given provider/identity).
    Update on server DB (if applicable).
    """
    provider = Provider.objects.filter(uuid=provider_uuid)
    if not provider:
        return invalid_creds(provider_uuid, identity_uuid)
    esh_driver = get_admin_driver(provider[0])
    if not esh_driver:
        return invalid_creds(provider_uuid, identity_uuid)
    hypervisor = {}
    if not hasattr(esh_driver._connection, 'ex_detail_hypervisor_node'):
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            "Hypervisor Details cannot be retrieved for this provider.")
    try:
        hypervisor = esh_driver._connection\
            .ex_detail_hypervisor_node(hypervisor_id)
        hypervisor['cpu_info'] = json.loads(hypervisor['cpu_info'])
        return Response(hypervisor)
    except (socket_error, ConnectionFailure):
        return connection_failure(provider_uuid, identity_uuid)
    except Exception as exc:
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            "Error encountered retrieving hypervisor details: %s" % exc)
def monitor_sizes_for(provider_id, print_logs=False):
    """
    Run the set of tasks related to monitoring sizes for a provider.

    While debugging, print_logs=True can be very helpful.
    """
    from service.driver import get_admin_driver
    if print_logs:
        console_handler = _init_stdout_logging()
    provider = Provider.objects.get(id=provider_id)
    admin_driver = get_admin_driver(provider)
    # Non-end-dated sizes on this provider
    db_sizes = Size.objects.filter(only_current(), provider=provider)
    all_sizes = admin_driver.list_sizes()
    seen_sizes = []
    for cloud_size in all_sizes:
        core_size = convert_esh_size(cloud_size, provider.uuid)
        seen_sizes.append(core_size)
    now_time = timezone.now()
    needs_end_date = [size for size in db_sizes if size not in seen_sizes]
    for size in needs_end_date:
        celery_logger.debug("End dating inactive size: %s" % size)
        size.end_date = now_time
        size.save()
    if print_logs:
        _exit_stdout_logging(console_handler)
def suspend_all_instances():
    admin_driver = get_admin_driver(Provider.objects.get(id=4))
    all_insts = admin_driver.meta(admin_driver=admin_driver).all_instances()
    users = []
    bad_instances = []
    for i in all_insts:
        if 'creator' in i.extra['metadata']:
            users.append(i.extra['metadata']['creator'])
        else:
            bad_instances.append(i)
    if bad_instances:
        print "WARN: These instances are MISSING because they have "\
              "incomplete metadata:\n%s" % (bad_instances,)
    all_users = sorted(list(OrderedDict.fromkeys(users)))
    for count, user in enumerate(all_users):
        ident = Identity.objects.filter(created_by__username=user,
                                        provider__id=4)
        if len(ident) > 1:
            print "WARN: User %s has >1 identity!" % user
        ident = ident[0]
        driver = get_esh_driver(ident)
        instances = driver.list_instances()
        print "Found %s instances for %s" % (len(instances), user)
        for inst in instances:
            if inst._node.extra['status'] == 'active':
                print "Attempt to suspend Instance %s in state %s" % (
                    inst.id, inst._node.extra['status'])
                try:
                    suspend_instance(driver, inst, ident.provider.id,
                                     ident.id, ident.created_by)
                    print "Suspended Instance %s.. Sleep 2min" % (inst.id,)
                    time.sleep(2 * 60)
                except Exception as err:
                    print "WARN: Could not suspend instance %s. Error: %s" % (
                        inst.id, err)
def monitor_sizes_for(provider_id, print_logs=False):
    """
    Run the set of tasks related to monitoring sizes for a provider.

    While debugging, print_logs=True can be very helpful.
    """
    from service.driver import get_admin_driver
    if print_logs:
        import logging
        import sys
        consolehandler = logging.StreamHandler(sys.stdout)
        consolehandler.setLevel(logging.DEBUG)
        celery_logger.addHandler(consolehandler)
    provider = Provider.objects.get(id=provider_id)
    admin_driver = get_admin_driver(provider)
    # Non-end-dated sizes on this provider
    db_sizes = Size.objects.filter(only_current(), provider=provider)
    all_sizes = admin_driver.list_sizes()
    seen_sizes = []
    for cloud_size in all_sizes:
        core_size = convert_esh_size(cloud_size, provider.uuid)
        seen_sizes.append(core_size)
    now_time = timezone.now()
    needs_end_date = [size for size in db_sizes if size not in seen_sizes]
    for size in needs_end_date:
        celery_logger.debug("End dating inactive size: %s" % size)
        size.end_date = now_time
        size.save()
    if print_logs:
        celery_logger.removeHandler(consolehandler)
def enable_image_validation(machine_request, init_task, final_task,
                            original_status="", error_handler_task=None):
    if not error_handler_task:
        error_handler_task = machine_request_error.s(machine_request.id)
    # Task 3 = Validate the new image by launching an instance
    admin_ident = machine_request.new_admin_identity()
    admin_driver = get_admin_driver(machine_request.new_machine_provider)
    if 'validating' in original_status:
        image_id = machine_request.new_machine.identifier
        celery_logger.info("Start with validating:%s" % image_id)
        # If validating, seed the image_id and start here..
        validate_task = validate_new_image.s(image_id, machine_request.id)
        init_task = validate_task
    else:
        validate_task = validate_new_image.s(machine_request.id)
        init_task.link(validate_task)
    # Validate task returns an instance_id
    # Task 4 = Wait for new instance to be 'active'
    wait_for_task = wait_for_instance.s(
        # NOTE: 1st arg, instance_id, passed from last task.
        admin_driver.__class__,
        admin_driver.provider,
        admin_driver.identity,
        "active",
        test_tmp_status=True,
        return_id=True)
    validate_task.link(wait_for_task)
    validate_task.link_error(error_handler_task)
    # Task 5 = Terminate the new instance on completion
    destroy_task = destroy_instance.s(
        admin_ident.created_by, admin_ident.uuid)
    wait_for_task.link(destroy_task)
    wait_for_task.link_error(error_handler_task)
    destroy_task.link_error(error_handler_task)
    destroy_task.link(final_task)
    return init_task
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--provider-list", action="store_true",
                        help="List of provider names and IDs")
    parser.add_argument("--provider-id", type=int,
                        help="Atmosphere provider ID"
                             " to use when importing users.")
    args = parser.parse_args()
    if args.provider_list:
        print "ID\tName"
        for p in Provider.objects.all().order_by('id'):
            print "%d\t%s" % (p.id, p.location)
        return
    if not args.provider_id:
        print "ERROR: provider-id is required. To get a list of providers"\
              " use --provider-list"
        return
    provider = Provider.objects.get(id=args.provider_id)
    print "Provider Selected: %s" % provider
    user_instances = get_user_instance_history(provider)
    admin_driver = get_admin_driver(provider)
    all_instances = admin_driver.list_all_instances()
    prune_history(user_instances, all_instances)
def get(self, request, provider_uuid, identity_uuid, hypervisor_id):
    """
    Look up the Hypervisor information (using the given provider/identity).
    Update on server DB (if applicable).
    """
    user = request.user
    provider = Provider.objects.filter(uuid=provider_uuid)
    if not provider:
        return invalid_creds(provider_uuid, identity_uuid)
    esh_driver = get_admin_driver(provider[0])
    if not esh_driver:
        return invalid_creds(provider_uuid, identity_uuid)
    hypervisor = {}
    if not hasattr(esh_driver._connection, 'ex_detail_hypervisor_node'):
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            "Hypervisor Details cannot be retrieved for this provider.")
    try:
        hypervisor = esh_driver._connection\
            .ex_detail_hypervisor_node(hypervisor_id)
        hypervisor['cpu_info'] = json.loads(hypervisor['cpu_info'])
        return Response(hypervisor)
    except (socket_error, ConnectionFailure):
        return connection_failure(provider_uuid, identity_uuid)
    except Exception as exc:
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            "Error encountered retrieving hypervisor details: %s" % exc)
def get_app_driver(provider_machine):
    from service.driver import get_admin_driver
    provider = provider_machine.provider
    esh_driver = get_admin_driver(provider)
    if not esh_driver:
        raise Exception("The driver of the account provider is required to"
                        " update image metadata")
    return esh_driver
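# A minimal usage sketch for get_app_driver (not part of the original source).
# '_example_update_image_metadata' is hypothetical; it assumes
# 'provider_machine' is a core.models.ProviderMachine and reuses the
# get_machine/update_machine_metadata calls seen in _os_update_owner below.
def _example_update_image_metadata(provider_machine, metadata):
    esh_driver = get_app_driver(provider_machine)
    esh_machine = esh_driver.get_machine(provider_machine.identifier)
    if not esh_machine:
        raise Exception("Machine with ID %s not found"
                        % provider_machine.identifier)
    update_machine_metadata(esh_driver, esh_machine, metadata)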
def monitor_sizes_for(provider_id, print_logs=False):
    """
    Run the set of tasks related to monitoring sizes for a provider.

    While debugging, print_logs=True can be very helpful.
    """
    from service.driver import get_admin_driver
    if print_logs:
        console_handler = _init_stdout_logging()
    provider = Provider.objects.get(id=provider_id)
    admin_driver = get_admin_driver(provider)
    # Non-end-dated sizes on this provider
    db_sizes = Size.objects.filter(only_current(), provider=provider)
    all_sizes = admin_driver.list_sizes()
    seen_sizes = []
    for cloud_size in all_sizes:
        core_size = convert_esh_size(cloud_size, provider.uuid)
        seen_sizes.append(core_size)
    now_time = timezone.now()
    needs_end_date = [size for size in db_sizes if size not in seen_sizes]
    for size in needs_end_date:
        celery_logger.debug("End dating inactive size: %s" % size)
        size.end_date = now_time
        size.save()
    # Find a home for 'Unknown Size'
    unknown_sizes = Size.objects.filter(
        provider=provider, name__contains='Unknown Size')
    for size in unknown_sizes:
        # Lookup sizes may not show up in 'list_sizes'
        if size.alias == 'N/A':
            # This is a sentinel value added for a separate purpose.
            continue
        try:
            libcloud_size = admin_driver.get_size(
                size.alias, forced_lookup=True)
        except BaseHTTPError as error:
            if error.code == 404:
                # The size may have been truly deleted
                continue
            raise
        if not libcloud_size:
            continue
        cloud_size = OSSize(libcloud_size)
        core_size = convert_esh_size(cloud_size, provider.uuid)
    if print_logs:
        _exit_stdout_logging(console_handler)
    for size in seen_sizes:
        size.esh = None
    return seen_sizes
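# A hedged sketch (not part of the original source) of how monitor_sizes_for
# might be driven across providers. '_example_monitor_all_sizes' is
# hypothetical; skipping inactive providers mirrors get_all_instances below.
def _example_monitor_all_sizes():
    for provider in Provider.objects.all():
        if not provider.is_active():
            continue
        monitor_sizes_for(provider.id, print_logs=False)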
def invalidate_machine_cache(machine_request):
    """
    The new image won't populate in the machine list
    unless the list is cleared.
    """
    provider = machine_request.instance.provider_machine.provider
    driver = get_admin_driver(provider)
    if not driver:
        return
    driver.provider.machineCls.invalidate_provider_cache(driver.provider)
def admin_capacity_check(provider_id, instance_id):
    from service.driver import get_admin_driver
    from core.models import Provider
    p = Provider.objects.get(id=provider_id)
    admin_driver = get_admin_driver(p)
    instance = admin_driver.get_instance(instance_id)
    if not instance:
        logger.warn("ERROR - Could not find instance id=%s" % (instance_id,))
        return
    hypervisor_hostname = instance.extra["object"].get(
        "OS-EXT-SRV-ATTR:hypervisor_hostname")
    if not hypervisor_hostname:
        logger.warn("ERROR - Server Attribute hypervisor_hostname missing! "
                    "Assumed to be under capacity")
        return
    hypervisor_stats = admin_driver._connection.ex_detail_hypervisor_node(
        hypervisor_hostname)
    return test_capacity(hypervisor_hostname, instance, hypervisor_stats)
def get_instance_owner_map(provider):
    admin_driver = get_admin_driver(provider)
    meta = admin_driver.meta(admin_driver=admin_driver)
    logger.info("Retrieving all tenants..")
    all_tenants = admin_driver._connection._keystone_list_tenants()
    logger.info("Retrieved %s tenants. Retrieving all instances.."
                % len(all_tenants))
    all_instances = meta.all_instances()
    logger.info("Retrieved %s instances." % len(all_instances))
    # Convert tenant-id to tenant-name all at once
    all_instances = _convert_tenant_id_to_names(all_instances, all_tenants)
    logger.info("Owner information added.")
    # Make a mapping of owner-to-instance
    instance_map = _make_instance_owner_map(all_instances)
    logger.info("Instance owner map created")
    return instance_map
def get_all_instances():
    from core.models import Identity, Provider
    from api import get_esh_driver
    from service.driver import get_admin_driver
    all_instances = []
    for provider in Provider.objects.all():
        try:
            admin_driver = get_admin_driver(provider)
            if not admin_driver:
                raise Exception("No account admins for provider %s"
                                % provider)
            meta_driver = admin_driver.meta(admin_driver=admin_driver)
            all_instances.extend(meta_driver.all_instances())
        except Exception:
            logger.exception("Problem accessing all "
                             "instances for provider: %s" % provider)
    return all_instances
def get(self, request, provider_id, identity_id, hypervisor_id):
    """
    Look up the Hypervisor information (using the given provider/identity).
    Update on server DB (if applicable).
    """
    user = request.user
    provider = Provider.objects.filter(id=provider_id)
    if not provider:
        return invalid_creds(provider_id, identity_id)
    esh_driver = get_admin_driver(provider[0])
    if not esh_driver:
        return invalid_creds(provider_id, identity_id)
    hypervisor = {}
    if hasattr(esh_driver._connection, 'ex_detail_hypervisor_node'):
        hypervisor = esh_driver._connection\
            .ex_detail_hypervisor_node(hypervisor_id)
    return Response(hypervisor)
def get(self, request, provider_id):
    try:
        provider = Provider.objects.get(id=provider_id)
    except Provider.DoesNotExist:
        errorObj = failureJSON([{
            'code': 404,
            'message': 'The provider does not exist.'}])
        return Response(errorObj, status=status.HTTP_404_NOT_FOUND)
    admin_driver = get_admin_driver(provider)
    if hasattr(admin_driver._connection, "ex_hypervisor_statistics"):
        return Response(
            admin_driver._connection.ex_hypervisor_statistics())
    else:
        errorObj = failureJSON([{
            'code': 404,
            'message': 'Hypervisor statistics are unavailable'
                       ' for this provider.'}])
        return Response(errorObj, status=status.HTTP_404_NOT_FOUND)
def get(self, request, provider_id):
    try:
        provider = Provider.get_active(provider_id)
    except Provider.DoesNotExist:
        return failure_response(status.HTTP_404_NOT_FOUND,
                                "The provider does not exist.")
    admin_driver = get_admin_driver(provider)
    if not admin_driver:
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            "The driver cannot be retrieved for this provider.")
    if hasattr(admin_driver._connection, "ex_hypervisor_statistics"):
        return Response(
            admin_driver._connection.ex_hypervisor_statistics())
    else:
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            "Hypervisor statistics are unavailable for this provider.")
def admin_capacity_check(provider_id, instance_id):
    from service.driver import get_admin_driver
    from core.models import Provider
    p = Provider.objects.get(id=provider_id)
    admin_driver = get_admin_driver(p)
    instance = admin_driver.get_instance(instance_id)
    if not instance:
        logger.warn("ERROR - Could not find instance id=%s" % (instance_id,))
        return
    hypervisor_hostname = instance.extra['object']\
        .get('OS-EXT-SRV-ATTR:hypervisor_hostname')
    if not hypervisor_hostname:
        logger.warn("ERROR - Server Attribute hypervisor_hostname missing! "
                    "Assumed to be under capacity")
        return
    hypervisor_stats = admin_driver._connection.ex_detail_hypervisor_node(
        hypervisor_hostname)
    return test_capacity(hypervisor_hostname, instance, hypervisor_stats)
def upload_privacy_data(machine_request, new_machine):
    from service.driver import get_admin_driver, get_account_driver
    prov = new_machine.provider
    accounts = get_account_driver(prov)
    if not accounts:
        print "Aborting import: Could not retrieve Account Driver "\
              "for Provider %s" % prov
        return
    admin_driver = get_admin_driver(prov)
    if not admin_driver:
        print "Aborting import: Could not retrieve admin_driver "\
              "for Provider %s" % prov
        return
    img = accounts.get_image(new_machine.identifier)
    tenant_list = machine_request.get_access_list()
    # All in the list will be added as 'sharing' the OStack img
    # All tenants already sharing the OStack img will be added to this list
    return sync_membership(accounts, img, new_machine, tenant_list)
def get_instance_owner_map(provider, users=None):
    """
    All keys == All identities
    """
    admin_driver = get_admin_driver(provider)
    meta = admin_driver.meta(admin_driver=admin_driver)
    all_identities = _select_identities(provider, users)
    all_instances = meta.all_instances()
    all_tenants = admin_driver._connection._keystone_list_tenants()
    # Convert instance.owner from tenant-id to tenant-name all at once
    all_instances = _convert_tenant_id_to_names(all_instances, all_tenants)
    # Make a mapping of owner-to-instance
    instance_map = _make_instance_owner_map(all_instances, users=users)
    logger.info("Instance owner map created")
    identity_map = _include_all_idents(all_identities, instance_map)
    logger.info("Identity map created")
    return identity_map
def get(self, request, provider_uuid):
    try:
        provider = Provider.get_active(provider_uuid)
    except Provider.DoesNotExist:
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            "The provider does not exist.")
    admin_driver = get_admin_driver(provider)
    if not admin_driver:
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            "The driver cannot be retrieved for this provider.")
    if hasattr(admin_driver._connection, "ex_hypervisor_statistics"):
        return Response(
            admin_driver._connection.ex_hypervisor_statistics())
    else:
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            "Hypervisor statistics are unavailable for this provider.")
def set_machine_request_metadata(machine_request, image_id):
    admin_driver = get_admin_driver(machine_request.new_machine_provider)
    machine = admin_driver.get_machine(image_id)
    lc_driver = admin_driver._connection
    if not machine:
        celery_logger.warn("Could not find machine with ID=%s" % image_id)
        return
    if not hasattr(lc_driver, 'ex_set_image_metadata'):
        return
    metadata = lc_driver.ex_get_image_metadata(machine)
    if machine_request.new_application_description:
        metadata['description'] = machine_request.new_application_description
    if machine_request.new_version_tags:
        metadata['tags'] = machine_request.new_version_tags
    celery_logger.info("LC Driver:%s - Machine:%s - Metadata:%s"
                       % (lc_driver, machine.id, metadata))
    lc_driver.ex_set_image_metadata(machine, metadata)
    return machine
def _os_update_owner(provider_machine, tenant_name):
    from core.models import Provider
    from service.driver import get_admin_driver
    provider = provider_machine.provider
    if provider not in Provider.get_active(type_name='openstack'):
        raise Exception("An active openstack provider is required to"
                        " update image owner")
    esh_driver = get_admin_driver(provider)
    if not esh_driver:
        raise Exception("The account driver of Provider %s is required to"
                        " update image metadata" % provider)
    esh_machine = esh_driver.get_machine(provider_machine.identifier)
    if not esh_machine:
        raise Exception("Machine with ID %s not found"
                        % provider_machine.identifier)
    tenant_id = _tenant_name_to_id(provider_machine.provider, tenant_name)
    update_machine_metadata(esh_driver, esh_machine,
                            {"owner": tenant_id,
                             "application_owner": tenant_name})
def get(self, request, provider_id, identity_id):
    """
    Using provider and identity, get the list of hypervisors
    TODO: Cache this request
    """
    # TODO: Decide how we should pass this in (I.E. GET query string?)
    active = False
    user = request.user
    provider = Provider.objects.filter(id=provider_id)
    if not provider:
        return invalid_creds(provider_id, identity_id)
    esh_driver = get_admin_driver(provider[0])
    esh_hypervisor_list = []
    if hasattr(esh_driver._connection, 'ex_list_hypervisor_nodes'):
        esh_hypervisor_list = \
            esh_driver._connection.ex_list_hypervisor_nodes()
        region_name = esh_driver._connection._ex_force_service_region
        for obj in esh_hypervisor_list:
            obj['service_region'] = region_name
    return Response(esh_hypervisor_list)
def get(self, request, provider_uuid, identity_uuid):
    """
    Using provider and identity, get the list of hypervisors
    TODO: Cache this request
    """
    # TODO: Decide how we should pass this in (I.E. GET query string?)
    active = False
    user = request.user
    provider = Provider.objects.filter(uuid=provider_uuid)
    if not provider:
        return invalid_creds(provider_uuid, identity_uuid)
    esh_driver = get_admin_driver(provider[0])
    esh_hypervisor_list = []
    if hasattr(esh_driver._connection, 'ex_list_hypervisor_nodes'):
        esh_hypervisor_list = \
            esh_driver._connection.ex_list_hypervisor_nodes()
        region_name = esh_driver._connection._ex_force_service_region
        for obj in esh_hypervisor_list:
            obj['service_region'] = region_name
    return Response(esh_hypervisor_list)
def _os_update_owner(provider_machine, tenant_name):
    from core.models import Provider
    from service.driver import get_admin_driver
    provider = provider_machine.provider
    if provider not in Provider.get_active(type_name='openstack'):
        raise Exception("An active openstack provider is required to"
                        " update image owner")
    esh_driver = get_admin_driver(provider)
    if not esh_driver:
        raise Exception("The account driver of Provider %s is required to"
                        " update image metadata" % provider)
    esh_machine = esh_driver.get_machine(provider_machine.identifier)
    if not esh_machine:
        raise Exception("Machine with ID %s not found"
                        % provider_machine.identifier)
    tenant_id = _tenant_name_to_id(provider_machine.provider, tenant_name)
    update_machine_metadata(esh_driver, esh_machine, {
        "owner": tenant_id,
        "application_owner": tenant_name
    })
def get_all_instances():
    from core.models import Identity, Provider
    from service.driver import get_admin_driver
    all_instances = []
    for provider in Provider.objects.all():
        if not provider.is_active():
            # TODO: Optionally we could ensure that anyone using this
            # provider has their times end-dated...
            # Nothing to add...
            continue
        try:
            admin_driver = get_admin_driver(provider)
            if not admin_driver:
                raise Exception("No account admins for provider %s"
                                % provider)
            meta_driver = admin_driver.meta(admin_driver=admin_driver)
            all_instances.extend(meta_driver.all_instances())
        except Exception:
            logger.exception("Problem accessing all "
                             "instances for provider: %s" % provider)
    return all_instances
def upload_privacy_data(machine_request, new_machine):
    from service.driver import get_admin_driver, get_account_driver
    prov = new_machine.provider
    accounts = get_account_driver(prov)
    if not accounts:
        print "Aborting import: Could not retrieve Account Driver "\
              "for Provider %s" % prov
        return
    admin_driver = get_admin_driver(prov)
    if not admin_driver:
        print "Aborting import: Could not retrieve admin_driver "\
              "for Provider %s" % prov
        return
    img = accounts.get_image(new_machine.identifier)
    tenant_list = machine_request.get_access_list()
    # All in the list will be added as 'sharing' the OStack img
    # All tenants already sharing the OStack img will be added to this list
    tenant_list = sync_image_access_list(accounts, img, names=tenant_list)
    # Make private on the DB level
    make_private(accounts.image_manager, img, new_machine, tenant_list)
def upload_privacy_data(machine_request, new_machine):
    from service.accounts.openstack import AccountDriver as OSAccounts
    from service.driver import get_admin_driver
    prov = new_machine.provider
    accounts = OSAccounts(prov)
    if not accounts:
        print "Aborting import: Could not retrieve OSAccounts driver "\
              "for Provider %s" % prov
        return
    admin_driver = get_admin_driver(prov)
    if not admin_driver:
        print "Aborting import: Could not retrieve admin_driver "\
              "for Provider %s" % prov
        return
    img = accounts.image_manager.get_image(new_machine.identifier)
    tenant_list = machine_request.get_access_list()
    # All in the list will be added as 'sharing' the OStack img
    # All tenants already sharing the OStack img will be added to this list
    tenant_list = sync_image_access_list(accounts, img, names=tenant_list)
    # Make private on the DB level
    make_private(accounts.image_manager, img, new_machine, tenant_list)
def get(self, request, provider_id):
    """
    Returns occupancy data for the specific provider.
    """
    # Get meta for provider to call occupancy
    try:
        provider = Provider.objects.get(id=provider_id)
    except Provider.DoesNotExist:
        errorObj = failureJSON([{
            'code': 404,
            'message': 'The provider does not exist.'}])
        return Response(errorObj, status=status.HTTP_404_NOT_FOUND)
    admin_driver = get_admin_driver(provider)
    meta_driver = admin_driver.meta(admin_driver=admin_driver)
    esh_size_list = meta_driver.occupancy()
    core_size_list = [convert_esh_size(size, provider_id)
                      for size in esh_size_list]
    serialized_data = ProviderSizeSerializer(core_size_list, many=True).data
    return Response(serialized_data)
def get(self, request, provider_uuid):
    """
    Returns occupancy data for the specific provider.
    """
    try:
        provider = Provider.get_active(provider_uuid)
    except Provider.DoesNotExist:
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            "The provider does not exist.")
    admin_driver = get_admin_driver(provider)
    if not admin_driver:
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            "The driver cannot be retrieved for this provider.")
    meta_driver = admin_driver.meta(admin_driver=admin_driver)
    esh_size_list = meta_driver.occupancy()
    core_size_list = [convert_esh_size(size, provider_uuid)
                      for size in esh_size_list]
    serialized_data = ProviderSizeSerializer(core_size_list, many=True).data
    return Response(serialized_data)
def get_all_instances():
    from core.models import Identity, Provider
    from api import get_esh_driver
    from service.driver import get_admin_driver
    all_instances = []
    for provider in Provider.objects.all():
        if not provider.is_active():
            # TODO: Optionally we could ensure that anyone using this
            # provider has their times end-dated...
            # Nothing to add...
            continue
        try:
            admin_driver = get_admin_driver(provider)
            if not admin_driver:
                raise Exception("No account admins for provider %s"
                                % provider)
            meta_driver = admin_driver.meta(admin_driver=admin_driver)
            all_instances.extend(meta_driver.all_instances())
        except Exception:
            logger.exception("Problem accessing all "
                             "instances for provider: %s" % provider)
    return all_instances
def get_instance_owner_map(provider):
    """
    All keys == All identities
    """
    admin_driver = get_admin_driver(provider)
    meta = admin_driver.meta(admin_driver=admin_driver)
    all_identities = provider.identity_set.all()
    logger.info("Retrieving all tenants..")
    all_tenants = admin_driver._connection._keystone_list_tenants()
    logger.info("Retrieved %s tenants. Retrieving all instances.."
                % len(all_tenants))
    all_instances = meta.all_instances()
    logger.info("Retrieved %s instances." % len(all_instances))
    # Convert instance.owner from tenant-id to tenant-name all at once
    all_instances = _convert_tenant_id_to_names(all_instances, all_tenants)
    logger.info("Owner information added.")
    # Make a mapping of owner-to-instance
    instance_map = _make_instance_owner_map(all_instances)
    logger.info("Instance owner map created")
    identity_map = _include_all_idents(all_identities, instance_map)
    logger.info("Identity map created")
    return identity_map
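# A hedged usage sketch (not part of the original source) for
# get_instance_owner_map. '_example_report_instance_counts' is hypothetical,
# and it assumes the returned map is keyed by owner name with a list of
# instances per key (as suggested by _make_instance_owner_map above).
def _example_report_instance_counts(provider):
    identity_map = get_instance_owner_map(provider)
    for owner, instance_list in identity_map.items():
        logger.info("%s owns %s instances" % (owner, len(instance_list)))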
def main():
    parser = argparse.ArgumentParser(
        description="Remove any instance metadata where tmp_status is "
                    "set to networking.")
    parser.add_argument("-p", "--provider", required=True, type=int,
                        help="Database id for a provider.")
    args = parser.parse_args()
    p = Provider.objects.get(id=args.provider)
    admin_driver = get_admin_driver(p)
    print "Retrieving all instances for %s." % (p)
    meta = admin_driver.meta(admin_driver=admin_driver)
    instances = meta.all_instances()
    bad_instances = [i for i in instances
                     if i.extra.get("metadata")
                     and i.extra["metadata"].get("tmp_status")
                     and i.extra["metadata"]["tmp_status"] == "networking"]
    for i in bad_instances:
        print "Removing networking metadata for %s" % (i)
        admin_driver._connection.ex_write_metadata(
            i, {"tmp_status": ""}, replace_metadata=False)
def main():
    parser = argparse.ArgumentParser(
        description="Remove any instance metadata where tmp_status is "
                    "set to networking.")
    parser.add_argument("-p", "--provider", required=True, type=int,
                        help="Database id for a provider.")
    args = parser.parse_args()
    p = Provider.objects.get(id=args.provider)
    admin_driver = get_admin_driver(p)
    print "Retrieving all instances for %s." % (p)
    meta = admin_driver.meta(admin_driver=admin_driver)
    instances = meta.all_instances()
    bad_instances = [
        i for i in instances
        if i.extra.get("metadata")
        and i.extra["metadata"].get("tmp_status")
        and i.extra["metadata"]["tmp_status"] == "networking"
    ]
    for i in bad_instances:
        print "Removing networking metadata for %s" % (i)
        admin_driver._connection.ex_set_metadata(
            i, {"tmp_status": ""}, replace_metadata=False)
def get(self, request, provider_uuid):
    try:
        provider = Provider.get_active(provider_uuid)
    except Provider.DoesNotExist:
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            "The provider does not exist.")
    admin_driver = get_admin_driver(provider)
    if not admin_driver:
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            "The driver cannot be retrieved for this provider.")
    if not hasattr(admin_driver._connection, "ex_hypervisor_statistics"):
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            "Occupancy statistics cannot be retrieved for this provider.")
    try:
        stats = admin_driver._connection.ex_hypervisor_statistics()
        return Response(stats)
    except (socket_error, ConnectionFailure):
        return connection_failure(provider_uuid)
    except Exception as exc:
        return failure_response(
            status.HTTP_503_SERVICE_UNAVAILABLE,
            "Error occurred while retrieving statistics: %s" % exc)
def new_admin_driver(self):
    from service.driver import get_admin_driver
    return get_admin_driver(self.new_machine_provider)
def _set_admin_driver():
    global _admin_driver
    _admin_driver = get_admin_driver(_get_provider())
def get_admin_driver(self):
    from service import driver
    old_provider = self.get_admin_provider()
    admin_driver = driver.get_admin_driver(old_provider)
    return admin_driver
def start_machine_imaging(machine_request, delay=False):
    """
    Builds up a machine imaging task using core.models.machine_request
    delay - If true, wait until task is completed before returning
    """
    new_status, _ = StatusType.objects.get_or_create(name="started")
    machine_request.status = new_status
    machine_request.save()
    original_status = machine_request.old_status
    last_run_error, original_status = _recover_from_error(original_status)
    if last_run_error:
        machine_request.old_status = original_status
        machine_request.save()
    instance_id = machine_request.instance.provider_alias
    (orig_managerCls, orig_creds,
     dest_managerCls, dest_creds) = machine_request.prepare_manager()
    imaging_args = machine_request.get_imaging_args()
    admin_driver = get_admin_driver(machine_request.new_machine_provider)
    admin_ident = machine_request.new_admin_identity()
    imaging_error_task = machine_request_error.s(machine_request.id)
    # Task 2 = Imaging w/ Chromogenic
    imaging_task = _get_imaging_task(orig_managerCls, orig_creds,
                                     dest_managerCls, dest_creds,
                                     imaging_args)
    imaging_task.link_error(imaging_error_task)
    # Assume we are starting from the beginning.
    init_task = imaging_task
    # Task 2 = Process the machine request
    if 'processing' in original_status:
        # If processing, start here..
        image_id = original_status.replace("processing - ", "")
        logger.info("Start with processing:%s" % image_id)
        process_task = process_request.s(image_id, machine_request.id)
        init_task = process_task
    else:
        # Link from imaging to process..
        process_task = process_request.s(machine_request.id)
        imaging_task.link(process_task)
    process_task.link_error(imaging_error_task)
    # Task 3 = Validate the new image by launching an instance
    if 'validating' in original_status:
        image_id = machine_request.new_machine.identifier
        celery_logger.info("Start with validating:%s" % image_id)
        # If validating, seed the image_id and start here..
        validate_task = validate_new_image.s(image_id, machine_request.id)
        init_task = validate_task
    else:
        validate_task = validate_new_image.s(machine_request.id)
        process_task.link(validate_task)
    # Task 4 = Wait for new instance to be 'active'
    wait_for_task = wait_for_instance.s(
        # NOTE: 1st arg, instance_id, passed from last task.
        admin_driver.__class__,
        admin_driver.provider,
        admin_driver.identity,
        "active",
        return_id=True)
    validate_task.link(wait_for_task)
    validate_task.link_error(imaging_error_task)
    # Task 5 = Terminate the new instance on completion
    destroy_task = destroy_instance.s(admin_ident.created_by,
                                      admin_ident.uuid)
    wait_for_task.link(destroy_task)
    wait_for_task.link_error(imaging_error_task)
    # Task 6 - Finally, email the user that their image is ready!
    # NOTE: si == Ignore the result of the last task.
    email_task = imaging_complete.si(machine_request.id)
    destroy_task.link_error(imaging_error_task)
    destroy_task.link(email_task)
    email_task.link_error(imaging_error_task)
    # Set status to imaging ONLY if our initial task is the imaging task.
    if init_task == imaging_task:
        machine_request.old_status = 'imaging'
        machine_request.save()
    # Start the task.
    async = init_task.apply_async()
    if delay:
        async.get()
    return async
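# An illustrative call (not part of the original source):
# start_machine_imaging resumes from old_status ('processing'/'validating')
# when possible, and delay=True blocks on the celery chain via async.get().
# '_example_requeue_machine_request' is a hypothetical wrapper.
def _example_requeue_machine_request(machine_request):
    return start_machine_imaging(machine_request, delay=True)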
def _get_cached_admin_driver(provider, force=True):
    if not admin_drivers.get(provider) or force:
        admin_drivers[provider] = get_admin_driver(provider)
    return admin_drivers[provider]
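# A hedged usage sketch for _get_cached_admin_driver (not part of the original
# source): 'admin_drivers' is assumed to be a module-level dict used as the
# cache. Note that force defaults to True above, so callers must pass
# force=False to actually reuse a cached driver instead of rebuilding it.
admin_drivers = {}

def _example_list_sizes_cached(provider):
    driver = _get_cached_admin_driver(provider, force=False)
    return driver.list_sizes()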