def get_cached_machine(provider_alias, provider_id):
    """Look up a ProviderMachine in the module-level cache.

    Lazily populates the cache on first use and logs a warning when the
    requested (provider_id, alias) pair is not present.
    """
    if not ProviderMachine.cached_machines:
        build_cached_machines()
    cache_key = (int(provider_id), provider_alias)
    machine = ProviderMachine.cached_machines.get(cache_key)
    if not machine:
        logger.warn("Cache does not have machine %s on provider %s"
                    % (provider_alias, provider_id))
    return machine
def get(self, request, provider_uuid, identity_uuid, action=None):
    """Run a read-only meta action (e.g. 'test_links') on the provider.

    Returns 400 without an action, 409 when driver preparation fails,
    401 on bad credentials and 404 when the action is unsupported.
    """
    if not action:
        return failure_response(
            status.HTTP_400_BAD_REQUEST,
            'Action is not supported.'
        )
    try:
        esh_driver = prepare_driver(request, provider_uuid, identity_uuid)
    except ProviderNotActive as pna:
        return inactive_provider(pna)
    except Exception as e:
        return failure_response(
            status.HTTP_409_CONFLICT,
            e.message)
    if not esh_driver:
        return invalid_creds(provider_uuid, identity_uuid)
    esh_meta = esh_driver.meta()
    try:
        if 'test_links' in action:
            test_links = esh_meta.test_links()
            return Response(test_links, status=status.HTTP_200_OK)
    except LibcloudInvalidCredsError:
        logger.warn('Authentication Failed. Provider-id:%s Identity-id:%s'
                    % (provider_uuid, identity_uuid))
        return failure_response(
            status.HTTP_401_UNAUTHORIZED,
            'Identity/Provider Authentication Failed')
    # BUG FIX: `except NotImplemented` named the NotImplemented constant,
    # which is NOT an exception class -- a raised NotImplementedError was
    # never caught here.
    except NotImplementedError as ne:
        logger.exception(ne)
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            'The requested resource %s is not available on this provider'
            % action)
def get(self, request, provider_id=None, identity_id=None, action=None):
    """Run a read-only meta action (e.g. 'test_links') on the provider.

    Legacy variant returning failureJSON payloads instead of
    failure_response helpers.
    """
    if not action:
        errorObj = failureJSON([{
            'code': 400,
            'message': 'Action is not supported.'}])
        return Response(errorObj, status=status.HTTP_400_BAD_REQUEST)
    esh_driver = prepare_driver(request, identity_id)
    esh_meta = esh_driver.meta()
    try:
        if 'test_links' in action:
            test_links = esh_meta.test_links()
            return Response(test_links, status=status.HTTP_200_OK)
    except InvalidCredsError:
        logger.warn('Authentication Failed. Provider-id:%s Identity-id:%s'
                    % (provider_id, identity_id))
        errorObj = failureJSON([{
            'code': 401,
            'message': 'Identity/Provider Authentication Failed'}])
        return Response(errorObj, status=status.HTTP_401_UNAUTHORIZED)
    # BUG FIX: `except NotImplemented, ne` used the removed py2 comma
    # syntax AND named the NotImplemented constant, which is not an
    # exception class; NotImplementedError is what actually gets raised.
    except NotImplementedError as ne:
        logger.exception(ne)
        errorObj = failureJSON([{
            'code': 404,
            'message': 'The requested resource %s is not available on this'
                       ' provider' % action}])
        return Response(errorObj, status=status.HTTP_404_NOT_FOUND)
def set_instance_from_metadata(esh_driver, core_instance):
    """
    NOT BEING USED ANYMORE.. DEPRECATED..
    """
    # Fixes Dep. loop - Do not remove
    from api.serializers import InstanceSerializer
    # Breakout for drivers (Eucalyptus) that don't support metadata
    if not hasattr(esh_driver._connection, 'ex_get_metadata'):
        return core_instance
    try:
        esh_instance = esh_driver.get_instance(core_instance.provider_alias)
        if not esh_instance:
            return core_instance
        metadata = esh_driver._connection.ex_get_metadata(esh_instance)
    except Exception:
        logger.exception("Exception retrieving instance metadata for %s"
                         % core_instance.provider_alias)
        return core_instance
    # TODO: Match with actual instance launch metadata in service/instance.py
    # TODO: Probably best to redefine serializer as InstanceMetadataSerializer
    # TODO: Define a creator and their identity by the METADATA instead of
    # assuming its the person who 'found' the instance
    serializer = InstanceSerializer(core_instance, data=metadata,
                                    partial=True)
    if not serializer.is_valid():
        logger.warn("Encountered errors serializing metadata:%s"
                    % serializer.errors)
        return core_instance
    updated_instance = serializer.save()
    updated_instance.esh = esh_instance
    return updated_instance
def get_last_history(self):
    """
    Returns the newest InstanceStatusHistory
    """
    # FIXME: Clean up this implementation OR rename to `get_or_create`
    # FIXME: Move this call so that it happens inside
    # InstanceStatusHistory to avoid circ.dep.
    newest = self.instancestatushistory_set.order_by('-start_date').first()
    if newest:
        return newest
    # No history yet -- synthesize an 'Unknown' placeholder record.
    unknown_size, _ = Size.objects.get_or_create(
        name='Unknown Size', alias='N/A', provider=self.provider,
        cpu=-1, mem=-1, root=-1, disk=-1)
    first_history = self._build_first_history(
        'Unknown', unknown_size, self.start_date, self.end_date, True)
    logger.warn("No history existed for %s until now. "
                "An 'Unknown' history was created" % self)
    return first_history
def delete(self, request, provider_id, identity_id, instance_id):
    """Authentication Required, TERMINATE the instance.

    Be careful, there is no going back once you've deleted an instance.
    """
    user = request.user
    esh_driver = prepare_driver(request, provider_id, identity_id)
    if not esh_driver:
        return invalid_creds(provider_id, identity_id)
    try:
        esh_instance = esh_driver.get_instance(instance_id)
        if not esh_instance:
            return instance_not_found(instance_id)
        task.destroy_instance_task(esh_instance, identity_id)
        # Re-fetch: the instance may linger while the delete is queued.
        refreshed = esh_driver.get_instance(instance_id)
        if refreshed:
            #Instance will be deleted soon...
            esh_instance = refreshed
            if esh_instance.extra and 'task' not in esh_instance.extra:
                esh_instance.extra['task'] = 'queueing delete'
        core_instance = convert_esh_instance(esh_driver, esh_instance,
                                             provider_id, identity_id,
                                             user)
        if core_instance:
            core_instance.end_date_all()
        else:
            logger.warn("Unable to find core instance %s." % (instance_id))
        serialized_data = InstanceSerializer(
            core_instance, context={"request": request}).data
        response = Response(serialized_data, status=status.HTTP_200_OK)
        response['Cache-Control'] = 'no-cache'
        return response
    except InvalidCredsError:
        return invalid_creds(provider_id, identity_id)
def update_instance_metadata(esh_driver, esh_instance, data=None, replace=True):
    """Set metadata key/values on a running instance.

    NOTE: This will NOT WORK for TAGS until openstack allows JSONArrays
    as values for metadata!

    Returns {} when there is nothing to do (no instance, or the driver
    lacks metadata support); raises while the instance is still building,
    since metadata writes are rejected in that state.
    """
    # BUG FIX: avoid the shared mutable default argument `data={}`.
    if data is None:
        data = {}
    if not esh_instance:
        return {}
    if not hasattr(esh_driver._connection, 'ex_set_metadata'):
        logger.warn("EshDriver %s does not have function 'ex_set_metadata'"
                    % esh_driver._connection.__class__)
        return {}
    if esh_instance.extra['status'] == 'build':
        raise Exception("Metadata cannot be applied while EshInstance %s is in"
                        " the build state." % (esh_instance,))
    # ASSERT: We are ready to update the metadata
    if data.get('name'):
        esh_driver._connection.ex_set_server_name(esh_instance, data['name'])
    try:
        return esh_driver._connection.ex_set_metadata(
            esh_instance, data, replace_metadata=replace)
    except Exception as e:
        logger.exception("Error updating the metadata")
        # Best-effort: swallow the provider's "incapable" refusal only.
        if 'incapable of performing the request' in e.message:
            return {}
        raise
def _send_instance_email(driverCls, provider, identity, instance_id):
    """Task body: email the owner about a newly-launched instance."""
    try:
        logger.debug("_send_instance_email task started at %s."
                     % datetime.now())
        driver = get_driver(driverCls, provider, identity)
        instance = driver.get_instance(instance_id)
        if not instance:
            #Breakout if instance has been deleted at this point
            logger.debug("Instance has been teminated: %s." % instance_id)
            return
        username = identity.user.username
        profile = UserProfile.objects.get(user__username=username)
        if not profile.send_emails:
            #Only send emails if allowed by profile setting
            logger.debug("User %s elected NOT to receive new instance emails"
                         % username)
        else:
            created = datetime.strptime(instance.extra['created'],
                                        "%Y-%m-%dT%H:%M:%SZ")
            send_instance_email(username, instance.id, instance.name,
                                instance.ip, created, username)
        logger.debug("_send_instance_email task finished at %s."
                     % datetime.now())
    except Exception as exc:
        logger.warn(exc)
        _send_instance_email.retry(exc=exc)
def has_permission(self, request, view):
    """Grant access only when the requesting user belongs to a group
    that owns a project with the URL's `project_uuid` kwarg."""
    project_uuid = view.kwargs.get("project_uuid")
    if not project_uuid:
        logger.warn("Could not find kwarg:'project_uuid'")
        return False
    for group in request.user.group_set.all():
        if group.projects.filter(uuid=project_uuid):
            return True
    return False
def invalid_provider(provider_id):
    """Log and build a 401 response for an unusable provider."""
    message = ('Provider %s is inactive, disabled, or does not exist.'
               % (provider_id, ))
    logger.warn(message)
    return failure_response(status.HTTP_401_UNAUTHORIZED, message)
def lookupEmail(userid): """ Grabs email for the user based on LDAP attrs """ try: logger.debug(type(userid)) if isinstance(userid, WSGIRequest): raise Exception("WSGIRequest invalid.") attr = _search_ldap(userid) emailaddr = attr[0][1]["mail"][0] return emailaddr except Exception as e: logger.warn("Error occurred looking up email for user: %s" % userid) logger.exception(e) import traceback import sys import inspect s = inspect.stack() for i in range(0, 4): logger.debug(s[i]) etype, value, tb = sys.exc_info() logger.error("TB = %s" % traceback.format_tb(tb)) return None
def validate_new_image(image_id, machine_request_id):
    """Launch a small test instance from a freshly-created image.

    Returns the launched instance's id, or False when the admin driver
    or admin identity required for validation is unavailable.
    """
    request = MachineRequest.objects.get(id=machine_request_id)
    request.status = 'validating'
    request.save()
    from service.instance import launch_esh_instance
    driver = request.new_admin_driver()
    ident = request.new_admin_identity()
    if not driver:
        logger.warn("Need admin_driver functionality to auto-validate instance")
        return False
    if not ident:
        logger.warn("Need to know the AccountProvider to auto-validate instance")
        return False
    # Launch under the admin account on behalf of the image owner.
    driver.identity.user = ident.created_by
    test_machine = driver.get_machine(image_id)
    smallest_size = driver.list_sizes()[0]
    instance, _token, _password = launch_esh_instance(
        driver, test_machine.id, smallest_size.id, ident,
        'Automated Image Verification - %s' % image_id,
        'atmoadmin', using_admin=True)
    return instance.id
def invalid_provider_identity(provider_id, identity_id):
    """Log and build a 401 response for an unusable identity."""
    message = ('Identity %s is inactive, disabled, '
               'or does not exist on Provider %s'
               % (identity_id, provider_id))
    logger.warn(message)
    return failure_response(status.HTTP_401_UNAUTHORIZED, message)
def put(self, request, provider_id, identity_id, machine_id):
    """Full update of a machine's application info (owner/staff only).

    TODO: Determine who is allowed to edit machines besides
    coreMachine.owner
    """
    user = request.user
    data = request.DATA
    esh_driver = prepare_driver(request, identity_id)
    esh_machine = esh_driver.get_machine(machine_id)
    coreMachine = convert_esh_machine(esh_driver, esh_machine, provider_id)
    # BUG FIX: `user is not created_by` compared object identity; use
    # inequality so an equal-but-distinct User object still counts as
    # the owner.
    if not user.is_staff and user != coreMachine.application.created_by:
        logger.warn('Non-staff/non-owner trying to update a machine')
        errorObj = failureJSON([{
            'code': 401,
            'message': 'Only Staff and the machine Owner '
                       'are allowed to change machine info.'}])
        return Response(errorObj, status=status.HTTP_401_UNAUTHORIZED)
    coreMachine.application.update(data)
    serializer = ProviderMachineSerializer(coreMachine, data=data,
                                           partial=True)
    if serializer.is_valid():
        logger.info('metadata = %s' % data)
        update_machine_metadata(esh_driver, esh_machine, data)
        serializer.save()
        logger.info(serializer.data)
        return Response(serializer.data)
    return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def run(self, node, client): """ Server-side logging Optional Param: attempts - # of times to retry in the event of a Non-Zero exit status(code) """ attempt = 0 retry_time = 0 while attempt < self.attempts: node = super(LoggedScriptDeployment, self).run(node, client) if self.exit_status == 0: break attempt += 1 retry_time = 2 * 2**attempt # 4,8,16.. logger.debug( "WARN: Script %s on Node %s is non-zero." " Will re-try in %s seconds. Attempt: %s/%s" % (node.id, self.name, retry_time, attempt, self.attempts)) time.sleep(retry_time) if self.stdout: logger.debug('%s (%s)STDOUT: %s' % (node.id, self.name, self.stdout)) if self.stderr: logger.warn('%s (%s)STDERR: %s' % (node.id, self.name, self.stderr)) return node
def add_usergroup(self, username, password, createUser=True, adminRole=False):
    """
    Create a group for this user only then create the user
    TODO: drop createUser -- ignored!
    """
    #Create user
    try:
        user = self.create_user(username, password, username)
    except ClientException as user_exists:
        logger.debug('Received Error %s on add, User exists.' % user_exists)
        user = self.get_user(username)
    logger.debug("Assign project:%s Member:%s Role:%s"
                 % (username, username, adminRole))
    #Create project for user/group
    project = self.get_project(username)
    if not project:
        project = self.create_project(username)
    # Check the user has been given an appropriate role
    admin_role_name = "admin"
    role_name = admin_role_name if adminRole else "_member_"
    try:
        role = self.add_project_membership(username, username, role_name)
    except ClientException:
        logger.warn('Could not assign role to username %s' % username)
        # BUG FIX: `role` was left unbound on failure, so the return
        # below raised NameError instead of reporting the failure.
        role = None
    self.include_admin(username, admin_role_name)
    return (project, user, role)
def validate_new_image(image_id, machine_request_id):
    """Kick off an automated verification launch for a new image.

    Returns the launched instance id, or False when the admin driver or
    admin identity required for validation is unavailable.
    """
    request = MachineRequest.objects.get(id=machine_request_id)
    request.status = "validating"
    request.save()
    from service.instance import launch_esh_instance
    driver = request.new_admin_driver()
    ident = request.new_admin_identity()
    if not driver:
        logger.warn("Need admin_driver functionality to auto-validate instance")
        return False
    if not ident:
        logger.warn("Need to know the AccountProvider to auto-validate instance")
        return False
    # Update the admin driver's User (Cannot be initialized via. Chromogenic)
    driver.identity.user = ident.created_by
    # Update metadata on rtwo/libcloud machine -- NOT a glance machine
    test_machine = driver.get_machine(image_id)
    smallest_size = driver.list_sizes()[0]
    instance_id, _token, _password = launch_esh_instance(
        driver,
        test_machine.id,
        smallest_size.id,
        ident,
        "Automated Image Verification - %s" % image_id,
        "atmoadmin",
        using_admin=True,
    )
    return instance_id
def _update_machine(self, request, provider_uuid, identity_uuid, machine_id):
    """Shared implementation for machine updates (PATCH => partial).

    TODO: Determine who is allowed to edit machines besides
    core_machine.owner
    """
    user = request.user
    data = request.DATA
    esh_driver = prepare_driver(request, provider_uuid, identity_uuid)
    if not esh_driver:
        return invalid_creds(provider_uuid, identity_uuid)
    esh_machine = esh_driver.get_machine(machine_id)
    core_machine = convert_esh_machine(esh_driver, esh_machine,
                                       provider_uuid, user)
    owner = core_machine.application_version.application.created_by
    # BUG FIX: `user is not owner` compared object identity; use `!=`
    # so an equal-but-distinct User object still counts as the owner.
    if not user.is_staff and user != owner:
        logger.warn("%s is Non-staff/non-owner trying to update a machine"
                    % (user.username))
        return failure_response(
            status.HTTP_401_UNAUTHORIZED,
            "Only Staff and the machine Owner "
            "are allowed to change machine info."
        )
    partial_update = True if request.method == "PATCH" else False
    serializer = ProviderMachineSerializer(
        core_machine,
        request_user=request.user,
        data=data,
        partial=partial_update
    )
    if serializer.is_valid():
        logger.info("metadata = %s" % data)
        update_machine_metadata(esh_driver, esh_machine, data)
        machine = serializer.save()
        if "created_by_identity" in request.DATA:
            identity = machine.created_by_identity
            update_application_owner(
                core_machine.application_version.application, identity)
        logger.info(serializer.data)
        return Response(serializer.data)
    return failure_response(status.HTTP_400_BAD_REQUEST, serializer.errors)
def deploy_init_to(driverCls, provider, identity, instance_id,
                   username=None, password=None, redeploy=False,
                   *args, **kwargs):
    """Task body: queue the deployment chain for a launched instance."""
    try:
        logger.debug("deploy_init_to task started at %s." % datetime.now())
        driver = get_driver(driverCls, provider, identity)
        instance = driver.get_instance(instance_id)
        if not instance:
            logger.debug("Instance has been teminated: %s." % instance_id)
            return
        # Result intentionally unused; the lookup is kept as-is.
        driver._connection.ex_get_image_metadata(instance.machine)
        deploy_chain = get_deploy_chain(driverCls, provider, identity,
                                        instance, username, password,
                                        redeploy)
        deploy_chain.apply_async()
        #Can be really useful when testing.
        #if kwargs.get('delay'):
        #    async.get()
        logger.debug("deploy_init_to task finished at %s." % datetime.now())
    except SystemExit:
        logger.exception("System Exits are BAD! Find this and get rid of it!")
        raise Exception("System Exit called")
    except NonZeroDeploymentException:
        # Deployment failures propagate so celery can retry the chain.
        raise
    except Exception as exc:
        logger.warn(exc)
        deploy_init_to.retry(exc=exc)
def malformed_response(provider_id, identity_id):
    """Log and build a 500 response for an unparseable cloud reply."""
    logger.warn('Server provided bad response. Provider-id:%s Identity-id:%s'
                % (provider_id, identity_id))
    message = ("Cloud Communications Error --"
               " Contact your Cloud Administrator OR try again later!")
    return failure_response(status.HTTP_500_INTERNAL_SERVER_ERROR, message)
def patch(self, request, provider_id, identity_id, machine_id):
    """Partial update of a machine's application info (owner/staff only).

    TODO: Determine who is allowed to edit machines besides
    core_machine.owner
    """
    user = request.user
    data = request.DATA
    esh_driver = prepare_driver(request, provider_id, identity_id)
    if not esh_driver:
        return invalid_creds(provider_id, identity_id)
    esh_machine = esh_driver.get_machine(machine_id)
    core_machine = convert_esh_machine(esh_driver, esh_machine, provider_id)
    # BUG FIX: `user is not created_by` compared object identity; use
    # inequality so an equal-but-distinct User object still counts as
    # the owner.
    if not user.is_staff and user != core_machine.application.created_by:
        logger.warn('%s is Non-staff/non-owner trying to update a machine'
                    % (user.username))
        return failure_response(
            status.HTTP_401_UNAUTHORIZED,
            "Only Staff and the machine Owner "
            "are allowed to change machine info.")
    core_machine.application.update(request.DATA)
    serializer = ProviderMachineSerializer(core_machine,
                                           data=data, partial=True)
    if serializer.is_valid():
        logger.info('metadata = %s' % data)
        update_machine_metadata(esh_driver, esh_machine, data)
        serializer.save()
        logger.info(serializer.data)
        return Response(serializer.data)
    return failure_response(
        status.HTTP_400_BAD_REQUEST,
        serializer.errors)
def _update_volume_metadata(esh_driver, esh_volume, metadata={}): """ NOTE: This will NOT WORK for TAGS until openstack allows JSONArrays as values for metadata! NOTE: This will NOT replace missing metadata tags.. ex: Start: ('a':'value','c':'value') passed: c=5 End: ('a':'value', 'c':5) """ if not esh_volume: return {} if not hasattr(esh_driver._connection, 'ex_update_volume_metadata'): logger.warn( "EshDriver %s does not have function 'ex_update_volume_metadata'" % esh_driver._connection.__class__) return {} data = esh_volume.extra.get('metadata', {}) data.update(metadata) try: return esh_driver._connection.ex_update_volume_metadata( esh_volume, data) except Exception as e: logger.exception("Error updating the metadata") if 'incapable of performing the request' in e.message: return {} else: raise
def get_allocation_result_for(
        provider, username, print_logs=False, start_date=None,
        end_date=None):
    """
    Given provider and username:
    * Find the correct identity for the user
    * Create 'Allocation' using core representation
    * Calculate the 'AllocationResult' and return both
    """
    identity = _get_identity_from_tenant_name(provider, username)
    # Attempt to run through the allocation engine
    try:
        allocation_result = _get_allocation_result(
            identity, start_date, end_date,
            print_logs=print_logs)
        logger.debug("Result for Username %s: %s"
                     % (username, allocation_result))
        return allocation_result
    except IdentityMembership.DoesNotExist:
        # BUG FIX: adjacent literals lacked a separating space, logging
        # "...does nothave IdentityMembership...".
        logger.warn(
            "WARNING: User %s does not "
            "have IdentityMembership on this database" % (username, ))
        return _empty_allocation_result()
    except:
        # Bare except deliberately logs then re-raises (also catches
        # non-Exception interrupts).
        logger.exception("Unable to monitor Identity:%s" % (identity,))
        raise
def validate(self, core_identity):
    """Ensure the identity carries the 'router_name' credential.

    Raises when it is missing (a user network cannot be created
    without it); returns True otherwise.
    """
    identity_creds = core_identity.get_all_credentials()
    if 'router_name' not in identity_creds:
        # BUG FIX: adjacent literals lacked a separating space and
        # logged "missing:cannot create...".
        logger.warn("Credential 'router_name' missing: "
                    "cannot create user network")
        raise Exception("Identity %s has not been assigned a 'router_name'"
                        % core_identity)
    return True
def glance_image_owner(provider_uuid, identifier, glance_image=None):
    """Resolve the Identity that owns a glance image, or None."""
    try:
        prov = Provider.objects.get(uuid=provider_uuid)
        accounts = get_account_driver(prov)
        if not glance_image:
            accounts.clear_cache()
            glance_image = accounts.get_image(identifier)
        project = accounts.user_manager.get_project_by_id(
            glance_image.get('owner'))
    except Exception as e:
        logger.exception(e)
        project = None
    if not project:
        return None
    try:
        return Identity.objects.get(
            provider__uuid=provider_uuid,
            created_by__username=project.name)
    except Identity.DoesNotExist:
        logger.warn(
            "Could not find a username %s on Provider %s"
            % (project.name, provider_uuid))
        return None
def validate_new_image(image_id, machine_request_id):
    """Launch a verification instance for a newly-imaged machine request.

    Returns the launched instance's id, or False when the admin driver
    or admin identity required for validation is unavailable.
    """
    request = MachineRequest.objects.get(id=machine_request_id)
    new_status, _ = StatusType.objects.get_or_create(name="validating")
    request.status = new_status
    request.old_status = 'validating'
    request.save()
    accounts = get_account_driver(request.new_machine.provider)
    accounts.clear_cache()
    from service.instance import launch_machine_instance
    driver = accounts.admin_driver
    ident = request.new_admin_identity()
    if not driver:
        logger.warn(
            "Need admin_driver functionality to auto-validate instance")
        return False
    if not ident:
        logger.warn(
            "Need to know the AccountProvider to auto-validate instance")
        return False
    # Launch under the admin account on behalf of the image owner.
    driver.identity.user = ident.created_by
    test_machine = driver.get_machine(image_id)
    smallest_size = driver.list_sizes()[0]
    instance = launch_machine_instance(
        driver, ident, test_machine, smallest_size,
        'Automated Image Verification - %s' % image_id,
        username='******', using_admin=True)
    return instance.id
def get_identity_list(user, provider=None):
    """
    Given the (request) user
    return all identities on all active providers
    """
    try:
        group = Group.objects.get(name=user.username)
        if provider:
            identity_list = group.identities.filter(
                provider=provider,
                #Active providers only
                provider__active=True)
        else:
            identity_list = group.identities.filter(
                #Non-end dated providers as search base only
                only_current_provider(),
                #Active providers only
                provider__active=True)
        return identity_list
    except Group.DoesNotExist:
        logger.warn("Group %s DoesNotExist" % user.username)
        return None
    except CoreIdentity.DoesNotExist:
        # BUG FIX: this handler referenced an undefined `identity_uuid`,
        # raising NameError instead of logging; log the user instead.
        logger.warn("Identity for user %s DoesNotExist" % user.username)
        return None
def find_instance(instance_id):
    """Return the first Instance matching provider_alias, or None.

    Logs a warning when the alias is ambiguous (multiple matches).
    """
    matches = Instance.objects.filter(provider_alias=instance_id)
    if len(matches) > 1:
        logger.warn("Multiple instances returned for instance_id - %s"
                    % instance_id)
    return matches[0] if matches else None
def connection_failure(provider_id, identity_id):
    """Log repeated connection failures and build a 504 response."""
    logger.warn('Multiple Connection Attempts Failed. '
                'Provider-id:%s Identity-id:%s'
                % (provider_id, identity_id))
    message = ('Multiple connection attempts to the provider %s have failed.'
               ' Please try again later.' % provider_id)
    return failure_response(status.HTTP_504_GATEWAY_TIMEOUT, message)
def validate_token(token, request=None):
    """
    Validates the token attached to the request (SessionStorage, GET/POST)
    If token has expired, CAS will attempt to reauthenticate the user
    and refresh token.
    Expired Tokens can be used for GET requests ONLY!
    """
    #Existence test
    try:
        auth_token = AuthToken.objects.get(key=token)
        user = auth_token.user
    except AuthToken.DoesNotExist:
        return False
    if not auth_token.is_expired():
        return True
    # BUG FIX: only POST triggered CAS revalidation, so PUT/PATCH/DELETE
    # slipped through on an expired token; per the docstring only GET
    # may proceed without reauthentication.
    if request and request.META['REQUEST_METHOD'] != 'GET':
        user_to_auth = request.session.get('emulated_by', user)
        if cas_validateUser(user_to_auth):
            # Reauthenticated -- refresh the token's expiration.
            auth_token.update_expiration()
            auth_token.save()
            return True
        logger.warn("Could not reauthenticate user")
        return False
    # Expired token on a GET request -- allowed.
    return True
def approve_quota(self, request_id):
    """
    Approves the quota request and updates the request
    """
    super(IdentityMembership, self).save()
    try:
        from service.tasks.admin import set_provider_quota,\
            set_resource_request_failed, close_resource_request
        set_provider_quota.apply_async(
            args=[self.identity.uuid],
            link=close_resource_request.s(request_id),
            link_error=set_resource_request_failed.s(request_id))
    except Exception:
        # Log then propagate -- the caller decides how to recover.
        logger.warn("Unable to update service.quota.set_provider_quota.")
        raise
def get_identity(user, identity_uuid):
    """
    Given the (request) user and an identity uuid,
    return None or an Active Identity
    """
    try:
        identities = get_identity_list(user)
        if not identities:
            raise CoreIdentity.DoesNotExist(
                "No identities found for user %s" % user.username)
        return identities.get(uuid=identity_uuid)
    except CoreIdentity.DoesNotExist:
        logger.warn("Identity %s DoesNotExist" % identity_uuid)
        return None
def read_cloud_machine_hook(new_machine, provider_uuid, identifier):
    """
    RULES:
    #1. READ operations ONLY!
    #2. FROM Cloud --> ProviderMachine ONLY!
    """
    from service.openstack import glance_read_machine
    provider = Provider.objects.get(uuid=provider_uuid)
    if provider.get_type_name().lower() == 'openstack':
        glance_read_machine(new_machine)
    else:
        # BUG FIX: the format string has two %s placeholders but was
        # given a single bare argument, raising TypeError instead of
        # logging.
        logger.warn(
            "machine data for %s is likely incomplete."
            " Create a new hook for %s." % (new_machine, provider))
def provider_machine_write_hook(provider_machine):
    """
    RULES:
    #1. WRITE operations ONLY!
    #2. FROM ProviderMachine --> Cloud ONLY!
    """
    from service.openstack import glance_write_machine
    provider = provider_machine.instance_source.provider
    if provider.get_type_name().lower() != 'openstack':
        logger.warn(
            "Create a new write hook for %s"
            " to keep cloud objects up to date." % provider)
        return
    glance_write_machine(provider_machine)
def update_images():
    """Push application-level updates down to each OpenStack image.

    NOTE(review): this function references `self` without declaring it
    -- it appears to have been written as a method; signature left
    unchanged to avoid breaking callers. TODO confirm.
    """
    from service.accounts.openstack import AccountDriver as OSAccounts
    for pm in self.providermachine_set.all():
        if pm.provider.get_type_name().lower() != 'openstack':
            continue
        image_id = pm.identifier
        provider = pm.provider
        try:
            accounts = OSAccounts(pm.provider)
            image = accounts.image_manager.get_image(image_id)
            # BUG FIX: the diff result was discarded and `updates` was
            # never assigned, raising NameError on the next line.
            updates = self.diff_updates(pm, image)
            accounts.image_manager.update_image(image, **updates)
        except Exception:
            logger.warn("Image Update Failed for %s on Provider %s"
                        % (image_id, provider))
def migrate_access_to_membership_list(self, access_list):
    """Translate a list of usernames into version memberships.

    Skips (with a warning) any username lacking a User or a
    same-named Group.
    """
    for username in access_list:
        # 'User' -> User -> Group -> Membership
        user_qs = User.objects.filter(username=username)
        if not user_qs.exists():
            logger.warn("WARNING: User %s does not have a user object"
                        % username)
            continue
        usergroup_qs = user_qs[0].group_set.filter(name=username)
        if not usergroup_qs:
            logger.warn("WARNING: User %s does not have a group object"
                        % username)
            continue
        self.new_version_membership.add(usergroup_qs[0])
def _cleanup_missing_instances(
        identity, core_running_instances, start_date=None):
    """
    Cleans up the DB InstanceStatusHistory when you know what instances
    are active...
    core_running_instances - Reference list of KNOWN active instances
    """
    # Returns the list of instances confirmed still running; anything in
    # the DB but not in `core_running_instances` is end-dated.
    instances = []
    if not identity:
        return instances
    core_instances = _core_instances_for(identity, start_date)
    fixed_instances = []
    for inst in core_instances:
        if not core_running_instances or inst not in core_running_instances:
            # Not seen on the cloud: close out every open history record.
            inst.end_date_all()
            fixed_instances.append(inst)
        else:
            # Instance IS in the list of running instances.. Further cleaning
            # can be done at this level.
            non_end_dated_history = inst.instancestatushistory_set.filter(
                end_date=None)
            count = len(non_end_dated_history)
            if count > 1:
                # More than one open history row is a conflict; resolve
                # down to a single authoritative record.
                history_names = [ish.status.name
                                 for ish in non_end_dated_history]
                # Note: We have the 'wrong' instance, we want the one that
                # includes the ESH driver
                core_running_inst = [i for i in core_running_instances
                                     if i == inst][0]
                new_history = _resolve_history_conflict(
                    identity, core_running_inst, non_end_dated_history)
                fixed_instances.append(inst)
                logger.warn(
                    "Instance %s contained %s "
                    "NON END DATED history:%s. "
                    " New History: %s" % (inst.provider_alias, count,
                                          history_names, new_history))
            # Gather the updated values..
            instances.append(inst)
    # Return the updated list
    if fixed_instances:
        logger.warn("Cleaned up %s instances for %s"
                    % (len(fixed_instances),
                       identity.created_by.username))
    return instances
def _get_status_name_for_provider(
        provider, status_name, task_name=None, tmp_status=None):
    """
    Purpose: to be used in lookups/saves
    Return the appropriate InstanceStatus
    """
    provider_type = provider.get_type_name().lower()
    if provider_type == 'openstack':
        # OpenStack statuses are translated through a dedicated map.
        return _get_openstack_name_map(status_name, task_name, tmp_status)
    # Unknown provider type: fall back to the raw status name.
    logger.warn(
        "Could not find a strategy for provider type:%s" % provider_type)
    return status_name
def list_membership(accounts, glance_image_id): members = [] for image_share in accounts.image_manager.shared_images_for( image_id=glance_image_id): member_id = image_share['member_id'] keystone_project = accounts.user_manager.get_project_by_id(member_id) if not keystone_project: logger.warn("No project returned for member ID %s" % member_id) continue if not hasattr(keystone_project, 'name'): logger.warn( "Unexpected value. No attribute 'name' for Project:%s" % keystone_project) continue members.append(keystone_project.name) return members
def get_identity_list(user, provider=None):
    """
    Given the (request) user
    return all identities on all active providers
    """
    try:
        group = Group.objects.get(name=user.username)
    except Group.DoesNotExist:
        logger.warn("Group %s DoesNotExist" % user.username)
        return None
    # Implicit: Active, non-end dated providers.
    if provider:
        return group.current_identities.filter(provider=provider)
    return group.current_identities.all()
def obtainOAuthToken(username, token_key, token_expire=None):
    """
    returns a new token for username
    """
    try:
        user = AtmosphereUser.objects.get(username=username)
    except AtmosphereUser.DoesNotExist:
        logger.warn("User %s doesn't exist on the DB. "
                    "OAuth token _NOT_ created" % username)
        return None
    token, _ = AuthToken.objects.get_or_create(
        key=token_key, user=user,
        api_server_url=settings.API_SERVER_URL)
    if token_expire:
        token.update_expiration(token_expire)
        token.save()
    return token
def _calculate_overcommits(self, sizes, remove_totals): instances = self.admin_driver.list_all_instances() size_map = {size.id: size for size in sizes} for instance in instances: if instance.extra['status'] in ['suspended', 'shutoff']: #oc == OverCommited oc_size = size_map.get(instance.size.id) if not oc_size: logger.warn("Size %s NOT found in list of sizes. Cannot" " remove instance %s from calculation" % (instance.size.id, instance.id)) continue remove_totals['cpu'] = remove_totals['cpu'] + oc_size.cpu remove_totals['ram'] = remove_totals['ram'] + oc_size.ram remove_totals['disk'] = remove_totals['disk'] + oc_size.disk return remove_totals
def _split_cloud_name(cls, machine_name):
    """Split a cloud machine name into [application name, version name].

    Uses settings.APPLICATION_VERSION_SEPARATOR; when the separator
    appears more than once the LAST occurrence wins.
    """
    version_sep = settings.APPLICATION_VERSION_SEPARATOR
    if version_sep not in machine_name:
        # ROBUSTNESS FIX: the original never assigned/returned anything
        # on this path; treat the whole name as the application.
        return [machine_name.strip(), '']
    split_list = machine_name.split(version_sep)
    if len(split_list) == 1:
        logger.warn("Version separator(%s) was not found: %s"
                    % (version_sep, machine_name))
        # BUG FIX: str has no .trim(); the original raised AttributeError.
        split_list = [split_list[0].strip(), '']
    if len(split_list) > 2:
        logger.warn("Version separator(%s) is ambiguous: %s"
                    % (version_sep, machine_name))
        version_parts = machine_name.rpartition(version_sep)
        # BUG FIX: .trim() -> .strip() here as well.
        split_list = [version_parts[0].strip(), version_parts[2].strip()]
    return split_list
def emulate_session(request, username=None):
    """Switch the current session to emulate `username`.

    Creates a short-lived 'EMULATED-' token for the target user and
    rewires the session's username/token keys; the original user is
    remembered under session['emulator']. Calling without a username
    clears an existing emulation. Failures redirect to the API root.
    """
    try:
        logger.info("Emulate attempt: %s wants to be %s"
                    % (request.user, username))
        logger.info(request.session.__dict__)
        if not username and 'emulator' in request.session:
            # Un-emulate: restore the original user recorded earlier.
            logger.info("Clearing emulation attributes from user")
            request.session['username'] = request.session['emulator']
            del request.session['emulator']
            # Allow user to fall through on line below
        try:
            user = AtmosphereUser.objects.get(username=username)
        except AtmosphereUser.DoesNotExist:
            logger.info("Emulate attempt failed. User <%s> does not exist"
                        % username)
            return HttpResponseRedirect(
                settings.REDIRECT_URL + "/api/v2")
        logger.info("Emulate success, creating tokens for %s" % username)
        expireDate = timezone.now() + secrets.TOKEN_EXPIRY_TIME
        token = get_or_create_token(
            user, token_key='EMULATED-'+str(uuid4()),
            token_expire=expireDate,
            remote_ip=request.META['REMOTE_ADDR'],
            issuer="DRF-EmulatedSession-%s" % user.username)
        token.save()
        # Keep original emulator if it exists, or use the last known username
        original_emulator = request.session.get(
            'emulator', request.session['username'])
        request.session['emulator'] = original_emulator
        # Set the username to the user to be emulated
        # to whom the token also belongs
        request.session['username'] = username
        request.session['token'] = token.key
        logger.info("Returning emulated user - %s - to api root "
                    % username)
        logger.info(request.session.__dict__)
        logger.info(request.user)
        serialized_data = TokenSerializer(token,
                                          context={'request': request}).data
        return Response(serialized_data, status=status.HTTP_201_CREATED)
    except Exception as e:
        # Catch-all: any failure falls back to the API root redirect.
        logger.warn("Emulate request failed")
        logger.exception(e)
        return HttpResponseRedirect(settings.REDIRECT_URL + "/api/v2")
def include_admin(self, projectname, admin_rolename='admin'):
    """
    Grant the keystone admin user the 'admin_rolename' role on
    'projectname'.

    This should be called each time an account is created: it gives the
    admin tenant access to view the user's resources, which is REQUIRED
    for instance monitoring and ops.

    Returns the created membership, or None when the role assignment
    fails.
    """
    try:
        # The keystone admin always gets access; it always has admin priv.
        return self.add_project_membership(
            projectname, self.keystone.username, admin_rolename)
    except ClientException:
        logger.warn('Could not assign admin role to username %s'
                    % self.keystone.username)
        return None
def ldap_validate(username, password):
    """
    Validate 'username'/'password' by attempting a simple LDAP bind.

    A successful bind means the credentials are authentic; returns True.
    Any failure (bad credentials, unreachable server, ...) returns False.
    """
    try:
        logger.warn("[LDAP] Validation Test - %s" % username)
        connection = ldap_driver.initialize(secrets.LDAP_SERVER)
        bind_dn = "uid=" + username + "," + secrets.LDAP_SERVER_DN
        connection.simple_bind_s(bind_dn, password)
    except Exception as e:
        logger.exception(e)
        return False
    return True
def _cpu_stats(self, size, cpu_total, cpu_used, cpu_overcommit):
    """
    Compute remaining capacity on the CPU axis for the given size.

    'cpu_overcommit' (when positive) is subtracted from 'cpu_used'
    before the calculation. Returns self.total_remaining(...) for the
    resolved per-instance CPU count.
    """
    if cpu_overcommit > 0:
        cpu_used = cpu_used - cpu_overcommit
    # CPUs go by many different, provider-specific names..
    if hasattr(size._size, 'cpu'):
        cpu_count = size._size.cpu
    elif hasattr(size._size, 'vcpus'):
        cpu_count = size._size.vcpus
    else:
        logger.warn("Could not find a CPU value for size %s" % size)
        cpu_count = -1
    if cpu_count > 0:
        # BUGFIX: use the cpu_count resolved above -- 'size.cpu' broke
        # for providers that only expose 'vcpus'.
        max_by_cpu = float(cpu_total) / float(cpu_count)
    else:
        # No CPU information: treat capacity as unbounded on this axis.
        max_by_cpu = sys.maxint
    return self.total_remaining(max_by_cpu, cpu_total, cpu_used, cpu_count)
def get_allocation(username, identity_uuid):
    """
    Return the Allocation for 'username' on identity 'identity_uuid'.

    Returns None when the user has no IdentityMembership. Non-staff
    users without an allocation receive (and return) the provider's
    default allocation.
    """
    user = User.objects.get(username=username)
    group = user.group_set.get(name=user.username)
    try:
        membership = IdentityMembership.objects.get(
            identity__uuid=identity_uuid,
            member=group)
    except IdentityMembership.DoesNotExist:
        # BUGFIX: implicit string concatenation was missing a space
        # (message previously read "does nothave").
        logger.warn("WARNING: User %s does not "
                    "have IdentityMembership on this database"
                    % (username, ))
        return None
    if not user.is_staff and not membership.allocation:
        def_allocation = CoreAllocation.default_allocation(
            membership.identity.provider)
        logger.warn("%s is MISSING an allocation. Default Allocation"
                    " assigned:%s" % (user, def_allocation))
        return def_allocation
    return membership.allocation
def emulate_request(request, username=None):
    """
    Emulate another user for the current (v1) session.

    With no 'username' and an active emulation, restores the original
    emulator's session and falls through (the subsequent lookup of
    username=None fails and redirects). Otherwise creates a fresh
    AuthToken for the target user, rewrites the session to point at that
    user, and redirects to the v1 profile endpoint. Any unexpected
    failure also redirects to the profile endpoint.

    NOTE(review): authorization (who may emulate) is presumably enforced
    by the caller/decorator -- not visible here, confirm upstream.
    """
    try:
        logger.info("Emulate attempt: %s wants to be %s"
                    % (request.user, username))
        logger.info(request.session.__dict__)
        if not username and 'emulated_by' in request.session:
            # Un-emulate: restore the original emulator's username.
            logger.info("Clearing emulation attributes from user")
            request.session['username'] = request.session['emulated_by']
            del request.session['emulated_by']
            # Allow user to fall through on line below
        try:
            user = DjangoUser.objects.get(username=username)
        except DjangoUser.DoesNotExist:
            logger.info("Emulate attempt failed. User <%s> does not exist"
                        % username)
            return HttpResponseRedirect(
                settings.REDIRECT_URL + "/api/v1/profile")
        logger.info("Emulate success, creating tokens for %s" % username)
        token = AuthToken(
            user=user,
            key=str(uuid.uuid4()),
            issuedTime=datetime.now(),
            remote_ip=request.META['REMOTE_ADDR'],
            api_server_url=settings.API_SERVER_URL
        )
        token.save()
        # Keep original emulator if it exists, or use the last known username
        original_emulator = request.session.get(
            'emulated_by', request.session['username'])
        request.session['emulated_by'] = original_emulator
        # Set the username to the user to be emulated
        # to whom the token also belongs
        request.session['username'] = username
        request.session['token'] = token.key
        logger.info("Returning emulated user - %s - to api profile "
                    % username)
        logger.info(request.session.__dict__)
        logger.info(request.user)
        return HttpResponseRedirect(settings.REDIRECT_URL + "/api/v1/profile")
    except Exception as e:
        # Best-effort: never 500 out of emulation, just go to the profile.
        logger.warn("Emulate request failed")
        logger.exception(e)
        return HttpResponseRedirect(settings.REDIRECT_URL + "/api/v1/profile")
def is_expired(self, user):
    """
    Return True when the user's LDAP password has expired.

    Fails closed: when LDAP cannot be contacted, or the expiration data
    is missing from the LDAP record, the user is treated as expired.
    """
    ldap_user = lookupUser(user.username)
    if not ldap_user:
        logger.warn("Cannot contact LDAP -- Assume user is expired?")
        return True
    expiry_dict = ldap_user.get('expiry')
    if not expiry_dict:
        logger.error("LDAP password expiration map is missing --"
                     " check django_cyverse_auth: %s" % ldap_user)
        return True
    expiry_date = expiry_dict.get('expires_on')
    if not expiry_date:
        logger.error("LDAP password expiration date is missing -- "
                     "check django_cyverse_auth: %s" % ldap_user)
        return True
    # Expiration dates from LDAP are naive; interpret them as UTC.
    return expiry_date.replace(tzinfo=pytz.UTC) < timezone.now()
def delete(self, request, provider_uuid, identity_uuid, instance_id):
    """Authentication Required, TERMINATE the instance.

    Be careful, there is no going back once you've deleted an instance.

    Flow: resolve the driver, look up the instance, refuse if a volume
    is still attached, queue the destroy task, invalidate the cached
    instance list, end-date the core record, and return the serialized
    (soon-to-be-deleted) instance with 200 OK.
    """
    user = request.user
    esh_driver = prepare_driver(request, provider_uuid, identity_uuid)
    if not esh_driver:
        return invalid_creds(provider_uuid, identity_uuid)
    try:
        esh_instance = esh_driver.get_instance(instance_id)
        if not esh_instance:
            return instance_not_found(instance_id)
        #Test that there is not an attached volume BEFORE we destroy
        _check_volume_attachment(esh_driver, esh_instance)
        task.destroy_instance_task(esh_instance, identity_uuid)
        invalidate_cached_instances(
            identity=Identity.objects.get(uuid=identity_uuid))
        # Re-fetch: the destroy is async, the instance may still exist.
        existing_instance = esh_driver.get_instance(instance_id)
        if existing_instance:
            #Instance will be deleted soon...
            esh_instance = existing_instance
            if esh_instance.extra\
                    and 'task' not in esh_instance.extra:
                # Surface the pending delete to API consumers.
                esh_instance.extra['task'] = 'queueing delete'
        core_instance = convert_esh_instance(esh_instance and esh_driver or esh_driver,
                                             esh_instance,
                                             provider_uuid,
                                             identity_uuid,
                                             user)
        if core_instance:
            core_instance.end_date_all()
        else:
            logger.warn("Unable to find core instance %s." % (instance_id))
        serialized_data = InstanceSerializer(
            core_instance,
            context={"request": request}).data
        response = Response(serialized_data, status=status.HTTP_200_OK)
        response['Cache-Control'] = 'no-cache'
        return response
    except (Identity.DoesNotExist) as exc:
        return failure_response(status.HTTP_400_BAD_REQUEST,
                                "Invalid provider_uuid or identity_uuid.")
    except VolumeAttachConflict as exc:
        # Raised by _check_volume_attachment above.
        message = exc.message
        return failure_response(status.HTTP_409_CONFLICT, message)
    except ConnectionFailure:
        return connection_failure(provider_uuid, identity_uuid)
    except InvalidCredsError:
        return invalid_creds(provider_uuid, identity_uuid)
def get_uid_number(userid):
    """
    Look up the LDAP uidNumber for 'userid', offset by -10000.

    Returns the integer id, or None when the user does not exist or the
    LDAP query fails.
    """
    try:
        conn = ldap_driver.initialize(secrets.LDAP_SERVER)
        results = conn.search_s(secrets.LDAP_SERVER_DN,
                                ldap_driver.SCOPE_SUBTREE,
                                "(uid=%s)" % userid)
        # First match only; raises IndexError when no entry was found.
        first_attrs = results[0][1]
        return int(first_attrs["uidNumber"][0]) - 10000
    except IndexError:
        logger.warn("Error - User %s does not exist" % userid)
        return None
    except Exception as e:
        logger.warn("Error occurred getting user uidNumber for user: %s"
                    % userid)
        logger.exception(e)
        return None
def migrate_access_to_membership_list(self, access_list):
    """
    Grant version membership to the group of every username in
    'access_list'.

    Usernames with no matching User, or Users with no group membership,
    are skipped with a warning.
    """
    #FIXME: We are granting 'user' access but *in reality* we *should* grant *group* access and avoid this possible over-step.
    for username in access_list:
        # 'username' -> User -> Group -> Membership
        # BUGFIX: the loop variable was previously overwritten by the
        # query result, so the warnings below always logged 'None'
        # instead of the username being migrated.
        user = User.objects.filter(username=username).first()
        if not user:
            logger.warn("WARNING: User %s does not have a user object"
                        % username)
            continue
        memberships_qs = user.memberships.select_related('group')
        if not memberships_qs:
            logger.warn("WARNING: User %s does not have a group object"
                        % username)
            continue
        for membership in memberships_qs:
            group = membership.group
            self.new_version_membership.add(group)
def _get_app_by_name(provider_uuid, name):
    """
    Retrieve the Application named 'name' on the given provider.

    Returns None when no such application exists, or when the name is
    ambiguous (multiple applications share it -- a warning is logged).
    """
    query = {
        'versions__machines__instance_source__provider__uuid': provider_uuid,
        'name': name,
    }
    try:
        return Application.objects.get(**query)
    except Application.DoesNotExist:
        return None
    except Application.MultipleObjectsReturned:
        logger.warn(
            "Possible Application Conflict: Multiple applications named:"
            "%s. Check this query for more details" % name)
        return None
def getAllUsers():
    """
    Grabs all users in LDAP.

    Searches once per lowercase letter prefix and returns the combined
    list of attribute dicts, or None when the LDAP lookup fails.
    """
    try:
        conn = ldap_driver.initialize(secrets.LDAP_SERVER)
        user_list = []
        for letter in string.lowercase:
            attr = _search_ldap("%s*" % letter, conn)
            # Each search result is a (dn, attrs) pair; keep the attrs.
            for result in attr:
                user_list.append(result[1])
        return user_list
    except Exception as e:
        # BUGFIX: the previous message referenced an undefined 'userid',
        # raising NameError inside the handler and masking the real error.
        logger.warn("Error occurred looking up all users")
        logger.exception(e)
        return None
def create_unique_version(app, version, created_by, created_by_identity):
    """
    Create an ApplicationVersion for 'app', retrying until the version
    name is unique.

    On a duplicate (IntegrityError) the name is mutated and the create
    retried: an empty name first becomes "1", then ".0" is appended on
    each collision ("1.0", "1.0.0", ...).
    """
    while True:
        try:
            return ApplicationVersion.objects.create(
                application=app,
                name=version,
                created_by=created_by,
                created_by_identity=created_by_identity,
            )
        except IntegrityError:
            # Duplicate version name for this application -- retry.
            logger.warn("Version %s is taken for Application %s"
                        % (version, app))
            if not version:
                version = "1"
            version += ".0"
def user_over_allocation_enforcement(
        provider, username, print_logs=False, start_date=None,
        end_date=None):
    """
    Begin monitoring 'username' on 'provider'.
    * Calculate allocation from START of month to END of month
    * If user is deemed OverAllocation, apply enforce_allocation_policy

    Returns the AllocationResult in every case; enforcement is a side
    effect and only occurs when settings.ENFORCING is set.
    """
    identity = _get_identity_from_tenant_name(provider, username)
    allocation_result = get_allocation_result_for(
        provider, username, print_logs, start_date, end_date)
    # ASSERT: allocation_result has been retrieved successfully
    # Make some enforcement decision based on the allocation_result's output.
    if not identity:
        logger.warn(
            "%s has NO identity. "
            "Total Runtime could NOT be calculated. Returning.."
            % (username, ))
        return allocation_result
    user = User.objects.get(username=username)
    allocation = get_allocation(username, identity.uuid)
    if not allocation:
        logger.info(
            "%s has NO allocation. Total Runtime: %s. Returning.."
            % (username, allocation_result.total_runtime()))
        return allocation_result
    if not settings.ENFORCING:
        return allocation_result
    # Enforce allocation if overboard.
    over_allocation, diff_amount = allocation_result.total_difference()
    if over_allocation:
        logger.info(
            "%s is OVER allocation. %s - %s = %s"
            % (username, allocation_result.total_credit(),
               allocation_result.total_runtime(), diff_amount))
        try:
            enforce_allocation_policy(identity, user)
        except Exception:
            # BUGFIX: narrowed from a bare 'except:' (which also caught
            # SystemExit/KeyboardInterrupt) and log the traceback so
            # enforcement failures are debuggable.
            logger.exception(
                "Unable to enforce allocation for user: %s" % user)
    return allocation_result
def get_provider(user, provider_uuid):
    """
    Given the (request) user and a provider uuid, return the matching
    active provider, or None when either the user's group or the
    provider cannot be found.
    """
    try:
        group = Group.objects.get(name=user.username)
    except Group.DoesNotExist:
        logger.warn("Group %s DoesNotExist" % user.username)
        return None
    try:
        # Only providers currently active for this group are considered.
        return group.current_providers.get(uuid=provider_uuid)
    except Provider.DoesNotExist:
        logger.warn("Provider %s DoesNotExist for User:%s in Group:%s"
                    % (provider_uuid, user, group))
        return None
def get_identity_list(user, provider=None):
    """
    Given the (request) user, return all identities on all active
    providers (optionally restricted to a single provider), or None
    when the lookup fails.
    """
    try:
        group = Group.objects.get(name=user.username)
        if provider:
            return group.current_identities.filter(provider=provider)
        return group.current_identities.all()
    except Group.DoesNotExist:
        logger.warn("Group %s DoesNotExist" % user.username)
        return None
    except CoreIdentity.DoesNotExist:
        logger.warn("Identity DoesNotExist for user %s" % user.username)
        return None
def admin_capacity_check(provider_id, instance_id):
    """
    Check capacity of the hypervisor hosting 'instance_id' on provider
    'provider_id', using the provider's admin driver.

    Returns the result of test_capacity(...), or None when the instance
    or its hypervisor hostname cannot be determined (assumed to be
    under capacity).
    """
    from service.driver import get_admin_driver
    from core.models import Provider
    provider = Provider.objects.get(id=provider_id)
    admin_driver = get_admin_driver(provider)
    instance = admin_driver.get_instance(instance_id)
    if not instance:
        logger.warn("ERROR - Could not find instance id=%s"
                    % (instance_id, ))
        return
    extra_obj = instance.extra['object']
    hypervisor_hostname = extra_obj.get(
        'OS-EXT-SRV-ATTR:hypervisor_hostname')
    if not hypervisor_hostname:
        logger.warn("ERROR - Server Attribute hypervisor_hostname missing!"
                    "Assumed to be under capacity")
        return
    hypervisor_stats = admin_driver._connection.ex_detail_hypervisor_node(
        hypervisor_hostname)
    return test_capacity(hypervisor_hostname, instance, hypervisor_stats)