def get(self, request, provider_uuid, identity_uuid):
    """
    Using provider and identity, get the list of machines.
    TODO: Cache this request

    Returns a DRF Response of serialized ProviderMachines, or an error
    response mapped from the driver/connection failure that occurred.
    """
    try:
        request_user = request.user
        logger.debug("filtered_machine_list")
        filtered_machine_list = provider_filtered_machines(request,
                                                           provider_uuid,
                                                           identity_uuid,
                                                           request_user)
    except InvalidCredsError:
        return invalid_creds(provider_uuid, identity_uuid)
    except MalformedResponseError:
        return malformed_response(provider_uuid, identity_uuid)
    except (socket_error, ConnectionFailure):
        return connection_failure(provider_uuid, identity_uuid)
    except Exception as e:
        logger.exception("Unexpected exception for user:%s" % request_user)
        return failure_response(status.HTTP_500_INTERNAL_SERVER_ERROR,
                                e.message)
    # (Removed commented-out debug lines -- dead code.)
    serialized_data = ProviderMachineSerializer(filtered_machine_list,
                                                request_user=request.user,
                                                many=True).data
    response = Response(serialized_data)
    return response
def get_default_provider(username):
    """Return the default (OpenStack) Provider for `username`'s group.

    Looks through the providers referenced by the group's current
    identities; returns the first OpenStack match or None.
    """
    try:
        from core.models.group import get_user_group
        from core.models.provider import Provider
        group = get_user_group(username)
        provider_ids = group.current_identities.values_list(
            'provider', flat=True)
        candidates = Provider.objects.filter(
            id__in=provider_ids, type__name="OpenStack")
        if not candidates:
            logger.error("get_default_provider could not find a new "
                         "Provider for %s" % (username,))
            return None
        logger.debug("get_default_provider selected a new "
                     "Provider for %s: %s" % (username, candidates))
        return candidates[0]
    except Exception as e:
        logger.exception("get_default_provider encountered an error "
                         "for %s" % (username,))
        return None
def sync_cloud_access(accounts, img, project_names=None):
    """Share image `img` with the cloud projects backing `project_names`.

    For each group name, every identity on the accounts' provider is
    resolved to its OpenStack project and added as an image member
    (skipping projects already shared). Returns the list of approved
    projects.

    Raises:
        Exception: when a group name has no matching Group row.
    """
    domain_id = accounts.credentials.get('domain_name', 'default')
    approved_projects = accounts.get_image_members(img.id)
    # BUGFIX: the default project_names=None previously crashed the
    # for-loop with a TypeError; treat it as "nothing to share".
    if not project_names:
        return approved_projects
    # Any names who aren't already on the image should be added
    # Find names who are marked as 'sharing' on DB but not on OpenStack
    for project_name in project_names:
        # FIXME: Remove .strip() when 'bug' has been fixed
        group_name = project_name.strip()
        # FIXME: This code should be changed
        # when user-group-project associations change.
        try:
            group = models.Group.objects.get(name=group_name)
        except models.Group.DoesNotExist:
            # Narrowed from a bare 'except:' so real errors propagate.
            raise Exception("Invalid group name: %s" % group_name)
        for identity_membership in group.identitymembership_set.all():
            if identity_membership.identity.provider != accounts.core_provider:
                logger.debug(
                    "Skipped %s -- Wrong provider"
                    % identity_membership.identity)
                continue
            # Get project name from the identity's credential-list
            identity = identity_membership.identity
            project_name = identity.get_credential('ex_project_name')
            project = accounts.get_project(project_name, domain_id=domain_id)
            if not project or project in approved_projects:
                logger.debug("Skipped Project: %s -- Already shared"
                             % project)
                continue
            project = accounts.share_image_with_identity(img, identity)
            approved_projects.append(project)
    return approved_projects
def provider_filtered_machines(request, provider_uuid, identity_uuid,
                               request_user=None):
    """
    Return all filtered machines.
    Uses the most common, default filtering method.
    """
    if not Identity.objects.filter(uuid=identity_uuid):
        raise ObjectDoesNotExist()
    esh_driver = None
    try:
        esh_driver = prepare_driver(request, provider_uuid, identity_uuid)
    except Exception:
        # TODO: Observe the change of 'Fail loudly' here
        # and clean up the noise, rather than hide it.
        logger.exception(
            "Driver could not be prepared - Provider: %s , Identity: %s"
            % (provider_uuid, identity_uuid))
    if not esh_driver:
        raise LibcloudInvalidCredsError()
    logger.debug(esh_driver)
    return list_filtered_machines(esh_driver, provider_uuid, request_user)
def get_context_user(serializer, kwargs, required=False):
    """Pull the acting user out of a serializer's init kwargs.

    Expects kwargs to carry context={"user": ..., "request": ...}.
    Returns the user (resolving a str username to an AtmosphereUser),
    or None when no context was supplied and required=False.

    Raises:
        Exception: when required=True and no context was supplied, or
            when 'user' is neither a str nor an (Anonymous/Atmosphere)User.
    """
    context = kwargs.get('context', {})
    user = context.get('user')
    request = context.get('request')
    if not user and not request:
        print_str = "%s was initialized"\
            " without appropriate context."\
            " Sometimes, like on imports, this is normal."\
            " For complete results include the \"context\" kwarg,"\
            " with key \"request\" OR \"user\"."\
            " (e.g. context={\"user\":user,\"request\":request})"\
            % (serializer,)
        if required:
            raise Exception(print_str)
        else:
            logger.debug("Incomplete Data Warning:%s" % print_str)
        return None
    if user:
        # NOTE: Converting str to atmosphere user is easier when debugging
        # BUGFIX: use isinstance() instead of exact type() comparisons so
        # subclasses are accepted.
        if isinstance(user, str):
            user = AtmosphereUser.objects.get(username=user)
        elif not isinstance(user, (AnonymousUser, AtmosphereUser)):
            raise Exception("This Serializer REQUIRES the \"user\" "
                            "to be of type str or AtmosphereUser")
    elif request:
        user = request.user
    if user:
        logger.debug("%s initialized with user %s" % (serializer, user))
    return user
def get_default_provider(username):
    """
    Return default provider given -- the first active, non-end-dated
    OpenStack provider attached to the user's group, or None.
    """
    try:
        from core.models.group import get_user_group
        group = get_user_group(username)
        provider = group.providers.filter(
            Q(end_date=None) | Q(end_date__gt=timezone.now()),
            active=True, type__name="OpenStack")
        if provider:
            provider = provider[0]
        else:
            logger.error("get_default_provider could not find "
                         "a valid Provider")
            return None
        logger.debug("default provider is %s " % provider)
        return provider
    except IndexError:
        logger.info("No provider found for %s" % username)
        return None
    except Exception as e:
        # BUGFIX: 'except Exception, e' is Python-3-incompatible syntax.
        logger.exception(e)
        return None
def cas_proxyCallback(request):
    """Placeholder endpoint for the proxyCallback service required by
    CAS authentication."""
    logger.debug("Incoming request to CASPROXY (Proxy Callback):")
    body = "I am at a RSA-2 or VeriSigned SSL Cert. website."
    return HttpResponse(body)
def _task_to_status(self, task_name):
    """Map an OpenStack task name to the instance status it implies.

    Returns None for unknown task names (the table is incomplete --
    see the trailing note below).
    """
    status_by_task = {
        # Terminate tasks
        #'deleting': 'active',
        # Suspend tasks
        "resuming": "build",
        "suspending": "suspended",
        # Shutdown tasks
        "powering-on": "active",
        "powering-off": "suspended",
        # Instance launch tasks
        "initializing": "build",
        "scheduling": "build",
        "spawning": "build",
        # Atmosphere Task-specific lines
        "networking": "build",
        "deploying": "build",
        "deploy_error": "build",
        # There are more.. Must find table..
    }
    mapped_status = status_by_task.get(task_name)
    logger.debug(
        "Instance:%s History - Task provided:%s, Status should be %s"
        % (self.provider_alias, task_name, mapped_status))
    return mapped_status
def logout(request):
    """Log the user out of Django; redirect through CAS logout when the
    POST carries a truthy 'cas' flag, otherwise back to the login page."""
    logger.debug(request)
    django_logout(request)
    wants_cas_logout = request.POST.get('cas', False)
    if wants_cas_logout:
        return cas_logoutRedirect()
    RequestContext(request)
    return HttpResponseRedirect(settings.REDIRECT_URL + '/login')
def delete_project_member(self, groupname, username, adminRole=False):
    """
    Retrieves the project and user object
    Removes user of the admin/member role
    Returns True on success
    Invalid username, groupname, rolename:
        raise keystoneclient.exceptions.NotFound
    """
    project = self.get_project(groupname)
    user = self.get_user(username)
    # FIXME: Hardcoded values
    role = self.get_role('admin') if adminRole \
        else self.get_role('defaultMemberRole')
    if not project or not user:
        # Nothing to remove -- treat as success.
        return True
    try:
        project.remove_user(user, role)
        return True
    except NotFound as no_role_for_user:
        # User did not hold the role -- already in the desired state.
        logger.debug('Error - %s: User-role combination does not exist'
                     % no_role_for_user)
        return True
    except Exception as e:
        # BUGFIX: 'except Exception, e' is Python-3-incompatible syntax.
        logger.exception(e)
        raise
def running_instances(network_name, all_instances):
    """Return True when any instance holds an address on `network_name`."""
    for inst in all_instances:
        if network_name in inst.extra['addresses']:
            logger.debug("Network %s is in use" % network_name)
            return True
    logger.debug("Network %s is NOT in use" % network_name)
    return False
def create_provider_machine(identifier, provider_uuid, app,
                            created_by_identity=None, version="1.0"):
    """Create an InstanceSource + ProviderMachine pair for `identifier`.

    Falls back to the provider's admin identity when no creator identity
    is supplied; runs the update hook and caches the new machine.
    """
    # Attempt to match machine by provider alias
    # Admin identity used until the real owner can be identified.
    provider = Provider.objects.get(uuid=provider_uuid)
    if not created_by_identity:
        created_by_identity = provider.admin
    logger.debug("Provider %s" % provider)
    logger.debug("App %s" % app)
    # TODO: Reminder to re-evaluate these lines when you get to Django 1.8
    new_source = InstanceSource.objects.create(
        identifier=identifier,
        created_by=created_by_identity.created_by,
        provider=provider,
        created_by_identity=created_by_identity,
    )
    new_machine = ProviderMachine.objects.create(
        instance_source=new_source,
        application=app,
        version=version,
    )
    provider_machine_update_hook(new_machine, provider_uuid, identifier)
    logger.info("New ProviderMachine created: %s" % new_machine)
    add_to_cache(new_machine)
    return new_machine
def get_resource(request, file_location):
    """Serve a file from PROJECT_ROOT/init_files as an attachment.

    Access is granted when the request originates from a known Instance
    IP, or when a session username authenticates. On failure (including
    unauthorized access) the user is redirected to the login page.
    """
    try:
        username = request.session.get('username', None)
        remote_ip = request.META.get('REMOTE_ADDR', None)
        # BUGFIX: 'authenticated' was unbound (NameError) when both
        # remote_ip and username were None.
        authenticated = False
        if remote_ip is not None:
            # Authenticated if the instance requests resource.
            instances = Instance.objects.filter(ip_address=remote_ip)
            authenticated = len(instances) > 0
        elif username is not None:
            authenticate(username=username, password="")
            # User Authenticated by this line
            authenticated = True
        if not authenticated:
            raise Exception("Unauthorized access")
        path = settings.PROJECT_ROOT + "/init_files/" + file_location
        if os.path.exists(path):
            # BUGFIX: use a context manager -- the old open() leaked the
            # handle and shadowed the builtin 'file'.
            with open(path, 'r') as resource_file:
                content = resource_file.read()
            response = HttpResponse(content)
            # Download it, even if it looks like text
            response['Content-Disposition'] = \
                'attachment; filename=%s' % file_location.split("/")[-1]
            return response
        template = get_template('404.html')
        variables = RequestContext(request, {
            'message': '%s not found' % (file_location,)
        })
        output = template.render(variables)
        return HttpResponse(output)
    except Exception as e:
        logger.debug("Resource request failed")
        logger.exception(e)
        return HttpResponseRedirect(settings.REDIRECT_URL + "/login")
def run(self, node, client):
    """
    Server-side logging

    Optional Param: attempts - # of times to retry
    in the event of a Non-Zero exit status(code)
    """
    attempt = 0
    retry_time = 0
    while attempt < self.attempts:
        node = super(LoggedScriptDeployment, self).run(node, client)
        if self.exit_status == 0:
            break
        attempt += 1
        retry_time = 2 * 2**attempt  # 4,8,16..
        # BUGFIX: format args were swapped -- the script name belongs
        # with 'Script' and the node id with 'Node' (matches the
        # STDOUT/STDERR messages below).
        logger.debug(
            "WARN: Script %s on Node %s is non-zero."
            " Will re-try in %s seconds. Attempt: %s/%s"
            % (self.name, node.id, retry_time, attempt, self.attempts))
        time.sleep(retry_time)
    if self.stdout:
        logger.debug('%s (%s)STDOUT: %s'
                     % (node.id, self.name, self.stdout))
    if self.stderr:
        logger.warn('%s (%s)STDERR: %s'
                    % (node.id, self.name, self.stderr))
    return node
def _launch_machine(driver, identity, machine, size,
                    name, userdata_content=None, network=None,
                    password=None, token=None, **kwargs):
    """Create an instance on whichever cloud backs `driver`.

    Dispatches on the provider type (Eucalyptus / OpenStack / AWS) and
    returns (esh_instance, token, password).

    Raises:
        Exception: when the provider type is unsupported.
    """
    if isinstance(driver.provider, EucaProvider):
        # Create/deploy the instance -- NOTE: Name is passed in extras
        logger.info("EUCA -- driver.create_instance EXTRAS:%s" % kwargs)
        # BUGFIX: was 'userdata_contents' -- an undefined name (NameError);
        # the parameter is 'userdata_content'.
        esh_instance = driver\
            .create_instance(name=name, image=machine, size=size,
                             ex_userdata=userdata_content, **kwargs)
    elif isinstance(driver.provider, OSProvider):
        deploy = True
        # ex_metadata, ex_keyname
        extra_args = _extra_openstack_args(identity)
        kwargs.update(extra_args)
        logger.debug("OS driver.create_instance kwargs: %s" % kwargs)
        esh_instance = driver.create_instance(
            name=name, image=machine, size=size, token=token,
            networks=[network], ex_admin_pass=password, **kwargs)
        # Used for testing.. Eager ignores countdown
        if app.conf.CELERY_ALWAYS_EAGER:
            logger.debug("Eager Task, wait 1 minute")
            time.sleep(1 * 60)
    elif isinstance(driver.provider, AWSProvider):
        # TODO:Extra stuff needed for AWS provider here
        esh_instance = driver.deploy_instance(
            name=name, image=machine, size=size,
            deploy=True, token=token, **kwargs)
    else:
        raise Exception("Unable to launch with this provider.")
    return (esh_instance, token, password)
def validate_token(token, request=None):
    """
    Validates the token attached to the request (SessionStorage, GET/POST)
    If token has expired,
    CAS will attempt to reauthenticate the user and refresh token.
    Expired Tokens can be used for GET requests ONLY!
    """
    # Existence test
    try:
        auth_token = AuthToken.objects.get(key=token)
        user = auth_token.user
    except AuthToken.DoesNotExist:
        logger.info("AuthToken Retrieved:%s Does not exist." % (token,))
        return False
    if not auth_token.is_expired():
        return True
    # Token is expired from here on.
    if not (request and request.META['REQUEST_METHOD'] == 'POST'):
        # Expired tokens are still acceptable for GET requests.
        logger.debug("Token %s EXPIRED, but allowing User %s to GET data.."
                     % (token, user))
        return True
    # POST with an expired token: see if the user (or the user who is
    # emulating a user) can be re-authenticated via CAS.
    user_to_auth = request.session.get('emulated_by', user)
    if cas_validateUser(user_to_auth):
        auth_token.update_expiration()
        auth_token.save()
        return True
    logger.info("Token %s expired, User %s "
                "could not be reauthenticated in CAS"
                % (token, user))
    return False
def get_allocation_result_for(
        provider, username,
        print_logs=False, start_date=None, end_date=None):
    """
    Given provider and username:
    * Find the correct identity for the user
    * Create 'Allocation' using core representation
    * Calculate the 'AllocationResult' and return both
    """
    identity = _get_identity_from_tenant_name(provider, username)
    # Attempt to run through the allocation engine
    try:
        allocation_result = _get_allocation_result(
            identity, start_date, end_date,
            print_logs=print_logs)
        logger.debug("Result for Username %s: %s"
                     % (username, allocation_result))
        return allocation_result
    except IdentityMembership.DoesNotExist:
        # BUGFIX: implicit string concatenation produced 'does nothave'.
        logger.warn(
            "WARNING: User %s does not "
            "have IdentityMembership on this database" % (username, ))
        return _empty_allocation_result()
    except:
        logger.exception("Unable to monitor Identity:%s" % (identity,))
        raise
def validate_user(self, user):
    """
    Validates an account based on the business logic assigned by jetstream.
    In this example:
    * Accounts are *ONLY* valid if they have 1+ 'jetstream' allocations.
    * All other allocations are ignored.
    """
    driver = TASAPIDriver()
    try:
        return bool(fill_user_allocation_source_for(driver, user))
    except (NoTaccUserForXsedeException, NoAccountForUsernameException):
        logger.exception('User is invalid: %s', user)
        return False
    except TASAPIException:
        logger.exception(
            'Some other error happened while trying to validate user: %s',
            user)
    # TAS API failed for an unexpected reason -- fall back to the local
    # records of active allocation sources.
    active_count = UserAllocationSource.objects.filter(
        only_current_user_allocations() & Q(user=user)
    ).count()
    logger.debug('user: %s, active_allocation_count: %d', user, active_count)
    return active_count > 0
def get(self, request, provider_uuid, identity_uuid):
    """
    Using provider and identity, get the list of machines.
    TODO: Cache this request
    """
    try:
        request_user = request.user
        logger.debug("filtered_machine_list")
        machines = provider_filtered_machines(
            request, provider_uuid, identity_uuid, request_user)
    except ProviderNotActive as pna:
        return inactive_provider(pna)
    except LibcloudInvalidCredsError:
        return invalid_creds(provider_uuid, identity_uuid)
    except LibcloudBadResponseError:
        return malformed_response(provider_uuid, identity_uuid)
    except (socket_error, ConnectionFailure):
        return connection_failure(provider_uuid, identity_uuid)
    except ObjectDoesNotExist:
        return invalid_provider_identity(provider_uuid, identity_uuid)
    except Exception as e:
        logger.exception("Unexpected exception for user:%s" % request_user)
        return failure_response(status.HTTP_409_CONFLICT, e.message)
    serialized_data = ProviderMachineSerializer(
        machines, request_user=request.user, many=True).data
    return Response(serialized_data)
def monitor_sizes_for(provider_id, print_logs=False):
    """
    Run the set of tasks related to monitoring sizes for a provider.
    Optionally, provide a list of usernames to monitor
    While debugging, print_logs=True can be very helpful.
    start_date and end_date allow you to search a 'non-standard' window of time.
    """
    from service.driver import get_admin_driver
    console_handler = None
    if print_logs:
        import logging
        import sys
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setLevel(logging.DEBUG)
        logger.addHandler(console_handler)
    provider = Provider.objects.get(id=provider_id)
    admin_driver = get_admin_driver(provider)
    # Non-End dated sizes on this provider
    db_sizes = Size.objects.filter(only_current(), provider=provider)
    seen_sizes = [convert_esh_size(cloud_size, provider.uuid)
                  for cloud_size in admin_driver.list_sizes()]
    now_time = timezone.now()
    stale_sizes = [size for size in db_sizes if size not in seen_sizes]
    for stale_size in stale_sizes:
        logger.debug("End dating inactive size: %s" % stale_size)
        stale_size.end_date = now_time
        stale_size.save()
    if print_logs:
        logger.removeHandler(console_handler)
def get_default_identity(username, provider=None):
    """
    Return the default identity given to the user-group for provider.
    Returns None on any failure (the exception is logged).
    """
    try:
        from core.models.group import get_user_group
        group = get_user_group(username)
        identities = group.identities.all()
        if provider:
            if not provider.is_active():
                logger.error("Provider provided for "
                             "get_default_identity is inactive.")
                # BUGFIX: previously 'raise("...")' -- raising a bare
                # string, which is itself a TypeError. Raise a real
                # exception (still caught and logged below).
                raise Exception("Provider provided for get_default_identity "
                                "is inactive.")
            identities = identities.filter(provider=provider)
            return identities[0]
        default_provider = get_default_provider(username)
        default_identity = group.identities.filter(
            provider=default_provider)[0]
        logger.debug("default_identity set to %s " % default_identity)
        return default_identity
    except Exception as e:
        # BUGFIX: 'except Exception, e' is Python-3-incompatible syntax.
        logger.exception(e)
        return None
def _get_openstack_name_map(status_name, task_name, tmp_status):
    """Resolve the effective status from (status, task, tmp_status).

    A task/tmp_status mapping only overrides the raw status when the raw
    status is an 'active' state; otherwise the raw status wins (e.g.
    'shutoff - deploy_error' stays 'shutoff').
    """
    mapped = None
    if task_name:
        mapped = OPENSTACK_TASK_STATUS_MAP.get(task_name)
        if mapped:
            logger.debug(
                "Task provided:%s, Status maps to %s"
                % (task_name, mapped))
    elif tmp_status:
        # ASSERT: task_name = None
        # Collapse 'running_boot_script 1/2', '2/3', etc. into one key.
        if 'running_boot_script' in tmp_status:
            tmp_status = 'running_boot_script'
        mapped = OPENSTACK_TASK_STATUS_MAP.get(tmp_status)
        logger.debug(
            "Tmp_status provided:%s, Status maps to %s"
            % (tmp_status, mapped))
    if not mapped:
        # ASSERT: tmp_status = None
        return status_name
    # ASSERT: mapped exists; determine precedence based on status_name.
    if status_name in OPENSTACK_ACTIVE_STATES:
        return mapped
    return status_name
def update_credential(cls, identity, c_key, c_value, replace=False):
    """Create or update the Credential `c_key` -> `c_value` on `identity`.

    Behavior by existing state:
    * Multiple rows for the key: raise ValueError unless replace=True,
      in which case all duplicates are deleted and one fresh row is
      created via get_or_create below.
    * Exactly one row: if its value differs and replace=False it is kept
      (a debug message is logged) and returned unchanged; with
      replace=True the value is overwritten and saved.
    * No row: a new Credential is created.
    Returns the resulting Credential instance.
    """
    from core.models import Credential
    test_key_exists = Credential.objects.filter(
        identity=identity,
        key=c_key
    )
    if len(test_key_exists) > 1:
        if not replace:
            raise ValueError(
                "Found multiple entries for Credential: %s on Identity: %s"
                % (c_key, identity)
            )
        # replace=True: drop every duplicate, then fall through to the
        # get_or_create at the bottom to leave a single clean row.
        test_key_exists.delete()
    elif test_key_exists:
        # Single selection
        test_key_exists = test_key_exists.get()
        if test_key_exists.value != c_value:
            logger.debug(
                "Conflicting Key Error: Key:%s Value:%s %s Value:%s" % (
                    c_key, test_key_exists.value,
                    "(to replace with new value, set replace=True) New"
                    if not replace else "Replacement",
                    c_value
                )
            )
        # No Dupes... But should we really throw an Exception here?
        if not replace:
            return test_key_exists
        test_key_exists.value = c_value
        test_key_exists.save()
        return test_key_exists
    return Credential.objects.get_or_create(
        identity=identity,
        key=c_key,
        value=c_value
    )[0]
def create_instance(
        provider_uuid, identity_uuid, provider_alias, instance_source,
        ip_address, name, creator, create_stamp,
        token=None, password=None):
    """Persist a new core Instance record for a discovered cloud instance.

    Returns the saved Instance. The token/password are optional and only
    affect logging/stored fields.
    """
    # TODO: Define a creator and their identity by the METADATA instead of
    # assuming its the person who 'found' the instance
    identity = Identity.objects.get(uuid=identity_uuid)
    new_inst = Instance.objects.create(name=name,
                                       provider_alias=provider_alias,
                                       source=instance_source,
                                       ip_address=ip_address,
                                       created_by=creator,
                                       created_by_identity=identity,
                                       token=token,
                                       password=password,
                                       shell=False,
                                       start_date=create_stamp)
    # NOTE: objects.create() already saves the row -- the redundant
    # new_inst.save() call has been removed.
    if token:
        logger.debug("New instance created - %s<%s> (Token = %s)" %
                     (name, provider_alias, token))
    else:
        logger.debug("New instance object - %s<%s>" %
                     (name, provider_alias,))
    # NOTE: No instance_status_history here, because status is not passed
    return new_inst
def remove_empty_networks_for(provider_id):
    """Delete project networks on `provider_id` that host no running
    instances. Neutron failures are logged and skipped per-project."""
    provider = Provider.objects.get(id=provider_id)
    os_driver = get_account_driver(provider)
    all_instances = os_driver.admin_driver.list_all_instances()
    project_map = os_driver.network_manager.project_network_map()
    projects_with_networks = project_map.keys()
    for project in projects_with_networks:
        networks = project_map[project]['network']
        # A single network may arrive as a bare dict; normalize to a list.
        # (isinstance replaces the old exact type() check.)
        if not isinstance(networks, list):
            networks = [networks]
        for network in networks:
            network_name = network['name']
            logger.debug("Checking if network %s is in use" % network_name)
            if running_instances(network_name, all_instances):
                continue
            # TODO: MUST change when not using 'usergroups' explicitly.
            user = project
            try:
                logger.debug("Removing project network for User:%s, "
                             "Project:%s" % (user, project))
                os_driver.network_manager.delete_project_network(user,
                                                                 project)
            except NeutronClientException:
                # BUGFIX: message previously read 'projectnetwork'.
                logger.exception("Neutron unable to remove project "
                                 "network for %s-%s" % (user, project))
            except NeutronException:
                logger.exception("Neutron unable to remove project "
                                 "network for %s-%s" % (user, project))
def wait_for(instance_alias, driverCls, provider, identity, status_query,
             tasks_allowed=False, return_id=False, **task_kwargs):
    """
    #Task makes 250 attempts to 'look at' the instance, waiting 15sec each try
    Cumulative time == 1 hour 2 minutes 30 seconds before FAILURE

    status_query = "active" Match only one value, active
    status_query = ["active","suspended"] or match multiple values.
    """
    from service import instance as instance_service
    try:
        logger.debug("wait_for task started at %s." % datetime.now())
        if app.conf.CELERY_ALWAYS_EAGER:
            # Eager mode (tests): block synchronously until the instance
            # is ready instead of relying on celery retry scheduling.
            logger.debug("Eager task - DO NOT return until its ready!")
            return _eager_override(wait_for, _is_instance_ready,
                                   (driverCls, provider, identity,
                                    instance_alias, status_query,
                                    tasks_allowed, return_id), {})
        result = _is_instance_ready(driverCls, provider, identity,
                                    instance_alias, status_query,
                                    tasks_allowed, return_id)
        return result
    except Exception as exc:
        # "Not Ready" is the expected signal that triggers a retry;
        # only log exceptions that are genuinely unexpected.
        if "Not Ready" not in str(exc):
            # Ignore 'normal' errors.
            logger.exception(exc)
        wait_for.retry(exc=exc)
def add_usergroup(self, username, password,
                  createUser=True, adminRole=False):
    """
    Create a group for this user only
    then create the user
    TODO: drop createUser -- ignored!

    Returns (project, user, role); role is None when role assignment
    failed.
    """
    # Create user
    try:
        user = self.create_user(username, password, username)
    except ClientException as user_exists:
        logger.debug('Received Error %s on add, User exists.' %
                     user_exists)
        user = self.get_user(username)
    logger.debug("Assign project:%s Member:%s Role:%s" %
                 (username, username, adminRole))
    # Create project for user/group
    project = self.get_project(username)
    if not project:
        project = self.create_project(username)
    # Check the user has been given an appropriate role
    admin_role_name = "admin"
    if not adminRole:
        role_name = "_member_"
    else:
        role_name = admin_role_name
    # BUGFIX: 'role' was unbound (NameError at the return statement)
    # whenever add_project_membership raised ClientException.
    role = None
    try:
        role = self.add_project_membership(username, username, role_name)
    except ClientException:
        logger.warn('Could not assign role to username %s' % username)
    self.include_admin(username, admin_role_name)
    return (project, user, role)
def getDefaultIdentity(username, provider=None):
    """
    return the Default identity given to the user-group for provider

    Prefers the profile's selected_identity; otherwise picks (and caches
    on the profile) the first EUCALYPTUS identity. Returns None on error.
    """
    profile = UserProfile.objects.get(user__username=username)
    if profile.selected_identity:
        return profile.selected_identity
    try:
        group = getUsergroup(username)
        identities = group.identities.all()
        if provider:
            identities = identities.filter(provider=provider)
            return identities[0]
        default_identity = group.identities.filter(
            provider__location="EUCALYPTUS")[0]
        profile.selected_identity = default_identity
        profile.save()
        logger.debug(
            "profile.selected_identity set to %s " %
            profile.selected_identity)
        return profile.selected_identity
    except Exception as e:
        # BUGFIX: 'except Exception, e' is Python-3-incompatible syntax.
        logger.exception(e)
        return None
def _get_openstack_name_map(status_name, task_name, tmp_status):
    """Resolve the effective status from (status, task, tmp_status).

    The task/tmp_status mapping overrides the raw status only while the
    raw status is 'active-like'; e.g. 'shutoff - deploy_error' is still
    reported as 'shutoff'.
    """
    if task_name:
        mapped = OPENSTACK_TASK_STATUS_MAP.get(task_name)
        if mapped:
            logger.debug("Task provided:%s, Status maps to %s"
                         % (task_name, mapped))
    elif tmp_status:
        # ASSERT: task_name = None
        mapped = OPENSTACK_TASK_STATUS_MAP.get(tmp_status)
        logger.debug("Tmp_status provided:%s, Status maps to %s"
                     % (tmp_status, mapped))
    else:
        mapped = None
    if not mapped:
        # ASSERT: tmp_status = None
        return status_name
    # ASSERT: mapped exists; determine precedence based on status_name.
    return mapped if status_name in OPENSTACK_ACTIVE_STATES else status_name
def test_capacity(hypervisor_hostname, instance, hypervisor_stats):
    """
    Test that the hypervisor has the capacity to bring an inactive
    instance back online on the compute node.

    Raises:
        HypervisorCapacityError: when CPU, memory, or disk would exceed
            the hypervisor's totals.
    """
    # CPU tests first (Most likely bottleneck)
    cpu_total = hypervisor_stats['vcpus']
    cpu_used = hypervisor_stats['vcpus_used']
    cpu_needed = instance.size.cpu
    log_str = "Resource:%s Used:%s Additional:%s Total:%s"\
        % ("cpu", cpu_used, cpu_needed, cpu_total)
    logger.debug(log_str)
    if cpu_used + cpu_needed > cpu_total:
        raise HypervisorCapacityError(
            hypervisor_hostname,
            "Hypervisor is over-capacity. %s" % log_str)
    # ALL MEMORY VALUES IN MB
    mem_total = hypervisor_stats['memory_mb']
    mem_used = hypervisor_stats['memory_mb_used']
    mem_needed = instance.size.ram
    log_str = "Resource:%s Used:%s Additional:%s Total:%s"\
        % ("mem", mem_used, mem_needed, mem_total)
    logger.debug(log_str)
    if mem_used + mem_needed > mem_total:
        raise HypervisorCapacityError(
            hypervisor_hostname,
            "Hypervisor is over-capacity. %s" % log_str)
    # ALL DISK VALUES IN GB
    disk_total = hypervisor_stats['local_gb']
    disk_used = hypervisor_stats['local_gb_used']
    disk_needed = instance.size.disk + instance.size.ephemeral
    log_str = "Resource:%s Used:%s Additional:%s Total:%s"\
        % ("disk", disk_used, disk_needed, disk_total)
    # CONSISTENCY FIX: cpu/mem logged their check -- disk now does too.
    logger.debug(log_str)
    if disk_used + disk_needed > disk_total:
        raise HypervisorCapacityError(
            hypervisor_hostname,
            "Hypervisor is over-capacity. %s" % log_str)
def convert_esh_instance(
        esh_driver, esh_instance, provider_uuid, identity_uuid,
        user, token=None, password=None):
    """Convert a cloud (esh) instance into its core Instance record.

    Looks up the core Instance by the cloud instance's id: when found,
    its IP/password are refreshed; otherwise a new core Instance is
    created from the cloud instance's source. In both cases the status
    history is updated and the esh object is attached as `.esh` before
    the core Instance is returned.
    """
    instance_id = esh_instance.id
    ip_address = _find_esh_ip(esh_instance)
    source_obj = esh_instance.source
    core_instance = find_instance(instance_id)
    if core_instance:
        _update_core_instance(core_instance, ip_address, password)
    else:
        start_date = _find_esh_start_date(esh_instance)
        logger.debug("Instance: %s" % instance_id)
        core_source = convert_instance_source(
            esh_driver, esh_instance, source_obj,
            provider_uuid, identity_uuid, user)
        logger.debug("CoreSource: %s" % core_source)
        # Use New/Existing core Machine to create core Instance
        core_instance = create_instance(
            provider_uuid, identity_uuid, instance_id,
            core_source.instance_source, ip_address,
            esh_instance.name, user, start_date, token, password)
    # Add 'esh' object
    core_instance.esh = esh_instance
    # Update the InstanceStatusHistory
    core_size = _esh_instance_size_to_core(esh_driver, esh_instance,
                                           provider_uuid)
    # TODO: You are the mole!
    core_instance.update_history(
        esh_instance.extra['status'],
        core_size,
        esh_instance.extra.get('task'),
        # "MISSING" is the sentinel when metadata carries no tmp_status.
        esh_instance.extra.get('metadata', {}).get('tmp_status', "MISSING"))
    # Update values in core with those found in metadata.
    # core_instance = set_instance_from_metadata(esh_driver, core_instance)
    return core_instance
def auth_response(request):
    """
    Create a new AuthToken for the user, then return the Token & API URL
    AuthTokens will expire after a predefined time
    (See #/auth/utils.py:settings.TOKEN_EXPIRY_TIME)
    AuthTokens will be re-newed if the user
    is re-authenticated by CAS at expiry-time
    """
    logger.debug("Creating Auth Response")
    api_server_url = settings.API_SERVER_URL
    # login validation
    response = HttpResponse()
    # CORS / OpenStack-style discovery headers expected by the client.
    response['Access-Control-Allow-Origin'] = '*'
    response['Access-Control-Allow-Methods'] = 'POST, GET, OPTIONS'
    response['Access-Control-Max-Age'] = 1000
    response['Access-Control-Allow-Headers'] = '*'
    response['X-Server-Management-Url'] = api_server_url
    response['X-Storage-Url'] = "http://"
    response['X-CDN-Management-Url'] = "http://"
    token = str(uuid.uuid4())
    username = request.META['HTTP_X_AUTH_USER']
    # NOTE(review): `token` is returned in this header but is never
    # stored on the AuthToken constructed below -- presumably AuthToken
    # generates its own key; confirm the returned value is usable.
    response['X-Auth-Token'] = token
    # New code: If there is an 'emulate_user' parameter:
    if 'HTTP_X_EMULATE_USER' in request.META:
        # AND user has permission to emulate
        if userCanEmulate(username):
            logger.debug("EMULATION REQUEST:"
                         "Generating AuthToken for %s -- %s"
                         % (request.META['HTTP_X_EMULATE_USER'], username))
            response['X-Auth-User'] = request.META['HTTP_X_EMULATE_USER']
            response['X-Emulated-By'] = username
            # then this token is for the emulated user
            auth_user_token = AuthToken(
                user=request.META['HTTP_X_EMULATE_USER'],
                issuedTime=datetime.now(),
                remote_ip=request.META['REMOTE_ADDR'],
                api_server_url=api_server_url)
        else:
            logger.warn("EMULATION REQUEST:User deemed Unauthorized : %s"
                        % (username, ))
            # This user is unauthorized to emulate users
            # - Don't create a token!
            return HttpResponse("401 UNAUTHORIZED TO EMULATE", status=401)
    else:
        # Normal login, no user to emulate
        auth_user_token = AuthToken(user=username,
                                    issuedTime=datetime.now(),
                                    remote_ip=request.META['REMOTE_ADDR'],
                                    api_server_url=api_server_url)
        response['X-Auth-User'] = username
    auth_user_token.save()
    return response
def get(self, request, provider_uuid, identity_uuid, format=None):
    """
    Authentication Required, Get details for a specific identity.
    """
    provider = get_provider(request.user, provider_uuid)
    identity = get_identity(request.user, identity_uuid)
    if provider and identity:
        serialized_data = IdentitySerializer(identity).data
        logger.debug(type(serialized_data))
        return Response(serialized_data)
    return invalid_provider_identity(provider_uuid, identity_uuid)
def test_all_instance_links():
    """Celery task: verify the links of every known instance, retrying
    the task on failure."""
    try:
        logger.debug("test_all_instance_links task started at %s."
                     % datetime.now())
        update_links(get_all_instances())
        logger.debug("test_all_instance_links task finished at %s."
                     % datetime.now())
    except Exception as exc:
        logger.exception('Error during test_all_instance_links task')
        test_all_instance_links.retry(exc=exc)
def apply_rule(self, instance, history, running_time, print_logs=False):
    """
    Multiply the running_time by (multiplier) to adjust
    the overall burn time.
    """
    adjusted_time = running_time * self.multiplier
    if print_logs:
        logger.debug(">> %s Current Running Time:%s * Multiplier:%s = %s"
                     % (history.status, running_time,
                        self.multiplier, adjusted_time))
    return adjusted_time
def validate_auth_token(decorated_func, *args, **kwargs):
    """Decorator body: reject the request with 401 unless its auth token
    validates; otherwise invoke the wrapped view.

    `args[0]` is expected to be the request object.
    """
    request = args[0]
    valid_user = validate_request_user(request)
    if not valid_user:
        logger.debug('Unauthorized access by %s - %s - Invalid Token'
                     % (valid_user, request.META.get('REMOTE_ADDR')))
        return Response(
            "Expected header parameter: Authorization Token <TokenID>",
            status=status.HTTP_401_UNAUTHORIZED)
    # BUGFIX: previously called the undefined name 'func' (NameError on
    # every authorized request) and passed 'request' twice (once
    # explicitly and once inside *args). Pass args through unchanged.
    return decorated_func(*args, **kwargs)
def tacc_api_post(url, post_data, username=None, password=None):
    """POST `post_data` to the TACC API with HTTP basic auth, defaulting
    to the credentials from settings. Returns the requests Response."""
    if not username:
        username = settings.TACC_API_USER
    if not password:
        password = settings.TACC_API_PASS
    logger.debug('url: %s', url)
    # logger.debug("REQ BODY: %s" % post_data)
    response = requests.post(url, post_data, auth=(username, password))
    logger.debug('resp.status_code: %s', response.status_code)
    # logger.debug('resp.__dict__: %s', resp.__dict__)
    return response
def create_history(cls, status_name, instance, size, start_date=None):
    """
    Creates a new (Unsaved!) InstanceStatusHistory
    """
    status, _ = InstanceStatus.objects.get_or_create(name=status_name)
    history_kwargs = {'instance': instance, 'size': size, 'status': status}
    new_history = InstanceStatusHistory(**history_kwargs)
    if start_date:
        new_history.start_date = start_date
    logger.debug("Created new history object: %s " % (new_history))
    return new_history
def authenticate(self, username=None, password=None, request=None):
    """
    Return user if validated by LDAP.
    Return None otherwise.
    """
    if not ldap_validate(username, password):
        logger.debug("LDAP Authentication failed - " + username)
        return None
    attributes = ldap_formatAttrs(ldap_lookupUser(username))
    return get_or_create_user(username, attributes)
def add_os_project_network(core_identity, *args, **kwargs):
    """Celery task: create the OpenStack project network for an identity,
    retrying on failure."""
    try:
        logger.debug("add_os_project_network task started at %s."
                     % datetime.now())
        from rtwo.accounts.openstack import AccountDriver as OSAccountDriver
        account_driver = OSAccountDriver(core_identity.provider)
        account_driver.create_network(core_identity)
        logger.debug("add_os_project_network task finished at %s."
                     % datetime.now())
    except Exception as exc:
        # CONSISTENCY FIX: log before retrying (as sibling tasks do);
        # previously the failure was silent until retries were exhausted.
        logger.warn(exc)
        add_os_project_network.retry(exc=exc)
def clean_empty_ips(driverCls, provider, identity, *args, **kwargs):
    """
    Celery task: ask the driver to release floating IPs that are no
    longer attached. Returns whatever the driver reports as cleaned;
    logs and retries on failure.
    """
    try:
        logger.debug("remove_floating_ip task started at %s." %
                     datetime.now())
        esh_driver = get_driver(driverCls, provider, identity)
        cleaned = esh_driver._clean_floating_ip()
        logger.debug("remove_floating_ip task finished at %s." %
                     datetime.now())
        return cleaned
    except Exception as exc:
        logger.warn(exc)
        clean_empty_ips.retry(exc=exc)
def clear_empty_ips():
    """
    Fan out a `clear_empty_ips_for` task for every current OpenStack
    identity. A failure to enqueue for one identity is logged and does
    not stop the remaining identities.
    """
    logger.debug("clear_empty_ips task started at %s." % datetime.now())
    for core_identity in current_openstack_identities():
        try:
            # TODO: Add some  (original note left unfinished)
            clear_empty_ips_for.apply_async(
                args=[core_identity.id, core_identity.created_by])
        except Exception as exc:
            logger.exception(exc)
    logger.debug("clear_empty_ips task finished at %s." % datetime.now())
def application(request):
    """
    View for the application page: logs the session and loads the
    application template, redirecting to login on any failure.

    NOTE(review): on success this function falls through without an
    explicit return (i.e. returns None) -- confirm whether a render of
    the loaded `template` was dropped from this view.
    """
    try:
        logger.debug("APPLICATION")
        logger.debug(str(request.session.__dict__))
        #access_log(request,meta_data = "{'userid' : '%s', 'token' : '%s',
        #'api_server' : '%s'}" %(request.session['username'],
        #request.session['token'],request.session['api_server']))
        template = get_template('application/application.html')
    # FIX: Python-2-only `except Exception, e` syntax replaced with the
    # `as` form, which is valid in Python 2.6+ and Python 3.
    except Exception as e:
        logger.exception(e)
        return HttpResponseRedirect(settings.REDIRECT_URL + '/login')
def get_or_create_provider_configuration(sender, provider_instance=None,
                                         created=False, **kwargs):
    """
    Signal handler: ensure a ProviderConfiguration row exists for the
    provider that was just saved. No-op when no provider is supplied.
    """
    if not provider_instance:
        return
    _config, was_created = ProviderConfiguration.objects.get_or_create(
        provider=provider_instance)
    if was_created is True:
        logger.debug("Creating Provider Configuration for %s"
                     % provider_instance)
def post(self, request):
    """
    Creates a new Quota Request email and sends it to admins.

    Responds 201 with the mailer result, or an error response when
    required keys ('quota', 'reason') are missing from the body.
    """
    required = ["quota", "reason"]
    missing_keys = check_missing_keys(request.data, required)
    if missing_keys:
        return keys_not_found(missing_keys)
    logger.debug("request.data = %s" % (str(request.data)))
    quota = request.data["quota"]
    reason = request.data["reason"]
    result = self._email(request, request.user.username, quota, reason)
    return Response(result, status=status.HTTP_201_CREATED)
def apply_rule(self, instance, history, running_time, print_logs=False):
    """
    Scale `running_time` by the disk size (GB) times this rule's
    multiplier, optionally logging the computation first.
    """
    disk_size = history.size.disk
    if print_logs:
        logger.debug(
            ">> %s Current Running Time:%s * Disk:%s * Multiplier:%s = %s" %
            (history.status, running_time, disk_size, self.multiplier,
             running_time * disk_size * self.multiplier))
    return running_time * self.multiplier * disk_size
def run(self, node, client):
    """
    Execute the scripted deployment, then mirror its output into the
    server-side logs: stdout at debug level, stderr at warn level.
    """
    result_node = super(LoggedScriptDeployment, self).run(node, client)
    out, err = self.stdout, self.stderr
    if out:
        logger.debug('%s (%s)STDOUT: %s' % (result_node.id, self.name, out))
    if err:
        logger.warn('%s (%s)STDERR: %s' % (result_node.id, self.name, err))
    return result_node
def create_accounts(self, provider, username, force=False):
    """
    Create (or re-use) cloud accounts for `username` on `provider`.

    For each credential-set from `get_credentials_list`:
    - re-use already-created identities unless `force` is set;
    - otherwise create the account via the provider's account driver,
      then ensure a Project owned by the identity's (leader) group.

    Failures for one credential-set are logged and do not stop the
    rest. Returns a queryset of the identities found/created.
    """
    from service.driver import get_account_driver
    from core.models import Project, Identity
    credentials_list = self.get_credentials_list(provider, username)
    identities = Identity.objects.none()
    for credentials in credentials_list:
        try:
            project_name = credentials['project_name']
            created_identities = self.find_accounts(provider, **credentials)
            if created_identities and not force:
                # Accounts already created for this credential-set.
                identities |= created_identities
                continue
            logger.debug(
                "Creating new account for %s with credentials - %s"
                % (username, credentials)
            )
            account_driver = get_account_driver(provider)
            if not account_driver:
                raise ValueError(
                    "Provider %s produced an invalid account driver "
                    "-- Use plugin after you create a core.Provider "
                    "*AND* assign a core.Identity to be the core.AccountProvider."
                    % provider)
            new_identity = account_driver.create_account(**credentials)
            identities |= Identity.objects.filter(id=new_identity.id)
            # Prefer a leader membership when picking the owning group.
            memberships = new_identity.identity_memberships.filter(
                member__memberships__is_leader=True
            )
            if not memberships:
                memberships = new_identity.identity_memberships.all()
            membership = memberships.first()
            if not membership:
                raise ValueError(
                    "Expected at least one member in identity %s"
                    % new_identity
                )
            group = membership.member
            # Ensure a Project exists for this group (EAFP).
            try:
                Project.objects.get(name=project_name, owner=group)
            except Project.DoesNotExist:
                Project.objects.create(
                    name=project_name,
                    created_by=new_identity.created_by,
                    owner=group
                )
        # FIX: bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception while keeping the
        # deliberate best-effort (log-and-continue) behavior.
        except Exception:
            logger.exception(
                "Could *NOT* Create NEW account for %s" % username
            )
    return identities
def destroy_all_instances(self):
    """
    Destroy all instances and delete tenant networks for all users.

    Returns True once the sweep completes.
    """
    for instance in self.all_instances():
        self.admin_driver.destroy_instance(instance)
        logger.debug('Destroyed instance %s' % instance)
    os_driver = OSAccountDriver()
    for username in os_driver.list_usergroup_names():
        # Tenant name mirrors the username by convention here.
        os_driver.network_manager.delete_tenant_network(username, username)
    return True
def _convert_timestamp(create_stamp):
    """
    Parse an ISO-8601 'Z'-suffixed timestamp string -- with or without
    microseconds -- into a timezone-aware UTC datetime.
    """
    try:
        parsed = datetime.strptime(create_stamp, '%Y-%m-%dT%H:%M:%S.%fZ')
    except ValueError:
        # Stamp carried no fractional seconds.
        parsed = datetime.strptime(create_stamp, '%Y-%m-%dT%H:%M:%SZ')
    # All dates are relative to UTC.
    start_date = parsed.replace(tzinfo=pytz.utc)
    logger.debug("Launched At: %s" % create_stamp)
    logger.debug("Started At: %s" % start_date)
    return start_date
def authenticate(self, username=None, password=None, request=None):
    """
    CAS authentication backend hook.

    Returns the (possibly newly created) user when CAS validates
    `username`; returns None otherwise.
    """
    (success, cas_response) = cas_validateUser(username)
    logger.info("Authenticate by CAS: %s - %s %s"
                % (username, success, cas_response))
    if success:
        attributes = cas_formatAttrs(cas_response)
        return makeOrCreateUser(username, attributes)
    logger.debug("CAS Authentication failed - " + username)
    return None
def _load_machine(esh_machine, provider_uuid):
    """
    Map an esh (cloud) machine onto a core ProviderMachine, creating
    the backing Application first when none exists for this image.
    """
    machine_name = esh_machine.name
    machine_alias = esh_machine.id
    app = get_application(provider_uuid, machine_alias, machine_name)
    if not app:
        logger.debug("Creating Application for Image %s" % (machine_alias, ))
        app = create_application(provider_uuid, machine_alias, machine_name)
    # Using what we know about our (possibly new) application,
    # load (or possibly create) the provider machine.
    return get_or_create_provider_machine(
        machine_alias, machine_name, provider_uuid, app=app)
def cas_formatAttrs(cas_response):
    """
    Formats attrs into a unified dict to ease in user creation.

    Returns the 'attributes' dict extracted from the CAS response, or
    None when the expected keys are absent.
    """
    try:
        cas_response_obj = cas_response.map[cas_response.type]
        logger.debug(cas_response_obj)
        cas_attrs = cas_response_obj['attributes']
        return cas_attrs
    # FIX: Python-2-only `except KeyError, nokey` syntax replaced with
    # the `as` form, which is valid in Python 2.6+ and Python 3.
    except KeyError as nokey:
        logger.debug("Error retrieving attributes")
        logger.exception(nokey)
        return None
def provider_over_allocation_enforcement(identity, user):
    """
    Apply the provider's configured over-allocation action to every
    instance owned by `identity`.

    Returns True when an action was enforced on the instances, False
    when the provider defines no 'over_allocation_action'.
    """
    provider = identity.provider
    action = provider.over_allocation_action
    if not action:
        logger.debug("No 'over_allocation_action' provided for %s" % provider)
        return False
    driver = get_cached_driver(identity=identity)
    # TODO: Parallelize this operation so you don't wait for larger
    # instances to finish 'wait_for' task below..
    for instance in driver.list_instances():
        execute_provider_action(user, driver, identity, instance, action)
    return True  # User was over_allocation
def apply_rule(self, instance, history, running_time, print_logs=False):
    """
    Multiply the running_time by size of RAM (MB) * (multiplier)
    NOTE: To calculate in GB, set self.multiplier = 1/1024
    """
    if print_logs:
        # FIX: the log line previously reported history.size.disk even
        # though this rule scales by RAM (copy-paste from the disk rule).
        logger.debug(
            ">> %s Current Running Time:%s * RAM:%s * Multiplier:%s = %s" %
            (history.status, running_time, history.size.ram,
             self.multiplier,
             running_time * history.size.ram * self.multiplier))
    running_time *= self.multiplier * history.size.ram
    return running_time
def glance_read_machine(new_machine):
    """
    The glance API contains MOAR information about the image then a
    call to 'list_machines()' on the OpenStack (Compute/Nova) Driver.
    This method will call glance and update any/all available information.
    """
    new_app = new_machine.application
    base_source = new_machine.instance_source
    provider_uuid = base_source.provider.uuid
    identifier = base_source.identifier
    g_image = glance_image_for(provider_uuid, identifier)
    if not g_image:
        logger.warn("DID NOT FIND glance image for %s" % new_machine)
        return
    # If glance image, we can also infer some about the application
    owner = glance_image_owner(provider_uuid, identifier, g_image)
    if owner:
        base_source.created_by = owner.created_by
        base_source.created_by_identity = owner
        base_source.save()
    logger.debug("Found glance image for %s" % new_machine)
    if g_image.get('visibility', 'public') != 'public':
        new_app.private = True
    if new_app.first_machine() is new_machine:
        logger.debug("Glance image represents App:%s" % new_app)
        # FIX: guard on `owner` -- previously these assignments ran
        # unconditionally and raised AttributeError ('NoneType' has no
        # attribute 'created_by') when glance reported no image owner.
        if owner:
            new_app.created_by = owner.created_by
            new_app.created_by_identity = owner
    g_start_date = glance_timestamp(g_image.get('created_at'))
    g_end_date = glance_timestamp(g_image.get('deleted'))
    if not g_start_date:
        logger.warn(
            "Could not parse timestamp of 'created_at': %s"
            % g_image['created_at']
        )
        g_start_date = now()
    new_app.start_date = g_start_date
    new_app.end_date = g_end_date
    new_app.save()
    base_source.start_date = g_start_date
    base_source.end_date = g_end_date
    base_source.save()
    new_machine.save()
def convert_esh_machine(esh_driver, esh_machine, provider_id, image_id=None):
    """
    Takes as input an (rtwo) driver and machine, and a core provider id
    Returns as output a core ProviderMachine
    """
    # Only an image id known (machine not listed by the driver):
    # convert from the instance record instead.
    if image_id and not esh_machine:
        return _convert_from_instance(esh_driver, provider_id, image_id)
    elif not esh_machine:
        return None
    push_metadata = False
    if not esh_machine._image:
        metadata = {}
    else:
        metadata = esh_machine._image.extra.get('metadata', {})
    name = esh_machine.name
    alias = esh_machine.alias
    # NOTE(review): the `and False` below deliberately disables the
    # metadata-driven branch -- every image currently takes the
    # "machine is its own application" path. Confirm intent before
    # re-enabling.
    if metadata and False and has_app_data(metadata):
        #USE CASE: Application data exists on the image
        # and may exist on this DB
        app = get_application(alias, metadata.get('application_uuid'))
        if not app:
            app_kwargs = get_app_data(metadata, provider_id)
            logger.debug("Creating Application for Image %s "
                         "(Based on Application data: %s)"
                         % (alias, app_kwargs))
            app = create_application(alias, provider_id, **app_kwargs)
    else:
        #USE CASE: Application data does NOT exist,
        # This machine is assumed to be its own application, so run the
        # machine alias to retrieve any existing application.
        # otherwise create a new application with the same name as the machine
        # App assumes all default values
        #logger.info("Image %s missing Application data" % (alias, ))
        push_metadata = True
        #TODO: Get application 'name' instead?
        app = get_application(alias)
        if not app:
            logger.debug("Creating Application for Image %s" % (alias, ))
            app = create_application(alias, provider_id, name)
    provider_machine = load_provider_machine(alias, name, provider_id,
                                             app=app, metadata=metadata)
    # Metadata write-back is currently disabled (push_metadata unused):
    #if push_metadata and hasattr(esh_driver._connection,
    #                             'ex_set_image_metadata'):
    #    logger.debug("Creating App data for Image %s:%s" % (alias, app.name))
    #    write_app_data(esh_driver, provider_machine)
    # Attach the live esh machine so callers can reach driver-side data.
    provider_machine.esh = esh_machine
    return provider_machine
def auth1_0(request):
    """
    VERSION 1 AUTH -- DEPRECATED
    Authentication is based on the values passed in to the header.
    If successful, the request is passed on to auth_response
    CAS Authentication requires: "x-auth-user" AND "x-auth-cas"
    LDAP Authentication requires: "x-auth-user" AND "x-auth-key"

    NOTE(esteve): Should we just always attempt authentication by cas,
    then we dont send around x-auth-* headers..
    """
    logger.debug("Auth Request")
    meta = request.META
    has_user = 'HTTP_X_AUTH_USER' in meta
    if has_user and 'HTTP_X_AUTH_CAS' in meta:
        username = meta['HTTP_X_AUTH_USER']
        if not cas_validateUser(username):
            logger.debug("CAS login failed - %s" % username)
            return HttpResponse("401 UNAUTHORIZED", status=401)
        del meta['HTTP_X_AUTH_CAS']
        return auth_response(request)
    if has_user and 'HTTP_X_AUTH_KEY' in meta:
        username = meta['HTTP_X_AUTH_USER']
        x_auth_key = meta['HTTP_X_AUTH_KEY']
        if not ldap_validate(username, x_auth_key):
            logger.debug("LDAP login failed - %s" % username)
            return HttpResponse("401 UNAUTHORIZED", status=401)
        return auth_response(request)
    logger.debug("Request did not have User/Key"
                 " or User/CAS in the headers")
    return HttpResponse("401 UNAUTHORIZED", status=401)
def create_provider_machine(identifier, provider_uuid, app,
                            created_by_identity=None, version=None):
    """
    Create a ProviderMachine (plus its InstanceSource and app version
    as needed) for image `identifier` on provider `provider_uuid`.

    Attempt to match machine by provider alias.
    Admin identity used until the real owner can be identified.
    """
    provider = Provider.objects.get(uuid=provider_uuid)
    if not created_by_identity:
        created_by_identity = provider.admin
    try:
        source = InstanceSource.objects.get(
            provider=provider, identifier=identifier)
        source.created_by_identity = created_by_identity
        source.created_by = created_by_identity.created_by
        # FIX: persist the ownership update -- previously these field
        # changes were made in memory only and never saved, so the
        # database row kept its stale creator.
        source.save()
    except InstanceSource.DoesNotExist:
        source = InstanceSource.objects.create(
            provider=provider,
            identifier=identifier,
            created_by_identity=created_by_identity,
            created_by=created_by_identity.created_by,
        )
    if not version:
        version = create_app_version(app)
    logger.debug("Provider %s" % provider)
    logger.debug("App %s" % app)
    logger.debug("Version %s" % version)
    logger.debug("Source %s" % source.identifier)
    provider_machine = ProviderMachine.objects.create(
        instance_source=source,
        application_version=version,
    )
    read_cloud_machine_hook(provider_machine, provider_uuid, identifier)
    logger.info("New ProviderMachine created: %s" % provider_machine)
    add_to_cache(provider_machine)
    return provider_machine
def _send_instance_email(driverCls, provider, identity, instance_id):
    """
    Celery task: email the owner of `instance_id` that their instance
    was launched -- unless the instance is already gone or the user's
    profile opts out of emails. Logs and retries on failure.
    """
    try:
        logger.debug("_send_instance_email task started at %s." %
                     datetime.now())
        driver = get_driver(driverCls, provider, identity)
        instance = driver.get_instance(instance_id)
        # Breakout if instance has been deleted at this point
        if not instance:
            # FIX: corrected typo 'teminated' -> 'terminated' in log text.
            logger.debug("Instance has been terminated: %s." % instance_id)
            return
        username = identity.user.username
        profile = UserProfile.objects.get(user__username=username)
        if profile.send_emails:
            # Only send emails if allowed by profile setting
            created = datetime.strptime(instance.extra['created'],
                                        "%Y-%m-%dT%H:%M:%SZ")
            send_instance_email(username,
                                instance.id,
                                instance.name,
                                instance.ip,
                                created,
                                username)
        else:
            logger.debug("User %s elected NOT to receive new instance emails"
                         % username)
        logger.debug("_send_instance_email task finished at %s." %
                     datetime.now())
    except Exception as exc:
        logger.warn(exc)
        _send_instance_email.retry(exc=exc)