def fill_user_allocation_source_for(driver, user):
    from core.models import AtmosphereUser
    assert isinstance(user, AtmosphereUser)
    allocation_list = find_user_allocation_source_for(driver, user)
    if allocation_list is None:
        logger.info(
            "find_user_allocation_source_for %s is None, "
            "so stop and don't delete allocations" % user.username
        )
        return
    allocation_resources = []
    user_allocation_sources = []
    old_user_allocation_sources = list(
        UserAllocationSource.objects.filter(
            user=user
        ).order_by('allocation_source__name').all()
    )
    for api_allocation in allocation_list:
        allocation_source = get_or_create_allocation_source(api_allocation)
        allocation_resources.append(allocation_source)
        user_allocation_source = get_or_create_user_allocation_source(
            user, allocation_source
        )
        user_allocation_sources.append(user_allocation_source)
    canonical_source_names = [source.name for source in allocation_resources]
    for user_allocation_source in old_user_allocation_sources:
        if user_allocation_source.allocation_source.name \
                not in canonical_source_names:
            delete_user_allocation_source(
                user, user_allocation_source.allocation_source
            )
    return allocation_resources

def put(self, request, provider_id, identity_id, instance_id):
    """Authentication Required, update metadata about the instance"""
    user = request.user
    data = request.DATA
    # Ensure item exists on the server first
    esh_driver = prepare_driver(request, provider_id, identity_id)
    if not esh_driver:
        return invalid_creds(provider_id, identity_id)
    esh_instance = esh_driver.get_instance(instance_id)
    if not esh_instance:
        return instance_not_found(instance_id)
    # Gather the DB related item and update
    core_instance = convert_esh_instance(esh_driver, esh_instance,
                                         provider_id, identity_id, user)
    serializer = InstanceSerializer(core_instance, data=data,
                                    context={"request": request})
    if serializer.is_valid():
        logger.info('metadata = %s' % data)
        update_instance_metadata(esh_driver, esh_instance, data)
        serializer.save()
        response = Response(serializer.data)
        logger.info('data = %s' % serializer.data)
        response['Cache-Control'] = 'no-cache'
        return response
    else:
        return Response(serializer.errors,
                        status=status.HTTP_400_BAD_REQUEST)

def _launch_machine(driver, identity, machine, size,
                    name, userdata_content=None, network=None,
                    password=None, token=None, **kwargs):
    if isinstance(driver.provider, EucaProvider):
        # Create/deploy the instance -- NOTE: Name is passed in extras
        logger.info("EUCA -- driver.create_instance EXTRAS:%s" % kwargs)
        esh_instance = driver\
            .create_instance(name=name, image=machine, size=size,
                             ex_userdata=userdata_content, **kwargs)
    elif isinstance(driver.provider, OSProvider):
        deploy = True
        # ex_metadata, ex_keyname
        extra_args = _extra_openstack_args(identity)
        kwargs.update(extra_args)
        logger.debug("OS driver.create_instance kwargs: %s" % kwargs)
        esh_instance = driver.create_instance(
            name=name, image=machine, size=size,
            token=token, networks=[network],
            ex_admin_pass=password, **kwargs)
        # Used for testing.. Eager ignores countdown
        if app.conf.CELERY_ALWAYS_EAGER:
            logger.debug("Eager Task, wait 1 minute")
            time.sleep(1 * 60)
    elif isinstance(driver.provider, AWSProvider):
        # TODO: Extra stuff needed for AWS provider here
        esh_instance = driver.deploy_instance(
            name=name, image=machine, size=size,
            deploy=True, token=token, **kwargs)
    else:
        raise Exception("Unable to launch with this provider.")
    return (esh_instance, token, password)

def update_instances(driver, identity, esh_list, core_list):
    """
    End-date core instances that don't show up in esh_list
    && Update the values of instances that do
    """
    esh_ids = [instance.id for instance in esh_list]
    # logger.info('%s Instances for Identity %s: %s'
    #             % (len(esh_ids), identity, esh_ids))
    for core_instance in core_list:
        try:
            index = esh_ids.index(core_instance.provider_alias)
        except ValueError:
            logger.info("Did not find instance %s in ID List: %s"
                        % (core_instance.provider_alias, esh_ids))
            core_instance.end_date_all()
            continue
        esh_instance = esh_list[index]
        esh_size = driver.get_size(esh_instance.size.id)
        # Derive the provider id from the identity that owns these instances
        core_size = convert_esh_size(esh_size, identity.provider.uuid)
        core_instance.update_history(
            esh_instance.extra['status'],
            core_size,
            esh_instance.extra.get('task') or
            esh_instance.extra.get('metadata', {}).get('tmp_status'))
    return

def mount_volume_task(driver, instance_id, volume_id, device=None,
                      mount_location=None, *args, **kwargs):
    """
    Mount, if possible, the volume to instance
    Device and mount_location assumed if empty
    """
    logger.info("Mount ONLY: %s --> %s" % (volume_id, instance_id))
    logger.info("device_location:%s --> mount_location: %s"
                % (device, mount_location))
    try:
        if not hasattr(driver, 'deploy_to'):
            # Do not attempt to mount if we don't have sh access
            return None
        vol = driver.get_volume(volume_id)
        existing_mount = vol.extra.get('metadata', {}).get('mount_location')
        if existing_mount:
            raise VolumeMountConflict(
                instance_id, volume_id,
                "Volume already mounted at %s. Run 'unmount_volume' first!"
                % existing_mount)
        if not driver._connection.ex_volume_attached_to_instance(
                vol, instance_id):
            raise VolumeMountConflict(
                instance_id, volume_id,
                "Cannot mount volume %s -- Not attached to instance %s"
                % (volume_id, instance_id))
        mount_chain = _get_mount_chain(driver, instance_id, volume_id,
                                       device, mount_location)
        mount_chain.apply_async()
    except VolumeMountConflict:
        raise
    except Exception:
        logger.exception("Exc occurred")
        raise VolumeMountConflict(instance_id, volume_id)

def total_usage(username, start_date, allocation_source_name=None,
                end_date=None, burn_rate=False, email=None):
    """
    This function outputs the total allocation usage in hours
    """
    from service.allocation_logic import create_report
    if not end_date:
        end_date = timezone.now()
    user_allocation = create_report(
        start_date, end_date, user_id=username,
        allocation_source_name=allocation_source_name)
    if email:
        return user_allocation
    total_allocation = 0.0
    for data in user_allocation:
        # print data['instance_id'], data['allocation_source'],
        #       data['instance_status_start_date'],
        #       data['instance_status_end_date'], data['applicable_duration']
        if data['allocation_source'] != 'N/A':
            total_allocation += data['applicable_duration']
    compute_used_total = round(total_allocation / 3600.0, 2)
    if compute_used_total > 0:
        logger.info("Total usage for User %s with AllocationSource %s"
                    " from %s-%s = %s"
                    % (username, allocation_source_name,
                       start_date, end_date, compute_used_total))
    if burn_rate:
        burn_rate_total = 0 if len(user_allocation) < 1 \
            else user_allocation[-1]['burn_rate']
        if burn_rate_total != 0:
            logger.info("User %s with AllocationSource %s Burn Rate: %s"
                        % (username, allocation_source_name,
                           burn_rate_total))
        return [compute_used_total, burn_rate_total]
    return compute_used_total

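# Illustrative sketch (not part of the original source): the unit handling
# in total_usage above. `applicable_duration` is accumulated in seconds and
# the final figure is reported in hours, rounded to two decimal places.
def _example_seconds_to_hours(total_seconds):
    return round(total_seconds / 3600.0, 2)

# _example_seconds_to_hours(5400)   -> 1.5
# _example_seconds_to_hours(12345)  -> 3.43
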
def transaction(cls, status_name, instance, size,
                start_time=None, last_history=None):
    try:
        with transaction.atomic():
            if not last_history:
                # Required to prevent race conditions.
                last_history = instance.get_last_history()\
                    .select_for_update(nowait=True)
                if not last_history:
                    raise ValueError(
                        "A previous history is required "
                        "to perform a transaction. Instance:%s"
                        % (instance,))
                elif last_history.end_date:
                    raise ValueError("Old history already has end date: %s"
                                     % last_history)
            last_history.end_date = start_time
            last_history.save()
            new_history = InstanceStatusHistory.create_history(
                status_name, instance, size, start_time)
            logger.info(
                "Status Update - User:%s Instance:%s "
                "Old:%s New:%s Time:%s"
                % (instance.created_by,
                   instance.provider_alias,
                   last_history.status.name,
                   new_history.status.name,
                   new_history.start_date))
            new_history.save()
            return new_history
    except DatabaseError:
        logger.exception(
            "instance_status_history: Lock is already acquired by"
            " another transaction.")

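# Illustrative sketch (not part of the original source): the row-locking
# pattern used by transaction() above, in isolation. Assumes a Django
# queryset over a model with an `end_date` field; `nowait=True` makes a
# second concurrent caller fail fast with DatabaseError instead of
# blocking on the lock.
from django.db import DatabaseError, transaction as db_transaction


def _example_end_date_locked(history_qs, end_time):
    try:
        with db_transaction.atomic():
            # Lock the row for the duration of the transaction.
            locked = history_qs.select_for_update(nowait=True).get()
            locked.end_date = end_time
            locked.save()
            return locked
    except DatabaseError:
        # Another transaction holds the lock -- caller should retry later.
        return None
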
def attach_volume_task(driver, instance_id, volume_id, device=None,
                       mount_location=None, *args, **kwargs):
    """
    Attach (and mount, if possible) volume to instance
    Device and mount_location assumed if empty
    """
    logger.info("Attach: %s --> %s" % (volume_id, instance_id))
    logger.info("device_location:%s, mount_location: %s"
                % (device, mount_location))
    try:
        attach_volume = attach_task.si(
            driver.__class__, driver.provider, driver.identity,
            instance_id, volume_id, device)
        if not hasattr(driver, 'deploy_to'):
            # Do not attempt to mount if we don't have sh access
            attach_volume.apply_async()
            # No mount location, return None
            return None
        mount_chain = _get_mount_chain(driver, instance_id, volume_id,
                                       device, mount_location)
        attach_volume.link(mount_chain)
        attach_volume.apply_async()
    except Exception:
        raise VolumeMountConflict(instance_id, volume_id)
    return mount_location

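# Illustrative sketch (not part of the original source): the Celery pattern
# used by attach_volume_task above -- build an immutable signature with
# `.si()` so the parent's return value is NOT injected into the callback,
# then `.link()` the callback so it runs only after the parent succeeds.
# The two task names here are hypothetical stand-ins.
from celery import shared_task


@shared_task
def _example_attach(volume_id):
    return "attached %s" % volume_id


@shared_task
def _example_mount(mount_location):
    return "mounted at %s" % mount_location


def _example_attach_then_mount(volume_id, mount_location):
    attach_sig = _example_attach.si(volume_id)
    # Immutable: _example_mount receives only its own args, not
    # _example_attach's return value.
    attach_sig.link(_example_mount.si(mount_location))
    return attach_sig.apply_async()
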
def _update_application(self, request, app, **kwargs):
    user = request.user
    data = request.DATA
    app_owner = app.created_by
    app_members = app.get_members()
    if user != app_owner and not Group.check_membership(user, app_members):
        return failure_response(status.HTTP_403_FORBIDDEN,
                                "You are not the Application owner. "
                                "This incident will be reported")
        # Or it won't.. Up to operations..
    partial_update = True if request.method == 'PATCH' else False
    serializer = ApplicationSerializer(app, data=data,
                                       context={'request': request},
                                       partial=partial_update)
    if serializer.is_valid():
        logger.info('metadata = %s' % data)
        # TODO: Update application metadata on each machine?
        # update_machine_metadata(esh_driver, esh_machine, data)
        serializer.save()
        if 'created_by_identity' in data:
            identity = serializer.object.created_by_identity
            update_application_owner(serializer.object, identity)
        if 'boot_scripts' in data:
            _save_scripts_to_application(serializer.object,
                                         data.get('boot_scripts', []))
        return Response(serializer.data)
    return failure_response(
        status.HTTP_400_BAD_REQUEST,
        serializer.errors)

def create_provider_machine(identifier, provider_uuid, app,
                            created_by_identity=None, version=None):
    # Attempt to match machine by provider alias
    # Admin identity used until the real owner can be identified.
    provider = Provider.objects.get(uuid=provider_uuid)
    if not created_by_identity:
        created_by_identity = provider.admin
    try:
        source = InstanceSource.objects.get(provider=provider,
                                            identifier=identifier)
        source.created_by_identity = created_by_identity
        source.created_by = created_by_identity.created_by
    except InstanceSource.DoesNotExist:
        source = InstanceSource.objects.create(
            provider=provider,
            identifier=identifier,
            created_by_identity=created_by_identity,
            created_by=created_by_identity.created_by,
        )
    if not version:
        version = create_app_version(app)
    logger.debug("Provider %s" % provider)
    logger.debug("App %s" % app)
    logger.debug("Version %s" % version)
    logger.debug("Source %s" % source.identifier)
    provider_machine = ProviderMachine.objects.create(
        instance_source=source,
        application_version=version)
    provider_machine_update_hook(provider_machine, provider_uuid, identifier)
    logger.info("New ProviderMachine created: %s" % provider_machine)
    add_to_cache(provider_machine)
    return provider_machine

def set_provider_quota(identity_uuid, limit_dict=None):
    identity = Identity.objects.get(uuid=identity_uuid)
    if not identity.credential_set.all():
        # Can't update quota if credentials aren't set
        return
    if not limit_dict:
        limit_dict = _get_hard_limits(identity.provider)
    if identity.provider.get_type_name().lower() == 'openstack':
        driver = get_cached_driver(identity=identity)
        username = identity.created_by.username
        user_id = driver._connection.key
        tenant_id = driver._connection._get_tenant_id()
        membership = IdentityMembership.objects.get(
            identity__uuid=identity_uuid,
            member__name=username)
        user_quota = membership.quota
        if user_quota:
            # Don't go above the hard-set limits per provider.
            if user_quota.cpu > limit_dict['cpu']:
                user_quota.cpu = limit_dict['cpu']
            if user_quota.memory > limit_dict['ram']:
                user_quota.memory = limit_dict['ram']
            # Use THESE values...
            values = {'cores': user_quota.cpu,
                      'ram': user_quota.memory * 1024}
            logger.info("Updating quota for %s to %s" % (username, values))
            ad = AccountDriver(identity.provider)
            admin_driver = ad.admin_driver
            admin_driver._connection.ex_update_quota_for_user(tenant_id,
                                                              user_id,
                                                              values)
    return True

def monitor_instances_for(provider, users=None, print_logs=False):
    """
    Update instances for provider.
    """
    # For now, let's just ignore everything that isn't openstack.
    if 'openstack' not in provider.type.name.lower():
        return
    instance_map = get_instance_owner_map(provider, users=users)
    if print_logs:
        import logging
        import sys
        consolehandler = logging.StreamHandler(sys.stdout)
        consolehandler.setLevel(logging.DEBUG)
        logger.addHandler(consolehandler)
        print_table_header()
    for username in sorted(instance_map.keys()):
        instances = instance_map[username]
        monitor_instances_for_user(provider, username, instances,
                                   print_logs)
    logger.info("Monitoring completed")
    if print_logs:
        logger.removeHandler(consolehandler)

def _get_instance_owner_map(provider, users=None):
    """
    All keys == All identities
    Values = List of identities / username

    NOTE: This is KEYSTONE && NOVA specific.
    The 'instance owner' here is the username // ex_tenant_name
    """
    from service.driver import get_account_driver
    accounts = get_account_driver(provider=provider, raise_exception=True)
    all_identities = _select_identities(provider, users)
    acct_providers = AccountProvider.objects.filter(provider=provider)
    if acct_providers:
        account_identity = acct_providers[0].identity
        provider = None
    else:
        account_identity = None
    all_instances = get_cached_instances(
        provider=provider, identity=account_identity, force=True
    )
    # all_tenants = admin_driver._connection._keystone_list_tenants()
    all_tenants = accounts.list_projects()
    # Convert instance.owner from tenant-id to tenant-name all at once
    all_instances = _convert_tenant_id_to_names(all_instances, all_tenants)
    # Make a mapping of owner-to-instance
    instance_map = _make_instance_owner_map(all_instances, users=users)
    logger.info("Instance owner map created")
    identity_map = _include_all_idents(all_identities, instance_map)
    logger.info("Identity map created")
    return identity_map

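# Illustrative sketch (not part of the original source): the bulk
# tenant-id -> tenant-name conversion performed by
# _convert_tenant_id_to_names above, in isolation. The attribute shapes
# (`owner` on instances; `id`/`name` on tenants) are assumptions drawn
# from the surrounding callers.
def _example_convert_tenant_ids(instances, tenants):
    names_by_id = dict((t.id, t.name) for t in tenants)
    for inst in instances:
        # Fall back to the raw id if the tenant is unknown
        inst.owner = names_by_id.get(inst.owner, inst.owner)
    return instances
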
def update_machine_metadata(esh_driver, esh_machine, data={}):
    """
    NOTE: This will NOT WORK for TAGS until openstack
    allows JSONArrays as values for metadata!
    """
    if not hasattr(esh_driver._connection, 'ex_set_image_metadata'):
        logger.info("EshDriver %s does not have function"
                    " 'ex_set_image_metadata'"
                    % esh_driver._connection.__class__)
        return {}
    try:
        # Possible metadata that could be in 'data'
        # * application uuid
        # * application name
        # * specific machine version
        # TAGS must be converted from list --> String
        logger.info("New metadata:%s" % data)
        meta_response = esh_driver._connection.ex_set_image_metadata(
            esh_machine, data)
        esh_machine.invalidate_machine_cache(esh_driver.provider,
                                             esh_machine)
        return meta_response
    except Exception as e:
        logger.exception("Error updating machine metadata")
        if 'incapable of performing the request' in e.message:
            return {}
        else:
            raise

def set_provider_quota(identity_id):
    identity = Identity.objects.get(id=identity_id)
    if not identity.credential_set.all():
        # Can't update quota if credentials aren't set
        return
    if identity.provider.get_type_name().lower() == 'openstack':
        driver = get_esh_driver(identity)
        username = identity.created_by.username
        user_id = driver._connection._get_user_id()
        tenant_id = driver._connection._get_tenant_id()
        membership = IdentityMembership.objects.get(
            identity__id=identity_id,
            member__name=username)
        user_quota = membership.quota
        if user_quota:
            values = {'cores': user_quota.cpu,
                      'ram': user_quota.memory * 1024}
            logger.info("Updating quota for %s to %s" % (username, values))
            ad = AccountDriver(identity.provider)
            admin_driver = ad.admin_driver
            admin_driver._connection.ex_update_quota_for_user(tenant_id,
                                                              user_id,
                                                              values)
    return True

def quota_request_email(request, username, new_quota, reason):
    """
    Processes Increase Quota request. Sends email to [email protected]
    Returns a response.
    """
    user = User.objects.get(username=username)
    membership = IdentityMembership.objects.get(
        identity=user.select_identity(),
        member__in=user.group_set.all())
    admin_url = reverse('admin:core_identitymembership_change',
                        args=(membership.id,))
    subject = "Atmosphere Quota Request - %s" % username
    context = {
        "user": user,
        "quota": new_quota,
        "reason": reason,
        "url": request.build_absolute_uri(admin_url)
    }
    body = render_to_string("core/email/quota_request.html",
                            context=Context(context))
    logger.info(body)
    email_success = email_admin(request, subject, body, cc_user=False)
    return {"email_sent": email_success}

def validate_selected_identity(self, attrs, source):
    """
    Check that profile is an identitymember & providermember.
    Returns the dict of attrs
    """
    # Short-circuit if source (identity) not in attrs
    logger.debug(attrs)
    logger.debug(source)
    if 'selected_identity' not in attrs:
        return attrs
    user = self.object.user
    logger.info("Validating identity for %s" % user)
    selected_identity = attrs['selected_identity']
    logger.debug(selected_identity)
    groups = user.group_set.all()
    for g in groups:
        for id_member in g.identitymembership_set.all():
            if id_member.identity == selected_identity:
                logger.info("Saving new identity:%s" % selected_identity)
                user.selected_identity = selected_identity
                user.save()
                return attrs
    raise serializers.ValidationError("User is not a member of "
                                      "selected_identity: %s"
                                      % selected_identity)

def _build_first_history(
    self,
    status_name,
    size,
    start_date,
    end_date=None,
    first_update=False,
    activity=None
):
    # FIXME: Move this call so that it happens inside
    # InstanceStatusHistory to avoid circ.dep.
    from core.models import InstanceStatusHistory
    if not first_update and status_name not in [
        'build', 'pending', 'running'
    ]:
        logger.info(
            "First Update Unknown - Status name on instance %s: %s",
            self.provider_alias, status_name
        )
        # Instance state is 'unknown' from start of instance until now
        # NOTE: This is needed to prevent over-charging accounts
        status_name = 'unknown'
        activity = None
    first_history = InstanceStatusHistory.create_history(
        status_name,
        self,
        size,
        start_date=start_date,
        end_date=end_date,
        activity=activity
    )
    first_history.save()
    return first_history

def get_default_provider(username):
    """
    Return the default provider for the given username
    """
    try:
        from core.models.group import get_user_group
        group = get_user_group(username)
        provider = group.providers.filter(
            Q(end_date=None) | Q(end_date__gt=timezone.now()),
            active=True, type__name="OpenStack")
        if provider:
            provider = provider[0]
        else:
            logger.error("get_default_provider could not find "
                         "a valid Provider")
            return None
        logger.debug("default provider is %s " % provider)
        return provider
    except IndexError:
        logger.info("No provider found for %s" % username)
        return None
    except Exception as e:
        logger.exception(e)
        return None

def _deploy_init_to(driverCls, provider, identity, instance_id,
                    username=None, password=None, redeploy=False,
                    **celery_task_args):
    try:
        logger.debug("_deploy_init_to task started at %s." % datetime.now())
        # Check if instance still exists
        driver = get_driver(driverCls, provider, identity)
        instance = driver.get_instance(instance_id)
        if not instance:
            logger.debug("Instance has been terminated: %s." % instance_id)
            return
        # NOTE: This is unrelated to the password argument
        logger.info(instance.extra)
        instance._node.extra['password'] = None
        msd = init(instance, identity.user.username, password, redeploy)
        kwargs = _generate_ssh_kwargs()
        kwargs.update({'deploy': msd})
        driver.deploy_to(instance, **kwargs)
        _update_status_log(instance, "Deploy Finished")
        logger.debug("_deploy_init_to task finished at %s."
                     % datetime.now())
    except DeploymentError as exc:
        logger.exception(exc)
        if isinstance(exc.value, NonZeroDeploymentException):
            # The deployment was successful, but the return code on one or
            # more steps is bad. Log the exception and do NOT try again!
            raise exc.value
        # TODO: Check if all exceptions thrown at this time
        # fall in this category, and possibly don't retry if
        # you hit the Exception block below this.
        _deploy_init_to.retry(exc=exc)
    except Exception as exc:
        logger.exception(exc)
        _deploy_init_to.retry(exc=exc)

def create_volume_from_image(identity_uuid, image_id, size_id,
                             name, description, metadata):
    """
    Create a new volume from an image
    """
    try:
        identity = Identity.objects.get(uuid=identity_uuid)
        user = identity.created_by
        driver = get_cached_driver(identity=identity)
        image = driver._connection.ex_get_image(image_id)
        size = driver._connection.ex_get_size(size_id)
        if not image:
            raise Exception("No image found for id=%s." % image_id)
        if not size:
            raise Exception("No size found for id=%s." % size_id)
        success, esh_volume = driver._connection.create_volume(
            size.id, name,
            description=description,
            metadata=metadata,
            image=image)
        if not success:
            raise Exception("Could not create volume from image")
        # Save the new volume to the database
        convert_esh_volume(
            esh_volume, identity.provider.uuid, identity_uuid, user)
    except SoftTimeLimitExceeded as e:
        create_volume_from_image.retry(exc=e)
    except Identity.DoesNotExist:
        logger.info("An Identity for uuid=%s does not exist.",
                    identity_uuid)
        raise

def default_quota(cls, user, provider):
    """
    Load each Default Quota Plugin and call
    `plugin.get_default_quota(user, provider)`
    """
    _default_quota = None
    for DefaultQuotaPlugin in cls.load_plugins(cls.list_of_classes):
        plugin = DefaultQuotaPlugin()
        try:
            inspect.getcallargs(
                getattr(plugin, 'get_default_quota'),
                user=user, provider=provider
            )
        except AttributeError:
            logger.info(
                "Validation plugin %s missing method 'get_default_quota'"
                % DefaultQuotaPlugin
            )
            continue  # Skip plugins that don't implement the hook
        except TypeError:
            logger.info(
                "Validation plugin %s does not accept kwargs"
                " `user` & `provider`" % DefaultQuotaPlugin
            )
            continue  # Skip plugins with an incompatible signature
        _default_quota = plugin.get_default_quota(
            user=user, provider=provider
        )
        if _default_quota:
            return _default_quota
    return _default_quota

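# Illustrative sketch (not part of the original source): the minimal shape
# of a plugin that satisfies the `get_default_quota(user, provider)`
# contract probed above with inspect.getcallargs. The class name and
# returned value are hypothetical.
class ExampleDefaultQuotaPlugin(object):
    def get_default_quota(self, user, provider):
        # Returning a falsy value defers to the next plugin in the list.
        if provider.type.name.lower() != 'openstack':
            return None
        return {'cpu': 16, 'memory': 128}
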
def post(self, request, provider_id, identity_id):
    """
    Sends an e-mail to the admins to start the create_image process.
    """
    # request.DATA is r/o -- a copy allows for editing
    data = copy.deepcopy(request.DATA)
    data.update({'owner': data.get('created_for', request.user.username)})
    if data.get('vis', 'public') != 'public':
        user_list = re.split(', | |\n', data.get('shared_with', ""))
        share_with_admins(user_list, data.get('provider'))
        share_with_self(user_list, request.user.username)
        user_list = [user for user in user_list if user]  # Skips blanks
        data['shared_with'] = user_list
    logger.info(data)
    serializer = MachineRequestSerializer(data=data)
    if serializer.is_valid():
        # Add parent machine to request
        machine_request = serializer.object
        machine_request.parent_machine = \
            machine_request.instance.provider_machine
        serializer.save()
        # Object now has an ID for links..
        machine_request_id = serializer.object.id
        active_provider = machine_request.active_provider()
        auto_approve = active_provider.has_trait("Auto-Imaging")
        requestImaging(request, machine_request_id,
                       auto_approve=auto_approve)
        if auto_approve:
            start_machine_imaging(machine_request)
        return Response(serializer.data, status=status.HTTP_201_CREATED)
    else:
        return Response(serializer.errors,
                        status=status.HTTP_400_BAD_REQUEST)

def create_volume_from_snapshot(identity_uuid, snapshot_id, size_id,
                                name, description, metadata):
    """
    Create a new volume for the snapshot
    NOTE: The size must be at least the same size as the original volume.
    """
    try:
        identity = Identity.objects.get(uuid=identity_uuid)
        driver = get_cached_driver(identity=identity)
        snapshot = driver._connection.ex_get_snapshot(snapshot_id)
        size = driver._connection.ex_get_size(size_id)
        if not snapshot:
            raise Exception("No snapshot found for id=%s." % snapshot_id)
        if not size:
            raise Exception("No size found for id=%s." % size_id)
        success, esh_volume = driver._connection.create_volume(
            snapshot.size, name,
            description=description,
            metadata=metadata,
            snapshot=snapshot)
        if not success:
            raise Exception("Could not create volume from snapshot")
        # Save the new volume to the database
        convert_esh_volume(
            esh_volume, identity.provider.uuid, identity_uuid,
            identity.created_by)
    except SoftTimeLimitExceeded as e:
        create_volume_from_snapshot.retry(exc=e)
    except Identity.DoesNotExist:
        logger.info("An Identity for uuid=%s does not exist.",
                    identity_uuid)
        raise

def restore_ip_chain(esh_driver, esh_instance, redeploy=False):
    """
    Returns: a task, chained together

    Task chain: wait_for("active") --> AddFixed --> AddFloating --> reDeploy
    Start with: task.apply_async()
    """
    from service.tasks.driver import \
        wait_for, add_fixed_ip, add_floating_ip, deploy_init_to
    init_task = wait_for.s(
        esh_driver.__class__, esh_driver.provider, esh_driver.identity,
        esh_instance.id, ["active", ], no_tasks=True)
    # Step 1: Add fixed
    fixed_ip_task = add_fixed_ip.si(
        esh_driver.__class__, esh_driver.provider,
        esh_driver.identity, esh_instance.id)
    init_task.link(fixed_ip_task)
    # Add float and re-deploy OR just add floating IP...
    if redeploy:
        deploy_task = deploy_init_to.si(
            esh_driver.__class__, esh_driver.provider,
            esh_driver.identity, esh_instance.id, redeploy=True)
        fixed_ip_task.link(deploy_task)
    else:
        logger.info("Skip deployment, add floating IP only")
        floating_ip_task = add_floating_ip.si(
            esh_driver.__class__, esh_driver.provider,
            esh_driver.identity, esh_instance.id)
        fixed_ip_task.link(floating_ip_task)
    return init_task

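# Illustrative usage (not part of the original source): the chain returned
# by restore_ip_chain is inert until applied, so callers schedule it
# explicitly. `esh_driver` and `esh_instance` are assumed to be in scope.
def _example_restore_ips(esh_driver, esh_instance):
    chain_start = restore_ip_chain(esh_driver, esh_instance, redeploy=True)
    return chain_start.apply_async(countdown=5)
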
def _test_username(atmo_user, username_match):
    username = atmo_user.username
    result = _simple_match(username, username_match, contains=True)
    logger.info("Username:%s Match On:%s - Result:%s"
                % (username, username_match, result))
    return result

def __init__(self, provider=None, *args, **kwargs):
    super(AccountDriver, self).__init__()
    if provider:
        all_creds = self._init_by_provider(provider, *args, **kwargs)
    else:
        all_creds = kwargs
    if 'location' in all_creds:
        self.namespace = "Atmosphere_OpenStack:%s" % all_creds['location']
    else:
        logger.info("Using default namespace.. Could cause conflicts if "
                    "switching between providers. To avoid ambiguity, "
                    "provide the kwarg: location='provider_prefix'")
    # Build credentials for each manager
    self.credentials = all_creds
    ex_auth_version = all_creds.get("ex_force_auth_version",
                                    '2.0_password')
    if ex_auth_version.startswith('2'):
        self.identity_version = 2
    elif ex_auth_version.startswith('3'):
        self.identity_version = 3
    else:
        raise Exception("Could not determine identity_version of %s"
                        % ex_auth_version)
    user_creds = self._build_user_creds(all_creds)
    image_creds = self._build_image_creds(all_creds)
    net_creds = self._build_network_creds(all_creds)
    sdk_creds = self._build_sdk_creds(all_creds)
    # Initialize managers with respective credentials
    self.user_manager = UserManager(**user_creds)
    self.image_manager = ImageManager(**image_creds)
    self.network_manager = NetworkManager(**net_creds)
    self.openstack_sdk = _connect_to_openstack_sdk(**sdk_creds)

def _update_threshold(self, request, app, **kwargs):
    user = request.user
    data = request.DATA
    app_owner = app.created_by
    app_members = app.get_members()
    if user != app_owner and not Group.check_membership(user, app_members):
        return failure_response(status.HTTP_403_FORBIDDEN,
                                "You are not the Application owner. "
                                "This incident will be reported")
        # Or it won't.. Up to operations..
    if kwargs.get('delete'):
        threshold = app.get_threshold()
        if threshold:
            threshold.delete()
        serializer = ApplicationThresholdSerializer(app.get_threshold())
        return Response(serializer.data)
    partial_update = True if request.method == 'PATCH' else False
    serializer = ApplicationThresholdSerializer(
        app.threshold, data=data,
        context={'request': request},
        partial=partial_update)
    if serializer.is_valid():
        serializer.save()
        logger.info(serializer.data)
        return Response(serializer.data)
    return failure_response(
        status.HTTP_400_BAD_REQUEST,
        serializer.errors)

def over_allocation_test(identity, esh_instances):
    from api import get_esh_driver
    from core.models.instance import convert_esh_instance
    from atmosphere import settings
    over_allocated, time_diff = check_over_allocation(
        identity.created_by.username, identity.id,
        time_period=relativedelta(day=1, months=1))
    logger.info("Overallocation Test: %s - %s - %s\tInstances:%s"
                % (identity.created_by.username, over_allocated,
                   time_diff, esh_instances))
    if not over_allocated:
        # Nothing changed, bail.
        return False
    if settings.DEBUG:
        logger.info('Do not enforce allocations in DEBUG mode')
        return False
    driver = get_esh_driver(identity)
    running_instances = []
    for instance in esh_instances:
        # Suspend active instances, update the task in the DB
        try:
            if driver._is_active_instance(instance):
                driver.suspend_instance(instance)
        except Exception as e:
            if 'in vm_state suspended' not in e.message:
                raise
        updated_esh = driver.get_instance(instance.id)
        updated_core = convert_esh_instance(driver, updated_esh,
                                            identity.provider.id,
                                            identity.id,
                                            identity.created_by)
        running_instances.append(updated_core)

def remove_ips(esh_driver, esh_instance, update_meta=True):
    """
    Returns: (floating_removed, fixed_removed)
    """
    network_manager = esh_driver._connection.get_network_manager()
    # Delete the Floating IP
    result = network_manager.disassociate_floating_ip(esh_instance.id)
    logger.info("Removed Floating IP for Instance %s - Result:%s"
                % (esh_instance.id, result))
    if update_meta:
        update_instance_metadata(esh_driver, esh_instance,
                                 data={'public-ip': '',
                                       'public-hostname': ''},
                                 replace=False)
    # Fixed
    instance_ports = network_manager.list_ports(device_id=esh_instance.id)
    if instance_ports:
        fixed_ip_port = instance_ports[0]
        fixed_ips = fixed_ip_port.get('fixed_ips', [])
        if fixed_ips:
            fixed_ip = fixed_ips[0]['ip_address']
            result = esh_driver._connection.ex_remove_fixed_ip(esh_instance,
                                                               fixed_ip)
            logger.info("Removed Fixed IP %s - Result:%s"
                        % (fixed_ip, result))
            return (True, True)
    return (True, False)

def _update_machine(self, request, provider_uuid, identity_uuid,
                    machine_id):
    # TODO: Determine who is allowed to edit machines besides
    # core_machine.owner
    user = request.user
    data = request.data
    try:
        esh_driver = prepare_driver(request, provider_uuid, identity_uuid)
    except ProviderNotActive as pna:
        return inactive_provider(pna)
    except Exception as e:
        return failure_response(status.HTTP_409_CONFLICT, e.message)
    if not esh_driver:
        return invalid_creds(provider_uuid, identity_uuid)
    esh_machine = esh_driver.get_machine(machine_id)
    core_machine = convert_esh_machine(esh_driver, esh_machine,
                                       provider_uuid, user)
    # Compare owners by equality, not object identity
    if not user.is_staff and \
            user != core_machine.application_version.application.created_by:
        logger.warn('%s is Non-staff/non-owner trying to update a machine'
                    % (user.username))
        return failure_response(
            status.HTTP_401_UNAUTHORIZED,
            "Only Staff and the machine Owner "
            "are allowed to change machine info.")
    partial_update = True if request.method == 'PATCH' else False
    serializer = ProviderMachineSerializer(core_machine,
                                           request_user=request.user,
                                           data=data,
                                           partial=partial_update)
    if serializer.is_valid():
        logger.info('metadata = %s' % data)
        update_machine_metadata(esh_driver, esh_machine, data)
        machine = serializer.save()
        if 'created_by_identity' in request.data:
            identity = machine.created_by_identity
            update_application_owner(
                core_machine.application_version.application, identity)
        logger.info(serializer.data)
        return Response(serializer.data)
    return failure_response(status.HTTP_400_BAD_REQUEST,
                            serializer.errors)

def eager_update_metadata(driver, instance, metadata):
    """
    Used for TESTING ONLY. NEVER called in normal celery operation.
    """
    # Remember the id so the instance can be re-fetched each pass
    instance_id = instance.id
    while 1:
        # Check if instance is terminated or no longer building.
        if not instance or instance.extra['status'] != 'build':
            break
        # Wait 1min, try again
        wait_time = 1 * 60
        logger.info("Always Eager Detected and instance is not active"
                    ". Will wait 1 minute and check again to avoid"
                    " stack overflow from immediately retrying..")
        time.sleep(wait_time)
        # Update reference for the instance to see if it's 'done'
        instance = driver.get_instance(instance_id)
    return update_instance_metadata(
        driver, instance, data=metadata, replace=False)

def _build_first_history(self, status_name, size, start_date,
                         end_date=None, first_update=False, activity=None):
    # FIXME: Move this call so that it happens inside
    # InstanceStatusHistory to avoid circ.dep.
    from core.models import InstanceStatusHistory
    if not first_update and status_name not in [
            'build', 'pending', 'running']:
        logger.info("First Update Unknown - Status name on instance"
                    " %s: %s" % (self.provider_alias, status_name))
        # Instance state is 'unknown' from start of instance until now
        # NOTE: This is needed to prevent over-charging accounts
        status_name = 'unknown'
        activity = None
    first_history = InstanceStatusHistory.create_history(
        status_name, self, size,
        start_date=start_date, end_date=end_date, activity=activity)
    first_history.save()
    return first_history

def unmount_volume_task(driver, instance_id, volume_id, *args, **kwargs):
    try:
        logger.info("UN-Mount ONLY: %s --> %s" % (volume_id, instance_id))
        if not hasattr(driver, 'deploy_to'):
            raise Exception("Cannot unmount "
                            "-- Driver does not have a deploy_to method")
        # Only attempt to umount if we have sh access
        vol = driver.get_volume(volume_id)
        if not driver._connection.ex_volume_attached_to_instance(
                vol, instance_id):
            raise VolumeMountConflict("Cannot unmount volume %s "
                                      "-- Not attached to instance %s"
                                      % (volume_id, instance_id))
        umount_chain = _get_umount_chain(driver, instance_id, volume_id)
        umount_chain.apply_async()
        return (True, None)
    except Exception as exc:
        logger.exception("Exception occurred creating the unmount task")
        return (False, exc.message)

def transaction(cls, status_name, activity, instance, size,
                extra=None, start_time=None, last_history=None):
    try:
        with transaction.atomic():
            if not last_history:
                # Required to prevent race conditions.
                last_history = instance.get_last_history()\
                    .select_for_update(nowait=True)
                if not last_history:
                    raise ValueError(
                        "A previous history is required "
                        "to perform a transaction. Instance:%s"
                        % (instance, ))
                elif last_history.end_date:
                    raise ValueError(
                        "Old history already has end date: %s"
                        % last_history)
            last_history.end_date = start_time
            last_history.save()
            new_history = InstanceStatusHistory.create_history(
                status_name, instance, size,
                start_date=start_time, activity=activity, extra=extra)
            logger.info("Status Update - User:%s Instance:%s "
                        "Old:%s New:%s Time:%s"
                        % (instance.created_by,
                           instance.provider_alias,
                           last_history.status.name,
                           new_history.status.name,
                           new_history.start_date))
            new_history.save()
            return new_history
    except DatabaseError:
        logger.exception(
            "instance_status_history: Lock is already acquired by"
            " another transaction.")

def validate_cidr(self, cidr):
    logger.info("Attempting to validate cidr %s" % cidr)
    test_cidr_set = netaddr.IPSet([cidr])
    all_subnets = [subnet for subnet in self.list_subnets()
                   if subnet.get('ip_version', 4) != 6]
    all_subnet_ips = [sn['allocation_pools'] for sn in all_subnets]
    for idx, subnet_ip_list in enumerate(all_subnet_ips):
        for subnet_ip_range in subnet_ip_list:
            (start, end) = (subnet_ip_range['start'],
                            subnet_ip_range['end'])
            if start.startswith('10') or end.startswith('10') \
                    or start.startswith('192') or end.startswith('192'):
                continue
            test_range = netaddr.IPRange(
                subnet_ip_range['start'], subnet_ip_range['end'])
            if len(test_range) > 1000:
                continue
            for ip in test_range:
                if ip in test_cidr_set:
                    raise Exception(
                        "Overlap detected for CIDR %s and Subnet %s"
                        % (cidr, all_subnets[idx]))
    return True

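# Illustrative sketch (not part of the original source): the netaddr
# primitives validate_cidr relies on, shown standalone. An IPSet supports
# fast membership tests; an IPRange can be iterated address by address.
import netaddr


def _example_cidr_overlaps_range(cidr, range_start, range_end):
    cidr_set = netaddr.IPSet([cidr])
    ip_range = netaddr.IPRange(range_start, range_end)
    return any(ip in cidr_set for ip in ip_range)

# _example_cidr_overlaps_range('10.1.0.0/24', '10.1.0.50', '10.1.0.60')
#   -> True
# _example_cidr_overlaps_range('10.2.0.0/24', '10.1.0.50', '10.1.0.60')
#   -> False
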
def validate_selected_identity(self, selected_identity):
    """
    Check that profile is an identitymember & providermember.
    Returns the validated selected_identity.
    """
    user = self.instance
    logger.info("Validating identity for %s" % user)
    logger.debug(selected_identity)
    groups = user.group_set.all()
    for g in groups:
        for id_member in g.identitymembership_set.all():
            if id_member.identity == selected_identity:
                logger.info("Saving new identity:%s" % selected_identity)
                user.selected_identity = selected_identity
                user.save()
                return selected_identity
    raise serializers.ValidationError("User is not a member of "
                                      "selected_identity: %s"
                                      % selected_identity)

def create_accounts(self, provider, username, force=False):
    from core.models import Identity, Project
    from core.plugins import AllocationSourcePluginManager
    identities = Identity.objects.filter(provider=provider,
                                         created_by__username=username)
    if not identities.count():
        raise AccountCreationConflict(
            "Expected an identity to have been created for %s on"
            " Provider %s during the /token_update method."
            " Contact support for help!" % (username, provider))
    for identity in identities:
        user = identity.created_by
        try:
            has_allocations = AllocationSourcePluginManager\
                .ensure_user_allocation_sources(user)
            if not has_allocations:
                raise ValueError(
                    'User "{}" has no valid allocations'.format(user))
        except Exception as e:
            logger.exception(
                'Encountered error while ensuring user has valid'
                ' Allocation Sources: "%s"', user)
            raise AccountCreationConflict(
                'AccountDriver is trying to create an account: {} '
                'but while ensuring user has valid Allocation Sources'
                ' there was a problem: {}'.format(user, e))
        if settings.AUTO_CREATE_NEW_PROJECTS:
            project_name = identity.project_name()
            projects = Project.objects.filter(created_by=user,
                                              name=project_name)
            has_projects = projects.count() > 0
            membership = user.memberships.first()
            if not has_projects and membership:
                group = membership.group
                logger.info('Creating new project for %s: "%s"',
                            user, project_name)
                project = Project.objects.create(
                    name=project_name,
                    created_by=user,
                    owner=group,
                    description="Auto-created project for %s"
                                % project_name)
    return identities

def listen_for_user_allocation_source_created(
    sender, instance, created, **kwargs
):
    """
    This listener expects:
    EventType - 'user_allocation_source_created'
    entity_id - "amitj"  # Username

    # CyVerse Payload
    EventPayload - {
        "allocation_source_name": "TG-amit100568",
    }

    # Jetstream Payload
    EventPayload - {
        "allocation_source_name": "TG-AG100345",
    }

    The method should assign a user to an allocation source
    """
    event = instance
    from core.models import AtmosphereUser, AllocationSource
    if event.name != 'user_allocation_source_created':
        return None
    logger.info('user_allocation_source_created: %s' % event.__dict__)
    payload = event.payload
    allocation_source_name = payload['allocation_source_name']
    user_name = event.entity_id
    object_updated, created = UserAllocationSource.objects.update_or_create(
        user=AtmosphereUser.objects.get_by_natural_key(user_name),
        allocation_source=AllocationSource.objects.get(
            name=allocation_source_name
        )
    )
    logger.info('object_updated: %s, created: %s'
                % (object_updated, created))

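# Illustrative sketch (not part of the original source): how an event row
# that the listener above reacts to might be created. `EventTable` and the
# field names are assumptions inferred from the listener's use of
# `event.name`, `event.entity_id`, and `event.payload`.
def _example_fire_user_allocation_source_created(EventTable, username,
                                                 source_name):
    return EventTable.objects.create(
        name='user_allocation_source_created',
        entity_id=username,
        payload={'allocation_source_name': source_name})
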
def _is_active_instance(self, instance):
    # Other things may need to be tested
    status = instance.extra['status']
    task = instance.extra['task']
    power = instance.extra['power']
    if status in ['active', 'build', 'resize']:
        if task in ['deleting', 'suspending']:
            result = False
        else:
            result = True
    elif task in ['resuming', 'powering-on',
                  'verify-resize', 'resize_reverting',
                  'resize_confirming']:
        result = True
    else:
        result = False
    logger.info("Instance: %s Status: %s-%s Active:%s"
                % (instance.id, status, task, result))
    return result

def _get_instance_owner_map(provider, users=None):
    """
    All keys == All identities
    Values = List of identities / username

    NOTE: This is KEYSTONE && NOVA specific.
    The 'instance owner' here is the username // ex_tenant_name
    """
    admin_driver = get_cached_driver(provider=provider)
    all_identities = _select_identities(provider, users)
    all_instances = get_cached_instances(provider=provider)
    all_tenants = admin_driver._connection._keystone_list_tenants()
    # Convert instance.owner from tenant-id to tenant-name all at once
    all_instances = _convert_tenant_id_to_names(all_instances, all_tenants)
    # Make a mapping of owner-to-instance
    instance_map = _make_instance_owner_map(all_instances, users=users)
    logger.info("Instance owner map created")
    identity_map = _include_all_idents(all_identities, instance_map)
    logger.info("Identity map created")
    return identity_map

def check_volume_task(driverCls, provider, identity,
                      instance_id, volume_id, *args, **kwargs):
    try:
        logger.debug("check_volume task started at %s." % datetime.now())
        driver = get_driver(driverCls, provider, identity)
        instance = driver.get_instance(instance_id)
        volume = driver.get_volume(volume_id)
        attach_data = volume.extra['attachments'][0]
        device = attach_data['device']

        private_key = ATMOSPHERE_PRIVATE_KEYFILE
        kwargs.update({'ssh_key': private_key})
        kwargs.update({'timeout': 120})

        # One script to make two checks:
        # 1. Volume exists 2. Volume has a filesystem
        cv_script = check_volume(device)
        # NOTE: non_zero_deploy needed to stop DeploymentError
        # from being raised
        kwargs.update({'deploy': cv_script, 'non_zero_deploy': True})
        driver.deploy_to(instance, **kwargs)
        kwargs.pop('non_zero_deploy', None)
        # Script executed
        if cv_script.exit_status != 0:
            if 'No such file' in cv_script.stdout:
                raise Exception('Volume check failed: %s. '
                                'Device %s does not exist on instance %s'
                                % (volume, device, instance))
            elif 'Bad magic number' in cv_script.stdout:
                # Filesystem needs to be created for this device
                logger.info("Mkfs needed")
                mkfs_script = mkfs_volume(device)
                kwargs.update({'deploy': mkfs_script})
                driver.deploy_to(instance, **kwargs)
            else:
                raise Exception('Volume check failed: Something weird')
        logger.debug("check_volume task finished at %s." % datetime.now())
    except DeploymentError as exc:
        logger.exception(exc)
    except Exception as exc:
        logger.warn(exc)
        check_volume_task.retry(exc=exc)

def _machine_authored_by_atmosphere(self, cloud_machine):
    project_id = cloud_machine.get('owner')
    owner_project = self.account_driver.get_project_by_id(project_id)
    if not owner_project:
        owner = cloud_machine.get('application_owner')
        owner_project = self.account_driver.get_project(owner)
    # Assumption: the atmosphere imaging author == the project_name
    # set for the account_driver.
    atmo_author_project_name = self.account_driver.project_name
    if not owner_project:
        logger.info(
            "cloud machine %s - authored by project_id %s,"
            " not the Atmosphere author: %s",
            cloud_machine.id, project_id, atmo_author_project_name)
        return False
    elif owner_project.name != atmo_author_project_name:
        logger.info(
            "cloud machine %s - authored by Tenant %s,"
            " not the Atmosphere author: %s",
            cloud_machine.id, owner_project.name,
            atmo_author_project_name)
        return False
    return True

def set_machine_request_metadata(machine_request, image_id):
    admin_driver = machine_request.new_admin_driver()
    machine = admin_driver.get_machine(image_id)
    lc_driver = admin_driver._connection
    if not machine:
        logger.warn("Could not find machine with ID=%s" % image_id)
        return
    if not hasattr(lc_driver, 'ex_set_image_metadata'):
        return
    metadata = lc_driver.ex_get_image_metadata(machine)
    if machine_request.new_machine_description:
        metadata['description'] = machine_request.new_machine_description
    if machine_request.new_machine_tags:
        metadata['tags'] = machine_request.new_machine_tags
    logger.info("LC Driver:%s - Machine:%s - Metadata:%s"
                % (lc_driver, machine.id, metadata))
    lc_driver.ex_set_image_metadata(machine, metadata)
    return machine

def deploy_failed(task_uuid, driverCls, provider, identity, instance_id,
                  **celery_task_args):
    try:
        logger.debug("deploy_failed task started at %s." % datetime.now())
        logger.info("task_uuid=%s" % task_uuid)
        result = app.AsyncResult(task_uuid)
        with allow_join_result():
            exc = result.get(propagate=False)
        err_str = "DEPLOYERROR::%s" % (result.traceback,)
        logger.error(err_str)
        driver = get_driver(driverCls, provider, identity)
        instance = driver.get_instance(instance_id)
        update_instance_metadata(driver, instance,
                                 data={'tmp_status': 'deploy_error'},
                                 replace=False)
        logger.debug("deploy_failed task finished at %s." % datetime.now())
    except Exception as exc:
        logger.warn(exc)
        deploy_failed.retry(exc=exc)

def _build_first_history(self, status_name, size, start_date,
                         end_date=None, first_update=False):
    if not first_update and status_name not in [
            'build', 'pending', 'running']:
        logger.info("First Update Unknown - Status name on instance"
                    " %s: %s" % (self.provider_alias, status_name))
        # Instance state is 'unknown' from start of instance until now
        # NOTE: This is needed to prevent over-charging accounts
        status_name = 'unknown'
    first_history = InstanceStatusHistory.create_history(
        status_name, self, size, start_date, end_date)
    first_history.save()
    return first_history

def _set_network_quota(user_quota, identity):
    network_values = {
        'port': user_quota.port_count,
        'floatingip': user_quota.floating_ip_count,
        # INTENTIONALLY SKIPPED/IGNORED:
        # 'subnet', 'router', 'network',
        # 'security_group', 'security_group_rules'
    }
    username = identity.created_by.username
    logger.info("Updating network quota for %s to %s"
                % (username, network_values))
    driver = get_cached_driver(identity=identity)
    tenant_id = driver._connection._get_tenant_id()
    ad = get_account_driver(identity.provider)
    admin_driver = ad.admin_driver
    result = admin_driver._connection._neutron_update_quota(tenant_id,
                                                            network_values)
    logger.info("Updated quota for %s to %s" % (username, result))
    return result

def listen_for_quota_assigned(sender, instance, created, **kwargs):
    """
    This listener expects:
    EventType - 'quota_assigned'
    entity_id - "username"  # Owner of the CoreIdentity that quota
                            # will be assigned to.

    # Event Payload Expected
    EventPayload - {
        "timestamp": "2017-01-01T12:00:00Z",
        "identity": "<core_identity_uuid>",
        "quota": {
            "cpu": 16,
            "memory": 128,
            "storage": 10,
            "instance_count": 10,
            "snapshot_count": 10,
            "storage_count": 10,
            "floating_ip_count": 10,
            "port_count": 10
        }
        ...
    }

    The result of this method will:
    - Set the quota for the cloud provider of the Identity
    - Assign the quota to the Identity
    """
    event = instance
    from service.quota import set_provider_quota
    if event.name != 'quota_assigned':
        return
    logger.info('quota_assigned: %s' % event.__dict__)
    payload = event.payload
    quota_values = payload['quota']
    identity_uuid = payload['identity']
    identity = Identity.objects.get(uuid=identity_uuid)
    created = False
    quota = Quota.objects.filter(**quota_values).order_by('pk').first()
    if not quota:
        quota = Quota.objects.create(**quota_values)
        created = True
    logger.info('Quota retrieved: %s, created: %s', quota, created)
    set_provider_quota(str(identity.uuid), quota=quota)
    logger.info("Set the quota for cloud provider to match: %s", identity)
    identity = Identity.objects.get(uuid=identity_uuid)
    identity.quota = quota
    identity.save()
    logger.info("DB set identity to match quota: %s", identity)

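# Illustrative sketch (not part of the original source): the
# duplicate-tolerant "get or create" used by the listener above. Unlike
# Model.objects.get_or_create, filtering and taking the first row by pk
# will not raise MultipleObjectsReturned if duplicate rows already exist.
def _example_get_or_create_first(model, **values):
    obj = model.objects.filter(**values).order_by('pk').first()
    created = False
    if obj is None:
        obj = model.objects.create(**values)
        created = True
    return obj, created
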
def remove_membership(image_version, group, accounts=None):
    """
    This function will remove *all* users in the group
    from *all* providers/machines using this image_version
    """
    for provider_machine in image_version.machines.filter(
            only_current_source()):
        prov = provider_machine.instance_source.provider
        if not accounts:
            accounts = get_account_driver(prov)
        if not accounts:
            raise NotImplementedError(
                "Account Driver could not be created for %s" % prov)
        accounts.clear_cache()
        admin_driver = accounts.admin_driver  # cache has been cleared
        if not admin_driver:
            raise NotImplementedError(
                "Admin Driver could not be created for %s" % prov)
        img = accounts.get_image(provider_machine.identifier)
        approved_projects = accounts.shared_images_for(img.id)
        for identity_membership in group.identitymembership_set.order_by(
                'identity__created_by__username'):
            if identity_membership.identity.provider != prov:
                continue
            # Get project name from the identity's credential-list
            project_name = identity_membership.identity.get_credential(
                'ex_project_name')
            project = accounts.get_project(project_name)
            if project and project not in approved_projects:
                continue
            # Perform a *DATABASE* remove first.
            application = provider_machine.application
            application_version = provider_machine.application_version
            models.ApplicationMembership.objects.filter(
                group=group, application=application).delete()
            logger.info("Removed ApplicationMembership: %s-%s"
                        % (application, group))
            models.ApplicationVersionMembership.objects.filter(
                group=group, image_version=application_version).delete()
            logger.info("Removed ApplicationVersionMembership: %s-%s"
                        % (application_version, group))
            models.ProviderMachineMembership.objects.filter(
                group=group, provider_machine=provider_machine).delete()
            logger.info("Removed ProviderMachineMembership: %s-%s"
                        % (provider_machine, group))
            # Perform a *CLOUD* remove last.
            try:
                accounts.image_manager.unshare_image(img, project_name)
            except Exception as exc:
                logger.exception(
                    "Exception occurred while removing user"
                    " from cloud: %s", exc)
            logger.info("Removed Cloud Access: %s-%s"
                        % (img, project_name))
    return

def _set_compute_quota(user_quota, identity):
    # Use THESE values...
    compute_values = {
        'cores': user_quota.cpu,
        # NOTE: Value is stored in GB; Openstack (Liberty) expects MB
        'ram': user_quota.memory * 1024,
        'floating_ips': user_quota.floating_ip_count,
        'fixed_ips': user_quota.port_count,
        'instances': user_quota.instance_count,
        'force': True
    }
    creds = identity.get_all_credentials()
    use_tenant_id = False
    if creds.get('ex_force_auth_version', '2.0_password') == "2.0_password":
        compute_values.pop('instances')
        use_tenant_id = True
    username = identity.created_by.username
    logger.info("Updating quota for %s to %s" % (username, compute_values))
    driver = get_cached_driver(identity=identity)
    username = driver._connection.key
    tenant_id = driver._connection._get_tenant_id()
    ad = get_account_driver(identity.provider, raise_exception=True)
    ks_user = ad.get_user(username)
    admin_driver = ad.admin_driver
    creds = identity.get_all_credentials()
    if creds.get('ex_force_auth_version', '2.0_password') \
            != "2.0_password":
        # FIXME: Remove 'use_tenant_id' when legacy clouds are
        # no-longer in use.
        try:
            result = admin_driver._connection.ex_update_quota(
                tenant_id, compute_values, use_tenant_id=use_tenant_id)
        except Exception:
            logger.exception("Could not set a user-quota,"
                             " trying to set tenant-quota")
            raise
    else:
        # Legacy (CyVerse) clouds: set the quota per-user instead.
        try:
            result = admin_driver._connection.ex_update_quota_for_user(
                tenant_id, ks_user.id, compute_values,
                use_tenant_id=use_tenant_id)
        except Exception:
            logger.exception("Could not set a user-quota,"
                             " trying to set tenant-quota")
            raise
    logger.info("Updated quota for %s to %s" % (username, result))
    return result

def attach_volume_task(driver, instance_id, volume_id,
                       device=None, mount_location=None,
                       *args, **kwargs):
    logger.info("P_device - %s" % device)
    logger.info("P_mount_location - %s" % mount_location)
    attach_task.delay(driver.__class__, driver.provider, driver.identity,
                      instance_id, volume_id, device).get()
    if not hasattr(driver, 'deploy_to'):
        # Do not attempt to mount if we don't have sh access
        return
    check_volume_task.delay(driver.__class__, driver.provider,
                            driver.identity, instance_id, volume_id).get()
    mount_task.delay(driver.__class__, driver.provider, driver.identity,
                     instance_id, volume_id, mount_location).get()
    return mount_location

def machine_is_valid(self, cloud_machine):
    """
    Given a cloud_machine (glance image),
    return True if the machine should be included in Atmosphere's catalog;
    return False if the machine should be skipped.

    In this plugin, a cloud_machine is skipped if:
    - metadata_key is not found in image metadata
    - image does not pass the 'sanity checks'
    """
    if not self._sanity_check_machine(cloud_machine):
        return False
    elif not self._contains_metadata(cloud_machine,
                                     self.whitelist_metadata_key):
        logger.info("Skipping cloud machine %s -"
                    " Missing whitelist metadata_key: %s",
                    cloud_machine, self.whitelist_metadata_key)
        return False
    return True

def end_date_all(self, end_date=None):
    """
    Call this function to tie up loose ends when the instance is finished
    (Destroyed, terminated, no longer exists..)
    """
    if not end_date:
        end_date = timezone.now()
    ish_list = self.instancestatushistory_set.filter(end_date=None)
    for ish in ish_list:
        # logger.info('Saving history:%s' % ish)
        if not ish.end_date:
            logger.info("END DATING instance history %s: %s"
                        % (ish, end_date))
            ish.end_date = end_date
            ish.save()
    if not self.end_date:
        logger.info("END DATING instance %s: %s"
                    % (self.provider_alias, end_date))
        self.end_date = end_date
        self.save()

def _machine_in_same_domain(self, cloud_machine):
    """
    If we wanted to support 'domain-restrictions' *inside* of atmosphere,
    we could verify the domain of the image owner.
    If their domain does not match, skip.
    """
    project_id = cloud_machine.get('owner')
    owner_project = self.account_driver.get_project_by_id(project_id)
    if not owner_project:
        logger.info("Skipping cloud machine %s, No owner listed.",
                    cloud_machine)
        return False
    domain_id = owner_project.domain_id
    config_domain = self.account_driver.get_config(
        'user', 'domain', 'default')
    owner_domain = self.account_driver.openstack_sdk\
        .identity.get_domain(domain_id)
    account_domain = self.account_driver.openstack_sdk\
        .identity.get_domain(config_domain)
    if owner_domain.id != account_domain.id:
        logger.info("Cloud machine %s - owner domain (%s)"
                    " does not match %s",
                    cloud_machine, owner_domain, account_domain)
        return False
    return True

def create_new_account_for(provider, user):
    from service.exceptions import AccountCreationConflict
    from service.driver import get_account_driver
    existing_user_list = provider.identity_set.values_list(
        'created_by__username', flat=True)
    if user.username in existing_user_list:
        logger.info("Account already exists on %s for %s"
                    % (provider.location, user.username))
        return None
    try:
        accounts = get_account_driver(provider)
        logger.info("Create NEW account for %s" % user.username)
        new_identity = accounts.create_account(user.username)
        return new_identity
    except AccountCreationConflict:
        raise
    # TODO: Ideally, have sentry handle these events, rather than
    # force an Unhandled 500 to bubble up.
    except Exception:
        logger.exception("Could *NOT* Create NEW account for %s"
                         % user.username)
        return None

def send_reports():
    failed_reports = 0
    reports_to_send = TASAllocationReport.objects.filter(
        Q(compute_used__gt=0, success=False)
    ).order_by('user__username', 'start_date')
    count = reports_to_send.count()
    logger.info('send_reports - count: %d', count)
    for current_report_index, tas_report in enumerate(reports_to_send):
        logger.debug('send_reports - current_report_index: %d',
                     current_report_index)
        try:
            tas_report.send()
        except TASPluginException:
            logger.exception(
                "Could not send the report because of the error below")
            failed_reports += 1
            continue
    if failed_reports != 0:
        raise Exception("%s/%s reports failed to send to TAS"
                        % (failed_reports, count))

def post(self, request, provider_uuid, identity_uuid, machine_id):
    """
    TODO: Determine who is allowed to edit machines besides
    core_machine.owner
    """
    user = request.user
    data = request.DATA
    logger.info('data = %s' % request.DATA)
    core_machine = ProviderMachine.objects.filter(
        provider__uuid=provider_uuid,
        identifier=machine_id)
    if not core_machine:
        return failure_response(
            status.HTTP_400_BAD_REQUEST,
            "Machine id %s does not exist" % machine_id)
    core_machine = core_machine.get()
    # Reject anyone who is NOT the owner of the machine.
    if core_machine.instance_source.created_by != request.user:
        return failure_response(
            status.HTTP_400_BAD_REQUEST,
            "You are NOT the owner of Machine id=%s " % machine_id)
    if 'licenses' not in data \
            or not isinstance(data['licenses'], list):
        return failure_response(
            status.HTTP_400_BAD_REQUEST,
            "Licenses missing from data. Expected a list of License IDs"
            " ex:[1,2,3,]")
    licenses = []
    # Out with the old
    core_machine.licenses.all().delete()
    for license_id in data['licenses']:
        license = License.objects.get(id=license_id)
        # In with the new
        core_machine.licenses.add(license)
    # Return the new set.
    licenses = core_machine.licenses.all()
    logger.info('licenses = %s' % licenses)
    serialized_data = LicenseSerializer(licenses, many=True).data
    return Response(serialized_data, status=status.HTTP_202_ACCEPTED)

def update_membership():
    from core.models import ApplicationMembership, Provider, ProviderMachine
    from service.accounts.openstack import AccountDriver as OSAcctDriver
    from service.accounts.eucalyptus import AccountDriver as EucaAcctDriver
    for provider in Provider.objects.all():
        if not provider.is_active():
            # Skip inactive providers
            continue
        if provider.type.name.lower() == 'openstack':
            driver = OSAcctDriver(provider)
        else:
            logger.warn("Encountered unknown ProviderType:%s, expected"
                        " [Openstack]" % provider.type.name)
            continue
        images = driver.list_all_images()
        changes = 0
        for img in images:
            pm = ProviderMachine.objects.filter(identifier=img.id,
                                                provider=provider)
            if not pm or len(pm) > 1:
                logger.debug("pm filter is bad!")
                logger.debug(pm)
                continue
            pm = pm[0]
            app_manager = pm.application.applicationmembership_set
            if not img.is_public:
                # Lookup members
                image_members = driver.image_manager.shared_images_for(
                    image_id=img.id)
                # Add machine to each member
                # (who owns the cred:ex_project_name) in MachineMembership
                # for member in image_members:
            else:
                members = app_manager.all()
                if members:
                    logger.info("Application for PM:%s used to be private."
                                " %s Users membership has been revoked. "
                                % (img.id, len(members)))
                    changes += len(members)
                    members.delete()
                # If MachineMembership exists, remove it (no longer private)
        logger.info("Total Updates to machine membership:%s" % changes)
    return changes

def o_callback_authorize(request):
    if 'code' not in request.GET:
        logger.info(request.__dict__)
        # TODO - Maybe: Redirect into a login
        return HttpResponse("")
    oauth_client = get_cas_oauth_client()
    oauth_code = request.GET['code']
    # Exchange code for ticket
    access_token, expiry_date = oauth_client.get_access_token(oauth_code)
    if not access_token:
        logger.info("The Code %s is invalid/expired."
                    " Attempting another login." % oauth_code)
        return o_login_redirect(request)
    # Exchange token for profile
    user_profile = oauth_client.get_profile(access_token)
    if not user_profile or "id" not in user_profile:
        logger.error("AccessToken is producing an INVALID profile!"
                     " Check the CAS server and caslib.py"
                     " for more information.")
        # NOTE: Make sure this redirects the user OUT of the loop!
        return login(request)
    # ASSERT: A valid OAuth token gave us the User's Profile.
    #         Now create an AuthToken and return it.
    username = user_profile["id"]
    auth_token = createOAuthToken(username, access_token, expiry_date)
    # Set the username to the user to be emulated,
    # to whom the token also belongs
    request.session['username'] = username
    request.session['token'] = auth_token.key
    logger.info("Returning user - %s - to application " % username)
    logger.info(request.session.__dict__)
    logger.info(request.user)
    return HttpResponseRedirect(settings.REDIRECT_URL + "/application/")

def user_over_allocation_enforcement(provider, username,
                                     print_logs=False,
                                     start_date=None, end_date=None):
    """
    Begin monitoring 'username' on 'provider'.
    * Calculate allocation from START of month to END of month
    * If user is deemed OverAllocation, apply enforce_allocation_policy
    """
    identity = _get_identity_from_tenant_name(provider, username)
    allocation_result = get_allocation_result_for(
        provider, username, print_logs, start_date, end_date)
    # ASSERT: allocation_result has been retrieved successfully
    # Make some enforcement decision based on
    # the allocation_result's output.
    if not identity:
        logger.warn("%s has NO identity. "
                    "Total Runtime could NOT be calculated. Returning.."
                    % (username, ))
        return allocation_result
    user = User.objects.get(username=username)
    allocation = get_allocation(username, identity.uuid)
    if not allocation:
        logger.info("%s has NO allocation. Total Runtime: %s. Returning.."
                    % (username, allocation_result.total_runtime()))
        return allocation_result
    if not settings.ENFORCING:
        logger.debug('Settings dictate allocations are NOT enforced')
        return allocation_result
    # Enforce allocation if overboard.
    if allocation_result.over_allocation():
        logger.info("%s is OVER allocation. %s - %s = %s"
                    % (username,
                       allocation_result.total_credit(),
                       allocation_result.total_runtime(),
                       allocation_result.total_difference()))
        enforce_allocation_policy(identity, user)
    return allocation_result

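# Illustrative sketch (not part of the original source): the relation
# logged above when a user is over allocation. The accessor names mirror
# AllocationResult but the arithmetic shown here is an assumption.
def _example_is_over_allocation(total_credit, total_runtime):
    total_difference = total_credit - total_runtime
    return total_difference < 0  # negative balance -> over allocation
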