def remove_empty_networks_for(provider_id):
    """
    Delete project networks on `provider_id` that no running instance uses.

    Walks every project network known to the provider's network manager and
    removes those not referenced by a running instance.
    """
    provider = Provider.objects.get(id=provider_id)
    os_driver = get_account_driver(provider)
    all_instances = os_driver.admin_driver.list_all_instances()
    project_map = os_driver.network_manager.project_network_map()
    projects_with_networks = project_map.keys()
    for project in projects_with_networks:
        networks = project_map[project]['network']
        # A single network may arrive as a bare dict -- normalize to a list.
        if not isinstance(networks, list):
            networks = [networks]
        for network in networks:
            network_name = network['name']
            logger.debug("Checking if network %s is in use" % network_name)
            if running_instances(network_name, all_instances):
                continue
            # TODO: MUST change when not using 'usergroups' explicitly.
            user = project
            try:
                logger.debug(
                    "Removing project network for User:%s, Project:%s"
                    % (user, project))
                os_driver.network_manager.delete_project_network(
                    user, project)
            # BUGFIX: the two duplicate handlers are merged, and the log
            # message had a missing space ("projectnetwork").
            except (NeutronClientException, NeutronException):
                logger.exception(
                    "Neutron unable to remove project "
                    "network for %s-%s" % (user, project))
def get(self, request, provider_uuid, identity_uuid, action=None):
    """
    Dispatch a provider-metadata `action` for the given provider/identity.

    Only the 'test_links' action is currently handled; anything else falls
    through to the NotImplementedError handler below.
    """
    if not action:
        return failure_response(
            status.HTTP_400_BAD_REQUEST,
            'Action is not supported.'
        )
    try:
        esh_driver = prepare_driver(request, provider_uuid, identity_uuid)
    except ProviderNotActive as pna:
        return inactive_provider(pna)
    except Exception as e:
        return failure_response(
            status.HTTP_409_CONFLICT,
            e.message)
    if not esh_driver:
        return invalid_creds(provider_uuid, identity_uuid)
    esh_meta = esh_driver.meta()
    try:
        if 'test_links' in action:
            test_links = esh_meta.test_links()
            return Response(test_links, status=status.HTTP_200_OK)
    except LibcloudInvalidCredsError:
        logger.warn('Authentication Failed. Provider-id:%s Identity-id:%s'
                    % (provider_uuid, identity_uuid))
        return failure_response(
            status.HTTP_401_UNAUTHORIZED,
            'Identity/Provider Authentication Failed')
    # BUGFIX: was 'except NotImplemented' -- NotImplemented is a sentinel
    # value, not an exception class, so this handler could never match
    # (and itself raises TypeError). NotImplementedError is intended.
    except NotImplementedError as ne:
        logger.exception(ne)
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            'The requested resource %s is not available on this provider'
            % action)
def transaction(cls, status_name, instance, size,
                start_time=None, last_history=None):
    """
    Atomically end the previous status history and create a new one.

    Locks the last history row (SELECT ... FOR UPDATE NOWAIT) to prevent
    two concurrent status updates from racing. Returns the new
    InstanceStatusHistory, or None if the row lock was already held.
    """
    try:
        with transaction.atomic():
            if not last_history:
                # Required to prevent race conditions.
                last_history = instance.get_last_history()\
                    .select_for_update(nowait=True)
                if not last_history:
                    raise ValueError(
                        "A previous history is required "
                        "to perform a transaction. Instance:%s" %
                        (instance,))
                elif last_history.end_date:
                    raise ValueError(
                        "Old history already has end date: %s"
                        % last_history)
            last_history.end_date = start_time
            last_history.save()
            new_history = InstanceStatusHistory.create_history(
                status_name, instance, size, start_time)
            logger.info(
                "Status Update - User:%s Instance:%s "
                "Old:%s New:%s Time:%s" %
                (instance.created_by, instance.provider_alias,
                 last_history.status.name,
                 new_history.status.name, new_history.start_date))
            new_history.save()
            return new_history
    except DatabaseError:
        # BUGFIX: concatenated log string was missing a space
        # ("byanother transaction").
        logger.exception(
            "instance_status_history: Lock is already acquired by "
            "another transaction.")
def get_account_driver(provider, raise_exception=False):
    """
    Create an account driver for a given provider.

    `provider` may be a CoreProvider instance or its UUID. Returns None when
    no driver matches (or on error, unless raise_exception is True).
    """
    try:
        # Accept a bare UUID and resolve it to the CoreProvider.
        if isinstance(provider, uuid.UUID):
            provider = CoreProvider.objects.get(uuid=provider)
        type_name = provider.get_type_name().lower()
        if 'openstack' in type_name:
            from service.accounts.openstack_manager import AccountDriver as\
                OSAccountDriver
            return OSAccountDriver(provider)
        elif 'eucalyptus' in type_name:
            from service.accounts.eucalyptus import AccountDriver as\
                EucaAccountDriver
            return EucaAccountDriver(provider)
    # BUGFIX: narrowed from a bare 'except:' so SystemExit and
    # KeyboardInterrupt are no longer swallowed.
    except Exception:
        if isinstance(provider, uuid.UUID):
            provider_str = "Provider with UUID %s" % provider
        else:
            provider_str = "Provider %s" % provider.location
        logger.exception("Account driver for provider %s not found."
                         % (provider_str,))
        if raise_exception:
            raise
        return None
def set_instance_from_metadata(esh_driver, core_instance):
    """
    NOT BEING USED ANYMORE.. DEPRECATED..
    """
    # Fixes Dep. loop - Do not remove
    from api.serializers import InstanceSerializer
    # Drivers without metadata support (e.g. Eucalyptus) are skipped.
    driver_connection = esh_driver._connection
    if not hasattr(driver_connection, 'ex_get_metadata'):
        return core_instance
    try:
        esh_instance = esh_driver.get_instance(core_instance.provider_alias)
        if not esh_instance:
            return core_instance
        metadata = driver_connection.ex_get_metadata(esh_instance)
    except Exception:
        logger.exception("Exception retrieving instance metadata for %s"
                         % core_instance.provider_alias)
        return core_instance
    # TODO: Match with actual instance launch metadata in service/instance.py
    # TODO: Probably best to redefine serializer as InstanceMetadataSerializer
    # TODO: Define a creator and their identity by the METADATA instead of
    # assuming its the person who 'found' the instance
    serializer = InstanceSerializer(core_instance, data=metadata,
                                    partial=True)
    if serializer.is_valid():
        core_instance = serializer.save()
        core_instance.esh = esh_instance
        return core_instance
    logger.warn("Encountered errors serializing metadata:%s"
                % serializer.errors)
    return core_instance
def get_resource(request, file_location):
    """
    Serve a file from settings.PROJECT_ROOT/init_files as an attachment.

    A request is authorized when it comes from a known instance IP, or when
    a username is present in the session. On any failure, redirect to login.
    """
    try:
        username = request.session.get('username', None)
        remote_ip = request.META.get('REMOTE_ADDR', None)
        # BUGFIX: 'authenticated' was unbound when neither remote_ip nor
        # username was present, raising NameError instead of a clean denial.
        authenticated = False
        if remote_ip is not None:
            # Authenticated if the instance requests resource.
            instances = Instance.objects.filter(ip_address=remote_ip)
            authenticated = len(instances) > 0
        elif username is not None:
            # NOTE(review): the authenticate() result is ignored -- any
            # session username is treated as authenticated. Confirm this
            # is intentional.
            authenticate(username=username, password="")
            # User Authenticated by this line
            authenticated = True
        if not authenticated:
            raise Exception("Unauthorized access")
        path = settings.PROJECT_ROOT + "/init_files/" + file_location
        if os.path.exists(path):
            # BUGFIX: file handle was never closed; 'with' guarantees it.
            with open(path, 'r') as resource_file:
                content = resource_file.read()
            response = HttpResponse(content)
            # Download it, even if it looks like text
            response['Content-Disposition'] = \
                'attachment; filename=%s' % file_location.split("/")[-1]
            return response
        template = get_template('404.html')
        variables = RequestContext(request, {
            'message': '%s not found' % (file_location,)
        })
        output = template.render(variables)
        return HttpResponse(output)
    except Exception as e:
        logger.debug("Resource request failed")
        logger.exception(e)
        return HttpResponseRedirect(settings.REDIRECT_URL + "/login")
def get(self, request, provider_uuid, identity_uuid):
    """
    Using provider and identity, get the list of machines.
    TODO: Cache this request
    """
    user = request.user
    try:
        logger.debug("filtered_machine_list")
        machines = provider_filtered_machines(
            request, provider_uuid, identity_uuid, user)
        #logger.debug(filtered_machine_list)
    except InvalidCredsError:
        return invalid_creds(provider_uuid, identity_uuid)
    except MalformedResponseError:
        return malformed_response(provider_uuid, identity_uuid)
    except (socket_error, ConnectionFailure):
        return connection_failure(provider_uuid, identity_uuid)
    except Exception as e:
        logger.exception("Unexpected exception for user:%s" % user)
        return failure_response(status.HTTP_500_INTERNAL_SERVER_ERROR,
                                e.message)
    serialized_data = ProviderMachineSerializer(
        machines, request_user=request.user, many=True).data
    return Response(serialized_data)
def add_rule_to_group(self, nova, protocol, security_group):
    """
    Add a security group rule if it doesn't already exist.

    `protocol` is a 3-tuple (ip_protocol, from_port, to_port) or a 4-tuple
    that also carries a CIDR. Returns True when the rule already existed
    (OverLimit race); otherwise returns None after creating it.
    """
    if len(protocol) == 3:
        (ip_protocol, from_port, to_port) = protocol
        cidr = None
    elif len(protocol) == 4:
        (ip_protocol, from_port, to_port, cidr) = protocol
    else:
        raise Exception("Rule tuple did not match expected output:"
                        " (protocol, from_port, to_port, [CIDR])")
    if not self.find_rule(security_group, ip_protocol,
                          from_port, to_port):
        try:
            nova.security_group_rules.create(security_group.id,
                                             ip_protocol=ip_protocol,
                                             from_port=from_port,
                                             to_port=to_port,
                                             cidr=cidr)
        # BUGFIX: modernized Py2-only 'except OverLimit, ole' syntax.
        except OverLimit as ole:
            # Concurrent creators can race; an existing rule is success.
            if 'Security group rule already exists' in ole.message:
                return True
            logger.exception(ole.__dict__)
            raise
def delete_project_member(self, groupname, username, adminRole=False):
    """
    Retrieves the project and user object
    Removes user of the admin/member role
    Returns True on success
    Invalid username, groupname, rolename:
        raise keystoneclient.exceptions.NotFound
    """
    project = self.get_project(groupname)
    user = self.get_user(username)
    # FIXME: Hardcoded values
    if adminRole:
        role = self.get_role('admin')
    else:
        role = self.get_role('defaultMemberRole')
    # Nothing to remove if either side is already gone.
    if not project or not user:
        return True
    try:
        project.remove_user(user, role)
        return True
    except NotFound as no_role_for_user:
        # Already not a member: treat as success.
        logger.debug('Error - %s: User-role combination does not exist' %
                     no_role_for_user)
        return True
    # BUGFIX: modernized Py2-only 'except Exception, e' syntax.
    except Exception as e:
        logger.exception(e)
        raise
def delete(self, request, provider_uuid, identity_uuid, volume_id):
    """
    Destroy the volume on the cloud, then end-date the DB record.
    """
    driver = prepare_driver(request, provider_uuid, identity_uuid)
    if not driver:
        return invalid_creds(provider_uuid, identity_uuid)
    # Ensure volume exists
    try:
        cloud_volume = driver.get_volume(volume_id)
    except ConnectionFailure:
        return connection_failure(provider_uuid, identity_uuid)
    except InvalidCredsError:
        return invalid_creds(provider_uuid, identity_uuid)
    except Exception as err:
        logger.exception("Encountered a generic exception. "
                         "Returning 409-CONFLICT")
        return failure_response(status.HTTP_409_CONFLICT,
                                str(err.message))
    if not cloud_volume:
        return volume_not_found(volume_id)
    core_volume = convert_esh_volume(
        cloud_volume, provider_uuid, identity_uuid, request.user)
    # Delete the object, update the DB
    driver.destroy_volume(cloud_volume)
    core_volume.end_date = now()
    core_volume.save()
    # Return the object
    serialized_data = VolumeSerializer(
        core_volume, context={'request': request}).data
    return Response(serialized_data)
def create(self, request):
    """
    Assign an allocation source to an instance.

    Validates the request payload, then delegates creation to
    self._create_instance_allocation_source and returns the serialized
    result with 201; errors map to 400/409 failure responses.
    """
    request_user = request.user
    request_data = request.data
    if not request_data.items():
        return failure_response(
            status.HTTP_400_BAD_REQUEST,
            # BUGFIX: corrected 'Reuquest' typo in the error message.
            "Request Data is missing"
        )
    try:
        self._validate_data(request_user, request_data)
    except Exception as exc:
        return failure_response(status.HTTP_400_BAD_REQUEST,
                                exc.message)
    try:
        instance_allocation_source = \
            self._create_instance_allocation_source(
                request_data, request_user)
        serialized_instance_allocation_source = \
            InstanceAllocationSourceSerializer(
                instance_allocation_source,
                context={'request': self.request})
        return Response(
            serialized_instance_allocation_source.data,
            status=status.HTTP_201_CREATED)
    except Exception as exc:
        logger.exception(
            "Encountered exception while assigning Allocation source %s "
            "to Instance %s" % (
                request_data['allocation_source_name'],
                request_data['instance_id']))
        return failure_response(status.HTTP_409_CONFLICT,
                                str(exc.message))
def get(self, request, provider_uuid, identity_uuid):
    """
    Retrieves list of volumes and updates the DB
    """
    user = request.user
    esh_driver = prepare_driver(request, provider_uuid, identity_uuid)
    if not esh_driver:
        return invalid_creds(provider_uuid, identity_uuid)
    # Instance list method changes when using the OPENSTACK provider
    if AccountProvider.objects.filter(identity__uuid=identity_uuid):
        list_volumes = esh_driver.list_all_volumes
    else:
        list_volumes = esh_driver.list_volumes
    try:
        esh_volume_list = list_volumes()
    except (socket_error, ConnectionFailure):
        return connection_failure(provider_uuid, identity_uuid)
    except MalformedResponseError:
        return malformed_response(provider_uuid, identity_uuid)
    except InvalidCredsError:
        return invalid_creds(provider_uuid, identity_uuid)
    except Exception:
        logger.exception("Uncaught Exception in Volume list method")
        return failure_response(
            status.HTTP_500_INTERNAL_SERVER_ERROR,
            'Volume list method failed. Contact support')
    core_volume_list = [
        convert_esh_volume(vol, provider_uuid, identity_uuid, user)
        for vol in esh_volume_list]
    serializer = VolumeSerializer(
        core_volume_list, context={'request': request}, many=True)
    return Response(serializer.data)
def get(self, request, provider_uuid, identity_uuid, volume_id):
    """
    Look up a single volume; end-date the source record if it is gone.
    """
    driver = prepare_driver(request, provider_uuid, identity_uuid)
    if not driver:
        return invalid_creds(provider_uuid, identity_uuid)
    try:
        esh_volume = driver.get_volume(volume_id)
    except ConnectionFailure:
        return connection_failure(provider_uuid, identity_uuid)
    except InvalidCredsError:
        return invalid_creds(provider_uuid, identity_uuid)
    except Exception as err:
        logger.exception("Encountered a generic exception. "
                         "Returning 409-CONFLICT")
        return failure_response(status.HTTP_409_CONFLICT,
                                str(err.message))
    if not esh_volume:
        # The cloud no longer knows this volume -- end-date our record.
        try:
            source = InstanceSource.objects.get(
                identifier=volume_id,
                provider__uuid=provider_uuid)
            source.end_date = datetime.now()
            source.save()
        except (InstanceSource.DoesNotExist, CoreVolume.DoesNotExist):
            pass
        return volume_not_found(volume_id)
    core_volume = convert_esh_volume(
        esh_volume, provider_uuid, identity_uuid, request.user)
    serialized_data = VolumeSerializer(
        core_volume, context={'request': request}).data
    return Response(serialized_data)
def getDefaultIdentity(username, provider=None):
    """
    return the Default identity given to the user-group for provider

    Prefers the profile's selected_identity; otherwise picks the first
    matching group identity (EUCALYPTUS when no provider is given) and
    persists it on the profile. Returns None on any failure.
    """
    profile = UserProfile.objects.get(user__username=username)
    if profile.selected_identity:
        return profile.selected_identity
    try:
        group = getUsergroup(username)
        identities = group.identities.all()
        if provider:
            identities = identities.filter(provider=provider)
            return identities[0]
        else:
            default_identity = group.identities.filter(
                provider__location="EUCALYPTUS")[0]
            profile.selected_identity = default_identity
            profile.save()
            logger.debug(
                "profile.selected_identity set to %s " %
                profile.selected_identity)
            return profile.selected_identity
    # BUGFIX: modernized Py2-only 'except Exception, e' syntax.
    except Exception as e:
        logger.exception(e)
        return None
def _update_volume_metadata(esh_driver, esh_volume, metadata=None):
    """
    NOTE: This will NOT WORK for TAGS until
    openstack allows JSONArrays as values for metadata!
    NOTE: This will NOT replace missing metadata tags..
    ex:
    Start: ('a':'value','c':'value')
    passed: c=5
    End: ('a':'value', 'c':5)
    """
    # BUGFIX: mutable default argument ({}) replaced with a None sentinel;
    # callers passing a dict are unaffected.
    if metadata is None:
        metadata = {}
    if not esh_volume:
        return {}
    if not hasattr(esh_driver._connection, 'ex_update_volume_metadata'):
        logger.warn(
            "EshDriver %s does not have function 'ex_update_volume_metadata'"
            % esh_driver._connection.__class__)
        return {}
    # Merge new keys over the volume's existing metadata (no key removal).
    data = esh_volume.extra.get('metadata', {})
    data.update(metadata)
    try:
        return esh_driver._connection.ex_update_volume_metadata(
            esh_volume, data)
    except Exception as e:
        logger.exception("Error updating the metadata")
        if 'incapable of performing the request' in e.message:
            return {}
        else:
            raise
def deploy_init_to(driverCls, provider, identity, instance_id,
                   username=None, password=None, redeploy=False,
                   *args, **kwargs):
    """
    Kick off the asynchronous deployment chain for an instance.

    Returns early (without error) when the instance no longer exists.
    """
    try:
        logger.debug("deploy_init_to task started at %s." % datetime.now())
        driver = get_driver(driverCls, provider, identity)
        instance = driver.get_instance(instance_id)
        if not instance:
            # BUGFIX: corrected 'teminated' typo in the log message.
            logger.debug("Instance has been terminated: %s." % instance_id)
            return
        # NOTE(review): result is unused; presumably the call itself is
        # relied on for side effects or validation -- confirm before
        # removing.
        image_metadata = driver._connection\
            .ex_get_image_metadata(instance.machine)
        deploy_chain = get_deploy_chain(driverCls, provider, identity,
                                        instance, username, password,
                                        redeploy)
        deploy_chain.apply_async()
        # Can be really useful when testing.
        # if kwargs.get('delay'):
        #     async.get()
        logger.debug("deploy_init_to task finished at %s." % datetime.now())
    except SystemExit:
        logger.exception("System Exits are BAD! Find this and get rid of it!")
        raise Exception("System Exit called")
    except NonZeroDeploymentException:
        # Deliberate deployment failure -- do not retry.
        raise
    except Exception as exc:
        logger.warn(exc)
        deploy_init_to.retry(exc=exc)
def deploy_script(driverCls, provider, identity, instance_id,
                  script, **celery_task_args):
    """
    Run a single deployment script against an instance over SSH.

    Retries on transient errors; a NonZeroDeploymentException wrapped in a
    DeploymentError is re-raised and NOT retried.
    """
    try:
        logger.debug("deploy_script task started at %s." % datetime.now())
        # Check if instance still exists
        driver = get_driver(driverCls, provider, identity)
        instance = driver.get_instance(instance_id)
        if not instance:
            # BUGFIX: corrected 'teminated' typo in the log message.
            logger.debug("Instance has been terminated: %s." % instance_id)
            return
        # NOTE(review): presumably cleared so the password is not carried
        # in the deploy payload -- confirm.
        instance._node.extra['password'] = None
        kwargs = _generate_ssh_kwargs()
        kwargs.update({'deploy': script})
        driver.deploy_to(instance, **kwargs)
        logger.debug("deploy_script task finished at %s." % datetime.now())
    except DeploymentError as exc:
        logger.exception(exc)
        if isinstance(exc.value, NonZeroDeploymentException):
            # The deployment was successful, but the return code on one
            # or more steps is bad. Log the exception and do NOT try again!
            raise exc.value
        # TODO: Check if all exceptions thrown at this time
        # fall in this category, and possibly don't retry if
        # you hit the Exception block below this.
        deploy_script.retry(exc=exc)
    except Exception as exc:
        logger.exception(exc)
        deploy_script.retry(exc=exc)
def mount_volume_task(driver, instance_id, volume_id, device=None,
                      mount_location=None, *args, **kwargs):
    """
    Mount, if possible, the volume to instance
    Device and mount_location assumed if empty

    Raises VolumeMountConflict when the volume is already mounted, is not
    attached to the instance, or any unexpected error occurs.
    """
    logger.info("Mount ONLY: %s --> %s" % (volume_id, instance_id))
    logger.info("device_location:%s --> mount_location: %s"
                % (device, mount_location))
    try:
        if not hasattr(driver, 'deploy_to'):
            # Do not attempt to mount if we don't have sh access
            return None
        vol = driver.get_volume(volume_id)
        existing_mount = vol.extra.get('metadata', {}).get('mount_location')
        if existing_mount:
            raise VolumeMountConflict(
                instance_id, volume_id,
                "Volume already mounted at %s. Run 'unmount_volume' first!"
                % existing_mount)
        if not driver._connection.ex_volume_attached_to_instance(
                vol, instance_id):
            raise VolumeMountConflict(
                instance_id, volume_id,
                "Cannot mount volume %s "
                "-- Not attached to instance %s"
                % (volume_id, instance_id))
        mount_chain = _get_mount_chain(driver, instance_id, volume_id,
                                       device, mount_location)
        mount_chain.apply_async()
    except VolumeMountConflict:
        raise
    # BUGFIX: modernized Py2-only 'except Exception, e' (e was unused).
    except Exception:
        logger.exception("Exc occurred")
        raise VolumeMountConflict(instance_id, volume_id)
def wait_for(instance_alias, driverCls, provider, identity, status_query,
             tasks_allowed=False, return_id=False, **task_kwargs):
    """
    #Task makes 250 attempts to 'look at' the instance, waiting 15sec each try
    Cumulative time == 1 hour 2 minutes 30 seconds before FAILURE

    status_query = "active" Match only one value, active
    status_query = ["active","suspended"] or match multiple values.
    """
    # NOTE(review): imported but not referenced in this body -- presumably
    # kept for an import side effect; confirm before removing.
    from service import instance as instance_service
    try:
        logger.debug("wait_for task started at %s." % datetime.now())
        if app.conf.CELERY_ALWAYS_EAGER:
            # Eager mode (tests): poll synchronously until ready instead
            # of relying on celery retries.
            logger.debug("Eager task - DO NOT return until its ready!")
            return _eager_override(wait_for, _is_instance_ready,
                                   (driverCls, provider, identity,
                                    instance_alias, status_query,
                                    tasks_allowed, return_id), {})
        result = _is_instance_ready(driverCls, provider, identity,
                                    instance_alias, status_query,
                                    tasks_allowed, return_id)
        return result
    except Exception as exc:
        # "Not Ready" is the expected polling signal; only log real errors
        # before scheduling the retry.
        if "Not Ready" not in str(exc):
            # Ignore 'normal' errors.
            logger.exception(exc)
        wait_for.retry(exc=exc)
def provider_filtered_machines(request, provider_uuid,
                               identity_uuid, request_user=None):
    """
    Return all filtered machines. Uses the most common,
    default filtering method.
    """
    if not Identity.objects.filter(uuid=identity_uuid):
        raise ObjectDoesNotExist()
    try:
        esh_driver = prepare_driver(request, provider_uuid, identity_uuid)
    except Exception:
        # TODO: Observe the change of 'Fail loudly' here
        # and clean up the noise, rather than hide it.
        logger.exception(
            "Driver could not be prepared - Provider: %s , Identity: %s"
            % (provider_uuid, identity_uuid))
        esh_driver = None
    if not esh_driver:
        raise LibcloudInvalidCredsError()
    logger.debug(esh_driver)
    return list_filtered_machines(esh_driver, provider_uuid, request_user)
def monitor_instances_for_user(provider, username, instances):
    """
    Convert cloud instances to core models, run the over-allocation test,
    and update instance records for one user on one provider.
    """
    from core.models.instance import convert_esh_instance
    from api import get_esh_driver
    try:
        user = AtmosphereUser.objects.get(username=username)
        # TODO: When user->group is no longer true,
        # we will need to modify this..
        group = Group.objects.get(name=user.username)
        ident = user.identity_set.get(provider=provider)
        im = ident.identitymembership_set.get(member=group)
        # NOTE: Couples with API, probably want this in
        # service/driver
        driver = get_esh_driver(ident)
        core_instances = []
        # NOTE: We are converting them so they will
        # be picked up as core models for the 'over_allocation_test'
        for instance in instances:
            c_inst = convert_esh_instance(
                driver, instance,
                ident.provider.id, ident.id, ident.created_by)
            core_instances.append(c_inst)
        # NOTE(review): results of the next three statements are unused or
        # immediately overwritten; presumably kept for side effects --
        # confirm before cleaning up.
        over_allocation = over_allocation_test(im.identity, instances)
        core_instances = user.instance_set.filter(
            provider_machine__provider=provider, end_date=None)
        core_instances_ident = ident.instance_set.filter(end_date=None)
        update_instances(driver, im.identity, instances, core_instances)
    # BUGFIX: narrowed from a bare 'except:' so SystemExit and
    # KeyboardInterrupt propagate.
    except Exception:
        logger.exception("Unable to monitor User:%s on Provider:%s"
                         % (username, provider))
def get_esh_instance(request, provider_uuid, identity_uuid, instance_id):
    """
    Fetch a cloud ('esh') instance for the given provider/identity.

    Returns the esh instance, or None when the cloud no longer knows it
    (in which case the matching core record is end-dated first).
    NOTE(review): on driver/connection errors this returns an HTTP
    response object instead of an instance -- callers must handle both
    shapes; confirm this mixed contract is intentional.
    """
    esh_driver = prepare_driver(request, provider_uuid, identity_uuid)
    if not esh_driver:
        raise InvalidCredsError(
            "Provider_uuid && identity_uuid "
            "did not produce a valid combination")
    esh_instance = None
    try:
        esh_instance = esh_driver.get_instance(instance_id)
    except (socket_error, ConnectionFailure):
        return connection_failure(provider_uuid, identity_uuid)
    except InvalidCredsError:
        return invalid_creds(provider_uuid, identity_uuid)
    except Exception as exc:
        logger.exception("Encountered a generic exception. "
                         "Returning 409-CONFLICT")
        return failure_response(status.HTTP_409_CONFLICT,
                                str(exc.message))
    if not esh_instance:
        # End date everything
        try:
            core_inst = CoreInstance.objects.get(
                provider_alias=instance_id,
                source__provider__uuid=provider_uuid,
                created_by_identity__uuid=identity_uuid)
            core_inst.end_date_all()
        except CoreInstance.DoesNotExist:
            pass
    return esh_instance
def time_remaining(self, user=None):
    """
    Returns the remaining compute_allowed,
    user: If passed in *and* allocation source is 'special',
    calculate remaining time based on user snapshots.

    Will return a negative number if 'over allocation',
    when `compute_used` is larger than `compute_allowed`.
    Will return Infinity if `compute_allowed` is `-1`
    (or any negative number)
    :return: decimal.Decimal
    :rtype: decimal.Decimal
    """
    # Handling the 'SPECIAL_ALLOCATION_SOURCES'
    time_shared_allocations = getattr(
        settings, 'SPECIAL_ALLOCATION_SOURCES', {})
    if user and self.name in time_shared_allocations.keys():
        try:
            compute_allowed = \
                time_shared_allocations[self.name]['compute_allowed']
        # BUGFIX: narrowed from a bare 'except:' -- only a malformed
        # settings structure (missing key / non-dict value) should
        # trigger this message.
        except (KeyError, TypeError):
            raise Exception(
                "The structure of settings.SPECIAL_ALLOCATION_SOURCES "
                "has changed! Verify your settings are correct and/or "
                "change the lines of code above.")
        try:
            last_snapshot = self.user_allocation_snapshots.get(user=user)
        except ObjectDoesNotExist:
            logger.exception(
                'User allocation snapshot does not exist anymore '
                '(or yet), so returning -1')
            return -1
    else:
        compute_allowed = self.compute_allowed
        last_snapshot = self.snapshot
    # Negative allowance means "unlimited".
    if compute_allowed < 0:
        return decimal.Decimal('Infinity')
    compute_used = last_snapshot.compute_used if last_snapshot else 0
    remaining_compute = compute_allowed - compute_used
    return remaining_compute
def perform_destroy(self, instance):
    """
    Destroy `instance` on the cloud and return 204 on success.

    Known error classes map to their matching failure responses; anything
    else becomes a 409.
    """
    user = self.request.user
    identity_uuid = instance.created_by_identity.uuid
    # NOTE(review): filters on id=<uuid value>; presumably Identity.id is
    # the uuid field here -- confirm against the model definition.
    identity = Identity.objects.get(id=identity_uuid)
    try:
        # Test that there is not an attached volume BEFORE we destroy
        # NOTE: Although this is a task we are calling and waiting for
        # response..
        core_instance = destroy_instance(
            user, identity_uuid, instance.provider_alias)
        # Faked partial update of nothing, so 'is_valid' can be called.
        serialized_instance = InstanceSerializer(
            core_instance, context={
                'request': self.request},
            data={}, partial=True)
        if not serialized_instance.is_valid():
            return Response(serialized_instance.data,
                            status=status.HTTP_400_BAD_REQUEST)
        return Response(status=status.HTTP_204_NO_CONTENT)
    except VolumeAttachConflict as exc:
        message = exc.message
        return failure_response(status.HTTP_409_CONFLICT, message)
    except (socket_error, ConnectionFailure):
        return connection_failure(identity)
    except InvalidCredsError:
        return invalid_creds(identity)
    except Exception as exc:
        logger.exception("Encountered a generic exception. "
                         "Returning 409-CONFLICT")
        return failure_response(status.HTTP_409_CONFLICT,
                                str(exc.message))
def get_default_identity(username, provider=None):
    """
    Return the default identity given to the user-group for provider.

    Returns None on any failure (including an inactive provider).
    """
    try:
        from core.models.group import get_user_group
        group = get_user_group(username)
        identities = group.identities.all()
        if provider:
            if provider.is_active():
                identities = identities.filter(provider=provider)
                return identities[0]
            else:
                logger.error("Provider provided for "
                             "get_default_identity is inactive.")
                # BUGFIX: was 'raise("...")', which raises a plain string
                # and is itself a TypeError. Raise a real exception (still
                # caught below, preserving the None return).
                raise Exception(
                    "Provider provided for get_default_identity "
                    "is inactive.")
        else:
            default_provider = get_default_provider(username)
            default_identity = group.identities.filter(
                provider=default_provider)[0]
            logger.debug(
                "default_identity set to %s " % default_identity)
            return default_identity
    # BUGFIX: modernized Py2-only 'except Exception, e' syntax.
    except Exception as e:
        logger.exception(e)
        return None
def get_default_provider(username):
    """
    Return default provider given
    """
    try:
        from core.models.group import get_user_group
        from core.models.provider import Provider
        group = get_user_group(username)
        provider_ids = group.current_identities.values_list(
            'provider', flat=True)
        providers = Provider.objects.filter(
            id__in=provider_ids, type__name="OpenStack")
        if not providers:
            logger.error("get_default_provider could not find a new "
                         "Provider for %s" % (username,))
            return None
        logger.debug("get_default_provider selected a new "
                     "Provider for %s: %s" % (username, providers))
        return providers[0]
    except Exception:
        logger.exception("get_default_provider encountered an error "
                         "for %s" % (username,))
        return None
def update_machine_metadata(esh_driver, esh_machine, data=None):
    """
    NOTE: This will NOT WORK for TAGS until
    openstack allows JSONArrays as values for metadata!
    """
    # BUGFIX: mutable default argument ({}) replaced with a None sentinel;
    # callers passing a dict are unaffected.
    if data is None:
        data = {}
    if not hasattr(esh_driver._connection, 'ex_set_image_metadata'):
        logger.info(
            "EshDriver %s does not have function 'ex_set_image_metadata'"
            % esh_driver._connection.__class__)
        return {}
    try:
        # Possible metadata that could be in 'data'
        #  * application uuid
        #  * application name
        #  * specific machine version
        # TAGS must be converted from list --> String
        logger.info("New metadata:%s" % data)
        meta_response = esh_driver._connection.ex_set_image_metadata(
            esh_machine, data)
        esh_machine.invalidate_machine_cache(esh_driver.provider,
                                             esh_machine)
        return meta_response
    # BUGFIX: modernized Py2-only 'except Exception, e' syntax.
    except Exception as e:
        logger.exception("Error updating machine metadata")
        if 'incapable of performing the request' in e.message:
            return {}
        else:
            raise
def get(self, request, provider_uuid, identity_uuid):
    """
    Using provider and identity, get the list of machines.
    TODO: Cache this request
    """
    user = request.user
    try:
        logger.debug("filtered_machine_list")
        machines = provider_filtered_machines(
            request, provider_uuid, identity_uuid, user)
    except ProviderNotActive as pna:
        return inactive_provider(pna)
    except LibcloudInvalidCredsError:
        return invalid_creds(provider_uuid, identity_uuid)
    except LibcloudBadResponseError:
        return malformed_response(provider_uuid, identity_uuid)
    except (socket_error, ConnectionFailure):
        return connection_failure(provider_uuid, identity_uuid)
    except ObjectDoesNotExist:
        return invalid_provider_identity(provider_uuid, identity_uuid)
    except Exception as e:
        logger.exception("Unexpected exception for user:%s" % user)
        return failure_response(status.HTTP_409_CONFLICT, e.message)
    serialized_data = ProviderMachineSerializer(
        machines, request_user=request.user, many=True).data
    return Response(serialized_data)
def get_user_allocations(self, username, include_expired=False,
                         raise_exception=True):
    """
    Fetch (project, allocation) pairs for `username`, limited to this
    driver's resource. Returns None when an error is swallowed.
    """
    url_match = self.tacc_api + ('/v1/projects/username/%s' % username)
    resp, data = tacc_api_get(
        url_match, self.tacc_username, self.tacc_password)
    try:
        _validate_tas_data(data)
        user_allocations = []
        for project in data['result']:
            if include_expired:
                api_allocations = project['allocations']
            else:
                api_allocations = select_valid_allocations(
                    project['allocations'])
            for allocation in api_allocations:
                if allocation['resource'] == self.resource_name:
                    user_allocations.append((project, allocation))
        return user_allocations
    except ValueError as exc:
        logger.exception('JSON Decode error')
        if raise_exception:
            raise TASAPIException("JSON Decode error -- %s" % exc)
    except Exception:
        logger.exception(
            'Something went wrong while getting user allocations')
        if raise_exception:
            raise
    return None
def update_instance_metadata(esh_driver, esh_instance, data=None,
                             replace=True):
    """
    NOTE: This will NOT WORK for TAGS until
    openstack allows JSONArrays as values for metadata!
    """
    # BUGFIX: mutable default argument ({}) replaced with a None sentinel;
    # callers passing a dict are unaffected. Dead locals 'wait_time' and
    # 'instance_id' removed.
    if data is None:
        data = {}
    if not esh_instance:
        return {}
    if not hasattr(esh_driver._connection, 'ex_set_metadata'):
        logger.warn("EshDriver %s does not have function 'ex_set_metadata'"
                    % esh_driver._connection.__class__)
        return {}
    if esh_instance.extra['status'] == 'build':
        raise Exception("Metadata cannot be applied while EshInstance %s is"
                        " in the build state." % (esh_instance,))
    # ASSERT: We are ready to update the metadata
    if data.get('name'):
        esh_driver._connection.ex_set_server_name(esh_instance,
                                                  data['name'])
    try:
        return esh_driver._connection.ex_set_metadata(
            esh_instance, data, replace_metadata=replace)
    # BUGFIX: modernized Py2-only 'except Exception, e' syntax.
    except Exception as e:
        logger.exception("Error updating the metadata")
        if 'incapable of performing the request' in e.message:
            return {}
        else:
            raise
def _handle_get(srv_key, obj_id):
    """
    Handle the object lookup for the request.

    Returns a response string and HTTP status code as a tuple in the
    form: ``(data_string, webstatus)``.
    """
    conn = None
    try:
        conn = MySQLdb.connect(host=PROV_DB_HOST,
                               user=PROV_DB_USERNAME,
                               passwd=PROV_DB_PASSWORD,
                               db=PROV_DB_NAME,
                               port=PROV_DB_PORT)
        cursor = conn.cursor()
        # SECURITY NOTE: queries are built via %-interpolation; if srv_key
        # or obj_id can be attacker-controlled this is SQL injection.
        # Prefer parameterized cursor.execute(query, params).
        cursor.execute(SERVICE_ID_FROM_KEY_QUERY % (srv_key))
        key_to_id = cursor.fetchone()
        srv_id, = key_to_id
        # BUGFIX: 'str + tuple' raised TypeError; stringify the row first.
        c3po.info('result from `service-id` query' + str(key_to_id))
        cursor.execute(OBJECT_QUERY_UUID_LOOKUP % (obj_id, srv_id))
        results = cursor.fetchall()
        if len(results) == 1:
            uid = str(results[0][0])
            info_msg = "Lookup Object Exists:" + " " + uid
            c3po.info(info_msg)
            data_string = json.dumps({'UUID': uid}, indent=4)
            webstatus = '200 OK'
        elif len(results) > 1:
            errmsg = ("More than one object was found: " + str(results))
            c3po.warn(errmsg)
            data_string = json.dumps(
                {
                    'Status': 'Exception',
                    'Details': 'Multiple objects found ' +
                    'with the same `object_id` for the same ' +
                    ' `service_id`. Incident has been reported'
                }, indent=4)
            webstatus = '404 Not Found'
        else:
            err_msg = "Object UUID is null: " + obj_id
            logging.error(err_msg)
            data_string = json.dumps(
                {
                    'Status': 'Failed',
                    'Details': 'Object does not exist'
                }, indent=4)
            webstatus = '404 Not Found'
        cursor.close()
    except Exception as exc:
        err_msg = "MySQL DB Exception: " + " " + str(exc) + " " + obj_id
        c3po.warn(err_msg)
        c3po.exception(exc)
        data_string = json.dumps(
            {
                'Status': 'Failed',
                'Details': 'MySQL Exception. Incident' +
                ' has been reported'
            }, indent=4)
        webstatus = '500 Internal Server Error'
    finally:
        # BUGFIX: the connection was never closed (not even on success);
        # close it on every path.
        if conn is not None:
            conn.close()
    return (data_string, webstatus)
def create(self, request):
    """
    Launch a new instance from the validated POST body.

    Expected keys: name, identity (uuid), source_alias, size_alias,
    allocation_source_id, and optionally scripts / deploy / project / extra.
    Returns 201 with the serialized instance on success; maps known
    launch failures onto specific 4xx responses and anything else to 409.
    """
    user = request.user
    data = request.data
    try:
        self.validate_input(user, data)
    except Exception as exc:
        return failure_response(
            status.HTTP_400_BAD_REQUEST,
            exc.message)
    # Create a mutable dict and start modifying.
    data = data.copy()
    name = data.get('name')
    identity_uuid = data.get('identity')
    source_alias = data.get('source_alias')
    size_alias = data.get('size_alias')
    allocation_source_id = data.get('allocation_source_id')
    boot_scripts = data.pop("scripts", [])
    deploy = data.get('deploy', True)
    project_uuid = data.get('project')
    extra = data.get('extra', {})
    try:
        identity = Identity.objects.get(uuid=identity_uuid)
        # NOTE(review): despite the '_id' suffix, this value is used as a
        # uuid lookup -- confirm callers send the uuid.
        allocation_source = AllocationSource.objects.get(uuid=allocation_source_id)
        core_instance = launch_instance(
            user, identity_uuid, size_alias, source_alias,
            name, deploy,
            allocation_source=allocation_source,
            **extra)
        # Faking a 'partial update of nothing' to allow call to 'is_valid'
        serialized_instance = InstanceSerializer(
            core_instance, context={'request': self.request},
            data={}, partial=True)
        if not serialized_instance.is_valid():
            return Response(serialized_instance.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        instance = serialized_instance.save()
        # Attach the instance to its project, scripts, and allocation source
        # after the initial save.
        project = Project.objects.get(uuid=project_uuid)
        instance.project = project
        instance.save()
        if boot_scripts:
            _save_scripts_to_instance(instance, boot_scripts)
        instance.change_allocation_source(allocation_source)
        return Response(
            serialized_instance.data, status=status.HTTP_201_CREATED)
    except UnderThresholdError as ute:
        return under_threshold(ute)
    except (OverQuotaError, OverAllocationError) as oqe:
        return over_quota(oqe)
    except ProviderNotActive as pna:
        return inactive_provider(pna)
    except SizeNotAvailable as snae:
        return size_not_available(snae)
    except HypervisorCapacityError as hce:
        return over_capacity(hce)
    except SecurityGroupNotCreated:
        return connection_failure(identity)
    except (socket_error, ConnectionFailure):
        return connection_failure(identity)
    except LibcloudInvalidCredsError:
        return invalid_creds(identity)
    except Exception as exc:
        # Catch-all: unexpected launch failures surface as 409.
        logger.exception("Encountered a generic exception. "
                         "Returning 409-CONFLICT")
        return failure_response(status.HTTP_409_CONFLICT,
                                str(exc.message))
def get_or_create_user_subnet(
        self, network_id, username,
        ip_version=4,
        dns_nameservers=None,
        subnet_pool_id=None,
        get_unique_number=_get_unique_id,
        get_cidr=get_default_subnet):
    """
    Create a subnet for the user using the `get_cidr` function to get
    a private subnet range.

    Candidate CIDRs are derived from `username` plus an incrementing
    seed; on collision (already-listed CIDR or a Neutron 'overlap'
    error) the seed is bumped and the next candidate is tried, up to
    MAX_SUBNET attempts. When `subnet_pool_id` is given, CIDR selection
    is delegated to the pool instead.

    :raises Exception: when every candidate CIDR is exhausted.
    """
    # FIXME: Remove the username dependency -- if its just a seed value?
    # FIXME: Look into get_cidr and get_unique_number -- is there a better
    # way?
    # BUGFIX: 'dns_nameservers=[]' was a shared mutable default argument.
    if dns_nameservers is None:
        dns_nameservers = []
    subnet_name = "%s-subnet" % self.prefix
    inc = 0
    MAX_SUBNET = 4064
    if subnet_pool_id:
        return self.network_driver.create_subnet(
            self.user_neutron,
            subnet_name,
            network_id,
            ip_version,
            dns_nameservers=dns_nameservers,
            subnet_pool_id=subnet_pool_id)
    # NOTE: every successful attempt returns from inside the loop, so
    # falling out of the loop always means failure (the old 'success'
    # flag was never set and has been removed).
    while inc < MAX_SUBNET:
        try:
            new_cidr = get_cidr(username, inc, get_unique_number)
            cidr_match = any(
                sn for sn in self.network_driver.list_subnets()
                if sn['cidr'] == new_cidr)
            if new_cidr and not cidr_match:
                return self.network_driver.create_subnet(
                    self.user_neutron, subnet_name,
                    network_id, ip_version,
                    new_cidr, dns_nameservers)
            elif cidr_match:
                logger.warn("Unable to create new_cidr for subnet "
                            "for user: %s (CIDR already used)" % username)
                inc += 1
            else:
                logger.warn("Unable to create new_cidr for subnet "
                            "for user: %s (create_subnet failed)" % username)
                inc += 1
        except NeutronClientException as nce:
            if "overlap" in nce.message:
                # Expected output. Hash is already used, add one and try
                # another subnet.
                inc += 1
            else:
                logger.exception("Unable to create subnet for user: %s"
                                 % username)
                inc += 1
                if not get_unique_number:
                    logger.warn("No get_unique_number method "
                                "provided for user: %s" % username)
        except Exception:
            logger.exception("Unable to create subnet for user: %s"
                             % username)
            if not get_unique_number:
                logger.warn("No get_unique_number method "
                            "provided for user: %s" % username)
            inc += 1
    # Exhausted every candidate without creating a subnet.
    raise Exception("Unable to create subnet for user: %s" % username)
def update_history(
        self, status_name, size,
        task=None, tmp_status=None, first_update=False):
    """
    Given the status name and size, look up the previous history object
    If nothing has changed: return (False, last_history)
    else: end date previous history object, start new history object.
         return (True, new_history)
    """
    # NOTE(review): 'first_update' is not referenced in this body --
    # confirm whether callers still need it.
    #FIXME: Move this call so that it happens inside InstanceStatusHistory to avoid circ.dep.
    from core.models import InstanceStatusHistory
    import traceback
    # 1. Get status name
    status_name = _get_status_name_for_provider(
        self.source.provider,
        status_name,
        task,
        tmp_status)
    activity = self.esh_activity()
    # 2. Get the last history (or Build a new one if no other exists)
    has_history = self.instancestatushistory_set.all().count()
    if not has_history:
        # First-ever history row: backdate it to the instance's start_date.
        last_history = InstanceStatusHistory.create_history(
            status_name, self, size,
            start_date=self.start_date,
            activity=activity)
        last_history.save()
        logger.debug("STATUSUPDATE - FIRST - Instance:%s Old Status: %s - %s New\
 Status: %s Tmp Status: %s" % (self.provider_alias,
                               self.esh_status(),
                               self.esh_activity(),
                               status_name,
                               tmp_status))
        logger.debug("STATUSUPDATE - Traceback: %s"
                     % traceback.format_stack())
    last_history = self.get_last_history()
    # 2. Size and name must match to continue using last history
    if last_history.status.name == status_name \
            and last_history.size.id == size.id:
        # logger.info("status_name matches last history:%s " %
        #         last_history.status.name)
        return (False, last_history)
    logger.debug("STATUSUPDATE - Instance:%s Old Status: %s - %s New Status: %s\
 Tmp Status: %s" % (self.provider_alias,
                    self.esh_status(),
                    self.esh_activity(),
                    status_name,
                    tmp_status))
    logger.debug("STATUSUPDATE - Traceback: %s"
                 % traceback.format_stack())
    # 3. ASSERT: A new history item is required due to a State or Size
    # Change
    now_time = timezone.now()
    try:
        # transaction() is expected to end-date last_history and open the
        # new row atomically.
        new_history = InstanceStatusHistory.transaction(
            status_name, activity, self, size,
            start_time=now_time,
            last_history=last_history)
        return (True, new_history)
    except ValueError:
        logger.exception("Bad transaction")
        return (False, last_history)
def post(self, request, provider_uuid, identity_uuid):
    """
    Updates DB values for volume

    Reuses (or creates) a snapshot of the named volume, then creates a
    new volume from that snapshot. Required body keys: size, volume_id,
    display_name; optional: description, metadata, snapshot_id.
    """
    user = request.user
    data = request.data
    missing_keys = valid_snapshot_post_data(data)
    if missing_keys:
        return keys_not_found(missing_keys)
    # Required
    size = data.get('size')
    volume_id = data.get('volume_id')
    display_name = data.get('display_name')
    # Optional
    description = data.get('description')
    metadata = data.get('metadata')
    snapshot_id = data.get('snapshot_id')
    # STEP 0 - Existence tests
    try:
        esh_driver = prepare_driver(request, provider_uuid, identity_uuid)
    except ProviderNotActive as pna:
        return inactive_provider(pna)
    except Exception as e:
        return failure_response(status.HTTP_409_CONFLICT, e.message)
    if not esh_driver:
        return invalid_creds(provider_uuid, identity_uuid)
    try:
        esh_volume = esh_driver.get_volume(volume_id)
    except (socket_error, ConnectionFailure):
        return connection_failure(provider_uuid, identity_uuid)
    except LibcloudInvalidCredsError:
        return invalid_creds(provider_uuid, identity_uuid)
    except Exception as exc:
        logger.exception("Encountered a generic exception. "
                         "Returning 409-CONFLICT")
        return failure_response(status.HTTP_409_CONFLICT,
                                str(exc.message))
    # TODO: Put quota tests at the TOP so we dont over-create resources!
    # STEP 1 - Reuse/Create snapshot
    if snapshot_id:
        # Caller supplied an existing snapshot to build the volume from.
        snapshot = esh_driver._connection.get_snapshot(snapshot_id)
        if not snapshot:
            return failure_response(
                status.HTTP_400_BAD_REQUEST,
                "Snapshot %s not found. Process aborted."
                % snapshot_id)
    else:
        # Normal flow, create a snapshot from the volume
        if not esh_volume:
            return volume_not_found(volume_id)
        if esh_volume.extra['status'].lower() != 'available':
            return failure_response(
                status.HTTP_400_BAD_REQUEST,
                "Volume status must be 'available'. "
                "Did you detach the volume?")
        snapshot = esh_driver._connection.ex_create_snapshot(
            esh_volume, display_name, description)
    if not snapshot:
        return failure_response(
            status.HTTP_400_BAD_REQUEST,
            "Snapshot not created. Process aborted.")
    # STEP 2 - Create volume from snapshot
    try:
        success, esh_volume = create_esh_volume(esh_driver, identity_uuid,
                                                display_name, size,
                                                description, metadata,
                                                snapshot=snapshot)
        if not success:
            return failure_response(
                status.HTTP_500_INTERNAL_SERVER_ERROR,
                'Volume creation failed. Contact support')
        # Volume creation succeeded
        core_volume = convert_esh_volume(esh_volume, provider_uuid,
                                         identity_uuid, user)
        serialized_data = VolumeSerializer(core_volume,
                                           context={
                                               'request': request
                                           }).data
        return Response(serialized_data, status=status.HTTP_201_CREATED)
    except OverQuotaError as oqe:
        return over_quota(oqe)
    except ConnectionFailure:
        return connection_failure(provider_uuid, identity_uuid)
    except LibcloudInvalidCredsError:
        return invalid_creds(provider_uuid, identity_uuid)
def delete(self, request, provider_uuid, identity_uuid, instance_id):
    """Authentication Required, TERMINATE the instance.

    Be careful, there is no going back once you've deleted an instance.

    Requires instance leadership; queues the destroy task, invalidates
    cached instance lists, and returns the (soon to be deleted) instance
    serialized with a 'queueing delete' task marker.
    """
    user = request.user
    esh_driver = prepare_driver(request, provider_uuid, identity_uuid)
    if not esh_driver:
        return invalid_creds(provider_uuid, identity_uuid)
    if not can_use_instance(user, instance_id, leader_required=True):
        return member_action_forbidden(user.username, instance_id)
    try:
        esh_instance = esh_driver.get_instance(instance_id)
    except (socket_error, ConnectionFailure):
        return connection_failure(provider_uuid, identity_uuid)
    except LibcloudInvalidCredsError:
        return invalid_creds(provider_uuid, identity_uuid)
    except Exception as exc:
        logger.exception("Encountered a generic exception. "
                         "Returning 409-CONFLICT")
        return failure_response(status.HTTP_409_CONFLICT,
                                str(exc.message))
    try:
        # Test that there is not an attached volume BEFORE we destroy
        task.destroy_instance_task(user, esh_instance, identity_uuid)
        invalidate_cached_instances(
            identity=Identity.objects.get(uuid=identity_uuid))
        # Re-fetch: deletion is asynchronous, so the instance may linger.
        existing_instance = esh_driver.get_instance(instance_id)
    except VolumeAttachConflict as exc:
        message = exc.message
        return failure_response(status.HTTP_409_CONFLICT, message)
    except (socket_error, ConnectionFailure):
        return connection_failure(provider_uuid, identity_uuid)
    except LibcloudInvalidCredsError:
        return invalid_creds(provider_uuid, identity_uuid)
    except InstanceDoesNotExist as dne:
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            "Instance %s does not exist" % instance_id)
    except Exception as exc:
        logger.exception("Encountered a generic exception. "
                         "Returning 409-CONFLICT")
        return failure_response(status.HTTP_409_CONFLICT,
                                str(exc.message))
    try:
        core_instance = None
        if existing_instance:
            # Instance will be deleted soon...
            esh_instance = existing_instance
            if esh_instance.extra \
                    and 'task' not in esh_instance.extra:
                esh_instance.extra['task'] = 'queueing delete'
            core_instance = convert_esh_instance(esh_driver, esh_instance,
                                                 provider_uuid,
                                                 identity_uuid,
                                                 user)
        if not core_instance:
            # Fall back to the DB record when the provider no longer
            # reports the instance.
            logger.warn("Unable to find core instance %s." % (instance_id))
            core_instance = CoreInstance.objects.filter(
                provider_alias=instance_id).first()
        serialized_data = InstanceSerializer(
            core_instance,
            context={"request": request}).data
        response = Response(serialized_data, status=status.HTTP_200_OK)
        response['Cache-Control'] = 'no-cache'
        return response
    except (Identity.DoesNotExist) as exc:
        return failure_response(status.HTTP_400_BAD_REQUEST,
                                "Invalid provider_uuid or identity_uuid.")
    except (socket_error, ConnectionFailure):
        return connection_failure(provider_uuid, identity_uuid)
    except LibcloudInvalidCredsError:
        return invalid_creds(provider_uuid, identity_uuid)
return HttpResponseRedirect('/login/') template = get_template("cf2/index.html") context = RequestContext( request, { 'site_root': settings.REDIRECT_URL, 'debug': settings.DEBUG, 'year': datetime.now().year }) output = template.render(context) return HttpResponse(output) except KeyError, e: logger.debug("User not logged in.. Redirecting to CAS login") return cas_loginRedirect(request, settings.REDIRECT_URL + '/application') except Exception, e: logger.exception(e) return cas_loginRedirect(request, settings.REDIRECT_URL + '/application') def app_beta(request): logger.debug("APP BETA") try: #TODO Reimplment maintenance record check template = get_template("cf3/index.html") context = RequestContext( request, { 'site_root': settings.REDIRECT_URL, 'url_root': '/beta/', 'debug': settings.DEBUG, 'year': datetime.now().year
def post(self, request, provider_uuid, identity_uuid, format=None):
    """
    Instance Class:
    Launches an instance based on the params
    Returns a single instance

    Parameters: machine_alias, size_alias, username

    TODO: Create a 'reverse' using the instance-id to pass
    the URL for the newly created instance
    I.e: url = "/provider/1/instance/1/i-12345678"
    """
    data = request.data
    user = request.user
    # Check the data is valid
    missing_keys = valid_post_data(data)
    if missing_keys:
        return keys_not_found(missing_keys)
    # Leadership on the identity is required to launch with it.
    identity = Identity.shared_with_user(
        user, is_leader=True).filter(uuid=identity_uuid).first()
    if not identity:
        failure_msg = "User %s does not have permission to POST with this identity. Promote user to leader or use a different Identity." % (user,)
        return failure_response(status.HTTP_403_FORBIDDEN, failure_msg)
    # Pass these as args
    size_alias = data.pop("size_alias")
    allocation_source_uuid = data.pop("allocation_source_uuid", None)
    machine_alias = data.pop("machine_alias")
    hypervisor_name = data.pop("hypervisor", None)
    if hypervisor_name:
        # Previous method passed this with 'None' but that fails now.
        # This check will only add the ex_ value if it is 'truthy'.
        data['ex_hypervisor_name'] = hypervisor_name
    # Normalize 'deploy' -- accepts bool or the string "false".
    # NOTE(review): 'unicode' makes this Python 2-only.
    deploy = data.pop("deploy", True)
    if type(deploy) in [str, unicode] and deploy.lower() == "false":
        deploy = False
    elif not isinstance(deploy, bool):
        deploy = True
    boot_scripts = data.pop("scripts", [])
    try:
        logger.debug(data)
        allocation_source = AllocationSource.objects.get(
            uuid=allocation_source_uuid)
        # Remaining 'data' keys are forwarded as launch kwargs.
        core_instance = launch_instance(
            user, identity_uuid, size_alias, machine_alias,
            deploy=deploy,
            allocation_source=allocation_source,
            **data)
    except UnderThresholdError as ute:
        return under_threshold(ute)
    except OverQuotaError as oqe:
        return over_quota(oqe)
    except OverAllocationError as oae:
        return over_quota(oae)
    except Unauthorized:
        return invalid_creds(provider_uuid, identity_uuid)
    except SizeNotAvailable as snae:
        return size_not_available(snae)
    except SecurityGroupNotCreated:
        return connection_failure(provider_uuid, identity_uuid)
    except (socket_error, ConnectionFailure):
        return connection_failure(provider_uuid, identity_uuid)
    except LibcloudInvalidCredsError:
        return invalid_creds(provider_uuid, identity_uuid)
    except Exception as exc:
        logger.exception("Encountered a generic exception. "
                         "Returning 409-CONFLICT")
        return failure_response(status.HTTP_409_CONFLICT,
                                str(exc.message))
    serializer = InstanceSerializer(core_instance,
                                    context={"request": request},
                                    data=data)
    if serializer.is_valid():
        instance = serializer.save()
        if boot_scripts:
            _save_scripts_to_instance(instance, boot_scripts)
        instance.change_allocation_source(allocation_source)
        logger.info("DEBUG- Instance launch completed - Returning instance %s (%s) to user %s" % (instance, instance.created_by_identity, request.user))
        return Response(serializer.data,
                        status=status.HTTP_201_CREATED)
    else:
        return Response(serializer.errors,
                        status=status.HTTP_400_BAD_REQUEST)
hypervisor_name = data.pop('hypervisor',None) try: core_instance = launch_instance(user, provider_id, identity_id, size_alias, machine_alias, ex_availability_zone=hypervisor_name, **data) except OverQuotaError, oqe: return over_quota(oqe) except OverAllocationError, oae: return over_quota(oae) except SizeNotAvailable, snae: return size_not_availabe(snae) except InvalidCredsError: return invalid_creds(provider_id, identity_id) except Exception as exc: logger.exception("Encountered a generic exception. " "Returning 409-CONFLICT") return failure_response(status.HTTP_409_CONFLICT, exc.message) serializer = InstanceSerializer(core_instance, context={'user':request.user}, data=data) #NEVER WRONG if serializer.is_valid(): serializer.save() return Response(serializer.data, status=status.HTTP_201_CREATED) else: return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def perform_create(self, serializer):
    """
    Validate and submit a new machine (imaging) request.

    Rejects the request when the user already has an active request for
    the same instance, resolves provider/owner/parent-machine rows,
    then saves the request and kicks off the submit action.

    :raises rest_exceptions.MethodNotAllowed: duplicate active request,
        or a provider/request limit was exceeded.
    :raises rest_exceptions.ParseError: lookup failure, invalid
        membership, missing identity membership, or any unexpected
        submission error.
    """
    request_user = self.request.user
    # An 'active' request is any request not in a terminal status.
    active_requests = MachineRequest.objects.filter(
        (
            Q(created_by__id=request_user.id) &
            Q(instance_id=serializer.validated_data['instance'].id) &
            ~Q(status__name="failed") &
            ~Q(status__name="denied") &
            ~Q(status__name="completed") &
            ~Q(status__name="closed")
        ))
    # .exists() avoids materializing every matching row just to count.
    if active_requests.exists():
        message = "Only one active request is allowed per provider."
        raise rest_exceptions.MethodNotAllowed('create', detail=message)
    # NOTE: An identity could possible have multiple memberships
    # It may be better to directly take membership rather than an identity
    identity_id = serializer.initial_data.get("identity")
    parent_machine = serializer.validated_data['instance'].provider_machine
    access_list = serializer.initial_data.get("access_list") or []
    visibility = serializer.initial_data.get("new_application_visibility")
    new_provider = self._get_new_provider()
    if visibility in ["select", "private"]:
        share_with_admins(access_list, parent_machine.provider.uuid)
        share_with_self(access_list, request_user.username)
        access_list = remove_duplicate_users(access_list)
    # Renamed from 'status' to avoid shadowing the DRF status module.
    pending_status, _ = StatusType.objects.get_or_create(name="pending")
    new_machine_provider = Provider.objects.filter(id=new_provider.id)
    new_machine_owner = AtmosphereUser.objects.filter(id=request_user.id)
    parent_machine = ProviderMachine.objects.filter(id=parent_machine.id)
    if new_machine_provider.count():
        new_machine_provider = new_machine_provider[0]
    else:
        raise rest_exceptions.ParseError(
            detail="Could not retrieve new machine provider.")
    if new_machine_owner.count():
        new_machine_owner = new_machine_owner[0]
    else:
        raise rest_exceptions.ParseError(
            detail="Could not retrieve new machine owner.")
    if parent_machine.count():
        parent_machine = parent_machine[0]
    else:
        raise rest_exceptions.ParseError(
            detail="Could not retrieve parent machine.")
    new_tags = self.filter_tags(
        request_user, serializer.validated_data.get("new_version_tags", ""))
    try:
        membership = IdentityMembership.objects.get(identity=identity_id)
        instance = serializer.save(
            membership=membership,
            status=pending_status,
            created_by=request_user,
            new_machine_provider=new_provider,
            new_machine_owner=request_user,
            new_version_tags=new_tags,
            access_list=access_list,
            old_status="pending",  # TODO: Is this required or will it default to pending?
            parent_machine=parent_machine
        )
        instance.migrate_access_to_membership_list(access_list)
        self.submit_action(instance)
    except (core_exceptions.ProviderLimitExceeded,
            core_exceptions.RequestLimitExceeded):
        message = "Only one active request is allowed per provider."
        raise rest_exceptions.MethodNotAllowed('create', detail=message)
    except core_exceptions.InvalidMembership:
        message = (
            "The user '%s' is not a valid member."
            % request_user.username
        )
        raise rest_exceptions.ParseError(detail=message)
    except IdentityMembership.DoesNotExist:
        message = (
            "The identity '%s' does not have a membership"
            % identity_id
        )
        raise rest_exceptions.ParseError(detail=message)
    except Exception as e:
        # BUGFIX: this previously wrapped the string in a *set* literal
        # ({"..."}), so the error detail rendered as a one-element set;
        # also fixes the 'encoutered' typo.
        message = (
            "An error was encountered when submitting the request: %s"
            % e.message)
        logger.exception(e)
        raise rest_exceptions.ParseError(detail=message)
def post(self, request, provider_uuid, identity_uuid, instance_id):
    """Authentication Required, Attempt a specific instance action,
    including necessary parameters.

    The POST body must contain an 'action' key plus any action-specific
    parameters; known failures are mapped to specific status codes.
    """
    # Service-specific call to action
    action_params = request.data
    if not action_params.get('action', None):
        return failure_response(
            status.HTTP_400_BAD_REQUEST,
            'POST request to /action require a BODY with \'action\'.')
    result_obj = None
    user = request.user
    identity = Identity.objects.get(uuid=identity_uuid)
    action = action_params['action']
    try:
        # Leadership on the instance is required to run actions against it.
        if not can_use_instance(user, instance_id, leader_required=True):
            return member_action_forbidden(user.username,
                                           "Instance", instance_id)
        result_obj = run_instance_action(user, identity, instance_id,
                                         action, action_params)
        result_obj = _further_process_result(request, action, result_obj)
        api_response = {
            'result': 'success',
            'message': 'The requested action <%s> was run successfully'
                       % (action_params['action'],),
            'object': result_obj,
        }
        response = Response(api_response, status=status.HTTP_200_OK)
        return response
    except (socket_error, ConnectionFailure):
        return connection_failure(provider_uuid, identity_uuid)
    except ProviderNotActive as pna:
        return inactive_provider(pna)
    except InstanceDoesNotExist as dne:
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            'Instance %s no longer exists' % (instance_id,))
    except LibcloudInvalidCredsError:
        return invalid_creds(provider_uuid, identity_uuid)
    except HypervisorCapacityError as hce:
        return over_capacity(hce)
    except OverQuotaError as oqe:
        return over_quota(oqe)
    except OverAllocationError as oae:
        return over_quota(oae)
    except SizeNotAvailable as snae:
        return size_not_available(snae)
    # BUGFIX: a second, identical (socket_error, ConnectionFailure)
    # handler here was unreachable dead code and has been removed.
    except VolumeMountConflict as vmc:
        return mount_failed(vmc)
    # BUGFIX: was 'except NotImplemented' -- that is the NotImplemented
    # constant, not an exception class; the intended type is
    # NotImplementedError.
    except NotImplementedError:
        return failure_response(
            status.HTTP_409_CONFLICT,
            "The requested action %s is not available on this provider."
            % action_params['action'])
    except ActionNotAllowed:
        return failure_response(
            status.HTTP_409_CONFLICT,
            "The requested action %s has been explicitly "
            "disabled on this provider." % action_params['action'])
    except Exception as exc:
        logger.exception("Exception occurred processing InstanceAction")
        message = exc.message
        # Pass provider-side conflicts through as 409s.
        if message.startswith('409 Conflict'):
            return failure_response(
                status.HTTP_409_CONFLICT,
                message)
        return failure_response(
            status.HTTP_403_FORBIDDEN,
            "The requested action %s encountered "
            "an irrecoverable exception: %s"
            % (action_params['action'], message))
def update(self, request, pk, *args, **fields):
    """
    Patch an AllocationSource: renewal strategy and/or compute allowed.

    Translates the patched fields into change events, applies them, and
    returns the re-serialized allocation source. Validation failures
    return 400; failures while applying events return 409.
    """
    if not hasattr(self, 'request'):
        self.request = request
    request_user = request.user
    request_data = request.data
    request_data['source_id'] = pk
    # BUGFIX: .last() returns None when no source matches `pk`; the old
    # code then crashed with AttributeError (an HTTP 500). Return a clean
    # 400 instead.
    source = AllocationSource.objects.filter(uuid=pk).last()
    if not source:
        return failure_response(
            status.HTTP_400_BAD_REQUEST,
            "Allocation Source %s does not exist" % pk)
    request_data['allocation_source_name'] = source.name
    # check for data
    if not request_data.items():
        return failure_response(status.HTTP_400_BAD_REQUEST,
                                "Request Data is missing")
    # validate user
    try:
        self._validate_user(request_user)
    except Exception as exc:
        return failure_response(status.HTTP_400_BAD_REQUEST,
                                exc.message)
    # validate patched fields and update allocation source model
    try:
        self._validate_params(request_data)
        # create payload
        payload = {}
        payload['allocation_source_name'] = request_data[
            'allocation_source_name']
        # events to call
        events = []
        # if 'name' in request_data:
        #     payload_name = payload.copy()
        #     payload_name['name'] = request_data['name']
        #     events.append((
        #         'allocation_source_name_changed',
        #         payload_name))
        if 'renewal_strategy' in request_data:
            payload_renewal_strategy = payload.copy()
            payload_renewal_strategy['renewal_strategy'] = request_data[
                'renewal_strategy']
            events.append(('allocation_source_renewal_strategy_changed',
                           payload_renewal_strategy))
        if 'compute_allowed' in request_data:
            payload_compute_allowed = payload.copy()
            payload_compute_allowed['compute_allowed'] = request_data[
                'compute_allowed']
            events.append(('allocation_source_compute_allowed_changed',
                           payload_compute_allowed))
    except Exception as exc:
        return failure_response(status.HTTP_400_BAD_REQUEST,
                                exc.message)
    try:
        allocation_source = self._update_allocation_source(
            events, request_data)
        serialized_allocation_source = AllocationSourceSerializer(
            allocation_source, context={'request': self.request})
        return Response(serialized_allocation_source.data,
                        status=status.HTTP_200_OK)
    except Exception as exc:
        logger.exception(
            "Encountered exception while updating Allocation Source")
        return failure_response(status.HTTP_409_CONFLICT,
                                str(exc.message))
class AccountDriver(CachedAccountDriver):
    """
    OpenStack account driver: provisions users, projects, keypairs and
    security groups via the user/image/network managers it wraps.
    """
    # Manager handles -- populated in __init__.
    user_manager = None
    image_manager = None
    network_manager = None
    # The core (DB) Provider this driver was initialized from, if any.
    core_provider = None

    # Default firewall rules applied to new accounts:
    # (protocol, from_port, to_port[, cidr])
    MASTER_RULES_LIST = [
        ("ICMP", 0, 255),
        #FTP Access
        ("UDP", 20, 20),  # FTP data transfer
        ("TCP", 20, 21),  # FTP control
        #SSH & Telnet Access
        ("TCP", 22, 23),
        ("UDP", 22, 23),
        # SMTP Mail
        #("TCP", 25, 25),
        # HTTP Access
        ("TCP", 80, 80),
        # POP Mail
        #("TCP", 109, 110),
        # SFTP Access
        ("TCP", 115, 115),
        # SQL Access
        #("TCP", 118, 118),
        #("UDP", 118, 118),
        # IMAP Access
        #("TCP", 143, 143),
        # SNMP Access
        #("UDP", 161, 161),
        # LDAP Access
        ("TCP", 389, 389),
        ("UDP", 389, 389),
        # HTTPS Access
        ("TCP", 443, 443),
        # LDAPS Access
        ("TCP", 636, 636),
        ("UDP", 636, 636),
        # Open up >1024
        ("TCP", 1024, 4199),
        ("UDP", 1024, 4199),
        #SKIP PORT 4200.. See Below
        ("TCP", 4201, 65535),
        ("UDP", 4201, 65535),
        # Poke hole in 4200 for iPlant VMs proxy-access only (Shellinabox)
        ("TCP", 4200, 4200, "128.196.0.0/16"),
        ("UDP", 4200, 4200, "128.196.0.0/16"),
        ("TCP", 4200, 4200, "150.135.0.0/16"),
        ("UDP", 4200, 4200, "150.135.0.0/16"),
    ]

    def _init_by_provider(self, provider, *args, **kwargs):
        """Collect admin + provider credentials for `provider` and set up
        the admin driver. Returns the merged credential dict."""
        from service.driver import get_esh_driver
        self.core_provider = provider
        provider_creds = provider.get_credentials()
        self.provider_creds = provider_creds
        admin_identity = provider.admin
        admin_creds = admin_identity.get_credentials()
        self.admin_driver = get_esh_driver(admin_identity)
        admin_creds = self._libcloud_to_openstack(admin_creds)
        all_creds = {'location': provider.get_location()}
        all_creds.update(admin_creds)
        all_creds.update(provider_creds)
        return all_creds

    def __init__(self, provider=None, *args, **kwargs):
        """Build per-manager credential dicts (from a Provider or raw
        kwargs) and instantiate the user/image/network managers."""
        super(AccountDriver, self).__init__()
        if provider:
            all_creds = self._init_by_provider(provider, *args, **kwargs)
        else:
            all_creds = kwargs
        if 'location' in all_creds:
            self.namespace = "Atmosphere_OpenStack:%s" % all_creds['location']
        else:
            logger.info("Using default namespace.. Could cause conflicts if "
                        "switching between providers. To avoid ambiguity, "
                        "provide the kwarg: location='provider_prefix'")
        # Build credentials for each manager
        self.user_creds = self._build_user_creds(all_creds)
        self.image_creds = self._build_image_creds(all_creds)
        self.net_creds = self._build_network_creds(all_creds)
        #Initialize managers with respective credentials
        self.user_manager = UserManager(**self.user_creds)
        self.image_manager = ImageManager(**self.image_creds)
        self.network_manager = NetworkManager(**self.net_creds)

    def create_account(self, username, password=None,
                       project_name=None, role_name=None,
                       quota=None, max_quota=False):
        """
        Create (And Update "latest changes") to an account
        """
        if not self.core_provider:
            raise Exception("AccountDriver not initialized by provider,"
                            " cannot create identity. For account creation use"
                            " build_account()")
        if username in self.core_provider.list_admin_names():
            # NOTE(review): silently returns None for admin usernames.
            return
        (username, password, project) = self.build_account(username,
                                                           password,
                                                           project_name,
                                                           role_name,
                                                           max_quota)
        ident = self.create_identity(username, password,
                                     project.name,
                                     quota=quota,
                                     max_quota=max_quota)
        return ident

    def build_account(self, username, password,
                      project_name=None, role_name=None, max_quota=False):
        """Create the OpenStack project, user, role membership and
        keypair, retrying forever on connection resets / rate limits.
        Returns (username, password, project)."""
        finished = False
        #Attempt account creation
        while not finished:
            try:
                if not password:
                    password = self.hashpass(username)
                if not project_name:
                    project_name = username
                #1. Create Project: should exist before creating user
                project = self.user_manager.get_project(project_name)
                if not project:
                    project = self.user_manager.create_project(project_name)
                # 2. Create User (And add them to the project)
                user = self.get_user(username)
                if not user:
                    logger.info("Creating account: %s - %s - %s"
                                % (username, password, project))
                    user = self.user_manager.create_user(
                        username, password, project)
                # 3.1 Include the admin in the project
                #TODO: providercredential initialization of
                # "default_admin_role"
                self.user_manager.include_admin(project_name)
                # 3.2 Check the user has been given an appropriate role
                if not role_name:
                    role_name = "_member_"
                self.user_manager.add_project_membership(
                    project_name, username, role_name)
                # 4. Create a security group -- SUSPENDED.. Will occur on
                # instance launch instead.
                #self.init_security_group(user, password, project,
                #                         project.name,
                #                         self.MASTER_RULES_LIST)
                # 5. Create a keypair to use when launching with atmosphere
                self.init_keypair(user.name, password, project.name)
                finished = True
            except ConnectionError:
                logger.exception("Connection reset by peer. "
                                 "Waiting for one minute.")
                time.sleep(60)  # Wait one minute
            except OverLimit:
                logger.exception("OverLimit on POST requests. "
                                 "Waiting for one minute.")
                time.sleep(60)  # Wait one minute
        return (username, password, project)

    def init_keypair(self, username, password, project_name):
        """Ensure the shared Atmosphere keypair (from settings) exists
        for this account."""
        keyname = settings.ATMOSPHERE_KEYPAIR_NAME
        with open(settings.ATMOSPHERE_KEYPAIR_FILE, "r") as pub_key_file:
            public_key = pub_key_file.read()
        return self.get_or_create_keypair(username, password, project_name,
                                          keyname, public_key)

    def init_security_group(self, username, password, project_name,
                            security_group_name, rules_list):
        """Raise the security-group-rule quota for the project and ensure
        a 'default' security group exists (renamed to match the project)."""
        # 4.1. Update the account quota to hold a larger number of
        # roles than what is necessary
        user = self.user_manager.keystone.users.find(name=username)
        project = self.user_manager.keystone.tenants.find(name=project_name)
        nc = self.user_manager.nova
        rule_max = max(len(rules_list), 100)
        nc.quotas.update(project.id, security_group_rules=rule_max)
        #Change the description of the security group to match the project name
        try:
            #Create the default security group
            nova = self.user_manager.build_nova(username, password,
                                                project_name)
            sec_groups = nova.security_groups.list()
            if not sec_groups:
                # NOTE(review): 'nova.security_group' (singular) looks like
                # a typo for 'nova.security_groups' -- confirm before
                # relying on this branch.
                nova.security_group.create("default", project_name)
            self.network_manager.rename_security_group(project)
        except ConnectionError, ce:
            logger.exception("Failed to establish connection."
                             " Security group creation FAILED")
            return None
        except NeutronClientException, nce:
            # A 404 here simply means there was nothing to rename.
            if nce.status_code != 404:
                logger.exception("Encountered unknown exception while renaming"
                                 " the security group")
                return None
def add_floating_ip(driverCls, provider, identity, instance_alias,
                    delete_status=True,
                    *args, **kwargs):
    """
    Celery task: ensure the instance has a floating IP (reusing a free
    one when possible), derive a public hostname, and write both -- plus
    port/MAC details -- into the instance metadata.

    Returns {'floating_ip': ..., 'hostname': ...}, or None when the
    instance no longer exists. Retries with capped exponential backoff
    on any failure.
    """
    # NOTE(review): 'delete_status' is not referenced in this body --
    # confirm whether callers still need it.
    #For testing ONLY.. Test cases ignore countdown..
    if app.conf.CELERY_ALWAYS_EAGER:
        logger.debug("Eager task waiting 15 seconds")
        time.sleep(15)
    try:
        logger.debug("add_floating_ip task started at %s." % datetime.now())
        #Remove unused floating IPs first, so they can be re-used
        driver = get_driver(driverCls, provider, identity)
        driver._clean_floating_ip()
        #assign if instance doesn't already have an IP addr
        instance = driver.get_instance(instance_alias)
        if not instance:
            logger.debug("Instance has been teminated: %s." % instance_alias)
            return None
        floating_ips = driver._connection.neutron_list_ips(instance)
        if floating_ips:
            # Reuse the first IP already associated with the instance.
            floating_ip = floating_ips[0]["floating_ip_address"]
        else:
            floating_ip = driver._connection.neutron_associate_ip(
                instance, *args, **kwargs)["floating_ip_address"]
        _update_status_log(instance, "Networking Complete")
        #TODO: Implement this as its own task, with the result from
        #'floating_ip' passed in. Add it to the deploy_chain before deploy_to
        hostname = ""
        if floating_ip.startswith('128.196'):
            # 128.196.x.y addresses map onto the vmX-Y.iplantcollaborative
            # naming scheme.
            regex = re.compile(
                "(?P<one>[0-9]+)\.(?P<two>[0-9]+)\."
                "(?P<three>[0-9]+)\.(?P<four>[0-9]+)")
            r = regex.search(floating_ip)
            (one, two, three, four) = r.groups()
            hostname = "vm%s-%s.iplantcollaborative.org" % (three, four)
        else:
            # Find a way to convert new floating IPs to hostnames..
            hostname = floating_ip
        metadata_update = {
            'public-hostname': hostname,
            'public-ip': floating_ip
        }
        #NOTE: This is part of the temp change, should be removed when moving
        # to vxlan
        instance_ports = driver._connection.neutron_list_ports(
            device_id=instance.id)
        network = driver._connection.neutron_get_tenant_network()
        if instance_ports:
            for idx, fixed_ip_port in enumerate(instance_ports):
                fixed_ips = fixed_ip_port.get('fixed_ips', [])
                mac_addr = fixed_ip_port.get('mac_address')
                metadata_update['mac-address%s' % idx] = mac_addr
                metadata_update['port-id%s' % idx] = fixed_ip_port['id']
                metadata_update['network-id%s' % idx] = network['id']
        #EndNOTE:
        update_instance_metadata(driver, instance, data=metadata_update,
                                 replace=False)
        logger.info("Assigned IP:%s - Hostname:%s" % (floating_ip, hostname))
        #End
        logger.debug("add_floating_ip task finished at %s." % datetime.now())
        return {"floating_ip": floating_ip, "hostname": hostname}
    except Exception as exc:
        logger.exception("Error occurred while assigning a floating IP")
        #Networking can take a LONG time when an instance first launches,
        #it can also be one of those things you 'just miss' by a few seconds..
        #So we will retry 30 times using limited exp.backoff
        #Max Time: 53min
        countdown = min(2**current.request.retries, 128)
        add_floating_ip.retry(exc=exc,
                              countdown=countdown)
def post_instance_action(self, request, pk=None):
    """
    Run a single action (start/stop/resize/...) against instance `pk`.

    Expects `request.data` to contain an 'action' key; every remaining
    key is forwarded to `run_instance_action` as action parameters.

    Returns a 200 response wrapping the action's result object on
    success; each known failure mode maps to a specific error response.
    Unknown exceptions fall through to a 409 (for '409 Conflict'
    messages from the provider) or a 403.
    """
    user = request.user
    instance_id = pk
    instance = find_instance(instance_id)
    identity = instance.created_by_identity
    action_params = request.data
    action = action_params.pop('action')
    # Some clients submit 'action' as a single-element list; unwrap it.
    # (was: `type(action) == list` -- isinstance is the idiomatic check)
    if isinstance(action, list):
        action = action[0]
    try:
        result_obj = run_instance_action(user, identity, instance_id,
                                         action, action_params)
        api_response = {
            'result': 'success',
            'message': 'The requested action <%s> was run successfully'
                       % (action,),
            'object': result_obj,
        }
        return Response(api_response, status=status.HTTP_200_OK)
    # NOTE: the original listed (socket_error, ConnectionFailure) and
    # LibcloudInvalidCredsError twice; the later duplicates were
    # unreachable and have been removed.
    except (socket_error, ConnectionFailure):
        return connection_failure(identity)
    except InstanceDoesNotExist as dne:
        return failure_response(
            status.HTTP_404_NOT_FOUND,
            'Instance %s no longer exists' % (dne.message,))
    except LibcloudInvalidCredsError:
        return invalid_creds(identity)
    except HypervisorCapacityError as hce:
        return over_capacity(hce)
    except ProviderNotActive as pna:
        return inactive_provider(pna)
    except (OverQuotaError, OverAllocationError) as oqe:
        return over_quota(oqe)
    except SizeNotAvailable as snae:
        return size_not_available(snae)
    except VolumeMountConflict as vmc:
        return mount_failed(vmc)
    # BUG FIX: was `except NotImplemented:` -- NotImplemented is a
    # non-exception singleton; NotImplementedError is what gets raised.
    except NotImplementedError:
        return failure_response(
            status.HTTP_409_CONFLICT,
            "The requested action %s is not available on this provider."
            % action)
    except ActionNotAllowed:
        return failure_response(
            status.HTTP_409_CONFLICT,
            "The requested action %s has been explicitly "
            "disabled on this provider." % action)
    except Exception as exc:
        logger.exception("Exception occurred processing InstanceAction")
        message = exc.message
        if message.startswith('409 Conflict'):
            return failure_response(
                status.HTTP_409_CONFLICT,
                message)
        return failure_response(
            status.HTTP_403_FORBIDDEN,
            "The requested action %s encountered "
            "an irrecoverable exception: %s" % (action, message))
def post(self, request, provider_uuid, identity_uuid, instance_id):
    """Authentication Required, Attempt a specific instance action,
    including necessary parameters.

    Dispatches on request.DATA['action']: volume attach/mount/unmount/
    detach, resize/confirm/revert, redeploy, resume/suspend,
    shelve/unshelve/shelve_offload, start/stop, reset_network, console,
    reboot, rebuild.  Returns 200 with the action result on success,
    400 for missing/unknown actions or vanished instances.
    """
    #Service-specific call to action
    action_params = request.DATA
    if not action_params.get('action', None):
        return failure_response(
            status.HTTP_400_BAD_REQUEST,
            'POST request to /action require a BODY with \'action\'.')
    result_obj = None
    user = request.user
    esh_driver = prepare_driver(request, provider_uuid, identity_uuid)
    if not esh_driver:
        return invalid_creds(provider_uuid, identity_uuid)
    try:
        esh_instance = esh_driver.get_instance(instance_id)
    except ConnectionFailure:
        return connection_failure(provider_uuid, identity_uuid)
    except InvalidCredsError:
        return invalid_creds(provider_uuid, identity_uuid)
    except Exception as exc:
        logger.exception("Encountered a generic exception. "
                         "Returning 409-CONFLICT")
        return failure_response(status.HTTP_409_CONFLICT,
                                str(exc.message))
    if not esh_instance:
        return failure_response(
            status.HTTP_400_BAD_REQUEST,
            'Instance %s no longer exists' % (instance_id, ))
    action = action_params['action']
    try:
        # Volume actions share id/mount-point/device parameter handling.
        if 'volume' in action:
            volume_id = action_params.get('volume_id')
            mount_location = action_params.get('mount_location', None)
            device = action_params.get('device', None)
            if 'attach_volume' == action:
                # Clients may send the strings 'null'/'None' for "no value".
                if mount_location == 'null' or mount_location == 'None':
                    mount_location = None
                if device == 'null' or device == 'None':
                    device = None
                future_mount_location = task.attach_volume_task(
                    esh_driver, esh_instance.alias, volume_id,
                    device, mount_location)
            elif 'mount_volume' == action:
                future_mount_location = task.mount_volume_task(
                    esh_driver, esh_instance.alias, volume_id,
                    device, mount_location)
            elif 'unmount_volume' == action:
                (result, error_msg) = task.unmount_volume_task(
                    esh_driver, esh_instance.alias, volume_id,
                    device, mount_location)
            elif 'detach_volume' == action:
                (result, error_msg) = task.detach_volume_task(
                    esh_driver, esh_instance.alias, volume_id)
                if not result and error_msg:
                    #Return reason for failed detachment
                    return failure_response(status.HTTP_400_BAD_REQUEST,
                                            error_msg)
            #Task complete, convert the volume and return the object
            esh_volume = esh_driver.get_volume(volume_id)
            core_volume = convert_esh_volume(esh_volume, provider_uuid,
                                             identity_uuid, user)
            result_obj = VolumeSerializer(
                core_volume, context={"request": request}).data
        elif 'resize' == action:
            size_alias = action_params.get('size', '')
            # NOTE(review): `type(size_alias) == int` would be better as
            # isinstance(); left as-is (behavior-preserving doc pass).
            if type(size_alias) == int:
                size_alias = str(size_alias)
            resize_instance(esh_driver, esh_instance, size_alias,
                            provider_uuid, identity_uuid, user)
        elif 'confirm_resize' == action:
            confirm_resize(esh_driver, esh_instance,
                           provider_uuid, identity_uuid, user)
        elif 'revert_resize' == action:
            esh_driver.revert_resize_instance(esh_instance)
        elif 'redeploy' == action:
            redeploy_init(esh_driver, esh_instance, countdown=None)
        elif 'resume' == action:
            result_obj = resume_instance(esh_driver, esh_instance,
                                         provider_uuid, identity_uuid, user)
        elif 'suspend' == action:
            result_obj = suspend_instance(esh_driver, esh_instance,
                                          provider_uuid, identity_uuid, user)
        elif 'shelve' == action:
            result_obj = shelve_instance(esh_driver, esh_instance,
                                         provider_uuid, identity_uuid, user)
        elif 'unshelve' == action:
            result_obj = unshelve_instance(esh_driver, esh_instance,
                                           provider_uuid, identity_uuid, user)
        elif 'shelve_offload' == action:
            result_obj = offload_instance(esh_driver, esh_instance)
        elif 'start' == action:
            start_instance(esh_driver, esh_instance,
                           provider_uuid, identity_uuid, user)
        elif 'stop' == action:
            stop_instance(esh_driver, esh_instance,
                          provider_uuid, identity_uuid, user)
        elif 'reset_network' == action:
            esh_driver.reset_network(esh_instance)
        elif 'console' == action:
            result_obj = esh_driver._connection.ex_vnc_console(
                esh_instance)
        elif 'reboot' == action:
            reboot_type = action_params.get('reboot_type', 'SOFT')
            reboot_instance(esh_driver, esh_instance,
                            identity_uuid, user, reboot_type)
        elif 'rebuild' == action:
            machine_alias = action_params.get('machine_alias', '')
            machine = esh_driver.get_machine(machine_alias)
            esh_driver.rebuild_instance(esh_instance, machine)
        else:
            # NOTE(review): "to to" typo in this user-facing message --
            # left untouched here (string literals are behavior).
            return failure_response(
                status.HTTP_400_BAD_REQUEST,
                'Unable to to perform action %s.' % (action))
        #ASSERT: The action was executed successfully
        api_response = {
            'result': 'success',
            'message': 'The requested action <%s> was run successfully'
            % action_params['action'],
            'object': result_obj,
        }
        response = Response(api_response, status=status.HTTP_200_OK)
        return response
    ### Exception handling below..
    # NOTE(review): Python 2 `except X, name` syntax -- the rest of the
    # handler chain appears to live outside this chunk.
    except HypervisorCapacityError, hce:
        return over_capacity(hce)
def test_instance_links(alias, uri):
    """
    Probe the shell and VNC endpoints for a single instance.

    Returns {alias: {'vnc': bool, 'shell': bool}}; both False when
    `uri` is None (instance has no reachable address).
    Probes are best-effort: any exception from test_link counts as a
    failed link and is logged.
    """
    from rtwo.linktest import test_link
    if uri is None:
        return {alias: {'vnc': False, 'shell': False}}
    shell_address = '%s/shell/%s/' % (settings.SERVER_URL, uri)
    try:
        shell_success = test_link(shell_address)
    except Exception, e:
        logger.exception("Bad shell address: %s" % shell_address)
        shell_success = False
    # VNC is served on a fixed port on the instance's address.
    vnc_address = 'http://%s:5904' % uri
    try:
        vnc_success = test_link(vnc_address)
    except Exception, e:
        logger.exception("Bad vnc address: %s" % vnc_address)
        vnc_success = False
    return {alias: {'vnc': vnc_success, 'shell': shell_success}}


def update_links(instances):
    """
    Persist link-test results onto the matching core Instance rows.

    NOTE(review): this definition is TRUNCATED in this chunk -- the
    `try:` below has no matching `except`, and the function body is cut
    off after the first shell-change log line.  Do not edit further
    without the full text.
    """
    from core.models import Instance
    updated = []
    linktest_results = active_instances(instances)
    for (instance_id, link_results) in linktest_results.items():
        try:
            update = False
            instance = Instance.objects.get(provider_alias=instance_id)
            if link_results['shell'] != instance.shell:
                logger.debug('Change Instance %s shell %s-->%s' %
                             (instance, instance.shell,
                              link_results['shell']))
def post(self, request, provider_uuid, identity_uuid, format=None):
    """
    Instance Class:
    Launches an instance based on the params.
    Returns a single instance.

    Parameters: machine_alias, size_alias, username
    Optional body keys: hypervisor (availability zone), deploy
    (bool or the strings "true"/"false"), scripts (boot scripts).

    TODO: Create a 'reverse' using the instance-id to pass
    the URL for the newly created instance
    I.e: url = "/provider/1/instance/1/i-12345678"
    """
    data = request.data
    user = request.user
    # Check the data is valid
    missing_keys = valid_post_data(data)
    if missing_keys:
        return keys_not_found(missing_keys)
    # Pass these as args
    size_alias = data.pop("size_alias")
    machine_alias = data.pop("machine_alias")
    hypervisor_name = data.pop("hypervisor", None)
    deploy = data.pop("deploy", True)
    # Coerce a string "false" (any case) to False; any other non-bool
    # defaults to True.
    # (was: `type(deploy) in [str, unicode]` -- basestring covers both)
    if isinstance(deploy, basestring) and deploy.lower() == "false":
        deploy = False
    elif not isinstance(deploy, bool):
        deploy = True
    boot_scripts = data.pop("scripts", [])
    try:
        logger.debug(data)
        core_instance = launch_instance(
            user, identity_uuid, size_alias, machine_alias,
            ex_availability_zone=hypervisor_name,
            deploy=deploy, **data)
    except UnderThresholdError as ute:
        return under_threshold(ute)
    except OverQuotaError as oqe:
        return over_quota(oqe)
    except OverAllocationError as oae:
        # Allocation overruns reuse the quota failure response.
        return over_quota(oae)
    except SizeNotAvailable as snae:
        return size_not_available(snae)
    except SecurityGroupNotCreated:
        return connection_failure(provider_uuid, identity_uuid)
    except (socket_error, ConnectionFailure):
        return connection_failure(provider_uuid, identity_uuid)
    except InvalidCredsError:
        return invalid_creds(provider_uuid, identity_uuid)
    except Exception as exc:
        logger.exception("Encountered a generic exception. "
                         "Returning 409-CONFLICT")
        return failure_response(status.HTTP_409_CONFLICT,
                                str(exc.message))
    serializer = InstanceSerializer(core_instance,
                                    context={"request": request},
                                    data=data)
    if serializer.is_valid():
        instance = serializer.save()
        if boot_scripts:
            _save_scripts_to_instance(instance, boot_scripts)
        return Response(serializer.data, status=status.HTTP_201_CREATED)
    else:
        return Response(serializer.errors,
                        status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, provider_uuid, identity_uuid, instance_id):
    """Authentication Required, TERMINATE the instance.

    Be careful, there is no going back once you've deleted an instance.

    Three phases, each with its own error handling:
      1. Resolve the driver and fetch the instance.
      2. Queue the destroy task and invalidate cached instance lists;
         tag the (soon-to-be-deleted) instance's task status.
      3. Convert to a core instance, end-date it, and return the
         serialized result (Cache-Control: no-cache).
    """
    user = request.user
    esh_driver = prepare_driver(request, provider_uuid, identity_uuid)
    if not esh_driver:
        return invalid_creds(provider_uuid, identity_uuid)
    try:
        esh_instance = esh_driver.get_instance(instance_id)
    except ConnectionFailure:
        return connection_failure(provider_uuid, identity_uuid)
    except InvalidCredsError:
        return invalid_creds(provider_uuid, identity_uuid)
    except Exception as exc:
        logger.exception("Encountered a generic exception. "
                         "Returning 409-CONFLICT")
        return failure_response(status.HTTP_409_CONFLICT,
                                str(exc.message))
    try:
        #Test that there is not an attached volume BEFORE we destroy
        #_check_volume_attachment(esh_driver, esh_instance)
        task.destroy_instance_task(esh_instance, identity_uuid)
        # Cached instance lists are now stale for this identity.
        invalidate_cached_instances(identity=Identity.objects.get(
            uuid=identity_uuid))
        existing_instance = esh_driver.get_instance(instance_id)
        if existing_instance:
            #Instance will be deleted soon...
            esh_instance = existing_instance
            if esh_instance.extra\
                    and 'task' not in esh_instance.extra:
                esh_instance.extra['task'] = 'queueing delete'
    except VolumeAttachConflict as exc:
        message = exc.message
        return failure_response(status.HTTP_409_CONFLICT, message)
    except ConnectionFailure:
        return connection_failure(provider_uuid, identity_uuid)
    except InvalidCredsError:
        return invalid_creds(provider_uuid, identity_uuid)
    except Exception as exc:
        logger.exception("Encountered a generic exception. "
                         "Returning 409-CONFLICT")
        return failure_response(status.HTTP_409_CONFLICT,
                                str(exc.message))
    try:
        core_instance = convert_esh_instance(esh_driver, esh_instance,
                                             provider_uuid, identity_uuid,
                                             user)
        if core_instance:
            core_instance.end_date_all()
        else:
            logger.warn("Unable to find core instance %s."
                        % (instance_id))
        serialized_data = InstanceSerializer(
            core_instance,
            context={"request": request}).data
        response = Response(serialized_data, status=status.HTTP_200_OK)
        response['Cache-Control'] = 'no-cache'
        return response
    except (Identity.DoesNotExist) as exc:
        return failure_response(status.HTTP_400_BAD_REQUEST,
                                "Invalid provider_uuid or identity_uuid.")
    except ConnectionFailure:
        return connection_failure(provider_uuid, identity_uuid)
    except InvalidCredsError:
        return invalid_creds(provider_uuid, identity_uuid)
def _test_user_email(atmo_user, email_pattern):
    """
    Return True if the user's email matches `email_pattern`.

    Both sides are lowercased; matches either as a glob
    (_simple_glob_test) or as a case-insensitive substring
    (_simple_match with contains=True).  Logs the comparison.
    """
    email = atmo_user.email.lower()
    email_pattern = email_pattern.pattern.lower()
    result = _simple_glob_test(email, email_pattern) or _simple_match(
        email, email_pattern, contains=True)
    logger.info("Email:%s Pattern:%s - Result:%s"
                % (email, email_pattern, result))
    return result


def _test_username(atmo_user, username_match):
    """
    Return True if the user's username contains `username_match`
    (substring match).  Logs the comparison.
    """
    username = atmo_user.username
    result = _simple_match(username, username_match, contains=True)
    logger.info("Username:%s Match On:%s - Result:%s"
                % (username, username_match, result))
    return result


def is_url(test_string):
    """
    Return True if `test_string` is a syntactically valid URL
    according to Django's URLValidator, else False.
    """
    val = URLValidator()
    try:
        val(test_string)
        return True
    except ValidationError:
        # Expected outcome for non-URLs. (was `except ValidationError, e`
        # with an unused binding)
        return False
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt.  Narrowed to Exception; still
        # logged so a broken validator setup is visible.
        logger.exception("URL Validation no longer works -- Code fix required")
        return False
except UnderThresholdError, ute: return under_threshold(ute) except OverQuotaError, oqe: return over_quota(oqe) except OverAllocationError, oae: return over_quota(oae) except SizeNotAvailable, snae: return size_not_availabe(snae) except SecurityGroupNotCreated: return connection_failure(provider_uuid, identity_uuid) except ConnectionFailure: return connection_failure(provider_uuid, identity_uuid) except InvalidCredsError: return invalid_creds(provider_uuid, identity_uuid) except Exception as exc: logger.exception("Encountered a generic exception. " "Returning 409-CONFLICT") return failure_response(status.HTTP_409_CONFLICT, str(exc.message)) serializer = InstanceSerializer(core_instance, context={"request": request}, data=data) #NEVER WRONG if serializer.is_valid(): serializer.save() if boot_scripts: _save_scripts_to_instance(serializer.object, boot_scripts) return Response(serializer.data, status=status.HTTP_201_CREATED) else: return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def clear_empty_ips():
    """
    Celery task: for every active OpenStack identity, reclaim unused
    floating IPs and, when the tenant has no active instances, tear
    down its project network (and security group if nothing is
    suspended).

    Per-identity failures are logged and skipped so one broken tenant
    cannot stall the sweep.
    """
    logger.debug("clear_empty_ips task started at %s." % datetime.now())
    from rtwo.driver import OSDriver
    from api import get_esh_driver
    from service.accounts.openstack import AccountDriver as \
        OSAccountDriver
    identities = Identity.objects.filter(
        provider__type__name__iexact='openstack',
        provider__active=True)
    # BUG FIX: the original key was
    #   lambda ident: attrgetter(ident.provider.type.name,
    #                            ident.created_by.username)
    # which builds a *new attrgetter object* per identity and sorts by
    # those objects -- an arbitrary order.  Sort by the actual
    # (provider type name, username) values instead.
    identities = sorted(
        identities,
        key=lambda ident: (ident.provider.type.name,
                           ident.created_by.username))
    os_acct_driver = None
    total = len(identities)
    for idx, core_identity in enumerate(identities):
        try:
            #Initialize the drivers
            driver = get_esh_driver(core_identity)
            if not isinstance(driver, OSDriver):
                continue
            # Re-use the account driver across identities on the same provider.
            if not os_acct_driver or \
                    os_acct_driver.core_provider != core_identity.provider:
                os_acct_driver = OSAccountDriver(core_identity.provider)
                logger.info("Initialized account driver")
            # Get useful info
            creds = core_identity.get_credentials()
            tenant_name = creds['ex_tenant_name']
            logger.info("Checking Identity %s/%s - %s"
                        % (idx + 1, total, tenant_name))
            # Attempt to clean floating IPs
            num_ips_removed = driver._clean_floating_ip()
            if num_ips_removed:
                logger.debug("Removed %s ips from OpenStack Tenant %s"
                             % (num_ips_removed, tenant_name))
            #Test for active/inactive instances
            instances = driver.list_instances()
            active = any(driver._is_active_instance(inst)
                         for inst in instances)
            inactive = all(driver._is_inactive_instance(inst)
                           for inst in instances)
            if active and not inactive:
                #User has >1 active instances AND not all instances inactive
                pass
            elif os_acct_driver.network_manager.get_network_id(
                    os_acct_driver.network_manager.neutron,
                    '%s-net' % tenant_name):
                #User has 0 active instances OR all instances are inactive
                #Network exists, attempt to dismantle as much as possible
                remove_network = not inactive
                logger.info("Removing project network %s for %s"
                            % (remove_network, tenant_name))
                if remove_network:
                    #Sec. group can't be deleted if instances are suspended
                    # when instances are suspended we pass remove_network=False
                    os_acct_driver.delete_security_group(core_identity)
                os_acct_driver.delete_network(
                    core_identity, remove_network=remove_network)
            else:
                #logger.info("No Network found. Skipping %s" % tenant_name)
                pass
        except Exception as exc:
            logger.exception(exc)
    logger.debug("clear_empty_ips task finished at %s." % datetime.now())
def launch_esh_instance(driver, machine_alias, size_alias, core_identity,
                        name=None, username=None, using_admin=False,
                        *args, **kwargs):
    """
    Create an instance on the provider behind `driver` (Eucalyptus,
    OpenStack, or AWS) and return
    (esh_instance, instance_token, instance_password).

    TODO: Remove extras, pass as kwarg_dict instead

    - A fresh UUID token identifies this launch attempt; a second UUID
      becomes the instance's one-time root password.
    - Eucalyptus: builds a userdata init script and passes it at create.
    - OpenStack: initializes tenant network/keypair (and security group
      for non-admin launches), creates with metadata + keypair, then
      queues the async deploy task.
    - AWS: deploy_instance with the token.

    Raises on unknown providers or missing machine/size; all exceptions
    are logged and re-raised.
    """
    from service import task
    try:
        #create a reference to this attempted instance launch.
        instance_token = str(uuid.uuid4())
        #create a unique one-time password for instance root user
        instance_password = str(uuid.uuid4())
        #TODO: Mock these for faster launch performance
        #Gather the machine object
        machine = driver.get_machine(machine_alias)
        if not machine:
            raise Exception(
                "Machine %s could not be located with this driver"
                % machine_alias)
        #Gather the size object
        size = driver.get_size(size_alias)
        if not size:
            raise Exception(
                "Size %s could not be located with this driver" % size_alias)
        if not username:
            username = driver.identity.user.username
        if not name:
            name = 'Instance of %s' % machine.alias
        if isinstance(driver.provider, EucaProvider):
            #Create and set userdata
            instance_service_url = "%s" % (settings.INSTANCE_SERVICE_URL,)
            init_file_version = kwargs.get('init_file', "v1")
            # Remove quotes -- Single && Double
            name = name.replace('"', '').replace("'", "")
            userdata_contents = _get_init_script(instance_service_url,
                                                 instance_token,
                                                 instance_password,
                                                 name, username,
                                                 init_file_version)
            #Create/deploy the instance -- NOTE: Name is passed in extras
            logger.info("EUCA -- driver.create_instance EXTRAS:%s" % kwargs)
            esh_instance = driver\
                .create_instance(name=name, image=machine, size=size,
                                 ex_userdata=userdata_contents, **kwargs)
        elif isinstance(driver.provider, OSProvider):
            # NOTE(review): `deploy` is set but create_instance below
            # passes the literal `deploy=True` -- confirm intent.
            deploy = True
            if not using_admin:
                security_group_init(core_identity)
            # NOTE(review): source indentation was collapsed; network_init
            # is placed outside the `if` because `network` is required
            # unconditionally below -- confirm against the original file.
            network = network_init(core_identity)
            keypair_init(core_identity)
            credentials = core_identity.get_credentials()
            tenant_name = credentials.get('ex_tenant_name')
            ex_metadata = {'tmp_status': 'initializing',
                           'tenant_name': tenant_name,
                           'creator': '%s' % username}
            ex_keyname = settings.ATMOSPHERE_KEYPAIR_NAME
            logger.debug("OS driver.create_instance kwargs: %s" % kwargs)
            esh_instance = driver.create_instance(name=name, image=machine,
                                                  size=size,
                                                  token=instance_token,
                                                  ex_metadata=ex_metadata,
                                                  ex_keyname=ex_keyname,
                                                  networks=[network],
                                                  deploy=True, **kwargs)
            #Used for testing.. Eager ignores countdown
            if app.conf.CELERY_ALWAYS_EAGER:
                logger.debug("Eager Task, wait 1 minute")
                time.sleep(1*60)
            # call async task to deploy to instance.
            task.deploy_init_task(driver, esh_instance, username,
                                  instance_password, instance_token)
        elif isinstance(driver.provider, AWSProvider):
            #TODO:Extra stuff needed for AWS provider here
            esh_instance = driver.deploy_instance(name=name, image=machine,
                                                  size=size, deploy=True,
                                                  token=instance_token,
                                                  **kwargs)
        else:
            raise Exception("Unable to launch with this provider.")
        return (esh_instance, instance_token, instance_password)
    except Exception as e:
        logger.exception(e)
        raise