def send_email(subject, body, from_email, to, cc=None,
               fail_silently=False, html=False):
    """ Use django.core.mail.EmailMessage to send and log an Atmosphere email.
    """
    try:
        msg = EmailMessage(subject=subject, body=body,
                           from_email=from_email,
                           to=to,
                           cc=cc)
        if html:
            msg.content_subtype = 'html'
        email_logger.info(
            "\n> From:%s\n> To:%s\n> Cc:%s\n> Subject:%s\n> Body:\n%s",
            from_email, to, cc, subject, body)
        if getattr(settings, "SEND_EMAILS", True):
            msg.send(fail_silently=fail_silently)
            email_logger.info("NOTE: Above message sent successfully")
            celery_logger.info("NOTE: Above message sent successfully")
        else:
            email_logger.info(
                "NOTE: Above message not sent -- SEND_EMAILS was False")
            celery_logger.info(
                "NOTE: Above message not sent -- SEND_EMAILS was False")
        return True
    except Exception as e:
        celery_logger.exception(e)
        return False
def remove_empty_networks_for(provider_id):
    provider = Provider.objects.get(id=provider_id)
    os_driver = get_account_driver(provider)
    all_instances = os_driver.admin_driver.list_all_instances()
    project_map = os_driver.network_manager.project_network_map()
    projects_with_networks = project_map.keys()
    for project in projects_with_networks:
        networks = project_map[project]['network']
        if not isinstance(networks, list):
            networks = [networks]
        for network in networks:
            network_name = network['name']
            celery_logger.debug("Checking if network %s is in use" % network_name)
            if running_instances(network_name, all_instances):
                continue
            # TODO: MUST change when not using 'usergroups' explicitly.
            user = project
            try:
                celery_logger.debug(
                    "Removing project network for User:%s, Project:%s"
                    % (user, project))
                os_driver.network_manager.delete_project_network(user, project)
            except NeutronClientException:
                celery_logger.exception("Neutron unable to remove project "
                                        "network for %s-%s" % (user, project))
            except NeutronException:
                celery_logger.exception("Neutron unable to remove project "
                                        "network for %s-%s" % (user, project))
def send_email(subject, body, from_email, to, cc=None, html=False):
    """ Use django.core.mail.EmailMessage to send and log an Atmosphere email.
    """
    try:
        msg = EmailMessage(subject=subject, body=body,
                           from_email=from_email,
                           to=to,
                           cc=cc)
        if html:
            msg.content_subtype = 'html'
        email_logger.info(
            "\n> From:%s\n> To:%s\n> Cc:%s\n> Subject:%s\n> Body:\n%s",
            from_email, to, cc, subject, body)
        if getattr(settings, "SEND_EMAILS", True):
            msg.send()
            email_logger.info("NOTE: Above message sent successfully")
            celery_logger.info("NOTE: Above message sent successfully")
        else:
            email_logger.info(
                "NOTE: Above message not sent -- SEND_EMAILS was False")
            celery_logger.info(
                "NOTE: Above message not sent -- SEND_EMAILS was False")
        return True
    except Exception as e:
        celery_logger.exception(e)
        return False
def update_mount_location(new_mount_location,
                          driverCls, provider, identity,
                          volume_alias):
    """
    Update the 'mount_location' metadata on a volume.
    """
    from service import volume as volume_service
    try:
        celery_logger.debug(
            "update_mount_location task started at %s." % datetime.now())
        driver = get_driver(driverCls, provider, identity)
        volume = driver.get_volume(volume_alias)
        if not volume:
            return
        if not new_mount_location:
            return
        volume_metadata = volume.extra['metadata']
        result = volume_service._update_volume_metadata(
            driver, volume,
            metadata={'mount_location': new_mount_location})
        celery_logger.debug(
            "update_mount_location task finished at %s." % datetime.now())
        return result
    except Exception as exc:
        celery_logger.exception(exc)
        update_mount_location.retry(exc=exc)
def send_email(subject, body, from_email, to, cc=None,
               fail_silently=False, html=False):
    """ Use django.core.mail.EmailMessage to send and log an Atmosphere email.
    """
    try:
        msg = EmailMessage(subject=subject, body=body,
                           from_email=from_email,
                           to=to,
                           cc=cc)
        if html:
            msg.content_subtype = 'html'
        log_message = "\n> From:{0}\n> To:{1}\n> Cc:{2}\n> Subject:{3}\n> Body:\n{4}"
        args = (from_email, to, cc, subject, body)
        email_logger.info(log_message.format(*args))
        msg.send(fail_silently=fail_silently)
        return True
    except Exception as e:
        celery_logger.exception(e)
        return False
def send_email(subject, body, from_email, to, cc=None,
               fail_silently=False, html=False):
    """ Use django.core.mail.EmailMessage to send and log an Atmosphere email.
    """
    try:
        msg = EmailMessage(subject=subject, body=body,
                           from_email=from_email,
                           to=to,
                           cc=cc)
        if html:
            msg.content_subtype = 'html'
        log_message = "\n> From:{0}\n> To:{1}\n> Cc:{2}\n> Subject:{3}\n> Body:\n{4}"
        args = (from_email, to, cc, subject, body)
        email_logger.info(log_message.format(*args))
        if getattr(settings, "SEND_EMAILS", True):
            msg.send(fail_silently=fail_silently)
            email_logger.info("NOTE: Above message sent successfully")
            celery_logger.info("NOTE: Above message sent successfully")
        else:
            email_logger.info(
                "NOTE: Above message not sent -- SEND_EMAILS was False")
            celery_logger.info(
                "NOTE: Above message not sent -- SEND_EMAILS was False")
        return True
    except Exception as e:
        celery_logger.exception(e)
        return False
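# --- Illustrative usage sketch (not part of the original source). Assuming the
# Django email backend and the optional SEND_EMAILS setting are configured, a
# caller might invoke send_email like this; the addresses are placeholders only.
sent = send_email(
    subject="Instance ready",
    body="Your instance has finished deploying.",
    from_email="support@example.org",
    to=["user@example.org"],
    cc=None,
    html=False,
)
# send_email returns True on success and False if an exception was logged;
# with settings.SEND_EMAILS = False the message is only logged, never sent.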
def monitor_instances_for(provider_id, users=None,
                          print_logs=False, check_allocations=False,
                          start_date=None, end_date=None):
    """
    Run the set of tasks related to monitoring instances for a provider.
    Optionally, provide a list of usernames to monitor
    While debugging, print_logs=True can be very helpful.
    start_date and end_date allow you to search a 'non-standard' window of time.
    """
    provider = Provider.objects.get(id=provider_id)

    # For now, let's just ignore everything that isn't openstack.
    if 'openstack' not in provider.type.name.lower():
        return

    instance_map = _get_instance_owner_map(provider, users=users)

    if print_logs:
        import logging
        import sys
        consolehandler = logging.StreamHandler(sys.stdout)
        consolehandler.setLevel(logging.DEBUG)
        celery_logger.addHandler(consolehandler)

    # DEVNOTE: Potential slowdown running multiple functions
    # Break this out when instance-caching is enabled
    running_total = 0
    for username in sorted(instance_map.keys()):
        running_instances = instance_map[username]
        running_total += len(running_instances)
        identity = _get_identity_from_tenant_name(provider, username)
        if identity and running_instances:
            try:
                driver = get_cached_driver(identity=identity)
                core_running_instances = [
                    convert_esh_instance(
                        driver, inst,
                        identity.provider.uuid,
                        identity.uuid,
                        identity.created_by) for inst in running_instances]
            except Exception as exc:
                celery_logger.exception(
                    "Could not convert running instances for %s" % username)
                continue
        else:
            # No running instances.
            core_running_instances = []
        # Using the 'known' list of running instances, cleanup the DB
        core_instances = _cleanup_missing_instances(
            identity, core_running_instances)
        if check_allocations:
            allocation_result = user_over_allocation_enforcement(
                provider, username,
                print_logs, start_date, end_date)
    if print_logs:
        celery_logger.removeHandler(consolehandler)
    return running_total
def set_provider_quota(identity_uuid):
    try:
        return spq(identity_uuid)
    except Exception as exc:
        celery_logger.exception("Encountered an exception trying to "
                                "'set_provider_quota' for Identity UUID:%s"
                                % identity_uuid)
        set_provider_quota.retry(exc=exc)
def add_membership_task(image_version, group):
    celery_logger.debug(
        "add_membership_task task started at %s." % timezone.now())
    try:
        add_membership(image_version, group)
        celery_logger.debug(
            "add_membership_task task finished at %s." % timezone.now())
    except Exception as exc:
        celery_logger.exception(exc)
        add_membership_task.retry(exc=exc)
def _req(
    self, method, url, json_data=None, additional_headers=None, json_resp=True
):
    """
    Send a request with the given method to the given url

    Args:
        method (str): HTTP method
        url (str): api url to send the request to
        json_data (dict, optional): JSON payload. Defaults to None.
        additional_headers (dict, optional): additional headers. Defaults to None.
        json_resp (bool, optional): if response is json. Defaults to True.

    Raises:
        ResponseNotJSON: raised when the response is not JSON
        HTTPError: request failed

    Returns:
        dict: response text as JSON object
    """
    try:
        headers = {}
        headers["Host"] = self.host
        headers["Accept"] = "application/json;q=0.9,*/*;q=0.8"
        headers["Content-Type"] = "application/json"
        if self._token:
            headers["Authorization"] = "Bearer " + self._token
        if additional_headers:
            headers.update(additional_headers)

        full_url = self.base_url + url
        requests_func = _http_method(method)
        if json_data:
            resp = requests_func(
                full_url, headers=headers, json=json_data, verify=self.verify
            )
        else:
            resp = requests_func(full_url, headers=headers, verify=self.verify)
        resp.raise_for_status()

        if json_resp:
            return json.loads(resp.text)
        return resp.text
    except JSONDecodeError as exc:
        msg = "ARGO - REST API, {}, {}".format(type(exc), resp.text)
        logger.exception(msg)
        raise ResponseNotJSON("ARGO, Fail to parse response body as JSON")
    except requests.exceptions.HTTPError as exc:
        msg = "ARGO - REST API, {}, {}".format(type(exc), resp.text)
        logger.exception(msg)
        raise exc
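# --- Illustrative usage sketch (not part of the original source). Assuming
# `client` is an instance of the class that defines _req (with host, base_url,
# _token and verify already configured), a JSON GET and POST might look like
# this; the endpoint paths and payload below are hypothetical.
workflows = client._req("get", "/api/v1/workflows")
created = client._req(
    "post",
    "/api/v1/workflows",
    json_data={"metadata": {"generateName": "example-"}},
)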
def attach_task(driverCls, provider, identity, instance_id, volume_id,
                device_choice=None, *args, **kwargs):
    try:
        celery_logger.debug("attach_task started at %s." % datetime.now())
        driver = get_driver(driverCls, provider, identity)
        instance = driver.get_instance(instance_id)
        volume = driver.get_volume(volume_id)

        # Step 1. Attach the volume
        # NOTE: device_choice !== device 100%
        driver.attach_volume(instance, volume, device_choice)

        # When the result returns the volume will be 'attaching'
        # We can't do anything until the volume is 'available/in-use'
        attempts = 0
        while True:
            volume = driver.get_volume(volume_id)
            # Give up if you can't find the volume
            if not volume:
                return None
            if attempts > 6:  # After 6 attempts (~1min)
                break
            # Openstack Check
            if isinstance(driver, OSDriver) and\
                    'attaching' not in volume.extra.get('status', ''):
                break
            if isinstance(driver, EucaDriver) and\
                    'attaching' not in volume.extra.get('status', ''):
                break
            # Exponential backoff..
            attempts += 1
            sleep_time = 2**attempts
            celery_logger.debug("Volume %s is not ready (%s). Sleep for %s"
                                % (volume.id,
                                   volume.extra.get('status', 'no-status'),
                                   sleep_time))
            time.sleep(sleep_time)

        if 'available' in volume.extra.get('status', ''):
            raise Exception("Volume %s failed to attach to instance %s"
                            % (volume.id, instance.id))

        # Device path for euca == openstack
        try:
            attach_data = volume.extra['attachments'][0]
            device = attach_data['device']
        except (IndexError, KeyError) as bad_fetch:
            celery_logger.warn("Could not find 'device' in "
                               "volume.extra['attachments']: "
                               "Volume:%s Extra:%s" % (volume.id, volume.extra))
            device = None

        celery_logger.debug("attach_task finished at %s." % datetime.now())
        return device
    except Exception as exc:
        celery_logger.exception(exc)
        attach_task.retry(exc=exc)
def monitor_instances_for(provider_id, users=None,
                          print_logs=False, check_allocations=False,
                          start_date=None, end_date=None):
    """
    Run the set of tasks related to monitoring instances for a provider.
    Optionally, provide a list of usernames to monitor
    While debugging, print_logs=True can be very helpful.
    start_date and end_date allow you to search a 'non-standard' window of time.
    """
    provider = Provider.objects.get(id=provider_id)

    # For now, let's just ignore everything that isn't openstack.
    if 'openstack' not in provider.type.name.lower():
        return

    instance_map = _get_instance_owner_map(provider, users=users)

    if print_logs:
        console_handler = _init_stdout_logging()

    # DEVNOTE: Potential slowdown running multiple functions
    # Break this out when instance-caching is enabled
    running_total = 0
    if not settings.ENFORCING:
        celery_logger.debug('Settings dictate allocations are NOT enforced')
    for username in sorted(instance_map.keys()):
        running_instances = instance_map[username]
        running_total += len(running_instances)
        identity = _get_identity_from_tenant_name(provider, username)
        if identity and running_instances:
            try:
                driver = get_cached_driver(identity=identity)
                core_running_instances = [
                    convert_esh_instance(driver, inst,
                                         identity.provider.uuid,
                                         identity.uuid,
                                         identity.created_by)
                    for inst in running_instances
                ]
            except Exception as exc:
                celery_logger.exception(
                    "Could not convert running instances for %s" % username)
                continue
        else:
            # No running instances.
            core_running_instances = []
        # Using the 'known' list of running instances, cleanup the DB
        core_instances = _cleanup_missing_instances(identity,
                                                    core_running_instances)
        if check_allocations:
            allocation_result = user_over_allocation_enforcement(
                provider, username,
                print_logs, start_date, end_date)
    if print_logs:
        _exit_stdout_logging(console_handler)
    return running_total
def argo_wf_template_exec(
    wf_template_filename,
    provider_uuid,
    workflow_data,
    config_file_path=None,
    wait=False
):
    """
    Execute a specified Argo workflow.
    Find file based on provider.
    Pass argument to workflow.

    Args:
        wf_template_filename (str): filename of the workflow
        provider_uuid (str): uuid of the provider
        workflow_data (dict): data to be passed to workflow as arguments
        config_file_path (str, optional): path to the config file. will use
            the default one from the setting if None. Defaults to None.
        wait (bool, optional): wait for workflow to complete. Defaults to False.

    Returns:
        (ArgoWorkflow, dict): workflow and status of the workflow,
            e.g. {"complete": bool, "success": bool, "error": bool}
    """
    try:
        # read configuration from file
        config = read_argo_config(config_file_path=config_file_path)

        # construct workflow context
        context = ArgoContext(config=config)

        # find the workflow definition
        wf_temp_def = argo_lookup_yaml_file(
            config["workflow_base_dir"], wf_template_filename, provider_uuid
        )

        # submit workflow template
        wf_temp = ArgoWorkflowTemplate.create(context, wf_temp_def)
        wf_name = wf_temp.execute(context, wf_param=workflow_data)
        wf = ArgoWorkflow(wf_name)

        # polling if needed
        if wait:
            status = wf.watch(context, 10, 18)
            if status.complete:
                return (wf, status)
            status = wf.watch(context, 60, 1440)
            return (wf, status)

        return (wf, {"complete": None, "success": None, "error": None})
    except Exception as exc:
        logger.exception(
            "ARGO, argo_wf_template_exec(), {} {}".format(type(exc), exc)
        )
        raise exc
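# --- Illustrative usage sketch (not part of the original source). The template
# filename, provider UUID and workflow parameters below are placeholders only.
wf, status = argo_wf_template_exec(
    "example_template.yml",
    "00000000-0000-0000-0000-000000000000",
    {"arg1": "value1"},
    wait=True,
)
# With wait=True the helper polls the submitted workflow (a short watch first,
# then a longer one) and returns the workflow object with its final status.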
def monitor_instances_for(provider_id, users=None,
                          print_logs=False, start_date=None, end_date=None):
    """
    Run the set of tasks related to monitoring instances for a provider.
    Optionally, provide a list of usernames to monitor
    While debugging, print_logs=True can be very helpful.
    start_date and end_date allow you to search a 'non-standard' window of time.
    """
    provider = Provider.objects.get(id=provider_id)

    # For now, let's just ignore everything that isn't openstack.
    if 'openstack' not in provider.type.name.lower():
        return

    instance_map = _get_instance_owner_map(provider, users=users)

    if print_logs:
        console_handler = _init_stdout_logging()
    seen_instances = []
    # DEVNOTE: Potential slowdown running multiple functions
    # Break this out when instance-caching is enabled
    if not settings.ENFORCING:
        celery_logger.debug('Settings dictate allocations are NOT enforced')
    for tenant_name in sorted(instance_map.keys()):
        running_instances = instance_map[tenant_name]
        identity = _get_identity_from_tenant_name(provider, tenant_name)
        if identity and running_instances:
            try:
                driver = get_cached_driver(identity=identity)
                core_running_instances = [
                    convert_esh_instance(
                        driver, inst,
                        identity.provider.uuid,
                        identity.uuid,
                        identity.created_by) for inst in running_instances]
                seen_instances.extend(core_running_instances)
            except Exception as exc:
                celery_logger.exception(
                    "Could not convert running instances for %s" % tenant_name)
                continue
        else:
            # No running instances.
            core_running_instances = []
        # Using the 'known' list of running instances, cleanup the DB
        core_instances = _cleanup_missing_instances(
            identity, core_running_instances)
    if print_logs:
        _exit_stdout_logging(console_handler)
    # return seen_instances NOTE: this has been commented out to avoid PicklingError!
    # TODO: Uncomment the above, Determine what _we can return_ and return that instead....
    return
def monitor_instances_for(
    provider_id, users=None, print_logs=False, start_date=None, end_date=None
):
    """
    Run the set of tasks related to monitoring instances for a provider.
    Optionally, provide a list of usernames to monitor
    While debugging, print_logs=True can be very helpful.
    start_date and end_date allow you to search a 'non-standard' window of time.
    """
    provider = Provider.objects.get(id=provider_id)

    # For now, let's just ignore everything that isn't openstack.
    if 'openstack' not in provider.type.name.lower():
        return

    instance_map = _get_instance_owner_map(provider, users=users)

    if print_logs:
        console_handler = _init_stdout_logging()
    seen_instances = []
    # DEVNOTE: Potential slowdown running multiple functions
    # Break this out when instance-caching is enabled
    if not settings.ENFORCING:
        celery_logger.debug('Settings dictate allocations are NOT enforced')
    for tenant_name in sorted(instance_map.keys()):
        running_instances = instance_map[tenant_name]
        identity = _get_identity_from_tenant_name(provider, tenant_name)
        if identity and running_instances:
            try:
                driver = get_cached_driver(identity=identity)
                core_running_instances = [
                    convert_esh_instance(
                        driver, inst, identity.provider.uuid, identity.uuid,
                        identity.created_by
                    ) for inst in running_instances
                ]
                seen_instances.extend(core_running_instances)
            except Exception:
                celery_logger.exception(
                    "Could not convert running instances for %s" % tenant_name
                )
                continue
        else:
            # No running instances.
            core_running_instances = []
        # Using the 'known' list of running instances, cleanup the DB
        _cleanup_missing_instances(identity, core_running_instances)
    if print_logs:
        _exit_stdout_logging(console_handler)
    # return seen_instances NOTE: this has been commented out to avoid PicklingError!
    # TODO: Uncomment the above, Determine what _we can return_ and return that instead....
    return
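# --- Illustrative usage sketch (not part of the original source). In the
# original codebase these monitoring functions appear to run as Celery tasks
# (they attach stdout logging and clean up DB state); a direct call while
# debugging might look like this, with the provider id and tenant name made up.
monitor_instances_for(
    provider_id=1,
    users=["example_tenant"],
    print_logs=True,
)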
def detach_task(
    driverCls, provider, identity, instance_id, volume_id, *args, **kwargs
):
    try:
        celery_logger.debug("detach_task started at %s." % timezone.now())
        driver = get_driver(driverCls, provider, identity)
        instance = driver.get_instance(instance_id)
        volume = driver.get_volume(volume_id)
        driver.detach_volume(volume)

        # When the result returns the volume will be 'detaching'
        # We will ensure the volume does not return to 'in-use'
        attempts = 0
        while True:
            volume = driver.get_volume(volume_id)
            if attempts > 6:  # After 6 attempts (~1min)
                break
            # The Openstack way
            if isinstance(driver, OSDriver)\
                    and 'detaching' not in volume.extra['status']:
                break
            # The Eucalyptus way
            attach_data = volume.extra['attachments'][0]
            if isinstance(driver, EucaDriver) and attach_data\
                    and 'detaching' not in attach_data.get('status'):
                break
            # Exponential backoff..
            attempts += 1
            sleep_time = 2**attempts
            celery_logger.debug(
                "Volume %s is not ready (%s). Sleep for %s" %
                (volume.id, volume.extra['status'], sleep_time)
            )
            time.sleep(sleep_time)

        if 'in-use' in volume.extra['status']:
            raise Exception(
                "Failed to detach Volume %s from instance %s" % (volume, instance)
            )

        celery_logger.debug("detach_task finished at %s." % timezone.now())
    except DeviceBusyException:
        # We should NOT retry if the device is busy
        raise
    except Exception as exc:
        # If the volume is NOT attached, do not retry.
        if 'Volume is not attached' in exc.message:
            return
        celery_logger.exception(exc)
        detach_task.retry(exc=exc)
def allocation_source_overage_enforcement_for_user(allocation_source, user):
    celery_logger.debug(
        'allocation_source_overage_enforcement_for_user - allocation_source: %s, user: %s',
        allocation_source, user)
    user_instances = []
    for identity in user.current_identities:
        try:
            celery_logger.debug(
                'allocation_source_overage_enforcement_for_user - identity: %s',
                identity)
            affected_instances = allocation_source_overage_enforcement_for(
                allocation_source, user, identity)
            user_instances.extend(affected_instances)
        except Exception:
            celery_logger.exception(
                'allocation_source_overage_enforcement_for allocation_source: %s, user: %s, and identity: %s',
                allocation_source, user, identity)
    return user_instances
def argo_workflow_exec(
    workflow_filename,
    provider_uuid,
    workflow_data,
    config_file_path=None,
    wait=False
):
    """
    Execute a specified Argo workflow.
    Find file based on provider.
    Pass argument to workflow.

    Args:
        workflow_filename (str): filename of the workflow
        provider_uuid (str): uuid of the provider
        workflow_data (dict): data to be passed to workflow as arguments
        config_file_path (str, optional): path to the config file. will use
            the default one from the setting if None. Defaults to None.
        wait (bool, optional): wait for workflow to complete. Defaults to False.

    Returns:
        (ArgoWorkflow, ArgoWorkflowStatus): workflow and status of the workflow
    """
    try:
        # read configuration from file
        config = read_argo_config(
            config_file_path=config_file_path, provider_uuid=provider_uuid
        )

        # find the workflow definition & construct workflow
        wf_def = argo_lookup_yaml_file(
            config["workflow_base_dir"], workflow_filename, provider_uuid
        )

        # construct workflow context
        context = ArgoContext(config=config)

        # execute
        if wait:
            result = ArgoWorkflow.create_n_watch(context, wf_def, workflow_data)
            return result
        wf = ArgoWorkflow.create(context, wf_def, workflow_data)
        return (wf, ArgoWorkflowStatus())
    except Exception as exc:
        logger.exception(
            "ARGO, argo_workflow_exec(), {} {}".format(type(exc), exc)
        )
        raise exc
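# --- Illustrative usage sketch (not part of the original source). The workflow
# filename, provider UUID and parameters below are placeholders; with wait=True
# the call delegates to ArgoWorkflow.create_n_watch and returns its result,
# otherwise it returns immediately with an empty ArgoWorkflowStatus.
wf, status = argo_workflow_exec(
    "example_workflow.yml",
    "00000000-0000-0000-0000-000000000000",
    {"instance_ip": "10.0.0.5"},
    wait=True,
)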
def update_volume_metadata(driverCls, provider, identity,
                           volume_alias, metadata):
    """
    Update the metadata attached to a volume.
    """
    from service import volume as volume_service
    try:
        celery_logger.debug(
            "update_volume_metadata task started at %s." % datetime.now())
        driver = get_driver(driverCls, provider, identity)
        volume = driver.get_volume(volume_alias)
        if not volume:
            return
        result = volume_service.update_volume_metadata(
            driver, volume, metadata=metadata)
        celery_logger.debug(
            "volume_metadata task finished at %s." % datetime.now())
        return result
    except Exception as exc:
        celery_logger.exception(exc)
        update_volume_metadata.retry(exc=exc)
def remove_empty_networks_for(provider_id):
    provider = Provider.objects.get(id=provider_id)
    os_driver = get_account_driver(provider)
    if not os_driver:
        celery_logger.warn(
            "Cannot remove_empty_networks_for provider %s -- Account Driver not created"
            % provider)
        return
    all_instances = os_driver.admin_driver.list_all_instances()
    project_map = os_driver.network_manager.project_network_map()
    known_project_names = Credential.objects.filter(
        key='ex_project_name').values_list('value', flat=True)
    projects_with_networks = sorted(
        [k for k in project_map.keys() if k in known_project_names])
    for project in projects_with_networks:
        networks = project_map[project]['network']
        if not isinstance(networks, list):
            networks = [networks]
        for network in networks:
            network_name = network['name']
            celery_logger.debug("Checking if network %s is in use" % network_name)
            if running_instances(network_name, all_instances):
                continue
            user = project
            identity = Identity.objects.filter(
                provider_id=provider_id,
                credential__key='ex_project_name',
                credential__value=project).filter(
                    credential__key='key',
                    credential__value=user).first()
            if not identity:
                celery_logger.warn(
                    "NOT Removing project network for User:%s, Project:%s -- No Valid Identity found!"
                    % (user, project))
                continue
            try:
                celery_logger.debug(
                    "Removing project network for User:%s, Project:%s"
                    % (user, project))
                os_driver.delete_user_network(identity)
            except NeutronClientException:
                celery_logger.exception("Neutron unable to remove project "
                                        "network for %s-%s" % (user, project))
            except NeutronException:
                celery_logger.exception("Neutron unable to remove project "
                                        "network for %s-%s" % (user, project))
def check_volume_task(driverCls, provider, identity,
                      instance_id, volume_id, *args, **kwargs):
    try:
        celery_logger.debug("check_volume task started at %s." % datetime.now())
        driver = get_driver(driverCls, provider, identity)
        instance = driver.get_instance(instance_id)
        volume = driver.get_volume(volume_id)
        attach_data = volume.extra['attachments'][0]
        device = attach_data['device']

        private_key = ATMOSPHERE_PRIVATE_KEYFILE
        kwargs.update({'ssh_key': private_key})
        kwargs.update({'timeout': 120})

        # One script to make two checks:
        # 1. Volume exists 2. Volume has a filesystem
        cv_script = check_volume(device)
        # NOTE: non_zero_deploy needed to stop LibcloudDeploymentError
        # from being raised
        kwargs.update({'deploy': cv_script, 'non_zero_deploy': True})
        driver.deploy_to(instance, **kwargs)
        kwargs.pop('non_zero_deploy', None)

        # Check the script's exit status
        if cv_script.exit_status != 0:
            if 'No such file' in cv_script.stdout:
                raise Exception('Volume check failed: %s. '
                                'Device %s does not exist on instance %s'
                                % (volume, device, instance))
            elif 'Bad magic number' in cv_script.stdout:
                # Filesystem needs to be created for this device
                celery_logger.info("Mkfs needed")
                mkfs_script = mkfs_volume(device)
                kwargs.update({'deploy': mkfs_script})
                driver.deploy_to(instance, **kwargs)
            else:
                raise Exception('Volume check failed: Something weird')

        celery_logger.debug("check_volume task finished at %s." % datetime.now())
    except LibcloudDeploymentError as exc:
        celery_logger.exception(exc)
    except Exception as exc:
        celery_logger.warn(exc)
        check_volume_task.retry(exc=exc)
def check_volume_task(driverCls, provider, identity,
                      instance_id, volume_id, device_type='ext4',
                      *args, **kwargs):
    try:
        celery_logger.debug("check_volume task started at %s." % datetime.now())
        driver = get_driver(driverCls, provider, identity)
        instance = driver.get_instance(instance_id)
        volume = driver.get_volume(volume_id)
        username = identity.get_username()
        attach_data = volume.extra['attachments'][0]
        device_location = attach_data['device']
        celery_logger.info("device_location: %s" % device_location)

        # One playbook to make two checks:
        # 1. Volume exists
        # 2. Volume has a filesystem
        #    (If not, create one of type 'device_type')
        playbooks = deploy_check_volume(
            instance.ip, username, instance.id,
            device_location, device_type=device_type)
        celery_logger.info(playbooks.__dict__)
        hostname = build_host_name(instance.id, instance.ip)
        result = not (execution_has_failures(playbooks, hostname)
                      or execution_has_unreachable(playbooks, hostname))
        if not result:
            raise Exception(
                "Error encountered while checking volume for filesystem: %s"
                % playbooks.stats.summarize(host=hostname))
        return result
    except LibcloudDeploymentError as exc:
        celery_logger.exception(exc)
    except Exception as exc:
        celery_logger.warn(exc)
        check_volume_task.retry(exc=exc)
def update_volume_metadata(
    driverCls, provider, identity, volume_alias, metadata
):
    """
    Update the metadata attached to a volume.
    """
    from service import volume as volume_service
    try:
        celery_logger.debug(
            "update_volume_metadata task started at %s." % timezone.now()
        )
        driver = get_driver(driverCls, provider, identity)
        volume = driver.get_volume(volume_alias)
        if not volume:
            return
        return volume_service._update_volume_metadata(
            driver, volume, metadata=metadata
        )
    except Exception as exc:
        celery_logger.exception(exc)
        update_volume_metadata.retry(exc=exc)
def send_email(subject, body, from_email, to, cc=None,
               fail_silently=False, html=False):
    """ Use django.core.mail.EmailMessage to send and log an Atmosphere email.
    """
    try:
        msg = EmailMessage(subject=subject, body=body,
                           from_email=from_email,
                           to=to,
                           cc=cc)
        if html:
            msg.content_subtype = 'html'
        msg.send(fail_silently=fail_silently)
        log_message = "\n> From:{0}\n> To:{1}\n> Cc:{2}\n> Subject:{3}\n> Body:\n{4}"
        args = (from_email, to, cc, subject, body)
        email_logger.info(log_message.format(*args))
        return True
    except Exception as e:
        celery_logger.exception(e)
        return False
def attach_task(driverCls, provider, identity, instance_id, volume_id,
                device_choice=None, *args, **kwargs):
    try:
        celery_logger.debug("attach_task started at %s." % datetime.now())
        driver = get_driver(driverCls, provider, identity)
        from service.volume import attach_volume
        # TODO: Test pulling this up -- out of band
        attach_volume(driver, instance_id, volume_id,
                      device_choice=device_choice)

        # When the result returns the volume will be 'attaching'
        # We can't do anything until the volume is 'available/in-use'
        attempts = 0
        while True:
            volume = driver.get_volume(volume_id)
            # Give up if you can't find the volume
            if not volume:
                return None
            if attempts > 6:  # After 6 attempts (~1min)
                break
            # Openstack Check
            if isinstance(driver, OSDriver) and\
                    'attaching' not in volume.extra.get('status', ''):
                break
            if isinstance(driver, EucaDriver) and\
                    'attaching' not in volume.extra.get('status', ''):
                break
            # Exponential backoff..
            attempts += 1
            sleep_time = 2**attempts
            celery_logger.debug(
                "Volume %s is not ready (%s). Sleep for %s"
                % (volume.id, volume.extra.get('status', 'no-status'),
                   sleep_time))
            time.sleep(sleep_time)

        if 'available' in volume.extra.get('status', ''):
            raise Exception("Volume %s failed to attach to instance %s"
                            % (volume.id, instance_id))

        # Device path for euca == openstack
        try:
            attach_data = volume.extra['attachments'][0]
            device = attach_data['device']
        except (IndexError, KeyError) as bad_fetch:
            celery_logger.warn("Could not find 'device' in "
                               "volume.extra['attachments']: "
                               "Volume:%s Extra:%s" % (volume.id, volume.extra))
            device = None

        celery_logger.debug("attach_task finished at %s." % datetime.now())
        return device
    except Exception as exc:
        celery_logger.exception(exc)
        attach_task.retry(exc=exc)
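# --- Illustrative note (not part of the original source): the polling loop
# above sleeps 2**attempts seconds between volume-status checks, so the waits
# grow 2, 4, 8, 16, 32, 64 seconds before the loop gives up and inspects the
# final status. A hypothetical direct invocation might look like this, with
# driverCls/provider/identity and the aliases standing in for real values.
device = attach_task(
    driverCls, provider, identity,
    "instance-alias", "volume-alias",
    device_choice="/dev/vdc",
)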