def get_delete_instance_tasks(cls, instance, serialized_instance, release_floating_ips):
    """Build the list of tasks that delete an instance on the backend.

    Depending on *release_floating_ips*, the related floating IPs are either
    deleted outright or their runtime state is re-pulled after the instance
    is gone; for shared tenants a tenant-wide floating-IP pull is appended.
    """
    task_list = [
        core_tasks.BackendMethodTask().si(
            serialized_instance,
            backend_method='delete_instance',
            state_transition='begin_deleting',
        ),
        core_tasks.PollBackendCheckTask().si(
            serialized_instance,
            backend_check_method='is_instance_deleted',
        ),
    ]

    # Choose the per-floating-IP backend call: release them, or just
    # refresh their runtime state after the instance has been deleted.
    fip_method = ('delete_floating_ip' if release_floating_ips
                  else 'pull_floating_ip_runtime_state')
    for position, floating_ip in enumerate(instance.floating_ips):
        # The first floating-IP task waits 5 seconds; the rest run at once.
        delay = 5 if position == 0 else 0
        task_list.append(
            core_tasks.BackendMethodTask().si(
                core_utils.serialize_instance(floating_ip), fip_method,
            ).set(countdown=delay)
        )

    # A shared tenant owns the floating-IP pool: refresh it as a whole.
    shared_tenant = instance.service_project_link.service.settings.scope
    if shared_tenant:
        task_list.append(core_tasks.ExecutorTask().si(
            core_utils.serialize_class(openstack_executors.TenantPullFloatingIPsExecutor),
            core_utils.serialize_instance(shared_tenant),
        ))
    return task_list
def create_floating_ips(cls, instance, serialized_instance):
    """Return tasks that create and attach floating IPs for *instance*."""
    task_list = []

    # Floating IPs without a backend_id do not exist on the backend yet.
    for floating_ip in instance.floating_ips.filter(backend_id=''):
        task_list.append(core_tasks.BackendMethodTask().si(
            core_utils.serialize_instance(floating_ip), 'create_floating_ip'))

    # Attach all of the instance's floating IPs on the backend.
    task_list.append(core_tasks.BackendMethodTask().si(
        serialized_instance, 'push_instance_floating_ips'))

    # Poll each floating IP until it becomes ACTIVE; the first poll waits
    # 5 seconds to give the backend time to process the attachment.
    for position, floating_ip in enumerate(instance.floating_ips):
        task_list.append(core_tasks.PollRuntimeStateTask().si(
            core_utils.serialize_instance(floating_ip),
            backend_pull_method='pull_floating_ip_runtime_state',
            success_state='ACTIVE',
            erred_state='ERRED',
        ).set(countdown=5 if position == 0 else 0))

    # A shared tenant owns the floating-IP pool: refresh it as a whole.
    shared_tenant = instance.service_project_link.service.settings.scope
    if shared_tenant:
        task_list.append(core_tasks.ExecutorTask().si(
            core_utils.serialize_class(openstack_executors.TenantPullFloatingIPsExecutor),
            core_utils.serialize_instance(shared_tenant),
        ))
    return task_list
def get_task_signature(cls, instance, serialized_instance, **kwargs):
    """
    Delete each resource using specific executor.
    Convert executors to task and combine all deletion task into single sequential task.
    """
    cleanup = []
    for model_cls, executor_cls in cls.executors:
        cleanup.append(ProjectResourceCleanupTask().si(
            core_utils.serialize_class(executor_cls),
            core_utils.serialize_class(model_cls),
            serialized_instance,
        ))
    # With nothing to clean up, return a no-op signature instead of an
    # empty chain.
    if not cleanup:
        return core_tasks.EmptyTask()
    return chain(cleanup)
def get_task_signature(cls, tenant, serialized_tenant):
    """Return a task that pushes the tenant's quota limits to the backend.

    Integral limits are converted to ``int`` so the backend receives e.g.
    ``10`` rather than ``10.0``; genuinely fractional limits are passed
    through unchanged. Previously ``int(q.limit)`` silently truncated
    fractional limits — this now matches the quota push performed during
    tenant creation (``int(q.limit) if q.limit.is_integer() else q.limit``).
    """
    serialized_executor = core_utils.serialize_class(
        openstack_executors.TenantPushQuotasExecutor)
    quotas = {
        q.name: int(q.limit) if q.limit.is_integer() else q.limit
        for q in tenant.quotas.all()
    }
    return core_tasks.ExecutorTask().si(
        serialized_executor, serialized_tenant, quotas=quotas)
def lookups(self, request, model_admin):
    """Return (serialized model class, display name) filter choices,
    sorted by display name."""
    choices = []
    # Resource models registered via SupportedServices.
    for name, model in SupportedServices.get_resource_models().items():
        choices.append((model, name))
    # Estimated models that are not resources get their class name as label.
    for model in models.PriceEstimate.get_estimated_models():
        if not issubclass(model, structure_models.ResourceMixin):
            choices.append((model, model.__name__))
    serialized = [
        (core_utils.serialize_class(model), name) for model, name in choices
    ]
    serialized.sort(key=lambda item: item[1])
    return serialized
def get_task_signature(cls, instance, serialized_instance, **kwargs):
    """Chain the instance's cleanup executors into one sequential task.

    Sequencing matters: related resources must be cleaned up in the
    order the executors are listed.
    """
    cleanup = [
        core_tasks.ExecutorTask().si(
            core_utils.serialize_class(executor), serialized_instance)
        for executor in cls.get_executors()
    ]
    # No executors registered: return a no-op signature.
    return chain(cleanup) if cleanup else core_tasks.EmptyTask()
def get_task_signature(cls, job, serialized_job, **kwargs):
    """Build a chain that marks the job as deleting and then deletes each
    related instance (with its volumes) via InstanceDeleteExecutor."""
    signatures = [
        core_tasks.StateTransitionTask().si(
            serialized_job, state_transition='begin_deleting')
    ]
    serialized_executor = core_utils.serialize_class(
        openstack_executors.InstanceDeleteExecutor)
    for resource in job.get_related_resources():
        # Instances already in the ERRED state are force-deleted.
        signatures.append(core_tasks.ExecutorTask().si(
            serialized_executor,
            core_utils.serialize_instance(resource),
            force=resource.state == openstack_models.Instance.States.ERRED,
            delete_volumes=True,
        ))
    return chain(*signatures)
def as_signature(cls, instance, **kwargs):
    """Compose the executor's pre-apply, main, success and failure parts
    into a single Celery signature.

    ``None`` and ``EmptyTask`` parts are skipped; the failure signature,
    if meaningful, is attached as the error callback.
    """
    serialized_instance = utils.serialize_instance(instance)
    pre_apply = tasks.PreApplyExecutorTask().si(
        utils.serialize_class(cls), serialized_instance, **kwargs)
    main = cls.get_task_signature(instance, serialized_instance, **kwargs)
    link = cls.get_success_signature(instance, serialized_instance, **kwargs)
    link_error = cls.get_failure_signature(instance, serialized_instance, **kwargs)
    parts = [
        task for task in [pre_apply, main, link]
        if task is not None and not isinstance(task, tasks.EmptyTask)
    ]
    signature = reduce(operator.or_, parts)
    # Fix: apply the same EmptyTask/None filtering to the error callback
    # as to the main chain above — a bare truthiness check would have
    # attached a no-op EmptyTask as the error handler.
    if link_error is not None and not isinstance(link_error, tasks.EmptyTask):
        signature = signature.on_error(link_error)
    return signature
def get_task_signature(cls, tenant, serialized_tenant, pull_security_groups=True, **kwargs):
    """ Create tenant, add user to it, create internal network, pull quotas """
    # we assume that tenant one network and subnet after creation
    # NOTE(review): `networks.first()` returning None would raise
    # AttributeError below — the assumption above is unchecked.
    network = tenant.networks.first()
    subnet = network.subnets.first()
    serialized_network = core_utils.serialize_instance(network)
    serialized_subnet = core_utils.serialize_instance(subnet)
    # Order matters: the tenant must exist before users are added and
    # before its network/subnet are created.
    creation_tasks = [
        core_tasks.BackendMethodTask().si(
            serialized_tenant, 'create_tenant',
            state_transition='begin_creating'),
        core_tasks.BackendMethodTask().si(
            serialized_tenant, 'add_admin_user_to_tenant'),
        core_tasks.BackendMethodTask().si(
            serialized_tenant, 'create_tenant_user'),
        core_tasks.BackendMethodTask().si(
            serialized_network, 'create_network',
            state_transition='begin_creating'),
        core_tasks.BackendMethodTask().si(
            serialized_subnet, 'create_subnet',
            state_transition='begin_creating'),
    ]
    # Integral limits are pushed as ints (10, not 10.0); fractional
    # limits are passed through unchanged.
    quotas = tenant.quotas.all()
    quotas = {
        q.name: int(q.limit) if q.limit.is_integer() else q.limit
        for q in quotas
    }
    creation_tasks.append(core_tasks.BackendMethodTask().si(
        serialized_tenant, 'push_tenant_quotas', quotas))
    # handle security groups
    # XXX: Create default security groups that was connected to SPL earlier.
    serialized_executor = core_utils.serialize_class(
        SecurityGroupCreateExecutor)
    for security_group in tenant.security_groups.all():
        serialized_security_group = core_utils.serialize_instance(
            security_group)
        creation_tasks.append(core_tasks.ExecutorTask().si(
            serialized_executor, serialized_security_group))
    if pull_security_groups:
        creation_tasks.append(core_tasks.BackendMethodTask().si(
            serialized_tenant, 'pull_tenant_security_groups'))
    # initialize external network if it defined in service settings
    service_settings = tenant.service_project_link.service.settings
    customer = tenant.service_project_link.project.customer
    external_network_id = service_settings.get_option(
        'external_network_id')
    # A per-customer override, when present, wins over the service-wide
    # external network setting.
    try:
        customer_openstack = models.CustomerOpenStack.objects.get(
            settings=service_settings, customer=customer)
        external_network_id = customer_openstack.external_network_id
    except models.CustomerOpenStack.DoesNotExist:
        pass
    if external_network_id and not kwargs.get('skip_connection_extnet'):
        creation_tasks.append(core_tasks.BackendMethodTask().si(
            serialized_tenant, 'connect_tenant_to_external_network',
            external_network_id=external_network_id))
    # Re-pull quotas at the end so usage reflects the created resources.
    creation_tasks.append(core_tasks.BackendMethodTask().si(
        serialized_tenant, 'pull_tenant_quotas'))
    return chain(*creation_tasks)
def get_task_signature(cls, instance, serialized_instance, ssh_key=None, flavor=None):
    """ Create all instance volumes in parallel and wait for them to provision """
    serialized_volumes = [
        core_utils.serialize_instance(volume)
        for volume in instance.volumes.all()
    ]
    _tasks = [
        tasks.ThrottleProvisionStateTask().si(
            serialized_instance, state_transition='begin_creating')
    ]
    # Create volumes
    for serialized_volume in serialized_volumes:
        _tasks.append(tasks.ThrottleProvisionTask().si(
            serialized_volume, 'create_volume',
            state_transition='begin_creating'))
    for index, serialized_volume in enumerate(serialized_volumes):
        # Wait for volume creation; only the first poll delays (30s) to
        # give the backend time before polling starts.
        _tasks.append(core_tasks.PollRuntimeStateTask().si(
            serialized_volume,
            backend_pull_method='pull_volume_runtime_state',
            success_state='available',
            erred_state='error',
        ).set(countdown=30 if index == 0 else 0))
        # Pull volume to sure that it is bootable
        _tasks.append(core_tasks.BackendMethodTask().si(
            serialized_volume, 'pull_volume'))
        # Mark volume as OK
        _tasks.append(core_tasks.StateTransitionTask().si(
            serialized_volume, state_transition='set_ok'))
    # Create instance based on volumes
    # NOTE(review): flavor=None would raise AttributeError here —
    # callers are expected to always pass a flavor.
    kwargs = {
        'backend_flavor_id': flavor.backend_id,
    }
    if ssh_key is not None:
        kwargs['public_key'] = ssh_key.public_key
    # Wait 10 seconds after volume creation due to OpenStack restrictions.
    _tasks.append(core_tasks.BackendMethodTask().si(
        serialized_instance, 'create_instance',
        **kwargs).set(countdown=10))
    # Wait for instance creation
    _tasks.append(core_tasks.PollRuntimeStateTask().si(
        serialized_instance,
        backend_pull_method='pull_instance_runtime_state',
        success_state=models.Instance.RuntimeStates.ACTIVE,
        erred_state=models.Instance.RuntimeStates.ERROR,
    ))
    # Update volumes runtime state and device name
    for serialized_volume in serialized_volumes:
        _tasks.append(core_tasks.BackendMethodTask().si(
            serialized_volume,
            backend_method='pull_volume',
            update_fields=['runtime_state', 'device']))
    # TODO: Port should be created before instance is created.
    # The following calls should be removed: pull_created_instance_internal_ips and push_instance_internal_ips.
    # Pull instance internal IPs
    # pull_instance_internal_ips method cannot be used, because it requires backend_id to update
    # existing internal IPs. However, internal IPs of the created instance does not have backend_ids.
    _tasks.append(core_tasks.BackendMethodTask().si(
        serialized_instance, 'pull_created_instance_internal_ips'))
    # Consider the case when instance has several internal IPs connected to
    # different subnets within the same network.
    # When OpenStack instance is provisioned, network port is created.
    # This port is pulled into Waldur using the pull_created_instance_internal_ips method.
    # However, it does not take into account subnets, because OpenStack
    # does not allow to specify subnet on instance creation.
    # See also: https://specs.openstack.org/openstack/nova-specs/specs/juno/approved/selecting-subnet-when-creating-vm.html
    # Therefore we need to push remaining network ports for subnets explicitly.
    _tasks.append(core_tasks.BackendMethodTask().si(
        serialized_instance, 'push_instance_internal_ips'))
    # Pull instance security groups
    _tasks.append(core_tasks.BackendMethodTask().si(
        serialized_instance, 'pull_instance_security_groups'))
    # Create non-existing floating IPs
    for floating_ip in instance.floating_ips.filter(backend_id=''):
        serialized_floating_ip = core_utils.serialize_instance(floating_ip)
        _tasks.append(core_tasks.BackendMethodTask().si(
            serialized_floating_ip, 'create_floating_ip'))
    # Push instance floating IPs
    _tasks.append(core_tasks.BackendMethodTask().si(
        serialized_instance, 'push_instance_floating_ips'))
    # Wait for operation completion; the first poll waits 5 seconds.
    for index, floating_ip in enumerate(instance.floating_ips):
        _tasks.append(core_tasks.PollRuntimeStateTask().si(
            core_utils.serialize_instance(floating_ip),
            backend_pull_method='pull_floating_ip_runtime_state',
            success_state='ACTIVE',
            erred_state='ERRED',
        ).set(countdown=5 if not index else 0))
    # A shared tenant owns the floating-IP pool: refresh it as a whole.
    shared_tenant = instance.service_project_link.service.settings.scope
    if shared_tenant:
        serialized_executor = core_utils.serialize_class(
            openstack_executors.TenantPullFloatingIPsExecutor)
        serialized_tenant = core_utils.serialize_instance(shared_tenant)
        _tasks.append(core_tasks.ExecutorTask().si(serialized_executor, serialized_tenant))
    return chain(*_tasks)
def get_task_signature(cls, instance, serialized_instance, ssh_key=None, flavor=None):
    """ Create all instance volumes in parallel and wait for them to provision """
    serialized_volumes = [core_utils.serialize_instance(volume)
                          for volume in instance.volumes.all()]
    _tasks = [tasks.ThrottleProvisionStateTask().si(serialized_instance,
                                                    state_transition='begin_creating')]
    # Create volumes
    for serialized_volume in serialized_volumes:
        _tasks.append(tasks.ThrottleProvisionTask().si(
            serialized_volume, 'create_volume',
            state_transition='begin_creating'))
    for index, serialized_volume in enumerate(serialized_volumes):
        # Wait for volume creation; only the first poll delays (30s) to
        # give the backend time before polling starts.
        _tasks.append(core_tasks.PollRuntimeStateTask().si(
            serialized_volume,
            backend_pull_method='pull_volume_runtime_state',
            success_state='available',
            erred_state='error',
        ).set(countdown=30 if index == 0 else 0))
        # Pull volume to sure that it is bootable
        _tasks.append(core_tasks.BackendMethodTask().si(serialized_volume, 'pull_volume'))
        # Mark volume as OK
        _tasks.append(core_tasks.StateTransitionTask().si(serialized_volume, state_transition='set_ok'))
    # Create instance based on volumes
    # NOTE(review): flavor=None would raise AttributeError here —
    # callers are expected to always pass a flavor.
    kwargs = {
        'backend_flavor_id': flavor.backend_id,
    }
    if ssh_key is not None:
        kwargs['public_key'] = ssh_key.public_key
    # Wait 10 seconds after volume creation due to OpenStack restrictions.
    _tasks.append(core_tasks.BackendMethodTask().si(
        serialized_instance, 'create_instance',
        **kwargs).set(countdown=10))
    # Wait for instance creation
    _tasks.append(core_tasks.PollRuntimeStateTask().si(
        serialized_instance,
        backend_pull_method='pull_instance_runtime_state',
        success_state=models.Instance.RuntimeStates.ACTIVE,
        erred_state=models.Instance.RuntimeStates.ERROR,
    ))
    # Update volumes runtime state and device name
    for serialized_volume in serialized_volumes:
        _tasks.append(core_tasks.BackendMethodTask().si(
            serialized_volume,
            backend_method='pull_volume',
            update_fields=['runtime_state', 'device']
        ))
    # Pull instance internal IPs
    # pull_instance_internal_ips method cannot be used, because it requires backend_id to update
    # existing internal IPs. However, internal IPs of the created instance does not have backend_ids.
    _tasks.append(core_tasks.BackendMethodTask().si(serialized_instance, 'pull_created_instance_internal_ips'))
    # Pull instance security groups
    _tasks.append(core_tasks.BackendMethodTask().si(serialized_instance, 'pull_instance_security_groups'))
    # Create non-existing floating IPs
    for floating_ip in instance.floating_ips.filter(backend_id=''):
        serialized_floating_ip = core_utils.serialize_instance(floating_ip)
        _tasks.append(core_tasks.BackendMethodTask().si(serialized_floating_ip, 'create_floating_ip'))
    # Push instance floating IPs
    _tasks.append(core_tasks.BackendMethodTask().si(serialized_instance, 'push_instance_floating_ips'))
    # Wait for operation completion; the first poll waits 5 seconds.
    for index, floating_ip in enumerate(instance.floating_ips):
        _tasks.append(core_tasks.PollRuntimeStateTask().si(
            core_utils.serialize_instance(floating_ip),
            backend_pull_method='pull_floating_ip_runtime_state',
            success_state='ACTIVE',
            erred_state='ERRED',
        ).set(countdown=5 if not index else 0))
    # A shared tenant owns the floating-IP pool: refresh it as a whole.
    shared_tenant = instance.service_project_link.service.settings.scope
    if shared_tenant:
        serialized_executor = core_utils.serialize_class(openstack_executors.TenantPullFloatingIPsExecutor)
        serialized_tenant = core_utils.serialize_instance(shared_tenant)
        _tasks.append(core_tasks.ExecutorTask().si(serialized_executor, serialized_tenant))
    return chain(*_tasks)