Example #1
def get_task_signature(cls, instance, serialized_instance, **kwargs):
     flavor = kwargs.pop('flavor')
     return chain(
         core_tasks.BackendMethodTask().si(
             serialized_instance,
             backend_method='resize_instance',
             state_transition='begin_updating',
             flavor_id=flavor.backend_id,
         ),
         core_tasks.PollRuntimeStateTask().si(
             serialized_instance,
             backend_pull_method='pull_instance_runtime_state',
             success_state='VERIFY_RESIZE',
             erred_state='ERRED',
         ),
         core_tasks.BackendMethodTask().si(
             serialized_instance, backend_method='confirm_instance_resize'
         ),
         core_tasks.PollRuntimeStateTask().si(
             serialized_instance,
             backend_pull_method='pull_instance_runtime_state',
             success_state='SHUTOFF',
             erred_state='ERRED',
         ),
     )
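Each example in this collection relies on the same Celery primitive (chain) and a set of Waldur-style helpers. A minimal sketch of the imports they assume is given below; the exact module paths are an assumption inferred from the identifiers and are not stated in the source.

# Assumed imports for the examples in this collection (module paths are a guess
# based on the Waldur-style identifiers; adjust to the actual project layout).
from celery import chain

from waldur_core.core import tasks as core_tasks
from waldur_core.core import utils as core_utils
# Individual examples additionally reference app-local `tasks`, `models`,
# and `openstack_executors` modules from their own packages.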
Example #2
    def get_task_signature(cls, volume, serialized_volume, **kwargs):
        if volume.instance is None:
            return chain(
                core_tasks.BackendMethodTask().si(
                    serialized_volume,
                    backend_method='extend_volume',
                    state_transition='begin_updating',
                ),
                core_tasks.PollRuntimeStateTask().si(
                    serialized_volume,
                    backend_pull_method='pull_volume_runtime_state',
                    success_state='available',
                    erred_state='error'
                )
            )

        return chain(
            core_tasks.StateTransitionTask().si(
                core_utils.serialize_instance(volume.instance),
                state_transition='begin_updating'
            ),
            core_tasks.BackendMethodTask().si(
                serialized_volume,
                backend_method='detach_volume',
                state_transition='begin_updating'
            ),
            core_tasks.PollRuntimeStateTask().si(
                serialized_volume,
                backend_pull_method='pull_volume_runtime_state',
                success_state='available',
                erred_state='error'
            ),
            core_tasks.BackendMethodTask().si(
                serialized_volume,
                backend_method='extend_volume',
            ),
            core_tasks.PollRuntimeStateTask().si(
                serialized_volume,
                backend_pull_method='pull_volume_runtime_state',
                success_state='available',
                erred_state='error'
            ),
            core_tasks.BackendMethodTask().si(
                serialized_volume,
                instance_uuid=volume.instance.uuid.hex,
                device=volume.device,
                backend_method='attach_volume',
            ),
            core_tasks.PollRuntimeStateTask().si(
                serialized_volume,
                backend_pull_method='pull_volume_runtime_state',
                success_state='in-use',
                erred_state='error'
            ),
        )
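Every link in these chains is built with .si(), Celery's immutable signature, so the return value of one task is never injected into the arguments of the next. A minimal, generic sketch of the difference, using hypothetical add and log_args tasks that are not part of the source:

from celery import chain, shared_task

@shared_task
def add(x, y):
    return x + y

@shared_task
def log_args(*args):
    print(args)

# .s() forwards the previous result as the first argument of the next task;
# .si() drops it, which is the behaviour the executors above rely on.
chain(add.s(2, 2), log_args.s())   # when applied, log_args would receive (4,)
chain(add.s(2, 2), log_args.si())  # when applied, log_args would receive ()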
Example #3
 def get_task_signature(cls, instance, serialized_instance, **kwargs):
     _tasks = [
         core_tasks.StateTransitionTask().si(
             serialized_instance, state_transition='begin_updating')
     ]
     # Create non-existing floating IPs
     for floating_ip in instance.floating_ips.filter(backend_id=''):
         serialized_floating_ip = core_utils.serialize_instance(floating_ip)
         _tasks.append(core_tasks.BackendMethodTask().si(
             serialized_floating_ip, 'create_floating_ip'))
     # Push instance floating IPs
     _tasks.append(core_tasks.BackendMethodTask().si(
         serialized_instance, 'push_instance_floating_ips'))
     # Wait for operation completion
     for index, floating_ip in enumerate(instance.floating_ips):
         _tasks.append(core_tasks.PollRuntimeStateTask().si(
             core_utils.serialize_instance(floating_ip),
             backend_pull_method='pull_floating_ip_runtime_state',
             success_state='ACTIVE',
             erred_state='ERRED',
         ).set(countdown=5 if not index else 0))
     # Pull floating IPs again to update state of disconnected IPs
     _tasks.append(core_tasks.IndependentBackendMethodTask().si(
         serialized_instance, 'pull_floating_ips'))
     return chain(*_tasks)
Example #4
 def get_task_signature(cls, instance, serialized_instance, user,
                        install_longhorn):
     _tasks = [
         core_tasks.BackendMethodTask().si(
             serialized_instance,
             'create_cluster',
             state_transition='begin_creating')
     ]
     _tasks += cls.create_nodes(instance.node_set.all(), user)
     _tasks += [
         core_tasks.PollRuntimeStateTask().si(
             serialized_instance,
             backend_pull_method='check_cluster_nodes',
             success_state=models.Cluster.RuntimeStates.ACTIVE,
             erred_state='error',
         )
     ]
     _tasks += [
         core_tasks.BackendMethodTask().si(
             serialized_instance,
             'pull_cluster',
         )
     ]
     if install_longhorn:
         # NB: countdown is needed for synchronization: wait until the cluster is ready for application installation
         _tasks += [
             core_tasks.BackendMethodTask().si(
                 serialized_instance,
                 'install_longhorn_to_cluster').set(countdown=30)
         ]
     return chain(*_tasks)
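The signature returned by get_task_signature is presumably dispatched by an executor base class elsewhere; a hedged sketch of what that call site might look like (the ClusterCreateExecutor name and the surrounding code are illustrative assumptions, not taken from the source):

# Hypothetical dispatch of the chain built above.
serialized_cluster = core_utils.serialize_instance(cluster)
signature = ClusterCreateExecutor.get_task_signature(
    cluster, serialized_cluster, user, install_longhorn=True
)
signature.apply_async()  # runs the chained tasks asynchronously, honoring per-link countdowns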
Example #5
    def create_instance(cls, serialized_instance, flavor, ssh_key=None):
        """
        It is assumed that volumes and network ports have been created beforehand.
        """
        _tasks = []
        kwargs = {
            'backend_flavor_id': flavor.backend_id,
        }
        if ssh_key is not None:
            kwargs['public_key'] = ssh_key.public_key

        # Wait 10 seconds after volume creation due to OpenStack restrictions.
        _tasks.append(
            core_tasks.BackendMethodTask()
            .si(serialized_instance, 'create_instance', **kwargs)
            .set(countdown=10)
        )

        # Wait for instance creation
        _tasks.append(
            core_tasks.PollRuntimeStateTask().si(
                serialized_instance,
                backend_pull_method='pull_instance_runtime_state',
                success_state=models.Instance.RuntimeStates.ACTIVE,
                erred_state=models.Instance.RuntimeStates.ERROR,
            )
        )
        return _tasks
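Unlike most examples here, create_instance returns a plain list of signatures rather than a chain, so a caller can splice it into a larger flow. An illustrative composition, as it might appear inside a get_task_signature method (the volume_tasks variable and the surrounding method are assumptions):

# Illustrative only: combine helper task lists into a single chain.
volume_tasks = cls.create_volumes(serialized_volumes)  # helper shown further below
instance_tasks = cls.create_instance(serialized_instance, flavor, ssh_key=ssh_key)
return chain(*(volume_tasks + instance_tasks))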
Example #6
    def get_task_signature(cls, backup, serialized_backup, **kwargs):
        serialized_snapshots = [
            core_utils.serialize_instance(snapshot)
            for snapshot in backup.snapshots.all()
        ]

        _tasks = [
            core_tasks.StateTransitionTask().si(
                serialized_backup, state_transition='begin_creating'
            )
        ]
        for serialized_snapshot in serialized_snapshots:
            _tasks.append(
                tasks.ThrottleProvisionTask().si(
                    serialized_snapshot,
                    'create_snapshot',
                    force=True,
                    state_transition='begin_creating',
                )
            )
            _tasks.append(
                core_tasks.PollRuntimeStateTask().si(
                    serialized_snapshot,
                    backend_pull_method='pull_snapshot_runtime_state',
                    success_state='available',
                    erred_state='error',
                )
            )
            _tasks.append(
                core_tasks.StateTransitionTask().si(
                    serialized_snapshot, state_transition='set_ok'
                )
            )

        return chain(*_tasks)
Example #7
    def create_floating_ips(cls, instance, serialized_instance):
        _tasks = []

        # Create non-existing floating IPs
        for floating_ip in instance.floating_ips.filter(backend_id=''):
            serialized_floating_ip = core_utils.serialize_instance(floating_ip)
            _tasks.append(core_tasks.BackendMethodTask().si(serialized_floating_ip, 'create_floating_ip'))

        # Push instance floating IPs
        _tasks.append(core_tasks.BackendMethodTask().si(serialized_instance, 'push_instance_floating_ips'))

        # Wait for operation completion
        for index, floating_ip in enumerate(instance.floating_ips):
            _tasks.append(core_tasks.PollRuntimeStateTask().si(
                core_utils.serialize_instance(floating_ip),
                backend_pull_method='pull_floating_ip_runtime_state',
                success_state='ACTIVE',
                erred_state='ERRED',
            ).set(countdown=5 if not index else 0))

        shared_tenant = instance.service_project_link.service.settings.scope
        if shared_tenant:
            serialized_executor = core_utils.serialize_class(openstack_executors.TenantPullFloatingIPsExecutor)
            serialized_tenant = core_utils.serialize_instance(shared_tenant)
            _tasks.append(core_tasks.ExecutorTask().si(serialized_executor, serialized_tenant))

        return _tasks
Example #8
    def create_volumes(cls, serialized_volumes):
        """
        Create all instance volumes and wait for them to provision.
        """
        _tasks = []

        # Create volumes
        for serialized_volume in serialized_volumes:
            _tasks.append(tasks.ThrottleProvisionTask().si(
                serialized_volume, 'create_volume', state_transition='begin_creating'))

        for index, serialized_volume in enumerate(serialized_volumes):
            # Wait for volume creation
            _tasks.append(core_tasks.PollRuntimeStateTask().si(
                serialized_volume,
                backend_pull_method='pull_volume_runtime_state',
                success_state='available',
                erred_state='error',
            ).set(countdown=30 if index == 0 else 0))

            # Pull volume to make sure that it is bootable
            _tasks.append(core_tasks.BackendMethodTask().si(serialized_volume, 'pull_volume'))

            # Mark volume as OK
            _tasks.append(core_tasks.StateTransitionTask().si(serialized_volume, state_transition='set_ok'))

        return _tasks
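The countdown=30 if index == 0 else 0 pattern above delays only the first poll, giving the backend time to start provisioning before polling begins; the remaining polls fire as soon as the chain reaches them. A minimal illustration with a hypothetical check_status task and volume_ids list:

# Only the first polling signature is delayed; the rest run immediately in turn.
polls = [
    check_status.si(volume_id).set(countdown=30 if index == 0 else 0)
    for index, volume_id in enumerate(volume_ids)
]
flow = chain(*polls)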
Example #9
 def get_task_signature(cls, project, serialized_project, **kwargs):
     return chain(
         tasks.StateTransitionTask().si(serialized_project,
                                        state_transition='begin_updating'),
         tasks.PollRuntimeStateTask().si(
             serialized_project,
             backend_pull_method='import_project_batch',
             success_state='success',
             erred_state='error',
         ))
Example #10
 def get_task_signature(cls, volume, serialized_volume, **kwargs):
     return chain(
         core_tasks.BackendMethodTask().si(
             serialized_volume, backend_method='detach_volume', state_transition='begin_updating'),
         core_tasks.PollRuntimeStateTask().si(
             serialized_volume,
             backend_pull_method='pull_volume_runtime_state',
             success_state='available',
             erred_state='error',
         )
     )
Example #11
 def get_task_signature(cls, snapshot, serialized_snapshot, **kwargs):
     return chain(
         tasks.ThrottleProvisionTask().si(
             serialized_snapshot,
             'create_snapshot',
             state_transition='begin_creating'),
         core_tasks.PollRuntimeStateTask().si(
             serialized_snapshot,
             backend_pull_method='pull_snapshot_runtime_state',
             success_state='available',
             erred_state='error',
         ).set(countdown=10))
Example #12
 def get_task_signature(cls, instance, serialized_instance, **kwargs):
     return chain(
         core_tasks.BackendMethodTask().si(
             serialized_instance, backend_method='reboot_vm', state_transition='begin_updating',
         ),
         core_tasks.PollRuntimeStateTask().si(
             serialized_instance,
             backend_pull_method='pull_virtual_machine_runtime_state',
             success_state='running',
             erred_state='error'
         ),
     )
Example #13
 def get_task_signature(cls, instance, serialized_instance, **kwargs):
     return chain(
         core_tasks.BackendMethodTask().si(
             serialized_instance, 'stop_instance', state_transition='begin_updating',
         ),
         core_tasks.PollRuntimeStateTask().si(
             serialized_instance,
             backend_pull_method='pull_instance_runtime_state',
             success_state='SHUTOFF',
             erred_state='ERRED',
         ),
     )
Example #14
 def get_task_signature(cls, instance, serialized_instance, **kwargs):
     size = kwargs.pop('size')
     return chain(
         core_tasks.BackendMethodTask().si(
             serialized_instance,
             backend_method='resize_instance',
             state_transition='begin_updating',
             size_id=size.backend_id),
         core_tasks.PollRuntimeStateTask().si(
             serialized_instance,
             backend_pull_method='pull_instance_runtime_state',
             success_state='stopped',
             erred_state='error').set(countdown=30))
Example #15
    def get_task_signature(cls, snapshot_restoration, serialized_snapshot_restoration, **kwargs):
        serialized_volume = core_utils.serialize_instance(snapshot_restoration.volume)

        _tasks = [
            tasks.ThrottleProvisionTask().si(
                serialized_volume, 'create_volume', state_transition='begin_creating'),
            core_tasks.PollRuntimeStateTask().si(
                serialized_volume, 'pull_volume_runtime_state', success_state='available', erred_state='error',
            ).set(countdown=30),
            core_tasks.BackendMethodTask().si(serialized_volume, 'remove_bootable_flag'),
            core_tasks.BackendMethodTask().si(serialized_volume, 'pull_volume'),
        ]

        return chain(*_tasks)
Example #16
 def get_detach_data_volumes_tasks(cls, instance, serialized_instance):
     data_volumes = instance.volumes.all().filter(bootable=False)
     detach_volumes = [
         core_tasks.BackendMethodTask().si(
             core_utils.serialize_instance(volume),
             backend_method='detach_volume',
         ) for volume in data_volumes
     ]
     check_volumes = [
         core_tasks.PollRuntimeStateTask().si(
             core_utils.serialize_instance(volume),
             backend_pull_method='pull_volume_runtime_state',
             success_state='available',
             erred_state='error') for volume in data_volumes
     ]
     return detach_volumes + check_volumes
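Because the detach and poll signatures come back as one flat list, chaining it issues every detach_volume call before any polling starts. A hedged usage sketch; where and how this list is actually consumed is an assumption:

# Illustrative only: run all detaches first, then poll each volume until it is 'available'.
detach_flow = chain(*cls.get_detach_data_volumes_tasks(instance, serialized_instance))
detach_flow.apply_async()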
Example #17
 def get_task_signature(cls, instance, serialized_instance, user):
     _tasks = [
         core_tasks.BackendMethodTask().si(
             serialized_instance,
             'create_cluster',
             state_transition='begin_creating')
     ]
     _tasks += cls.create_nodes(instance.node_set.all(), user)
     _tasks += [
         core_tasks.PollRuntimeStateTask().si(
             serialized_instance,
             backend_pull_method='check_cluster_creating',
             success_state=models.Cluster.RuntimeStates.ACTIVE,
             erred_state='error',
         )
     ]
     return chain(*_tasks)
Example #18
 def get_task_signature(cls, instance, serialized_instance):
     return chain(
         core_tasks.BackendMethodTask().si(
             serialized_instance,
             'create_app',
             state_transition='begin_creating'),
         core_tasks.PollRuntimeStateTask().si(
             serialized_instance,
             backend_pull_method='check_application_state',
             success_state='active',
             erred_state='error',
         ),
         core_tasks.BackendMethodTask().si(
             core_utils.serialize_instance(instance.rancher_project),
             'pull_project_workloads',
         ),
     )
Example #19
 def get_task_signature(cls, volume, serialized_volume, **kwargs):
     return chain(
         core_tasks.BackendMethodTask().si(
             serialized_volume,
             instance_uuid=volume.instance.uuid.hex,
             device=volume.device,
             backend_method='attach_volume',
             state_transition='begin_updating'
         ),
         core_tasks.PollRuntimeStateTask().si(
             serialized_volume,
             backend_pull_method='pull_volume_runtime_state',
             success_state='in-use',
             erred_state='error',
         ),
         # Additional pull to populate the "device" field.
         core_tasks.BackendMethodTask().si(serialized_volume, backend_method='pull_volume'),
     )
Example #20
    def get_task_signature(
        cls,
        instance,
        serialized_instance,
        image=None,
        size=None,
        ssh_key=None,
        volume=None,
    ):
        kwargs = {
            'backend_image_id': image.backend_id,
            'backend_size_id': size.backend_id,
        }
        if ssh_key is not None:
            kwargs['ssh_key_uuid'] = ssh_key.uuid.hex

        serialized_volume = core_utils.serialize_instance(volume)

        return chain(
            core_tasks.StateTransitionTask().si(
                serialized_volume, state_transition='begin_creating'
            ),
            core_tasks.BackendMethodTask().si(
                serialized_instance,
                backend_method='create_instance',
                state_transition='begin_creating',
                **kwargs
            ),
            core_tasks.PollRuntimeStateTask().si(
                serialized_instance,
                backend_pull_method='pull_instance_runtime_state',
                success_state='running',
                erred_state='error',
            ),
            core_tasks.BackendMethodTask().si(
                serialized_volume,
                backend_method='pull_instance_volume',
                success_runtime_state='inuse',
            ),
            core_tasks.BackendMethodTask().si(
                serialized_instance, 'pull_instance_public_ips'
            ),
        )
Example #21
    def get_task_signature(cls,
                           instance,
                           serialized_instance,
                           ssh_key=None,
                           flavor=None):
        """ Create all instance volumes in parallel and wait for them to provision """
        serialized_volumes = [
            core_utils.serialize_instance(volume)
            for volume in instance.volumes.all()
        ]

        _tasks = [
            tasks.ThrottleProvisionStateTask().si(
                serialized_instance, state_transition='begin_creating')
        ]
        # Create volumes
        for serialized_volume in serialized_volumes:
            _tasks.append(tasks.ThrottleProvisionTask().si(
                serialized_volume,
                'create_volume',
                state_transition='begin_creating'))
        for index, serialized_volume in enumerate(serialized_volumes):
            # Wait for volume creation
            _tasks.append(core_tasks.PollRuntimeStateTask().si(
                serialized_volume,
                backend_pull_method='pull_volume_runtime_state',
                success_state='available',
                erred_state='error',
            ).set(countdown=30 if index == 0 else 0))
            # Pull volume to make sure that it is bootable
            _tasks.append(core_tasks.BackendMethodTask().si(
                serialized_volume, 'pull_volume'))
            # Mark volume as OK
            _tasks.append(core_tasks.StateTransitionTask().si(
                serialized_volume, state_transition='set_ok'))
        # Create instance based on volumes
        kwargs = {
            'backend_flavor_id': flavor.backend_id,
        }
        if ssh_key is not None:
            kwargs['public_key'] = ssh_key.public_key
        # Wait 10 seconds after volume creation due to OpenStack restrictions.
        _tasks.append(core_tasks.BackendMethodTask().si(
            serialized_instance, 'create_instance',
            **kwargs).set(countdown=10))

        # Wait for instance creation
        _tasks.append(core_tasks.PollRuntimeStateTask().si(
            serialized_instance,
            backend_pull_method='pull_instance_runtime_state',
            success_state=models.Instance.RuntimeStates.ACTIVE,
            erred_state=models.Instance.RuntimeStates.ERROR,
        ))

        # Update volumes runtime state and device name
        for serialized_volume in serialized_volumes:
            _tasks.append(core_tasks.BackendMethodTask().si(
                serialized_volume,
                backend_method='pull_volume',
                update_fields=['runtime_state', 'device']))

        # TODO: The port should be created before the instance is created.
        # The following calls should then be removed: pull_created_instance_internal_ips and push_instance_internal_ips.

        # Pull instance internal IPs
        # The pull_instance_internal_ips method cannot be used because it requires a backend_id to update
        # existing internal IPs, and the internal IPs of a freshly created instance do not have backend_ids yet.
        _tasks.append(core_tasks.BackendMethodTask().si(
            serialized_instance, 'pull_created_instance_internal_ips'))

        # Consider the case when an instance has several internal IPs connected to
        # different subnets within the same network.
        # When an OpenStack instance is provisioned, a network port is created.
        # This port is pulled into Waldur using the pull_created_instance_internal_ips method.
        # However, it does not take subnets into account, because OpenStack
        # does not allow specifying a subnet on instance creation.
        # See also: https://specs.openstack.org/openstack/nova-specs/specs/juno/approved/selecting-subnet-when-creating-vm.html
        # Therefore the remaining network ports for those subnets need to be pushed explicitly.
        _tasks.append(core_tasks.BackendMethodTask().si(
            serialized_instance, 'push_instance_internal_ips'))

        # Pull instance security groups
        _tasks.append(core_tasks.BackendMethodTask().si(
            serialized_instance, 'pull_instance_security_groups'))

        # Create non-existing floating IPs
        for floating_ip in instance.floating_ips.filter(backend_id=''):
            serialized_floating_ip = core_utils.serialize_instance(floating_ip)
            _tasks.append(core_tasks.BackendMethodTask().si(
                serialized_floating_ip, 'create_floating_ip'))
        # Push instance floating IPs
        _tasks.append(core_tasks.BackendMethodTask().si(
            serialized_instance, 'push_instance_floating_ips'))
        # Wait for operation completion
        for index, floating_ip in enumerate(instance.floating_ips):
            _tasks.append(core_tasks.PollRuntimeStateTask().si(
                core_utils.serialize_instance(floating_ip),
                backend_pull_method='pull_floating_ip_runtime_state',
                success_state='ACTIVE',
                erred_state='ERRED',
            ).set(countdown=5 if not index else 0))

        shared_tenant = instance.service_project_link.service.settings.scope
        if shared_tenant:
            serialized_executor = core_utils.serialize_class(
                openstack_executors.TenantPullFloatingIPsExecutor)
            serialized_tenant = core_utils.serialize_instance(shared_tenant)
            _tasks.append(core_tasks.ExecutorTask().si(serialized_executor,
                                                       serialized_tenant))
        return chain(*_tasks)
Example #22
    def get_task_signature(cls, instance, serialized_instance, ssh_key=None, flavor=None):
        """ Create all instance volumes in parallel and wait for them to provision """
        serialized_volumes = [core_utils.serialize_instance(volume) for volume in instance.volumes.all()]

        _tasks = [tasks.ThrottleProvisionStateTask().si(serialized_instance, state_transition='begin_creating')]
        # Create volumes
        for serialized_volume in serialized_volumes:
            _tasks.append(tasks.ThrottleProvisionTask().si(
                serialized_volume, 'create_volume', state_transition='begin_creating'))
        for index, serialized_volume in enumerate(serialized_volumes):
            # Wait for volume creation
            _tasks.append(core_tasks.PollRuntimeStateTask().si(
                serialized_volume,
                backend_pull_method='pull_volume_runtime_state',
                success_state='available',
                erred_state='error',
            ).set(countdown=30 if index == 0 else 0))
            # Pull volume to make sure that it is bootable
            _tasks.append(core_tasks.BackendMethodTask().si(serialized_volume, 'pull_volume'))
            # Mark volume as OK
            _tasks.append(core_tasks.StateTransitionTask().si(serialized_volume, state_transition='set_ok'))
        # Create instance based on volumes
        kwargs = {
            'backend_flavor_id': flavor.backend_id,
        }
        if ssh_key is not None:
            kwargs['public_key'] = ssh_key.public_key
        # Wait 10 seconds after volume creation due to OpenStack restrictions.
        _tasks.append(core_tasks.BackendMethodTask().si(
            serialized_instance, 'create_instance', **kwargs).set(countdown=10))

        # Wait for instance creation
        _tasks.append(core_tasks.PollRuntimeStateTask().si(
            serialized_instance,
            backend_pull_method='pull_instance_runtime_state',
            success_state=models.Instance.RuntimeStates.ACTIVE,
            erred_state=models.Instance.RuntimeStates.ERROR,
        ))

        # Update volumes runtime state and device name
        for serialized_volume in serialized_volumes:
            _tasks.append(core_tasks.BackendMethodTask().si(
                serialized_volume,
                backend_method='pull_volume',
                update_fields=['runtime_state', 'device']
            ))

        # Pull instance internal IPs
        # The pull_instance_internal_ips method cannot be used because it requires a backend_id to update
        # existing internal IPs, and the internal IPs of a freshly created instance do not have backend_ids yet.
        _tasks.append(core_tasks.BackendMethodTask().si(serialized_instance, 'pull_created_instance_internal_ips'))

        # Pull instance security groups
        _tasks.append(core_tasks.BackendMethodTask().si(serialized_instance, 'pull_instance_security_groups'))

        # Create non-existing floating IPs
        for floating_ip in instance.floating_ips.filter(backend_id=''):
            serialized_floating_ip = core_utils.serialize_instance(floating_ip)
            _tasks.append(core_tasks.BackendMethodTask().si(serialized_floating_ip, 'create_floating_ip'))
        # Push instance floating IPs
        _tasks.append(core_tasks.BackendMethodTask().si(serialized_instance, 'push_instance_floating_ips'))
        # Wait for operation completion
        for index, floating_ip in enumerate(instance.floating_ips):
            _tasks.append(core_tasks.PollRuntimeStateTask().si(
                core_utils.serialize_instance(floating_ip),
                backend_pull_method='pull_floating_ip_runtime_state',
                success_state='ACTIVE',
                erred_state='ERRED',
            ).set(countdown=5 if not index else 0))

        shared_tenant = instance.service_project_link.service.settings.scope
        if shared_tenant:
            serialized_executor = core_utils.serialize_class(openstack_executors.TenantPullFloatingIPsExecutor)
            serialized_tenant = core_utils.serialize_instance(shared_tenant)
            _tasks.append(core_tasks.ExecutorTask().si(serialized_executor, serialized_tenant))
        return chain(*_tasks)