Пример #1
0
 def _attach_volumes_task(self):
     """Return a PollingTaskGroup that attaches all configured volumes.

     One VolumeAttachTask is created per (volume_id, device) pair yielded
     by self.volumes(); the tasks are built lazily via a generator.
     """
     task_iter = (
         volume.VolumeAttachTask(self.stack, self.resource_id,
                                 vol_id, dev)
         for vol_id, dev in self.volumes())
     return scheduler.PollingTaskGroup(task_iter)
Пример #2
0
    def test_instance_resume_volumes_step(self):
        """Resume an instance with volumes and check it reaches COMPLETE.

        _attach_volumes_task is stubbed to return a dummy PollingTaskGroup
        so check_resume_complete only needs to poll the faked server status.
        """
        return_server = self.fc.servers.list()[1]
        instance = self._create_test_instance(return_server, 'in_resume_vol')

        instance.resource_id = 1234
        self.m.ReplayAll()

        # Override the get_servers_1234 handler status to ACTIVE
        d = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
        d['server']['status'] = 'ACTIVE'

        # Return a dummy PollingTaskGroup to make check_resume_complete step
        def dummy_attach():
            yield

        dummy_tg = scheduler.PollingTaskGroup([dummy_attach, dummy_attach])
        self.m.StubOutWithMock(instance, '_attach_volumes_task')
        instance._attach_volumes_task().AndReturn(dummy_tg)

        # Record one fake "get server" call that reports the ACTIVE state.
        self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
        get = self.fc.client.get_servers_1234
        get().AndReturn((200, d))

        self.m.ReplayAll()

        # Start from a suspended state so resume is a valid transition.
        instance.state_set(instance.SUSPEND, instance.COMPLETE)

        scheduler.TaskRunner(instance.resume)()
        self.assertEqual((instance.RESUME, instance.COMPLETE), instance.state)

        self.m.VerifyAll()
Пример #3
0
    def test_instance_suspend_volumes_wait(self):
        """Suspend an instance with volumes, polling until SUSPENDED.

        _detach_volumes_task is stubbed to a dummy PollingTaskGroup; the
        faked server reports ACTIVE twice before SUSPENDED, exercising the
        check_suspend_complete wait loop.
        """
        return_server = self.fc.servers.list()[1]
        instance = self._create_test_instance(return_server, 'in_suspend_vol')

        instance.resource_id = '1234'
        self.m.ReplayAll()

        # Override the get_servers_1234 handler status to SUSPENDED, but keep
        # it ACTIVE for the first two iterations of check_suspend_complete.
        d1 = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
        d2 = copy.deepcopy(d1)
        d1['server']['status'] = 'ACTIVE'
        d2['server']['status'] = 'SUSPENDED'

        # Return a dummy PollingTaskGroup to make check_suspend_complete step
        def dummy_detach():
            yield

        dummy_tg = scheduler.PollingTaskGroup([dummy_detach, dummy_detach])
        self.m.StubOutWithMock(instance, '_detach_volumes_task')
        instance._detach_volumes_task().AndReturn(dummy_tg)

        # Record three polls: ACTIVE, ACTIVE, then SUSPENDED.
        self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
        get = self.fc.client.get_servers_1234
        get().AndReturn((200, d1))
        get().AndReturn((200, d1))
        get().AndReturn((200, d2))
        self.m.ReplayAll()

        scheduler.TaskRunner(instance.suspend)()
        self.assertEqual((instance.SUSPEND, instance.COMPLETE), instance.state)

        self.m.VerifyAll()
Пример #4
0
 def _detach_volumes_task(self):
     """Return a PollingTaskGroup detaching every volume of this instance.

     Builds one VolumeDetachTask per volume reported by self.volumes();
     the device half of each pair is not needed for detaching.
     """
     task_iter = (
         volume.VolumeDetachTask(self.stack, self.resource_id, vol_id)
         for vol_id, _device in self.volumes())
     return scheduler.PollingTaskGroup(task_iter)
Пример #5
0
    def test_group(self):
        """A PollingTaskGroup steps all of its member tasks in lockstep."""
        members = [DummyTask() for _ in range(3)]
        for member in members:
            self.m.StubOutWithMock(member, 'do_step')

        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')

        # Every task takes its first step before any sleep is recorded.
        for member in members:
            member.do_step(1).AndReturn(None)
        # Remaining steps of each task alternate with the runner's sleeps.
        for member in members:
            scheduler.TaskRunner._sleep(mox.IsA(int)).AndReturn(None)
            member.do_step(2).AndReturn(None)
            scheduler.TaskRunner._sleep(mox.IsA(int)).AndReturn(None)
            member.do_step(3).AndReturn(None)

        self.m.ReplayAll()

        group = scheduler.PollingTaskGroup(members)
        scheduler.TaskRunner(group)()
Пример #6
0
    def handle_delete(self):
        '''
        Delete the instance, blocking until it is disposed by OpenStack.

        Detaches all volumes first, then deletes the server (tolerating a
        server that is already gone) and finally clears the resource id.
        '''
        if self.resource_id is None:
            # Nothing was ever created, so there is nothing to clean up.
            return

        detach_group = scheduler.PollingTaskGroup(
            volume.VolumeDetachTask(self.stack, self.resource_id, vol_id)
            for vol_id, _device in self.volumes())
        scheduler.TaskRunner(detach_group)()

        try:
            server = self.nova().servers.get(self.resource_id)
        except clients.novaclient.exceptions.NotFound:
            # Already deleted out of band; treat as success.
            pass
        else:
            delete_runner = scheduler.TaskRunner(self._delete_server, server)
            delete_runner(wait_time=0.2)

        self.resource_id = None
Пример #7
0
 def _detach_volumes_task(self):
     """Return a PollingTaskGroup of TaskRunners, one detach per volume."""
     runner_iter = (
         scheduler.TaskRunner(self._detach_volume, vol_id)
         for vol_id, _device in self.volumes())
     return scheduler.PollingTaskGroup(runner_iter)
Пример #8
0
 def _detach_volumes_task(self):
     """Return an empty PollingTaskGroup.

     No-op task, needed so the superclass implementations of
     handle_delete and handle_suspend can be reused unchanged.
     """
     return scheduler.PollingTaskGroup([])
Пример #9
0
    def handle_create(self):
        """Create the Nova server backing this instance resource.

        Validates the keypair and resolves the image and flavor names to
        ids, builds metadata, scheduler hints and NICs, boots the server
        (recording its id even if the thread is cancelled mid-create), and
        returns the server together with a TaskRunner that attaches the
        configured volumes.

        Returns:
            (server, TaskRunner) tuple; the runner attaches volumes.

        Raises:
            exception.UserKeyPairMissing: the named keypair does not exist.
            exception.ImageNotFound: the named image is not in glance.
            exception.FlavorMissing: the named flavor does not exist.
        """
        security_groups = self._get_security_groups()

        userdata = self.properties['UserData'] or ''
        flavor = self.properties['InstanceType']
        key_name = self.properties['KeyName']
        availability_zone = self.properties['AvailabilityZone']

        # Only hit the Nova keypair API when a keypair was actually named.
        if key_name is not None:
            keypairs = [k.name for k in self.nova().keypairs.list()]
            if key_name not in keypairs:
                raise exception.UserKeyPairMissing(key_name=key_name)

        image_name = self.properties['ImageId']
        # Resolve the image name to its id; None when no match is found.
        image_id = next((o.id for o in self.nova().images.list()
                         if o.name == image_name), None)
        if image_id is None:
            logger.info("Image %s was not found in glance", image_name)
            raise exception.ImageNotFound(image_name=image_name)

        # Resolve the flavor name to its id; None when no match is found.
        flavor_id = next((o.id for o in self.nova().flavors.list()
                          if o.name == flavor), None)
        if flavor_id is None:
            raise exception.FlavorMissing(flavor_id=flavor)

        # Nova expects None (not {}) when no metadata / hints are given.
        tags = None
        if self.properties['Tags']:
            tags = dict((tm['Key'], tm['Value'])
                        for tm in self.properties['Tags'])

        scheduler_hints = None
        if self.properties['NovaSchedulerHints']:
            scheduler_hints = dict(
                (tm['Key'], tm['Value'])
                for tm in self.properties['NovaSchedulerHints'])

        nics = self._build_nics(self.properties['NetworkInterfaces'],
                                subnet_id=self.properties['SubnetId'])

        server_userdata = self._build_userdata(userdata)
        server = None
        try:
            server = self.nova().servers.create(
                name=self.physical_resource_name(),
                image=image_id,
                flavor=flavor_id,
                key_name=key_name,
                security_groups=security_groups,
                userdata=server_userdata,
                meta=tags,
                scheduler_hints=scheduler_hints,
                nics=nics,
                availability_zone=availability_zone)
        finally:
            # Avoid a race condition where the thread could be cancelled
            # before the ID is stored
            if server is not None:
                self.resource_id_set(server.id)

        # Volume attachment is deferred to a TaskRunner so the caller can
        # poll for completion alongside the server becoming ACTIVE.
        attach_tasks = (volume.VolumeAttachTask(self.stack,
                                                self.resource_id,
                                                volume_id,
                                                device)
                        for volume_id, device in self.volumes())
        attach_volumes_task = scheduler.PollingTaskGroup(attach_tasks)

        return server, scheduler.TaskRunner(attach_volumes_task)