Example #1
 def __init__(self, size, name, description, id, snapshot, volume_type, metadata, availability_zone):
     snapshot_id = None
     if snapshot is not None:
         snapshot_id = snapshot["id"]
     if id is None:
         id = str(utils.gen_uuid())
     self.vol = {
         "created_at": timeutils.utcnow(),
         "deleted_at": None,
         "updated_at": timeutils.utcnow(),
         "uuid": "WTF",
         "deleted": False,
         "id": id,
         "user_id": self.user_uuid,
         "project_id": "fake-project-id",
         "snapshot_id": snapshot_id,
         "host": None,
         "size": size,
         "availability_zone": availability_zone,
         "instance_uuid": None,
         "mountpoint": None,
         "attach_time": timeutils.utcnow(),
         "status": "available",
         "attach_status": "detached",
         "scheduled_at": None,
         "launched_at": None,
         "terminated_at": None,
         "display_name": name,
         "display_description": description,
         "provider_location": "fake-location",
         "provider_auth": "fake-auth",
         "volume_type_id": 99,
     }
Example #2
    def test_update_service_capabilities(self):
        service_states = self.host_manager.service_states
        self.assertEqual(len(service_states.keys()), 0)
        self.mox.StubOutWithMock(timeutils, 'utcnow')
        timeutils.utcnow().AndReturn(31337)
        timeutils.utcnow().AndReturn(31339)

        host1_compute_capabs = dict(free_memory=1234, host_memory=5678,
                timestamp=1, hypervisor_hostname='node1')
        host2_compute_capabs = dict(free_memory=8756, timestamp=1,
                hypervisor_hostname='node2')

        self.mox.ReplayAll()
        self.host_manager.update_service_capabilities('compute', 'host1',
                host1_compute_capabs)
        self.host_manager.update_service_capabilities('compute', 'host2',
                host2_compute_capabs)

        # Make sure original dictionary wasn't copied
        self.assertEqual(host1_compute_capabs['timestamp'], 1)

        host1_compute_capabs['timestamp'] = 31337
        host2_compute_capabs['timestamp'] = 31339

        expected = {('host1', 'node1'): host1_compute_capabs,
                    ('host2', 'node2'): host2_compute_capabs}
        self.assertThat(service_states, matchers.DictMatches(expected))
Example #3
        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = timeutils.utcnow()
                    self.f(*self.args, **self.kw)
                    end = timeutils.utcnow()
                    if not self._running:
                        break
                    delay = interval - timeutils.delta_seconds(start, end)
                    if delay <= 0:
                        LOG.warn(_('task run outlasted interval by %s sec') %
                                 -delay)
                    greenthread.sleep(delay if delay > 0 else 0)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)
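The example above keeps a fixed cadence by measuring the task's run time with two timeutils.utcnow() calls, converting it with timeutils.delta_seconds(start, end), and sleeping only for what is left of the interval. A minimal standalone sketch of the same bookkeeping, using only the standard library (no eventlet; run_fixed_interval is a hypothetical helper, not part of the original code):

import datetime
import time

def run_fixed_interval(task, interval, iterations):
    # Run `task` roughly every `interval` seconds, sleeping only for the time
    # left over after the task itself has run.
    for _ in range(iterations):
        start = datetime.datetime.utcnow()   # stands in for timeutils.utcnow()
        task()
        end = datetime.datetime.utcnow()
        delay = interval - (end - start).total_seconds()
        if delay <= 0:
            print('task run outlasted interval by %s sec' % -delay)
        time.sleep(delay if delay > 0 else 0)

run_fixed_interval(lambda: time.sleep(0.1), interval=0.5, iterations=3)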
Example #4
    def periodic_tasks(self, context, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        idle_for = DEFAULT_INTERVAL
        for task_name, task in self._periodic_tasks:
            full_task_name = '.'.join([self.__class__.__name__, task_name])

            now = timeutils.utcnow()
            spacing = self._periodic_spacing[task_name]
            last_run = self._periodic_last_run[task_name]

            # If a periodic task is _nearly_ due, then we'll run it early
            if spacing is not None and last_run is not None:
                due = last_run + datetime.timedelta(seconds=spacing)
                if not timeutils.is_soon(due, 0.2):
                    idle_for = min(idle_for, timeutils.delta_seconds(now, due))
                    continue

            if spacing is not None:
                idle_for = min(idle_for, spacing)

            LOG.debug(_("Running periodic task %(full_task_name)s"), locals())
            self._periodic_last_run[task_name] = timeutils.utcnow()

            try:
                task(self, context)
            except Exception as e:
                if raise_on_error:
                    raise
                LOG.exception(_("Error during %(full_task_name)s: %(e)s"),
                              locals())
            eventlet.sleep(0)

        return idle_for
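A small standalone sketch of the "nearly due" check that timeutils.is_soon(due, 0.2) performs above, written with plain datetime (the 60-second spacing is an assumed value for illustration):

import datetime

def is_soon(due, window_seconds):
    # A task due within `window_seconds` from now counts as due already.
    soon = datetime.datetime.utcnow() + datetime.timedelta(seconds=window_seconds)
    return due <= soon

last_run = datetime.datetime.utcnow() - datetime.timedelta(seconds=59.9)
due = last_run + datetime.timedelta(seconds=60)   # assumed spacing of 60s
print(is_soon(due, 0.2))   # True: close enough to the deadline to run early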
Example #5
    def test_snapshot_index_detail_serializer(self):
        serializer = volumes.SnapshotsTemplate()
        raw_snapshots = [dict(
                id='snap1_id',
                status='snap1_status',
                size=1024,
                createdAt=timeutils.utcnow(),
                displayName='snap1_name',
                displayDescription='snap1_desc',
                volumeId='vol1_id',
                ),
                       dict(
                id='snap2_id',
                status='snap2_status',
                size=1024,
                createdAt=timeutils.utcnow(),
                displayName='snap2_name',
                displayDescription='snap2_desc',
                volumeId='vol2_id',
                )]
        text = serializer.serialize(dict(snapshots=raw_snapshots))

        tree = etree.fromstring(text)

        self.assertEqual('snapshots', tree.tag)
        self.assertEqual(len(raw_snapshots), len(tree))
        for idx, child in enumerate(tree):
            self._verify_snapshot(raw_snapshots[idx], child)
Example #6
    def test_update_service_capabilities(self):
        service_states = self.host_manager.service_states
        self.assertDictMatch(service_states, {})
        self.mox.StubOutWithMock(timeutils, 'utcnow')
        timeutils.utcnow().AndReturn(31337)
        timeutils.utcnow().AndReturn(31339)

        host1_compute_capabs = dict(free_memory=1234, host_memory=5678,
                timestamp=1)
        host2_compute_capabs = dict(free_memory=8756, timestamp=1)

        self.mox.ReplayAll()
        self.host_manager.update_service_capabilities('compute', 'host1',
                host1_compute_capabs)
        self.host_manager.update_service_capabilities('compute', 'host2',
                host2_compute_capabs)

        # Make sure original dictionary wasn't copied
        self.assertEqual(host1_compute_capabs['timestamp'], 1)

        host1_compute_capabs['timestamp'] = 31337
        host2_compute_capabs['timestamp'] = 31339

        expected = {'host1': host1_compute_capabs,
                    'host2': host2_compute_capabs}
        self.assertDictMatch(service_states, expected)
Example #7
    def test_get_all(self):
        host1 = self._host + "_1"
        host2 = self._host + "_2"
        host3 = self._host + "_3"

        serv1 = self.useFixture(ServiceFixture(host1, self._binary, self._topic)).serv
        serv1.start()

        serv2 = self.useFixture(ServiceFixture(host2, self._binary, self._topic)).serv
        serv2.start()

        serv3 = self.useFixture(ServiceFixture(host3, self._binary, self._topic)).serv
        serv3.start()

        db.service_get_by_args(self._ctx, host1, self._binary)
        db.service_get_by_args(self._ctx, host2, self._binary)
        db.service_get_by_args(self._ctx, host3, self._binary)

        host1key = str("%s:%s" % (self._topic, host1))
        host2key = str("%s:%s" % (self._topic, host2))
        host3key = str("%s:%s" % (self._topic, host3))
        self.servicegroup_api._driver.mc.set(host1key, timeutils.utcnow(), time=self.down_time)
        self.servicegroup_api._driver.mc.set(host2key, timeutils.utcnow(), time=self.down_time)
        self.servicegroup_api._driver.mc.set(host3key, timeutils.utcnow(), time=-1)

        services = self.servicegroup_api.get_all(self._topic)

        self.assertIn(host1, services)
        self.assertIn(host2, services)
        self.assertNotIn(host3, services)

        service_id = self.servicegroup_api.get_one(self._topic)
        self.assertIn(service_id, services)
Example #8
 def inspect_container(self, container_id):
     if container_id not in self._containers:
         return
     container = self._containers[container_id]
     info = {
         'Args': [],
         'Config': container['config'],
         'Created': str(timeutils.utcnow()),
         'ID': container_id,
         'Image': self._fake_id(),
         'NetworkSettings': {
             'Bridge': '',
             'Gateway': '',
             'IPAddress': '',
             'IPPrefixLen': 0,
             'PortMapping': None
         },
         'Path': 'bash',
         'ResolvConfPath': '/etc/resolv.conf',
         'State': {
             'ExitCode': 0,
             'Ghost': False,
             'Pid': 0,
             'Running': container['running'],
             'StartedAt': str(timeutils.utcnow())
         },
         'SysInitPath': '/tmp/docker',
         'Volumes': {},
     }
     return info
Example #9
    def test_service_is_up(self):
        serv = self.useFixture(
            ServiceFixture(self._host, self._binary, self._topic)).serv
        serv.start()
        service_ref = db.service_get_by_args(self._ctx,
                                             self._host,
                                             self._binary)
        fake_now = 1000
        down_time = 5
        self.flags(service_down_time=down_time)
        self.mox.StubOutWithMock(timeutils, 'utcnow_ts')
        self.servicegroup_api = servicegroup.API()
        hostkey = str("%s:%s" % (self._topic, self._host))

        # Up (equal)
        timeutils.utcnow_ts().AndReturn(fake_now)
        timeutils.utcnow_ts().AndReturn(fake_now + down_time - 1)
        self.mox.ReplayAll()
        self.servicegroup_api._driver.mc.set(hostkey,
                                             timeutils.utcnow(),
                                             time=down_time)
        result = self.servicegroup_api.service_is_up(service_ref)
        self.assertTrue(result)

        self.mox.ResetAll()
        # Up
        timeutils.utcnow_ts().AndReturn(fake_now)
        timeutils.utcnow_ts().AndReturn(fake_now + down_time - 2)
        self.mox.ReplayAll()
        self.servicegroup_api._driver.mc.set(hostkey,
                                             timeutils.utcnow(),
                                             time=down_time)
        result = self.servicegroup_api.service_is_up(service_ref)
        self.assertTrue(result)

        self.mox.ResetAll()
        # Down
        timeutils.utcnow_ts().AndReturn(fake_now)
        timeutils.utcnow_ts().AndReturn(fake_now + down_time)
        self.mox.ReplayAll()
        self.servicegroup_api._driver.mc.set(hostkey,
                                             timeutils.utcnow(),
                                             time=down_time)
        result = self.servicegroup_api.service_is_up(service_ref)
        self.assertFalse(result)

        self.mox.ResetAll()
        # Down
        timeutils.utcnow_ts().AndReturn(fake_now)
        timeutils.utcnow_ts().AndReturn(fake_now + down_time + 1)
        self.mox.ReplayAll()
        self.servicegroup_api._driver.mc.set(hostkey,
                                             timeutils.utcnow(),
                                             time=down_time)
        result = self.servicegroup_api.service_is_up(service_ref)
        self.assertFalse(result)

        self.mox.ResetAll()
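The four ReplayAll blocks above pin down the boundary of the liveness check: a heartbeat written with a memcache TTL of down_time still counts as up while strictly fewer than down_time seconds have elapsed. A sketch of that rule with plain integer timestamps (service_is_up_rule is a hypothetical helper, not the servicegroup API):

def service_is_up_rule(set_ts, now_ts, down_time):
    # Alive while the heartbeat is younger than the memcache TTL.
    return (now_ts - set_ts) < down_time

fake_now, down_time = 1000, 5
assert service_is_up_rule(fake_now, fake_now + down_time - 1, down_time)   # up
assert not service_is_up_rule(fake_now, fake_now + down_time, down_time)   # down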
Example #10
def stub_instance(id, user_id='fake', project_id='fake', host=None,
                  vm_state=None, task_state=None,
                  reservation_id="", uuid=FAKE_UUID, image_ref="10",
                  flavor_id="1", name=None, key_name='',
                  access_ipv4=None, access_ipv6=None, progress=0):

    if host is not None:
        host = str(host)

    if key_name:
        key_data = 'FAKE'
    else:
        key_data = ''

    # ReservationID isn't sent back, hack it in there.
    server_name = name or "server%s" % id
    if reservation_id != "":
        server_name = "reservation_%s" % (reservation_id, )

    instance = {
        "id": int(id),
        "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
        "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
        "admin_pass": "",
        "user_id": user_id,
        "project_id": project_id,
        "image_ref": image_ref,
        "kernel_id": "",
        "ramdisk_id": "",
        "launch_index": 0,
        "key_name": key_name,
        "key_data": key_data,
        "vm_state": vm_state or vm_states.BUILDING,
        "task_state": task_state,
        "memory_mb": 0,
        "vcpus": 0,
        "root_gb": 0,
        "hostname": "",
        "host": host,
        "instance_type": {},
        "user_data": "",
        "reservation_id": reservation_id,
        "mac_address": "",
        "scheduled_at": timeutils.utcnow(),
        "launched_at": timeutils.utcnow(),
        "terminated_at": timeutils.utcnow(),
        "availability_zone": "",
        "display_name": server_name,
        "display_description": "",
        "locked": False,
        "metadata": [],
        "access_ip_v4": access_ipv4,
        "access_ip_v6": access_ipv6,
        "uuid": uuid,
        "progress": progress}

    return instance
Example #11
 def setUp(self):
     """setUp method for simple tenant usage"""
     super(SimpleTenantUsageSampleJsonTest, self).setUp()
     self._post_server()
     timeutils.set_time_override(timeutils.utcnow() +
                                 datetime.timedelta(hours=1))
     self.query = {
         'start': str(timeutils.utcnow() - datetime.timedelta(hours=1)),
         'end': str(timeutils.utcnow())
     }
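timeutils.set_time_override() above freezes what timeutils.utcnow() returns so the one-hour usage window in the query is exact. A standalone sketch of the idea with a hypothetical FakeClock helper (the real tests rely on timeutils' override, not this class):

import datetime

class FakeClock(object):
    # Controllable stand-in for a frozen utcnow().
    def __init__(self, now):
        self.now = now

    def utcnow(self):
        return self.now

    def advance(self, **kwargs):
        self.now += datetime.timedelta(**kwargs)

clock = FakeClock(datetime.datetime(2013, 1, 1, 12, 0, 0))
start = clock.utcnow()
clock.advance(hours=1)
end = clock.utcnow()
assert end - start == datetime.timedelta(hours=1)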
Example #12
 def update(self, context):
     """Update status for all compute_nodes.  This should be called
     periodically to refresh the compute_node inventory.
     """
     self.green_pool.waitall()
     diff = timeutils.utcnow() - self.last_compute_db_check
     if diff.seconds >= CONF.compute_db_check_interval:
         LOG.info(_("Updating compute_node cache from db."))
         self.last_compute_db_check = timeutils.utcnow()
         self._refresh_from_db(context)
     self._poll_computes()
Example #13
 def _get_fake_host_state(self, index=0):
     host_state = host_manager.HostState(
         'host_%s' % index,
         'node_%s' % index)
     host_state.free_ram_mb = 50000
     host_state.service = {
         "disabled": False,
         "updated_at": timeutils.utcnow(),
         "created_at": timeutils.utcnow(),
     }
     return host_state
Example #14
    def __init__(self, bdm_dict=None, **kwargs):
        bdm_dict = bdm_dict or {}
        db_id = bdm_dict.pop('id', 1)
        instance_uuid = bdm_dict.pop('instance_uuid', str(uuid.uuid4()))

        super(FakeDbBlockDeviceDict, self).__init__(bdm_dict=bdm_dict,
                                                    **kwargs)
        fake_db_fields = {'id': db_id, 'instance_uuid': instance_uuid,
                          'created_at': timeutils.utcnow(),
                          'updated_at': timeutils.utcnow(),
                          'deleted_at': None,
                          'deleted': 0}
        self.update(fake_db_fields)
Example #15
        def run_test():
            a = timeutils.utcnow()

            for x in xrange(requests):
                self.driver.select_destinations(self.context, request_spec, {})

            b = timeutils.utcnow()
            c = b - a

            seconds = c.days * 24 * 60 * 60 + c.seconds
            microseconds = seconds * 1000 + c.microseconds / 1000.0
            per_request_ms = microseconds / requests
            return per_request_ms
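The manual days/seconds/microseconds arithmetic above (note that the variable named microseconds actually ends up holding milliseconds) can be written more directly with timedelta.total_seconds(); a standalone sketch with assumed values, where the sleep stands in for driver.select_destinations():

import datetime
import time

requests = 16
a = datetime.datetime.utcnow()
for _ in range(requests):
    time.sleep(0.001)          # stand-in for the scheduling call being timed
b = datetime.datetime.utcnow()

per_request_ms = (b - a).total_seconds() * 1000.0 / requests
print(per_request_ms)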
Example #16
    def test_volume_index_detail_serializer(self):
        serializer = volumes.VolumesTemplate()
        raw_volumes = [dict(
                id='vol1_id',
                status='vol1_status',
                size=1024,
                availabilityZone='vol1_availability',
                createdAt=timeutils.utcnow(),
                attachments=[dict(
                        id='vol1_id',
                        volumeId='vol1_id',
                        serverId='instance_uuid',
                        device='/foo1')],
                displayName='vol1_name',
                displayDescription='vol1_desc',
                volumeType='vol1_type',
                snapshotId='snap1_id',
                metadata=dict(
                    foo='vol1_foo',
                    bar='vol1_bar',
                    ),
                ),
                       dict(
                id='vol2_id',
                status='vol2_status',
                size=1024,
                availabilityZone='vol2_availability',
                createdAt=timeutils.utcnow(),
                attachments=[dict(
                        id='vol2_id',
                        volumeId='vol2_id',
                        serverId='instance_uuid',
                        device='/foo2')],
                displayName='vol2_name',
                displayDescription='vol2_desc',
                volumeType='vol2_type',
                snapshotId='snap2_id',
                metadata=dict(
                    foo='vol2_foo',
                    bar='vol2_bar',
                    ),
                )]
        text = serializer.serialize(dict(volumes=raw_volumes))

        print text
        tree = etree.fromstring(text)

        self.assertEqual('volumes', tree.tag)
        self.assertEqual(len(raw_volumes), len(tree))
        for idx, child in enumerate(tree):
            self._verify_volume(raw_volumes[idx], child)
Example #17
    def monitor_service(self, ctxt):
        """
        check service status, confirm whether service is abnormal,
        and notify the ha module if service is confirmed abnormal

        """
        services_failure_info = []

        for service_name in self.check_services:
            service_topic = service_name.rpartition('nova-')[2]
            abnormal_services = self._get_abnormal_service_by_topic(ctxt,
                                                                service_topic)
            if abnormal_services == True:
                self._notify_ha_service_failure(ctxt, service_name)
            elif abnormal_services:
                for abnormal_service in abnormal_services:
                    service_failure_info = self._get_service_failure_info(
                                       service_name, abnormal_service['host'])
                    if (service_failure_info is not None and
                        self.is_time_valid(
                            service_failure_info['last_failure_time'],
                            timeutils.utcnow(), self.service_recover_time)):
                        LOG.info(_('service %(service)s on %(host)s is'
                                   ' still in recovering...')
                                 % {'service': service_name,
                                    'host': abnormal_service['host']})
                        services_failure_info.append(service_failure_info)
                        continue
                    if self._confirm_service_failure(ctxt, abnormal_service):
                        self._notify_ha_service_failure(ctxt, service_name,
                                                        abnormal_service)
                        if service_failure_info is None:
                            service_failure_info = dict(
                                service_name=service_name,
                                service_host=abnormal_service['host'],
                                failure_times=1,
                                last_failure_time=timeutils.utcnow())
                        else:
                            service_failure_info['failure_times'] += 1
                            now = timeutils.utcnow()
                            service_failure_info['last_failure_time'] = now
                        LOG.info(_('monitor: service %(service)s on %(host)s'
                                   ' abnormal')
                                 % {'service': service_name,
                                    'host': abnormal_service['host']})
                        services_failure_info.append(service_failure_info)
            else:
                LOG.info(_('monitor: service %s is normal') % service_name)
        self.services_failure_info = services_failure_info
        self._report_failure_services_info()
Example #18
    def test_get_all(self):
        host1 = self._host + '_1'
        host2 = self._host + '_2'
        host3 = self._host + '_3'

        serv1 = self.useFixture(
            ServiceFixture(host1, self._binary, self._topic)).serv
        serv1.start()

        serv2 = self.useFixture(
            ServiceFixture(host2, self._binary, self._topic)).serv
        serv2.start()

        serv3 = self.useFixture(
            ServiceFixture(host3, self._binary, self._topic)).serv
        serv3.start()

        service_ref1 = db.service_get_by_args(self._ctx,
                                              host1,
                                              self._binary)
        service_ref2 = db.service_get_by_args(self._ctx,
                                              host2,
                                              self._binary)
        service_ref3 = db.service_get_by_args(self._ctx,
                                              host3,
                                              self._binary)

        host1key = str("%s:%s" % (self._topic, host1))
        host2key = str("%s:%s" % (self._topic, host2))
        host3key = str("%s:%s" % (self._topic, host3))
        self.servicegroup_api._driver.mc.set(host1key,
                                             timeutils.utcnow(),
                                             time=self.down_time)
        self.servicegroup_api._driver.mc.set(host2key,
                                             timeutils.utcnow(),
                                             time=self.down_time)
        self.servicegroup_api._driver.mc.set(host3key,
                                             timeutils.utcnow(),
                                             time=-1)

        services = self.servicegroup_api.get_all(self._topic)

        self.assertTrue(host1 in services)
        self.assertTrue(host2 in services)
        self.assertFalse(host3 in services)

        service_id = self.servicegroup_api.get_one(self._topic)
        self.assertTrue(service_id in services)
Example #19
    def test_memcached_driver(self):
        serv = self.useFixture(
            ServiceFixture(self._host, self._binary, self._topic)).serv
        serv.start()
        service_ref = db.service_get_by_args(self._ctx,
                                             self._host,
                                             self._binary)
        hostkey = str("%s:%s" % (self._topic, self._host))
        self.servicegroup_api._driver.mc.set(hostkey,
                                             timeutils.utcnow(),
                                             time=self.down_time)

        self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
        eventlet.sleep(self.down_time + 1)
        service_ref = db.service_get_by_args(self._ctx,
                                             self._host,
                                             self._binary)

        self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
        serv.stop()
        eventlet.sleep(self.down_time + 1)
        service_ref = db.service_get_by_args(self._ctx,
                                             self._host,
                                             self._binary)
        self.assertFalse(self.servicegroup_api.service_is_up(service_ref))
Example #20
 def test_log_progress_if_not_required(self):
     self.mox.StubOutWithMock(vm_utils.LOG, "debug")
     current = timeutils.utcnow()
     timeutils.set_time_override(current)
     timeutils.advance_time_seconds(vm_utils.PROGRESS_INTERVAL_SECONDS - 1)
     self.mox.ReplayAll()
     vm_utils._log_progress_if_required(1, current, 2)
Example #21
 def log_request_completion(self, response, request, start):
     apireq = request.environ.get('ec2.request', None)
     if apireq:
         controller = apireq.controller
         action = apireq.action
     else:
         controller = None
         action = None
     ctxt = request.environ.get('nova.context', None)
     delta = timeutils.utcnow() - start
     seconds = delta.seconds
     microseconds = delta.microseconds
     LOG.info(
         "%s.%ss %s %s %s %s:%s %s [%s] %s %s",
         seconds,
         microseconds,
         request.remote_addr,
         request.method,
         "%s%s" % (request.script_name, request.path_info),
         controller,
         action,
         response.status_int,
         request.user_agent,
         request.content_type,
         response.content_type,
         context=ctxt)
Example #22
    def test_volume_show_create_serializer(self):
        serializer = volumes.VolumeTemplate()
        raw_volume = dict(
            id='vol_id',
            status='vol_status',
            size=1024,
            availabilityZone='vol_availability',
            createdAt=timeutils.utcnow(),
            attachments=[dict(
                    id='vol_id',
                    volumeId='vol_id',
                    serverId='instance_uuid',
                    device='/foo')],
            displayName='vol_name',
            displayDescription='vol_desc',
            volumeType='vol_type',
            snapshotId='snap_id',
            metadata=dict(
                foo='bar',
                baz='quux',
                ),
            )
        text = serializer.serialize(dict(volume=raw_volume))

        print text
        tree = etree.fromstring(text)

        self._verify_volume(raw_volume, tree)
Example #23
        def _wait_for_deploy():
            """Called at an interval until the deployment completes."""
            try:
                row = db.bm_node_get(context, node['id'])
                if instance['uuid'] != row.get('instance_uuid'):
                    locals['error'] = _("Node associated with another instance"
                                        " while waiting for deploy of %s")
                    raise loopingcall.LoopingCallDone()

                status = row.get('task_state')
                if (status == baremetal_states.DEPLOYING
                        and locals['started'] == False):
                    LOG.info(_("PXE deploy started for instance %s")
                                % instance['uuid'])
                    locals['started'] = True
                elif status in (baremetal_states.DEPLOYDONE,
                                baremetal_states.ACTIVE):
                    LOG.info(_("PXE deploy completed for instance %s")
                                % instance['uuid'])
                    raise loopingcall.LoopingCallDone()
                elif status == baremetal_states.DEPLOYFAIL:
                    locals['error'] = _("PXE deploy failed for instance %s")
            except exception.NodeNotFound:
                locals['error'] = _("Baremetal node deleted while waiting "
                                    "for deployment of instance %s")

            if (CONF.baremetal.pxe_deploy_timeout and
                    timeutils.utcnow() > expiration):
                locals['error'] = _("Timeout reached while waiting for "
                                     "PXE deploy of instance %s")
            if locals['error']:
                raise loopingcall.LoopingCallDone()
Example #24
    def test_sync_instances(self):
        call_info = {}

        def sync_instances(self, context, **kwargs):
            call_info['project_id'] = kwargs.get('project_id')
            call_info['updated_since'] = kwargs.get('updated_since')
            call_info['deleted'] = kwargs.get('deleted')

        self.stubs.Set(cells_rpcapi.CellsAPI, 'sync_instances', sync_instances)

        req = self._get_request("cells/sync_instances")
        req.environ['nova.context'] = self.context
        body = {}
        self.controller.sync_instances(req, body=body)
        self.assertIsNone(call_info['project_id'])
        self.assertIsNone(call_info['updated_since'])

        body = {'project_id': 'test-project'}
        self.controller.sync_instances(req, body=body)
        self.assertEqual(call_info['project_id'], 'test-project')
        self.assertIsNone(call_info['updated_since'])

        expected = timeutils.utcnow().isoformat()
        if not expected.endswith("+00:00"):
            expected += "+00:00"

        body = {'updated_since': expected}
        self.controller.sync_instances(req, body=body)
        self.assertIsNone(call_info['project_id'])
        self.assertEqual(call_info['updated_since'], expected)

        body = {'updated_since': 'skjdfkjsdkf'}
        self.assertRaises(exc.HTTPBadRequest,
                self.controller.sync_instances, req, body=body)

        body = {'deleted': False}
        self.controller.sync_instances(req, body=body)
        self.assertIsNone(call_info['project_id'])
        self.assertIsNone(call_info['updated_since'])
        self.assertEqual(call_info['deleted'], False)

        body = {'deleted': 'False'}
        self.controller.sync_instances(req, body=body)
        self.assertIsNone(call_info['project_id'])
        self.assertIsNone(call_info['updated_since'])
        self.assertEqual(call_info['deleted'], False)

        body = {'deleted': 'True'}
        self.controller.sync_instances(req, body=body)
        self.assertIsNone(call_info['project_id'])
        self.assertIsNone(call_info['updated_since'])
        self.assertEqual(call_info['deleted'], True)

        body = {'deleted': 'foo'}
        self.assertRaises(exc.HTTPBadRequest,
                self.controller.sync_instances, req, body=body)

        body = {'foo': 'meow'}
        self.assertRaises(exc.HTTPBadRequest,
                self.controller.sync_instances, req, body=body)
Example #25
    def _wait_for_node_reboot(self, nodename):
        """Wait for xCAT node boot to complete."""
        locals = {'errstr':''}

        def _wait_for_reboot():
            out,err = self._exec_xcat_command("nodestat %s" % nodename)
            if err:
                locals['errstr'] = _("Error returned when quering node status"
                           " for node %s:%s") % (nodename, err)
                LOG.warning(locals['errstr'])
                raise loopingcall.LoopingCallDone()

            if out:
                node,status = out.split(": ")
                status = status.strip()
                if status == "sshd":
                    LOG.info(_("Rebooting node %s completed.")
                             % nodename)
                    raise loopingcall.LoopingCallDone()

            if (CONF.xcat.reboot_timeout and
                    timeutils.utcnow() > expiration):
                locals['errstr'] = _("Timeout while waiting for"
                           " rebooting node %s.") % nodename
                LOG.warning(locals['errstr'])
                raise loopingcall.LoopingCallDone()

        expiration = timeutils.utcnow() + datetime.timedelta(
                seconds=CONF.xcat.reboot_timeout)
        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_reboot)
        # default check every 5 seconds
        timer.start(interval=CONF.xcat.reboot_checking_interval).wait()

        if locals['errstr']:
            raise exception.xCATRebootFailure(locals['errstr'])
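Both this driver and the PXE deploy example (#23) use the same deadline pattern: compute expiration = timeutils.utcnow() + timedelta(seconds=timeout) once, then compare timeutils.utcnow() against it on every poll. A standalone sketch with plain datetime (wait_until and check_done are hypothetical names):

import datetime
import time

def wait_until(check_done, timeout_seconds, poll_interval=5):
    # Poll check_done() until it succeeds or the deadline passes.
    expiration = datetime.datetime.utcnow() + datetime.timedelta(
        seconds=timeout_seconds)
    while True:
        if check_done():
            return True
        if datetime.datetime.utcnow() > expiration:
            return False    # timed out
        time.sleep(poll_interval)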
Example #26
 def _force_reclaim(self):
     # Make sure that compute manager thinks the instance is
     # old enough to be expired
     the_past = timeutils.utcnow() + datetime.timedelta(hours=1)
     timeutils.set_time_override(override_time=the_past)
     ctxt = context.get_admin_context()
     self.compute._reclaim_queued_deletes(ctxt)
Example #27
    def setUp(self):
        super(SimpleTenantUsageControllerTest, self).setUp()
        self.controller = simple_tenant_usage.SimpleTenantUsageController()

        class FakeComputeAPI:
            def get_instance_type(self, context, flavor_type):
                if flavor_type == 1:
                    return flavors.get_default_flavor()
                else:
                    raise exception.InstanceTypeNotFound(flavor_type)

        self.compute_api = FakeComputeAPI()
        self.context = None

        now = timeutils.utcnow()
        self.baseinst = dict(display_name='foo',
                             launched_at=now - datetime.timedelta(1),
                             terminated_at=now,
                             instance_type_id=1,
                             vm_state='deleted',
                             deleted=0)
        basetype = flavors.get_default_flavor()
        sys_meta = utils.dict_to_metadata(
            flavors.save_flavor_info({}, basetype))
        self.baseinst['system_metadata'] = sys_meta
        self.basetype = flavors.extract_flavor(self.baseinst)
Example #28
    def _prerun_134(self, engine):
        now = timeutils.utcnow()
        data = [{
            'id': 1,
            'uuid': '1d739808-d7ec-4944-b252-f8363e119755',
            'mac': '00:00:00:00:00:01',
            'start_period': now,
            'last_refreshed': now + datetime.timedelta(seconds=10),
            'bw_in': 100000,
            'bw_out': 200000,
            }, {
            'id': 2,
            'uuid': '1d739808-d7ec-4944-b252-f8363e119756',
            'mac': '2a:f2:48:31:c1:60',
            'start_period': now,
            'last_refreshed': now + datetime.timedelta(seconds=20),
            'bw_in': 1000000000,
            'bw_out': 200000000,
            }, {
            'id': 3,
            # This is intended to be the same as above.
            'uuid': '1d739808-d7ec-4944-b252-f8363e119756',
            'mac': '00:00:00:00:00:02',
            'start_period': now,
            'last_refreshed': now + datetime.timedelta(seconds=30),
            'bw_in': 0,
            'bw_out': 0,
            }]

        bw_usage_cache = get_table(engine, 'bw_usage_cache')
        engine.execute(bw_usage_cache.insert(), data)
        return data
Example #29
    def test_validate_ec2_timestamp_advanced_time(self):

        # EC2 request with Timestamp in advanced time
        timestamp = timeutils.utcnow() + datetime.timedelta(seconds=250)
        params = {"Timestamp": timeutils.strtime(timestamp, "%Y-%m-%dT%H:%M:%SZ")}
        expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
        self.assertFalse(expired)
Example #30
    def index(self, req):
        """
        Return a list of all running services. Filter by host & service name.
        """
        context = req.environ['nova.context']
        authorize(context)
        now = timeutils.utcnow()
        services = db.service_get_all(context)

        host = ''
        if 'host' in req.GET:
            host = req.GET['host']
        service = ''
        if 'service' in req.GET:
            service = req.GET['service']
        if host:
            services = [s for s in services if s['host'] == host]
        if service:
            services = [s for s in services if s['binary'] == service]

        svcs = []
        for svc in services:
            delta = now - (svc['updated_at'] or svc['created_at'])
            alive = abs(utils.total_seconds(delta)) <= CONF.service_down_time
            art = (alive and "up") or "down"
            active = 'enabled'
            if svc['disabled']:
                active = 'disabled'
            svcs.append({"binary": svc['binary'], 'host': svc['host'],
                         'zone': svc['availability_zone'],
                         'status': active, 'state': art,
                         'updated_at': svc['updated_at']})
        return {'services': svcs}
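The up/down decision above compares the age of the service's last heartbeat against CONF.service_down_time. A standalone sketch of that check (is_alive is a hypothetical helper; timedelta.total_seconds() replaces the utils.total_seconds() shim used for older Pythons):

import datetime

def is_alive(last_heartbeat, now, service_down_time):
    delta = now - last_heartbeat
    return abs(delta.total_seconds()) <= service_down_time

now = datetime.datetime.utcnow()
print(is_alive(now - datetime.timedelta(seconds=30), now, 60))    # True -> "up"
print(is_alive(now - datetime.timedelta(seconds=120), now, 60))   # False -> "down"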
Example #31
    def _shelve_instance(self, shelved_offload_time):
        CONF.set_override('shelved_offload_time', shelved_offload_time)
        db_instance = jsonutils.to_primitive(self._create_fake_instance())
        self.compute.run_instance(self.context, db_instance, {}, {}, [], None,
                                  None, True, None, False)
        instance = instance_obj.Instance.get_by_uuid(
            self.context,
            db_instance['uuid'],
            expected_attrs=['metadata', 'system_metadata'])
        image_id = 'fake_image_id'
        host = 'fake-mini'
        cur_time = timeutils.utcnow()
        timeutils.set_time_override(cur_time)
        instance.task_state = task_states.SHELVING
        instance.save()
        sys_meta = dict(instance.system_metadata)
        sys_meta['shelved_at'] = timeutils.strtime(at=cur_time)
        sys_meta['shelved_image_id'] = image_id
        sys_meta['shelved_host'] = host
        db_instance['system_metadata'] = utils.dict_to_metadata(sys_meta)

        self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
        self.mox.StubOutWithMock(self.compute.driver, 'snapshot')
        self.mox.StubOutWithMock(self.compute.driver, 'power_off')
        self.mox.StubOutWithMock(self.compute, '_get_power_state')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')

        self.compute._notify_about_instance_usage(self.context, instance,
                                                  'shelve.start')
        self.compute.driver.power_off(instance)
        self.compute._get_power_state(self.context, instance).AndReturn(123)
        self.compute.driver.snapshot(self.context, instance, 'fake_image_id',
                                     mox.IgnoreArg())

        update_values = {
            'power_state':
            123,
            'vm_state':
            vm_states.SHELVED,
            'task_state':
            None,
            'expected_task_state':
            [task_states.SHELVING, task_states.SHELVING_IMAGE_UPLOADING],
            'system_metadata':
            sys_meta
        }
        if CONF.shelved_offload_time == 0:
            update_values['task_state'] = task_states.SHELVING_OFFLOADING
        db.instance_update_and_get_original(
            self.context,
            instance['uuid'],
            update_values,
            update_cells=False,
            columns_to_join=['metadata', 'system_metadata'],
        ).AndReturn((db_instance, db_instance))
        self.compute._notify_about_instance_usage(self.context, instance,
                                                  'shelve.end')
        if CONF.shelved_offload_time == 0:
            self.compute._notify_about_instance_usage(self.context, instance,
                                                      'shelve_offload.start')
            self.compute.driver.power_off(instance)
            self.compute._get_power_state(self.context,
                                          instance).AndReturn(123)
            db.instance_update_and_get_original(
                self.context,
                instance['uuid'],
                {
                    'power_state':
                    123,
                    'host':
                    None,
                    'node':
                    None,
                    'vm_state':
                    vm_states.SHELVED_OFFLOADED,
                    'task_state':
                    None,
                    'expected_task_state':
                    [task_states.SHELVING, task_states.SHELVING_OFFLOADING]
                },
                update_cells=False,
                columns_to_join=['metadata', 'system_metadata'],
            ).AndReturn((db_instance, db_instance))
            self.compute._notify_about_instance_usage(self.context, instance,
                                                      'shelve_offload.end')
        self.mox.ReplayAll()

        self.compute.shelve_instance(self.context, instance, image_id=image_id)
Example #32
def stub_instance(id,
                  user_id='fake',
                  project_id='fake',
                  host=None,
                  vm_state=None,
                  task_state=None,
                  reservation_id="",
                  uuid=FAKE_UUID,
                  image_ref="10",
                  flavor_id="1",
                  name=None,
                  key_name='',
                  access_ipv4=None,
                  access_ipv6=None,
                  progress=0):

    if host is not None:
        host = str(host)

    if key_name:
        key_data = 'FAKE'
    else:
        key_data = ''

    # ReservationID isn't sent back, hack it in there.
    server_name = name or "server%s" % id
    if reservation_id != "":
        server_name = "reservation_%s" % (reservation_id, )

    instance = {
        "id": int(id),
        "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
        "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
        "admin_pass": "",
        "user_id": user_id,
        "project_id": project_id,
        "image_ref": image_ref,
        "kernel_id": "",
        "ramdisk_id": "",
        "launch_index": 0,
        "key_name": key_name,
        "key_data": key_data,
        "vm_state": vm_state or vm_states.BUILDING,
        "task_state": task_state,
        "memory_mb": 0,
        "vcpus": 0,
        "root_gb": 0,
        "hostname": "",
        "host": host,
        "instance_type": {},
        "user_data": "",
        "reservation_id": reservation_id,
        "mac_address": "",
        "scheduled_at": timeutils.utcnow(),
        "launched_at": timeutils.utcnow(),
        "terminated_at": timeutils.utcnow(),
        "availability_zone": "",
        "display_name": server_name,
        "display_description": "",
        "locked": False,
        "metadata": [],
        "access_ip_v4": access_ipv4,
        "access_ip_v6": access_ipv6,
        "uuid": uuid,
        "progress": progress
    }

    return instance
Example #33
    def consume_from_instance(self, instance):
        """Consume nodes entire resources regardless of instance request."""
        super(IronicNodeState, self).consume_from_instance(instance)

        self.updated = timeutils.utcnow()
Example #34
class SessionBase(object):
    """
    Base class for Fake Sessions
    """
    def __init__(self, uri):
        self._session = None

    def pool_get_default_SR(self, _1, pool_ref):
        return 'FAKE DEFAULT SR'

    def VBD_plug(self, _1, ref):
        rec = get_record('VBD', ref)
        if rec['currently_attached']:
            raise Failure(['DEVICE_ALREADY_ATTACHED', ref])
        rec['currently_attached'] = True
        rec['device'] = rec['userdevice']

    def VBD_unplug(self, _1, ref):
        rec = get_record('VBD', ref)
        if not rec['currently_attached']:
            raise Failure(['DEVICE_ALREADY_DETACHED', ref])
        rec['currently_attached'] = False
        rec['device'] = ''

    def VBD_add_to_other_config(self, _1, vbd_ref, key, value):
        db_ref = _db_content['VBD'][vbd_ref]
        if not 'other_config' in db_ref:
            db_ref['other_config'] = {}
        if key in db_ref['other_config']:
            raise Failure(
                ['MAP_DUPLICATE_KEY', 'VBD', 'other_config', vbd_ref, key])
        db_ref['other_config'][key] = value

    def VBD_get_other_config(self, _1, vbd_ref):
        db_ref = _db_content['VBD'][vbd_ref]
        if not 'other_config' in db_ref:
            return {}
        return db_ref['other_config']

    def PBD_create(self, _1, pbd_rec):
        pbd_ref = _create_object('PBD', pbd_rec)
        _db_content['PBD'][pbd_ref]['currently_attached'] = False
        return pbd_ref

    def PBD_plug(self, _1, pbd_ref):
        rec = get_record('PBD', pbd_ref)
        if rec['currently_attached']:
            raise Failure(['DEVICE_ALREADY_ATTACHED', rec])
        rec['currently_attached'] = True
        sr_ref = rec['SR']
        _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]

    def PBD_unplug(self, _1, pbd_ref):
        rec = get_record('PBD', pbd_ref)
        if not rec['currently_attached']:
            raise Failure(['DEVICE_ALREADY_DETACHED', rec])
        rec['currently_attached'] = False
        sr_ref = pbd_ref['SR']
        _db_content['SR'][sr_ref]['PBDs'].remove(pbd_ref)

    def SR_introduce(self, _1, sr_uuid, label, desc, type, content_type,
                     shared, sm_config):
        ref = None
        rec = None
        for ref, rec in _db_content['SR'].iteritems():
            if rec.get('uuid') == sr_uuid:
                break
        if rec:
            # make forgotten = 0 and return ref
            _db_content['SR'][ref]['forgotten'] = 0
            return ref
        else:
            # SR not found in db, so we create one
            params = {}
            params.update(locals())
            del params['self']
            sr_ref = _create_object('SR', params)
            _db_content['SR'][sr_ref]['uuid'] = sr_uuid
            _db_content['SR'][sr_ref]['forgotten'] = 0
            if type in ('iscsi'):
                # Just to be clear
                vdi_per_lun = True
            if vdi_per_lun:
                # we need to create a vdi because this introduce
                # is likely meant for a single vdi
                vdi_ref = create_vdi('', sr_ref)
                _db_content['SR'][sr_ref]['VDIs'] = [vdi_ref]
                _db_content['VDI'][vdi_ref]['SR'] = sr_ref
            return sr_ref

    def SR_forget(self, _1, sr_ref):
        _db_content['SR'][sr_ref]['forgotten'] = 1

    def SR_scan(self, _1, sr_ref):
        return

    def PIF_get_all_records_where(self, _1, _2):
        # TODO(salvatore-orlando): filter table on _2
        return _db_content['PIF']

    def VM_get_xenstore_data(self, _1, vm_ref):
        return _db_content['VM'][vm_ref].get('xenstore_data', {})

    def VM_remove_from_xenstore_data(self, _1, vm_ref, key):
        db_ref = _db_content['VM'][vm_ref]
        if not 'xenstore_data' in db_ref:
            return
        if key in db_ref['xenstore_data']:
            del db_ref['xenstore_data'][key]

    def VM_add_to_xenstore_data(self, _1, vm_ref, key, value):
        db_ref = _db_content['VM'][vm_ref]
        if not 'xenstore_data' in db_ref:
            db_ref['xenstore_data'] = {}
        db_ref['xenstore_data'][key] = value

    def VM_pool_migrate(self, _1, vm_ref, host_ref, options):
        pass

    def VDI_remove_from_other_config(self, _1, vdi_ref, key):
        db_ref = _db_content['VDI'][vdi_ref]
        if not 'other_config' in db_ref:
            return
        if key in db_ref['other_config']:
            del db_ref['other_config'][key]

    def VDI_add_to_other_config(self, _1, vdi_ref, key, value):
        db_ref = _db_content['VDI'][vdi_ref]
        if not 'other_config' in db_ref:
            db_ref['other_config'] = {}
        if key in db_ref['other_config']:
            raise Failure(
                ['MAP_DUPLICATE_KEY', 'VDI', 'other_config', vdi_ref, key])
        db_ref['other_config'][key] = value

    def VDI_copy(self, _1, vdi_to_copy_ref, sr_ref):
        db_ref = _db_content['VDI'][vdi_to_copy_ref]
        name_label = db_ref['name_label']
        read_only = db_ref['read_only']
        sharable = db_ref['sharable']
        other_config = db_ref['other_config'].copy()
        return create_vdi(name_label,
                          sr_ref,
                          sharable=sharable,
                          read_only=read_only,
                          other_config=other_config)

    def VDI_clone(self, _1, vdi_to_clone_ref):
        db_ref = _db_content['VDI'][vdi_to_clone_ref]
        sr_ref = db_ref['SR']
        return self.VDI_copy(_1, vdi_to_clone_ref, sr_ref)

    def host_compute_free_memory(self, _1, ref):
        #Always return 12GB available
        return 12 * 1024 * 1024 * 1024

    def _plugin_agent_version(self, method, args):
        return as_json(returncode='0', message='1.0')

    def _plugin_agent_key_init(self, method, args):
        return as_json(returncode='D0', message='1')

    def _plugin_agent_password(self, method, args):
        return as_json(returncode='0', message='success')

    def _plugin_agent_inject_file(self, method, args):
        return as_json(returncode='0', message='success')

    def _plugin_agent_resetnetwork(self, method, args):
        return as_json(returncode='0', message='success')

    def _plugin_noop(self, method, args):
        return ''

    def _plugin_pickle_noop(self, method, args):
        return pickle.dumps(None)

    def _plugin_migration_transfer_vhd(self, method, args):
        kwargs = pickle.loads(args['params'])['kwargs']
        vdi_ref = self.xenapi_request('VDI.get_by_uuid',
                                      (kwargs['vdi_uuid'], ))
        assert vdi_ref
        return pickle.dumps(None)

    _plugin_glance_upload_vhd = _plugin_pickle_noop
    _plugin_kernel_copy_vdi = _plugin_noop
    _plugin_kernel_create_kernel_ramdisk = _plugin_noop
    _plugin_kernel_remove_kernel_ramdisk = _plugin_noop
    _plugin_migration_move_vhds_into_sr = _plugin_noop

    def _plugin_xenhost_host_data(self, method, args):
        return jsonutils.dumps({
            'host_memory': {
                'total': 10,
                'overhead': 20,
                'free': 30,
                'free-computed': 40
            }
        })

    def _plugin_poweraction(self, method, args):
        return jsonutils.dumps({"power_action": method[5:]})

    _plugin_xenhost_host_reboot = _plugin_poweraction
    _plugin_xenhost_host_startup = _plugin_poweraction
    _plugin_xenhost_host_shutdown = _plugin_poweraction

    def _plugin_xenhost_set_host_enabled(self, method, args):
        enabled = 'enabled' if args.get('enabled') == 'true' else 'disabled'
        return jsonutils.dumps({"status": enabled})

    def _plugin_xenhost_host_uptime(self, method, args):
        return jsonutils.dumps({"uptime": "fake uptime"})

    def host_call_plugin(self, _1, _2, plugin, method, args):
        func = getattr(self, '_plugin_%s_%s' % (plugin, method), None)
        if not func:
            raise Exception('No simulation in host_call_plugin for %s,%s' %
                            (plugin, method))

        return func(method, args)

    def VDI_get_virtual_size(self, *args):
        return 1 * 1024 * 1024 * 1024

    def VDI_resize_online(self, *args):
        return 'derp'

    VDI_resize = VDI_resize_online

    def _VM_reboot(self, session, vm_ref):
        db_ref = _db_content['VM'][vm_ref]
        if db_ref['power_state'] != 'Running':
            raise Failure([
                'VM_BAD_POWER_STATE', 'fake-opaque-ref',
                db_ref['power_state'].lower(), 'halted'
            ])
        db_ref['power_state'] = 'Running'

    def VM_clean_reboot(self, session, vm_ref):
        return self._VM_reboot(session, vm_ref)

    def VM_hard_reboot(self, session, vm_ref):
        return self._VM_reboot(session, vm_ref)

    def VM_hard_shutdown(self, session, vm_ref):
        db_ref = _db_content['VM'][vm_ref]
        db_ref['power_state'] = 'Halted'

    VM_clean_shutdown = VM_hard_shutdown

    def pool_eject(self, session, host_ref):
        pass

    def pool_join(self, session, hostname, username, password):
        pass

    def pool_set_name_label(self, session, pool_ref, name):
        pass

    def host_migrate_receive(self, session, destref, nwref, options):
        return "fake_migrate_data"

    def VM_assert_can_migrate(self, session, vmref, migrate_data, live,
                              vdi_map, vif_map, options):
        pass

    def VM_migrate_send(self, session, mref, migrate_data, live, vdi_map,
                        vif_map, options):
        pass

    def network_get_all_records_where(self, _1, filter):
        return self.xenapi.network.get_all_records()

    def xenapi_request(self, methodname, params):
        if methodname.startswith('login'):
            self._login(methodname, params)
            return None
        elif methodname == 'logout' or methodname == 'session.logout':
            self._logout()
            return None
        else:
            full_params = (self._session, ) + params
            meth = getattr(self, methodname, None)
            if meth is None:
                LOG.debug(_('Raising NotImplemented'))
                raise NotImplementedError(
                    _('xenapi.fake does not have an implementation for %s') %
                    methodname)
            return meth(*full_params)

    def _login(self, method, params):
        self._session = str(uuid.uuid4())
        _session_info = {
            'uuid': str(uuid.uuid4()),
            'this_host': _db_content['host'].keys()[0]
        }
        _db_content['session'][self._session] = _session_info

    def _logout(self):
        s = self._session
        self._session = None
        if s not in _db_content['session']:
            raise exception.NovaException(
                "Logging out a session that is invalid or already logged "
                "out: %s" % s)
        del _db_content['session'][s]

    def __getattr__(self, name):
        if name == 'handle':
            return self._session
        elif name == 'xenapi':
            return _Dispatcher(self.xenapi_request, None)
        elif name.startswith('login') or name.startswith('slave_local'):
            return lambda *params: self._login(name, params)
        elif name.startswith('Async'):
            return lambda *params: self._async(name, params)
        elif '.' in name:
            impl = getattr(self, name.replace('.', '_'))
            if impl is not None:

                def callit(*params):
                    localname = name
                    LOG.debug(_('Calling %(localname)s %(impl)s') % locals())
                    self._check_session(params)
                    return impl(*params)

                return callit
        if self._is_gettersetter(name, True):
            LOG.debug(_('Calling getter %s'), name)
            return lambda *params: self._getter(name, params)
        elif self._is_gettersetter(name, False):
            LOG.debug(_('Calling setter %s'), name)
            return lambda *params: self._setter(name, params)
        elif self._is_create(name):
            return lambda *params: self._create(name, params)
        elif self._is_destroy(name):
            return lambda *params: self._destroy(name, params)
        else:
            return None

    def _is_gettersetter(self, name, getter):
        bits = name.split('.')
        return (len(bits) == 2 and bits[0] in _CLASSES
                and bits[1].startswith(getter and 'get_' or 'set_'))

    def _is_create(self, name):
        return self._is_method(name, 'create')

    def _is_destroy(self, name):
        return self._is_method(name, 'destroy')

    def _is_method(self, name, meth):
        bits = name.split('.')
        return (len(bits) == 2 and bits[0] in _CLASSES and bits[1] == meth)

    def _getter(self, name, params):
        self._check_session(params)
        (cls, func) = name.split('.')
        if func == 'get_all':
            self._check_arg_count(params, 1)
            return get_all(cls)

        if func == 'get_all_records':
            self._check_arg_count(params, 1)
            return get_all_records(cls)

        if func == 'get_record':
            self._check_arg_count(params, 2)
            return get_record(cls, params[1])

        if func in ('get_by_name_label', 'get_by_uuid'):
            self._check_arg_count(params, 2)
            return_singleton = (func == 'get_by_uuid')
            return self._get_by_field(_db_content[cls],
                                      func[len('get_by_'):],
                                      params[1],
                                      return_singleton=return_singleton)

        if len(params) == 2:
            field = func[len('get_'):]
            ref = params[1]
            if (ref in _db_content[cls]):
                if (field in _db_content[cls][ref]):
                    return _db_content[cls][ref][field]
            else:
                raise Failure(['HANDLE_INVALID', cls, ref])

        LOG.debug(_('Raising NotImplemented'))
        raise NotImplementedError(
            _('xenapi.fake does not have an implementation for %s or it has '
              'been called with the wrong number of arguments') % name)

    def _setter(self, name, params):
        self._check_session(params)
        (cls, func) = name.split('.')

        if len(params) == 3:
            field = func[len('set_'):]
            ref = params[1]
            val = params[2]

            if (ref in _db_content[cls] and field in _db_content[cls][ref]):
                _db_content[cls][ref][field] = val
                return

        LOG.debug(_('Raising NotImplemented'))
        raise NotImplementedError(
            'xenapi.fake does not have an implementation for %s or it has '
            'been called with the wrong number of arguments or the database '
            'is missing that field' % name)

    def _create(self, name, params):
        self._check_session(params)
        is_sr_create = name == 'SR.create'
        is_vlan_create = name == 'VLAN.create'
        # Storage Repositories have a different API
        expected = is_sr_create and 10 or is_vlan_create and 4 or 2
        self._check_arg_count(params, expected)
        (cls, _) = name.split('.')
        ref = (is_sr_create and _create_sr(cls, params) or is_vlan_create
               and _create_vlan(params[1], params[2], params[3])
               or _create_object(cls, params[1]))

        # Call hook to provide any fixups needed (ex. creating backrefs)
        after_hook = 'after_%s_create' % cls
        if after_hook in globals():
            globals()[after_hook](ref, params[1])

        obj = get_record(cls, ref)

        # Add RO fields
        if cls == 'VM':
            obj['power_state'] = 'Halted'
        return ref

    def _destroy(self, name, params):
        self._check_session(params)
        self._check_arg_count(params, 2)
        table = name.split('.')[0]
        ref = params[1]
        if ref not in _db_content[table]:
            raise Failure(['HANDLE_INVALID', table, ref])

        # Call destroy function (if exists)
        destroy_func = globals().get('destroy_%s' % table.lower())
        if destroy_func:
            destroy_func(ref)
        else:
            del _db_content[table][ref]

    def _async(self, name, params):
        task_ref = create_task(name)
        task = _db_content['task'][task_ref]
        func = name[len('Async.'):]
        try:
            result = self.xenapi_request(func, params[1:])
            if result:
                result = as_value(result)
            task['result'] = result
            task['status'] = 'success'
        except Failure as exc:
            task['error_info'] = exc.details
            task['status'] = 'failed'
        task['finished'] = timeutils.utcnow()
        return task_ref
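
A minimal, hypothetical walk-through of the dispatch rules implemented by __getattr__ above, assuming 'session' is an instance of this fake XenAPI session class and that _login returns a session reference. Dotted names have to be resolved with getattr(), since "session.VM.get_all" is not a single attribute access:

    # Illustrative only -- record and field names are assumptions.
    sess_ref = session.login_with_password('root', 'pass')                   # 'login*' -> _login
    vm_ref = getattr(session, 'VM.create')(sess_ref, {'name_label': 'vm1'})  # -> _create
    all_vms = getattr(session, 'VM.get_all')(sess_ref)                       # 'get_*'  -> _getter
    getattr(session, 'VM.set_name_label')(sess_ref, vm_ref, 'vm2')           # 'set_*'  -> _setter
    getattr(session, 'VM.destroy')(sess_ref, vm_ref)                         # -> _destroy
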
Exemple #35
0
 def test_validate_ec2_timestamp_advanced_time_expired(self):
     timestamp = timeutils.utcnow() + datetime.timedelta(seconds=350)
     params = {'Timestamp': timeutils.strtime(timestamp,
                                        "%Y-%m-%dT%H:%M:%SZ")}
     expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
     self.assertTrue(expired)
Exemple #36
0
 def delete(self, session=None):
     """Delete this object."""
     self.deleted = True
     self.deleted_at = timeutils.utcnow()
     self.save(session=session)
def stub_instance(id,
                  user_id=None,
                  project_id=None,
                  host=None,
                  node=None,
                  vm_state=None,
                  task_state=None,
                  reservation_id="",
                  uuid=FAKE_UUID,
                  image_ref="10",
                  flavor_id="1",
                  name=None,
                  key_name='',
                  access_ipv4=None,
                  access_ipv6=None,
                  progress=0,
                  auto_disk_config=False,
                  display_name=None,
                  include_fake_metadata=True,
                  config_drive=None,
                  power_state=None,
                  nw_cache=None,
                  metadata=None,
                  security_groups=None,
                  root_device_name=None,
                  limit=None,
                  marker=None,
                  launched_at=timeutils.utcnow(),
                  terminated_at=timeutils.utcnow(),
                  availability_zone='',
                  locked_by=None,
                  cleaned=False):

    if user_id is None:
        user_id = 'fake_user'
    if project_id is None:
        project_id = 'fake_project'

    if metadata:
        metadata = [{'key': k, 'value': v} for k, v in metadata.items()]
    elif include_fake_metadata:
        metadata = [models.InstanceMetadata(key='seq', value=str(id))]
    else:
        metadata = []

    inst_type = flavors.get_flavor_by_flavor_id(int(flavor_id))
    sys_meta = flavors.save_flavor_info({}, inst_type)

    if host is not None:
        host = str(host)

    if key_name:
        key_data = 'FAKE'
    else:
        key_data = ''

    if security_groups is None:
        security_groups = [{
            "id": 1,
            "name": "test",
            "description": "Foo:",
            "project_id": "project",
            "user_id": "user",
            "created_at": None,
            "updated_at": None,
            "deleted_at": None,
            "deleted": False
        }]

    # ReservationID isn't sent back, hack it in there.
    server_name = name or "server%s" % id
    if reservation_id != "":
        server_name = "reservation_%s" % (reservation_id, )

    info_cache = create_info_cache(nw_cache)

    instance = {
        "id": int(id),
        "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
        "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
        "deleted_at": datetime.datetime(2010, 12, 12, 10, 0, 0),
        "deleted": None,
        "user_id": user_id,
        "project_id": project_id,
        "image_ref": image_ref,
        "kernel_id": "",
        "ramdisk_id": "",
        "launch_index": 0,
        "key_name": key_name,
        "key_data": key_data,
        "config_drive": config_drive,
        "vm_state": vm_state or vm_states.BUILDING,
        "task_state": task_state,
        "power_state": power_state,
        "memory_mb": 0,
        "vcpus": 0,
        "root_gb": 0,
        "ephemeral_gb": 0,
        "ephemeral_key_uuid": None,
        "hostname": display_name or server_name,
        "host": host,
        "node": node,
        "instance_type_id": 1,
        "instance_type": dict(inst_type),
        "user_data": "",
        "reservation_id": reservation_id,
        "mac_address": "",
        "scheduled_at": timeutils.utcnow(),
        "launched_at": launched_at,
        "terminated_at": terminated_at,
        "availability_zone": availability_zone,
        "display_name": display_name or server_name,
        "display_description": "",
        "locked": locked_by != None,
        "locked_by": locked_by,
        "metadata": metadata,
        "access_ip_v4": access_ipv4,
        "access_ip_v6": access_ipv6,
        "uuid": uuid,
        "progress": progress,
        "auto_disk_config": auto_disk_config,
        "name": "instance-%s" % id,
        "shutdown_terminate": True,
        "disable_terminate": False,
        "security_groups": security_groups,
        "root_device_name": root_device_name,
        "system_metadata": utils.dict_to_metadata(sys_meta),
        "pci_devices": [],
        "vm_mode": "",
        "default_swap_device": "",
        "default_ephemeral_device": "",
        "launched_on": "",
        "cell_name": "",
        "architecture": "",
        "os_type": "",
        "cleaned": cleaned
    }

    instance.update(info_cache)
    instance['info_cache']['instance_uuid'] = instance['uuid']

    return instance
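
A hedged usage sketch of the stub_instance helper above (argument values are illustrative; in real tests the flavors lookup is stubbed out):

    # Illustrative only: build a fake instance dict for API tests.
    inst = stub_instance(1, user_id='fake_user', project_id='fake_project',
                         vm_state=vm_states.ACTIVE, metadata={'foo': 'bar'})
    # inst['uuid'] == FAKE_UUID, inst['hostname'] == 'server1',
    # inst['metadata'] == [{'key': 'foo', 'value': 'bar'}]
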
Exemple #38
0
 def __call__(self, req):
     start = timeutils.utcnow()
     rv = req.get_response(self.application)
     self.log_request_completion(rv, req, start)
     return rv
Exemple #39
0
    def __call__(self, request):
        now = timeutils.utcnow()
        reqBody = "-"
        heartBeatLog = False
        content_type = str(request.content_type)
        if 'xml' in content_type or 'json' in content_type:
            if (request.content_length is not None and
                    request.content_length < 10240):
                reqBody = str(request.body) or '-'
                if HWExtend.hasSensitiveStr(reqBody):
                    reqBody = '-'

        reqBody = h_utils._filter_sensitive_data(reqBody)
        data = {
            'remote_addr': request.remote_addr,
            'remote_user': request.remote_user or '-',
            'token_id': "None",
            'request_datetime': '%s' % now.strftime(APACHE_TIME_FORMAT),
            'response_datetime': '%s' % now.strftime(APACHE_TIME_FORMAT),
            'method': request.method,
            'url': request.url,
            'http_version': request.http_version,
            'status': 500,
            'content_length': '-',
            'request_body': reqBody,
            'instance_id': '-'
        }
        token = ''
        try:
            token = request.headers['X-Auth-Token']
            token = HWExtend.b64encodeToken(token)
        except Exception:
            token = "-"
        try:
            response = request.get_response(self.application)
            data['status'] = response.status_int
            data['content_length'] = response.content_length or '-'
        finally:
            # must be calculated *after* the application has been called
            now = timeutils.utcnow()
            data['token_id'] = token
            if "GET" in data['method'] and "/tokens/" in data['url']:
                Pos = data['url'].find("tokens") + 7
                logToken = data['url'][Pos:Pos + 32]
                encodedToken = HWExtend.b64encodeToken(logToken)
                data['url'] = data['url'].replace(logToken, encodedToken)
            elif "POST" in data['method'] and data['url'].endswith("/servers"):
                if int(data['status']) < 400:
                    try:
                        resp_body = json.loads(response.body)
                        vm_server = resp_body.get('server', None)
                        if vm_server is not None:
                            instance_id = vm_server.get('id', None)
                            if instance_id is not None:
                                data['instance_id'] = instance_id
                    except Exception:
                        pass
            elif "OPTIONS" in data['method'] and data['url'].endswith(
                    ":%s/" % self.port):
                heartBeatLog = True

            if not heartBeatLog:
                #timeutils may not return UTC, so we can't hardcode +0000
                data['response_datetime'] = '%s' % (
                    now.strftime(APACHE_TIME_FORMAT))
                log.info(DRM_LOG_FORMAT % data, extra={"type": "operate"})
        return response
    def test_serializer_index(self):
        serializer = simple_tenant_usage.SimpleTenantUsagesTemplate()
        today = timeutils.utcnow()
        yesterday = today - datetime.timedelta(days=1)
        raw_usages = [
            dict(
                tenant_id='tenant1',
                total_local_gb_usage=1024,
                total_vcpus_usage=23,
                total_memory_mb_usage=512,
                total_hours=24,
                start=yesterday,
                stop=today,
                server_usages=[
                    dict(
                        instance_id='00000000-0000-0000-0000-0000000000000001',
                        name='test1',
                        hours=24,
                        memory_mb=1024,
                        local_gb=50,
                        vcpus=2,
                        tenant_id='tenant1',
                        flavor='m1.small',
                        started_at=yesterday,
                        ended_at=today,
                        state='terminated',
                        uptime=86400),
                    dict(
                        instance_id='00000000-0000-0000-0000-0000000000000002',
                        name='test2',
                        hours=42,
                        memory_mb=4201,
                        local_gb=25,
                        vcpus=1,
                        tenant_id='tenant1',
                        flavor='m1.tiny',
                        started_at=today,
                        ended_at=yesterday,
                        state='terminated',
                        uptime=43200),
                ],
            ),
            dict(
                tenant_id='tenant2',
                total_local_gb_usage=512,
                total_vcpus_usage=32,
                total_memory_mb_usage=1024,
                total_hours=42,
                start=today,
                stop=yesterday,
                server_usages=[
                    dict(
                        instance_id='00000000-0000-0000-0000-0000000000000003',
                        name='test3',
                        hours=24,
                        memory_mb=1024,
                        local_gb=50,
                        vcpus=2,
                        tenant_id='tenant2',
                        flavor='m1.small',
                        started_at=yesterday,
                        ended_at=today,
                        state='terminated',
                        uptime=86400),
                    dict(
                        instance_id='00000000-0000-0000-0000-0000000000000002',
                        name='test2',
                        hours=42,
                        memory_mb=4201,
                        local_gb=25,
                        vcpus=1,
                        tenant_id='tenant4',
                        flavor='m1.tiny',
                        started_at=today,
                        ended_at=yesterday,
                        state='terminated',
                        uptime=43200),
                ],
            ),
        ]
        tenant_usages = dict(tenant_usages=raw_usages)
        text = serializer.serialize(tenant_usages)

        tree = etree.fromstring(text)

        self.assertEqual('tenant_usages', tree.tag)
        self.assertEqual(len(raw_usages), len(tree))
        for idx, child in enumerate(tree):
            self._verify_tenant_usage(raw_usages[idx], child)
from nova.openstack.common import jsonutils
from nova.openstack.common import policy as common_policy
from nova.openstack.common import timeutils
from nova import policy
from nova import test
from nova.tests.api.openstack import fakes
from nova import utils

SERVERS = 5
TENANTS = 2
HOURS = 24
ROOT_GB = 10
EPHEMERAL_GB = 20
MEMORY_MB = 1024
VCPUS = 2
NOW = timeutils.utcnow()
START = NOW - datetime.timedelta(hours=HOURS)
STOP = NOW

FAKE_INST_TYPE = {
    'id': 1,
    'vcpus': VCPUS,
    'root_gb': ROOT_GB,
    'ephemeral_gb': EPHEMERAL_GB,
    'memory_mb': MEMORY_MB,
    'name': 'fakeflavor',
    'flavorid': 'foo',
    'rxtx_factor': 1.0,
    'vcpu_weight': 1,
    'swap': 0,
    'created_at': None,
Exemple #42
0
def last_completed_audit_period(unit=None, before=None):
    """This method gives you the most recently *completed* audit period.

    arguments:
            units: string, one of 'hour', 'day', 'month', 'year'
                    Periods normally begin at the beginning (UTC) of the
                    period unit (So a 'day' period begins at midnight UTC,
                    a 'month' unit on the 1st, a 'year' on Jan, 1)
                    unit string may be appended with an optional offset
                    like so:  'day@18'  This will begin the period at 18:00
                    UTC.  'month@15' starts a monthly period on the 15th,
                    and year@3 begins a yearly one on March 1st.
            before: Give the audit period most recently completed before
                    <timestamp>. Defaults to now.


    returns:  2 tuple of datetimes (begin, end)
              The begin timestamp of this audit period is the same as the
              end of the previous.
    """
    if not unit:
        unit = CONF.instance_usage_audit_period

    offset = 0
    if '@' in unit:
        unit, offset = unit.split("@", 1)
        offset = int(offset)

    if before is not None:
        rightnow = before
    else:
        rightnow = timeutils.utcnow()
    if unit not in ('month', 'day', 'year', 'hour'):
        raise ValueError('Time period must be hour, day, month or year')
    if unit == 'month':
        if offset == 0:
            offset = 1
        end = datetime.datetime(day=offset,
                                month=rightnow.month,
                                year=rightnow.year)
        if end >= rightnow:
            year = rightnow.year
            if 1 >= rightnow.month:
                year -= 1
                month = 12 + (rightnow.month - 1)
            else:
                month = rightnow.month - 1
            end = datetime.datetime(day=offset, month=month, year=year)
        year = end.year
        if 1 >= end.month:
            year -= 1
            month = 12 + (end.month - 1)
        else:
            month = end.month - 1
        begin = datetime.datetime(day=offset, month=month, year=year)

    elif unit == 'year':
        if offset == 0:
            offset = 1
        end = datetime.datetime(day=1, month=offset, year=rightnow.year)
        if end >= rightnow:
            end = datetime.datetime(day=1,
                                    month=offset,
                                    year=rightnow.year - 1)
            begin = datetime.datetime(day=1,
                                      month=offset,
                                      year=rightnow.year - 2)
        else:
            begin = datetime.datetime(day=1,
                                      month=offset,
                                      year=rightnow.year - 1)

    elif unit == 'day':
        end = datetime.datetime(hour=offset,
                                day=rightnow.day,
                                month=rightnow.month,
                                year=rightnow.year)
        if end >= rightnow:
            end = end - datetime.timedelta(days=1)
        begin = end - datetime.timedelta(days=1)

    elif unit == 'hour':
        end = rightnow.replace(minute=offset, second=0, microsecond=0)
        if end >= rightnow:
            end = end - datetime.timedelta(hours=1)
        begin = end - datetime.timedelta(hours=1)

    return (begin, end)
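
A short worked example of the audit-period helper above, with a fixed 'before' so the result is deterministic:

    # With unit 'day@18', periods run from 18:00 UTC to 18:00 UTC the next day.
    before = datetime.datetime(2013, 6, 10, 12, 0, 0)
    begin, end = last_completed_audit_period(unit='day@18', before=before)
    # end   -> 2013-06-09 18:00:00 (the most recent 18:00 boundary already past)
    # begin -> 2013-06-08 18:00:00
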
Exemple #43
0
def service_is_up(service):
    """Check whether a service is up based on last heartbeat."""
    last_heartbeat = service['updated_at'] or service['created_at']
    # Timestamps in DB are UTC.
    elapsed = total_seconds(timeutils.utcnow() - last_heartbeat)
    return abs(elapsed) <= FLAGS.service_down_time
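
For context, a small sketch of the heartbeat check above, assuming FLAGS.service_down_time has its usual value of 60 seconds:

    # Illustrative only: a service whose last heartbeat was 30s ago is "up".
    service = {'created_at': timeutils.utcnow() - datetime.timedelta(minutes=10),
               'updated_at': timeutils.utcnow() - datetime.timedelta(seconds=30)}
    service_is_up(service)    # True: ~30s elapsed <= service_down_time
    service = {'created_at': service['created_at'], 'updated_at': None}
    service_is_up(service)    # False: falls back to created_at, ~10 minutes ago
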

import datetime

from nova import db
from nova.objects import compute_node
from nova.objects import service
from nova.openstack.common import timeutils
from nova.tests.objects import test_objects

NOW = timeutils.utcnow().replace(microsecond=0)
fake_compute_node = {
    'created_at': NOW,
    'updated_at': None,
    'deleted_at': None,
    'deleted': False,
    'id': 123,
    'service_id': 456,
    'vcpus': 4,
    'memory_mb': 4096,
    'local_gb': 1024,
    'vcpus_used': 2,
    'memory_mb_used': 2048,
    'local_gb_used': 512,
    'hypervisor_type': 'Hyper-Dan-VM-ware',
    'hypervisor_version': 1001,
Exemple #45
0
 def update_capacities(self, capacities):
     """Update capacity information for a cell."""
     self.last_seen = timeutils.utcnow()
     self.capacities = capacities
 def auth_token_create(context, token):
     fake_token = FakeToken(created_at=timeutils.utcnow(), **token)
     FakeAuthDatabase.data[fake_token.token_hash] = fake_token
     FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token
     return fake_token
Exemple #47
0
    def test_sync_instances(self):
        call_info = {}

        def sync_instances(self, context, **kwargs):
            call_info['project_id'] = kwargs.get('project_id')
            call_info['updated_since'] = kwargs.get('updated_since')
            call_info['deleted'] = kwargs.get('deleted')

        self.stubs.Set(cells_rpcapi.CellsAPI, 'sync_instances', sync_instances)

        req = self._get_request("cells/sync_instances")
        body = {}
        self.controller.sync_instances(req, body=body)
        self.assertIsNone(call_info['project_id'])
        self.assertIsNone(call_info['updated_since'])

        body = {'project_id': 'test-project'}
        self.controller.sync_instances(req, body=body)
        self.assertEqual(call_info['project_id'], 'test-project')
        self.assertIsNone(call_info['updated_since'])

        expected = timeutils.utcnow().isoformat()
        if not expected.endswith("+00:00"):
            expected += "+00:00"

        body = {'updated_since': expected}
        self.controller.sync_instances(req, body=body)
        self.assertIsNone(call_info['project_id'])
        self.assertEqual(call_info['updated_since'], expected)

        body = {'updated_since': 'skjdfkjsdkf'}
        self.assertRaises(exc.HTTPBadRequest,
                          self.controller.sync_instances,
                          req,
                          body=body)

        body = {'deleted': False}
        self.controller.sync_instances(req, body=body)
        self.assertIsNone(call_info['project_id'])
        self.assertIsNone(call_info['updated_since'])
        self.assertEqual(call_info['deleted'], False)

        body = {'deleted': 'False'}
        self.controller.sync_instances(req, body=body)
        self.assertIsNone(call_info['project_id'])
        self.assertIsNone(call_info['updated_since'])
        self.assertEqual(call_info['deleted'], False)

        body = {'deleted': 'True'}
        self.controller.sync_instances(req, body=body)
        self.assertIsNone(call_info['project_id'])
        self.assertIsNone(call_info['updated_since'])
        self.assertEqual(call_info['deleted'], True)

        body = {'deleted': 'foo'}
        self.assertRaises(exc.HTTPBadRequest,
                          self.controller.sync_instances,
                          req,
                          body=body)

        body = {'foo': 'meow'}
        self.assertRaises(exc.HTTPBadRequest,
                          self.controller.sync_instances,
                          req,
                          body=body)
Exemple #48
0
    def test_unshelve_volume_backed(self):
        db_instance = jsonutils.to_primitive(self._create_fake_instance())
        host = 'fake-mini'
        node = test_compute.NODENAME
        limits = {}
        filter_properties = {'limits': limits}
        cur_time = timeutils.utcnow()
        cur_time_tz = cur_time.replace(tzinfo=iso8601.iso8601.Utc())
        timeutils.set_time_override(cur_time)
        self.compute.run_instance(self.context, db_instance, {}, {}, [], None,
                                  None, True, None, False)
        instance = instance_obj.Instance.get_by_uuid(
            self.context,
            db_instance['uuid'],
            expected_attrs=['metadata', 'system_metadata'])
        instance.task_state = task_states.UNSHELVING
        instance.save()
        sys_meta = dict(instance.system_metadata)
        sys_meta['shelved_at'] = timeutils.strtime(at=cur_time)
        sys_meta['shelved_image_id'] = None
        sys_meta['shelved_host'] = host

        self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
        self.mox.StubOutWithMock(self.compute, '_prep_block_device')
        self.mox.StubOutWithMock(self.compute.driver, 'spawn')
        self.mox.StubOutWithMock(self.compute, '_get_power_state')
        self.mox.StubOutWithMock(self.rt, 'instance_claim')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')

        self.compute._notify_about_instance_usage(self.context, instance,
                                                  'unshelve.start')
        db.instance_update_and_get_original(
            self.context,
            instance['uuid'], {
                'task_state': task_states.SPAWNING
            },
            update_cells=False,
            columns_to_join=['metadata', 'system_metadata']).AndReturn(
                (db_instance, db_instance))
        self.compute._prep_block_device(self.context, instance,
                                        mox.IgnoreArg()).AndReturn('fake_bdm')
        db_instance['key_data'] = None
        db_instance['auto_disk_config'] = None
        self.rt.instance_claim(self.context, instance, limits).AndReturn(
            claims.Claim(db_instance, self.rt, _fake_resources()))
        self.compute.driver.spawn(self.context,
                                  instance,
                                  None,
                                  injected_files=[],
                                  admin_password=None,
                                  network_info=[],
                                  block_device_info='fake_bdm')
        self.compute._get_power_state(self.context, instance).AndReturn(123)
        db.instance_update_and_get_original(
            self.context,
            instance['uuid'], {
                'power_state': 123,
                'vm_state': vm_states.ACTIVE,
                'task_state': None,
                'key_data': None,
                'auto_disk_config': False,
                'expected_task_state': task_states.SPAWNING,
                'launched_at': cur_time_tz
            },
            update_cells=False,
            columns_to_join=['metadata', 'system_metadata']).AndReturn(
                (db_instance, db_instance))
        self.compute._notify_about_instance_usage(self.context, instance,
                                                  'unshelve.end')
        self.mox.ReplayAll()

        self.compute.unshelve_instance(self.context,
                                       instance,
                                       image=None,
                                       filter_properties=filter_properties,
                                       node=node)
Exemple #49
0
    def __init__(self,
                 user_id,
                 project_id,
                 is_admin=None,
                 read_deleted="no",
                 roles=None,
                 remote_address=None,
                 timestamp=None,
                 request_id=None,
                 auth_token=None,
                 overwrite=True,
                 quota_class=None,
                 user_name=None,
                 project_name=None,
                 service_catalog=None,
                 instance_lock_checked=False,
                 **kwargs):
        """
        :param read_deleted: 'no' indicates deleted records are hidden, 'yes'
            indicates deleted records are visible, 'only' indicates that
            *only* deleted records are visible.

        :param overwrite: Set to False to ensure that the greenthread local
            copy of the index is not overwritten.

        :param kwargs: Extra arguments that might be present, but we ignore
            because they possibly came in from older rpc messages.
        """
        if kwargs:
            LOG.warn(
                _('Arguments dropped when creating context: %s') % str(kwargs))

        self.user_id = user_id
        self.project_id = project_id
        self.roles = roles or []
        self.is_admin = is_admin
        if self.is_admin is None:
            self.is_admin = policy.check_is_admin(self.roles)
        self.read_deleted = read_deleted
        self.remote_address = remote_address
        if not timestamp:
            timestamp = timeutils.utcnow()
        if isinstance(timestamp, basestring):
            timestamp = timeutils.parse_strtime(timestamp)
        self.timestamp = timestamp
        if not request_id:
            request_id = generate_request_id()
        self.request_id = request_id
        self.auth_token = auth_token
        self.service_catalog = service_catalog
        self.instance_lock_checked = instance_lock_checked

        # NOTE(markmc): this attribute is currently only used by the
        # rs_limits turnstile pre-processor.
        # See https://lists.launchpad.net/openstack/msg12200.html
        self.quota_class = quota_class
        self.user_name = user_name
        self.project_name = project_name

        if overwrite or not hasattr(local.store, 'context'):
            self.update_store()
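
A hedged usage sketch of the request context constructor above (assuming this is nova.context.RequestContext; values are illustrative):

    # Illustrative only.
    ctxt = RequestContext('fake-user', 'fake-project',
                          roles=['admin'],        # is_admin derived via policy
                          read_deleted='yes',     # deleted records visible
                          overwrite=False)        # leave the thread-local store alone
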
Exemple #50
0
 def update_capabilities(self, cell_metadata):
     """Update cell capabilities for a cell."""
     self.last_seen = timeutils.utcnow()
     self.capabilities = cell_metadata
Exemple #51
0
 def consume_from_instance(self, instance):
     """Consume nodes entire resources regardless of instance request."""
     self.free_ram_mb = 0
     self.free_disk_mb = 0
     self.vcpus_used = self.vcpus_total
     self.updated = timeutils.utcnow()
Exemple #52
0
 def _get_time_now(self):
     """Get current UTC. Broken out for testing."""
     return timeutils.utcnow()
Exemple #53
0
 def test_get_instance_diagnostics(self):
     instance_ref, network_info = self._get_running_instance(obj=True)
     instance_ref['launched_at'] = timeutils.utcnow()
     self.connection.get_instance_diagnostics(instance_ref)
Exemple #54
0
    def reserve(self,
                context,
                resources,
                deltas,
                expire=None,
                project_id=None):
        """Check quotas and reserve resources.

        For counting quotas--those quotas for which there is a usage
        synchronization function--this method checks quotas against
        current usage and the desired deltas.

        This method will raise a QuotaResourceUnknown exception if a
        given resource is unknown or if it does not have a usage
        synchronization function.

        If any of the proposed values is over the defined quota, an
        OverQuota exception will be raised with the sorted list of the
        resources which are too high.  Otherwise, the method returns a
        list of reservation UUIDs which were created.

        :param context: The request context, for access checks.
        :param resources: A dictionary of the registered resources.
        :param deltas: A dictionary of the proposed delta changes.
        :param expire: An optional parameter specifying an expiration
                       time for the reservations.  If it is a simple
                       number, it is interpreted as a number of
                       seconds and added to the current time; if it is
                       a datetime.timedelta object, it will also be
                       added to the current time.  A datetime.datetime
                       object will be interpreted as the absolute
                       expiration time.  If None is specified, the
                       default expiration time set by
                       --default-reservation-expire will be used (this
                       value will be treated as a number of seconds).
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's tenant.
        """

        # Set up the reservation expiration
        if expire is None:
            expire = CONF.reservation_expire
        if isinstance(expire, (int, long)):
            expire = datetime.timedelta(seconds=expire)
        if isinstance(expire, datetime.timedelta):
            expire = timeutils.utcnow() + expire
        if not isinstance(expire, datetime.datetime):
            raise exception.InvalidReservationExpiration(expire=expire)

        # If project_id is None, then we use the project_id in context
        if project_id is None:
            project_id = context.project_id

        # Get the applicable quotas.
        # NOTE(Vek): We're not worried about races at this point.
        #            Yes, the admin may be in the process of reducing
        #            quotas, but that's a pretty rare thing.
        quotas = self._get_quotas(context,
                                  resources,
                                  deltas.keys(),
                                  has_sync=True,
                                  project_id=project_id)

        # NOTE(Vek): Most of the work here has to be done in the DB
        #            API, because we have to do it in a transaction,
        #            which means access to the session.  Since the
        #            session isn't available outside the DBAPI, we
        #            have to do the work there.
        return db.quota_reserve(context,
                                resources,
                                quotas,
                                deltas,
                                expire,
                                CONF.until_refresh,
                                CONF.max_age,
                                project_id=project_id)
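
The expire handling above accepts a plain number of seconds, a timedelta, or an absolute datetime. A brief hypothetical call of this driver method showing two of those forms ('driver', 'ctxt' and the resource registry are assumed to exist in surrounding setup):

    # Illustrative only.
    reservations = driver.reserve(ctxt, resources, {'instances': 1, 'cores': 2},
                                  expire=3600)                         # int -> seconds from now
    reservations = driver.reserve(ctxt, resources, {'ram': 512},
                                  expire=datetime.timedelta(hours=2))  # added to utcnow()
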
Exemple #55
0
 def _time_to_sync(self):
     """Is it time to sync the DB against our memory cache?"""
     diff = timeutils.utcnow() - self.last_cell_db_check
     return diff.seconds >= CONF.cells.db_check_interval
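
Note that timedelta.seconds above is only the sub-day remainder of the elapsed time, so the comparison assumes db_check_interval is well under a day:

    diff = datetime.timedelta(days=1, seconds=5)
    diff.seconds            # 5, not 86405 (total_seconds() would give 86405.0)
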
Exemple #56
0
    def _tenant_usages_for_period(self,
                                  context,
                                  period_start,
                                  period_stop,
                                  tenant_id=None,
                                  detailed=True):

        compute_api = api.API()
        instances = compute_api.get_active_by_window(context, period_start,
                                                     period_stop, tenant_id)
        rval = {}
        flavors = {}

        for instance in instances:
            info = {}
            info['hours'] = self._hours_for(instance, period_start,
                                            period_stop)
            flavor_type = instance['instance_type_id']

            if not flavors.get(flavor_type):
                try:
                    it_ref = compute_api.get_instance_type(
                        context, flavor_type)
                    flavors[flavor_type] = it_ref
                except exception.InstanceTypeNotFound:
                    # can't bill if there is no instance type
                    continue

            flavor = flavors[flavor_type]

            info['instance_id'] = instance['uuid']
            info['name'] = instance['display_name']

            info['memory_mb'] = flavor['memory_mb']
            info['local_gb'] = flavor['root_gb'] + flavor['ephemeral_gb']
            info['vcpus'] = flavor['vcpus']

            info['tenant_id'] = instance['project_id']

            info['flavor'] = flavor['name']

            info['started_at'] = instance['launched_at']

            info['ended_at'] = instance['terminated_at']

            if info['ended_at']:
                info['state'] = 'terminated'
            else:
                info['state'] = instance['vm_state']

            now = timeutils.utcnow()

            if info['state'] == 'terminated':
                delta = info['ended_at'] - info['started_at']
            else:
                delta = now - info['started_at']

            info['uptime'] = delta.days * 24 * 3600 + delta.seconds

            if not info['tenant_id'] in rval:
                summary = {}
                summary['tenant_id'] = info['tenant_id']
                if detailed:
                    summary['server_usages'] = []
                summary['total_local_gb_usage'] = 0
                summary['total_vcpus_usage'] = 0
                summary['total_memory_mb_usage'] = 0
                summary['total_hours'] = 0
                summary['start'] = period_start
                summary['stop'] = period_stop
                rval[info['tenant_id']] = summary

            summary = rval[info['tenant_id']]
            summary['total_local_gb_usage'] += info['local_gb'] * info['hours']
            summary['total_vcpus_usage'] += info['vcpus'] * info['hours']
            summary['total_memory_mb_usage'] += (info['memory_mb'] *
                                                 info['hours'])

            summary['total_hours'] += info['hours']
            if detailed:
                summary['server_usages'].append(info)

        return rval.values()
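
The uptime expression above converts the timedelta to whole seconds (microseconds are dropped); a quick check:

    delta = datetime.timedelta(days=1, minutes=30)
    delta.days * 24 * 3600 + delta.seconds    # 86400 + 1800 = 88200
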
Exemple #57
0
def bm_deployment_destroy(context, dep_id):
    model_query(context, models.BareMetalDeployment).\
                filter_by(id=dep_id).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})
Exemple #58
0
def bm_node_destroy(context, bm_node_id):
    model_query(context, models.BareMetalNode).\
            filter_by(id=bm_node_id).\
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')})
Exemple #59
0
 def test_validate_ec2_req_not_expired(self):
     expire = timeutils.utcnow() + datetime.timedelta(seconds=350)
     params = {'Expires': timeutils.strtime(expire, "%Y-%m-%dT%H:%M:%SZ")}
     expired = ec2utils.is_ec2_timestamp_expired(params)
     self.assertFalse(expired)
Exemple #60
0
 def soft_delete(self, session=None):
     """Mark this object as deleted."""
     self.deleted = self.id
     self.deleted_at = timeutils.utcnow()
     self.save(session=session)