def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    # load tables for fk
    instances = Table('instances', meta, autoload=True)

    #
    # New Tables
    #
    instance_info_caches = Table('instance_info_caches', meta,
            Column('created_at', DateTime(timezone=False),
                   default=utils.utcnow()),
            Column('updated_at', DateTime(timezone=False),
                   onupdate=utils.utcnow()),
            Column('deleted_at', DateTime(timezone=False)),
            Column('deleted', Boolean(create_constraint=True, name=None)),
            Column('id', Integer(), primary_key=True),
            Column('network_info', Text()),
            Column('instance_id', String(36),
                   ForeignKey('instances.uuid'),
                   nullable=False,
                   unique=True),
            mysql_engine='InnoDB')

    # create instance_info_caches table
    try:
        instance_info_caches.create()
    except Exception:
        LOG.error(_("Table |%s| not created!"), repr(instance_info_caches))
        raise
def test_snapshot_index_detail_serializer(self):
    serializer = volumes.SnapshotsTemplate()
    raw_snapshots = [
        dict(
            id='snap1_id',
            status='snap1_status',
            size=1024,
            createdAt=utils.utcnow(),
            displayName='snap1_name',
            displayDescription='snap1_desc',
            volumeId='vol1_id',
            ),
        dict(
            id='snap2_id',
            status='snap2_status',
            size=1024,
            createdAt=utils.utcnow(),
            displayName='snap2_name',
            displayDescription='snap2_desc',
            volumeId='vol2_id',
            ),
        ]
    text = serializer.serialize(dict(snapshots=raw_snapshots))

    print text
    tree = etree.fromstring(text)

    self.assertEqual('snapshots', tree.tag)
    self.assertEqual(len(raw_snapshots), len(tree))
    for idx, child in enumerate(tree):
        self._verify_snapshot(raw_snapshots[idx], child)
def stub_instance(id, metadata=None, image_ref="10", flavor_id="1", name=None, vm_state=None, task_state=None, uuid=None): if metadata is not None: metadata_items = [{'key':k, 'value':v} for k, v in metadata.items()] else: metadata_items = [{'key':'seq', 'value':id}] if uuid is None: uuid = FAKE_UUID inst_type = instance_types.get_instance_type_by_flavor_id(int(flavor_id)) instance = { "id": int(id), "name": str(id), "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0), "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0), "admin_pass": "", "user_id": "fake", "project_id": "fake", "image_ref": image_ref, "kernel_id": "", "ramdisk_id": "", "launch_index": 0, "key_name": "", "key_data": "", "vm_state": vm_state or vm_states.ACTIVE, "task_state": task_state, "memory_mb": 0, "vcpus": 0, "local_gb": 0, "hostname": "", "host": "", "instance_type": dict(inst_type), "user_data": "", "reservation_id": "", "mac_address": "", "scheduled_at": utils.utcnow(), "launched_at": utils.utcnow(), "terminated_at": utils.utcnow(), "availability_zone": "", "display_name": name or "server%s" % id, "display_description": "", "locked": False, "metadata": metadata_items, "access_ip_v4": "", "access_ip_v6": "", "uuid": uuid, "virtual_interfaces": [], "progress": 0, } instance["fixed_ips"] = [{"address": '192.168.0.1', "network": {'label': 'public', 'cidr_v6': None}, "virtual_interface": {'address': 'aa:aa:aa:aa:aa:aa'}, "floating_ips": []}] return instance
def _schedule_instance(self, context, instance_id, *_args, **_kwargs):
    """Picks a host that is up and has the fewest running instances."""
    instance_ref = db.instance_get(context, instance_id)
    if (instance_ref['availability_zone']
            and ':' in instance_ref['availability_zone']
            and context.is_admin):
        zone, _x, host = instance_ref['availability_zone'].partition(':')
        service = db.service_get_by_args(context.elevated(), host,
                                         'nova-compute')
        if not self.service_is_up(service):
            raise driver.WillNotSchedule(_("Host %s is not alive") % host)

        # TODO(vish): this probably belongs in the manager, if we
        #             can generalize this somehow
        now = utils.utcnow()
        db.instance_update(context, instance_id, {'host': host,
                                                  'scheduled_at': now})
        return host
    results = db.service_get_all_compute_sorted(context)
    for result in results:
        (service, instance_cores) = result
        if instance_cores + instance_ref['vcpus'] > FLAGS.max_cores:
            raise driver.NoValidHost(_("All hosts have too many cores"))
        if self.service_is_up(service):
            # NOTE(vish): this probably belongs in the manager, if we
            #             can generalize this somehow
            now = utils.utcnow()
            db.instance_update(context, instance_id,
                               {'host': service['host'],
                                'scheduled_at': now})
            return service['host']
    raise driver.NoValidHost(_("Scheduler was unable to locate a host"
                               " for this request. Is the appropriate"
                               " service running?"))
def test_snapshot_index_detail_serializer(self):
    serializer = volumes.SnapshotsTemplate()
    raw_snapshots = [dict(id='snap1_id',
                          status='snap1_status',
                          size=1024,
                          createdAt=utils.utcnow(),
                          displayName='snap1_name',
                          displayDescription='snap1_desc',
                          volumeId='vol1_id',
                          ),
                     dict(id='snap2_id',
                          status='snap2_status',
                          size=1024,
                          createdAt=utils.utcnow(),
                          displayName='snap2_name',
                          displayDescription='snap2_desc',
                          volumeId='vol2_id',
                          )]
    text = serializer.serialize(dict(snapshots=raw_snapshots))

    print text
    tree = etree.fromstring(text)

    self.assertEqual('snapshots', tree.tag)
    self.assertEqual(len(raw_snapshots), len(tree))
    for idx, child in enumerate(tree):
        self._verify_snapshot(raw_snapshots[idx], child)
def ping(self, context=None):
    """Ping should be called periodically to update zone status."""
    diff = utils.utcnow() - self.last_zone_db_check
    if diff.seconds >= FLAGS.zone_db_check_interval:
        logging.debug(_("Updating zone cache from db."))
        self.last_zone_db_check = utils.utcnow()
        self._refresh_from_db(context)
    self._poll_zones(context)
def ping(self, context):
    """Ping should be called periodically to update zone status."""
    diff = utils.utcnow() - self.last_zone_db_check
    if diff.seconds >= FLAGS.zone_db_check_interval:
        logging.debug(_("Updating zone cache from db."))
        self.last_zone_db_check = utils.utcnow()
        self._refresh_from_db(context)
    self._poll_zones(context)
def stub_instance(id, user_id='fake', project_id='fake', host=None,
                  vm_state=None, task_state=None,
                  reservation_id="", uuid=FAKE_UUID, image_ref="10",
                  flavor_id="1", name=None, key_name='',
                  access_ipv4=None, access_ipv6=None, progress=0):
    if host is not None:
        host = str(host)

    if key_name:
        key_data = 'FAKE'
    else:
        key_data = ''

    # ReservationID isn't sent back, hack it in there.
    server_name = name or "server%s" % id
    if reservation_id != "":
        server_name = "reservation_%s" % (reservation_id, )

    instance = {
        "id": int(id),
        "uuid": FAKE_UUID,
        "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
        "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
        "admin_pass": "",
        "user_id": user_id,
        "project_id": project_id,
        "image_ref": image_ref,
        "kernel_id": "",
        "ramdisk_id": "",
        "launch_index": 0,
        "key_name": key_name,
        "key_data": key_data,
        "vm_state": vm_state or vm_states.BUILDING,
        "task_state": task_state,
        "memory_mb": 0,
        "vcpus": 0,
        "local_gb": 0,
        "hostname": "",
        "host": host,
        "instance_type": {},
        "user_data": "",
        "reservation_id": reservation_id,
        "mac_address": "",
        "scheduled_at": utils.utcnow(),
        "launched_at": utils.utcnow(),
        "terminated_at": utils.utcnow(),
        "availability_zone": "",
        "display_name": server_name,
        "display_description": "",
        "locked": False,
        "metadata": [],
        "access_ip_v4": access_ipv4,
        "access_ip_v6": access_ipv6,
        "uuid": uuid,
        "progress": progress}

    return instance
def stub_instance(id, metadata=None, image_ref="10", flavor_id="1", name=None, vm_state=None, task_state=None): if metadata is not None: metadata_items = [{'key': k, 'value': v} for k, v in metadata.items()] else: metadata_items = [{'key': 'seq', 'value': id}] inst_type = instance_types.get_instance_type_by_flavor_id(int(flavor_id)) instance = { "id": int(id), "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0), "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0), "admin_pass": "", "user_id": "fake", "project_id": "fake", "image_ref": image_ref, "kernel_id": "", "ramdisk_id": "", "launch_index": 0, "key_name": "", "key_data": "", "vm_state": vm_state or vm_states.ACTIVE, "task_state": task_state, "memory_mb": 0, "vcpus": 0, "local_gb": 0, "hostname": "", "host": "", "instance_type": dict(inst_type), "user_data": "", "reservation_id": "", "mac_address": "", "scheduled_at": utils.utcnow(), "launched_at": utils.utcnow(), "terminated_at": utils.utcnow(), "availability_zone": "", "display_name": name or "server%s" % id, "display_description": "", "locked": False, "metadata": metadata_items, "access_ip_v4": "", "access_ip_v6": "", "uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", "virtual_interfaces": [], } instance["fixed_ips"] = { "address": '192.168.0.1', "floating_ips": [], } return instance
def stub_instance(id, user_id='fake', project_id='fake', host=None,
                  vm_state=None, task_state=None,
                  reservation_id="", uuid=FAKE_UUID, image_ref="10",
                  flavor_id="1", name=None, key_name='',
                  access_ipv4=None, access_ipv6=None, progress=0):
    if host is not None:
        host = str(host)

    if key_name:
        key_data = 'FAKE'
    else:
        key_data = ''

    # ReservationID isn't sent back, hack it in there.
    server_name = name or "server%s" % id
    if reservation_id != "":
        server_name = "reservation_%s" % (reservation_id, )

    instance = {
        "id": int(id),
        "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
        "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
        "admin_pass": "",
        "user_id": user_id,
        "project_id": project_id,
        "image_ref": image_ref,
        "kernel_id": "",
        "ramdisk_id": "",
        "launch_index": 0,
        "key_name": key_name,
        "key_data": key_data,
        "vm_state": vm_state or vm_states.BUILDING,
        "task_state": task_state,
        "memory_mb": 0,
        "vcpus": 0,
        "root_gb": 0,
        "hostname": "",
        "host": host,
        "instance_type": {},
        "user_data": "",
        "reservation_id": reservation_id,
        "mac_address": "",
        "scheduled_at": utils.utcnow(),
        "launched_at": utils.utcnow(),
        "terminated_at": utils.utcnow(),
        "availability_zone": "",
        "display_name": server_name,
        "display_description": "",
        "locked": False,
        "metadata": [],
        "access_ip_v4": access_ipv4,
        "access_ip_v6": access_ipv6,
        "uuid": uuid,
        "progress": progress}

    return instance
def update(self, context):
    """Update status for all zones.  This should be called
    periodically to refresh the zone states.
    """
    diff = utils.utcnow() - self.last_zone_db_check
    if diff.seconds >= FLAGS.zone_db_check_interval:
        LOG.debug(_("Updating zone cache from db."))
        self.last_zone_db_check = utils.utcnow()
        self._refresh_from_db(context)
    self._poll_zones()
def test_volume_index_detail_serializer(self):
    serializer = volumes.VolumesTemplate()
    raw_volumes = [
        dict(
            id='vol1_id',
            status='vol1_status',
            size=1024,
            availability_zone='vol1_availability',
            created_at=utils.utcnow(),
            attachments=[
                dict(
                    id='vol1_id',
                    volume_id='vol1_id',
                    server_id='instance_uuid',
                    device='/foo1'),
                ],
            display_name='vol1_name',
            display_description='vol1_desc',
            volume_type='vol1_type',
            snapshot_id='snap1_id',
            metadata=dict(
                foo='vol1_foo',
                bar='vol1_bar',
                ),
            ),
        dict(
            id='vol2_id',
            status='vol2_status',
            size=1024,
            availability_zone='vol2_availability',
            created_at=utils.utcnow(),
            attachments=[
                dict(
                    id='vol2_id',
                    volume_id='vol2_id',
                    server_id='instance_uuid',
                    device='/foo2'),
                ],
            display_name='vol2_name',
            display_description='vol2_desc',
            volume_type='vol2_type',
            snapshot_id='snap2_id',
            metadata=dict(
                foo='vol2_foo',
                bar='vol2_bar',
                ),
            ),
        ]
    text = serializer.serialize(dict(volumes=raw_volumes))

    print text
    tree = etree.fromstring(text)

    self.assertEqual('volumes', tree.tag)
    self.assertEqual(len(raw_volumes), len(tree))
    for idx, child in enumerate(tree):
        self._verify_volume(raw_volumes[idx], child)
def stub_instance(id, power_state=0, metadata=None, image_ref="10",
                  flavor_id="1", name=None):
    if metadata is not None:
        metadata_items = [{'key': k, 'value': v}
                          for k, v in metadata.items()]
    else:
        metadata_items = [{'key': 'seq', 'value': id}]

    inst_type = instance_types.get_instance_type_by_flavor_id(int(flavor_id))

    instance = {
        "id": int(id),
        "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
        "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
        "admin_pass": "",
        "user_id": "fake",
        "project_id": "fake",
        "image_ref": image_ref,
        "kernel_id": "",
        "ramdisk_id": "",
        "launch_index": 0,
        "key_name": "",
        "key_data": "",
        "state": power_state,
        "state_description": "",
        "memory_mb": 0,
        "vcpus": 0,
        "local_gb": 0,
        "hostname": "",
        "host": "",
        "instance_type": dict(inst_type),
        "user_data": "",
        "reservation_id": "",
        "mac_address": "",
        "scheduled_at": utils.utcnow(),
        "launched_at": utils.utcnow(),
        "terminated_at": utils.utcnow(),
        "availability_zone": "",
        "display_name": name or "server%s" % id,
        "display_description": "",
        "locked": False,
        "metadata": metadata_items,
        "access_ip_v4": "",
        "access_ip_v6": "",
        "uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
        "virtual_interfaces": [],
    }

    instance["fixed_ips"] = {
        "address": '192.168.0.1',
        "floating_ips": [],
    }

    return instance
def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
    """Picks the best host based on requested drive type capability."""
    volume_ref = db.volume_get(context, volume_id)

    host = self._check_host_enforcement(context,
                                        volume_ref['availability_zone'])
    if host:
        now = utils.utcnow()
        db.volume_update(context, volume_id, {'host': host,
                                              'scheduled_at': now})
        return host

    volume_type_id = volume_ref['volume_type_id']
    if volume_type_id:
        volume_type = volume_types.get_volume_type(context, volume_type_id)

    if volume_type_id is None or\
            volume_types.is_vsa_volume(volume_type_id, volume_type):
        LOG.debug(_("Non-VSA volume %d"), volume_ref['id'])
        return super(VsaScheduler, self).schedule_create_volume(context,
                    volume_id, *_args, **_kwargs)

    self._print_capabilities_info()

    drive_type = {
        'name': volume_type['extra_specs'].get('drive_name'),
        'type': volume_type['extra_specs'].get('drive_type'),
        'size': int(volume_type['extra_specs'].get('drive_size')),
        'rpm': volume_type['extra_specs'].get('drive_rpm'),
        }

    LOG.debug(_("Spawning volume %(volume_id)s with drive type "
                "%(drive_type)s"), locals())

    request_spec = {'size': volume_ref['size'],
                    'drive_type': drive_type}
    hosts = self._filter_hosts("volume", request_spec)

    try:
        (host, qos_cap) = self._select_hosts(request_spec, all_hosts=hosts)
    except Exception:
        if volume_ref['to_vsa_id']:
            db.vsa_update(context, volume_ref['to_vsa_id'],
                          dict(status=VsaState.FAILED))
        raise

    if host:
        now = utils.utcnow()
        db.volume_update(context, volume_id, {'host': host,
                                              'scheduled_at': now})
        self._consume_resource(qos_cap, volume_ref['size'], -1)
        return host
def test_volume_index_detail_serializer(self):
    serializer = volumes.VolumesTemplate()
    raw_volumes = [dict(id='vol1_id',
                        status='vol1_status',
                        size=1024,
                        availabilityZone='vol1_availability',
                        createdAt=utils.utcnow(),
                        attachments=[dict(id='vol1_id',
                                          volumeId='vol1_id',
                                          serverId='instance_uuid',
                                          device='/foo1')],
                        displayName='vol1_name',
                        displayDescription='vol1_desc',
                        volumeType='vol1_type',
                        snapshotId='snap1_id',
                        metadata=dict(foo='vol1_foo',
                                      bar='vol1_bar',
                                      ),
                        ),
                   dict(id='vol2_id',
                        status='vol2_status',
                        size=1024,
                        availabilityZone='vol2_availability',
                        createdAt=utils.utcnow(),
                        attachments=[dict(id='vol2_id',
                                          volumeId='vol2_id',
                                          serverId='instance_uuid',
                                          device='/foo2')],
                        displayName='vol2_name',
                        displayDescription='vol2_desc',
                        volumeType='vol2_type',
                        snapshotId='snap2_id',
                        metadata=dict(foo='vol2_foo',
                                      bar='vol2_bar',
                                      ),
                        )]
    text = serializer.serialize(dict(volumes=raw_volumes))

    print text
    tree = etree.fromstring(text)

    self.assertEqual('volumes', tree.tag)
    self.assertEqual(len(raw_volumes), len(tree))
    for idx, child in enumerate(tree):
        self._verify_volume(raw_volumes[idx], child)
def __init__(self, user_id, project_id, is_admin=None, read_deleted="no", roles=None, remote_address=None, timestamp=None, request_id=None, auth_token=None, strategy='noauth', overwrite=True): """ :param read_deleted: 'no' indicates deleted records are hidden, 'yes' indicates deleted records are visible, 'only' indicates that *only* deleted records are visible. :param overwrite: Set to False to ensure that the greenthread local copy of the index is not overwritten. """ self.user_id = user_id self.project_id = project_id self.roles = roles or [] self.is_admin = is_admin if self.is_admin is None: self.is_admin = 'admin' in [x.lower() for x in self.roles] self.read_deleted = read_deleted self.remote_address = remote_address if not timestamp: timestamp = utils.utcnow() if isinstance(timestamp, basestring): timestamp = utils.parse_strtime(timestamp) self.timestamp = timestamp if not request_id: request_id = 'req-' + str(utils.gen_uuid()) self.request_id = request_id self.auth_token = auth_token self.strategy = strategy if overwrite or not hasattr(local.store, 'context'): local.store.context = self
def _create_compute_service(self, **kwargs):
    """Create a compute service."""

    dic = {"binary": "nova-compute", "topic": "compute",
           "report_count": 0, "availability_zone": "dummyzone"}
    dic["host"] = kwargs.get("host", "dummy")
    s_ref = db.service_create(self.context, dic)
    if "created_at" in kwargs.keys() or "updated_at" in kwargs.keys():
        t = utils.utcnow() - datetime.timedelta(0)
        dic["created_at"] = kwargs.get("created_at", t)
        dic["updated_at"] = kwargs.get("updated_at", t)
        db.service_update(self.context, s_ref["id"], dic)

    dic = {
        "service_id": s_ref["id"],
        "vcpus": 16,
        "memory_mb": 32,
        "local_gb": 100,
        "vcpus_used": 16,
        "local_gb_used": 10,
        "hypervisor_type": "qemu",
        "hypervisor_version": 12003,
        "cpu_info": "",
    }
    dic["memory_mb_used"] = kwargs.get("memory_mb_used", 32)
    dic["hypervisor_type"] = kwargs.get("hypervisor_type", "qemu")
    dic["hypervisor_version"] = kwargs.get("hypervisor_version", 12003)
    db.compute_node_create(self.context, dic)
    return db.service_get(self.context, s_ref["id"])
def bless_instance(self, context, instance_uuid, migration_url=None):
    """
    Construct the blessed instance, with the uuid instance_uuid. If
    migration_url is specified then bless will ensure a memory server is
    available at the given migration url.
    """
    LOG.debug(_("bless instance called: instance_uuid=%s, migration_url=%s"),
              instance_uuid, migration_url)

    instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
    if migration_url:
        # Tweak only this instance directly.
        source_instance_ref = instance_ref
        migration = True
    else:
        source_instance_ref = self._get_source_instance(context,
                                                        instance_uuid)
        migration = False

    self._instance_update(context, instance_ref.id,
                          vm_state=vm_states.BUILDING)

    try:
        # Create a new 'blessed' VM with the given name.
        name, migration_url, blessed_files = self.vms_conn.bless(context,
                source_instance_ref.name, instance_ref,
                migration_url=migration_url,
                use_image_service=FLAGS.gridcentric_use_image_service)
        if not(migration):
            self._instance_update(context, instance_ref.id,
                                  vm_state="blessed", task_state=None,
                                  launched_at=utils.utcnow())
    except Exception, e:
        LOG.debug(_("Error during bless %s: %s"), str(e),
                  traceback.format_exc())
        self._instance_update(context, instance_ref.id,
                              vm_state=vm_states.ERROR, task_state=None)
        # Short-circuit, nothing to be done.
        return
def host_schedule(rpc_method,
                  context, base_options,
                  instance_type,
                  availability_zone, injected_files,
                  admin_password, image,
                  num_instances,
                  requested_networks,
                  block_device_mapping,
                  security_group,
                  filter_properties):
    instance_uuid = base_options.get('uuid')
    now = utils.utcnow()
    self.db.instance_update(context, instance_uuid,
                            {'host': target_host, 'scheduled_at': now})
    rpc.cast(context,
             rpc.queue_get_for(context, CONF.compute_topic, target_host),
             {"method": "run_instance",
              "args": {"instance_uuid": instance_uuid,
                       "availability_zone": availability_zone,
                       "admin_password": admin_password,
                       "injected_files": injected_files,
                       "requested_networks": requested_networks}})
    # Instance was already created before calling scheduler
    return self.get(context, instance_uuid)
def delete(self, context, volume):
    volume_id = volume['id']
    if not volume['host']:
        # NOTE(vish): scheduling failed, so delete it
        self.db.volume_destroy(context, volume_id)
        return
    if volume['status'] not in ["available", "error"]:
        msg = _("Volume status must be available or error")
        raise exception.InvalidVolume(reason=msg)

    snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
    if len(snapshots):
        msg = _("Volume still has %d dependent snapshots") % len(snapshots)
        raise exception.InvalidVolume(reason=msg)

    now = utils.utcnow()
    self.db.volume_update(context, volume_id, {'status': 'deleting',
                                               'terminated_at': now})
    host = volume['host']
    rpc.cast(context,
             self.db.queue_get_for(context, FLAGS.volume_topic, host),
             {"method": "delete_volume",
              "args": {"volume_id": volume_id}})
def test_live_migration_src_check_volume_node_not_alive(self):
    """Raise exception when volume node is not alive."""
    instance_id = self._create_instance()
    i_ref = db.instance_get(self.context, instance_id)
    dic = {'instance_id': instance_id, 'size': 1}
    v_ref = db.volume_create(self.context, {'instance_id': instance_id,
                                            'size': 1})
    t1 = utils.utcnow() - datetime.timedelta(1)
    dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume',
           'topic': 'volume', 'report_count': 0}
    s_ref = db.service_create(self.context, dic)

    self.assertRaises(exception.VolumeServiceUnavailable,
                      self.scheduler.driver.schedule_live_migration,
                      self.context, instance_id, i_ref['host'])

    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, s_ref['id'])
    db.volume_destroy(self.context, v_ref['id'])
def __init__(self, user, project, is_admin=None, read_deleted=False,
             remote_address=None, timestamp=None, request_id=None):
    if hasattr(user, 'id'):
        self._user = user
        self.user_id = user.id
    else:
        self._user = None
        self.user_id = user
    if hasattr(project, 'id'):
        self._project = project
        self.project_id = project.id
    else:
        self._project = None
        self.project_id = project
    if is_admin is None:
        if self.user_id and self.user:
            self.is_admin = self.user.is_admin()
        else:
            self.is_admin = False
    else:
        self.is_admin = is_admin
    self.read_deleted = read_deleted
    self.remote_address = remote_address
    if not timestamp:
        timestamp = utils.utcnow()
    if isinstance(timestamp, str) or isinstance(timestamp, unicode):
        timestamp = utils.parse_isotime(timestamp)
    self.timestamp = timestamp
    if not request_id:
        chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-'
        request_id = ''.join([random.choice(chars) for x in xrange(20)])
    self.request_id = request_id
def _provision_volume(self, context, vol, vsa_id, availability_zone):

    if availability_zone is None:
        availability_zone = FLAGS.storage_availability_zone

    now = utils.utcnow()
    options = {
        'size': vol['size'],
        'user_id': context.user_id,
        'project_id': context.project_id,
        'snapshot_id': None,
        'availability_zone': availability_zone,
        'status': "creating",
        'attach_status': "detached",
        'display_name': vol['name'],
        'display_description': vol['description'],
        'volume_type_id': vol['volume_type_id'],
        'metadata': dict(to_vsa_id=vsa_id),
        'host': vol['host'],
        'scheduled_at': now
        }

    size = vol['size']
    host = vol['host']
    name = vol['name']
    LOG.debug(_("Provision volume %(name)s of size %(size)s GB on "\
                "host %(host)s"), locals())

    volume_ref = db.volume_create(context, options)
    rpc.cast(context,
             db.queue_get_for(context, "volume", vol['host']),
             {"method": "create_volume",
              "args": {"volume_id": volume_ref['id'],
                       "snapshot_id": None}})
def test_index_serializer(self):
    serializer = cloudpipe.CloudpipesTemplate()
    exemplar = dict(cloudpipes=[
            dict(cloudpipe=dict(
                    project_id='1234',
                    public_ip='1.2.3.4',
                    public_port='321',
                    instance_id='1234-1234-1234-1234',
                    created_at=utils.isotime(utils.utcnow()),
                    state='running')),
            dict(cloudpipe=dict(
                    project_id='4321',
                    public_ip='4.3.2.1',
                    public_port='123',
                    state='pending'))])
    text = serializer.serialize(exemplar)

    tree = etree.fromstring(text)

    self.assertEqual('cloudpipes', tree.tag)
    self.assertEqual(len(exemplar['cloudpipes']), len(tree))
    for idx, cl_pipe in enumerate(tree):
        self.assertEqual('cloudpipe', cl_pipe.tag)
        kp_data = exemplar['cloudpipes'][idx]['cloudpipe']
        for child in cl_pipe:
            self.assertTrue(child.tag in kp_data)
            self.assertEqual(child.text, kp_data[child.tag])
def log_request_completion(self, response, request, start):
    apireq = request.environ.get('ec2.request', None)
    if apireq:
        controller = apireq.controller
        action = apireq.action
    else:
        controller = None
        action = None
    ctxt = request.environ.get('nova.context', None)
    delta = utils.utcnow() - start
    seconds = delta.seconds
    microseconds = delta.microseconds
    LOG.info(
        "%s.%ss %s %s %s %s:%s %s [%s] %s %s",
        seconds,
        microseconds,
        request.remote_addr,
        request.method,
        "%s%s" % (request.script_name, request.path_info),
        controller,
        action,
        response.status_int,
        request.user_agent,
        request.content_type,
        response.content_type,
        context=ctxt)
def test_volume_show_create_serializer(self):
    serializer = volumes.VolumeTemplate()
    raw_volume = dict(id='vol_id',
                      status='vol_status',
                      size=1024,
                      availabilityZone='vol_availability',
                      createdAt=utils.utcnow(),
                      attachments=[dict(id='vol_id',
                                        volumeId='vol_id',
                                        serverId='instance_uuid',
                                        device='/foo')],
                      displayName='vol_name',
                      displayDescription='vol_desc',
                      volumeType='vol_type',
                      snapshotId='snap_id',
                      metadata=dict(foo='bar',
                                    baz='quux',
                                    ),
                      )
    text = serializer.serialize(dict(volume=raw_volume))

    print text
    tree = etree.fromstring(text)

    self._verify_volume(raw_volume, tree)
def deleting(context, subscription_id, tenant_id, item_name, resource_uuid, created_at, updated_at, expires_at, order_unit, order_size, price, currency, region_name, interval_unit, interval_size, is_prepaid): app = context.app conn = driver.get_connection(item_name) if not conn.is_terminated(resource_uuid): app.info("wait:%s deleting, but %s not terminated." % (str(subscription_id), item_name)) if updated_at + relativedelta(minutes=10) < utils.utcnow(): app.info("%s(%s) status deleting-->error" % (str(subscription_id), item_name)) db.subscription_error(context, subscription_id) # TODO(lzyeval): report else: # TODO(lzyeval): implement app.info("%s(%s) status deleting-->terminated" % (str(subscription_id), item_name)) db.subscription_terminate(context, subscription_id) if not is_prepaid: interval_info = { interval_unit: interval_size, } quantity = conn.get_usage( resource_uuid, expires_at - relativedelta(**interval_info), expires_at, order_size) print "deleting", tenant_id, subscription_id, \ quantity, order_size, "\033[1;33m", price, "\033[0m" app.info("deleting %s(%s),tid=%s,price=%s" % (subscription_id, item_name, tenant_id, str(price))) charge(context, tenant_id, subscription_id, quantity, order_size, price) else: app.info("%s/%s/%s is_prepaid" % (tenant_id, str(subscription_id), item_name))
def creating(context, subscription_id, tenant_id, item_name, resource_uuid, created_at, updated_at, expires_at, order_unit, order_size, price, currency, region_name, interval_unit, interval_size, is_prepaid): app = context.app conn = driver.get_connection(item_name) if not conn.is_running(resource_uuid): app.info("wait:%s creating, but %s not running." % (str(subscription_id), item_name)) if created_at + relativedelta(minutes=10) < utils.utcnow(): app.info("%s(%s) status creating-->error" % (str(subscription_id), item_name)) db.subscription_error(context, subscription_id) # TODO(lzyeval): report else: interval_info = { interval_unit: interval_size, } app.info("%s(%s) status creating-->verify" % (str(subscription_id), item_name)) db.subscription_verify(context, subscription_id) if is_prepaid: quantity = conn.get_usage(resource_uuid, expires_at - relativedelta(**interval_info), expires_at, order_size) print "creating and is running", tenant_id, subscription_id, \ quantity, order_size, "\033[1;33m", price, "\033[0m" app.info("creating %s:subid=%s,tid=%s,price=%s" % (item_name, subscription_id, tenant_id, str(price))) charge(context, tenant_id, subscription_id, quantity, order_size, price) else: app.info("%s/%s/%s is_prepaid" % (tenant_id, str(subscription_id), item_name)) db.subscription_extend(context, subscription_id, expires_at + relativedelta(**interval_info))
def _provision_volume(self, context, vol, vsa_id, availability_zone):

    if availability_zone is None:
        availability_zone = FLAGS.storage_availability_zone

    now = utils.utcnow()
    options = {
        'size': vol['size'],
        'user_id': context.user_id,
        'project_id': context.project_id,
        'snapshot_id': None,
        'availability_zone': availability_zone,
        'status': "creating",
        'attach_status': "detached",
        'display_name': vol['name'],
        'display_description': vol['description'],
        'volume_type_id': vol['volume_type_id'],
        'metadata': dict(to_vsa_id=vsa_id),
        }

    size = vol['size']
    host = vol['host']
    name = vol['name']
    LOG.debug(_("Provision volume %(name)s of size %(size)s GB on "\
                "host %(host)s"), locals())

    volume_ref = db.volume_create(context.elevated(), options)
    driver.cast_to_volume_host(context, vol['host'],
                               'create_volume',
                               volume_id=volume_ref['id'],
                               snapshot_id=None)
def notify_usage_exists(instance_ref, current_period=False):
    """ Generates 'exists' notification for an instance for usage auditing
        purposes.

        Generates usage for last completed period, unless 'current_period'
        is True."""
    admin_context = context.get_admin_context()
    begin, end = utils.current_audit_period()
    bw = {}

    if current_period:
        audit_start = end
        audit_end = utils.utcnow()
    else:
        audit_start = begin
        audit_end = end

    for b in db.bw_usage_get_by_instance(admin_context,
                                         instance_ref['id'],
                                         audit_start):
        bw[b.network_label] = dict(bw_in=b.bw_in, bw_out=b.bw_out)

    usage_info = utils.usage_from_instance(
        instance_ref,
        audit_period_beginning=str(audit_start),
        audit_period_ending=str(audit_end),
        bandwidth=bw)
    notifier_api.notify('compute.%s' % FLAGS.host,
                        'compute.instance.exists',
                        notifier_api.INFO,
                        usage_info)
def _create_compute_service(self, **kwargs):
    """Create a compute service."""

    dic = {
        'binary': 'nova-compute',
        'topic': 'compute',
        'report_count': 0,
        'availability_zone': 'dummyzone'
    }
    dic['host'] = kwargs.get('host', 'dummy')
    s_ref = db.service_create(self.context, dic)
    if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys():
        t = utils.utcnow() - datetime.timedelta(0)
        dic['created_at'] = kwargs.get('created_at', t)
        dic['updated_at'] = kwargs.get('updated_at', t)
        db.service_update(self.context, s_ref['id'], dic)

    dic = {
        'service_id': s_ref['id'],
        'vcpus': 16,
        'memory_mb': 32,
        'local_gb': 100,
        'vcpus_used': 16,
        'local_gb_used': 10,
        'hypervisor_type': 'qemu',
        'hypervisor_version': 12003,
        'cpu_info': ''
    }
    dic['memory_mb_used'] = kwargs.get('memory_mb_used', 32)
    dic['hypervisor_type'] = kwargs.get('hypervisor_type', 'qemu')
    dic['hypervisor_version'] = kwargs.get('hypervisor_version', 12003)
    db.compute_node_create(self.context, dic)
    return db.service_get(self.context, s_ref['id'])
def creating(context, subscription_id, tenant_id, item_name, resource_uuid, created_at, updated_at, expires_at, order_unit, order_size, price, currency, region_name, interval_unit, interval_size, is_prepaid): app = context.app conn = driver.get_connection(item_name) if not conn.is_running(resource_uuid): app.info("wait:%s creating, but %s not running." % (str(subscription_id), item_name)) if created_at + relativedelta(minutes=10) < utils.utcnow(): app.info("%s(%s) status creating-->error" % (str(subscription_id), item_name)) db.subscription_error(context, subscription_id) # TODO(lzyeval): report else: interval_info = { interval_unit: interval_size, } app.info("%s(%s) status creating-->verify" % (str(subscription_id), item_name)) db.subscription_verify(context, subscription_id) if is_prepaid: quantity = conn.get_usage( resource_uuid, expires_at - relativedelta(**interval_info), expires_at, order_size) print "creating and is running", tenant_id, subscription_id, \ quantity, order_size, "\033[1;33m", price, "\033[0m" app.info("creating %s:subid=%s,tid=%s,price=%s" % (item_name, subscription_id, tenant_id, str(price))) charge(context, tenant_id, subscription_id, quantity, order_size, price) else: app.info("%s/%s/%s is_prepaid" % (tenant_id, str(subscription_id), item_name)) db.subscription_extend(context, subscription_id, expires_at + relativedelta(**interval_info))
def update_service_capabilities(self, service_name, host, capabilities):
    """Update the per-service capabilities based on this notification."""
    logging.debug(_("Received %(service_name)s service update from "
                    "%(host)s.") % locals())
    service_caps = self.service_states.get(host, {})
    capabilities["timestamp"] = utils.utcnow()  # Reported time
    service_caps[service_name] = capabilities
    self.service_states[host] = service_caps
def __init__(self, user_id, project_id, is_admin=None, read_deleted=False,
             roles=None, remote_address=None, timestamp=None,
             request_id=None):
    self.user_id = user_id
    self.project_id = project_id
    self.roles = roles or []
    self.is_admin = is_admin
    if self.is_admin is None:
        self.is_admin = 'admin' in self.roles
    self.read_deleted = read_deleted
    self.remote_address = remote_address
    if not timestamp:
        timestamp = utils.utcnow()
    if isinstance(timestamp, basestring):
        timestamp = utils.parse_strtime(timestamp)
    self.timestamp = timestamp
    if not request_id:
        request_id = unicode(uuid.uuid4())
    self.request_id = request_id
def host_service_caps_stale(self, host, service):
    """Check if host service capabilites are not recent enough."""
    allowed_time_diff = FLAGS.periodic_interval * 3
    caps = self.service_states[host][service]
    if (utils.utcnow() - caps["timestamp"]) <= \
            datetime.timedelta(seconds=allowed_time_diff):
        return False
    return True
def update_metadata(self, zone_metadata):
    """Update zone metadata after successful communications with
       child zone."""
    self.last_seen = utils.utcnow()
    self.attempt = 0
    self.capabilities = ", ".join(["%s=%s" % (k, v)
                                   for k, v in zone_metadata.iteritems()
                                   if k != "name"])
    self.is_active = True
def deleting(context, subscription_id, tenant_id, item_name, resource_uuid, created_at, updated_at, expires_at, order_unit, order_size, price, currency, region_name, interval_unit, interval_size, is_prepaid): app = context.app conn = driver.get_connection(item_name) if not conn.is_terminated(resource_uuid): app.info("wait:%s deleting, but %s not terminated." % (str(subscription_id), item_name)) if updated_at + relativedelta(minutes=10) < utils.utcnow(): app.info("%s(%s) status deleting-->error" % (str(subscription_id), item_name)) db.subscription_error(context, subscription_id) # TODO(lzyeval): report else: # TODO(lzyeval): implement app.info("%s(%s) status deleting-->terminated" % (str(subscription_id), item_name)) db.subscription_terminate(context, subscription_id) if not is_prepaid: interval_info = { interval_unit: interval_size, } quantity = conn.get_usage(resource_uuid, expires_at - relativedelta(**interval_info), expires_at, order_size) print "deleting", tenant_id, subscription_id, \ quantity, order_size, "\033[1;33m", price, "\033[0m" app.info("deleting %s(%s),tid=%s,price=%s" % (subscription_id, item_name, tenant_id, str(price))) charge(context, tenant_id, subscription_id, quantity, order_size, price) else: app.info("%s/%s/%s is_prepaid" % (tenant_id, str(subscription_id), item_name))
def __init__(self, user_id, project_id, is_admin=None, read_deleted=False,
             roles=None, remote_address=None, timestamp=None,
             request_id=None, auth_token=None, strategy="noauth"):
    self.user_id = user_id
    self.project_id = project_id
    self.roles = roles or []
    self.is_admin = is_admin
    if self.is_admin is None:
        self.is_admin = "admin" in [x.lower() for x in self.roles]
    self.read_deleted = read_deleted
    self.remote_address = remote_address
    if not timestamp:
        timestamp = utils.utcnow()
    if isinstance(timestamp, basestring):
        timestamp = utils.parse_strtime(timestamp)
    self.timestamp = timestamp
    if not request_id:
        request_id = unicode(uuid.uuid4())
    self.request_id = request_id
    self.auth_token = auth_token
    self.strategy = strategy
def test_live_migration_common_check_service_orig_not_exists(self):
    """Destination host does not exist."""

    dest = 'dummydest'
    # mocks for live_migration_common_check()
    instance_id = self._create_instance()
    i_ref = db.instance_get(self.context, instance_id)
    t1 = utils.utcnow() - datetime.timedelta(10)
    s_ref = self._create_compute_service(created_at=t1, updated_at=t1,
                                         host=dest)

    # mocks for mounted_on_same_shared_storage()
    fpath = '/test/20110127120000'
    self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
    topic = FLAGS.compute_topic
    driver.rpc.call(mox.IgnoreArg(),
        db.queue_get_for(self.context, topic, dest),
        {"method": 'create_shared_storage_test_file'}).AndReturn(fpath)
    driver.rpc.call(mox.IgnoreArg(),
        db.queue_get_for(mox.IgnoreArg(), topic, i_ref['host']),
        {"method": 'check_shared_storage_test_file',
         "args": {'filename': fpath}})
    driver.rpc.call(mox.IgnoreArg(),
        db.queue_get_for(mox.IgnoreArg(), topic, dest),
        {"method": 'cleanup_shared_storage_test_file',
         "args": {'filename': fpath}})

    self.mox.ReplayAll()
    self.assertRaises(exception.SourceHostUnavailable,
                      self.scheduler.driver._live_migration_common_check,
                      self.context, i_ref, dest)

    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, s_ref['id'])
def notify_usage_exists(instance_ref, current_period=False):
    """ Generates 'exists' notification for an instance for usage auditing
        purposes.

        Generates usage for last completed period, unless 'current_period'
        is True."""
    admin_context = context.get_admin_context()
    begin, end = utils.current_audit_period()
    bw = {}

    if current_period:
        audit_start = end
        audit_end = utils.utcnow()
    else:
        audit_start = begin
        audit_end = end

    for b in db.bw_usage_get_by_instance(admin_context,
                                         instance_ref['id'],
                                         audit_start):
        bw[b.network_label] = dict(bw_in=b.bw_in, bw_out=b.bw_out)

    usage_info = utils.usage_from_instance(instance_ref,
                                           audit_period_beginning=str(audit_start),
                                           audit_period_ending=str(audit_end),
                                           bandwidth=bw)
    notifier_api.notify('compute.%s' % FLAGS.host,
                        'compute.instance.exists',
                        notifier_api.INFO,
                        usage_info)
def __init__(self, user_id, project_id, is_admin=None, read_deleted="no", roles=None, remote_address=None, timestamp=None, request_id=None, auth_token=None, strategy='noauth', overwrite=True): """ :param read_deleted: 'no' indicates deleted records are hidden, 'yes' indicates deleted records are visible, 'only' indicates that *only* deleted records are visible. :param overwrite: Set to False to ensure that the greenthread local copy of the index is not overwritten. """ self.user_id = user_id self.project_id = project_id self.roles = roles or [] self.is_admin = is_admin if self.is_admin is None: self.is_admin = 'admin' in [x.lower() for x in self.roles] elif self.is_admin and 'admin' not in self.roles: self.roles.append('admin') self.read_deleted = read_deleted self.remote_address = remote_address if not timestamp: timestamp = utils.utcnow() if isinstance(timestamp, basestring): timestamp = utils.parse_strtime(timestamp) self.timestamp = timestamp if not request_id: request_id = 'req-' + str(utils.gen_uuid()) self.request_id = request_id self.auth_token = auth_token self.strategy = strategy if overwrite or not hasattr(local.store, 'context'): local.store.context = self
def __init__(self, user_id, project_id, is_admin=None, read_deleted="no", roles=None, remote_address=None, timestamp=None, request_id=None, auth_token=None, strategy='noauth'): """ :param read_deleted: 'no' indicates deleted records are hidden, 'yes' indicates deleted records are visible, 'only' indicates that *only* deleted records are visible. """ self.user_id = user_id self.project_id = project_id self.roles = roles or [] self.is_admin = is_admin if self.is_admin is None: self.is_admin = 'admin' in [x.lower() for x in self.roles] self.read_deleted = read_deleted self.remote_address = remote_address if not timestamp: timestamp = utils.utcnow() if isinstance(timestamp, basestring): timestamp = utils.parse_strtime(timestamp) self.timestamp = timestamp if not request_id: request_id = unicode(uuid.uuid4()) self.request_id = request_id self.auth_token = auth_token self.strategy = strategy local.store.context = self
def __init__(self, user_id, project_id, is_admin=None, read_deleted=False,
             roles=None, remote_address=None, timestamp=None,
             request_id=None, auth_token=None, strategy='noauth'):
    self.user_id = user_id
    self.project_id = project_id
    self.roles = roles or []
    self.is_admin = is_admin
    if self.is_admin is None:
        self.is_admin = 'admin' in [x.lower() for x in self.roles]
    self.read_deleted = read_deleted
    self.remote_address = remote_address
    if not timestamp:
        timestamp = utils.utcnow()
    if isinstance(timestamp, basestring):
        timestamp = utils.parse_strtime(timestamp)
    self.timestamp = timestamp
    if not request_id:
        request_id = unicode(uuid.uuid4())
    self.request_id = request_id
    self.auth_token = auth_token
    self.strategy = strategy
    local.store.context = self
def host_schedule(rpc_method,
                  context, base_options,
                  instance_type,
                  availability_zone, injected_files,
                  admin_password, image,
                  num_instances,
                  requested_networks,
                  block_device_mapping,
                  security_group,
                  filter_properties):
    instance_uuid = base_options.get('uuid')
    now = utils.utcnow()
    self.db.instance_update(context, instance_uuid,
                            {'host': target_host, 'scheduled_at': now})
    rpc.cast(context,
             rpc.queue_get_for(context, CONF.compute_topic, target_host),
             {"method": "run_instance",
              "args": {"instance_uuid": instance_uuid,
                       "availability_zone": availability_zone,
                       "admin_password": admin_password,
                       "injected_files": injected_files,
                       "requested_networks": requested_networks}})
    # Instance was already created before calling scheduler
    return self.get(context, instance_uuid)
def describe_hosts(self, context, **_kwargs):
    """Returns status info for all nodes. Includes:

    * Hostname
    * Compute (up, down, None)
    * Instance count
    * Volume (up, down, None)
    * Volume Count
    """
    services = db.service_get_all(context, False)
    now = utils.utcnow()
    hosts = []
    rv = []
    for host in [service['host'] for service in services]:
        if not host in hosts:
            hosts.append(host)
    for host in hosts:
        compute = [s for s in services if s['host'] == host \
                   and s['binary'] == 'nova-compute']
        if compute:
            compute = compute[0]
        instances = db.instance_get_all_by_host(context, host)
        volume = [s for s in services if s['host'] == host \
                  and s['binary'] == 'nova-volume']
        if volume:
            volume = volume[0]
        volumes = db.volume_get_all_by_host(context, host)
        rv.append(host_dict(host, compute, instances, volume, volumes,
                            now))
    return {'hosts': rv}
def _provision_volume(self, context, vol, vsa_id, availability_zone): if availability_zone is None: availability_zone = FLAGS.storage_availability_zone now = utils.utcnow() options = { "size": vol["size"], "user_id": context.user_id, "project_id": context.project_id, "snapshot_id": None, "availability_zone": availability_zone, "status": "creating", "attach_status": "detached", "display_name": vol["name"], "display_description": vol["description"], "volume_type_id": vol["volume_type_id"], "metadata": dict(to_vsa_id=vsa_id), } size = vol["size"] host = vol["host"] name = vol["name"] LOG.debug(_("Provision volume %(name)s of size %(size)s GB on " "host %(host)s"), locals()) volume_ref = db.volume_create(context.elevated(), options) driver.cast_to_volume_host(context, vol["host"], "create_volume", volume_id=volume_ref["id"], snapshot_id=None)
def __init__(self, user_id, project_id, is_admin=None, read_deleted="no", roles=None, remote_address=None, timestamp=None, request_id=None, auth_token=None, overwrite=True, quota_class=None, user_name=None, project_name=None, **kwargs): """ :param read_deleted: 'no' indicates deleted records are hidden, 'yes' indicates deleted records are visible, 'only' indicates that *only* deleted records are visible. :param overwrite: Set to False to ensure that the greenthread local copy of the index is not overwritten. :param kwargs: Extra arguments that might be present, but we ignore because they possibly came in from older rpc messages. """ if kwargs: LOG.warn( _('Arguments dropped when creating context: %s') % str(kwargs)) self.user_id = user_id self.project_id = project_id self.roles = roles or [] self.is_admin = is_admin if self.is_admin is None: self.is_admin = 'admin' in [x.lower() for x in self.roles] elif self.is_admin and 'admin' not in self.roles: self.roles.append('admin') self.read_deleted = read_deleted self.remote_address = remote_address if not timestamp: timestamp = utils.utcnow() if isinstance(timestamp, basestring): timestamp = utils.parse_strtime(timestamp) self.timestamp = timestamp if not request_id: request_id = generate_request_id() self.request_id = request_id self.auth_token = auth_token # NOTE(markmc): this attribute is currently only used by the # rs_limits turnstile pre-processor. # See https://lists.launchpad.net/openstack/msg12200.html self.quota_class = quota_class self.user_name = user_name self.project_name = project_name if overwrite or not hasattr(local.store, 'context'): self.update_store()
def region_destroy(context, region_id):
    session = get_session()
    with session.begin():
        session.query(models.Region).\
                filter_by(id=region_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})
def test_run_terminate_timestamps(self):
    """Make sure timestamps are set for launched and destroyed"""
    instance_id = self._create_instance()
    instance_ref = db.instance_get(self.context, instance_id)
    self.assertEqual(instance_ref['launched_at'], None)
    self.assertEqual(instance_ref['deleted_at'], None)
    launch = utils.utcnow()
    self.compute.run_instance(self.context, instance_id)
    instance_ref = db.instance_get(self.context, instance_id)
    self.assert_(instance_ref['launched_at'] > launch)
    self.assertEqual(instance_ref['deleted_at'], None)
    terminate = utils.utcnow()
    self.compute.terminate_instance(self.context, instance_id)
    self.context = self.context.elevated(True)
    instance_ref = db.instance_get(self.context, instance_id)
    self.assert_(instance_ref['launched_at'] < terminate)
    self.assert_(instance_ref['deleted_at'] > terminate)
def update_metadata(self, zone_metadata):
    """Update zone metadata after successful communications with
       child zone."""
    self.last_seen = utils.utcnow()
    self.attempt = 0
    self.capabilities = dict(
        [(k, v) for k, v in zone_metadata.iteritems() if k != 'name'])
    self.is_active = True
def test_fixed_ip_disassociate_all_by_timeout_multi_host(self):
    now = utils.utcnow()
    ctxt = context.get_admin_context()
    self._timeout_test(ctxt, now, True)
    result = db.fixed_ip_disassociate_all_by_timeout(ctxt, 'foo', now)
    self.assertEqual(result, 1)
    result = db.fixed_ip_disassociate_all_by_timeout(ctxt, 'bar', now)
    self.assertEqual(result, 0)
def host_service_caps_stale(self, host, service):
    """Check if host service capabilites are not recent enough."""
    allowed_time_diff = FLAGS.periodic_interval * 3
    caps = self.service_states[host][service]
    if (utils.utcnow() - caps["timestamp"]) <= \
            datetime.timedelta(seconds=allowed_time_diff):
        return False
    return True