def _test_is_older_than(self, fn):
    strptime = datetime.datetime.strptime
    with mock.patch('datetime.datetime') as datetime_mock:
        datetime_mock.utcnow.return_value = self.skynet_self_aware_time
        datetime_mock.strptime = strptime
        expect_true = timeutils.is_older_than(fn(self.one_minute_before),
                                              59)
        self.assertTrue(expect_true)
        expect_false = timeutils.is_older_than(fn(self.one_minute_before),
                                               60)
        self.assertFalse(expect_false)
        expect_false = timeutils.is_older_than(fn(self.one_minute_before),
                                               61)
        self.assertFalse(expect_false)

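# NOTE: the snippets below all rely on the behaviour exercised by the test
# above: timeutils.is_older_than(before, seconds) is true only when `before`
# lies more than `seconds` seconds in the past relative to utcnow(). A
# minimal standalone sketch of that contract (assuming the oslo_utils
# packaging of timeutils; older trees import it from openstack.common):

import datetime

from oslo_utils import timeutils

one_minute_ago = timeutils.utcnow() - datetime.timedelta(minutes=1)
assert timeutils.is_older_than(one_minute_ago, 59)      # older than 59s
assert not timeutils.is_older_than(one_minute_ago, 61)  # not older than 61s
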
def check_backlogged_hosting_devices(self):
    """Checks the status of backlogged hosting devices.

    Skips newly spun up instances during their booting time as specified
    in the boot time parameter.

    :return A dict of the format:
            {'reachable': [<hd_id>,..], 'dead': [<hd_id>,..]}
    """
    response_dict = {'reachable': [], 'dead': []}
    LOG.debug("Current Backlogged hosting devices: %s",
              self.backlog_hosting_devices.keys())
    for hd_id in self.backlog_hosting_devices.keys():
        hd = self.backlog_hosting_devices[hd_id]['hd']
        if not timeutils.is_older_than(hd['created_at'],
                                       hd['booting_time']):
            LOG.info(_LI("Hosting device: %(hd_id)s @ %(ip)s hasn't "
                         "passed minimum boot time. Skipping it. "),
                     {'hd_id': hd_id, 'ip': hd['management_ip_address']})
            continue
        LOG.info(_LI("Checking hosting device: %(hd_id)s @ %(ip)s for "
                     "reachability."),
                 {'hd_id': hd_id, 'ip': hd['management_ip_address']})
        if _is_pingable(hd['management_ip_address']):
            hd.pop('backlog_insertion_ts', None)
            del self.backlog_hosting_devices[hd_id]
            response_dict['reachable'].append(hd_id)
            LOG.info(_LI("Hosting device: %(hd_id)s @ %(ip)s is now "
                         "reachable. Adding it to response"),
                     {'hd_id': hd_id, 'ip': hd['management_ip_address']})
        else:
            LOG.info(_LI("Hosting device: %(hd_id)s @ %(ip)s still not "
                         "reachable "),
                     {'hd_id': hd_id, 'ip': hd['management_ip_address']})
            if timeutils.is_older_than(
                    hd['backlog_insertion_ts'],
                    cfg.CONF.cfg_agent.hosting_device_dead_timeout):
                LOG.debug("Hosting device: %(hd_id)s @ %(ip)s hasn't "
                          "been reachable for the last %(time)d seconds. "
                          "Marking it dead.",
                          {'hd_id': hd_id,
                           'ip': hd['management_ip_address'],
                           'time': cfg.CONF.cfg_agent.
                           hosting_device_dead_timeout})
                response_dict['dead'].append(hd_id)
                hd.pop('backlog_insertion_ts', None)
                del self.backlog_hosting_devices[hd_id]
    LOG.debug("Response: %s", response_dict)
    return response_dict

def find_orphaned_instances(xenapi):
    """Find and return a list of orphaned instances."""
    ctxt = context.get_admin_context(read_deleted="only")

    orphaned_instances = []

    for vm_ref, vm_rec in _get_applicable_vm_recs(xenapi):
        try:
            uuid = vm_rec['other_config']['nova_uuid']
            instance = db.instance_get_by_uuid(ctxt, uuid)
        except (KeyError, exception.InstanceNotFound):
            # NOTE(jk0): Err on the side of caution here. If we don't know
            # anything about the particular instance, ignore it.
            print_xen_object("INFO: Ignoring VM", vm_rec, indent_level=0)
            continue

        # NOTE(jk0): This would be triggered if a VM was deleted but the
        # actual deletion process failed somewhere along the line.
        is_active_and_deleting = (instance.vm_state == "active" and
                                  instance.task_state == "deleting")

        # NOTE(jk0): A zombie VM is an instance that is not active and hasn't
        # been updated in over the specified period.
        is_zombie_vm = (instance.vm_state != "active" and
                        timeutils.is_older_than(
                            instance.updated_at,
                            CONF.zombie_instance_updated_at_window))

        if is_active_and_deleting or is_zombie_vm:
            orphaned_instances.append((vm_ref, vm_rec, instance))

    return orphaned_instances

def _cache_valid(self, host):
    cachevalid = False
    if host in self.compute_nodes:
        node_stats = self.compute_nodes.get(host)
        if not timeutils.is_older_than(
                node_stats['vtime'],
                CONF.trusted_computing.attestation_auth_timeout):
            cachevalid = True
    return cachevalid

def get_our_capabilities(self, include_children=True):
    capabs = copy.deepcopy(self.my_cell_state.capabilities)
    if include_children:
        for cell in self.child_cells.values():
            if timeutils.is_older_than(cell.last_seen,
                                       CONF.cells.mute_child_interval):
                continue
            for capab_name, values in cell.capabilities.items():
                if capab_name not in capabs:
                    capabs[capab_name] = set([])
                capabs[capab_name] |= values
    return capabs

def is_ec2_timestamp_expired(request, expires=None):
    """Checks the timestamp or expiry time included in an EC2 request
    and returns true if the request is expired
    """
    query_time = None
    timestamp = request.get('Timestamp')
    expiry_time = request.get('Expires')

    def parse_strtime(strtime):
        if _ms_time_regex.match(strtime):
            # NOTE(MotoKen): time format for aws-sdk-java contains millisecond
            time_format = "%Y-%m-%dT%H:%M:%S.%fZ"
        else:
            time_format = "%Y-%m-%dT%H:%M:%SZ"
        return timeutils.parse_strtime(strtime, time_format)

    try:
        if timestamp and expiry_time:
            msg = _("Request must include either Timestamp or Expires,"
                    " but cannot contain both")
            LOG.error(msg)
            raise exception.InvalidRequest(msg)
        elif expiry_time:
            query_time = parse_strtime(expiry_time)
            return timeutils.is_older_than(query_time, -1)
        elif timestamp:
            query_time = parse_strtime(timestamp)

        # Check if the difference between the timestamp in the request
        # and the time on our servers is larger than 5 minutes, the
        # request is too old (or too new).
        if query_time and expires:
            return timeutils.is_older_than(query_time, expires) or \
                timeutils.is_newer_than(query_time, expires)
        return False
    except ValueError:
        LOG.audit(_("Timestamp is invalid."))
        return True

def _cooldown_inprogress(self):
    inprogress = False
    try:
        # Negative values don't make sense, so they are clamped to zero
        cooldown = max(0, self.properties[self.COOLDOWN])
    except TypeError:
        # If not specified, it will be None, same as cooldown == 0
        cooldown = 0

    metadata = self.metadata_get()
    if metadata and cooldown != 0:
        last_adjust = metadata.keys()[0]
        if not timeutils.is_older_than(last_adjust, cooldown):
            inprogress = True
    return inprogress

def _weigh_object(self, cell, weight_properties):
    """Check cell against the last_seen timestamp that indicates the time
    that the most recent capability or capacity update was received from
    the given cell.
    """
    last_seen = cell.last_seen
    secs = CONF.cells.mute_child_interval
    if timeutils.is_older_than(last_seen, secs):
        # yep, that's a mute child; recommend highly that it be skipped!
        LOG.warning(_LW("%(cell)s has not been seen since %(last_seen)s "
                        "and is being treated as mute."),
                    {'cell': cell, 'last_seen': last_seen})
        return CONF.cells.mute_weight_value
    else:
        return 0

def flush(self, context):
    if not self.initial_timestamp:
        return []

    expired = (self.retention_time and
               timeutils.is_older_than(self.initial_timestamp,
                                       self.retention_time))
    full = self.aggregated_samples >= self.size
    if full or expired:
        x = self.samples.values()
        # gauge aggregates need to be averages
        for s in x:
            if s.type == sample.TYPE_GAUGE:
                key = self._get_unique_key(s)
                s.volume /= self.counts[key]
        self.samples.clear()
        self.counts.clear()
        self.aggregated_samples = 0
        self.initial_timestamp = None
        return x
    return []

def _age_cached_images(self, context, datastore, dc_info, ds_path):
    """Ages cached images."""
    age_seconds = CONF.remove_unused_original_minimum_age_seconds
    unused_images = self.originals - self.used_images
    ds_browser = self._get_ds_browser(datastore.ref)
    for image in unused_images:
        path = self.timestamp_folder_get(ds_path, image)
        # Lock to ensure that the spawn will not try and access a image
        # that is currently being deleted on the datastore.
        with lockutils.lock(str(path), lock_file_prefix='nova-vmware-ts',
                            external=True):
            ts = self._get_timestamp(ds_browser, path)
            if not ts:
                ts_path = path.join(self._get_timestamp_filename())
                try:
                    ds_util.mkdir(self._session, ts_path, dc_info.ref)
                except vexc.FileAlreadyExistsException:
                    LOG.debug("Timestamp already exists.")
                LOG.info(_LI("Image %s is no longer used by this node. "
                             "Pending deletion!"), image)
            else:
                dt = self._get_datetime_from_filename(str(ts))
                if timeutils.is_older_than(dt, age_seconds):
                    LOG.info(_LI("Image %s is no longer used. "
                                 "Deleting!"), path)
                    # Image has aged - delete the image ID folder
                    self._folder_delete(path, dc_info.ref)

    # If the image is used and the timestamp file exists then we delete
    # the timestamp.
    for image in self.used_images:
        path = self.timestamp_folder_get(ds_path, image)
        with lockutils.lock(str(path), lock_file_prefix='nova-vmware-ts',
                            external=True):
            self.timestamp_cleanup(dc_info.ref, ds_browser, path)

def is_agent_down(cls, heart_beat_time):
    return timeutils.is_older_than(heart_beat_time,
                                   cfg.CONF.agent_down_time)

def is_agent_down(cls, heart_beat_time,
                  timeout=cfg.CONF.general.cfg_agent_down_time):
    return timeutils.is_older_than(heart_beat_time, timeout)

def provide_ems(self, requester, netapp_backend, app_version,
                server_type="cluster"):
    """Provide ems with volume stats for the requester.

    :param server_type: cluster or 7mode.
    """
    def _create_ems(netapp_backend, app_version, server_type):
        """Create ems API request."""
        ems_log = NaElement('ems-autosupport-log')
        host = socket.getfqdn() or 'Cinder_node'
        if server_type == "cluster":
            dest = "cluster node"
        else:
            dest = "7 mode controller"
        ems_log.add_new_child('computer-name', host)
        ems_log.add_new_child('event-id', '0')
        ems_log.add_new_child('event-source',
                              'Cinder driver %s' % netapp_backend)
        ems_log.add_new_child('app-version', app_version)
        ems_log.add_new_child('category', 'provisioning')
        ems_log.add_new_child('event-description',
                              'OpenStack Cinder connected to %s' % dest)
        ems_log.add_new_child('log-level', '6')
        ems_log.add_new_child('auto-support', 'false')
        return ems_log

    def _create_vs_get():
        """Create vs_get API request."""
        vs_get = NaElement('vserver-get-iter')
        vs_get.add_new_child('max-records', '1')
        query = NaElement('query')
        query.add_node_with_children('vserver-info',
                                     **{'vserver-type': 'node'})
        vs_get.add_child_elem(query)
        desired = NaElement('desired-attributes')
        desired.add_node_with_children(
            'vserver-info', **{'vserver-name': '', 'vserver-type': ''})
        vs_get.add_child_elem(desired)
        return vs_get

    def _get_cluster_node(na_server):
        """Get the cluster node for ems."""
        na_server.set_vserver(None)
        vs_get = _create_vs_get()
        res = na_server.invoke_successfully(vs_get)
        if (res.get_child_content('num-records') and
                int(res.get_child_content('num-records')) > 0):
            attr_list = res.get_child_by_name('attributes-list')
            vs_info = attr_list.get_child_by_name('vserver-info')
            vs_name = vs_info.get_child_content('vserver-name')
            return vs_name
        return None

    do_ems = True
    if hasattr(requester, 'last_ems'):
        sec_limit = 3559
        if not (timeutils.is_older_than(requester.last_ems, sec_limit)):
            do_ems = False
    if do_ems:
        na_server = copy.copy(self.connection)
        na_server.set_timeout(25)
        ems = _create_ems(netapp_backend, app_version, server_type)
        try:
            if server_type == "cluster":
                api_version = na_server.get_api_version()
                if api_version:
                    major, minor = api_version
                else:
                    raise NaApiError(code='Not found',
                                     message='No API version found')
                if major == 1 and minor > 15:
                    node = getattr(requester, 'vserver', None)
                else:
                    node = _get_cluster_node(na_server)
                if node is None:
                    raise NaApiError(code='Not found',
                                     message='No vserver found')
                na_server.set_vserver(node)
            else:
                na_server.set_vfiler(None)
            na_server.invoke_successfully(ems, True)
            LOG.debug("ems executed successfully.")
        except NaApiError as e:
            LOG.warning(_LW("Failed to invoke ems. Message : %s") % e)
        finally:
            requester.last_ems = timeutils.utcnow()