def _test_local_tz(self):
    tz = tzlocal()
    dt_no_tz = datetime.datetime(year=2000, month=1, day=1, hour=12, minute=34)
    now_dt = datetime.datetime(year=2000, month=1, day=1, hour=12, minute=34, tzinfo=tz)
    isodate.parse_date(now_dt.isoformat())
    # last member is is_dst, which is -1 if there is no tzinfo, which
    # we expect for dt_no_tz
    #
    # see if we get the same times
    now_dt_tt = now_dt.timetuple()
    dt_no_tz_tt = dt_no_tz.timetuple()

    # tm_isdst (timetuple()[8]) is 0 if a tz is set but the dst offset is 0;
    # if it is -1, no timezone is set
    if now_dt_tt[8] == 1 and dt_no_tz_tt[8] == -1:
        # we are applying DST to the now time, but not to the no_tz time, so
        # they will be off by an hour. This is kind of weird
        self.assertEquals(now_dt_tt[:2], dt_no_tz_tt[:2])
        self.assertEquals(now_dt_tt[4:7], dt_no_tz_tt[4:7])

        # add an hour for comparisons
        dt_no_tz_dst = dt_no_tz + datetime.timedelta(hours=1)
        self.assertEquals(now_dt_tt[3], dt_no_tz_dst.timetuple()[3])
    else:
        self.assertEquals(now_dt_tt[:7], dt_no_tz_tt[:7])

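# Small illustrative check (standard library behaviour, not project code) of
# the tm_isdst convention the comments above rely on: a naive datetime reports
# tm_isdst == -1, while an aware datetime reports 0 or 1 depending on whether
# DST is in effect for its tzinfo.
import datetime
from dateutil.tz import tzutc

assert datetime.datetime(2000, 1, 1, 12, 34).timetuple()[8] == -1
assert datetime.datetime(2000, 1, 1, 12, 34, tzinfo=tzutc()).timetuple()[8] == 0
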
def _dates_overlap(self, pool, certs):
    pool_start = isodate.parse_date(pool['startDate'])
    pool_end = isodate.parse_date(pool['endDate'])

    for cert in certs:
        cert_range = cert.valid_range
        if cert_range.has_date(pool_start) or cert_range.has_date(pool_end):
            return True
    return False

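# Illustrative stand-in (not project code) for the cert.valid_range interface
# used above: the overlap test only asks whether either pool boundary date
# falls inside a certificate's validity window, assuming an inclusive
# has_date(date) -> bool check.
import datetime

class _FakeValidRange(object):
    def __init__(self, begin, end):
        self.begin = begin
        self.end = end

    def has_date(self, date):
        return self.begin <= date <= self.end

_r = _FakeValidRange(datetime.date(2012, 1, 1), datetime.date(2012, 12, 31))
assert _r.has_date(datetime.date(2012, 6, 1))
assert not _r.has_date(datetime.date(2013, 1, 1))
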
def check_for_update(self):
    if self.exists():
        data = json.loads(self.read())
        last_update = parse_date(data["lastUpdate"])
    else:
        last_update = None
    return self._query_for_update(if_modified_since=last_update)

def test_2038_bug(self):
    parsed = isodate.parse_date("2038-11-24T00:00:00.000+0000")
    # this should be okay with either time parser, even on
    # 32-bit platforms. maybe
    self.assertEquals(2038, parsed.year)
    self.assertEquals(11, parsed.month)
    self.assertEquals(24, parsed.day)

def test_9999_bug(self):
    parsed = isodate.parse_date("9999-09-06T00:00:00.000+0000")
    # depending on what sys modules are available, the different
    # parsers handle overflow slightly differently
    if isodate.parse_date_impl_name == 'dateutil':
        self._dateutil_overflow(parsed)
    else:
        self._pyxml_overflow(parsed)

def test_server_date_utc_timezone(self):
    # sample date from json response from server
    server_date = "2012-04-10T00:00:00.000+0000"
    dt = isodate.parse_date(server_date)
    # no dst
    self.assertEquals(datetime.timedelta(seconds=0), dt.tzinfo.dst(dt))
    # it's a utc date, no offset
    self.assertEquals(datetime.timedelta(seconds=0), dt.tzinfo.utcoffset(dt))

def test_10000_bug(self):
    # dateutil is okay up to 9999, so we just return 9999-09-06 after that,
    # since that's what datetime/dateutil do.
    # on RHEL5, y10k breaks pyxml with a ValueError
    parsed = isodate.parse_date("10000-09-06T00:00:00.000+0000")
    if isodate.parse_date_impl_name == 'dateutil':
        self._dateutil_overflow(parsed)
    else:
        self._pyxml_overflow(parsed)

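# Hedged sketch of the overflow behaviour the comment above describes (the
# real _dateutil_overflow/_pyxml_overflow helpers are not shown here, and
# _clamp_overflow is a hypothetical name): Python's datetime cannot represent
# years past datetime.MAXYEAR (9999), so a parser hitting a year-10000 date
# has to clamp or raise; per the comment, the dateutil path clamps to 9999-09-06.
import datetime

def _clamp_overflow(year, month, day):
    if year > datetime.MAXYEAR:
        return datetime.date(datetime.MAXYEAR, 9, 6)
    return datetime.date(year, month, day)

assert _clamp_overflow(10000, 9, 6) == datetime.date(9999, 9, 6)
assert _clamp_overflow(2038, 11, 24) == datetime.date(2038, 11, 24)
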
def calculate(self, product_hash):
    """
    Calculate the valid date range for the specified product based on
    today's date.

    Partially entitled products are considered when determining the
    valid range.

    NOTE: The returned date range will be in GMT, so keep this in mind
    when presenting these dates to the user.
    """
    # If we're not registered, don't return a valid range:
    if not self.identity.is_valid():
        return None

    if self.prod_status is None:
        return None

    for prod in self.prod_status:
        if product_hash != prod['productId']:
            continue

        # Found the product ID requested:
        if 'startDate' in prod and 'endDate' in prod:
            # Unentitled product:
            if prod['startDate'] is None or prod['endDate'] is None:
                return None
            return DateRange(parse_date(prod['startDate']),
                             parse_date(prod['endDate']))
        else:
            # startDate / endDate not supported by the server:
            log.warn("Server does not support product date ranges.")
            return None

    # At this point, we haven't found the installed product that was
    # asked for, which could indicate the server somehow doesn't know
    # about it yet. This is extremely weird and should be unlikely,
    # but we will log and handle gracefully:
    log.error("Requested status for installed product server does not "
              "know about: %s" % product_hash)
    return None

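# Illustrative usage sketch (hypothetical caller, not project code): since the
# DateRange returned by calculate() wraps two GMT datetimes, a "currently
# entitled?" check reduces to a bounds comparison between timezone-aware
# datetimes.
import datetime
from dateutil.tz import tzutc

_start = datetime.datetime(2012, 4, 10, tzinfo=tzutc())
_end = datetime.datetime(2013, 4, 10, tzinfo=tzutc())
_now = datetime.datetime(2012, 6, 1, tzinfo=tzutc())
assert _start <= _now <= _end
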
def add_pool(self, pool, default_quantity_value):
    self.total_contracts += 1
    self.total_contracts_label.set_text(str(self.total_contracts))
    self.subscription_name_label.set_text(pool['productName'])

    # Use unlimited for -1 quantities
    quantity = pool['quantity']
    if quantity < 0:
        quantity = _('Unlimited')
        quantity_available = -1
    else:
        quantity_available = int(pool['quantity']) - int(pool['consumed'])

    # cap the default selected quantity at the max available
    # for that pool. See #855257. Watch out for quantity_available
    # being -1 (unlimited).
    if default_quantity_value > quantity_available and quantity_available >= 0:
        default_quantity_value = quantity_available

    quantity_increment = 1
    if 'calculatedAttributes' in pool:
        calculated_attrs = pool['calculatedAttributes']
        if 'quantity_increment' in calculated_attrs:
            quantity_increment = int(calculated_attrs['quantity_increment'])

    self.model.add_map({
        'contract_number': pool['contractNumber'],
        'consumed_fraction': "%s / %s" % (pool['consumed'], quantity),
        'start_date': isodate.parse_date(pool['startDate']),
        'end_date': isodate.parse_date(pool['endDate']),
        'default_quantity': default_quantity_value,
        'product_name': pool['productName'],
        'pool': pool,
        'is_virt_only': PoolWrapper(pool).is_virt_only(),
        'multi_entitlement': allows_multi_entitlement(pool),
        'quantity_available': quantity_available,
        'quantity_increment': quantity_increment,
    })

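# Standalone sketch (hypothetical helper, not project code) of the clamping
# performed above: the default selected quantity is capped at what is actually
# available, except when available is -1, which means unlimited.
def _cap_default_quantity(requested, available):
    if available >= 0 and requested > available:
        return available
    return requested

assert _cap_default_quantity(10, 4) == 4    # capped at the available amount
assert _cap_default_quantity(10, -1) == 10  # -1 means unlimited, no cap
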
def get_available_entitlements(facts, get_all=False, active_on=None,
                               overlapping=False, uninstalled=False, text=None):
    """
    Returns a list of entitlement pools from the server.

    Facts will be updated if appropriate before making the request, to
    ensure the rules on the server will pass if appropriate.

    The 'all' setting can be used to return all pools, even if the rules do
    not pass. (i.e. show pools that are incompatible with your hardware)
    """
    columns = ['id', 'quantity', 'consumed', 'endDate', 'productName',
               'providedProducts', 'productId', 'attributes', 'pool_type',
               'service_level', 'service_type', 'suggested', 'contractNumber']

    pool_stash = PoolStash(Facts(require(ENT_DIR), require(PROD_DIR)))
    dlist = pool_stash.get_filtered_pools_list(active_on, not get_all,
                                               overlapping, uninstalled, text)

    for pool in dlist:
        pool_wrapper = PoolWrapper(pool)
        pool['providedProducts'] = pool_wrapper.get_provided_products()
        if allows_multi_entitlement(pool):
            pool['multi-entitlement'] = "Yes"
        else:
            pool['multi-entitlement'] = "No"
        support_attrs = pool_wrapper.get_product_attributes("support_level",
                                                            "support_type")
        pool['service_level'] = support_attrs['support_level']
        pool['service_type'] = support_attrs['support_type']
        pool['suggested'] = pool_wrapper.get_suggested_quantity()
        pool['pool_type'] = pool_wrapper.get_pool_type()

        if pool['suggested'] is None:
            # no default, so default is None if key not found
            pool['suggested'] = ""

    data = [_sub_dict(pool, columns) for pool in dlist]
    for d in data:
        if int(d['quantity']) < 0:
            d['quantity'] = _('Unlimited')
        else:
            d['quantity'] = str(int(d['quantity']) - int(d['consumed']))

        d['endDate'] = format_date(isodate.parse_date(d['endDate']))
        del d['consumed']

    return data

def get_available_entitlements(cpserver, consumer_uuid, facts, get_all=False,
                               active_on=None):
    """
    Returns a list of entitlement pools from the server.

    Facts will be updated if appropriate before making the request, to
    ensure the rules on the server will pass if appropriate.

    The 'all' setting can be used to return all pools, even if the rules do
    not pass. (i.e. show pools that are incompatible with your hardware)
    """
    columns = ['id', 'quantity', 'consumed', 'endDate', 'productName',
               'providedProducts', 'productId', 'attributes',
               'multi-entitlement', 'service_level', 'service_type']

    dlist = list_pools(cpserver, consumer_uuid, facts, get_all, active_on)

    for pool in dlist:
        pool_wrapper = PoolWrapper(pool)
        if allows_multi_entitlement(pool):
            pool['multi-entitlement'] = "Yes"
        else:
            pool['multi-entitlement'] = "No"
        support_attrs = pool_wrapper.get_product_attributes("support_level",
                                                            "support_type")
        pool['service_level'] = support_attrs['support_level']
        pool['service_type'] = support_attrs['support_type']

    data = [_sub_dict(pool, columns) for pool in dlist]
    for d in data:
        if int(d['quantity']) < 0:
            d['quantity'] = _('Unlimited')
        else:
            d['quantity'] = str(int(d['quantity']) - int(d['consumed']))

        d['endDate'] = format_date(isodate.parse_date(d['endDate']))
        del d['consumed']

    return data

def test_check_for_update_provides_date(self):
    mock_exists = Mock(return_value=True)
    with patch('os.path.exists', mock_exists):
        self.cache.check_for_update()
        date = isodate.parse_date("2016-12-01T21:56:35+0000")
        self.mock_uep.getAccessibleContent.assert_called_once_with(
            self.cache.identity.uuid, if_modified_since=date)

def get_available_entitlements(
    get_all=False,
    active_on=None,
    overlapping=False,
    uninstalled=False,
    text=None,
    filter_string=None,
    future=None,
    after_date=None,
    page=0,
    items_per_page=0,
    iso_dates=False,
):
    """
    Returns a list of entitlement pools from the server.

    The 'all' setting can be used to return all pools, even if the rules do
    not pass. (i.e. show pools that are incompatible with your hardware)
    """
    columns = [
        "id",
        "quantity",
        "consumed",
        "startDate",
        "endDate",
        "productName",
        "providedProducts",
        "productId",
        "roles",
        "attributes",
        "pool_type",
        "service_level",
        "service_type",
        "usage",
        "addons",
        "suggested",
        "contractNumber",
        "management_enabled",
    ]

    pool_stash = PoolStash()
    dlist = pool_stash.get_filtered_pools_list(
        active_on,
        not get_all,
        overlapping,
        uninstalled,
        text,
        filter_string,
        future=future,
        after_date=after_date,
        page=page,
        items_per_page=items_per_page,
    )

    if iso_dates:
        date_formatter = format_iso8601_date
    else:
        date_formatter = format_date

    for pool in dlist:
        pool_wrapper = PoolWrapper(pool)
        pool["providedProducts"] = pool_wrapper.get_provided_products()
        if allows_multi_entitlement(pool):
            pool["multi-entitlement"] = "Yes"
        else:
            pool["multi-entitlement"] = "No"
        support_attrs = pool_wrapper.get_product_attributes(
            "support_level", "support_type", "roles", "usage", "addons"
        )
        pool["service_level"] = support_attrs["support_level"]
        pool["service_type"] = support_attrs["support_type"]
        pool["roles"] = support_attrs["roles"]
        pool["usage"] = support_attrs["usage"]
        pool["addons"] = support_attrs["addons"]
        pool["suggested"] = pool_wrapper.get_suggested_quantity()
        pool["pool_type"] = pool_wrapper.get_pool_type()
        pool["management_enabled"] = pool_wrapper.management_enabled()

        if pool["suggested"] is None:
            # no default, so default is None if key not found
            pool["suggested"] = ""

    data = [_sub_dict(pool, columns) for pool in dlist]
    for d in data:
        if int(d["quantity"]) < 0:
            d["quantity"] = _("Unlimited")
        else:
            d["quantity"] = str(int(d["quantity"]) - int(d["consumed"]))

        d["startDate"] = date_formatter(isodate.parse_date(d["startDate"]))
        d["endDate"] = date_formatter(isodate.parse_date(d["endDate"]))
        del d["consumed"]

    return data

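# Standalone sketch (hypothetical helper, not project code) of the quantity
# column formatting used above: a negative pool quantity is rendered as
# "Unlimited" (the real code passes it through gettext), otherwise the
# remaining amount is quantity - consumed.
def _format_quantity_column(quantity, consumed):
    if int(quantity) < 0:
        return "Unlimited"
    return str(int(quantity) - int(consumed))

assert _format_quantity_column("-1", "0") == "Unlimited"
assert _format_quantity_column("10", "3") == "7"
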
def get_available_entitlements(get_all=False, active_on=None, overlapping=False,
                               uninstalled=False, text=None, filter_string=None,
                               future=None, after_date=None):
    """
    Returns a list of entitlement pools from the server.

    The 'all' setting can be used to return all pools, even if the rules do
    not pass. (i.e. show pools that are incompatible with your hardware)
    """
    columns = [
        'id', 'quantity', 'consumed', 'startDate', 'endDate', 'productName',
        'providedProducts', 'productId', 'attributes', 'pool_type',
        'service_level', 'service_type', 'suggested', 'contractNumber',
        'management_enabled'
    ]

    pool_stash = PoolStash()
    dlist = pool_stash.get_filtered_pools_list(active_on, not get_all, overlapping,
                                               uninstalled, text, filter_string,
                                               future=future, after_date=after_date)

    for pool in dlist:
        pool_wrapper = PoolWrapper(pool)
        pool['providedProducts'] = pool_wrapper.get_provided_products()
        if allows_multi_entitlement(pool):
            pool['multi-entitlement'] = "Yes"
        else:
            pool['multi-entitlement'] = "No"
        support_attrs = pool_wrapper.get_product_attributes("support_level",
                                                            "support_type")
        pool['service_level'] = support_attrs['support_level']
        pool['service_type'] = support_attrs['support_type']
        pool['suggested'] = pool_wrapper.get_suggested_quantity()
        pool['pool_type'] = pool_wrapper.get_pool_type()
        pool['management_enabled'] = pool_wrapper.management_enabled()

        if pool['suggested'] is None:
            # no default, so default is None if key not found
            pool['suggested'] = ""

    data = [_sub_dict(pool, columns) for pool in dlist]
    for d in data:
        if int(d['quantity']) < 0:
            d['quantity'] = _('Unlimited')
        else:
            d['quantity'] = str(int(d['quantity']) - int(d['consumed']))

        d['startDate'] = format_date(isodate.parse_date(d['startDate']))
        d['endDate'] = format_date(isodate.parse_date(d['endDate']))
        del d['consumed']

    return data

def _parse_server_status(self):
    """ Fetch entitlement status info from server and parse. """
    if not self.is_registered():
        log.debug("Unregistered, skipping server compliance check.")
        return

    # Override get_status
    status = self.get_compliance_status()
    if status is None:
        return

    # TODO: we're now mapping product IDs to entitlement cert JSON,
    # previously we mapped to actual entitlement cert objects. However,
    # nothing seems to actually use these, so it may not matter for now.
    self.valid_products = status['compliantProducts']
    self.partially_valid_products = status['partiallyCompliantProducts']
    self.partial_stacks = status['partialStacks']

    if 'reasons' in status:
        self.supports_reasons = True
        self.reasons = Reasons(status['reasons'], self)

    if 'status' in status and len(status['status']):
        self.system_status = status['status']
    # Some old candlepin versions do not return 'status' with information
    elif status['nonCompliantProducts']:
        self.system_status = 'invalid'
    elif self.partially_valid_products or self.partial_stacks or \
            self.reasons.reasons:
        self.system_status = 'partial'
    else:
        self.system_status = 'unknown'

    # For backward compatibility with the old "find first invalid date"
    # behaviour, we drop one second from the compliantUntil from the server
    # (as it is returning the first second we are invalid), then add a full
    # 24 hours, giving us the first date where we know we're completely
    # invalid from midnight to midnight.
    self.compliant_until = None
    if status['compliantUntil'] is not None:
        self.compliant_until = parse_date(status['compliantUntil'])

    # Look up product certs for each unentitled product returned by
    # the server:
    unentitled_pids = status['nonCompliantProducts']

    # Add in any installed products not in the server response. This
    # could happen if something changes before the certd runs. Log
    # a warning if it does, and treat it like an unentitled product.
    for pid in list(self.installed_products.keys()):
        if pid not in self.valid_products and pid not in \
                self.partially_valid_products and pid not in \
                unentitled_pids:
            log.warn("Installed product %s not present in response from "
                     "server." % pid)
            unentitled_pids.append(pid)

    for unentitled_pid in unentitled_pids:
        prod_cert = self.product_dir.find_by_product(unentitled_pid)
        # Ignore anything the server thinks we have but we don't.
        if prod_cert is None:
            log.warn("Server reported installed product not on system: %s" %
                     unentitled_pid)
            continue
        self.unentitled_products[unentitled_pid] = prod_cert

    self._scan_entitlement_certs()
    self.log_products()

def test_server_date_est_timezone(self):
    est_date = "2012-04-10T00:00:00.000-04:00"
    dt = isodate.parse_date(est_date)
    self.assertEquals(abs(datetime.timedelta(hours=-4)),
                      abs(dt.tzinfo.utcoffset(dt)))