def test_time_spec_error_message():
    """A malformed time spec raises ValueError naming both accepted formats."""
    moment = datetime(2020, 4, 10, 10, 11, tzinfo=timezone.utc)
    # "Thur" is not a valid weekday abbreviation, so the first entry is rejected.
    bad_spec = "Mon-Thur 09:00-20:00 Europe/London,Fri-Fri 10:00-18:00 Europe/London"
    with pytest.raises(ValueError) as excinfo:
        matches_time_spec(moment, bad_spec)
    expected_fragment = (
        'Time spec value "Mon-Thur 09:00-20:00 Europe/London" does not match format '
        '("Mon-Fri 06:30-20:30 Europe/Berlin" or '
        '"2019-01-01T00:00:00+00:00-2019-01-02T12:34:56+00:00")'
    )
    assert expected_fragment in str(excinfo.value)
def autoscale_resource(resource: pykube.objects.NamespacedAPIObject, default_uptime: str, default_downtime: str,
                       forced_uptime: bool, dry_run: bool, now: datetime.datetime, grace_period: int,
                       downtime_replicas: int):
    """Scale one Kubernetes resource up or down based on its uptime/downtime time specs.

    Reads per-resource annotations (exclude, uptime, downtime, original/downtime
    replicas) with the given defaults as fallback, decides whether ``now`` falls
    into uptime, and adjusts ``resource.replicas`` accordingly. With ``dry_run``
    the planned update is only logged. Any exception is caught and logged so one
    failing resource does not abort processing of others.
    """
    try:
        # any value different from "false" will ignore the resource (to be on the safe side)
        exclude = resource.annotations.get(EXCLUDE_ANNOTATION, 'false').lower() != 'false'
        if exclude:
            logger.debug('%s %s/%s was excluded', resource.kind, resource.namespace, resource.name)
        else:
            replicas = resource.replicas
            if forced_uptime:
                # Caller forces uptime (e.g. global override); time specs are not consulted.
                uptime = "forced"
                downtime = "ignored"
                is_uptime = True
            else:
                # Per-resource annotations override the passed-in defaults.
                uptime = resource.annotations.get(UPTIME_ANNOTATION, default_uptime)
                downtime = resource.annotations.get(DOWNTIME_ANNOTATION, default_downtime)
                # Uptime wins only when "now" matches uptime AND does not match downtime.
                is_uptime = helper.matches_time_spec(now, uptime) and not helper.matches_time_spec(now, downtime)

            # Replica count recorded by an earlier scale-down (string or None).
            original_replicas = resource.annotations.get(ORIGINAL_REPLICAS_ANNOTATION)
            logger.debug('%s %s/%s has %s replicas (original: %s, uptime: %s)',
                         resource.kind, resource.namespace, resource.name, replicas, original_replicas, uptime)
            update_needed = False
            if is_uptime and replicas == 0 and original_replicas and int(original_replicas) > 0:
                # Scale back up to the remembered replica count and clear the marker annotation.
                logger.info('Scaling up %s %s/%s from %s to %s replicas (uptime: %s, downtime: %s)',
                            resource.kind, resource.namespace, resource.name, replicas, original_replicas,
                            uptime, downtime)
                resource.replicas = int(original_replicas)
                resource.annotations[ORIGINAL_REPLICAS_ANNOTATION] = None
                update_needed = True
            elif not is_uptime and replicas > 0:
                # Downtime: scale down to the annotated target (default: downtime_replicas),
                # unless the resource was created too recently (grace period).
                target_replicas = int(resource.annotations.get(DOWNTIME_REPLICAS_ANNOTATION, downtime_replicas))
                if within_grace_period(resource, grace_period, now):
                    logger.info('%s %s/%s within grace period (%ds), not scaling down (yet)',
                                resource.kind, resource.namespace, resource.name, grace_period)
                else:
                    logger.info('Scaling down %s %s/%s from %s to %s replicas (uptime: %s, downtime: %s)',
                                resource.kind, resource.namespace, resource.name, replicas, target_replicas,
                                uptime, downtime)
                    # Remember the current count so the next uptime window can restore it.
                    resource.annotations[ORIGINAL_REPLICAS_ANNOTATION] = str(replicas)
                    resource.replicas = target_replicas
                    update_needed = True
            if update_needed:
                if dry_run:
                    logger.info('**DRY-RUN**: would update %s %s/%s',
                                resource.kind, resource.namespace, resource.name)
                else:
                    resource.update()
    except Exception as e:
        # Best-effort: log and continue with other resources instead of crashing.
        logger.exception('Failed to process %s %s/%s : %s',
                         resource.kind, resource.namespace, resource.name, str(e))
def test_time_spec():
    """matches_time_spec handles keywords and weekday-range specs."""
    # Special keywords.
    assert not matches_time_spec(datetime.now(), 'never')
    assert matches_time_spec(datetime.now(), 'always')
    # Degenerate (empty) range vs. the full week.
    assert not matches_time_spec(datetime.now(), 'Mon-Mon 00:00-00:00 CET')
    assert matches_time_spec(datetime.now(), 'Mon-Sun 00:00-24:00 CET')

    # Sunday, 2017-11-26 15:33 (naive datetime).
    sunday_afternoon = datetime(2017, 11, 26, 15, 33)
    assert matches_time_spec(sunday_afternoon, 'Sat-Sun 15:30-16:00 UTC')
    assert not matches_time_spec(sunday_afternoon, 'Sat-Sun 15:34-16:00 UTC')
    assert not matches_time_spec(sunday_afternoon, 'Mon-Fri 08:00-18:00 UTC')
    # One matching entry in a comma-separated list is enough.
    assert matches_time_spec(
        sunday_afternoon, 'Mon-Fri 08:00-18:00 UTC, Sun-Sun 15:30-16:00 UTC')

    # 2018-11-04 20:30 — the spec below is evaluated in Pacific/Auckland time.
    later_evening = datetime(2018, 11, 4, 20, 30, 00)
    assert matches_time_spec(later_evening, 'Mon-Fri 09:00-10:00 Pacific/Auckland')
    assert not matches_time_spec(later_evening, 'Sat-Sun 09:00-10:00 Pacific/Auckland')
def test_week_starts_sunday():
    """Day ranges that start on Sunday match both Sunday and the following Monday."""
    monday = datetime(2017, 11, 27, 15, 33, tzinfo=timezone.utc)  # Monday, November 27th 2017
    sunday = datetime(2017, 11, 26, 15, 33, tzinfo=timezone.utc)  # Sunday, November 26th 2017
    for moment in (monday, sunday):
        assert matches_time_spec(moment, "Sun-Fri 15:30-16:00 UTC")
        assert matches_time_spec(moment, "Sun-Mon 00:00-16:00 UTC")
        assert not matches_time_spec(moment, "Sun-Mon 00:00-15:00 UTC")
def test_invalid_time_spec():
    """An empty time spec is rejected with ValueError."""
    empty_spec = ""
    with pytest.raises(ValueError):
        matches_time_spec(datetime.now(), empty_spec)
def test_time_spec():
    """matches_time_spec supports keywords, weekday ranges and absolute ISO-8601 ranges."""
    # Special keywords.
    assert not matches_time_spec(datetime.now(), "never")
    assert matches_time_spec(datetime.now(), "always")
    # Degenerate (empty) range vs. the full week.
    assert not matches_time_spec(datetime.now(), "Mon-Mon 00:00-00:00 CET")
    assert matches_time_spec(datetime.now(), "Mon-Sun 00:00-24:00 CET")

    # Sunday, 2017-11-26 15:33 UTC.
    sunday_afternoon = datetime(2017, 11, 26, 15, 33, tzinfo=timezone.utc)
    assert matches_time_spec(sunday_afternoon, "Sat-Sun 15:30-16:00 UTC")
    assert not matches_time_spec(sunday_afternoon, "Sat-Sun 15:34-16:00 UTC")
    assert not matches_time_spec(sunday_afternoon, "Mon-Fri 08:00-18:00 UTC")
    # One matching entry in a comma-separated list is enough.
    assert matches_time_spec(
        sunday_afternoon, "Mon-Fri 08:00-18:00 UTC, Sun-Sun 15:30-16:00 UTC")
    # Absolute ISO-8601 ranges around / before the instant.
    assert matches_time_spec(
        sunday_afternoon, "2017-11-26T14:04:48+00:00-2017-11-26T16:04:48+00:00")
    assert not matches_time_spec(
        sunday_afternoon, "2017-11-26T14:04:48+00:00-2017-11-26T15:04:48+00:00")

    # 2018-11-04 20:30 UTC — the weekday specs below are evaluated in Pacific/Auckland.
    later_evening = datetime(2018, 11, 4, 20, 30, 00, tzinfo=timezone.utc)
    assert matches_time_spec(later_evening, "Mon-Fri 09:00-10:00 Pacific/Auckland")
    assert not matches_time_spec(later_evening, "Sat-Sun 09:00-10:00 Pacific/Auckland")
    # Wide absolute range containing the instant.
    assert matches_time_spec(
        later_evening, "2017-01-01T00:01:02+00:00-2018-12-31T23:59:00+00:00")
    # Comma-separated absolute ranges: the second one matches.
    assert matches_time_spec(
        later_evening,
        "2017-01-01T00:01:02+00:00-2017-01-02T19:00:50+00:00, 2018-11-01T00:00:00+00:00-2018-12-31T23:59:00+00:00",
    )
    assert not matches_time_spec(
        later_evening, "2019-01-01T00:01:02+00:00-2019-12-31T23:59:00+00:00")
    # Absolute ranges using a non-UTC (-04:00) offset.
    assert matches_time_spec(
        later_evening, "2018-11-04T16:00:00-04:00-2018-11-04T16:40:00-04:00")
    assert not matches_time_spec(
        later_evening, "2018-11-04T20:00:00-04:00-2018-11-04T20:40:00-04:00")
def autoscale_resource(
    resource: pykube.objects.NamespacedAPIObject,
    upscale_period: str,
    downscale_period: str,
    default_uptime: str,
    default_downtime: str,
    forced_uptime: bool,
    dry_run: bool,
    now: datetime.datetime,
    grace_period: int = 0,
    downtime_replicas: int = 0,
    namespace_excluded=False,
    deployment_time_annotation: Optional[str] = None,
):
    """Scale one resource up/down, or suspend/unsuspend it if it is a CronJob.

    Decision order for the uptime state:
      1. ``forced_uptime`` (or an excluded resource that still carries an
         original-replicas annotation, so it can be restored) forces uptime;
      2. explicit upscale/downscale periods (annotation or argument), where
         overlapping or non-matching periods mean "do nothing";
      3. otherwise the uptime/downtime time specs (annotation or default).

    With ``dry_run`` the planned update is only logged. Exceptions are caught
    and logged so processing of other resources continues.
    """
    try:
        exclude = namespace_excluded or ignore_resource(resource, now)
        # Replica count recorded by an earlier scale-down (string or None).
        original_replicas = resource.annotations.get(
            ORIGINAL_REPLICAS_ANNOTATION)
        # Annotation overrides the passed-in downtime target; int() may raise
        # ValueError on a malformed annotation, caught by the outer handler.
        downtime_replicas = int(
            resource.annotations.get(DOWNTIME_REPLICAS_ANNOTATION,
                                     downtime_replicas))
        if exclude and not original_replicas:
            logger.debug(
                "%s %s/%s was excluded",
                resource.kind,
                resource.namespace,
                resource.name,
            )
        else:
            ignore = False
            is_uptime = True

            # Per-resource annotations override the passed-in periods.
            upscale_period = resource.annotations.get(
                UPSCALE_PERIOD_ANNOTATION, upscale_period)
            downscale_period = resource.annotations.get(
                DOWNSCALE_PERIOD_ANNOTATION, downscale_period)
            if forced_uptime or (exclude and original_replicas):
                uptime = "forced"
                downtime = "ignored"
                is_uptime = True
            elif upscale_period != "never" or downscale_period != "never":
                uptime = upscale_period
                downtime = downscale_period
                if helper.matches_time_spec(
                        now, uptime) and helper.matches_time_spec(
                            now, downtime):
                    logger.debug(
                        "Upscale and downscale periods overlap, do nothing")
                    ignore = True
                elif helper.matches_time_spec(now, uptime):
                    is_uptime = True
                elif helper.matches_time_spec(now, downtime):
                    is_uptime = False
                else:
                    # Neither period matches "now": leave the resource alone.
                    ignore = True
                logger.debug(
                    "Periods checked: upscale=%s, downscale=%s, ignore=%s, is_uptime=%s",
                    upscale_period,
                    downscale_period,
                    ignore,
                    is_uptime,
                )
            else:
                uptime = resource.annotations.get(UPTIME_ANNOTATION,
                                                  default_uptime)
                downtime = resource.annotations.get(DOWNTIME_ANNOTATION,
                                                    default_downtime)
                # Uptime wins only when "now" matches uptime AND not downtime.
                is_uptime = helper.matches_time_spec(
                    now, uptime) and not helper.matches_time_spec(now, downtime)

            if resource.kind == "CronJob":
                # CronJobs have no replicas; model suspended as 0, active as 1.
                suspended = resource.obj["spec"]["suspend"]
                replicas = 0 if suspended else 1
                # NOTE(review): original_replicas comes from annotations.get()
                # and is a str or None, so `original_replicas == 0` is never
                # True and "not suspended" is always logged here — verify intent.
                logger.debug(
                    "%s %s/%s is %s (original: %s, uptime: %s)",
                    resource.kind,
                    resource.namespace,
                    resource.name,
                    "suspended" if suspended else "not suspended",
                    "suspended" if original_replicas == 0 else "not suspended",
                    uptime,
                )
            else:
                replicas = resource.replicas
                logger.debug(
                    "%s %s/%s has %s replicas (original: %s, uptime: %s)",
                    resource.kind,
                    resource.namespace,
                    resource.name,
                    replicas,
                    original_replicas,
                    uptime,
                )

            update_needed = False
            if (not ignore and is_uptime and replicas == downtime_replicas
                    and original_replicas and int(original_replicas) > 0):
                # Uptime and currently scaled down: restore the original state.
                if resource.kind == "CronJob":
                    resource.obj["spec"]["suspend"] = False
                    # Allow late starts after unsuspending.
                    resource.obj["spec"]["startingDeadlineSeconds"] = 0
                    logger.info(
                        "Unsuspending %s %s/%s (uptime: %s, downtime: %s)",
                        resource.kind,
                        resource.namespace,
                        resource.name,
                        uptime,
                        downtime,
                    )
                else:
                    resource.replicas = int(original_replicas)
                    logger.info(
                        "Scaling up %s %s/%s from %s to %s replicas (uptime: %s, downtime: %s)",
                        resource.kind,
                        resource.namespace,
                        resource.name,
                        replicas,
                        original_replicas,
                        uptime,
                        downtime,
                    )
                # Clear the marker annotation after restoring.
                resource.annotations[ORIGINAL_REPLICAS_ANNOTATION] = None
                update_needed = True
            elif (not ignore and not is_uptime and replicas > 0
                  and replicas > int(downtime_replicas)):
                # Downtime and still above the target: scale down / suspend,
                # unless the resource is within its grace period.
                target_replicas = int(
                    resource.annotations.get(DOWNTIME_REPLICAS_ANNOTATION,
                                             downtime_replicas))
                if within_grace_period(resource, grace_period, now,
                                       deployment_time_annotation):
                    logger.info(
                        "%s %s/%s within grace period (%ds), not scaling down (yet)",
                        resource.kind,
                        resource.namespace,
                        resource.name,
                        grace_period,
                    )
                else:
                    if resource.kind == "CronJob":
                        resource.obj["spec"]["suspend"] = True
                        logger.info(
                            "Suspending %s %s/%s (uptime: %s, downtime: %s)",
                            resource.kind,
                            resource.namespace,
                            resource.name,
                            uptime,
                            downtime,
                        )
                    else:
                        resource.replicas = target_replicas
                        logger.info(
                            "Scaling down %s %s/%s from %s to %s replicas (uptime: %s, downtime: %s)",
                            resource.kind,
                            resource.namespace,
                            resource.name,
                            replicas,
                            target_replicas,
                            uptime,
                            downtime,
                        )
                    # Remember the current count so a later uptime can restore it.
                    resource.annotations[ORIGINAL_REPLICAS_ANNOTATION] = str(
                        replicas)
                    update_needed = True

            if update_needed:
                if dry_run:
                    logger.info(
                        "**DRY-RUN**: would update %s %s/%s",
                        resource.kind,
                        resource.namespace,
                        resource.name,
                    )
                else:
                    resource.update()
    except Exception as e:
        # Best-effort: log and continue with other resources instead of crashing.
        logger.exception(
            "Failed to process %s %s/%s : %s",
            resource.kind,
            resource.namespace,
            resource.name,
            str(e),
        )
def autoscale_resource(
    resource: NamespacedAPIObject,
    upscale_period: str,
    downscale_period: str,
    default_uptime: str,
    default_downtime: str,
    forced_uptime: bool,
    dry_run: bool,
    now: datetime.datetime,
    grace_period: int = 0,
    downtime_replicas: int = 0,
    namespace_excluded=False,
    deployment_time_annotation: Optional[str] = None,
):
    """Decide the uptime state for one resource and delegate the actual scaling.

    Decision order:
      1. ``forced_uptime`` (or an excluded resource that still carries an
         original-replicas annotation, so it can be restored) forces uptime;
      2. explicit upscale/downscale periods (annotation or argument), where
         overlapping or non-matching periods mean "do nothing";
      3. otherwise the uptime/downtime time specs (annotation or default).

    The scaling itself is performed by the ``scale_up`` / ``scale_down``
    helpers; ``get_replicas`` supplies the current replica count. With
    ``dry_run`` the planned update is only logged. Exceptions are caught and
    logged so processing of other resources continues.
    """
    try:
        exclude = namespace_excluded or ignore_resource(resource, now)
        # Replica counts parsed from annotations (int or None).
        original_replicas = get_annotation_value_as_int(
            resource, ORIGINAL_REPLICAS_ANNOTATION)
        downtime_replicas_from_annotation = get_annotation_value_as_int(
            resource, DOWNTIME_REPLICAS_ANNOTATION)
        if downtime_replicas_from_annotation is not None:
            # Annotation overrides the passed-in downtime target.
            downtime_replicas = downtime_replicas_from_annotation

        if exclude and not original_replicas:
            logger.debug(
                f"{resource.kind} {resource.namespace}/{resource.name} was excluded"
            )
        else:
            ignore = False
            is_uptime = True

            # Per-resource annotations override the passed-in periods.
            upscale_period = resource.annotations.get(
                UPSCALE_PERIOD_ANNOTATION, upscale_period)
            downscale_period = resource.annotations.get(
                DOWNSCALE_PERIOD_ANNOTATION, downscale_period)
            if forced_uptime or (exclude and original_replicas):
                uptime = "forced"
                downtime = "ignored"
                is_uptime = True
            elif upscale_period != "never" or downscale_period != "never":
                uptime = upscale_period
                downtime = downscale_period
                if matches_time_spec(now, uptime) and matches_time_spec(
                        now, downtime):
                    logger.debug(
                        "Upscale and downscale periods overlap, do nothing")
                    ignore = True
                elif matches_time_spec(now, uptime):
                    is_uptime = True
                elif matches_time_spec(now, downtime):
                    is_uptime = False
                else:
                    # Neither period matches "now": leave the resource alone.
                    ignore = True
                logger.debug(
                    f"Periods checked: upscale={upscale_period}, downscale={downscale_period}, ignore={ignore}, is_uptime={is_uptime}"
                )
            else:
                uptime = resource.annotations.get(UPTIME_ANNOTATION,
                                                  default_uptime)
                downtime = resource.annotations.get(DOWNTIME_ANNOTATION,
                                                    default_downtime)
                # Uptime wins only when "now" matches uptime AND not downtime.
                is_uptime = matches_time_spec(
                    now, uptime) and not matches_time_spec(now, downtime)

            replicas = get_replicas(resource, original_replicas, uptime)

            update_needed = False
            if (not ignore and is_uptime and replicas == downtime_replicas
                    and original_replicas and original_replicas > 0):
                # Uptime and currently at the downtime target: restore.
                scale_up(resource, replicas, original_replicas, uptime,
                         downtime)
                update_needed = True
            elif (not ignore and not is_uptime and replicas > 0
                  and replicas > downtime_replicas):
                # Downtime and still above target: scale down, unless the
                # resource is within its grace period.
                if within_grace_period(resource, grace_period, now,
                                       deployment_time_annotation):
                    logger.info(
                        f"{resource.kind} {resource.namespace}/{resource.name} within grace period ({grace_period}s), not scaling down (yet)"
                    )
                else:
                    scale_down(resource, replicas, downtime_replicas, uptime,
                               downtime)
                    update_needed = True

            if update_needed:
                if dry_run:
                    logger.info(
                        f"**DRY-RUN**: would update {resource.kind} {resource.namespace}/{resource.name}"
                    )
                else:
                    resource.update()
    except Exception as e:
        # Best-effort: log and continue with other resources instead of crashing.
        logger.exception(
            f"Failed to process {resource.kind} {resource.namespace}/{resource.name}: {e}"
        )
def autoscale_resources(
    api,
    kind,
    namespace: str,
    exclude_namespaces: FrozenSet[Pattern],
    exclude_names: FrozenSet[str],
    upscale_period: str,
    downscale_period: str,
    default_uptime: str,
    default_downtime: str,
    forced_uptime: bool,
    dry_run: bool,
    now: datetime.datetime,
    grace_period: int,
    downtime_replicas: int,
    deployment_time_annotation: Optional[str] = None,
):
    """Fetch all resources of ``kind`` and autoscale each one.

    Resources are grouped by namespace; per-namespace annotations on the
    Namespace object override the passed-in defaults (uptime, downtime,
    periods, downtime replicas, forced uptime) before every resource in that
    namespace is handed to ``autoscale_resource``. Names in ``exclude_names``
    and namespaces matching any regex in ``exclude_namespaces`` are skipped.
    """
    resources_by_namespace = collections.defaultdict(list)
    # Collect matching resources across the cluster (or the single namespace).
    for resource in kind.objects(api, namespace=(namespace or pykube.all)):
        if resource.name in exclude_names:
            logger.debug(
                f"{resource.kind} {resource.namespace}/{resource.name} was excluded (name matches exclusion list)"
            )
            continue
        resources_by_namespace[resource.namespace].append(resource)

    for current_namespace, resources in sorted(resources_by_namespace.items()):
        if any(
            [pattern.fullmatch(current_namespace) for pattern in exclude_namespaces]
        ):
            logger.debug(
                f"Namespace {current_namespace} was excluded (exclusion list regex matches)"
            )
            continue

        logger.debug(
            f"Processing {len(resources)} {kind.endpoint} in namespace {current_namespace}.."
        )

        # Override defaults with (optional) annotations from Namespace
        namespace_obj = Namespace.objects(api).get_by_name(current_namespace)

        excluded = ignore_resource(namespace_obj, now)

        default_uptime_for_namespace = namespace_obj.annotations.get(
            UPTIME_ANNOTATION, default_uptime
        )
        default_downtime_for_namespace = namespace_obj.annotations.get(
            DOWNTIME_ANNOTATION, default_downtime
        )
        default_downtime_replicas_for_namespace = get_annotation_value_as_int(
            namespace_obj, DOWNTIME_REPLICAS_ANNOTATION
        )
        if default_downtime_replicas_for_namespace is None:
            default_downtime_replicas_for_namespace = downtime_replicas

        upscale_period_for_namespace = namespace_obj.annotations.get(
            UPSCALE_PERIOD_ANNOTATION, upscale_period
        )
        downscale_period_for_namespace = namespace_obj.annotations.get(
            DOWNSCALE_PERIOD_ANNOTATION, downscale_period
        )
        # str() also normalizes a bool default (True/False) so the lower()
        # comparisons below handle both annotation strings and the bool arg.
        forced_uptime_value_for_namespace = str(
            namespace_obj.annotations.get(FORCE_UPTIME_ANNOTATION, forced_uptime)
        )
        if forced_uptime_value_for_namespace.lower() == "true":
            forced_uptime_for_namespace = True
        elif forced_uptime_value_for_namespace.lower() == "false":
            forced_uptime_for_namespace = False
        elif forced_uptime_value_for_namespace:
            # Any other non-empty value is treated as a time spec.
            forced_uptime_for_namespace = matches_time_spec(
                now, forced_uptime_value_for_namespace
            )
        else:
            forced_uptime_for_namespace = False

        for resource in resources:
            autoscale_resource(
                resource,
                upscale_period_for_namespace,
                downscale_period_for_namespace,
                default_uptime_for_namespace,
                default_downtime_for_namespace,
                forced_uptime_for_namespace,
                dry_run,
                now,
                grace_period,
                default_downtime_replicas_for_namespace,
                namespace_excluded=excluded,
                deployment_time_annotation=deployment_time_annotation,
            )
def autoscale_resource(resource: pykube.objects.NamespacedAPIObject, upscale_period: str, downscale_period: str,
                       default_uptime: str, default_downtime: str, forced_uptime: bool, dry_run: bool,
                       now: datetime.datetime, grace_period: int, downtime_replicas: int, namespace_excluded=False):
    """Scale one Kubernetes resource up or down based on periods and time specs.

    Decision order for the uptime state:
      1. ``forced_uptime`` (or an excluded resource that still carries an
         original-replicas annotation, so it can be restored) forces uptime;
      2. explicit upscale/downscale periods (annotation or argument), where
         overlapping or non-matching periods mean "do nothing";
      3. otherwise the uptime/downtime time specs (annotation or default).

    With ``dry_run`` the planned update is only logged. Exceptions are caught
    and logged so one failing resource does not abort processing of others.
    """
    try:
        exclude = namespace_excluded or ignore_resource(resource)
        # Replica count recorded by an earlier scale-down (string or None).
        original_replicas = resource.annotations.get(ORIGINAL_REPLICAS_ANNOTATION)
        # Annotation overrides the passed-in downtime target; int() may raise
        # ValueError on a malformed annotation, caught by the outer handler.
        downtime_replicas = int(resource.annotations.get(DOWNTIME_REPLICAS_ANNOTATION, downtime_replicas))

        if exclude and not original_replicas:
            logger.debug('%s %s/%s was excluded', resource.kind, resource.namespace, resource.name)
        else:
            replicas = resource.replicas
            ignore = False

            # Per-resource annotations override the passed-in periods.
            upscale_period = resource.annotations.get(UPSCALE_PERIOD_ANNOTATION, upscale_period)
            downscale_period = resource.annotations.get(DOWNSCALE_PERIOD_ANNOTATION, downscale_period)
            if forced_uptime or (exclude and original_replicas):
                uptime = "forced"
                downtime = "ignored"
                is_uptime = True
            elif upscale_period != 'never' or downscale_period != 'never':
                uptime = upscale_period
                downtime = downscale_period
                if helper.matches_time_spec(now, uptime) and helper.matches_time_spec(now, downtime):
                    logger.debug('Upscale and downscale periods overlap, do nothing')
                    ignore = True
                elif helper.matches_time_spec(now, uptime):
                    is_uptime = True
                elif helper.matches_time_spec(now, downtime):
                    is_uptime = False
                else:
                    # Neither period matches "now": leave the resource alone.
                    ignore = True
                logger.debug('Periods checked: upscale=%s, downscale=%s, ignore=%s, is_uptime=%s',
                             upscale_period, downscale_period, ignore, is_uptime)
            else:
                uptime = resource.annotations.get(UPTIME_ANNOTATION, default_uptime)
                downtime = resource.annotations.get(DOWNTIME_ANNOTATION, default_downtime)
                # Uptime wins only when "now" matches uptime AND not downtime.
                is_uptime = helper.matches_time_spec(now, uptime) and not helper.matches_time_spec(now, downtime)

            logger.debug('%s %s/%s has %s replicas (original: %s, uptime: %s)',
                         resource.kind, resource.namespace, resource.name, replicas, original_replicas, uptime)
            update_needed = False
            if not ignore and is_uptime and replicas == downtime_replicas and original_replicas and int(original_replicas) > 0:
                # Uptime and currently at the downtime target: restore the
                # remembered count and clear the marker annotation.
                logger.info('Scaling up %s %s/%s from %s to %s replicas (uptime: %s, downtime: %s)',
                            resource.kind, resource.namespace, resource.name, replicas, original_replicas,
                            uptime, downtime)
                resource.replicas = int(original_replicas)
                resource.annotations[ORIGINAL_REPLICAS_ANNOTATION] = None
                update_needed = True
            elif not ignore and not is_uptime and replicas > 0 and replicas > int(downtime_replicas):
                # Downtime and still above target: scale down, unless the
                # resource is within its grace period.
                target_replicas = int(resource.annotations.get(DOWNTIME_REPLICAS_ANNOTATION, downtime_replicas))
                if within_grace_period(resource, grace_period, now):
                    logger.info('%s %s/%s within grace period (%ds), not scaling down (yet)',
                                resource.kind, resource.namespace, resource.name, grace_period)
                else:
                    logger.info('Scaling down %s %s/%s from %s to %s replicas (uptime: %s, downtime: %s)',
                                resource.kind, resource.namespace, resource.name, replicas, target_replicas,
                                uptime, downtime)
                    # Remember the current count so a later uptime can restore it.
                    resource.annotations[ORIGINAL_REPLICAS_ANNOTATION] = str(replicas)
                    resource.replicas = target_replicas
                    update_needed = True
            if update_needed:
                if dry_run:
                    logger.info('**DRY-RUN**: would update %s %s/%s',
                                resource.kind, resource.namespace, resource.name)
                else:
                    resource.update()
    except Exception as e:
        # Best-effort: log and continue with other resources instead of crashing.
        logger.exception('Failed to process %s %s/%s : %s',
                         resource.kind, resource.namespace, resource.name, str(e))