def __init__(self,
                 regions,
                 aws_access_key,
                 aws_secret_key,
                 kubeconfig,
                 pod_namespace,
                 idle_threshold,
                 type_idle_threshold,
                 instance_init_time,
                 cluster_name,
                 notifier,
                 scale_up=True,
                 maintainance=True,
                 datadog_api_key=None,
                 over_provision=5,
                 dry_run=False):
        if kubeconfig:
            # running locally: load the given kubeconfig file
            logger.debug('Using kubeconfig %s', kubeconfig)
            self.api = pykube.HTTPClient(
                pykube.KubeConfig.from_file(kubeconfig))
        else:
            # running in-cluster: use the pod's service account
            logger.debug('Using kube service account')
            self.api = pykube.HTTPClient(
                pykube.KubeConfig.from_service_account())
        if pod_namespace is None:
            self.pod_namespace = pykube.all
        else:
            self.pod_namespace = pod_namespace

        self._drained = {}
        self.session = boto3.session.Session(
            aws_access_key_id=aws_access_key,
            aws_secret_access_key=aws_secret_key,
            region_name=regions[0])  # provide a default region
        self.autoscaling_groups = autoscaling_groups.AutoScalingGroups(
            session=self.session, regions=regions, cluster_name=cluster_name)
        self.autoscaling_timeouts = autoscaling_groups.AutoScalingTimeouts(
            self.session)

        # config
        self.regions = regions
        self.idle_threshold = idle_threshold
        self.instance_init_time = instance_init_time
        self.type_idle_threshold = type_idle_threshold
        self.over_provision = over_provision

        self.scale_up = scale_up
        self.maintainance = maintainance

        self.notifier = notifier

        if datadog_api_key:
            datadog.initialize(api_key=datadog_api_key)
            logger.info('Datadog initialized')
        self.stats = datadog.ThreadStats()
        self.stats.start()

        self.dry_run = dry_run
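
A minimal construction sketch for this first variant; the enclosing class name `Cluster`, the literal values, and the units suggested in the comments (seconds) are assumptions for illustration, not taken from the excerpt:

# Hypothetical usage; `Cluster` is an assumed name for the enclosing
# class, and every literal below is illustrative only.
cluster = Cluster(
    regions=['us-west-2'],        # first region doubles as the boto3 default
    aws_access_key='AKIA...',
    aws_secret_key='...',
    kubeconfig='~/.kube/config',  # None would select the in-cluster service account
    pod_namespace=None,           # None watches all namespaces (pykube.all)
    idle_threshold=600,           # assumed seconds of node idleness before draining
    type_idle_threshold=3600,
    instance_init_time=300,       # assumed seconds allowed for instance boot
    cluster_name='prod-cluster',
    notifier=None,                # real code passes a notifier object here
    dry_run=True)                 # observe decisions without acting on them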
Example #2
    def __init__(self, aws_regions, aws_access_key, aws_secret_key,
                 azure_client_id, azure_client_secret, azure_subscription_id, azure_tenant_id,
                 azure_resource_group_names, azure_slow_scale_classes, kubeconfig,
                 idle_threshold, type_idle_threshold,
                 instance_init_time, cluster_name, notifier,
                 max_scale_in_fraction=0.1,
                 scale_up=True, maintainance=True,
                 datadog_api_key=None,
                 over_provision=5, dry_run=False):
        if kubeconfig:
            # running locally: load the given kubeconfig file
            logger.debug('Using kubeconfig %s', kubeconfig)
            self.api = pykube.HTTPClient(
                pykube.KubeConfig.from_file(kubeconfig))
        else:
            # running in-cluster: use the pod's service account
            logger.debug('Using kube service account')
            self.api = pykube.HTTPClient(
                pykube.KubeConfig.from_service_account())

        self.max_scale_in_fraction = max_scale_in_fraction
        self._drained = {}
        self.session = None
        if aws_access_key and aws_secret_key:
            self.session = boto3.session.Session(
                aws_access_key_id=aws_access_key,
                aws_secret_access_key=aws_secret_key,
                region_name=aws_regions[0])  # provide a default region
        self.autoscaling_groups = autoscaling_groups.AutoScalingGroups(
            session=self.session, regions=aws_regions,
            cluster_name=cluster_name)
        self.autoscaling_timeouts = autoscaling_groups.AutoScalingTimeouts(
            self.session)

        azure_regions = []
        resource_groups = []
        self.azure_client = None
        if azure_client_id:
            azure_credentials = ServicePrincipalCredentials(
                client_id=azure_client_id,
                secret=azure_client_secret,
                tenant=azure_tenant_id
            )

            # Setup the Azure client
            resource_client = ResourceManagementClient(azure_credentials, azure_subscription_id)
            resource_client.providers.register('Microsoft.Compute')
            resource_client.providers.register('Microsoft.Network')
            resource_client.providers.register('Microsoft.Insights')

            region_map = {}
            for resource_group_name in azure_resource_group_names:
                resource_group = resource_client.resource_groups.get(resource_group_name)
                location = resource_group.location
                if location in region_map:
                    # A second resource group in the same region would
                    # silently overwrite the first entry in region_map, so
                    # fail fast rather than continue with a broken mapping.
                    raise ValueError(
                        "{} and {} are both in {}. May only have one "
                        "resource group per region".format(
                            resource_group_name, region_map[location],
                            location))
                region_map[location] = resource_group_name
                azure_regions.append(location)
                resource_groups.append(resource_group)

            compute_client = ComputeManagementClient(azure_credentials, azure_subscription_id)
            compute_client.config.retry_policy.policy = azure.AzureBoundedRetry.from_retry(
                compute_client.config.retry_policy.policy)

            monitor_client = MonitorClient(azure_credentials, azure_subscription_id)
            monitor_client.config.retry_policy.policy = azure.AzureBoundedRetry.from_retry(
                monitor_client.config.retry_policy.policy)
            self.azure_client = AzureWriteThroughCachedApi(
                AzureWrapper(compute_client, monitor_client))

        self.azure_groups = azure.AzureGroups(
            resource_groups, azure_slow_scale_classes, self.azure_client)

        # config
        self.azure_resource_group_names = azure_resource_group_names
        self.azure_regions = azure_regions
        self.aws_regions = aws_regions
        self.idle_threshold = idle_threshold
        self.instance_init_time = instance_init_time
        self.type_idle_threshold = type_idle_threshold
        self.over_provision = over_provision

        self.scale_up = scale_up
        self.maintainance = maintainance

        self.notifier = notifier

        if datadog_api_key:
            datadog.initialize(api_key=datadog_api_key)
            logger.info('Datadog initialized')
        self.stats = datadog.ThreadStats()
        self.stats.start()

        self.dry_run = dry_run
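
The duplicate-region guard in this variant can be exercised on its own. A self-contained sketch, where `FakeGroup` is a stand-in for the Azure SDK's resource-group model; running it raises ValueError on the second group:

# Stand-alone sketch of the one-resource-group-per-region check.
class FakeGroup(object):
    def __init__(self, location):
        self.location = location  # mirrors ResourceGroup.location

groups = {'rg-east-a': FakeGroup('eastus'),
          'rg-east-b': FakeGroup('eastus')}
region_map = {}
for name, group in groups.items():
    if group.location in region_map:
        raise ValueError('{} and {} are both in {}. May only have one '
                         'resource group per region'.format(
                             name, region_map[group.location],
                             group.location))
    region_map[group.location] = name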
Example #3
    def __init__(self,
                 regions,
                 aws_access_key,
                 aws_secret_key,
                 kubeconfig,
                 pod_namespace,
                 idle_threshold,
                 type_idle_threshold,
                 instance_init_time,
                 cluster_name,
                 notifier,
                 scale_up=True,
                 maintainance=True,
                 datadog_api_key=None,
                 over_provision=5,
                 dry_run=False,
                 drainable_labels=None,
                 scale_label=None,
                 instance_type_priorities=None):
        if kubeconfig:
            # running locally: load the given kubeconfig file
            logger.debug('Using kubeconfig %s', kubeconfig)
            self.api = pykube.HTTPClient(
                pykube.KubeConfig.from_file(kubeconfig))
        else:
            # running in-cluster: use the pod's service account
            logger.debug('Using kube service account')
            self.api = pykube.HTTPClient(
                pykube.KubeConfig.from_service_account())
        if pod_namespace is None:
            self.pod_namespace = pykube.all
        else:
            self.pod_namespace = pod_namespace

        self._drained = {}
        self.session = boto3.session.Session(
            aws_access_key_id=aws_access_key,
            aws_secret_access_key=aws_secret_key,
            region_name=regions[0])  # provide a default region
        self.autoscaling_groups = autoscaling_groups.AutoScalingGroups(
            session=self.session, regions=regions, cluster_name=cluster_name)
        self.autoscaling_timeouts = autoscaling_groups.AutoScalingTimeouts(
            self.session)

        # config
        self.regions = regions
        self.idle_threshold = idle_threshold
        self.instance_init_time = instance_init_time
        self.type_idle_threshold = type_idle_threshold
        self.over_provision = over_provision

        self.scale_up = scale_up
        self.maintainance = maintainance

        self.notifier = notifier

        if datadog_api_key:
            datadog.initialize(api_key=datadog_api_key)
            logger.info('Datadog initialized')
        self.stats = datadog.ThreadStats()
        self.stats.start()

        self.dry_run = dry_run
        self.drainable_labels = drainable_labels or {}
        self.scale_label = scale_label
        if not instance_type_priorities:
            self.instance_type_priorities = self._GROUP_PRIORITIES
        else:
            # Each instance type must map to exactly one priority; count
            # the offenders so the error can report how many there are.
            multiple_priorities = sum(
                1 for values in instance_type_priorities.values()
                if len(values) > 1)
            if multiple_priorities > 0:
                raise ValueError(
                    'You have specified more than one priority for %d '
                    'instance types. Please specify a single priority for '
                    'each instance type that you care about.'
                    % multiple_priorities)
            self.instance_type_priorities = {
                instance: min(int(value) for value in values)
                for instance, values in instance_type_priorities.items()
            }
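
The closing dict comprehension collapses each surviving single-element priority list to an int. A quick illustration with made-up data:

# Made-up priorities illustrating the normalization above.
raw_priorities = {'m4.large': ['2'], 'c4.xlarge': ['1']}
normalized = {instance: min(int(value) for value in values)
              for instance, values in raw_priorities.items()}
assert normalized == {'m4.large': 2, 'c4.xlarge': 1}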