Example 1
0
    def setUp(self):
        """Build an isolated test fixture.

        Loads dummy Kubernetes object specs from YAML files next to this
        test, starts moto mocks for EC2/autoscaling, creates a dummy
        launch configuration and ASG tagged for the cluster, and finally
        constructs the Cluster under test against the mocked environment.
        """
        # load dummy kube specs
        dir_path = os.path.dirname(os.path.realpath(__file__))

        def _load_yaml(relpath):
            # safe_load avoids arbitrary Python object construction and
            # the deprecated no-Loader yaml.load() call; these fixtures
            # are plain data documents.
            with open(os.path.join(dir_path, relpath), 'r') as f:
                return yaml.safe_load(f)

        self.dummy_pod = _load_yaml('data/busybox.yaml')
        self.dummy_ds_pod = _load_yaml('data/ds-pod.yaml')
        self.dummy_rc_pod = _load_yaml('data/rc-pod.yaml')
        self.dummy_node = _load_yaml('data/node.yaml')

        # this isn't actually used here
        # only needed to create the KubePod object...
        self.api = pykube.HTTPClient(
            pykube.KubeConfig.from_file('~/.kube/config'))

        # start creating our mock ec2 environment
        self.mocks = [moto.mock_ec2(), moto.mock_autoscaling()]
        for moto_mock in self.mocks:
            moto_mock.start()

        client = boto3.client('autoscaling', region_name='us-west-2')
        self.asg_client = client

        client.create_launch_configuration(LaunchConfigurationName='dummy-lc',
                                           ImageId='ami-deadbeef',
                                           KeyName='dummy-key',
                                           SecurityGroups=[
                                               'sg-cafebeef',
                                           ],
                                           InstanceType='t2.medium')

        # KubernetesCluster/KubernetesRole tags are what Cluster uses to
        # discover which ASGs belong to it.
        client.create_auto_scaling_group(AutoScalingGroupName='dummy-asg',
                                         LaunchConfigurationName='dummy-lc',
                                         MinSize=0,
                                         MaxSize=10,
                                         VPCZoneIdentifier='subnet-beefbeef',
                                         Tags=[{
                                             'Key': 'KubernetesCluster',
                                             'Value': 'dummy-cluster',
                                             'PropagateAtLaunch': True
                                         }, {
                                             'Key': 'KubernetesRole',
                                             'Value': 'worker',
                                             'PropagateAtLaunch': True
                                         }])

        # finally our cluster
        self.cluster = Cluster(aws_access_key='',
                               aws_secret_key='',
                               regions=['us-west-2', 'us-east-1', 'us-west-1'],
                               kubeconfig='~/.kube/config',
                               pod_namespace=None,
                               idle_threshold=60,
                               instance_init_time=60,
                               type_idle_threshold=60,
                               cluster_name='dummy-cluster',
                               notifier=Notifier(),
                               dry_run=False)
Example 2
0
def main(cluster_name, aws_regions, azure_resource_groups,
         azure_slow_scale_classes, sleep, kubeconfig, azure_client_id,
         azure_client_secret, azure_subscription_id, azure_tenant_id,
         aws_access_key, aws_secret_key, pod_namespace, datadog_api_key,
         idle_threshold, type_idle_threshold, max_scale_in_fraction,
         drain_utilization, over_provision, instance_init_time, no_scale,
         no_maintenance, slack_hook, slack_bot_token, dry_run, verbose):
    """Run the autoscaler control loop for an AWS and/or Azure cluster.

    Sets up stderr logging, validates that AWS credentials are present
    whenever AWS regions are requested (exits with status 1 otherwise),
    builds the Cluster, then loops forever calling scale_loop() with
    exponential backoff after unsuccessful iterations.
    """
    logger_handler = logging.StreamHandler(sys.stderr)
    logger_handler.setFormatter(
        logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
            datefmt='%Y-%m-%dT%H:%M:%S%z'))
    logger.addHandler(logger_handler)
    logger.setLevel(DEBUG_LOGGING_MAP.get(verbose, logging.CRITICAL))

    # AWS credentials are only mandatory when AWS regions were requested.
    aws_regions_list = aws_regions.split(',') if aws_regions else []
    if not (aws_secret_key and aws_access_key) and aws_regions_list:
        logger.error(
            "Missing AWS credentials. Please provide aws-access-key and aws-secret-key."
        )
        sys.exit(1)

    notifier = Notifier(slack_hook, slack_bot_token)
    cluster = Cluster(
        aws_access_key=aws_access_key,
        aws_secret_key=aws_secret_key,
        aws_regions=aws_regions_list,
        azure_client_id=azure_client_id,
        azure_client_secret=azure_client_secret,
        azure_subscription_id=azure_subscription_id,
        azure_tenant_id=azure_tenant_id,
        azure_resource_group_names=azure_resource_groups.split(',')
        if azure_resource_groups else [],
        azure_slow_scale_classes=azure_slow_scale_classes.split(',')
        if azure_slow_scale_classes else [],
        kubeconfig=kubeconfig,
        pod_namespace=pod_namespace,
        idle_threshold=idle_threshold,
        instance_init_time=instance_init_time,
        type_idle_threshold=type_idle_threshold,
        cluster_name=cluster_name,
        max_scale_in_fraction=max_scale_in_fraction,
        drain_utilization_below=drain_utilization,
        scale_up=not no_scale,
        maintainance=not no_maintenance,
        over_provision=over_provision,
        datadog_api_key=datadog_api_key,
        notifier=notifier,
        dry_run=dry_run,
    )
    # Exponential backoff: wait `sleep` seconds after a successful
    # iteration; double the wait after each failed one.
    backoff = sleep
    while True:
        scaled = cluster.scale_loop()
        if scaled:
            time.sleep(sleep)
            backoff = sleep
        else:
            # logger.warn() is deprecated; warning() with lazy %-args
            # is the supported spelling.
            logger.warning("backoff: %s", backoff)
            backoff *= 2
            time.sleep(backoff)
Example 3
0
def main(resource_group, acs_deployment, sleep, kubeconfig,
         service_principal_app_id, service_principal_secret,
         kubeconfig_private_key, client_private_key,
         service_principal_tenant_id, spare_agents, idle_threshold,
         no_scale, over_provision, no_maintenance, ignore_pools, slack_hook, slack_bot_token,
         dry_run, verbose, debug):
    """Run the autoscaler control loop for an Azure ACS deployment.

    Sets up stderr logging, validates the Azure service-principal
    credentials (exits with status 1 when missing), builds and logs in
    the Cluster, then loops forever calling loop() with exponential
    backoff after unsuccessful iterations.
    """
    logger_handler = logging.StreamHandler(sys.stderr)
    logger_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    logger.addHandler(logger_handler)
    logger.setLevel(DEBUG_LOGGING_MAP.get(verbose, logging.CRITICAL))

    if not (service_principal_app_id and service_principal_secret and service_principal_tenant_id):
        logger.error("Missing Azure credentials. Please provide aws-service_principal_app_id, service_principal_secret and service_principal_tenant_id.")
        sys.exit(1)

    # NOTE(review): the two checks below log an error but do NOT exit,
    # unlike the credentials check above — confirm whether continuing
    # without these keys is intended.
    if not client_private_key:
        logger.error('Missing client_private_key. Provide it through --client-private-key or CLIENT_PRIVATE_KEY environment variable')

    if not kubeconfig_private_key:
        logger.error('Missing kubeconfig_private_key. Provide it through --kubeconfig-private-key or KUBECONFIG_PRIVATE_KEY environment variable')

    # Slack notifications are only enabled when both hook and bot token
    # were supplied.
    notifier = None
    if slack_hook and slack_bot_token:
        notifier = Notifier(slack_hook, slack_bot_token)

    instance_init_time = 600

    cluster = Cluster(kubeconfig=kubeconfig,
                      instance_init_time=instance_init_time,
                      spare_agents=spare_agents,
                      idle_threshold=idle_threshold,
                      resource_group=resource_group,
                      acs_deployment=acs_deployment,
                      service_principal_app_id=service_principal_app_id,
                      service_principal_secret=service_principal_secret,
                      service_principal_tenant_id=service_principal_tenant_id,
                      kubeconfig_private_key=kubeconfig_private_key,
                      client_private_key=client_private_key,
                      scale_up=not no_scale,
                      ignore_pools=ignore_pools,
                      maintainance=not no_maintenance,
                      over_provision=over_provision,
                      notifier=notifier,
                      dry_run=dry_run,
                      )
    cluster.login()
    # Exponential backoff: wait `sleep` seconds after a successful
    # iteration; double the wait after each failed one.
    backoff = sleep
    while True:
        scaled = cluster.loop(debug)
        if scaled:
            time.sleep(sleep)
            backoff = sleep
        else:
            # logger.warn() is deprecated; warning() with lazy %-args
            # is the supported spelling.
            logger.warning("backoff: %s", backoff)
            backoff *= 2
            time.sleep(backoff)
def main(container_service_name, resource_group, sleep, kubeconfig,
         service_principal_app_id, service_principal_secret,
         service_principal_tenant_id, cpu_per_node, datadog_api_key,
         idle_threshold, reserve_idle_threshold, over_provision,
         instance_init_time, no_scale, no_maintenance, slack_hook,
         slack_bot_token, dry_run, verbose):
    """Run the autoscaler control loop for an Azure container service.

    Sets up stderr logging, validates the Azure service-principal
    credentials (exits with status 1 when missing), builds the Cluster,
    then loops forever calling scale_loop() with exponential backoff
    after unsuccessful iterations.
    """
    logger_handler = logging.StreamHandler(sys.stderr)
    logger_handler.setFormatter(
        logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    logger.addHandler(logger_handler)
    logger.setLevel(DEBUG_LOGGING_MAP.get(verbose, logging.CRITICAL))

    if not (service_principal_app_id and service_principal_secret
            and service_principal_tenant_id):
        logger.error(
            "Missing Azure credentials. Please provide aws-service_principal_app_id, service_principal_secret and service_principal_tenant_id."
        )
        sys.exit(1)

    notifier = Notifier(slack_hook, slack_bot_token)
    cluster = Cluster(
        service_principal_app_id=service_principal_app_id,
        service_principal_secret=service_principal_secret,
        service_principal_tenant_id=service_principal_tenant_id,
        kubeconfig=kubeconfig,
        idle_threshold=idle_threshold,
        instance_init_time=instance_init_time,
        reserve_idle_threshold=reserve_idle_threshold,
        container_service_name=container_service_name,
        resource_group=resource_group,
        scale_up=not no_scale,
        maintainance=not no_maintenance,
        over_provision=over_provision,
        datadog_api_key=datadog_api_key,
        notifier=notifier,
        dry_run=dry_run,
    )
    # Exponential backoff: wait `sleep` seconds after a successful
    # iteration; double the wait after each failed one.
    backoff = sleep
    while True:
        scaled = cluster.scale_loop()
        if scaled:
            time.sleep(sleep)
            backoff = sleep
        else:
            # logger.warn() is deprecated; warning() with lazy %-args
            # is the supported spelling.
            logger.warning("backoff: %s", backoff)
            backoff *= 2
            time.sleep(backoff)
def main(cluster_name, regions, sleep, kubeconfig, pod_namespace,
         aws_access_key, aws_secret_key, datadog_api_key, idle_threshold,
         type_idle_threshold, over_provision, instance_init_time, no_scale,
         no_maintenance, slack_hook, slack_bot_token, dry_run, verbose,
         drainable_labels, scale_label, instance_type_priorities):
    """Run the autoscaler control loop for an AWS cluster.

    Sets up stderr logging, validates AWS credentials (exits with
    status 1 when missing), builds the Cluster from the comma-separated
    `regions` string, then loops forever calling scale_loop() with
    exponential backoff after unsuccessful iterations.
    """
    logger_handler = logging.StreamHandler(sys.stderr)
    logger_handler.setFormatter(
        logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    logger.addHandler(logger_handler)
    logger.setLevel(DEBUG_LOGGING_MAP.get(verbose, logging.CRITICAL))

    if not (aws_secret_key and aws_access_key):
        logger.error(
            "Missing AWS credentials. Please provide aws-access-key and aws-secret-key."
        )
        sys.exit(1)

    notifier = Notifier(slack_hook, slack_bot_token)
    cluster = Cluster(aws_access_key=aws_access_key,
                      aws_secret_key=aws_secret_key,
                      regions=regions.split(','),
                      kubeconfig=kubeconfig,
                      pod_namespace=pod_namespace,
                      idle_threshold=idle_threshold,
                      instance_init_time=instance_init_time,
                      type_idle_threshold=type_idle_threshold,
                      cluster_name=cluster_name,
                      scale_up=not no_scale,
                      maintainance=not no_maintenance,
                      over_provision=over_provision,
                      datadog_api_key=datadog_api_key,
                      notifier=notifier,
                      dry_run=dry_run,
                      drainable_labels=drainable_labels,
                      scale_label=scale_label,
                      instance_type_priorities=instance_type_priorities)
    # Exponential backoff: wait `sleep` seconds after a successful
    # iteration; double the wait after each failed one.
    backoff = sleep
    while True:
        scaled = cluster.scale_loop()
        if scaled:
            time.sleep(sleep)
            backoff = sleep
        else:
            # logger.warn() is deprecated; warning() with lazy %-args
            # is the supported spelling.
            logger.warning("backoff: %s", backoff)
            backoff *= 2
            time.sleep(backoff)
 def test_scale_up(self):
     """Cluster must reject an instance type mapped to two priorities."""
     # Attempting to set two different priorities for the same instance type.
     # assertRaises with extra args calls Cluster(**kwargs) and expects the
     # constructor itself to raise ValueError.
     self.assertRaises(
         ValueError,
         Cluster,
         aws_access_key='',
         aws_secret_key='',
         regions=['us-west-2'],
         kubeconfig='~/.kube/config',
         pod_namespace=None,
         idle_threshold=60,
         instance_init_time=60,
         type_idle_threshold=60,
         cluster_name='dummy-cluster-with-priorities',
         # conflicting priorities {'0', '2'} for p2.xlarge trigger the error
         instance_type_priorities={'p2.xlarge': set(['0', '2'])},
         notifier=Notifier(),
         dry_run=False)
def main(sleep, kubeconfig, kubecontext, scale_out_webhook, scale_in_webhook,
         spare_agents, pool_name_regex, idle_threshold, drain, no_scale,
         over_provision, no_maintenance, ignore_pools, slack_hook, verbose,
         debug):
    """Run the autoscaler control loop for a webhook-driven cluster.

    Sets up stderr logging, optionally enables Slack notifications,
    builds and logs in the Cluster, then loops forever calling loop()
    with exponential backoff after unsuccessful iterations.
    """
    logger_handler = logging.StreamHandler(sys.stderr)
    logger_handler.setFormatter(
        logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    logger.addHandler(logger_handler)
    logger.setLevel(DEBUG_LOGGING_MAP.get(verbose, logging.CRITICAL))

    # Slack notifications are optional here; only a hook is required.
    notifier = None
    if slack_hook:
        notifier = Notifier(slack_hook)

    cluster = Cluster(kubeconfig=kubeconfig,
                      kubecontext=kubecontext,
                      scale_out_webhook=scale_out_webhook,
                      scale_in_webhook=scale_in_webhook,
                      pool_name_regex=pool_name_regex,
                      spare_agents=spare_agents,
                      idle_threshold=idle_threshold,
                      drain=drain,
                      scale_up=not no_scale,
                      ignore_pools=ignore_pools,
                      maintainance=not no_maintenance,
                      over_provision=over_provision,
                      notifier=notifier)
    cluster.login()
    # Exponential backoff: wait `sleep` seconds after a successful
    # iteration; double the wait after each failed one.
    backoff = sleep
    while True:
        scaled = cluster.loop(debug)
        if scaled:
            time.sleep(sleep)
            backoff = sleep
        else:
            # logger.warn() is deprecated; warning() with lazy %-args
            # is the supported spelling.
            logger.warning("backoff: %s", backoff)
            backoff *= 2
            time.sleep(backoff)
Example 8
0
def main(container_service_name, resource_group, sleep, kubeconfig,
         service_principal_app_id, service_principal_secret, service_principal_tenant_id,
         datadog_api_key, idle_threshold, spare_agents,
         template_file, parameters_file, template_file_url, parameters_file_url,
         over_provision, instance_init_time, no_scale, no_maintenance,
         slack_hook, slack_bot_token, dry_run, verbose, debug):
    """Run the autoscaler control loop for ACS or acs-engine clusters.

    Sets up stderr logging, validates the Azure service-principal
    credentials and the mutually-exclusive template/parameters options
    (exits with status 1 on any violation), builds the Cluster, then
    loops forever calling scale_loop(debug) with exponential backoff
    after unsuccessful iterations.
    """
    logger_handler = logging.StreamHandler(sys.stderr)
    logger_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    logger.addHandler(logger_handler)
    logger.setLevel(DEBUG_LOGGING_MAP.get(verbose, logging.CRITICAL))

    if not (service_principal_app_id and service_principal_secret and service_principal_tenant_id):
        logger.error("Missing Azure credentials. Please provide aws-service_principal_app_id, service_principal_secret and service_principal_tenant_id.")
        sys.exit(1)

    # acs-engine mode needs template-file and parameters-file together.
    if (template_file and not parameters_file) or (not template_file and parameters_file):
        logger.error("Both --template-file and --parameters-file should be provided when running on acs-engine")
        sys.exit(1)

    if (template_file and template_file_url):
        logger.error('--template-file and --template-file-url are mutually exclusive.')
        sys.exit(1)

    if (parameters_file and parameters_file_url):
        logger.error('--parameters-file and --parameters-file-url are mutually exclusive.')
        sys.exit(1)

    # ACS mode (container-service-name) and acs-engine mode (template
    # files) are mutually exclusive.
    if template_file and container_service_name:
        logger.error("--template-file and --container-service-name cannot be specified simultaneously. Provide --container-service-name when running on ACS, or --template-file and --parameters-file when running on acs-engine")
        sys.exit(1)

    # Slack notifications require both a hook and a bot token.
    notifier = None
    if slack_hook and slack_bot_token:
        notifier = Notifier(slack_hook, slack_bot_token)

    cluster = Cluster(service_principal_app_id=service_principal_app_id,
                      service_principal_secret=service_principal_secret,
                      service_principal_tenant_id=service_principal_tenant_id,
                      kubeconfig=kubeconfig,
                      template_file=template_file,
                      template_file_url=template_file_url,
                      parameters_file_url=parameters_file_url,
                      parameters_file=parameters_file,
                      idle_threshold=idle_threshold,
                      instance_init_time=instance_init_time,
                      spare_agents=spare_agents,
                      container_service_name=container_service_name,
                      resource_group=resource_group,
                      scale_up=not no_scale,
                      maintainance=not no_maintenance,
                      over_provision=over_provision,
                      datadog_api_key=datadog_api_key,
                      notifier=notifier,
                      dry_run=dry_run,
                      )
    # Exponential backoff: wait `sleep` seconds after a successful
    # iteration; double the wait after each failed one.
    backoff = sleep
    while True:
        scaled = cluster.scale_loop(debug)
        if scaled:
            time.sleep(sleep)
            backoff = sleep
        else:
            # logger.warn() is deprecated; warning() with lazy %-args
            # is the supported spelling.
            logger.warning("backoff: %s", backoff)
            backoff *= 2
            time.sleep(backoff)