Example #1
0
def moto_start(set_region):
    """Start all moto AWS service mocks for the fixture's lifetime.

    Starts the autoscaling/ec2/ecs/sns/sqs mocks before the test and stops
    the SAME mock objects afterwards.  The original teardown called e.g.
    ``mock_ec2().stop()``, which constructs a brand-new mock object and
    stops that one instead of the mock that was actually started — leaving
    the real mock active (and raising on recent moto versions, which refuse
    to stop a mock that was never started).

    Args:
        set_region: fixture dependency that configures the AWS region.
    """
    mocks = [
        mock_autoscaling(),
        mock_ec2(),
        mock_ecs(),
        mock_sns(),
        mock_sqs(),
    ]
    for service_mock in mocks:
        service_mock.start()
    yield
    # Stop in reverse order, mirroring nested start/stop semantics.
    for service_mock in reversed(mocks):
        service_mock.stop()
Example #2
0
    def setUp(self):
        """Build the test fixture: dummy kube objects, a moto-mocked EC2 /
        autoscaling environment, and the Cluster instance under test.
        """
        # load dummy kube specs
        dir_path = os.path.dirname(os.path.realpath(__file__))
        # yaml.load() without an explicit Loader warns on PyYAML >= 5.1 and
        # raises TypeError on PyYAML >= 6; safe_load() parses these trusted
        # test fixtures identically and accepts the open file directly.
        with open(os.path.join(dir_path, 'data/busybox.yaml'), 'r') as f:
            self.dummy_pod = yaml.safe_load(f)
        with open(os.path.join(dir_path, 'data/ds-pod.yaml'), 'r') as f:
            self.dummy_ds_pod = yaml.safe_load(f)
        with open(os.path.join(dir_path, 'data/rc-pod.yaml'), 'r') as f:
            self.dummy_rc_pod = yaml.safe_load(f)
        with open(os.path.join(dir_path, 'data/node.yaml'), 'r') as f:
            self.dummy_node = yaml.safe_load(f)

        # this isn't actually used here
        # only needed to create the KubePod object...
        self.api = pykube.HTTPClient(
            pykube.KubeConfig.from_file('~/.kube/config'))

        # start creating our mock ec2 environment; keep the mock objects so
        # tearDown can stop the same instances
        self.mocks = [moto.mock_ec2(), moto.mock_autoscaling()]
        for moto_mock in self.mocks:
            moto_mock.start()

        client = boto3.client('autoscaling', region_name='us-west-2')
        self.asg_client = client

        # a single dummy launch configuration backing the dummy ASG
        client.create_launch_configuration(LaunchConfigurationName='dummy-lc',
                                           ImageId='ami-deadbeef',
                                           KeyName='dummy-key',
                                           SecurityGroups=[
                                               'sg-cafebeef',
                                           ],
                                           InstanceType='t2.medium')

        # the ASG is tagged so the Cluster recognizes it as a worker group
        # belonging to 'dummy-cluster'
        client.create_auto_scaling_group(AutoScalingGroupName='dummy-asg',
                                         LaunchConfigurationName='dummy-lc',
                                         MinSize=0,
                                         MaxSize=10,
                                         VPCZoneIdentifier='subnet-beefbeef',
                                         Tags=[{
                                             'Key': 'KubernetesCluster',
                                             'Value': 'dummy-cluster',
                                             'PropagateAtLaunch': True
                                         }, {
                                             'Key': 'KubernetesRole',
                                             'Value': 'worker',
                                             'PropagateAtLaunch': True
                                         }])

        # finally our cluster
        self.cluster = Cluster(aws_access_key='',
                               aws_secret_key='',
                               regions=['us-west-2', 'us-east-1', 'us-west-1'],
                               kubeconfig='~/.kube/config',
                               pod_namespace=None,
                               idle_threshold=60,
                               instance_init_time=60,
                               type_idle_threshold=60,
                               cluster_name='dummy-cluster',
                               notifier=Notifier(),
                               dry_run=False)
Example #3
0
def asg():
    """AutoScaling mock service"""
    autoscaling_mock = mock_autoscaling()
    autoscaling_mock.start()

    conn = boto3.client('autoscaling')
    launch_configs = []
    scaling_groups = []
    # Create three paired launch-configuration / ASG definitions.
    for idx in range(3):
        lc = {
            'LaunchConfigurationName': 'lc-{0}'.format(idx),
            'ImageId': 'ami-xxxxxx',
            'KeyName': 'mykey',
            'InstanceType': 'c3.xlarge',
        }
        conn.create_launch_configuration(**lc)
        launch_configs.append(lc)

        group = {
            'AutoScalingGroupName': 'asg-{0}'.format(idx),
            'LaunchConfigurationName': 'lc-{0}'.format(idx),
            'MaxSize': 10,
            'MinSize': 2,
        }
        conn.create_auto_scaling_group(**group)
        scaling_groups.append(group)

    yield {'lcs': launch_configs, 'groups': scaling_groups}
    autoscaling_mock.stop()
Example #4
0
def asg():
    """AutoScaling mock service"""
    service_mock = mock_autoscaling()
    service_mock.start()

    asg_client = boto3.client('autoscaling')
    created_lcs, created_groups = [], []
    for n in range(3):
        # Launch configuration 'lc-<n>' ...
        launch_config = dict(
            LaunchConfigurationName='lc-{0}'.format(n),
            ImageId='ami-xxxxxx',
            KeyName='mykey',
            InstanceType='c3.xlarge',
        )
        asg_client.create_launch_configuration(**launch_config)
        created_lcs.append(launch_config)
        # ... backing auto-scaling group 'asg-<n>'.
        group_config = dict(
            AutoScalingGroupName='asg-{0}'.format(n),
            LaunchConfigurationName='lc-{0}'.format(n),
            MaxSize=10,
            MinSize=2,
        )
        asg_client.create_auto_scaling_group(**group_config)
        created_groups.append(group_config)

    yield {'lcs': created_lcs, 'groups': created_groups}
    service_mock.stop()
Example #5
0
def fake_lc():
    """ASG mock service"""
    asg_mock = mock_autoscaling()
    asg_mock.start()
    asg_client = session.client("autoscaling")
    # Single launch configuration the tests inspect.
    lc_kwargs = {
        "LaunchConfigurationName": "test_lc",
        "ImageId": "ami-abcd1234",
        "InstanceType": "t2.medium",
    }
    asg_client.create_launch_configuration(**lc_kwargs)
    yield asg_client.describe_launch_configurations()
    asg_mock.stop()
Example #6
0
def fake_lc():
    """ASG mock service"""
    autoscaling_mock = mock_autoscaling()
    autoscaling_mock.start()
    conn = session.client('autoscaling')
    # Register one launch configuration, then hand its listing to the test.
    conn.create_launch_configuration(LaunchConfigurationName='test_lc',
                                     ImageId='ami-abcd1234',
                                     InstanceType='t2.medium')
    yield conn.describe_launch_configurations()
    autoscaling_mock.stop()
Example #7
0
def boto_patches(context):
    """Run the scenario inside moto sqs/ec2/autoscaling mocks, exposing a
    freshly created subnet id on ``context.subnet_id``."""
    # Start the mocks in a fixed order and stop them in the same order.
    active_mocks = [mock_sqs(), mock_ec2(), mock_autoscaling()]
    for service_mock in active_mocks:
        service_mock.start()

    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/24')
    subnet = ec2.create_subnet(CidrBlock='10.0.0.0/24',
                               VpcId=vpc['Vpc']['VpcId'],
                               AvailabilityZone='us-west-2a')
    context.subnet_id = subnet['Subnet']['SubnetId']

    yield

    for service_mock in active_mocks:
        service_mock.stop()
Example #8
0
def fake_asg():
    """ASG mock service"""
    autoscaling_mock = mock_autoscaling()
    autoscaling_mock.start()
    conn = session.client('autoscaling')

    # Launch configuration the group below references.
    conn.create_launch_configuration(LaunchConfigurationName='test_lc',
                                     ImageId='ami-abcd1234',
                                     InstanceType='t2.medium')

    # Placeholder-valued ASG; 'string'/123 values only need to satisfy moto.
    group_kwargs = dict(
        AutoScalingGroupName='test_asg',
        LaunchConfigurationName='test_lc',
        InstanceId='string',
        MinSize=123,
        MaxSize=123,
        DesiredCapacity=123,
        DefaultCooldown=123,
        AvailabilityZones=['string'],
        # LoadBalancerNames=['string'],  # intentionally omitted
        HealthCheckType='string',
        HealthCheckGracePeriod=123,
        PlacementGroup='string',
        VPCZoneIdentifier='string',
        TerminationPolicies=['string'],
        Tags=[
            {
                'ResourceId': 'string',
                'ResourceType': 'string',
                'Key': 'string',
                'Value': 'string',
                'PropagateAtLaunch': True,
            },
        ],
    )
    conn.create_auto_scaling_group(**group_kwargs)

    yield conn.describe_auto_scaling_groups()
    autoscaling_mock.stop()
Example #9
0
def fake_asg():
    """ASG mock service"""
    service_mock = mock_autoscaling()
    service_mock.start()
    asg_client = session.client("autoscaling")

    # One launch configuration backing the group created below.
    lc_params = {
        "LaunchConfigurationName": "test_lc",
        "ImageId": "ami-abcd1234",
        "InstanceType": "t2.medium",
    }
    asg_client.create_launch_configuration(**lc_params)

    # Placeholder-valued group; "string"/123 merely satisfy the mock's API.
    asg_client.create_auto_scaling_group(
        AutoScalingGroupName="test_asg",
        LaunchConfigurationName="test_lc",
        InstanceId="string",
        MinSize=123,
        MaxSize=123,
        DesiredCapacity=123,
        DefaultCooldown=123,
        AvailabilityZones=["string"],
        # LoadBalancerNames=["string"],  # intentionally omitted
        HealthCheckType="string",
        HealthCheckGracePeriod=123,
        PlacementGroup="string",
        VPCZoneIdentifier="string",
        TerminationPolicies=["string"],
        Tags=[
            {
                "ResourceId": "string",
                "ResourceType": "string",
                "Key": "string",
                "Value": "string",
                "PropagateAtLaunch": True,
            }
        ],
    )

    yield asg_client.describe_auto_scaling_groups()
    service_mock.stop()
Example #10
0
def setup_mocked_as_group():
    """Patch moto's autoscaling backend so capacity changes also scale a
    local docker-compose 'worker' service, then start the mock without
    resetting backend state.

    NOTE(review): the replacements are plain functions assigned to backend
    *instance* attributes, so they will not receive ``self`` implicitly
    when invoked as ``backend.change_capacity(...)``, yet they also pass
    ``self`` on to the saved (already bound) originals.  This depends on
    moto's internal calling convention — confirm against the moto version
    in use.
    """
    mocked_as = mock_autoscaling()

    # Keep references to the original backend methods before patching.
    change_capacity_old = mocked_as.backends[REGION].change_capacity
    set_desired_capacity_old = mocked_as.backends[REGION].set_desired_capacity

    def change_capacity(self, group_name, scaling_adjustment):
        # Trace the call, then delegate to the original implementation.
        print("Hello from: change_capacity")

        change_capacity_old(self, group_name, scaling_adjustment)

    def set_desired_capacity(self, new_capacity):
        print("Setting new desired capacity: %s" % str(new_capacity))

        # Scale real docker-compose workers up to match the mocked ASG
        # capacity before delegating to moto's implementation.
        if number_of_docker_workers() < new_capacity:
            run_docker_compose_cmd("up -d --scale worker=%s --no-recreate" %
                                   str(new_capacity))

        set_desired_capacity_old(self, new_capacity)

    # Install the wrappers on the regional backend.
    mocked_as.backends[REGION].change_capacity = change_capacity
    mocked_as.backends[REGION].set_desired_capacity = set_desired_capacity

    # reset=False preserves the patched backend state across the start.
    mocked_as.start(reset=False)
Example #11
0
def autoscaling():
    """Yield a boto3 autoscaling client backed by moto's mock service."""
    service_mock = mock_autoscaling()
    service_mock.start()
    try:
        yield boto3.client('autoscaling', region_name='us-east-1')
    finally:
        # Equivalent to the context-manager form: always stop the mock.
        service_mock.stop()
    def setUp(self):
        """Build the test fixture: dummy kube objects (with refreshed node
        heartbeat timestamps), a moto-mocked EC2/autoscaling environment,
        and the Cluster instance under test.
        """
        # load dummy kube specs
        dir_path = os.path.dirname(os.path.realpath(__file__))
        # yaml.load() without an explicit Loader warns on PyYAML >= 5.1 and
        # raises TypeError on PyYAML >= 6; safe_load() parses these trusted
        # fixtures identically (including !!timestamp values -> datetime).
        with open(os.path.join(dir_path, 'data/busybox.yaml'), 'r') as f:
            self.dummy_pod = yaml.safe_load(f)
        with open(os.path.join(dir_path, 'data/ds-pod.yaml'), 'r') as f:
            self.dummy_ds_pod = yaml.safe_load(f)
        with open(os.path.join(dir_path, 'data/rc-pod.yaml'), 'r') as f:
            self.dummy_rc_pod = yaml.safe_load(f)
        with open(os.path.join(dir_path, 'data/node.yaml'), 'r') as f:
            self.dummy_node = yaml.safe_load(f)
            # Refresh the Ready condition's heartbeat so the node does not
            # look stale relative to "now" (tzinfo preserved from the file).
            for condition in self.dummy_node['status']['conditions']:
                if condition['type'] == 'Ready' and condition[
                        'status'] == 'True':
                    condition['lastHeartbeatTime'] = datetime.now(
                        condition['lastHeartbeatTime'].tzinfo)
            # Convert timestamps to strings to match PyKube
            for condition in self.dummy_node['status']['conditions']:
                condition['lastHeartbeatTime'] = datetime.isoformat(
                    condition['lastHeartbeatTime'])
                condition['lastTransitionTime'] = datetime.isoformat(
                    condition['lastTransitionTime'])

        # this isn't actually used here
        # only needed to create the KubePod object...
        self.api = pykube.HTTPClient(
            pykube.KubeConfig.from_file('~/.kube/config'))

        # start creating our mock ec2 environment; keep the mock objects so
        # tearDown can stop the same instances
        self.mocks = [moto.mock_ec2(), moto.mock_autoscaling()]
        for moto_mock in self.mocks:
            moto_mock.start()

        client = boto3.client('autoscaling', region_name='us-west-2')
        self.asg_client = client

        # a single dummy launch configuration backing the dummy ASG
        client.create_launch_configuration(LaunchConfigurationName='dummy-lc',
                                           ImageId='ami-deadbeef',
                                           KeyName='dummy-key',
                                           SecurityGroups=[
                                               'sg-cafebeef',
                                           ],
                                           InstanceType='t2.medium')

        # the ASG is tagged so the Cluster recognizes it as a worker group
        # belonging to 'dummy-cluster'
        client.create_auto_scaling_group(AutoScalingGroupName='dummy-asg',
                                         LaunchConfigurationName='dummy-lc',
                                         MinSize=0,
                                         MaxSize=10,
                                         VPCZoneIdentifier='subnet-beefbeef',
                                         Tags=[{
                                             'Key': 'KubernetesCluster',
                                             'Value': 'dummy-cluster',
                                             'PropagateAtLaunch': True
                                         }, {
                                             'Key': 'KubernetesRole',
                                             'Value': 'worker',
                                             'PropagateAtLaunch': True
                                         }])

        # finally our cluster
        self.cluster = Cluster(
            aws_access_key='fake',
            aws_secret_key='fake',
            aws_regions=['us-west-2', 'us-east-1', 'us-west-1'],
            azure_client_id='',
            azure_client_secret='',
            azure_subscription_id='',
            azure_tenant_id='',
            azure_resource_group_names=[],
            azure_slow_scale_classes=[],
            kubeconfig='~/.kube/config',
            pod_namespace=None,
            drain_utilization_below=0.3,
            idle_threshold=60,
            instance_init_time=60,
            type_idle_threshold=60,
            cluster_name='dummy-cluster',
            notifier=mock.Mock(),
            dry_run=False)
Example #13
0
def autoscaling():
    """Yield a boto3 autoscaling client backed by moto's mock service."""
    asg_mock = mock_autoscaling()
    asg_mock.start()
    try:
        yield boto3.client("autoscaling", region_name="us-east-1")
    finally:
        # Equivalent to the context-manager form: always stop the mock.
        asg_mock.stop()
Example #14
0
    def setUp(self):
        """Build the test fixture: dummy kube objects, a moto-mocked EC2 /
        autoscaling environment, and the Cluster instance under test.
        """
        # load dummy kube specs
        dir_path = os.path.dirname(os.path.realpath(__file__))
        # yaml.load() without an explicit Loader warns on PyYAML >= 5.1 and
        # raises TypeError on PyYAML >= 6; safe_load() parses these trusted
        # test fixtures identically and accepts the open file directly.
        with open(os.path.join(dir_path, 'data/busybox.yaml'), 'r') as f:
            self.dummy_pod = yaml.safe_load(f)
        with open(os.path.join(dir_path, 'data/ds-pod.yaml'), 'r') as f:
            self.dummy_ds_pod = yaml.safe_load(f)
        with open(os.path.join(dir_path, 'data/rc-pod.yaml'), 'r') as f:
            self.dummy_rc_pod = yaml.safe_load(f)
        with open(os.path.join(dir_path, 'data/node.yaml'), 'r') as f:
            self.dummy_node = yaml.safe_load(f)

        # this isn't actually used here
        # only needed to create the KubePod object...
        self.api = pykube.HTTPClient(pykube.KubeConfig.from_file('~/.kube/config'))

        # start creating our mock ec2 environment; keep the mock objects so
        # tearDown can stop the same instances
        self.mocks = [moto.mock_ec2(), moto.mock_autoscaling()]
        for moto_mock in self.mocks:
            moto_mock.start()

        client = boto3.client('autoscaling')
        self.asg_client = client

        # a single dummy launch configuration backing the dummy ASG
        client.create_launch_configuration(
            LaunchConfigurationName='dummy-lc',
            ImageId='ami-deadbeef',
            KeyName='dummy-key',
            SecurityGroups=[
                'sg-cafebeef',
            ],
            InstanceType='t2.medium'
        )

        # the ASG is tagged so the Cluster recognizes it as a worker group
        # belonging to 'dummy-cluster'
        client.create_auto_scaling_group(
            AutoScalingGroupName='dummy-asg',
            LaunchConfigurationName='dummy-lc',
            MinSize=0,
            MaxSize=10,
            VPCZoneIdentifier='subnet-beefbeef',
            Tags=[
                {
                    'Key': 'KubernetesCluster',
                    'Value': 'dummy-cluster',
                    'PropagateAtLaunch': True
                },
                {
                    'Key': 'KubernetesRole',
                    'Value': 'worker',
                    'PropagateAtLaunch': True
                }
            ]
        )

        # finally our cluster
        self.cluster = Cluster(
            aws_access_key='',
            aws_secret_key='',
            regions=['us-west-2', 'us-east-1', 'us-west-1'],
            kubeconfig='~/.kube/config',
            idle_threshold=60,
            instance_init_time=60,
            type_idle_threshold=60,
            cluster_name='dummy-cluster',
            slack_hook='',
            dry_run=False
        )
Example #15
0
def setup_autoscaling():
    """Activate moto's autoscaling mock for the fixture's lifetime.

    The original code called ``stop()`` only on the normal resumption path,
    so an exception thrown into the generator (e.g. fixture teardown after
    an error) would leave the mock running and leak patched state into
    later tests.  The try/finally guarantees the mock is always stopped.
    """
    mock_autoscaling_obj = moto.mock_autoscaling()
    mock_autoscaling_obj.start()
    try:
        yield
    finally:
        mock_autoscaling_obj.stop()