Example #1
  def run_deploy_upsert_load_balancer_pipeline(self):
    url_path = 'pipelines/{0}/{1}'.format(self.TEST_APP, self.pipeline_id)

    previous_group_name = frigga.Naming.server_group(
        app=self.TEST_APP,
        stack=self.TEST_STACK,
        version='v000')

    deployed_group_name = frigga.Naming.server_group(
        app=self.TEST_APP,
        stack=self.TEST_STACK,
        version='v001')

    payload = self.agent.make_json_payload_from_kwargs(
        type='manual',
        user='******')

    builder = gcp.GcpContractBuilder(self.appengine_observer)
    (builder.new_clause_builder('Service Modified', retryable_for_secs=60)
     .inspect_resource('apps.services',
                       self.__lb_name,
                       appsId=self.__gcp_project)
     .EXPECT(
         ov_factory.value_list_path_contains(
             jp.build_path('split', 'allocations'),
             jp.DICT_MATCHES({previous_group_name: jp.NUM_EQ(0.9),
                              deployed_group_name: jp.NUM_EQ(0.1)}))))

    return st.OperationContract(
        self.new_post_operation(
            title='run_deploy_upsert_load_balancer_pipeline',
            data=payload, path=url_path),
        builder.build())
Example #2
    def test_list_match_unique_ok(self):
        context = ExecutionContext()
        source = [1, 2]
        want = [jp.NUM_EQ(1)]
        match_pred = jp.LIST_MATCHES(want, unique=True)
        result = match_pred(context, source)

        expect = (jp.SequencedPredicateResultBuilder(match_pred).append_result(
            jp.MapPredicate(jp.NUM_EQ(1))(context, source)).build(True))

        self.assertTrue(result)
        self.assertEquals(expect, result)
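
The test above relies on citest's predicate call convention: anything built with jp, whether an atomic jp.NUM_EQ or a composite such as jp.LIST_MATCHES, is a callable that takes an ExecutionContext and the value under test and returns a result object that is truthy only on a match. Below is a minimal sketch of that convention, an illustration only, assuming the same jp and ExecutionContext imports used by the test above.

# Sketch of the (context, value) call convention; assumes the same imports
# (jp, ExecutionContext) as the test above.
context = ExecutionContext()

# Atomic predicate: matches a single numeric value.
eq_two = jp.NUM_EQ(2)
assert eq_two(context, 2)      # the result object is truthy on a match
assert not eq_two(context, 3)  # and falsy on a mismatch

# Composite predicate: each wanted element must match somewhere in the list.
wants_one_and_two = jp.LIST_MATCHES([jp.NUM_EQ(1), jp.NUM_EQ(2)])
assert wants_one_and_two(context, [1, 2, 3])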
Example #3
    def trigger_bake_and_deploy_google_pipeline(self):
        path = "applications/{app}/pipelines".format(app=self.TEST_APP)

        group_name = "{app}-{stack}-v000".format(
            app=self.TEST_APP, stack=self.bindings["TEST_STACK"])
        builder = gcp.GcpContractBuilder(self.gcp_observer)
        (builder.new_clause_builder(
            "Managed Instance Group Deployed",
            retryable_for_secs=30).inspect_resource(
                "instanceGroupManagers", group_name).EXPECT(
                    ov_factory.value_list_path_contains(
                        "targetSize", jp.NUM_EQ(1))))

        return st.OperationContract(
            self.jenkins_agent.new_jenkins_trigger_operation(
                title="monitor_bake_pipeline",
                job=self.bindings["JENKINS_JOB"],
                token=self.bindings["JENKINS_TOKEN"],
                status_class=gate.GatePipelineStatus,
                status_path=path,
                max_wait_secs=1080,
            ),  # Allow 18 mins to bake and deploy.
            contract=builder.build(),
            cleanup=self.capture_baked_image,
        )
Example #4
 def execute_delete_statefulset_pipeline(self):
     bindings = self.bindings
     payload = self.agent.make_json_payload_from_kwargs(
         job=[{
             'type': 'manual',
             'user': '******'
         }],
         description='Delete StatefulSet in Application ' + self.TEST_APP,
         application=self.TEST_APP)
     builder = kube.KubeContractBuilder(self.kube_observer)
     (builder.new_clause_builder(
         'StatefulSet Deleted', retryable_for_secs=15).get_resources(
             'statefulsets',
             extra_args=[
                 self.TEST_APP + self.POSTFIX_STATEFUL_DAEMONSET,
                 '--namespace', self.TEST_NAMESPACE
             ]).EXPECT(
                 ov_factory.value_list_path_contains(
                     'targetSize', jp.NUM_EQ(0))).OR(
                         self.__not_found_observation_predicate()))
     return st.OperationContract(self.new_post_operation(
         title='Delete_StatefulSet',
         data=payload,
         path='pipelines/' + self.TEST_APP + '/statefulset-delete-pipeline',
         status_class=st.SynchronousHttpOperationStatus),
                                 contract=builder.build())
Example #5
    def scale_manifest(self):
        """Creates OperationContract for scaleManifest.

    To verify the operation, we just check that the deployment has changed size
    """
        bindings = self.bindings
        name = self.TEST_APP + '-deployment'
        payload = self.agent.make_json_payload_from_kwargs(
            job=[{
                'cloudProvider': 'kubernetes',
                'account': bindings['SPINNAKER_KUBERNETES_V2_ACCOUNT'],
                'manifestName': 'deployment ' + name,
                'location': self.TEST_NAMESPACE,
                'type': 'scaleManifest',
                'user': '******',
                'replicas': 2
            }],
            description='Deploy manifest',
            application=self.TEST_APP)

        builder = kube.KubeContractBuilder(self.kube_v2_observer)
        (builder.new_clause_builder(
            'Deployment scaled', retryable_for_secs=15).get_resources(
                'deploy',
                extra_args=[name, '--namespace', self.TEST_NAMESPACE]).EXPECT(
                    ov_factory.value_list_contains(
                        jp.DICT_MATCHES({
                            'spec':
                            jp.DICT_MATCHES({'replicas': jp.NUM_EQ(2)})
                        }))))

        return st.OperationContract(self.new_post_operation(
            title='scale_manifest', data=payload, path='tasks'),
                                    contract=builder.build())
Example #6
    def create_server_group(self):
        """Creates OperationContract for createServerGroup.

    To verify the operation, we just check that Managed Instance Group
    for the server was created.
    """
        bindings = self.bindings

        # Spinnaker determines the group name created,
        # which will be the following:
        group_name = '{app}-{stack}-v000'.format(app=self.TEST_APP,
                                                 stack=bindings['TEST_STACK'])

        payload = self.agent.make_json_payload_from_kwargs(
            job=[{
                'cloudProvider': 'gce',
                'application': self.TEST_APP,
                'credentials': bindings['SPINNAKER_GOOGLE_ACCOUNT'],
                'strategy': '',
                'capacity': {
                    'min': 2,
                    'max': 2,
                    'desired': 2
                },
                'targetSize': 2,
                'image': bindings['TEST_GCE_IMAGE_NAME'],
                'zone': bindings['TEST_GCE_ZONE'],
                'stack': bindings['TEST_STACK'],
                'instanceType': 'f1-micro',
                'type': 'createServerGroup',
                'loadBalancers': [self.__lb_name],
                'availabilityZones': {
                    bindings['TEST_GCE_REGION']: [bindings['TEST_GCE_ZONE']]
                },
                'instanceMetadata': {
                    'startup-script': ('sudo apt-get update'
                                       ' && sudo apt-get install apache2 -y'),
                    'load-balancer-names':
                    self.__lb_name
                },
                'account': bindings['SPINNAKER_GOOGLE_ACCOUNT'],
                'authScopes': ['compute'],
                'user': '******'
            }],
            description='Create Server Group in ' + group_name,
            application=self.TEST_APP)

        builder = gcp.GcpContractBuilder(self.gcp_observer)
        (builder.new_clause_builder(
            'Managed Instance Group Added',
            retryable_for_secs=30).inspect_resource(
                'instanceGroupManagers', group_name).EXPECT(
                    ov_factory.value_list_path_contains(
                        'targetSize', jp.NUM_EQ(2))))

        return st.OperationContract(self.new_post_operation(
            title='create_server_group', data=payload, path='tasks'),
                                    contract=builder.build())
Example #7
  def upsert_load_balancer(self):
    self.__use_lb_name = 'katotest-lb-' + self.test_id
    self.__use_lb_hc_name = '%s-hc' % self.__use_lb_name
    self.__use_lb_tp_name = '%s-tp' % self.__use_lb_name
    self.__use_lb_target = '{0}/targetPools/{1}'.format(
        self.bindings['TEST_GCE_REGION'], self.__use_lb_tp_name)

    interval = 123
    healthy = 4
    unhealthy = 5
    timeout = 78
    path = '/' + self.__use_lb_target

    health_check = {
        'checkIntervalSec': interval,
        'healthyThreshold': healthy,
        'unhealthyThreshold': unhealthy,
        'timeoutSec': timeout,
        'requestPath': path
        }

    # pylint: disable=bad-continuation
    payload = self.agent.type_to_payload(
        'upsertGoogleLoadBalancerDescription',
        {
          'healthCheck': health_check,
          'region': self.bindings['TEST_GCE_REGION'],
          'credentials': self.bindings['SPINNAKER_GOOGLE_ACCOUNT'],
          'loadBalancerName': self.__use_lb_name
        })

    builder = gcp.GcpContractBuilder(self.gcp_observer)
    (builder.new_clause_builder('Forwarding Rules Added',
                                retryable_for_secs=30)
       .list_resource('forwardingRules')
       .contains_path_value('name', self.__use_lb_name)
       .contains_path_value('target', self.__use_lb_target))
    (builder.new_clause_builder('Target Pool Added', retryable_for_secs=15)
       .list_resource('targetPools')
       .contains_path_value('name', self.__use_lb_tp_name))

    # We list the resources here because the name isn't exact
    # and the list also returns the details we need.
    hc_dict = dict(health_check)
    del hc_dict['requestPath']

    hc_match = {name: jp.NUM_EQ(value) for name, value in hc_dict.items()}
    hc_match['requestPath'] = jp.STR_EQ(path)
    hc_match['name'] = jp.STR_SUBSTR(self.__use_lb_hc_name)
    (builder.new_clause_builder('Health Check Added', retryable_for_secs=15)
       .list_resource('httpHealthChecks')
       .contains_match(hc_match))

    return st.OperationContract(
      self.new_post_operation(
          title='upsert_load_balancer', data=payload, path='ops'),
      contract=builder.build())
Example #8
    def create_server_group(self):
        """Creates OperationContract for createServerGroup.

        To verify the operation, we just check that Managed Instance Group
        for the server was created.
        """
        bindings = self.bindings

        # Spinnaker determines the group name created,
        # which will be the following:
        group_name = "{app}-{stack}-v000".format(
            app=self.TEST_APP, stack=bindings["TEST_STACK"]
        )

        payload = self.agent.make_json_payload_from_kwargs(
            job=[
                {
                    "cloudProvider": "gce",
                    "application": self.TEST_APP,
                    "credentials": bindings["SPINNAKER_GOOGLE_ACCOUNT"],
                    "strategy": "",
                    "capacity": {"min": 2, "max": 2, "desired": 2},
                    "targetSize": 2,
                    "image": bindings["TEST_GCE_IMAGE_NAME"],
                    "zone": bindings["TEST_GCE_ZONE"],
                    "stack": bindings["TEST_STACK"],
                    "instanceType": "f1-micro",
                    "type": "createServerGroup",
                    "loadBalancers": [self.__lb_name],
                    "availabilityZones": {
                        bindings["TEST_GCE_REGION"]: [bindings["TEST_GCE_ZONE"]]
                    },
                    "instanceMetadata": {"load-balancer-names": self.__lb_name},
                    "account": bindings["SPINNAKER_GOOGLE_ACCOUNT"],
                    "authScopes": ["compute"],
                    "user": "******",
                }
            ],
            description="Create Server Group in " + group_name,
            application=self.TEST_APP,
        )

        builder = gcp.GcpContractBuilder(self.gcp_observer)
        (
            builder.new_clause_builder(
                "Managed Instance Group Added", retryable_for_secs=30
            )
            .inspect_resource("instanceGroupManagers", group_name)
            .EXPECT(ov_factory.value_list_path_contains("targetSize", jp.NUM_EQ(2)))
        )

        return st.OperationContract(
            self.new_post_operation(
                title="create_server_group", data=payload, path="tasks"
            ),
            contract=builder.build(),
        )
Example #9
    def delete_server_group(self):
        """Creates OperationContract for deleteServerGroup.

        To verify the operation, we just check that the GCP managed instance group
        is no longer visible on GCP (or is in the process of terminating).
        """
        bindings = self.bindings
        group_name = "{app}-{stack}-v000".format(
            app=self.TEST_APP, stack=bindings["TEST_STACK"]
        )

        # TODO(ttomsu): Change this back from asgName to serverGroupName
        #               once it is fixed in orca.
        payload = self.agent.make_json_payload_from_kwargs(
            job=[
                {
                    "cloudProvider": "gce",
                    "serverGroupName": group_name,
                    "region": bindings["TEST_GCE_REGION"],
                    "zone": bindings["TEST_GCE_ZONE"],
                    "asgName": group_name,
                    "type": "destroyServerGroup",
                    "regions": [bindings["TEST_GCE_REGION"]],
                    "zones": [bindings["TEST_GCE_ZONE"]],
                    "credentials": bindings["SPINNAKER_GOOGLE_ACCOUNT"],
                    "user": "******",
                }
            ],
            application=self.TEST_APP,
            description="DestroyServerGroup: " + group_name,
        )

        builder = gcp.GcpContractBuilder(self.gcp_observer)
        (
            builder.new_clause_builder("Managed Instance Group Removed")
            .inspect_resource("instanceGroupManagers", group_name)
            .EXPECT(
                ov_factory.error_list_contains(gcp.HttpErrorPredicate(http_code=404))
            )
            .OR(ov_factory.value_list_path_contains("targetSize", jp.NUM_EQ(0)))
        )

        (
            builder.new_clause_builder("Instances Are Removed", retryable_for_secs=30)
            .list_resource("instances")
            .EXPECT(
                ov_factory.value_list_path_excludes("name", jp.STR_SUBSTR(group_name))
            )
        )

        return st.OperationContract(
            self.new_post_operation(
                title="delete_server_group", data=payload, path="tasks"
            ),
            contract=builder.build(),
        )
Example #10
    def trigger_bake_and_deploy_pipeline(self, sg_id='v000'):
        """Create OperationContract that manually trigger the bake and deploy pipeline
        This create a new server group below the given load balancer.

        To verify the operation, we check that the spinnaker server group
        for the given load balancer was created in correct size.
        """

        pipeline_id = self.bake_pipeline_id
        payload = self.agent.make_json_payload_from_kwargs(
            job=[{
                "dryRun": False,
                "type": "manual",
                "user": "******"
            }],
            description="Test - begin bake and deploy: {pl}".format(
                pl=pipeline_id),
            application=self.TEST_APP)

        builder = az.AzContractBuilder(self.az_observer)
        (builder.new_clause_builder(
            "Has Virtual Machine Scale Set",
            retryable_for_secs=30).collect_resources(
                az_resource='vmss',
                command='list',
                args=[
                    '--resource-group',
                    '{app}-{rg}'.format(app=self.TEST_APP,
                                        rg=self.__rg_location)
                ]).EXPECT(
                    ov_factory.value_list_contains(
                        jp.DICT_MATCHES({
                            "name": jp.STR_EQ('{lb}-{sg}'.format(
                                lb=self.__full_lb_name, sg=sg_id)),
                            "sku": jp.DICT_MATCHES({
                                "name": jp.STR_EQ(self.__sku['name']),
                                "tier": jp.STR_EQ(self.__sku['tier']),
                                "capacity": jp.NUM_EQ(self.__sku['capacity'])
                            })
                        }))))

        return st.OperationContract(
            self.new_post_operation(
                title='bake and deploy',
                data=payload,
                # TODO: cannot use v2 url: pipelines/v2/{app}/{pl}
                path='pipelines/{app}/{pl}'.format(app=self.TEST_APP,
                                                   pl=pipeline_id),
                max_wait_secs=3600),
            contract=builder.build())
Example #11
    def run_deploy_upsert_load_balancer_pipeline(self):
        url_path = "pipelines/{0}/{1}".format(self.TEST_APP, self.pipeline_id)

        previous_group_name = frigga.Naming.server_group(
            app=self.TEST_APP, stack=self.TEST_STACK, version="v000"
        )

        deployed_group_name = frigga.Naming.server_group(
            app=self.TEST_APP, stack=self.TEST_STACK, version="v001"
        )

        payload = self.agent.make_json_payload_from_kwargs(
            type="manual", user="******"
        )

        builder = gcp.GcpContractBuilder(self.appengine_observer)
        (
            builder.new_clause_builder("Service Modified", retryable_for_secs=60)
            .inspect_resource(
                "apps.services", self.__lb_name, appsId=self.__gcp_project
            )
            .EXPECT(
                ov_factory.value_list_path_contains(
                    jp.build_path("split", "allocations"),
                    jp.DICT_MATCHES(
                        {
                            previous_group_name: jp.NUM_EQ(0.9),
                            deployed_group_name: jp.NUM_EQ(0.1),
                        }
                    ),
                )
            )
        )

        return st.OperationContract(
            self.new_post_operation(
                title="run_deploy_upsert_load_balancer_pipeline",
                data=payload,
                path=url_path,
            ),
            builder.build(),
        )
Example #12
  def test_c_put_array(self):
    """Example writes an array value then shows many ways to check values."""
    key = self.make_key('MyArrayKey')
    expect_value = [{'a': 1, 'b': 1}, 2, {'a': 3, 'b': 3}]

    operation = http_agent.HttpPostOperation(
        title='Writing Key Value',
        data=json.JSONEncoder().encode(expect_value),
        path='/put/' + key)
    # Examples of different ways to express things
    builder = st.HttpContractBuilder(self.scenario.agent)
    (builder.new_clause_builder('Contains a=1 and contains b=3')
     .get_url_path('/lookup/' + key)
     .contains_path_value('a', 1)
     .contains_path_value('b', 3))
    (builder.new_clause_builder('Contains (a=1 and b=1)')
     .get_url_path('/lookup/' + key)
     .contains_pred_list([jp.PathPredicate('a', jp.NUM_EQ(1)),
                          jp.PathPredicate('b', jp.NUM_EQ(1))]))
    (builder.new_clause_builder('Does not contain (a=1 and b=3)')
     .get_url_path('/lookup/' + key)
     .excludes_pred_list([jp.PathPredicate('a', jp.NUM_EQ(1)),
                          jp.PathPredicate('b', jp.NUM_EQ(3))]))
    (builder.new_clause_builder('Contains List')
     .get_url_path('/lookup/' + key)
     .contains_match([jp.EQUIVALENT(2),
                      jp.DICT_MATCHES({'a': jp.EQUIVALENT(3),
                                       'b': jp.DIFFERENT(1)})]))
    (builder.new_clause_builder("Contains Multiple A's >= 0")
     .get_url_path('/lookup/' + key)
     .contains_path_pred('a', jp.NUM_GE(0), min=2))
    (builder.new_clause_builder("Contains only 1 A >= 2")
     .get_url_path('/lookup/' + key)
     .contains_path_pred('a', jp.NUM_GE(2), min=1, max=1))
    (builder.new_clause_builder("Contains no A >= 10")
     .get_url_path('/lookup/' + key)
     .excludes_path_pred('a', jp.NUM_GE(10)))

    contract = builder.build()

    test = st.OperationContract(operation, contract)
    self.run_test_case(test)
Example #13
    def resize_server_group(self):
        job = [{
            "targetSize": 2,
            "capacity": {
                "min": 2,
                "max": 2,
                "desired": 2
            },
            "replicaPoolName": self.__server_group_name,
            "numReplicas": 2,
            "region": self.TEST_REGION,
            "zone": self.TEST_ZONE,
            "asgName": self.__server_group_name,
            "serverGroupName": self.__server_group_name,
            "type": "resizeServerGroup",
            "regions": [self.TEST_REGION],
            "zones": [self.TEST_ZONE],
            "credentials": self.bindings["SPINNAKER_GOOGLE_ACCOUNT"],
            "cloudProvider": "gce",
            "user": "******",
        }]
        job[0].update(self.__mig_payload_extra)

        # We set the timeout to 10 minutes, as Spinnaker is returning success once
        # it has seen the new instance appear, but the contract is waiting for the
        # instance group's self-reported size to be the new size. There is sometimes a
        # delay of several minutes between the instance first appearing and the instance
        # group manager reporting the new size. In order to avoid intermittently failing
        # tests, we set a reasonably long timeout to wait for consistency between the
        # Spinnaker internal contract and the contract this test is measuring.
        builder = gcp.GcpContractBuilder(self.gcp_observer)
        (builder.new_clause_builder(
            self.__mig_title + " Resized",
            retryable_for_secs=600).inspect_resource(
                self.__mig_resource_name, self.__server_group_name,
                **self.__mig_resource_kwargs).EXPECT(
                    ov_factory.value_list_path_contains("size", jp.NUM_EQ(2))))

        payload = self.agent.make_json_payload_from_kwargs(
            job=job,
            description=self.__mig_title + " Test - resize to 2 instances",
            application=self.TEST_APP,
        )

        return st.OperationContract(
            self.new_post_operation(title="resize_instances",
                                    data=payload,
                                    path=self.__path),
            contract=builder.build(),
        )
Example #14
    def test_dict_match_missing_path(self):
        context = ExecutionContext()
        source = {'n': 10}
        want = {'missing': jp.NUM_EQ(10)}
        result = jp.DICT_MATCHES(want)(context, source)

        expect = (jp.KeyedPredicateResultBuilder(
            jp.DICT_MATCHES(want)).add_result(
                'missing',
                jp.MissingPathError(source=source,
                                    target_path='missing')).build(False))

        self.assertFalse(result)
        self.assertEquals(expect, result)
Example #15
  def delete_server_group(self):
    """Creates OperationContract for deleteServerGroup.

    To verify the operation, we just check that the AWS Auto Scaling Group
    is no longer visible on AWS (or is in the process of terminating).
    """
    bindings = self.bindings

    group_name = '{app}-{stack}-v000'.format(
        app=self.TEST_APP, stack=bindings['TEST_STACK'])

    payload = self.agent.make_json_payload_from_kwargs(
        job=[{
            'cloudProvider': 'aws',
            'type': 'destroyServerGroup',
            'serverGroupName': group_name,
            'asgName': group_name,
            'region': bindings['TEST_AWS_REGION'],
            'regions': [bindings['TEST_AWS_REGION']],
            'credentials': bindings['SPINNAKER_AWS_ACCOUNT'],
            'user': '******'
        }],
        application=self.TEST_APP,
        description='DestroyServerGroup: ' + group_name)

    builder = aws.AwsPythonContractBuilder(self.aws_observer)
    (builder.new_clause_builder('Auto Scaling Group Removed')
     .call_method(
         self.autoscaling_client.describe_auto_scaling_groups,
         AutoScalingGroupNames=[group_name])
     .EXPECT(
         ov_factory.error_list_contains(
             jp.ExceptionMatchesPredicate(
                 (BotoCoreError, ClientError), 'AutoScalingGroupNotFound')))
     .OR(
         ov_factory.value_list_path_contains(
             'AutoScalingGroups',
             jp.LIST_MATCHES([])))
     .OR(
         ov_factory.value_list_path_contains(
             'AutoScalingGroups',
             jp.LIST_MATCHES([
                 jp.DICT_MATCHES({'Status': jp.STR_SUBSTR('Delete'),
                                  'MaxSize': jp.NUM_EQ(0)})])))
     )

    return st.OperationContract(
        self.new_post_operation(
            title='delete_server_group', data=payload, path='tasks'),
        contract=builder.build())
Example #16
    def test_dict_match_missing_path(self):
        context = ExecutionContext()
        source = {'n': 10}
        pred = jp.NUM_EQ(10)
        want = {'missing': pred}
        result = jp.DICT_MATCHES(want)(context, source)

        expect = (jp.KeyedPredicateResultBuilder(
            jp.DICT_MATCHES(want)).add_result(
                'missing',
                self._match_dict_attribute_result(context, pred, 'missing',
                                                  source)).build(False))

        self.assertFalse(result)
        self.assertEquals(expect, result)
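
For contrast with the missing-path failure above, here is a small sketch, under the same jp and ExecutionContext import assumptions, of jp.DICT_MATCHES succeeding when every keyed predicate finds its path in the source dict.

# Sketch only; assumes the same jp and ExecutionContext imports as the test above.
context = ExecutionContext()
source = {'n': 10, 'm': 3}

# Every key in the spec must be present in the source and satisfy its predicate.
matcher = jp.DICT_MATCHES({'n': jp.NUM_EQ(10), 'm': jp.NUM_EQ(3)})
assert matcher(context, source)

# A key absent from the source, as in the test above, produces a falsy result.
assert not jp.DICT_MATCHES({'missing': jp.NUM_EQ(10)})(context, source)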
Example #17
    def resize_server_group(self):
        job = [{
            'targetSize': 2,
            'capacity': {
                'min': 2,
                'max': 2,
                'desired': 2
            },
            'replicaPoolName': self.__server_group_name,
            'numReplicas': 2,
            'region': self.TEST_REGION,
            'zone': self.TEST_ZONE,
            'asgName': self.__server_group_name,
            'serverGroupName': self.__server_group_name,
            'type': 'resizeServerGroup',
            'regions': [self.TEST_REGION],
            'zones': [self.TEST_ZONE],
            'credentials': self.bindings['SPINNAKER_GOOGLE_ACCOUNT'],
            'cloudProvider': 'gce',
            'user': '******'
        }]
        job[0].update(self.__mig_payload_extra)

        # We set the timeout to 10 minutes, as Spinnaker is returning success once
        # it has seen the new instance appear, but the contract is waiting for the
        # instance group's self-reported size to be the new size. There is sometimes a
        # delay of several minutes between the instance first appearing and the instance
        # group manager reporting the new size. In order to avoid intermittently failing
        # tests, we set a reasonably long timeout to wait for consistency between the
        # Spinnaker internal contract and the contract this test is measuring.
        builder = gcp.GcpContractBuilder(self.gcp_observer)
        (builder.new_clause_builder(
            self.__mig_title + ' Resized',
            retryable_for_secs=600).inspect_resource(
                self.__mig_resource_name, self.__server_group_name,
                **self.__mig_resource_kwargs).EXPECT(
                    ov_factory.value_list_path_contains('size', jp.NUM_EQ(2))))

        payload = self.agent.make_json_payload_from_kwargs(
            job=job,
            description=self.__mig_title + ' Test - resize to 2 instances',
            application=self.TEST_APP)

        return st.OperationContract(self.new_post_operation(
            title='resize_instances', data=payload, path=self.__path),
                                    contract=builder.build())
Example #18
    def delete_server_group(self):
        """Creates OperationContract for deleteServerGroup.

    To verify the operation, we just check that the GCP managed instance group
    is no longer visible on GCP (or is in the process of terminating).
    """
        bindings = self.bindings
        group_name = '{app}-{stack}-v000'.format(app=self.TEST_APP,
                                                 stack=bindings['TEST_STACK'])

        # TODO(ttomsu): Change this back from asgName to serverGroupName
        #               once it is fixed in orca.
        payload = self.agent.make_json_payload_from_kwargs(
            job=[{
                'cloudProvider': 'gce',
                'serverGroupName': group_name,
                'region': bindings['TEST_GCE_REGION'],
                'zone': bindings['TEST_GCE_ZONE'],
                'asgName': group_name,
                'type': 'destroyServerGroup',
                'regions': [bindings['TEST_GCE_REGION']],
                'zones': [bindings['TEST_GCE_ZONE']],
                'credentials': bindings['SPINNAKER_GOOGLE_ACCOUNT'],
                'user': '******'
            }],
            application=self.TEST_APP,
            description='DestroyServerGroup: ' + group_name)

        builder = gcp.GcpContractBuilder(self.gcp_observer)
        (builder.new_clause_builder(
            'Managed Instance Group Removed').inspect_resource(
                'instanceGroupManagers', group_name).EXPECT(
                    ov_factory.error_list_contains(
                        gcp.HttpErrorPredicate(http_code=404))).OR(
                            ov_factory.value_list_path_contains(
                                'targetSize', jp.NUM_EQ(0))))

        (builder.new_clause_builder(
            'Instances Are Removed',
            retryable_for_secs=30).list_resource('instances').EXPECT(
                ov_factory.value_list_path_excludes(
                    'name', jp.STR_SUBSTR(group_name))))

        return st.OperationContract(self.new_post_operation(
            title='delete_server_group', data=payload, path='tasks'),
                                    contract=builder.build())
Example #19
  def run_disable_and_destroy_google_pipeline(self, pipeline_id):
    path = 'pipelines/{app}/{id}'.format(app=self.TEST_APP,
                                         id=self.google_destroy_pipeline_id)
    group_name = '{app}-{stack}-v000'.format(
        app=self.TEST_APP, stack=self.bindings['TEST_STACK'])
    builder = gcp.GcpContractBuilder(self.gcp_observer)
    (builder.new_clause_builder('Managed Instance Group Destroyed')
     .inspect_resource('instanceGroupManagers', group_name)
     .EXPECT(ov_factory.error_list_contains(
         gcp.HttpErrorPredicate(http_code=404)))
     .OR(ov_factory.value_list_path_contains('targetSize', jp.NUM_EQ(0))))

    return st.OperationContract(
        self.new_post_operation(
            title='run_destroy_pipeline',
            data='',
            path=path,
            max_wait_secs=480),  # Allow 8 mins to disable and destroy
        contract=jc.Contract())
Example #20
    def delete_server_group(self):
        """Creates OperationContract for deleteServerGroup.

    To verify the operation, we just check that the AWS Auto Scaling Group
    is no longer visible on AWS (or is in the process of terminating).
    """
        bindings = self.bindings

        group_name = '{app}-{stack}-v000'.format(app=self.TEST_APP,
                                                 stack=bindings['TEST_STACK'])

        payload = self.agent.make_json_payload_from_kwargs(
            job=[{
                'cloudProvider': 'aws',
                'type': 'destroyServerGroup',
                'serverGroupName': group_name,
                'asgName': group_name,
                'region': bindings['TEST_AWS_REGION'],
                'regions': [bindings['TEST_AWS_REGION']],
                'credentials': bindings['SPINNAKER_AWS_ACCOUNT'],
                'user': '******'
            }],
            application=self.TEST_APP,
            description='DestroyServerGroup: ' + group_name)

        builder = aws.AwsContractBuilder(self.aws_observer)
        (builder.new_clause_builder(
            'Auto Scaling Group Removed').collect_resources(
                'autoscaling',
                'describe-auto-scaling-groups',
                args=['--auto-scaling-group-names', group_name],
                no_resources_ok=True).contains_path_match(
                    'AutoScalingGroups', {'MaxSize': jp.NUM_EQ(0)}))

        (builder.new_clause_builder(
            'Instances Are Removed', retryable_for_secs=30).collect_resources(
                'ec2', 'describe-instances',
                no_resources_ok=True).excludes_path_value('name', group_name))

        return st.OperationContract(self.new_post_operation(
            title='delete_server_group', data=payload, path='tasks'),
                                    contract=builder.build())
Example #21
    def delete_server_group(self, version='v000'):
        """Creates OperationContract for deleteServerGroup.

    To verify the operation, we just check that the Kubernetes container
    is no longer visible (or is in the process of terminating).
    """
        bindings = self.bindings
        group_name = frigga.Naming.server_group(app=self.TEST_APP,
                                                stack=bindings['TEST_STACK'],
                                                version=version)

        payload = self.agent.make_json_payload_from_kwargs(
            job=[{
                'cloudProvider': 'kubernetes',
                'type': 'destroyServerGroup',
                'account': bindings['SPINNAKER_KUBERNETES_ACCOUNT'],
                'credentials': bindings['SPINNAKER_KUBERNETES_ACCOUNT'],
                'user': '******',
                'serverGroupName': group_name,
                'asgName': group_name,
                'regions': [self.TEST_NAMESPACE],
                'namespace': self.TEST_NAMESPACE,
                'region': self.TEST_NAMESPACE,
                'zones': [self.TEST_NAMESPACE],
                'interestingHealthProviderNames': ['KubernetesService']
            }],
            application=self.TEST_APP,
            description='Destroy Server Group: ' + group_name)

        builder = kube.KubeContractBuilder(self.kube_observer)
        (builder.new_clause_builder('Replica Set Removed').get_resources(
            'rs',
            extra_args=[group_name, '--namespace',
                        self.TEST_NAMESPACE]).EXPECT(
                            ov_factory.value_list_path_contains(
                                'targetSize', jp.NUM_EQ(0))).OR(
                                    self.__not_found_observation_predicate()))

        return st.OperationContract(self.new_post_operation(
            title='delete_server_group', data=payload, path='tasks'),
                                    contract=builder.build())
Example #22
  def trigger_bake_and_deploy_google_pipeline(self):
    path = 'applications/{app}/pipelines'.format(app=self.TEST_APP)

    group_name = '{app}-{stack}-v000'.format(
        app=self.TEST_APP, stack=self.bindings['TEST_STACK'])
    builder = gcp.GcpContractBuilder(self.gcp_observer)
    (builder.new_clause_builder('Managed Instance Group Deployed',
                                retryable_for_secs=30)
     .inspect_resource('instanceGroupManagers', group_name)
     .EXPECT(ov_factory.value_list_path_contains('targetSize', jp.NUM_EQ(1))))

    return st.OperationContract(
        self.jenkins_agent.new_jenkins_trigger_operation(
            title='monitor_bake_pipeline',
            job=self.bindings['JENKINS_JOB'],
            token=self.bindings['JENKINS_TOKEN'],
            status_class=gate.GatePipelineStatus,
            status_path=path,
            max_wait_secs=480),  # Allow 8 mins to bake and deploy
        contract=builder.build(),
        cleanup=self.delete_baked_image)
Example #23
    def test_list_match_ok_and_bad(self):
        context = ExecutionContext()
        source = [1, 2, 3]
        want = [jp.NUM_EQ(1), jp.NUM_EQ(-1), jp.NUM_EQ(3)]
        result = jp.LIST_MATCHES(want)(context, source)

        expect = (jp.SequencedPredicateResultBuilder(
            jp.LIST_MATCHES(want)).append_result(
                jp.MapPredicate(jp.NUM_EQ(1))(context, source)).append_result(
                    jp.MapPredicate(jp.NUM_EQ(-1))(
                        context, source)).append_result(
                            jp.MapPredicate(jp.NUM_EQ(3))(
                                context, source)).build(False))

        self.assertFalse(result)
        self.assertEquals(expect, result)
Example #24
    def resize_server_group(self):
        job = [{
            'targetSize': 2,
            'capacity': {
                'min': 2,
                'max': 2,
                'desired': 2
            },
            'replicaPoolName': self.__server_group_name,
            'numReplicas': 2,
            'region': self.TEST_REGION,
            'zone': self.TEST_ZONE,
            'asgName': self.__server_group_name,
            'serverGroupName': self.__server_group_name,
            'type': 'resizeServerGroup',
            'regions': [self.TEST_REGION],
            'zones': [self.TEST_ZONE],
            'credentials': self.bindings['SPINNAKER_GOOGLE_ACCOUNT'],
            'cloudProvider': 'gce',
            'user': '******'
        }]
        job[0].update(self.__mig_payload_extra)

        builder = gcp.GcpContractBuilder(self.gcp_observer)
        (builder.new_clause_builder(
            self.__mig_title + ' Resized',
            retryable_for_secs=90).inspect_resource(
                self.__mig_resource_name, self.__server_group_name,
                **self.__mig_resource_kwargs).EXPECT(
                    ov_factory.value_list_path_contains('size', jp.NUM_EQ(2))))

        payload = self.agent.make_json_payload_from_kwargs(
            job=job,
            description=self.__mig_title + ' Test - resize to 2 instances',
            application=self.TEST_APP)

        return st.OperationContract(self.new_post_operation(
            title='resize_instances', data=payload, path=self.__path),
                                    contract=builder.build())
Example #25
    def run_disable_and_destroy_google_pipeline(self, pipeline_id):
        path = "pipelines/{app}/{id}".format(
            app=self.TEST_APP, id=self.google_destroy_pipeline_id)
        group_name = "{app}-{stack}-v000".format(
            app=self.TEST_APP, stack=self.bindings["TEST_STACK"])
        builder = gcp.GcpContractBuilder(self.gcp_observer)
        (builder.new_clause_builder(
            "Managed Instance Group Destroyed").inspect_resource(
                "instanceGroupManagers", group_name).EXPECT(
                    ov_factory.error_list_contains(
                        gcp.HttpErrorPredicate(http_code=404))).OR(
                            ov_factory.value_list_path_contains(
                                "targetSize", jp.NUM_EQ(0))))

        return st.OperationContract(
            self.new_post_operation(
                title="run_destroy_pipeline",
                data="",
                path=path,
                max_wait_secs=1080),  # Allow 18 mins to disable and destroy.
            contract=jc.Contract(),
            cleanup=self.delete_baked_image,
        )
Example #26
    def upsert_load_balancer(self, use_vpc):
        """Creates OperationContract for upsertLoadBalancer.

    Calls Spinnaker's upsertLoadBalancer with a configuration, then verifies
    that the expected resources and configurations are visible on AWS. See
    the contract builder for more info on what the expectations are.

    Args:
      use_vpc: [bool] If True, configure a VPC; otherwise don't.
    """
        bindings = self.bindings
        context = citest.base.ExecutionContext()

        # We're assuming that the given region has 'A' and 'B' availability
        # zones. This seems conservative but might be brittle since we permit
        # any region.
        region = bindings['TEST_AWS_REGION']
        avail_zones = [region + 'a', region + 'b']
        load_balancer_name = self.lb_name

        if use_vpc:
            # TODO(ewiseblatt): 20160301
            # We're hardcoding the VPC here, but not sure which we really want.
            # I think this comes from the spinnaker.io installation instructions.
            # What's interesting about this is that it is a 10.* CidrBlock,
            # as opposed to the others, which are public IPs. All this is sensitive
            # as to where the TEST_AWS_VPC_ID came from so this is going to be
            # brittle. Ideally we only need to know the vpc_id and can figure the
            # rest out based on what we have available.
            subnet_type = 'internal (defaultvpc)'
            vpc_id = bindings['TEST_AWS_VPC_ID']

            # Not really sure how to determine this value in general.
            security_groups = ['default']

            # The resulting load balancer will only be available in the zone of
            # the subnet we are using. We'll figure that out by looking up the
            # subnet we want.
            subnet_details = self.aws_observer.get_resource_list(
                context,
                root_key='Subnets',
                aws_command='describe-subnets',
                aws_module='ec2',
                args=[
                    '--filters', 'Name=vpc-id,Values={vpc_id}'
                    ',Name=tag:Name,Values=defaultvpc.internal.{region}'.
                    format(vpc_id=vpc_id, region=region)
                ])
            try:
                expect_avail_zones = [subnet_details[0]['AvailabilityZone']]
            except KeyError:
                raise ValueError(
                    'vpc_id={0} appears to be unknown'.format(vpc_id))
        else:
            subnet_type = ""
            vpc_id = None
            security_groups = None
            expect_avail_zones = avail_zones

            # This will be a second load balancer not used in other tests.
            # Decorate the name so as not to confuse it.
            load_balancer_name += '-pub'

        listener = {'Listener': {'InstancePort': 80, 'LoadBalancerPort': 80}}
        health_check = {
            'HealthyThreshold': 8,
            'UnhealthyThreshold': 3,
            'Interval': 12,
            'Timeout': 6,
            'Target': 'HTTP:%d/' % listener['Listener']['InstancePort']
        }

        payload = self.agent.make_json_payload_from_kwargs(
            job=[{
                'type': 'upsertLoadBalancer',
                'cloudProvider': 'aws',
                # 'loadBalancerName': load_balancer_name,
                'credentials': bindings['SPINNAKER_AWS_ACCOUNT'],
                'name': load_balancer_name,
                'stack': bindings['TEST_STACK'],
                'detail': self.lb_detail,
                'region': bindings['TEST_AWS_REGION'],
                'availabilityZones': {region: avail_zones},
                'regionZones': avail_zones,
                'listeners': [{
                    'internalProtocol': 'HTTP',
                    'internalPort': listener['Listener']['InstancePort'],
                    'externalProtocol': 'HTTP',
                    'externalPort': listener['Listener']['LoadBalancerPort']
                }],
                'healthCheck': health_check['Target'],
                'healthCheckProtocol': 'HTTP',
                'healthCheckPort': listener['Listener']['LoadBalancerPort'],
                'healthCheckPath': '/',
                'healthTimeout': health_check['Timeout'],
                'healthInterval': health_check['Interval'],
                'healthyThreshold': health_check['HealthyThreshold'],
                'unhealthyThreshold': health_check['UnhealthyThreshold'],
                'user': '******',
                'usePreferredZones': True,
                'vpcId': vpc_id,
                'subnetType': subnet_type,
                # If I set security group to this then I get an error it is missing.
                # bindings['TEST_AWS_SECURITY_GROUP_ID']],
                'securityGroups': security_groups
            }],
            description='Create Load Balancer: ' + load_balancer_name,
            application=self.TEST_APP)

        builder = aws.AwsContractBuilder(self.aws_observer)
        (builder.new_clause_builder(
            'Load Balancer Added', retryable_for_secs=10).collect_resources(
                aws_module='elb',
                command='describe-load-balancers',
                args=[
                    '--load-balancer-names', load_balancer_name
                ]).contains_path_match(
                    'LoadBalancerDescriptions', {
                        'HealthCheck': jp.DICT_MATCHES({
                            key: jp.EQUIVALENT(value)
                            for key, value in health_check.items()
                        }),
                        'AvailabilityZones': jp.LIST_SIMILAR(expect_avail_zones),
                        'ListenerDescriptions/Listener': jp.DICT_MATCHES({
                            key: jp.NUM_EQ(value)
                            for key, value in listener['Listener'].items()
                        })
                    }))

        title_decorator = '_with_vpc' if use_vpc else '_without_vpc'
        return st.OperationContract(self.new_post_operation(
            title='upsert_load_balancer' + title_decorator,
            data=payload,
            path='tasks'),
                                    contract=builder.build())
Example #27
    def _add_contract_clauses(self, contract_builder, upsert):
        '''Add the proper predicates to the contract builder for a given
    upsert description.
    '''
        host_rules = upsert['hostRules']  # Host rules will be distinct.
        backend_services = [upsert['defaultService']]
        for host_rule in host_rules:
            path_matcher = host_rule['pathMatcher']
            backend_services.append(path_matcher['defaultService'])
            for path_rule in path_matcher['pathRules']:
                backend_services.append(path_rule['backendService'])
        health_checks = [
            service['healthCheck'] for service in backend_services
        ]

        hc_clause_builder = (contract_builder.new_clause_builder(
            'Health Checks Created',
            retryable_for_secs=30).list_resource('httpHealthChecks'))
        for hc in health_checks:
            hc_clause_builder.contains_match({
                'name': jp.STR_EQ(hc['name']),
                'requestPath': jp.STR_EQ(hc['requestPath']),
                'port': jp.NUM_EQ(hc['port'])
            })

        bs_clause_builder = (contract_builder.new_clause_builder(
            'Backend Services Created',
            retryable_for_secs=30).list_resource('backendServices'))
        for bs in backend_services:
            bs_clause_builder.contains_match({
                'name': jp.STR_EQ(bs['name']),
                'portName': jp.STR_EQ('http'),
                'healthChecks': jp.LIST_MATCHES(
                    [jp.STR_EQ(self._get_hc_link(bs['healthCheck']['name']))])
            })

        url_map_clause_builder = (contract_builder.new_clause_builder(
            'Url Map Created', retryable_for_secs=30).list_resource('urlMaps'))
        for hr in host_rules:
            pm = hr['pathMatcher']

            path_rules_spec = [
                jp.DICT_MATCHES({
                    'service': jp.STR_EQ(
                        self._get_bs_link(pr['backendService']['name'])),
                    'paths': jp.LIST_MATCHES(
                        [jp.STR_EQ(path) for path in pr['paths']])
                }) for pr in pm['pathRules']
            ]

            path_matchers_spec = {
                'defaultService': jp.STR_EQ(
                    self._get_bs_link(pm['defaultService']['name'])),
                'pathRules': jp.LIST_MATCHES(path_rules_spec)
            }

            url_map_clause_builder.contains_match({
                'name': jp.STR_EQ(self.__lb_name),
                'defaultService': jp.STR_EQ(
                    self._get_bs_link(upsert['defaultService']['name'])),
                'hostRules/hosts': jp.LIST_MATCHES(
                    [jp.STR_SUBSTR(host) for host in hr['hostPatterns']]),
                'pathMatchers': jp.LIST_MATCHES(
                    [jp.DICT_MATCHES(path_matchers_spec)]),
            })

        port_string = '443-443'
        if upsert['certificate'] == '':
            port_string = '%s-%s' % (upsert['portRange'], upsert['portRange'])

        (contract_builder.new_clause_builder(
            'Forwarding Rule Created', retryable_for_secs=30).list_resource(
                'globalForwardingRules').contains_match({
                    'name': jp.STR_EQ(self.__lb_name),
                    'portRange': jp.STR_EQ(port_string)
                }))

        proxy_clause_builder = contract_builder.new_clause_builder(
            'Target Proxy Created', retryable_for_secs=30)
        self._add_proxy_clause(upsert['certificate'], proxy_clause_builder)
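
For reference, here is a hypothetical upsert description with the shape _add_contract_clauses walks: a default backend service, one host rule whose path matcher has its own default service plus a single path rule, and the certificate/portRange fields used to derive the forwarding-rule port string. Every name below is illustrative only, not taken from any test configuration.

# Hypothetical upsert description; field names follow the accesses made by
# _add_contract_clauses above, but all values are made up for illustration.
example_upsert = {
    'defaultService': {
        'name': 'default-bs',
        'healthCheck': {'name': 'default-hc', 'requestPath': '/', 'port': 80},
    },
    'hostRules': [{
        'hostPatterns': ['example.com'],
        'pathMatcher': {
            'defaultService': {
                'name': 'pm-default-bs',
                'healthCheck': {'name': 'pm-hc', 'requestPath': '/', 'port': 80},
            },
            'pathRules': [{
                'paths': ['/api/*'],
                'backendService': {
                    'name': 'api-bs',
                    'healthCheck': {'name': 'api-hc',
                                    'requestPath': '/healthz', 'port': 8080},
                },
            }],
        },
    }],
    'certificate': '',   # empty => port string comes from portRange
    'portRange': '80',
}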
Example #28
  def create_http_load_balancer(self):
    logical_http_lb_name = 'katotest-httplb-' + self.test_id
    self.__use_http_lb_name = logical_http_lb_name

    # TODO(ewiseblatt): 20150530
    # This needs to be abbreviated to hc.
    self.__use_http_lb_hc_name = logical_http_lb_name + '-health-check'

    # TODO(ewiseblatt): 20150530
    # This needs to be abbreviated to bs.
    self.__use_http_lb_bs_name = logical_http_lb_name + '-backend-service'
    self.__use_http_lb_fr_name = logical_http_lb_name

    # TODO(ewiseblatt): 20150530
    # This should be abbreviated (um?).
    self.__use_http_lb_map_name = logical_http_lb_name + '-url-map'

    # TODO(ewiseblatt): 20150530
    # This should be abbreviated (px)?.
    self.__use_http_lb_proxy_name = logical_http_lb_name + '-target-http-proxy'

    interval = 231
    healthy = 8
    unhealthy = 9
    timeout = 65
    path = '/hello/world'

    # TODO(ewiseblatt): 20150530
    # This field might be broken. 123-456 still resolves to 80-80
    # Changing it for now so the test passes.
    port_range = "80-80"

    # TODO(ewiseblatt): 20150530
    # Specify explicit backends?

    health_check = {
        'checkIntervalSec': interval,
        'healthyThreshold': healthy,
        'unhealthyThreshold': unhealthy,
        'timeoutSec': timeout,
        'requestPath': path
        }

    # pylint: disable=bad-continuation
    payload = self.agent.type_to_payload(
        'createGoogleHttpLoadBalancerDescription',
        {
          'healthCheck': health_check,
          'portRange': port_range,
          'loadBalancerName': logical_http_lb_name,
          'credentials': self.bindings['SPINNAKER_GOOGLE_ACCOUNT']
        })

    hc_dict = dict(health_check)
    del hc_dict['requestPath']
    hc_match = {name: jp.NUM_EQ(value)
                for name, value in hc_dict.items()}
    hc_match['requestPath'] = jp.STR_EQ(path)
    hc_match['name'] = jp.STR_SUBSTR(self.__use_http_lb_hc_name)
    builder = gcp.GcpContractBuilder(self.gcp_observer)
    (builder.new_clause_builder('Http Health Check Added')
        .list_resource('httpHealthChecks')
        .contains_match(hc_match))
    (builder.new_clause_builder('Global Forwarding Rule Added',
                                retryable_for_secs=15)
       .list_resource('globalForwardingRules')
       .contains_match({
          'name': jp.STR_SUBSTR(self.__use_http_lb_fr_name),
          'portRange': jp.STR_EQ(port_range)}))
    (builder.new_clause_builder('Backend Service Added')
       .list_resource('backendServices')
       .contains_match({
           'name': jp.STR_SUBSTR(self.__use_http_lb_bs_name),
           'healthChecks': jp.STR_SUBSTR(self.__use_http_lb_hc_name)}))
    (builder.new_clause_builder('Url Map Added')
       .list_resource('urlMaps')
       .contains_match({
          'name': jp.STR_SUBSTR(self.__use_http_lb_map_name),
          'defaultService': jp.STR_SUBSTR(self.__use_http_lb_bs_name)}))
    (builder.new_clause_builder('Target Http Proxy Added')
       .list_resource('targetHttpProxies')
       .contains_match({
          'name': jp.STR_SUBSTR(self.__use_http_lb_proxy_name),
          'urlMap': jp.STR_SUBSTR(self.__use_http_lb_map_name)}))

    return st.OperationContract(
        self.new_post_operation(
            title='create_http_load_balancer', data=payload, path='ops'),
        contract=builder.build())
Example #29
    def create_server_group(self):
        """Creates OperationContract for createServerGroup.

    To verify the operation, we just check that the AWS Auto Scaling Group
    for the server group was created.
    """
        bindings = self.bindings

        # Spinnaker determines the group name created,
        # which will be the following:
        group_name = '{app}-{stack}-v000'.format(app=self.TEST_APP,
                                                 stack=bindings['TEST_STACK'])

        region = bindings['TEST_AWS_REGION']
        avail_zones = [region + 'a', region + 'b']

        payload = self.agent.make_json_payload_from_kwargs(
            job=[{
                'type': 'createServerGroup',
                'cloudProvider': 'aws',
                'application': self.TEST_APP,
                'credentials': bindings['SPINNAKER_AWS_ACCOUNT'],
                'strategy': '',
                'capacity': {
                    'min': 2,
                    'max': 2,
                    'desired': 2
                },
                'targetHealthyDeployPercentage': 100,
                'loadBalancers': [self.lb_name],
                'cooldown': 8,
                'healthCheckType': 'EC2',
                'healthCheckGracePeriod': 40,
                'instanceMonitoring': False,
                'ebsOptimized': False,
                'iamRole': bindings['AWS_IAM_ROLE'],
                'terminationPolicies': ['Default'],
                'availabilityZones': {
                    region: avail_zones
                },
                'keyPair': bindings['SPINNAKER_AWS_ACCOUNT'] + '-keypair',
                'suspendedProcesses': [],
                # TODO(ewiseblatt): Inquiring about how this value is determined.
                # It seems to be the "Name" tag value of one of the VPCs
                # but is not the default VPC, which is what we are using as the VPC_ID.
                # So I suspect something is out of whack. This name comes from
                # spinnaker.io tutorial. But using the default vpc would probably
                # be more adaptive to the particular deployment.
                'subnetType': 'internal (defaultvpc)',
                'securityGroups': [bindings['TEST_AWS_SECURITY_GROUP_ID']],
                'virtualizationType': 'paravirtual',
                'stack': bindings['TEST_STACK'],
                'freeFormDetails': '',
                'amiName': bindings['TEST_AWS_AMI'],
                'instanceType': 'm1.small',
                'useSourceCapacity': False,
                'account': bindings['SPINNAKER_AWS_ACCOUNT'],
                'user': '******'
            }],
            description='Create Server Group in ' + group_name,
            application=self.TEST_APP)

        builder = aws.AwsContractBuilder(self.aws_observer)
        (builder.new_clause_builder(
            'Auto Server Group Added',
            retryable_for_secs=30).collect_resources(
                'autoscaling',
                'describe-auto-scaling-groups',
                args=['--auto-scaling-group-names', group_name
                      ]).contains_path_match('AutoScalingGroups',
                                             {'MaxSize': jp.NUM_EQ(2)}))

        return st.OperationContract(self.new_post_operation(
            title='create_server_group', data=payload, path='tasks'),
                                    contract=builder.build())
Example #30
    def upsert_load_balancer(self):
        """Creates OperationContract for upsertLoadBalancer.

    Calls Spinnaker's upsertLoadBalancer with a configuration, then verifies
    that the expected resources and configurations are visible on GCE. See
    the contract builder for more info on what the expectations are.
    """
        bindings = self.bindings
        target_pool_name = '{0}/targetPools/{1}-tp'.format(
            bindings['TEST_GCE_REGION'], self.__lb_name)

        spec = {
            'checkIntervalSec': 9,
            'healthyThreshold': 3,
            'unhealthyThreshold': 5,
            'timeoutSec': 2,
            'port': 80
        }

        payload = self.agent.make_json_payload_from_kwargs(
            job=[{
                'cloudProvider': 'gce',
                'provider': 'gce',
                'stack': bindings['TEST_STACK'],
                'detail': self.__lb_detail,
                'credentials': bindings['SPINNAKER_GOOGLE_ACCOUNT'],
                'region': bindings['TEST_GCE_REGION'],
                'ipProtocol': 'TCP',
                'portRange': spec['port'],
                'loadBalancerName': self.__lb_name,
                'name': self.__lb_name,
                'healthCheck': {
                    'port': spec['port'],
                    'timeoutSec': spec['timeoutSec'],
                    'checkIntervalSec': spec['checkIntervalSec'],
                    'healthyThreshold': spec['healthyThreshold'],
                    'unhealthyThreshold': spec['unhealthyThreshold'],
                },
                'type': 'upsertLoadBalancer',
                'availabilityZones': {
                    bindings['TEST_GCE_REGION']: []
                },
                'user': '******'
            }],
            description='Create Load Balancer: ' + self.__lb_name,
            application=self.TEST_APP)

        hc_match_spec = {key: jp.NUM_EQ(value) for key, value in spec.items()}
        hc_match_spec['name'] = jp.STR_SUBSTR('%s-hc' % self.__lb_name)
        builder = gcp.GcpContractBuilder(self.gcp_observer)
        (builder.new_clause_builder(
            'Health Check Added',
            retryable_for_secs=30).list_resource('httpHealthChecks').EXPECT(
                ov_factory.value_list_contains(
                    jp.DICT_MATCHES(hc_match_spec))))
        (builder.new_clause_builder(
            'Target Pool Added',
            retryable_for_secs=30).list_resource('targetPools').EXPECT(
                ov_factory.value_list_path_contains(
                    'name', jp.STR_SUBSTR('%s-tp' % self.__lb_name))))
        (builder.new_clause_builder(
            'Forwarding Rules Added',
            retryable_for_secs=30).list_resource('forwardingRules').EXPECT(
                ov_factory.value_list_contains(
                    jp.DICT_MATCHES({
                        'name': jp.STR_SUBSTR(self.__lb_name),
                        'target': jp.STR_SUBSTR(target_pool_name)
                    }))))

        return st.OperationContract(self.new_post_operation(
            title='upsert_load_balancer', data=payload, path='tasks'),
                                    contract=builder.build())