def tearDownClass(cls):
     """Tear down the Kubernetes test session.

     Uninstalls the 'k8s' deployment, clears the KUBERNETES_MASTER_IP
     environment variable, deletes the cfy profile for the session
     manager, then defers to the base class teardown.
     """
     eco_utils.execute_uninstall('k8s')
     try:
         del os.environ['KUBERNETES_MASTER_IP']
     except KeyError:
         # Variable was never set; nothing to clean up.
         pass
     eco_utils.execute_command('cfy profiles delete {0}'.format(
         os.environ['ECOSYSTEM_SESSION_MANAGER_IP']))
     super(KubernetesTestBase, cls).tearDownClass()
Example #2
0
 def test_scalelist(self):
     """Exercise the scaleuplist/scaledownlist workflows.

     Installs the scalelist example blueprint, verifies one initialized
     instance per node, scales up (expected counts: one=1, two=3,
     three=4, four=4), scales back down, and verifies the counts return
     to their initial values (node 'four' is not scaled down).
     """
     blueprint_id = 'scalelist-{0}'.format(self.application_prefix)
     utils.execute_command(
         'cfy blueprints upload cloudify_scalelist/'
         'examples/blueprint.yaml -b {0}'.format(blueprint_id))
     utils.create_deployment(blueprint_id)
     utils.execute_install(blueprint_id)

     def assert_init_count(node_id, expected):
         # Fetch fresh node instances and count only those in an
         # initialized state.
         instances = utils.get_node_instances(node_id, blueprint_id)
         self.assertEqual(
             len(self._only_init_nodeinstances(instances)), expected)

     # Baseline after install: one instance of every node.
     for node_id in ('one', 'two', 'three', 'four'):
         assert_init_count(node_id, 1)

     # Scale Up 1:no change 2:2 3:3
     if utils.execute_command(
             'cfy executions start scaleuplist -d {0} '
             '-p cloudify_scalelist/examples/scaleup_params.yaml'.format(
                 blueprint_id)):
         raise Exception('{0} scaleup failed.'.format(blueprint_id))
     sleep(5)
     assert_init_count('one', 1)
     assert_init_count('two', 3)
     assert_init_count('three', 4)
     assert_init_count('four', 4)

     # Undo Scaleup operations
     if utils.execute_command(
             'cfy executions start scaledownlist -d {0} '
             '-p cloudify_scalelist/examples/scaledown_params.yaml'.format(
                 blueprint_id)):
         # Bug fix: this message previously said "scaleup failed".
         raise Exception('{0} scaledown failed.'.format(blueprint_id))
     sleep(5)
     assert_init_count('one', 1)
     assert_init_count('two', 1)
     assert_init_count('three', 1)
     # Bug fix: the original asserted against a stale list fetched
     # before scaledown; re-fetch so the post-scaledown state is checked.
     assert_init_count('four', 4)
Example #3
0
 def install_ssh_key(self, blueprint_id):
     """Install the SSH key blueprint and verify both key secrets exist.

     The deployment is only a vehicle for creating the agent key
     secrets, so it is deleted immediately after install.
     """
     utils.upload_blueprint(SSH_KEY_BP_ZIP, blueprint_id, 'keys.yaml')
     utils.create_deployment(blueprint_id)
     utils.execute_install(blueprint_id)
     utils.execute_command(
         'cfy deployments delete -f {0}'.format(blueprint_id))
     # Both secrets must be present and non-empty after the install.
     keys_present = all(
         utils.get_secrets(secret_name)
         for secret_name in ('agent_key_private', 'agent_key_public'))
     if not keys_present:
         raise Exception(
             'agent_key_private or agent_key_public not in secrets')
Example #4
0
    def remove_deployment(self, deployment_id, nodes_to_check):
        """Uninstall a deployment and verify its resources are deleted.

        Runs the uninstall workflow with ignore_failure=true so teardown
        proceeds past individual node failures, then asserts that the
        given nodes' backing resources are gone.
        """
        uninstall_command = (
            'cfy executions start uninstall '
            '-p ignore_failure=true -d {0}'.format(deployment_id))
        execute_command(uninstall_command)

        deployment_nodes = get_deployment_resources_by_node_type_substring(
            deployment_id, self.node_type_prefix)

        self.check_resources_in_deployment_deleted(
            deployment_nodes, nodes_to_check)
Example #5
0
 def install_deployment_proxy_external(self, blueprint_id):
     """Install the existing-deployment proxy example and check outputs.

     Uploads and installs the test-blueprint-existing example as
     '<blueprint_id>-existing', then verifies the DeploymentProxy node
     exposed 'output1' in its captured deployment outputs.
     """
     parent_id = '{0}-existing'.format(blueprint_id)
     upload_command = (
         'cfy blueprints upload cloudify_deployment_proxy/'
         'examples/test-blueprint-existing.yaml -b {0}'.format(parent_id))
     utils.execute_command(upload_command)
     utils.create_deployment(parent_id, inputs={'test_id': blueprint_id})
     utils.execute_install(parent_id)
     resources = utils.get_deployment_resources_by_node_type_substring(
         parent_id, 'cloudify.nodes.DeploymentProxy')
     proxy_instance = resources[0]['instances'][0]
     deployment_outputs = proxy_instance['runtime_properties']['deployment']
     if 'output1' not in deployment_outputs['outputs']:
         raise Exception('output1 not in {0}'.format(deployment_outputs))
Example #6
0
 def install_rest(self, blueprint_id):
     """Install the REST plugin example and verify the call result.

     Installs example-4 with the current commit SHA as input, checks
     that 'commit' appears in the REST node's stored result, then
     uninstalls.
     """
     utils.execute_command(
         'cfy blueprints upload cloudify_rest/'
         'examples/example-4-blueprint.yaml -b {0}'.format(blueprint_id))
     utils.create_deployment(blueprint_id,
                             inputs={'commit': os.environ['CIRCLE_SHA1']})
     utils.execute_install(blueprint_id)
     rs = utils.get_deployment_resources_by_node_type_substring(
         blueprint_id, 'cloudify.rest.Requests')
     rest_instance = rs[0]['instances'][0]['runtime_properties']
     # NOTE(review): 'result_propeties' is misspelled — presumably it
     # matches the key actually written by the REST plugin; confirm
     # against the plugin source before renaming.
     if 'commit' not in rest_instance['result_propeties']:
         raise Exception('{0} not in {1}'.format(
             'commit', rest_instance['result_propeties']))
     utils.execute_uninstall(blueprint_id)
Example #7
0
 def install_cloud_init(self, blueprint_id):
     """Install the cloud-init example and validate the rendered config.

     The CloudConfig node must expose a 'cloud_config' runtime property
     containing the '#cloud-config' header.
     """
     utils.execute_command(
         'cfy blueprints upload cloudify_cloudinit/'
         'examples/simple.yaml -b {0}'.format(blueprint_id))
     utils.create_deployment(blueprint_id)
     utils.execute_install(blueprint_id)
     resources = utils.get_deployment_resources_by_node_type_substring(
         blueprint_id, 'cloudify.nodes.CloudInit.CloudConfig')
     first_instance = resources[0]['instances'][0]
     cloud_config = first_instance['runtime_properties']['cloud_config']
     if '#cloud-config' not in cloud_config:
         raise Exception('{0} not in {1}'.format('#cloud-config',
                                                 cloud_config))
     utils.execute_uninstall(blueprint_id)
Example #8
0
 def install_file(self, blueprint_id):
     """Install the files example and verify the file lifecycle.

     After install the file must exist inside the manager container;
     after uninstall it must be gone. `stat` exit status (via
     execute_command, which returns 0 on success) drives both checks.
     """
     file_path = '/tmp/{0}'.format(blueprint_id)
     utils.execute_command(
         'cfy blueprints upload cloudify_files/'
         'examples/simple.yaml -b {0}'.format(blueprint_id))
     utils.create_deployment(blueprint_id, inputs={'file_path': file_path})
     utils.execute_install(blueprint_id)
     stat_command = 'docker exec cfy_manager stat {0}'.format(file_path)
     if utils.execute_command(stat_command):
         raise Exception('{0} not written.'.format(file_path))
     utils.execute_uninstall(blueprint_id)
     if not utils.execute_command(stat_command):
         raise Exception('{0} not deleted.'.format(file_path))
    def remove_deployment(self, deployment_id, nodes_to_check):
        """Uninstall a deployment and assert its resources were removed.

        Failures of individual nodes are ignored (ignore_failure=true)
        so that as much of the teardown as possible runs before the
        deletion check.
        """
        utils.execute_command(
            'cfy executions start uninstall '
            '-p ignore_failure=true -d {0}'.format(
                deployment_id))

        deployment_nodes = (
            utils.get_deployment_resources_by_node_type_substring(
                deployment_id, self.node_type_prefix))

        self.check_resources_in_deployment_deleted(
            deployment_nodes, nodes_to_check)
 def node_instances_after_setup(self):
     """Assert every Azure node-instance resource exists in Azure.

     Each resource id gathered from local storage must be listable via
     the `az` CLI (exit code 0).
     """
     azure_resource_ids = utils.get_resource_ids_by_type(
         self.node_instances,
         'cloudify.azure.nodes',
         self.cfy_local.storage.get_node)
     for azure_resource_id in azure_resource_ids:
         command = 'az resource list --name {0}'.format(azure_resource_id)
         self.assertEqual(0, utils.execute_command(command))
Example #11
0
    def check_external_nodecellar(self):
        """Re-install nodecellar against already-existing AWS resources.

        Downloads the nodecellar blueprint, rewrites its AWS nodes to
        external-resource form using the ids from the live deployment,
        installs the result as 'nc-external', then uninstalls it.
        """
        work_dir = tempfile.mkdtemp()
        archive_zip = os.path.join(work_dir, 'blueprint.zip')
        archive_name = 'nodecellar-auto-scale-auto-heal-blueprint-master'
        aws_yaml_path = os.path.join(work_dir, archive_name, 'aws.yaml')
        blueprint_path = utils.create_blueprint(utils.NODECELLAR,
                                                archive_zip, work_dir,
                                                aws_yaml_path)

        # These nodes are kept verbatim instead of being rewritten to
        # external-resource form.
        skip_transform = [
            'aws', 'vpc', 'public_subnet', 'private_subnet',
            'ubuntu_trusty_ami'
        ]

        deployment_nodes = self.get_nc_deployment_nodes()
        external_blueprint_path = utils.create_external_resource_blueprint(
            blueprint_path,
            NC_AWS_NODES,
            deployment_nodes,
            resource_id_attr='aws_resource_id',
            nodes_to_keep_without_transform=skip_transform)

        # Install nc-external; execute_command is truthy on failure.
        if utils.execute_command(
                'cfy install {0} -b nc-external'.format(
                    external_blueprint_path)):
            raise Exception('Nodecellar external install failed.')

        # Un-install nc-external.
        if utils.execute_uninstall('nc-external'):
            raise Exception('Nodecellar external uninstall failed.')
 def install_network(self):
     """Deploy the Azure example network and verify its resources.

     Registers a cleanup for the resource group up front so a failed
     install still gets torn down.
     """
     build_suffix = os.environ['CIRCLE_BUILD_NUM']
     resource_group_name = 'cfyresource_group{0}'.format(build_suffix)
     self.addCleanup(
         self.teardown_failed_resource_group,
         resource_group_name)
     utils.create_deployment(
         'azure-example-network',
         inputs={'resource_suffix': build_suffix})
     # Give deployment-environment creation time to finish.
     sleep(30)
     utils.execute_command('cfy executions list --include-system-workflows')
     utils.execute_install('azure-example-network')
     self.check_resources_in_deployment('azure-example-network')
 def check_resources_in_deployment(self, deployment):
     """Assert each Azure resource of the deployment exists in Azure.

     Every resource name reported for 'cloudify.azure.nodes' types must
     be listable via the `az` CLI (exit code 0).
     """
     resource_names = utils.get_deployment_resource_names(
         deployment, 'cloudify.azure.nodes',
         'name',
         resource_id_key='name')
     for resource_name in resource_names:
         command = 'az resource list --name {0}'.format(resource_name)
         self.assertEqual(0, utils.execute_command(command))
Example #14
0
 def test_blueprints_valid(self):
     """Upload each example blueprint; any upload failure fails the test.

     A nonzero return from execute_command means the blueprint did not
     validate on upload.
     """
     for blueprint in ['aws', 'azure', 'gcp', 'openstack', 'hostpool']:
         failed = eco_utils.execute_command(
             'cfy blueprints upload {0}.yaml -b {0}-{1}'.format(
                 blueprint, self.application_prefix))
         if failed:
             # Bug fix: the original message read "must not be valid
             # check logs", which inverted the meaning.
             raise Exception(
                 'Blueprint {0}-{1} is not valid; check the logs.'.format(
                     blueprint, self.application_prefix))
 def check_nodecellar(self):
     """End-to-end nodecellar lifecycle on AWS.

     Installs nodecellar, scales it, verifies created resources, then
     re-installs it in external-resource form ('nc-external'),
     uninstalls both, and verifies resource deletion. Order matters:
     the external install reuses resources from the live 'nc'
     deployment, so 'nc' is only uninstalled at the end.
     """
     # AWS-backed nodes whose cloud resources are checked directly.
     aws_nodes = [
         'security_group', 'haproxy_nic', 'nodejs_nic', 'mongo_nic',
         'nodecellar_ip'
     ]
     # Host nodes checked via monitoring.
     monitored_nodes = [
         'haproxy_frontend_host', 'nodejs_host', 'mongod_host'
     ]
     failed = utils.install_nodecellar(
         blueprint_file_name=self.blueprint_file_name)
     if failed:
         raise Exception('Nodecellar install failed.')
     del failed
     self.addCleanup(self.cleanup_deployment, 'nc')
     # Scale the nodejs group and verify resources after scaling.
     failed = utils.execute_scale('nc', scalable_entity_name='nodejs_group')
     if failed:
         raise Exception('Nodecellar scale failed.')
     del failed
     deployment_nodes = \
         utils.get_deployment_resources_by_node_type_substring(
             'nc', 'cloudify')
     self.check_resources_in_deployment_created(deployment_nodes, aws_nodes)
     self.check_resources_in_deployment_created(deployment_nodes,
                                                monitored_nodes)
     # Build an external-resource variant of the blueprint that reuses
     # the resources created above instead of provisioning new ones.
     blueprint_dir = tempfile.mkdtemp()
     blueprint_zip = os.path.join(blueprint_dir, 'blueprint.zip')
     blueprint_archive = 'nodecellar-auto-scale-auto-heal-blueprint-master'
     download_path = \
         os.path.join(blueprint_dir, blueprint_archive, 'aws.yaml')
     blueprint_path = utils.create_blueprint(utils.NODECELLAR,
                                             blueprint_zip, blueprint_dir,
                                             download_path)
     # Nodes kept verbatim (not rewritten to external-resource form).
     skip_transform = [
         'aws', 'vpc', 'public_subnet', 'private_subnet',
         'ubuntu_trusty_ami'
     ]
     new_blueprint_path = utils.create_external_resource_blueprint(
         blueprint_path,
         aws_nodes,
         deployment_nodes,
         resource_id_attr='aws_resource_id',
         nodes_to_keep_without_transform=skip_transform)
     failed = utils.execute_command(
         'cfy install {0} -b nc-external'.format(new_blueprint_path))
     if failed:
         raise Exception('Nodecellar external install failed.')
     failed = utils.execute_uninstall('nc-external')
     if failed:
         raise Exception('Nodecellar external uninstall failed.')
     failed = utils.execute_uninstall('nc')
     if failed:
         raise Exception('Nodecellar uninstall failed.')
     del failed
     # Finally, all resources must be gone.
     self.check_resources_in_deployment_deleted(deployment_nodes, aws_nodes)
     self.check_resources_in_deployment_deleted(deployment_nodes,
                                                monitored_nodes)
 def test_kubernetes_blueprint(self):
     """Provision infrastructure, install Kubernetes, deploy Wordpress.

     Installs the infrastructure blueprint, extracts the load/node/
     master host IPs from its node instances, feeds them into the
     baremetal Kubernetes blueprint, and finally installs a Wordpress
     blueprint on the resulting cluster as a smoke test.
     """
     blueprint_path = 'tests/blueprint.yaml'
     blueprint_id = 'infra-{0}'.format(self.application_prefix)
     self.addCleanup(self.cleanup_deployment, blueprint_id)
     failed = eco_utils.execute_command('cfy install {0} -b {1}'.format(
         blueprint_path, blueprint_id))
     if failed:
         raise NonRecoverableError(
             'Failed to install the infrastructure blueprint.')
     # One host instance each for load balancer, worker node and master.
     load_host = eco_utils.get_node_instances('k8s_load_host',
                                              blueprint_id)[0]
     node_host = eco_utils.get_node_instances('k8s_node_host',
                                              blueprint_id)[0]
     master_host = eco_utils.get_node_instances('k8s_master_host',
                                                blueprint_id)[0]
     compute_blueprint_path = 'baremetal.yaml'
     compute_blueprint_id = 'kube-{0}'.format(self.application_prefix)
     self.addCleanup(self.cleanup_deployment, compute_blueprint_id)
     eco_utils.execute_command('cfy blueprints upload {0} -b {1}'.format(
         compute_blueprint_path, compute_blueprint_id))
     # Wire the infrastructure host IPs into the Kubernetes deployment.
     # NOTE(review): the agent_user value appears redacted ('******');
     # confirm the real value is supplied in the actual test source.
     eco_utils.create_deployment(
         compute_blueprint_id, {
             'public_master_ip':
             master_host.get('runtime_properties', {}).get('ip'),
             'k8s_node_host_ip':
             node_host.get('runtime_properties', {}).get('ip'),
             'k8s_load_host_ip':
             load_host.get('runtime_properties', {}).get('ip'),
             'agent_user':
             '******',
             'dashboard_ip':
             master_host.get('runtime_properties',
                             {}).get('public_ip_address')
         })
     eco_utils.execute_install(compute_blueprint_id)
     # Smoke test: a Wordpress blueprint must install on the cluster.
     check_blueprint = eco_utils.install_nodecellar
     failed = check_blueprint('examples/wordpress-blueprint.yaml',
                              blueprint_archive=WORDPRESS,
                              blueprint_id='wp')
     if failed:
         raise NonRecoverableError(
             'Failed to install the Wordpress blueprint.')
Example #17
0
    def check_resource_method(self, resources=None, name_property='name'):
        """Verify GCP compute resources exist via `gcloud compute`.

        For every resource whose node type maps to a gcloud compute
        resource kind, describe each instance by name and assert the
        command exits 0.
        """
        if not resources:
            return
        for resource in resources:
            # Only node types with a "gcloud compute" mapping can be
            # verified here; skip the rest.
            gcp_resource = CFY_GCP_COMPUTE_RESOURCE.get(
                resource['node_type'])
            if not gcp_resource:
                continue
            for instance in resource['instances']:
                # Resource name comes from the runtime properties.
                instance_name = instance['runtime_properties'].get(
                    name_property)
                describe_resource = GCLOUD_COMPUTE_COMMAND.format(
                    gcp_resource, instance_name)
                self.assertEqual(
                    0, utils.execute_command(describe_resource))
    def check_resource_method(self, resources=None, name_property='name'):
        """Assert each mapped GCP compute resource can be described.

        Resources whose node type has no "gcloud compute" mapping are
        skipped; for the rest, every instance must be describable by
        name with exit code 0.
        """
        for resource in (resources or []):
            node_type = resource['node_type']
            if not CFY_GCP_COMPUTE_RESOURCE.get(node_type):
                # No gcloud-compute mapping for this node type.
                continue
            gcp_resource = CFY_GCP_COMPUTE_RESOURCE[node_type]
            for instance in resource['instances']:
                # The resource name is stored in runtime_properties.
                resource_name = instance['runtime_properties'].get(
                    name_property)
                command = GCLOUD_COMPUTE_COMMAND.format(
                    gcp_resource, resource_name)
                self.assertEqual(0, utils.execute_command(command))
 def install_blueprint(blueprint_path, blueprint_id):
     """Run `cfy install` for the blueprint; raise on nonzero exit."""
     failed = utils.execute_command(
         'cfy install {0} -b {1}'.format(blueprint_path, blueprint_id))
     if failed:
         raise Exception('Install {0} failed.'.format(blueprint_id))
 def test_blueprints_valid(self):
     """Upload each example blueprint and fail the test on any error.

     Bug fix: the original captured the return code in `failed` but
     never checked it, so an invalid blueprint could not fail the test.
     """
     for blueprint in ['aws', 'azure', 'gcp', 'openstack', 'hostpool']:
         failed = eco_utils.execute_command(
             'cfy blueprints upload {0}.yaml'.format(blueprint))
         if failed:
             raise Exception(
                 'Blueprint {0} is not valid; check the logs.'.format(
                     blueprint))
 def test_wordpress(self):
     """Install the Wordpress example; a nonzero exit fails the test."""
     if eco_utils.execute_command(
             'cfy install examples/wordpress-blueprint.yaml -b wp'):
         raise Exception('Failed to install wordpress blueprint.')
 def teardown_failed_resource_group(self, resource_group_name):
     """Best-effort deletion of a leftover Azure resource group."""
     delete_command = 'az resource delete --name {0}'.format(
         resource_group_name)
     utils.execute_command(delete_command)
    def check_resource_method(self,
                              resource_id=None,
                              resource_type=None,
                              exists=True,
                              command=None):
        """Verify an AWS resource's existence (or absence) via the AWS CLI.

        Picks a describe/list command from ``command`` if given, or from
        ``resource_type`` / ``resource_id`` prefix heuristics otherwise,
        then asserts the exit code: 0 when the resource should exist,
        255 (AWS CLI "not found") when it should not.

        :param resource_id: AWS identifier (e.g. 'vpc-...') or IP.
        :param resource_type: Cloudify node type used to pick a command.
        :param exists: whether the resource is expected to exist.
        :param command: explicit CLI command overriding the heuristics.
        """
        # Bug fix: the original used Python 2 print statements, which
        # are syntax errors on Python 3; print() works on both.
        print('Checking AWS resource args {0} {1} {2} {3}'.format(
            resource_id, resource_type, exists, command))

        if not isinstance(resource_id, text_type):
            print('Warning resource_id is {0}'.format(resource_id))
            resource_id = str(resource_id)
        # Crude pacing to avoid AWS API throttling.
        sleep(1)
        if command:
            pass
        elif 'cloudify.nodes.aws.ec2.Vpc' == \
                resource_type or resource_id.startswith('vpc-'):
            command = 'aws ec2 describe-vpcs --vpc-ids {0}'.format(resource_id)
        elif 'cloudify.nodes.aws.ec2.InternetGateway' == \
                resource_type or resource_id.startswith('igw-'):
            command = 'aws ec2 describe-internet-gateways ' \
                      '--internet-gateway-ids {0}'.format(resource_id)
        elif 'cloudify.nodes.aws.ec2.Subnet' == \
                resource_type or resource_id.startswith('subnet-'):
            command = 'aws ec2 describe-subnets --subnet-ids {0}'.format(
                resource_id)
        elif 'cloudify.nodes.aws.ec2.RouteTable' == \
                resource_type or resource_id.startswith('rtb-'):
            command = \
                'aws ec2 describe-route-tables --route-table-ids {0}'.format(
                    resource_id)
        elif 'cloudify.nodes.aws.ec2.NATGateway' == \
                resource_type or resource_id.startswith('nat-'):
            command = \
                'aws ec2 describe-nat-gateways --nat-gateway-ids {0}'.format(
                    resource_id)
        elif 'cloudify.nodes.aws.ec2.ElasticIP' == \
                resource_type or \
                re.compile(IP_ADDRESS_REGEX).match(resource_id):
            command = 'aws ec2 describe-addresses --public-ips {0}'.format(
                resource_id)
        elif 'cloudify.nodes.aws.ec2.SecurityGroup' == \
                resource_type or resource_id.startswith('sg-'):
            command = \
                'aws ec2 describe-security-groups --group-ids {0}'.format(
                    resource_id)
        elif 'cloudify.nodes.aws.ec2.Interface' == \
                resource_type or resource_id.startswith('eni-'):
            command = 'aws ec2 describe-network-interfaces ' \
                      '--network-interface-ids {0}'.format(
                          resource_id)
        elif 'cloudify.nodes.aws.ec2.EBSVolume' == \
                resource_type or resource_id.startswith('vol-'):
            command = 'aws ec2 describe-volumes --volume-ids {0}'.format(
                resource_id)
        elif 'cloudify.nodes.aws.ec2.Instances' == \
                resource_type or resource_id.startswith('i-'):
            command = 'aws ec2 describe-instances --instance-ids {0}'.format(
                resource_id)
        # Bug fix: a second, identical NATGateway branch followed here in
        # the original; it was unreachable (shadowed by the branch above)
        # and has been removed.
        elif 'cloudify.nodes.aws.SQS.Queue' == resource_type:
            if not exists:
                return
            # Change queue url to name to get queue url.
            resource_id = resource_id.split('/')[-1]
            command = 'aws sqs get-queue-url --queue-name {0}'.format(
                resource_id)
        elif 'cloudify.nodes.aws.SNS.Topic' == resource_type:
            command = 'aws sns list-subscriptions-by-topic ' \
                      '--topic-arn {0}'.format(resource_id)
        elif 'cloudify.nodes.aws.s3.Bucket' == resource_type:
            command = 'aws s3 ls {0}'.format(resource_id)
        elif 'cloudify.nodes.aws.autoscaling.Group' == resource_type:
            command = 'aws autoscaling describe-auto-scaling-groups ' \
                      '--auto-scaling-group-names {0}'.format(resource_id)
        elif 'cloudify.nodes.aws.CloudFormation.Stack' == resource_type:
            sleep(1)
            command = 'aws cloudformation describe-stacks ' \
                      '--stack-name {0}'.format(resource_id)
        elif 'cloudify.nodes.aws.elb.Classic.LoadBalancer' == \
                resource_type:
            # NOTE(review): 'my-load-balancer' followed by the resource
            # id looks like a copy-paste from CLI docs; confirm the
            # intended arguments before relying on this branch.
            command = 'aws elb describe-load-balancers ' \
                      '--load-balancer-name my-load-balancer {0}'.format(
                          resource_id)
        elif resource_id.startswith('ami-'):
            # AMIs are pre-existing images; nothing to verify.
            return
        else:
            raise Exception('Unsupported type {0} for {1}.'.format(
                resource_type, resource_id))
        self.assertEqual(0 if exists else 255, utils.execute_command(command))
 def install_blueprint(blueprint_path, blueprint_id):
     """Install a blueprint with `cfy install`; raise if it fails."""
     install_command = 'cfy install {0} -b {1}'.format(
         blueprint_path, blueprint_id)
     if utils.execute_command(install_command):
         raise Exception('Install {0} failed.'.format(blueprint_id))
# Secrets every AWS test blueprint expects to find on the manager.
# Region values are fixed; credentials come from the CI environment.
secrets = {
    'ec2_region_endpoint': 'ec2.ap-northeast-1.amazonaws.com',
    'ec2_region_name': 'ap-northeast-1',
    'aws_region_name': 'ap-northeast-1',
    'availability_zone': 'ap-northeast-1b',
    'aws_availability_zone': 'ap-northeast-1b',
    'aws_secret_access_key': os.environ['AWS_SECRET_ACCESS_KEY'],
    'aws_access_key_id': os.environ['AWS_ACCESS_KEY_ID'],
    # Agent keys start empty; they are populated by the key blueprint.
    'agent_key_private': '',
    'agent_key_public': ''
}

# Create (or update, -u) each secret on the manager.
# NOTE(review): values are interpolated unquoted into the shell
# command; empty or whitespace-containing values depend on how
# execute_command tokenizes — confirm this is handled upstream.
for name, value in secrets.items():
    utils.execute_command(
        'cfy secrets create -u {0} -s {1}'.format(
            name, value
        )
    )

# Archive containing the SSH key blueprint used by install_ssh_key.
SSH_KEY_BP_ZIP = 'https://github.com/cloudify-examples/' \
                 'helpful-blueprint/archive/master.zip'


class TestAWS(EcosystemTestBase):

    @classmethod
    def setUpClass(cls):
        os.environ['ECOSYSTEM_SESSION_PASSWORD'] = '******'

    @classmethod
    def tearDownClass(cls):
Example #26
0
    """
    This method will generate the path file for the service account that
    need to be used when activate the service account
    :return:
    """
    service_account_map = populate_gcp_service_account()
    service_account_file, name = tempfile.mkstemp(suffix='.json')
    os.write(service_account_file, json.dumps(service_account_map))
    os.close(service_account_file)
    return name


if __name__ == '__main__':
    # Write the GCP service-account JSON to a temp file.
    file_path = generate_service_account_file()

    # Activate the service account; nonzero return means failure.
    if utils.execute_command(
            GCP_ACTIVATE_SERVICE_ACCOUNT.format(file_path)):
        raise GCPErrorCodeException(
            'Failed to activate service account command')

    # Point gcloud at the private test project.
    if utils.execute_command(
            GCP_SET_PROJECT.format(os.environ['GCP_PRIVATE_PROJECT_ID'])):
        raise GCPErrorCodeException('Failed to set google cloud project')
    def check_resource_method(self,
                              resource_id=None,
                              resource_type=None,
                              exists=True,
                              command=None):
        """Verify an AWS resource's existence (or absence) via the AWS CLI.

        Picks a describe/list command from ``command`` if given, or from
        ``resource_type`` / ``resource_id`` prefix heuristics otherwise,
        then asserts the exit code: 0 when the resource should exist,
        255 (AWS CLI "not found") when it should not.

        :param resource_id: AWS identifier (e.g. 'vpc-...') or IP.
        :param resource_type: Cloudify node type used to pick a command.
        :param exists: whether the resource is expected to exist.
        :param command: explicit CLI command overriding the heuristics.
        """
        # Bug fix: the original used Python 2 print statements, which
        # are syntax errors on Python 3; print() works on both.
        print('Checking AWS resource args {0} {1} {2} {3}'.format(
            resource_id, resource_type, exists, command))

        # NOTE(review): `basestring` is Python-2 only; switch to
        # six.string_types (or str) when migrating this module to 3.
        if not isinstance(resource_id, basestring):
            print('Warning resource_id is {0}'.format(resource_id))
            resource_id = str(resource_id)
        # Crude pacing to avoid AWS API throttling.
        sleep(1)
        if command:
            pass
        elif 'cloudify.nodes.aws.ec2.Vpc' == \
                resource_type or resource_id.startswith('vpc-'):
            command = 'aws ec2 describe-vpcs --vpc-ids {0}'.format(resource_id)
        elif 'cloudify.nodes.aws.ec2.InternetGateway' == \
                resource_type or resource_id.startswith('igw-'):
            command = 'aws ec2 describe-internet-gateways ' \
                      '--internet-gateway-ids {0}'.format(resource_id)
        elif 'cloudify.nodes.aws.ec2.Subnet' == \
                resource_type or resource_id.startswith('subnet-'):
            command = 'aws ec2 describe-subnets --subnet-ids {0}'.format(
                resource_id)
        elif 'cloudify.nodes.aws.ec2.RouteTable' == \
                resource_type or resource_id.startswith('rtb-'):
            command = \
                'aws ec2 describe-route-tables --route-table-ids {0}'.format(
                    resource_id)
        elif 'cloudify.nodes.aws.ec2.NATGateway' == \
                resource_type or resource_id.startswith('nat-'):
            command = \
                'aws ec2 describe-nat-gateways --nat-gateway-ids {0}'.format(
                    resource_id)
        elif 'cloudify.nodes.aws.ec2.ElasticIP' == \
                resource_type or \
                re.compile(IP_ADDRESS_REGEX).match(resource_id):
            command = 'aws ec2 describe-addresses --public-ips {0}'.format(
                resource_id)
        elif 'cloudify.nodes.aws.ec2.SecurityGroup' == \
                resource_type or resource_id.startswith('sg-'):
            command = \
                'aws ec2 describe-security-groups --group-ids {0}'.format(
                    resource_id)
        elif 'cloudify.nodes.aws.ec2.Interface' == \
                resource_type or resource_id.startswith('eni-'):
            command = 'aws ec2 describe-network-interfaces ' \
                      '--network-interface-ids {0}'.format(
                          resource_id)
        elif 'cloudify.nodes.aws.ec2.EBSVolume' == \
                resource_type or resource_id.startswith('vol-'):
            command = 'aws ec2 describe-volumes --volume-ids {0}'.format(
                resource_id)
        elif 'cloudify.nodes.aws.ec2.Instances' == \
                resource_type or resource_id.startswith('i-'):
            command = 'aws ec2 describe-instances --instance-ids {0}'.format(
                resource_id)
        # Bug fix: a second, identical NATGateway branch followed here in
        # the original; it was unreachable (shadowed by the branch above)
        # and has been removed.
        elif 'cloudify.nodes.aws.SQS.Queue' == resource_type:
            if not exists:
                return
            # Change queue url to name to get queue url.
            resource_id = resource_id.split('/')[-1]
            command = 'aws sqs get-queue-url --queue-name {0}'.format(
                resource_id)
        elif 'cloudify.nodes.aws.SNS.Topic' == resource_type:
            command = 'aws sns list-subscriptions-by-topic ' \
                      '--topic-arn {0}'.format(resource_id)
        elif 'cloudify.nodes.aws.s3.Bucket' == resource_type:
            command = 'aws s3 ls {0}'.format(resource_id)
        elif 'cloudify.nodes.aws.autoscaling.Group' == resource_type:
            command = 'aws autoscaling describe-auto-scaling-groups ' \
                      '--auto-scaling-group-names {0}'.format(resource_id)
        elif 'cloudify.nodes.aws.CloudFormation.Stack' == resource_type:
            sleep(1)
            command = 'aws cloudformation describe-stacks ' \
                      '--stack-name {0}'.format(resource_id)
        elif 'cloudify.nodes.aws.elb.Classic.LoadBalancer' == \
                resource_type:
            # NOTE(review): 'my-load-balancer' followed by the resource
            # id looks like a copy-paste from CLI docs; confirm the
            # intended arguments before relying on this branch.
            command = 'aws elb describe-load-balancers ' \
                      '--load-balancer-name my-load-balancer {0}'.format(
                          resource_id)
        elif resource_id.startswith('ami-'):
            # AMIs are pre-existing images; nothing to verify.
            return
        else:
            raise Exception('Unsupported type {0} for {1}.'.format(
                resource_type, resource_id))
        self.assertEqual(0 if exists else 255, utils.execute_command(command))
# Secrets every AWS test blueprint expects to find on the manager.
# Region values are fixed; credentials come from the CI environment.
secrets = {
    'ec2_region_endpoint': 'ec2.ap-northeast-1.amazonaws.com',
    'ec2_region_name': 'ap-northeast-1',
    'aws_region_name': 'ap-northeast-1',
    'availability_zone': 'ap-northeast-1b',
    'aws_availability_zone': 'ap-northeast-1b',
    'aws_secret_access_key': os.environ['AWS_SECRET_ACCESS_KEY'],
    'aws_access_key_id': os.environ['AWS_ACCESS_KEY_ID'],
    # Agent keys start empty; they are populated by the key blueprint.
    'agent_key_private': '',
    'agent_key_public': ''
}

# Create (or update, -u) each secret on the manager.
# NOTE(review): values are interpolated unquoted into the shell
# command; empty or whitespace-containing values depend on how
# execute_command tokenizes — confirm this is handled upstream.
for name, value in secrets.items():
    utils.execute_command(
        'cfy secrets create -u {0} -s {1}'.format(
            name, value
        )
    )

# Archive containing the SSH key blueprint used by install_ssh_key.
SSH_KEY_BP_ZIP = 'https://github.com/cloudify-examples/' \
                 'helpful-blueprint/archive/master.zip'


class TestAWS(EcosystemTestBase):

    @classmethod
    def setUpClass(cls):
        os.environ['ECOSYSTEM_SESSION_PASSWORD'] = '******'

    @classmethod
    def tearDownClass(cls):
    need to be used when activate the service account
    :return:
    """
    service_account_map = populate_gcp_service_account()
    service_account_file, name = tempfile.mkstemp(suffix='.json')
    os.write(service_account_file, json.dumps(service_account_map))
    os.close(service_account_file)
    return name


if __name__ == '__main__':
    # Get the file path for the generated service-account credentials.
    file_path = generate_service_account_file()

    # Enable service-account authorization; nonzero return is failure.
    activate_return_code = utils.execute_command(
        GCP_ACTIVATE_SERVICE_ACCOUNT.format(file_path))
    if activate_return_code:
        raise GCPErrorCodeException(
            'Failed to activate service account command')

    # Select the private test project for subsequent gcloud calls.
    set_project_return_code = utils.execute_command(
        GCP_SET_PROJECT.format(os.environ['GCP_PRIVATE_PROJECT_ID']))
    if set_project_return_code:
        raise GCPErrorCodeException(
            'Failed to set google cloud project')