def set_product(self, pricing_mgr, service_code):
        """Yield a ProductResponse for every product listed under *service_code*.

        Records that fail to convert are skipped with an error message so one
        malformed product does not abort the whole listing.
        """
        for raw in pricing_mgr.list_products(service_code):
            try:
                product_info = raw.get('product', {})
                region_name = self.get_region_name_from_location(
                    product_info.get('attributes', {}).get('location', ''))

                data = Product({
                    'service_code': raw.get('serviceCode'),
                    'region_name': region_name,
                    'product_family': product_info.get('productFamily'),
                    'sku': product_info.get('sku'),
                    'attributes': self.set_product_attributes(product_info.get('attributes')),
                    'publication_date': raw.get('publicationDate'),
                    'version': raw.get('version'),
                    'terms': self.get_terms(raw.get('terms', {}))
                }, strict=False)

                resource = ProductResource({
                    'data': data,
                    'region_code': region_name,
                    'reference': ReferenceModel(data.reference())
                })

                yield ProductResponse({'resource': resource})

            except Exception as e:
                print(f"[[ ERROR ]] {e}")
    def set_database_resource(vm_cluster_list, region):
        """Build ExadataDatabaseResponse objects for every database of every
        VM cluster, collecting each cluster's backups along the way.

        Args:
            vm_cluster_list: VM-cluster dicts, each optionally carrying
                'list_database' and 'list_backup' entries.
            region: region code stamped on every resource.

        Returns:
            tuple: (list of ExadataDatabaseResponse, flattened backup list).
        """
        result = []
        backup_list = []
        for vm_cluster in vm_cluster_list:
            db_list = vm_cluster.get('list_database', [])
            db_backup = vm_cluster.get('list_backup', [])

            # FIX: backups were previously extended inside the per-database
            # loop, duplicating each cluster's backups once per database (and
            # losing them entirely for clusters with no databases). Collect
            # them once per cluster instead.
            backup_list.extend(db_backup)

            for db in db_list:
                db_data = Database(db, strict=False)
                db_resource = ExadataDatabaseResource({
                    'data': db_data,
                    'region_code': region,
                    'reference': ReferenceModel(db_data.reference()),
                    'tags': db.get('_freeform_tags', []),
                    'name': db_data.db_name
                })
                result.append(
                    ExadataDatabaseResponse({'resource': db_resource}))
        return result, backup_list
    def collect_power_state(self, params):
        """Collect every Auto Scaling group (with its member instances) across
        the requested regions and return them as AutoScalingResponse objects."""
        print("** Auto Scaling Start **")
        start_time = time.time()

        auto_scaling_conn: AutoScalingConnector = self.locator.get_connector(
            self.connector_name, **params)
        ec2_conn: EC2Connector = self.locator.get_connector(
            'EC2Connector', **params)

        responses = []
        for region_name in params.get('regions', []):
            # Point both clients at the region before querying it.
            auto_scaling_conn.set_client(region_name)
            ec2_conn.set_client(region_name)

            for asg in auto_scaling_conn.describe_auto_scaling_groups():
                # Enrich the raw group with the detailed EC2 view of its members.
                asg['instances'] = self.get_asg_instances(
                    asg.get('Instances', []), ec2_conn)

                data = AutoScalingGroup(asg, strict=False)
                resource = AutoScalingResource({
                    'data': data,
                    'reference': ReferenceModel(data.reference())
                })
                responses.append(AutoScalingResponse({'resource': resource}))

        print(f' Auto Scaling Finished {time.time() - start_time} Seconds')
        return responses
    def set_image_resource(self, image_list, region, compartment):
        """Convert raw database-software-image records into
        ExadataDatabaseSoftwareImageResponse objects for the given
        region and compartment."""
        responses = []
        for raw in image_list:
            raw = self.convert_dictionary(raw)
            # Stamp collection context and normalize the freeform tags.
            raw['region'] = region
            raw['compartment_name'] = compartment.name
            raw['_freeform_tags'] = self.convert_tags(raw.get('_freeform_tags'))

            data = DatabaseSoftwareImage(raw, strict=False)
            resource = ExadataDatabaseSoftwareImageResource({
                'data': data,
                'region_code': region,
                'reference': ReferenceModel(data.reference()),
                'tags': raw.get('_freeform_tags', []),
                'name': data.display_name
            })
            responses.append(
                ExadataDatabaseSoftwareImageResponse({'resource': resource}))

        return responses
    def collect_cloud_services(self, params):
        """Collect Trusted Advisor checks and their results as CheckResponses.

        Honors two options: 'language' for the check descriptions and
        'refresh' to force AWS to recompute each check before reading it.
        """
        print("** Trusted Advisor Start **")
        start_time = time.time()

        ta_conn: TrustedAdvisorConnector = self.locator.get_connector(self.connector_name, **params)
        ta_conn.set_client()

        options = params.get('options', {})
        language = options.get('language', DEFAULT_LANGUAGE)
        need_refresh = options.get('refresh', DEFAULT_REFRESH)

        ta_resources = []
        for check in ta_conn.describe_trusted_advisor_checks(language):
            check_id_data = CheckId(check, strict=False)
            check_id = check['id']

            if need_refresh:
                ta_conn.refresh_trusted_advisor_check(check_id)

            check_result = ta_conn.describe_trusted_advisor_check_result(check_id, language)
            if not check_result:
                # No result available for this check yet; nothing to report.
                continue

            # Reshape the flagged resources into row dicts for table display.
            check_result['flagged_resources'] = self._merge_flagged_resources(
                check_id_data, check_result)
            check_result.update({
                'name': check_id_data.name,
                'category': check_id_data.category,
                'description': check_id_data.description,
                'arn': self.generate_arn(service='support', region=DEFAULT_REGION,
                                         account_id=params['account_id'], resource_type="trusted_advisor",
                                         resource_id=check_id),
                'account_id': params['account_id']
            })

            check_data = Check(check_result, strict=False)
            check_resource = CheckResource({
                'name': check_data.name,
                'data': check_data,
                'region_code': 'global',
                'reference': ReferenceModel(check_data.reference())
            })
            ta_resources.append(CheckResponse({'resource': check_resource}))

        print(f' Trusted Advisor Finished {time.time() - start_time} Seconds')
        return ta_resources
    def collect_cloud_service(self, params):
        """Collect Google Cloud routes as RouteResponse objects.

        Args:
            params:
                - options
                - schema
                - secret_data
                - filter
                - zones
        Response:
            CloudServiceResponse
        """
        # FIX: the docstring used to appear after the first statements, where
        # it was a no-op string expression instead of the function's __doc__.
        print("** Route START **")
        start_time = time.time()

        collected_cloud_services = []
        secret_data = params['secret_data']
        route_conn: RouteConnector = self.locator.get_connector(
            self.connector_name, **params)

        # Fetch routes plus the instances they may apply to in one pass.
        routes = route_conn.list_routes()
        compute_vms = route_conn.list_instance()
        region = 'global'
        for route in routes:
            display = {
                'network_display': self._get_matched_last_target('network', route),
                'next_hop': self.get_next_hop(route),
                'instance_tags_on_list': self._get_tags_display(route, 'list'),
                'instance_tags': self._get_tags_display(route, 'not list'),
            }

            route.update({
                'display': display,
                'project': secret_data['project_id'],
                # NOTE(review): 'get_matched_instace' is misspelled but is the
                # helper's actual name elsewhere; renaming only this call site
                # would break it.
                'applicable_instance': self.get_matched_instace(
                    route, secret_data['project_id'], compute_vms),
            })

            # Routes carry no labels, so none are attached here.
            route_data = Route(route, strict=False)
            route_resource = RouteResource({
                'region_code': region,
                'data': route_data,
                'reference': ReferenceModel(route_data.reference())
            })

            self.set_region_code(region)
            collected_cloud_services.append(
                RouteResponse({'resource': route_resource}))

        print(f'** Route Finished {time.time() - start_time} Seconds **')
        return collected_cloud_services
    def get_rds_databases(self, rds_conn):
        """Return RDSDatabaseResponse objects for every DB cluster and every
        standalone (non-cluster) DB instance matching the RDS filter."""

        def _to_response(summary):
            # Wrap a plain summary dict into the response model chain.
            data = RDS(summary, strict=False)
            resource = RDSDatabaseResource({
                'data': data,
                'reference': ReferenceModel(data.reference())
            })
            return RDSDatabaseResponse({'resource': resource})

        databases = []

        for cluster in rds_conn.describe_db_clusters(
                Filters=self.get_rds_filter()):
            databases.append(_to_response({
                'arn': cluster['DBClusterArn'],
                'db_identifier': cluster['DBClusterIdentifier'],
                'status': cluster['Status'],
                'role': 'cluster'
            }))

        for instance in rds_conn.describe_db_instances(
                Filters=self.get_rds_filter()):
            # Skip members of clusters; the cluster itself is collected above.
            if not instance.get('DBClusterIdentifier'):
                databases.append(_to_response({
                    'arn': instance['DBInstanceArn'],
                    'db_identifier': instance['DBInstanceIdentifier'],
                    'status': instance['DBInstanceStatus'],
                    'role': 'instance'
                }))

        return databases
    def collect_cloud_service(self, params):
        """Collect Cloud SQL instances (with their databases and users) as
        InstanceResponse objects.

        Args:
            params:
                - options
                - schema
                - secret_data
                - filter
                - zones
        Response:
            CloudServiceResponse
        """
        # FIX: the docstring used to sit after the first statements, where it
        # was a no-op string expression instead of the function's __doc__.
        print("** Cloud SQL START **")
        start_time = time.time()

        cloud_sql_conn: CloudSQLConnector = self.locator.get_connector(
            self.connector_name, **params)

        collected_cloud_services = []
        for instance in cloud_sql_conn.list_instances():
            instance_name = instance['name']
            project = instance.get('project', '')

            # Per-instance sub-resources.
            databases = cloud_sql_conn.list_databases(instance_name)
            users = cloud_sql_conn.list_users(instance_name)
            stackdriver = self.get_stackdriver(project, instance_name)

            instance.update({
                'stackdriver': stackdriver,
                'display_state': self._get_display_state(instance),
                'databases': [Database(database, strict=False) for database in databases],
                'users': [User(user, strict=False) for user in users],
            })

            # Cloud SQL instances expose no labels here.
            instance_data = Instance(instance, strict=False)
            instance_resource = InstanceResource({
                'data': instance_data,
                'region_code': instance['region'],
                'name': instance_name,
                'reference': ReferenceModel(instance_data.reference())
            })

            self.set_region_code(instance['region'])
            collected_cloud_services.append(
                InstanceResponse({'resource': instance_resource}))

        print(f'** Cloud SQL Finished {time.time() - start_time} Seconds **')
        return collected_cloud_services
# Beispiel #9 (score: 0)
    def collect_cloud_service(self, params):
        """Collect all external IP addresses (static addresses, VM NICs,
        forwarding rules) as ExternalIpAddressResponse objects.

        Args:
            params:
                - options
                - schema
                - secret_data
                - filter
                - zones
        Response:
            CloudServiceResponse
        """
        # FIX: the docstring used to sit after the first statements, where it
        # was a no-op string expression instead of the function's __doc__.
        print("** External IP Address START **")
        start_time = time.time()

        collected_cloud_services = []
        secret_data = params['secret_data']
        exp_conn: ExternalIPAddressConnector = self.locator.get_connector(self.connector_name, **params)
        regional_global_addresses = exp_conn.list_regional_addresses()
        compute_engine_vm_address = exp_conn.list_instance_for_networks()
        forwarding_rule_address = exp_conn.list_forwarding_rule()

        # Merge the three Google Cloud sources into a single address list.
        all_external_ip_addresses = self.get_external_ip_addresses(regional_global_addresses,
                                                                   compute_engine_vm_address,
                                                                   forwarding_rule_address)

        for external_ip_juso in all_external_ip_addresses:
            region = external_ip_juso.get('region') or 'global'
            external_ip_juso.update({
                'project': secret_data['project_id'],
                # FIX: a missing/None 'status' previously raised
                # AttributeError on .replace(); fall back to ''.
                'status_display': (external_ip_juso.get('status') or '').replace('_', ' ').title()
            })
            if external_ip_juso.get('selfLink') is None:
                external_ip_juso.update({'self_link': self._get_external_self_link_when_its_empty(external_ip_juso)})

            # No labels (they exist in the console but are not exposed by the APIs).

            external_ip_juso_data = ExternalIpAddress(external_ip_juso, strict=False)
            external_ip_juso_resource = ExternalIpAddressResource({
                'region_code': region,
                'data': external_ip_juso_data,
                'reference': ReferenceModel(external_ip_juso_data.reference())
            })

            self.set_region_code(region)
            collected_cloud_services.append(ExternalIpAddressResponse({'resource': external_ip_juso_resource}))

        print(f'** External IP Address Finished {time.time() - start_time} Seconds **')
        return collected_cloud_services
# Beispiel #10 (score: 0)
 def set_image_resources(images, region):
     """Wrap raw software-image records into DatabaseSoftwareImagesResponse
     objects for *region*."""
     responses = []
     for raw in images:
         data = DatabaseSoftwareImage(raw, strict=False)
         resource = DatabaseSoftwareImagesResource({
             'data': data,
             'region_code': region,
             'reference': ReferenceModel(data.reference()),
             'tags': raw.get('_freeform_tags', []),
             'name': data.display_name
         })
         responses.append(
             DatabaseSoftwareImagesResponse({'resource': resource}))
     return responses
# Beispiel #11 (score: 0)
 def set_database_resources(databases, region):
     """Wrap raw database records into DatabaseResponse objects, stamping
     each record with *region* first."""
     responses = []
     for raw in databases:
         raw['region'] = region
         data = Database(raw, strict=False)
         resource = DatabaseResource({
             'data': data,
             'region_code': region,
             'reference': ReferenceModel(data.reference()),
             'tags': raw.get('_freeform_tags', []),
             'name': data.db_name
         })
         responses.append(DatabaseResponse({'resource': resource}))
     return responses
    def _set_resources(self, raw_data):
        """Wrap raw autonomous-database records into DatabaseResponse objects.

        Args:
            raw_data: iterable of dicts describing autonomous databases.

        Returns:
            list of DatabaseResponse.
        """
        result = []
        for raw in raw_data:
            autonomous_db_data = Database(raw, strict=False)
            autonomous_db_resource = DatabaseResource({
                'data': autonomous_db_data,
                'region_code': autonomous_db_data.region,
                'reference': ReferenceModel(autonomous_db_data.reference()),
                # FIX: use .get with a default like the sibling builders, so a
                # record without '_freeform_tags' no longer raises KeyError.
                'tags': raw.get('_freeform_tags', [])
            })
            result.append(
                DatabaseResponse({'resource': autonomous_db_resource}))

        return result
    def get_rds_instances(self, region_name, rds_conn):
        """Return an RDSInstanceResponse for every DB instance in
        *region_name* matching the RDS filter."""
        responses = []
        for instance in rds_conn.describe_db_instances(
                Filters=self.get_rds_filter(region_name)):
            # Reduce the raw API record to the fields the model needs.
            summary = {
                'arn': instance['DBInstanceArn'],
                'db_identifier': instance['DBInstanceIdentifier'],
                'status': instance['DBInstanceStatus'],
                'role': 'instance'
            }
            data = RDS(summary, strict=False)
            resource = RDSInstanceResource({
                'data': data,
                'reference': ReferenceModel(data.reference())
            })
            responses.append(RDSInstanceResponse({'resource': resource}))
        return responses
    def collect_power_state(self, params):
        """
        Args:
            params:
                - secret_data
                - resource_group
        Response:
            [VirtualMachineResponse, ...]
        """
        vm_conn: VirtualMachineConnector = self.locator.get_connector(self.connector_name, **params)
        vm_conn.set_connect(params['secret_data'])

        start_time = time.time()

        vms = []
        for vm in vm_conn.list_vms():
            # Segment 4 of vm.id is passed as the first get_vm argument —
            # presumably the resource group in the Azure resource ID; confirm
            # against the connector.
            vm_info = vm_conn.get_vm(vm.id.split('/')[4], vm.name)
            reference_id = self.get_reference_id(vm.id)
            power_state, instance_state = self._get_status_map(vm_info.instance_view.statuses)

            server_data = Server({
                'compute': Compute({'instance_state': instance_state, 'instance_id': reference_id}, strict=False),
                'power_state': PowerState({'status': power_state}, strict=False)
            }, strict=False)

            resource = VirtualMachineResource({
                'data': server_data,
                'reference': ReferenceModel(server_data.reference())
            })
            vms.append(VirtualMachineResponse({'resource': resource}))

        print(f' Virtual Machine Finished {time.time() - start_time} Seconds')
        return vms
# Beispiel #15 (score: 0)
 def set_backup_resources(backups, region, compartment):
     """Wrap raw backup records into BackupResponse objects, stamping each
     record with *region* and the compartment's name first."""
     responses = []
     for raw in backups:
         raw['region'] = region
         raw['compartment_name'] = compartment.name
         data = Backup(raw, strict=False)
         resource = BackupResource({
             'data': data,
             'region_code': region,
             'reference': ReferenceModel(data.reference()),
             'tags': raw.get('_freeform_tags', []),
             'name': data.display_name
         })
         responses.append(BackupResponse({'resource': resource}))
     return responses
# Beispiel #16 (score: 0)
    def collect_cloud_service(self, params):
        # Collect OCI BareMetal/VM database systems — together with their DB
        # homes, databases, nodes, patches, backups and software images — for
        # one region/compartment, returning a mixed list of response objects.
        start_time = time.time()
        """
        Args:
            params:
                - options
                - schema
                - secret_data
                - filter
                - region
                - compartment
        Response:
            CloudServiceResponse
        """
        # NOTE(review): the triple-quoted block above is a no-op string
        # statement, not the function's docstring (it is not the first
        # statement). Left untouched to keep this change comment-only.
        secret_data = params['secret_data']
        region = params['region']
        compartment = params['compartment']

        secret_data.update({'region': region})
        bmvm_conn: BareMetalVMDatabaseConnector = self.locator.get_connector(
            self.connector_name, **params)
        bmvm_conn.set_connect(secret_data)

        # Get list of BareMetal,VM Database Resources
        basic_bmvm_dbsystem_list = bmvm_conn.list_database_dbsystems(
            compartment)
        bmvm_images_list = bmvm_conn.list_database_images(compartment)
        bmvm_backup_list = bmvm_conn.list_database_backups(compartment)
        bmvm_database = []
        for db_system in basic_bmvm_dbsystem_list:
            '''
            if db_system.lifecycle_state == DbSystemSummary.LIFECYCLE_STATE_TERMINATED:
                continue
            '''
            # NOTE(review): convert_nested_dictionary is called with an
            # explicit `self` argument — presumably an unbound/static helper;
            # confirm against its definition.
            db_system_raw = self.convert_nested_dictionary(self, db_system)
            db_homes = bmvm_conn.list_database_home(compartment,
                                                    db_system_raw['_id'])
            db_nodes, node_conn = self._collect_db_nodes(
                bmvm_conn, compartment, db_system_raw.get('_id'))
            # Enrich the raw DB system with every related sub-resource before
            # converting it into a DbSystem model.
            db_system_raw.update({
                'region':
                region,
                'compartment_name':
                compartment.name,
                '_maintenance_window':
                bmvm_conn.load_database_maintenance_windows(
                    db_system_raw['_maintenance_window']),
                '_freeform_tags':
                self.convert_tags(db_system_raw['_freeform_tags']),
                'last_maintenance_run':
                bmvm_conn.load_maintenance_run(
                    db_system_raw['_last_maintenance_run_id'],
                    db_system_raw['_display_name'] + ' - ' +
                    db_system_raw['_shape']),
                'next_maintenance_run':
                bmvm_conn.load_maintenance_run(
                    db_system_raw['_next_maintenance_run_id'],
                    db_system_raw['_display_name'] + ' - ' +
                    db_system_raw['_shape']),
                'list_db_Home':
                self._convert_database_homes(db_homes),
                'list_database':
                self._collect_matched_database(bmvm_conn, compartment,
                                               db_homes),
                'list_db_node':
                db_nodes,
                'console_connections':
                node_conn,
                'list_patch_history':
                self._collect_db_system_patch_history(
                    bmvm_conn, db_system_raw.get('list_db_Home')),
                'list_patches':
                self._collect_db_system_patch(bmvm_conn,
                                              db_system_raw.get('_id'),
                                              compartment),
                'list_backups':
                self._convert_object_to_list(bmvm_backup_list),
                'list_software_images':
                self._update_software_images(bmvm_images_list, region,
                                             compartment)
            })

            db_system_data = DbSystem(db_system_raw, strict=False)
            db_system_resource = DBSystemsResource({
                'data':
                db_system_data,
                'region_code':
                region,
                'reference':
                ReferenceModel(db_system_data.reference()),
                'tags':
                db_system_raw.get('_freeform_tags', []),
                'name':
                db_system_data.display_name
            })
            # Emit the DB system itself plus flattened responses for its
            # databases, software images, and backups.
            bmvm_database.extend(
                self.set_database_resources(
                    db_system_raw.get('list_database', []), region))
            bmvm_database.append(
                DBSystemResponse({'resource': db_system_resource}))
            bmvm_database.extend(
                self.set_image_resources(
                    db_system_raw.get('list_software_images', []), region))
            bmvm_database.extend(
                self.set_backup_resources(
                    db_system_raw.get('list_backups', []), region,
                    compartment))

            # Register the region once anything has been collected.
            if bmvm_database:
                print(
                    f"SET REGION CODE FROM BareMetal,VM DB... {params.get('region')} // {params.get('compartment').name}"
                )
                self.set_region_code(region)

        return bmvm_database
# Beispiel #17 (score: 0)
    def collect_cloud_service(self, params):
        """Collect Azure managed-disk snapshots as SnapshotResponse objects.

        Args:
            params:
                - options
                - schema
                - secret_data
                - filter
                - zones
                - subscription_info
        Response:
            CloudServiceResponse
        """
        # FIXES in this revision:
        # - the docstring used to sit after the first statements (no-op string);
        # - subscription_info was a hard-coded debug dict with a real
        #   subscription/tenant id baked in — read it from params like the
        #   sibling collectors do.
        print("** Snapshot START **")
        start_time = time.time()

        secret_data = params['secret_data']
        subscription_info = params['subscription_info']

        snapshot_conn: SnapshotConnector = self.locator.get_connector(self.connector_name, **params)
        snapshots = []
        for snapshot in snapshot_conn.list_snapshots():
            snapshot_dict = self.convert_dictionary(snapshot)
            sku_dict = self.convert_dictionary(snapshot.sku)
            creation_data_dict = self.convert_dictionary(snapshot.creation_data)
            encryption_dict = self.convert_dictionary(snapshot.encryption)

            # Switch SnapshotStorageAccountType to a user-friendly SKU name
            # (ex. Premium_LRS -> Premium SSD, Standard HDD, ...).
            sku_dict.update({
                'name': self.get_disk_sku_name(sku_dict['name'])
            })

            # FIX: these two previously tested `obj in snapshot_dict`, i.e.
            # object membership among the dict *keys*, which is effectively
            # always False — the reference sub-dicts were never attached.
            # Test the attribute for presence instead.
            if snapshot.creation_data.image_reference is not None:
                creation_data_dict.update({
                    'image_reference': self.convert_dictionary(snapshot.creation_data.image_reference)
                })

            if snapshot.creation_data.gallery_image_reference is not None:
                creation_data_dict.update({
                    'gallery_image_reference': self.convert_dictionary(snapshot.creation_data.gallery_image_reference)
                })

            # Map the encryption type to user-friendly words
            # (ex. EncryptionAtRestWithPlatformKey -> Platform-managed key).
            if snapshot.encryption.type is not None:
                if snapshot.encryption.type == 'EncryptionAtRestWithPlatformKey':
                    encryption_type = 'Platform-managed key'
                elif snapshot.encryption.type == 'EncryptionAtRestWithPlatformAndCustomerKeys':
                    encryption_type = 'Platform and customer managed key'
                elif snapshot.encryption.type == 'EncryptionAtRestWithCustomerKey':
                    encryption_type = 'Customer-managed key'
                else:
                    # FIX: an unrecognized type previously left
                    # encryption_type unbound (NameError); fall back to the
                    # raw value.
                    encryption_type = snapshot.encryption.type

                encryption_dict.update({
                    'type_display': encryption_type
                })

            snapshot_dict.update({
                'resource_group': self.get_resource_group_from_id(snapshot_dict['id']),  # parse resource_group from ID
                'subscription_id': subscription_info['subscription_id'],
                'subscription_name': subscription_info['subscription_name'],
                'size': snapshot_dict['disk_size_bytes'],
                'sku': sku_dict,
                'creation_data': creation_data_dict,
                'encryption': encryption_dict,
                'incremental_display': self.get_incremental_display(snapshot_dict['incremental'])
            })

            if 'network_access_policy' in snapshot_dict:
                snapshot_dict.update({
                    'network_access_policy_display': self.get_network_access_policy(
                        snapshot_dict['network_access_policy'])
                })

            # Resolve the attached VM's name from the managing resource ID.
            managed_by = snapshot_dict['managed_by']
            if managed_by:
                snapshot_dict.update({
                    'managed_by': self.get_attached_vm_name_from_managed_by(managed_by)
                })

            # Derive source_disk_name from the source resource ID, if any.
            source_resource_id = creation_data_dict['source_resource_id']
            if source_resource_id:
                snapshot_dict.update({
                    'source_disk_name': self.get_source_disk_name(source_resource_id)
                })

            # Reformat the tags with the shared helper before attaching them.
            _tags = self.convert_tag_format(snapshot_dict.get('tags', {}))
            snapshot_dict.update({
                'tags': _tags
            })

            snapshot_data = Snapshot(snapshot_dict, strict=False)
            snapshot_resource = SnapshotResource({
                'data': snapshot_data,
                'region_code': snapshot_data.location,
                'reference': ReferenceModel(snapshot_data.reference()),
                'tags': _tags
            })

            # set_region_code must be called so the region itself is collected.
            self.set_region_code(snapshot_data.location)
            snapshots.append(SnapshotResponse({'resource': snapshot_resource}))

        print(f'** Snapshot Finished {time.time() - start_time} Seconds **')
        return snapshots
# Beispiel #18 (score: 0)
    def collect_cloud_service(self, params):
        """Collect Azure Load Balancer resources for one subscription.

        Converts every load balancer returned by the connector into a nested
        dict, enriches it with resource-group/subscription metadata, attached
        network interfaces, frontend/backend display fields and converted
        tags, then wraps each as a ``LoadBalancerResponse``.

        Args:
            params (dict): must contain ``secret_data`` and
                ``subscription_info`` (plus connector options, see below).

        Returns:
            list: ``LoadBalancerResponse`` objects, one per load balancer.
        """
        print("** LoadBalancer START **")
        start_time = time.time()
        """
        Args:
            params:
                - options
                - schema
                - secret_data
                - filter
                - zones
                - subscription_info
        Response:
            CloudServiceResponse
        """
        secret_data = params['secret_data']
        subscription_info = params['subscription_info']

        load_balancer_conn: LoadBalancerConnector = self.locator.get_connector(
            self.connector_name, **params)
        load_balancers = []
        for load_balancer in load_balancer_conn.list_load_balancers():
            # NOTE(review): helpers are called as ``self.helper(self, ...)``
            # throughout — they appear to be plain functions attached to the
            # class rather than bound methods; confirm before changing.
            load_balancer_dict = self.convert_nested_dictionary(
                self, load_balancer)
            # update vm_scale_set_dict
            load_balancer_dict.update({
                'resource_group':
                self.get_resource_group_from_id(load_balancer_dict['id']),
                # parse resource_group from ID
                'subscription_id':
                subscription_info['subscription_id'],
                'subscription_name':
                subscription_info['subscription_name'],
            })

            # Get Network Interfaces attached in this load balancer
            load_balancer_dict.update({
                'network_interfaces':
                self.get_network_interfaces(
                    self, load_balancer_conn,
                    load_balancer_dict['resource_group'],
                    load_balancer_dict['name'])
            })

            # Get Frontend IP Configurations information
            if load_balancer_dict.get(
                    'frontend_ip_configurations') is not None:
                private_ip_address_list = list()
                used_by_list = list()
                for fic in load_balancer_dict['frontend_ip_configurations']:
                    if fic.get(
                            'subnet'
                    ):  # If the 'public' type, Skip this part because there isn't subnet information for them.
                        fic['subnet'][
                            'address_prefix'] = self.get_frontend_address_prefix(
                                self, load_balancer_conn, fic['subnet'])
                        fic['subnet'][
                            'name'] = self.get_frontend_ip_subnet_name(
                                fic['subnet']['id'])

                    # Get used inbound NAT rules
                    if fic.get('inbound_nat_rules') is not None:
                        load_balancer_dict.update({
                            'frontend_ip_configurations_used_by_display':
                            self.
                            get_frontend_ip_configurations_used_by_display(
                                used_by_list, fic['inbound_nat_rules'])
                        })

                    # Get used load balancing NAT rules
                    # NOTE(review): this overwrites the key written by the
                    # inbound-NAT branch above; `used_by_list` accumulates
                    # across both calls, which looks intentional — confirm.
                    if fic.get('load_balancing_rules') is not None:
                        load_balancer_dict.update({
                            'frontend_ip_configurations_used_by_display':
                            self.
                            get_frontend_ip_configurations_used_by_display(
                                used_by_list, fic['load_balancing_rules']),
                        })

                    # Get all of private ip addresses
                    private_ip_address_list.append(fic['private_ip_address'])

                    load_balancer_dict.update({
                        'private_ip_address_display':
                        private_ip_address_list
                    })

            # Since Azure python sdk returns only one backend pool, delete the backend pool list first, and then use the new API connection
            if load_balancer_dict.get('backend_address_pools') is not None:
                load_balancer_dict['backend_address_pools'].clear()
                load_balancer_dict.update({
                    'backend_address_pools':
                    self.list_load_balancer_backend_address_pools(
                        self, load_balancer_conn,
                        load_balancer_dict['resource_group'],
                        load_balancer_dict['name'],
                        load_balancer_dict['network_interfaces'])
                })
                # get backend address pool's count
                load_balancer_dict.update({
                    'backend_address_pools_count_display':
                    self.get_backend_address_pools_count(
                        self, load_balancer_dict['backend_address_pools'])
                })

            # Get load balancing Rules for display
            if load_balancer_dict.get('load_balancing_rules') is not None:
                load_balancer_dict.update({
                    'load_balancing_rules_display':
                    self.get_load_balancing_rules_display(
                        self, load_balancer_dict['load_balancing_rules']),
                })

                # Per-rule display fields (backend pool name, distribution,
                # frontend IP configuration).
                for lbr in load_balancer_dict['load_balancing_rules']:
                    if lbr.get('backend_address_pool') is not None:
                        lbr.update({
                            'backend_address_pool_display':
                            self.get_backend_address_pool_name(
                                lbr['backend_address_pool']),
                        })

                    if lbr.get('load_distribution') is not None:
                        lbr.update({
                            'load_distribution_display':
                            self.get_load_distribution_display(
                                lbr['load_distribution'])
                        })

                    if lbr.get('frontend_ip_configuration') is not None:
                        lbr.update({
                            'frontend_ip_configuration_display':
                            self.get_frontend_ip_configuration_display(
                                lbr['frontend_ip_configuration'])
                        })

            # Get Inbound NAT Rules for display
            if load_balancer_dict.get('inbound_nat_rules') is not None:
                load_balancer_dict.update({
                    'inbound_nat_rules_display':
                    self.get_nat_rules_display(
                        self, load_balancer_dict['inbound_nat_rules'])
                })
                for inr in load_balancer_dict['inbound_nat_rules']:
                    inr.update({
                        'frontend_ip_configuration_display':
                        self.get_frontend_ip_configuration_display(
                            inr['frontend_ip_configuration']),
                        'port_mapping_display':
                        self.get_port_mapping_display(inr['frontend_port'],
                                                      inr['backend_port']),
                        'target_virtual_machine':
                        self.get_matched_vm_info(
                            self, inr['backend_ip_configuration']['id'],
                            load_balancer_dict['network_interfaces'])
                    })

            # Get Health Probes for display
            if load_balancer_dict.get('probes') is not None:
                load_balancer_dict.update({
                    'probes_display':
                    self.get_probe_display_list(self,
                                                load_balancer_dict['probes'])
                })

            # switch tags form
            tags = load_balancer_dict.get('tags', {})
            _tags = self.convert_tag_format(tags)
            load_balancer_dict.update({'tags': _tags})

            # print("load_balancer_dict")
            # print(load_balancer_dict)

            load_balancer_data = LoadBalancer(load_balancer_dict, strict=False)
            load_balancer_resource = LoadBalancerResource({
                'data':
                load_balancer_data,
                'region_code':
                load_balancer_data.location,
                'reference':
                ReferenceModel(load_balancer_data.reference()),
                'tags':
                _tags
            })

            # Must set_region_code method for region collection
            self.set_region_code(load_balancer_data['location'])
            load_balancers.append(
                LoadBalancerResponse({'resource': load_balancer_resource}))

        print(
            f'** LoadBalancer Finished {time.time() - start_time} Seconds **')
        return load_balancers
    def collect_cloud_service(self, params):
        """Collect Azure Managed Disk resources for one subscription.

        Flattens each SDK disk object (plus its sku/creation_data/encryption
        sub-objects) into a dict, adds display fields and converted tags,
        and wraps each as a ``DiskResponse``.

        Args:
            params (dict): must contain ``secret_data`` and
                ``subscription_info``.

        Returns:
            list: ``DiskResponse`` objects, one per disk.
        """
        print("** Disk START **")
        start_time = time.time()
        """
        Args:
            params:
                - options
                - schema
                - secret_data
                - filter
                - zones
                - subscription_info
        Response:
            CloudServiceResponse
        """
        secret_data = params['secret_data']
        subscription_info = params['subscription_info']

        disk_conn: DiskConnector = self.locator.get_connector(
            self.connector_name, **params)
        disks = []
        for disk in disk_conn.list_disks():
            # Flatten the top-level object and its nested sub-objects
            # into plain dicts before enriching them.
            disk_dict = self.convert_dictionary(disk)
            sku_dict = self.convert_dictionary(disk.sku)
            creation_data_dict = self.convert_dictionary(disk.creation_data)
            encryption_dict = self.convert_dictionary(disk.encryption)

            # update sku_dict
            # switch DiskStorageAccountType to disk_sku_name for user-friendly words.
            # (ex.Premium SSD, Standard HDD..)
            sku_dict.update({'name': self.get_disk_sku_name(sku_dict['name'])})

            # update creation_data dict
            if disk.creation_data.image_reference is not None:
                image_reference_dict = self.convert_dictionary(
                    disk.creation_data.image_reference)
                creation_data_dict.update(
                    {'image_reference': image_reference_dict})

            # update disk_data dict
            disk_dict.update({
                'resource_group':
                self.get_resource_group_from_id(
                    disk_dict['id']),  # parse resource_group from ID
                'subscription_id':
                subscription_info['subscription_id'],
                'subscription_name':
                subscription_info['subscription_name'],
                'size':
                disk_dict['disk_size_bytes'],
                'sku':
                sku_dict,
                'creation_data':
                creation_data_dict,
                'encryption':
                encryption_dict,
                'tier_display':
                self.get_tier_display(disk_dict['disk_iops_read_write'],
                                      disk_dict['disk_m_bps_read_write']),
            })

            if 'network_access_policy' in disk_dict:
                disk_dict.update({
                    'network_access_policy_display':
                    self.get_network_access_policy(
                        disk_dict['network_access_policy'])
                })

            # get attached vm's name
            managed_by = disk_dict['managed_by']
            if managed_by:
                disk_dict.update({
                    'managed_by':
                    self.get_attached_vm_name_from_managed_by(
                        disk_dict['managed_by'])
                })

            # switch tags form
            tags = disk_dict.get('tags', {})
            _tags = self.convert_tag_format(tags)
            disk_dict.update({'tags': _tags})

            disk_data = Disk(disk_dict, strict=False)

            disk_resource = DiskResource({
                'data':
                disk_data,
                'region_code':
                disk_data.location,
                'reference':
                ReferenceModel(disk_data.reference()),
                'tags':
                _tags,
                'name':
                disk_data.name
            })

            # Must set_region_code method for region collection
            self.set_region_code(disk_data['location'])

            disks.append(DiskResponse({'resource': disk_resource}))

        print(f'** Disk Finished {time.time() - start_time} Seconds **')
        return disks
# Beispiel #20 ("Example #20" — stray scraper/pagination marker; commented out
# because the bare name `Beispiel` was a NameError-producing statement)
# 0
    def collect_cloud_service(self, params):
        """Collect Google Cloud Compute snapshot resources.

        Enriches each snapshot with its source-disk info, matched snapshot
        schedules and converted labels, then wraps each as a
        ``SnapshotResponse``.

        Args:
            params (dict): must contain ``secret_data`` (with
                ``project_id``) plus connector options.

        Returns:
            list: ``SnapshotResponse`` objects, one per snapshot.
        """
        print("** Snapshot START **")
        start_time = time.time()
        """
        Args:
            params:
                - options
                - schema
                - secret_data
                - filter
                - zones
        Response:
            CloudServiceResponse
        """
        collected_cloud_services = []
        secret_data = params['secret_data']
        snapshot_conn: SnapshotConnector = self.locator.get_connector(
            self.connector_name, **params)

        # Get lists that relate with snapshots through Google Cloud API
        snapshots = snapshot_conn.list_snapshot()
        all_region_resource_policies = snapshot_conn.list_resource_policies()
        disk_list_info = snapshot_conn.list_all_disks_for_snapshots()

        for snapshot in snapshots:
            region = self.get_matching_region(snapshot.get('storageLocations'))
            snapshot_schedule = []
            snapshot_schedule_display = []
            disk_name_key = self._get_disk_name_key(snapshot.get('name'))

            # Resolve every resource policy attached to the source disk into
            # a schedule model plus a display string.
            for resource_policy in disk_list_info.get(disk_name_key, []):
                snapshot_schedule_display.append(
                    self._get_last_target(resource_policy))
                matched_po = self.get_matched_snapshot_schedule(
                    all_region_resource_policies.get(resource_policy))
                snapshot_schedule.append(
                    SnapShotSchedule(matched_po, strict=False))

            labels = self.convert_labels_format(snapshot.get('labels', {}))
            snapshot.update({
                'project': secret_data['project_id'],
                'disk': self.get_disk_info(snapshot),
                'snapshot_schedule': snapshot_schedule,
                'snapshot_schedule_display': snapshot_schedule_display,
                'creation_type':
                    'Scheduled' if snapshot.get('autoCreated') else 'Manual',
                'encryption': self._get_encryption_info(snapshot),
                'labels': labels
            })

            # BUG FIX: the original read ``snapshot_data.get('name', '')``
            # one line before ``snapshot_data`` was assigned (NameError on
            # every iteration); take the name from the raw dict instead.
            _name = snapshot.get('name', '')
            # labels -> tags
            snapshot_data = Snapshot(snapshot, strict=False)
            snapshots_resource = SnapshotResource({
                'name': _name,
                'region_code': region.get('region_code'),
                'data': snapshot_data,
                'tags': labels,
                'reference': ReferenceModel(snapshot_data.reference())
            })

            # Must call set_region_code for region collection.
            self.set_region_code(region.get('region_code'))
            collected_cloud_services.append(
                SnapshotResponse({'resource': snapshots_resource}))

        print(f'** SnapShot Finished {time.time() - start_time} Seconds **')
        return collected_cloud_services
    def collect_cloud_service(self, params):
        """Collect Google Cloud VPC Network resources.

        Joins each network with its matching firewalls, routes, subnets and
        in-use internal addresses, then wraps each as a
        ``VPCNetworkResponse``. VPC networks are global, so every resource
        is tagged with the 'global' region.

        Args:
            params (dict): must contain ``secret_data`` (with
                ``project_id``).

        Returns:
            list: ``VPCNetworkResponse`` objects, one per network.
        """
        print("** VPC Network START **")
        start_time = time.time()
        """
        Args:
            params:
                - options
                - schema
                - secret_data
                - filter
                - zones
        Response:
            CloudServiceResponse
        """
        collected_cloud_services = []
        secret_data = params['secret_data']
        vpc_conn: VPCNetworkConnector = self.locator.get_connector(
            self.connector_name, **params)

        # Get lists that relate with snapshots through Google Cloud API
        networks = vpc_conn.list_networks()
        firewalls = vpc_conn.list_firewall()
        subnets = vpc_conn.list_subnetworks()
        routes = vpc_conn.list_routes()
        regional_address = vpc_conn.list_regional_addresses()

        for network in networks:

            # Related resources reference the network by its selfLink URL.
            network_identifier = network.get('selfLink')
            matched_firewall = self._get_matched_firewalls(
                network_identifier, firewalls)
            matched_route = self.get_matched_route(network_identifier, routes)
            matched_subnets = self._get_matched_subnets(
                network_identifier, subnets)
            region = self.match_region_info('global')
            peerings = self.get_peering(network)

            network.update({
                'mode':
                'Auto' if network.get('autoCreateSubnetworks') else 'Custom',
                'project':
                secret_data['project_id'],
                'global_dynamic_route':
                self._get_global_dynamic_route(network, 'not_mode'),
                'dynamic_routing_mode':
                self._get_global_dynamic_route(network, 'mode'),
                'subnet_creation_mode':
                'Auto' if network.get('autoCreateSubnetworks') else 'Custom',
                'ip_address_data':
                self.get_internal_ip_address_in_use(network, regional_address),
                'peerings':
                peerings,
                'route_data': {
                    'total_number': len(matched_route),
                    'route': matched_route
                },
                'firewall_data': {
                    'total_number': len(matched_firewall),
                    'firewall': matched_firewall
                },
                'subnetwork_data': {
                    'total_number': len(matched_subnets),
                    'subnets': matched_subnets
                },
            })

            # No labels
            _name = network.get('name', '')
            vpc_data = VPCNetwork(network, strict=False)
            vpc_resource = VPCNetworkResource({
                'name':
                _name,
                'region_code':
                region.get('region_code'),
                'data':
                vpc_data,
                'reference':
                ReferenceModel(vpc_data.reference())
            })

            self.set_region_code('global')
            collected_cloud_services.append(
                VPCNetworkResponse({'resource': vpc_resource}))

        print(f'** VPC Network Finished {time.time() - start_time} Seconds **')
        return collected_cloud_services
    def collect_cloud_service(self, params):
        """Collect Google Cloud Storage bucket resources.

        For each bucket this gathers its objects (count/size), IAM policy,
        lifecycle/retention settings and labels, then wraps the result as a
        ``StorageResponse``.

        Args:
            params (dict): must contain ``secret_data`` (with
                ``project_id``).

        Returns:
            list: ``StorageResponse`` objects, one per bucket.
        """
        print("** Storage START **")
        start_time = time.time()
        """
        Args:
            params:
                - options
                - schema
                - secret_data
                - filter
                - zones
        Response:
            CloudServiceResponse
        """
        collected_cloud_services = []
        secret_data = params['secret_data']
        storage_conn: StorageConnector = self.locator.get_connector(
            self.connector_name, **params)

        # Get lists that relate with snapshots through Google Cloud API
        buckets = storage_conn.list_buckets()

        for bucket in buckets:
            bucket_name = bucket.get('name')

            objects = storage_conn.list_objects(bucket_name)
            obj_count, size = self._get_number_of_obj_and_size(objects)
            iam_policy = storage_conn.list_iam_policy(bucket_name)
            # ROBUSTNESS FIX: default to '' so a bucket payload without
            # 'storageClass' no longer raises AttributeError on .lower().
            st_class = bucket.get('storageClass', '').lower()
            region = self.get_matching_region(bucket)
            labels = self.convert_labels_format(bucket.get('labels', {}))
            stackdriver = self.get_stackdriver(bucket_name)
            bucket.update({
                'project': secret_data['project_id'],
                'encryption': self._get_encryption(bucket),
                'requester_pays': self._get_requester_pays(bucket),
                'retention_policy_display':
                    self._get_retention_policy_display(bucket),
                'links': self._get_config_link(bucket),
                'size': size,
                'stackdriver': stackdriver,
                'default_event_based_hold':
                    'Enabled'
                    if bucket.get('defaultEventBasedHold') else 'Disabled',
                'iam_policy': iam_policy,
                'iam_policy_binding': self._get_iam_policy_binding(iam_policy),
                'object_count': obj_count,
                'object_total_size': size,
                'lifecycle_rule': self._get_lifecycle_rule(bucket),
                'location': self.get_location(bucket),
                'default_storage_class': st_class.capitalize(),
                'access_control': self._get_access_control(bucket),
                'public_access': self._get_public_access(bucket, iam_policy),
                'labels': labels
            })

            _name = bucket.get('name', '')
            bucket_data = Storage(bucket, strict=False)
            # labels -> tags
            bucket_resource = StorageResource({
                'name': _name,
                'tags': labels,
                'region_code': region.get('region_code'),
                'data': bucket_data,
                'reference': ReferenceModel(bucket_data.reference())
            })

            # Must call set_region_code for region collection.
            self.set_region_code(region.get('region_code'))
            collected_cloud_services.append(
                StorageResponse({'resource': bucket_resource}))

        print(f'** Storage Finished {time.time() - start_time} Seconds **')
        return collected_cloud_services
    def collect_cloud_service(self, params):
        """Collect Google BigQuery dataset (workspace) resources.

        For each dataset this fetches its tables/schemas, matches related
        projects and jobs, converts timestamps and labels, and wraps the
        result as a ``SQLWorkSpaceResponse``. Errors are printed and the
        partial result collected so far is returned (best-effort).

        Args:
            params (dict): must contain ``secret_data`` (with
                ``project_id``).

        Returns:
            list: ``SQLWorkSpaceResponse`` objects (possibly partial on
            error).
        """
        print("** Big Query START **")
        start_time = time.time()
        """
        Args:
            params:
                - options
                - schema
                - secret_data
                - filter
                - zones
        Response:
            CloudServiceResponse
        """

        collected_cloud_services = []

        try:
            secret_data = params['secret_data']
            project_id = secret_data['project_id']
            big_query_conn: BigQueryConnector = self.locator.get_connector(
                self.connector_name, **params)

            data_sets = big_query_conn.list_dataset()
            projects = big_query_conn.list_projects()
            jobs = big_query_conn.list_job()

            for data_set in data_sets:
                data_refer = data_set.get('datasetReference', {})
                data_set_id = data_refer.get('datasetId')
                dataset_project_id = data_refer.get('projectId')
                bq_dataset = big_query_conn.get_dataset(data_set_id)
                bq_dt_tables = big_query_conn.list_tables(data_set_id)
                update_bq_dt_tables, table_schemas = self._get_table_list_with_schema(
                    big_query_conn, bq_dt_tables)
                matched_projects = self._get_matching_project(
                    dataset_project_id, projects)
                matched_jobs = self._get_matching_jobs(data_set,
                                                       update_bq_dt_tables,
                                                       jobs)

                # BigQuery returns epoch milliseconds; convert to datetime.
                # NOTE(review): fromtimestamp() yields a naive local-time
                # datetime — confirm downstream expects local, not UTC.
                creation_time = bq_dataset.get('creationTime')
                if creation_time:
                    bq_dataset.update({
                        'creationTime':
                        datetime.fromtimestamp(int(creation_time) / 1000)
                    })

                last_modified_time = bq_dataset.get('lastModifiedTime')
                if last_modified_time:
                    bq_dataset.update({
                        'lastModifiedTime':
                        datetime.fromtimestamp(int(last_modified_time) / 1000)
                    })

                region = self.get_region(bq_dataset.get('location', ''))

                # Expiration values are in milliseconds; add human-readable
                # display fields when present.
                exp_partition_ms = bq_dataset.get(
                    'defaultPartitionExpirationMs')
                exp_table_ms = bq_dataset.get('defaultTableExpirationMs')

                if exp_partition_ms:
                    bq_dataset.update({
                        'default_partition_expiration_ms_display':
                        self.get_ms_display(exp_partition_ms)
                    })

                if exp_table_ms:
                    bq_dataset.update({
                        'default_table_expiration_ms_display':
                        self.get_ms_display(exp_table_ms)
                    })

                labels = self.convert_labels_format(
                    bq_dataset.get('labels', {}))
                bq_dataset.update({
                    'name':
                    data_set_id,
                    'project':
                    project_id,
                    'tables':
                    update_bq_dt_tables,
                    'table_schemas':
                    table_schemas,
                    'region':
                    region,
                    'visible_on_console':
                    self.get_visible_on_console(data_set_id),
                    'jobs':
                    matched_jobs,
                    'matching_projects':
                    matched_projects,
                    'labels':
                    labels
                })

                big_query_data = BigQueryWorkSpace(bq_dataset, strict=False)
                big_query_work_space_resource = SQLWorkSpaceResource({
                    'tags':
                    labels,
                    'data':
                    big_query_data,
                    'region_code':
                    region,
                    'reference':
                    ReferenceModel(big_query_data.reference())
                })

                self.set_region_code(region)
                collected_cloud_services.append(
                    SQLWorkSpaceResponse(
                        {'resource': big_query_work_space_resource}))

            print(
                f'** Big Query Finished {time.time() - start_time} Seconds **')

        # Deliberate best-effort boundary: log the error and return whatever
        # was collected before the failure.
        except Exception as e:
            print(e)
            pass

        return collected_cloud_services
    def collect_cloud_service(self, params):
        """Collect Google Cloud Instance Template resources.

        Enriches each template's ``properties`` (machine type, disks,
        network interfaces, scheduling, labels) and records which managed
        instance groups use it, then wraps each as an
        ``InstanceTemplateResponse``. Templates are global resources.

        Args:
            params (dict): must contain ``secret_data`` (with
                ``project_id``).

        Returns:
            list: ``InstanceTemplateResponse`` objects, one per template.
        """
        print("** Instance Template START **")
        start_time = time.time()
        """
        Args:
            params:
                - options
                - schema
                - secret_data
                - filter
                - zones
        Response:
            CloudServiceResponse
        """
        secret_data = params['secret_data']
        instance_template_conn: InstanceTemplateConnector = self.locator.get_connector(
            self.connector_name, **params)

        # Get Instance Templates
        instance_templates = instance_template_conn.list_instance_templates()
        instance_groups = instance_template_conn.list_instance_group_managers()
        machine_types = instance_template_conn.list_machine_types()
        collected_cloud_services = []

        for inst_template in instance_templates:
            properties = inst_template.get('properties', {})
            # NOTE: these are GCE network tags ({'items': [...]}), distinct
            # from the SpaceONE resource tags built from labels below.
            tags = properties.get('tags', {})

            in_used_by, matched_instance_group = self.match_instance_group(
                inst_template, instance_groups)
            disks = self.get_disks(properties)
            labels = self.convert_labels_format(properties.get('labels', {}))

            inst_template.update({
                'project':
                secret_data['project_id'],
                'in_used_by':
                in_used_by,
                'ip_forward':
                properties.get('canIpForward', False),
                'machine':
                MachineType(self._get_machine_type(properties, machine_types),
                            strict=False),
                'network_tags':
                tags.get('items', []),
                'scheduling':
                self._get_scheduling(properties),
                'disk_display':
                self._get_disk_type_display(disks, 'disk_type'),
                'image':
                self._get_disk_type_display(disks, 'source_image_display'),
                'instance_groups':
                matched_instance_group,
                'network_interfaces':
                self.get_network_interface(properties),
                'fingerprint':
                self._get_properties_item(properties, 'metadata',
                                          'fingerprint'),
                'labels':
                labels,
                'disks':
                disks
            })

            svc_account = properties.get('serviceAccounts', [])
            if len(svc_account) > 0:
                inst_template.update({
                    'service_account':
                    self._get_service_account(svc_account)
                })

            instance_template_data = InstanceTemplate(inst_template,
                                                      strict=False)
            # labels -> tags
            default_region = 'global'
            instance_template_resource = InstanceTemplateResource({
                'tags':
                labels,
                'data':
                instance_template_data,
                'reference':
                ReferenceModel(instance_template_data.reference()),
                'region_code':
                default_region
            })

            self.set_region_code(default_region)
            collected_cloud_services.append(
                InstanceTemplateResponse(
                    {'resource': instance_template_resource}))

        print(
            f'** Instance Template Finished {time.time() - start_time} Seconds **'
        )
        return collected_cloud_services
    def collect_cloud_service(self, params):
        print("** VmScaleSet START **")
        start_time = time.time()
        """
        Args:
            params:
                - options
                - schema
                - secret_data
                - filter
                - zones
                - subscription_info
        Response:
            CloudServiceResponse
        """
        secret_data = params['secret_data']
        subscription_info = params['subscription_info']

        vm_scale_set_conn: VmScaleSetConnector = self.locator.get_connector(
            self.connector_name, **params)
        vm_scale_sets = []
        for vm_scale_set in vm_scale_set_conn.list_vm_scale_sets():
            vm_scale_set_dict = self.convert_nested_dictionary(
                self, vm_scale_set)

            # update vm_scale_set_dict
            vm_scale_set_dict.update({
                'resource_group':
                self.get_resource_group_from_id(
                    vm_scale_set_dict['id']),  # parse resource_group from ID
                'subscription_id':
                subscription_info['subscription_id'],
                'subscription_name':
                subscription_info['subscription_name'],
            })

            if vm_scale_set_dict.get(
                    'proximity_placement_group'
            ):  # if it has a key -> get value -> check if it isn't None / if no 'Key' ->  return None
                vm_scale_set_dict.update({
                    'proximity_placement_group_display':
                    self.get_proximity_placement_group_name(
                        vm_scale_set_dict['proximity_placement_group']['id'])
                })

            # Get Instance termination notification display
            if vm_scale_set_dict.get('virtual_machine_profile') is not None:
                if vm_scale_set_dict['virtual_machine_profile'].get(
                        'scheduled_events_profile'):
                    if vm_scale_set.virtual_machine_profile.scheduled_events_profile.terminate_notification_profile.enable:
                        terminate_notification_display = 'On'
                    else:
                        terminate_notification_display = 'Off'
                    vm_scale_set_dict.update({
                        'terminate_notification_display':
                        terminate_notification_display
                    })

                # Convert disks' sku-dict to string display
                if vm_scale_set_dict['virtual_machine_profile'].get(
                        'storage_profile') is not None:
                    if vm_scale_set_dict['virtual_machine_profile'][
                            'storage_profile'].get('image_reference'):
                        image_reference_dict = vm_scale_set_dict[
                            'virtual_machine_profile']['storage_profile'][
                                'image_reference']
                        image_reference_str = \
                            str(image_reference_dict['publisher']) + " / " + str(image_reference_dict['offer']) + " / " + str(image_reference_dict['sku']) + " / " + str(image_reference_dict['version'])
                        vm_scale_set_dict['virtual_machine_profile'][
                            'storage_profile'].update({
                                'image_reference_display':
                                image_reference_str
                            })

                    # switch storage_account_type to storage_account_type for user-friendly words.
                    # (ex.Premium LRS -> Premium SSD, Standard HDD..)
                    if vm_scale_set_dict['virtual_machine_profile'][
                            'storage_profile'].get('data_disks'):
                        for data_disk in vm_scale_set_dict[
                                'virtual_machine_profile']['storage_profile'][
                                    'data_disks']:
                            data_disk['managed_disk'].update({
                                'storage_type':
                                self.get_disk_storage_type(
                                    data_disk['managed_disk']
                                    ['storage_account_type'])
                            })
                # Get VM Profile's operating_system type (Linux or Windows)
                if vm_scale_set_dict['virtual_machine_profile'].get(
                        'os_profile') is not None:
                    vm_scale_set_dict['virtual_machine_profile'][
                        'os_profile'].update({
                            'operating_system':
                            self.get_operating_system(
                                vm_scale_set_dict['virtual_machine_profile']
                                ['os_profile'])
                        })

                # Get VM Profile's primary Vnet\
                if vm_scale_set_dict['virtual_machine_profile'].get(
                        'network_profile') is not None:
                    vmss_vm_network_profile_dict = vm_scale_set_dict[
                        'virtual_machine_profile']['network_profile']
                    vmss_vm_network_profile_dict.update({
                        'primary_vnet':
                        self.get_primary_vnet(vmss_vm_network_profile_dict[
                            'network_interface_configurations'])
                    })

            # Add vm instances list attached to VMSS
            vm_instances_list = list()
            instance_count = 0
            for vm_instance in vm_scale_set_conn.list_vm_scale_set_vms(
                    vm_scale_set_dict['resource_group'],
                    vm_scale_set_dict['name']):
                instance_count += 1
                vm_scale_set_dict.update({'instance_count': instance_count})

                vm_instance_dict = self.get_vm_instance_dict(
                    self, vm_instance, vm_scale_set_conn,
                    vm_scale_set_dict['resource_group'],
                    vm_scale_set_dict['name'])
                vm_instances_list.append(vm_instance_dict)

            vm_scale_set_dict['vm_instances'] = vm_instances_list

            # Get auto scale settings by resource group and vm id
            vm_scale_set_dict.update({
                'autoscale_settings':
                self.list_auto_scale_settings_obj(
                    self, vm_scale_set_conn,
                    vm_scale_set_dict['resource_group'],
                    vm_scale_set_dict['id'])
            })

            # Set virtual_machine_scale_set_power_state information
            if vm_scale_set_dict.get('autoscale_settings') is not None:
                vm_scale_set_dict.update({
                    'virtual_machine_scale_set_power_state':
                    self.list_virtual_machine_scale_set_power_state(
                        self, vm_scale_set_dict['autoscale_settings']),
                })

            # update auto_scale_settings to autoscale_setting_resource_collection
            auto_scale_setting_resource_col_dict = dict()
            auto_scale_setting_resource_col_dict.update({
                'value':
                self.list_auto_scale_settings(
                    self, vm_scale_set_conn,
                    vm_scale_set_dict['resource_group'],
                    vm_scale_set_dict['id'])
            })

            vm_scale_set_dict.update({
                'autoscale_setting_resource_collection':
                auto_scale_setting_resource_col_dict
            })

            # switch tags form
            tags = vm_scale_set_dict.get('tags', {})
            _tags = self.convert_tag_format(tags)
            vm_scale_set_dict.update({'tags': _tags})

            # print("vm_scale_set_dict")
            # print(vm_scale_set_dict)

            vm_scale_set_data = VirtualMachineScaleSet(vm_scale_set_dict,
                                                       strict=False)
            vm_scale_set_resource = VmScaleSetResource({
                'data':
                vm_scale_set_data,
                'region_code':
                vm_scale_set_data.location,
                'reference':
                ReferenceModel(vm_scale_set_data.reference()),
                'tags':
                _tags,
                'name':
                vm_scale_set_data.name
            })

            # Must set_region_code method for region collection
            self.set_region_code(vm_scale_set_data['location'])
            vm_scale_sets.append(
                VmScaleSetResponse({'resource': vm_scale_set_resource}))

        print(f'** VmScaleSet Finished {time.time() - start_time} Seconds **')
        return vm_scale_sets
    def collect_cloud_service(self, params):
        """Collect OCI Autonomous Database resources for one region/compartment.

        Args:
            params (dict): Expected keys:
                - options
                - schema
                - secret_data
                - filter
                - region
                - compartment

        Returns:
            list[DatabaseResponse]: one response per autonomous database found.
        """
        secret_data = params['secret_data']
        region = params['region']
        compartment = params['compartment']

        # The connector expects the region inside the secret payload.
        secret_data.update({'region': region})
        adb_conn: AutonomousDatabaseConnector = self.locator.get_connector(
            self.connector_name, **params)
        autonomous_database_list = []
        adb_conn.set_connect(secret_data)
        basic_adb_list = adb_conn.list_of_autonomous_databases(
            params['compartment'])

        for basic_adb in basic_adb_list:
            # NOTE(review): helpers here are invoked with an explicit `self`
            # argument (self.method(self, ...)); presumably they are declared
            # without binding — confirm against their definitions.
            adb_raw = self.convert_nested_dictionary(self, basic_adb)
            adb_raw.update({
                'region': region,
                'compartment_name': compartment.name,
                '_freeform_tags':
                    self.convert_tags(adb_raw['_freeform_tags']),
                'db_workload_display':
                    self._set_workload_type(adb_raw['_db_workload']),
                # Normalize storage size to bytes before storing.
                'size': OCIManager.gigabyte_to_byte(
                    adb_raw['_data_storage_size_in_gbs']),
                '_data_storage_size_in_tbs':
                    self.gbs_to_tbs(adb_raw['_data_storage_size_in_gbs']),
                '_license_model':
                    self.define_license_type(adb_raw['_license_model']),
                '_permission_level':
                    self.define_permission_level(adb_raw['_permission_level']),
                'list_autonomous_backup': self._set_backup_list(
                    adb_conn.list_autonomous_database_backup(adb_raw['_id'])),
                'list_autonomous_database_clones': self._set_clone_list(
                    adb_conn.list_autonomous_database_clones(
                        adb_raw['_compartment_id'], adb_raw['_id']))
            })

            autonomous_db_data = Database(adb_raw, strict=False)
            autonomous_db_resource = DatabaseResource({
                'data': autonomous_db_data,
                'region_code': autonomous_db_data.region,
                'reference': ReferenceModel(autonomous_db_data.reference()),
                'tags': adb_raw['_freeform_tags'],
                'name': autonomous_db_data.display_name
            })
            autonomous_database_list.append(
                DatabaseResponse({'resource': autonomous_db_resource}))

        # Register the region for the region collection, but only when at
        # least one database was found in this region/compartment.
        if basic_adb_list:
            print(
                f"SET REGION CODE FROM AUTONOMOUS DB... {params.get('region')} // {params.get('compartment').name}"
            )
            self.set_region_code(region)

        return autonomous_database_list
# Beispiel #27 (example separator left over from scraping; the snippet
# that followed lost its enclosing `def` line)
# 0
            display_loc = { 'region': location, 'zone': ''} if loc_type == 'region' \
                else {'region': location[:-2], 'zone': location}

            instance_group.update({'display_location': display_loc})

            instance_group.update({
                'power_scheduler': scheduler,
                'instances': self.get_instances(instances),
                'instance_counts': len(instances)
            })
            # No labels
            instance_group_data = InstanceGroup(instance_group, strict=False)
            instance_group_resource = InstanceGroupResource({
                'data': instance_group_data,
                'region_code': region,
                'reference': ReferenceModel(instance_group_data.reference())
            })

            self.set_region_code(region)
            collected_cloud_services.append(InstanceGroupResponse({'resource': instance_group_resource}))

        print(f'** Instance Group Finished {time.time() - start_time} Seconds **')
        return collected_cloud_services

    def get_instance_group_loc(self, instance_group):
        """Return (location_type, location_name) for an instance group.

        Zonal groups carry a 'zone' key; anything else is regional.
        """
        if 'zone' in instance_group:
            location_type = 'zone'
        else:
            location_type = 'region'
        location = self._get_last_target(instance_group, location_type)
        return location_type, location

    def get_instances(self, instances):
        # NOTE(review): this body appears truncated by the source extraction —
        # it only initializes an accumulator and implicitly returns None.
        # The mapping/append logic that presumably followed is missing;
        # recover it from the original source before relying on this method.
        _instances = []
    def collect_cloud_service(self, params):
        """Collect OCI Exadata Cloud resources for one region/compartment.

        Args:
            params (dict): Expected keys:
                - options
                - schema
                - secret_data
                - filter
                - region
                - compartment

        Returns:
            list: mixed response objects — database software images, Exadata
            infrastructures, VM-cluster databases and their backups.
        """
        secret_data = params['secret_data']
        region = params['region']
        compartment = params['compartment']

        # The connector expects the region inside the secret payload.
        secret_data.update({'region': region})
        exa_conn: ExadataCloudDatabaseConnector = self.locator.get_connector(
            self.connector_name, **params)
        exa_conn.set_connect(secret_data)

        result = []
        basic_exadata_infra_list = exa_conn.list_cloud_exadata_infra(
            compartment)

        # Database software images are collected per compartment and emitted
        # alongside the infrastructure resources.
        exadata_image_list = exa_conn.list_database_images(compartment)
        result.extend(
            self.set_image_resource(exadata_image_list, region, compartment))

        for exadata_infra in basic_exadata_infra_list:
            exadata_infra_raw = self.convert_nested_dictionary(
                self, exadata_infra)
            list_vm_cluster = exa_conn.list_cloud_vm_cluster(
                region, compartment, exa_conn, exadata_infra_raw['_id'])
            # Display label used for both maintenance-run lookups.
            maintenance_label = (exadata_infra_raw['_display_name'] + ' - ' +
                                 exadata_infra_raw['_shape'])
            exadata_infra_raw.update({
                'region': region,
                'compartment_name': compartment.name,
                '_maintenance_window':
                    exa_conn.load_database_maintenance_windows(
                        exadata_infra_raw['_maintenance_window']),
                '_freeform_tags':
                    self.convert_tags(exadata_infra_raw['_freeform_tags']),
                'last_maintenance_run': exa_conn.load_maintenance_run(
                    exadata_infra_raw['_last_maintenance_run_id'],
                    maintenance_label),
                'next_maintenance_run': exa_conn.load_maintenance_run(
                    exadata_infra_raw['_next_maintenance_run_id'],
                    maintenance_label),
                'list_cloud_vm_cluster':
                    self.set_vm_cluster_data(region, compartment, exa_conn,
                                             list_vm_cluster)
            })
            # BUG FIX: the original subscripted the bound method
            # (`exadata_infra_raw.get['list_cloud_vm_cluster']`), which
            # raises TypeError at runtime; it must be called instead.
            exadata_vm_cluster_list = exadata_infra_raw.get(
                'list_cloud_vm_cluster')
            exadata_vm_database_list, exadata_vm_database_backup_list = \
                self.set_database_resource(exadata_vm_cluster_list, region)
            exadata_backup_response_list = self.set_backup_resource(
                exadata_vm_database_backup_list, region, compartment)

            exadata_infra_data = CloudExadataInfra(exadata_infra_raw,
                                                   strict=False)
            exadata_infra_resource = ExadataInfrastructureResource({
                'data': exadata_infra_data,
                'region_code': region,
                'reference': ReferenceModel(exadata_infra_data.reference()),
                'tags': exadata_infra_raw.get('_freeform_tags', []),
                'name': exadata_infra_data.display_name
            })
            result.append(
                ExadataInfrastructureResponse(
                    {'resource': exadata_infra_resource}))
            result.extend(exadata_vm_database_list)
            result.extend(exadata_backup_response_list)

        return result
# Beispiel #29 (example separator left over from scraping)
# 0
    def collect_cloud_service(self, params):
        """Collect Google Cloud BigQuery dataset resources.

        Args:
            params (dict): Expected keys:
                - options
                - schema
                - secret_data
                - filter
                - zones

        Returns:
            list[SQLWorkSpaceResponse]: one response per BigQuery dataset.
        """
        print("** Big Query START **")
        start_time = time.time()

        collected_cloud_services = []
        secret_data = params['secret_data']
        project_id = secret_data['project_id']
        big_query_conn: BigQueryConnector = self.locator.get_connector(
            self.connector_name, **params)

        data_sets = big_query_conn.list_dataset()
        projects = big_query_conn.list_projects()
        jobs = big_query_conn.list_job()

        for data_set in data_sets:
            data_refer = data_set.get('datasetReference', {})
            data_set_id = data_refer.get('datasetId')
            dataset_project_id = data_refer.get('projectId')

            bq_dataset = big_query_conn.get_dataset(data_set_id)
            bq_dt_tables = big_query_conn.list_tables(data_set_id)
            update_bq_dt_tables, table_schemas = \
                self._get_table_list_with_schema(big_query_conn, bq_dt_tables)
            matched_projects = self._get_matching_project(
                dataset_project_id, projects)
            matched_jobs = self._get_matching_jobs(update_bq_dt_tables, jobs)

            # BigQuery reports timestamps in epoch milliseconds; convert to
            # datetime objects for the model.
            creation_time = bq_dataset.get('creationTime')
            if creation_time:
                bq_dataset.update({
                    'creationTime':
                    datetime.fromtimestamp(int(creation_time) / 1000)
                })

            last_modified_time = bq_dataset.get('lastModifiedTime')
            if last_modified_time:
                bq_dataset.update({
                    'lastModifiedTime':
                    datetime.fromtimestamp(int(last_modified_time) / 1000)
                })

            region = self.get_region(bq_dataset.get('location', ''))

            # Human-readable displays for millisecond expiration settings.
            exp_partition_ms = bq_dataset.get('defaultPartitionExpirationMs')
            exp_table_ms = bq_dataset.get('defaultTableExpirationMs')

            if exp_partition_ms:
                bq_dataset.update({
                    'default_partition_expiration_ms_display':
                    self.get_ms_display(exp_partition_ms)
                })

            if exp_table_ms:
                bq_dataset.update({
                    'default_table_expiration_ms_display':
                    self.get_ms_display(exp_table_ms)
                })

            labels = self.convert_labels_format(bq_dataset.get('labels', {}))
            bq_dataset.update({
                'name': data_set_id,
                'project': project_id,
                'tables': update_bq_dt_tables,
                'table_schemas': table_schemas,
                'region': region,
                'visible_on_console':
                    self.get_visible_on_console(data_set_id),
                'jobs': matched_jobs,
                'matching_projects': matched_projects,
                'labels': labels
            })

            big_query_data = BigQueryWorkSpace(bq_dataset, strict=False)
            big_query_work_space_resource = SQLWorkSpaceResource({
                'tags': labels,
                'data': big_query_data,
                'region_code': region,
                'reference': ReferenceModel(big_query_data.reference())
            })

            # Must call set_region_code for the region collection.
            self.set_region_code(region)
            collected_cloud_services.append(
                SQLWorkSpaceResponse(
                    {'resource': big_query_work_space_resource}))

        print(f'** Big Query Finished {time.time() - start_time} Seconds **')
        return collected_cloud_services
# Beispiel #30 (example separator left over from scraping)
# 0
    def collect_cloud_service(self, params):
        """Collect Google Cloud Load Balancing resources.

        Load balancers are synthesized from three sources (target proxies,
        URL maps, target pools) and then enriched with forwarding rules,
        health checks, frontends and backends.

        Args:
            params (dict): Expected keys:
                - options
                - schema
                - secret_data
                - filter
                - zones

        Returns:
            list[LoadBalancingResponse]: one response per load balancer.
        """
        print("** Load Balancing START **")
        start_time = time.time()
        collected_cloud_services = []

        secret_data = params['secret_data']
        load_bal_conn: LoadBalancingConnector = self.locator.get_connector(
            self.connector_name, **params)

        project_id = secret_data.get('project_id')
        load_balancers = []

        instance_groups = load_bal_conn.list_instance_groups()
        target_pools = load_bal_conn.list_target_pools()
        url_maps = load_bal_conn.list_url_maps()
        forwarding_rules = load_bal_conn.list_forwarding_rules()
        backend_services = load_bal_conn.list_back_end_services()

        backend_buckets = load_bal_conn.list_back_end_buckets()
        ssl_certificates = load_bal_conn.list_ssl_certificates()
        auto_scalers = load_bal_conn.list_auto_scalers()
        health_checks = load_bal_conn.list_health_checks()

        # Legacy HTTP/HTTPS health checks live behind separate APIs.
        legacy_health_checks = []
        http_health_checks = load_bal_conn.list_http_health_checks()
        https_health_checks = load_bal_conn.list_https_health_checks()
        legacy_health_checks.extend(http_health_checks)
        legacy_health_checks.extend(https_health_checks)

        # Target proxies of every flavor.
        grpc_proxies = load_bal_conn.list_grpc_proxies()
        http_proxies = load_bal_conn.list_target_http_proxies()
        https_proxies = load_bal_conn.list_target_https_proxies()
        ssl_proxies = load_bal_conn.list_ssl_proxies()
        tcp_proxies = load_bal_conn.list_tcp_proxies()

        target_proxies, selective_proxies = self.get_all_proxy_list(
            grpc_proxies, http_proxies, https_proxies, ssl_proxies,
            tcp_proxies, forwarding_rules)

        lbs_from_proxy = self.get_load_balacer_from_target_proxy(
            backend_services, selective_proxies, project_id)
        lbs_from_url_map = self.get_load_balancer_from_url_maps(
            url_maps, backend_services, backend_buckets, project_id)
        lbs_from_target_pool = self.get_load_balancer_from_target_pools(
            target_pools, project_id)

        load_balancers.extend(lbs_from_proxy)
        load_balancers.extend(lbs_from_url_map)
        load_balancers.extend(lbs_from_target_pool)

        for load_balancer in load_balancers:
            lb_type = load_balancer.get('lb_type')
            # NOTE: 'heath_check_vos' (sic) is the key the upstream helpers
            # use; the misspelling is preserved for compatibility.
            health_checks_vo = load_balancer.get('heath_check_vos', {})
            health_self_links = health_checks_vo.get(
                'health_check_self_link_list', [])

            # Attach target proxies and their SSL certificates.
            if lb_type != 'target_proxy':
                matched_target_proxies, matched_certificates = \
                    self.get_matched_target_proxies(
                        load_balancer, target_proxies, ssl_certificates)
                load_balancer.update({
                    'target_proxies': matched_target_proxies,
                    'certificates': matched_certificates
                })

            # Attach forwarding rules.
            matched_forwarding_rules = self.get_matched_forwarding_rules(
                load_balancer, forwarding_rules)
            load_balancer.update(
                {'forwarding_rules': matched_forwarding_rules})

            # Attach health checks. The two original branches wrote the same
            # keys; they differed only in falling back to legacy checks when
            # some self-links were not found among the modern ones — fold
            # that into a single update.
            if len(health_self_links) > 0:
                filter_check_list = list(
                    set(health_checks_vo.get('health_check_list', [])))
                filter_check_self_link_list = list(
                    set(health_checks_vo.get('health_check_self_link_list',
                                             [])))
                matched_health_list = self._get_matched_health_checks(
                    filter_check_self_link_list, health_checks)
                if len(matched_health_list) != len(filter_check_list):
                    matched_health_list.extend(
                        self._get_matched_health_checks(
                            filter_check_self_link_list,
                            legacy_health_checks))
                load_balancer['heath_check_vos'].update({
                    'health_check_list': filter_check_list,
                    'health_check_self_link_list':
                        filter_check_self_link_list,
                    'health_checks': matched_health_list
                })

            # Attach frontends.
            frontends = self.get_front_from_loadbalancer(load_balancer)
            frontend_display = self._get_frontend_display(frontends)
            if len(frontends) > 0:
                load_balancer.update({
                    'frontends': frontends,
                    'frontend_display': frontend_display
                })

            # Attach backends; the shape depends on the LB flavor.
            backend_vo = {}
            if lb_type in ['target_pool']:
                backend_vo.update({
                    'type': 'target_pool',
                    'target_pool_backend':
                        self.get_backend_from_target_pools(
                            load_balancer, instance_groups)
                })
            elif lb_type in ['url_map', 'target_proxy']:
                key = 'proxy_backend' if lb_type == 'target_proxy' \
                    else 'url_map_backend'
                backends = self.get_backend_from_url_map_and_proxy(
                    load_balancer, instance_groups, auto_scalers)
                backend_vo.update({
                    'type':
                    'proxy' if lb_type == 'target_proxy' else 'url_map',
                    key: backends
                })

            load_balancer.update({'backends': backend_vo})

            # Backend tab and display summaries.
            backends_tab = self._get_backend_tabs(load_balancer)
            load_balancer.update({'backend_tabs': backends_tab})
            backend_display = self._get_backend_display(load_balancer)
            load_balancer.update({'backends_display': backend_display})

            # Lead protocol is derived from the frontend forwarding maps and
            # backend end-protocols; region from the backend service's
            # backends (see helper implementations).
            lead_protocol = self._get_lead_protocol(load_balancer)
            region = self._get_proper_region(load_balancer)
            load_balancer.update({
                'lead_protocol': lead_protocol,
                'region': region
            })
            refer_link = self._get_refer_link(load_balancer, project_id)
            load_balance_data = LoadBalancing(load_balancer, strict=False)

            lb_resource = LoadBalancingResource({
                'region_code': region,
                'data': load_balance_data,
                'reference':
                    ReferenceModel(load_balance_data.reference(refer_link))
            })

            # Must call set_region_code for the region collection.
            self.set_region_code(region)
            collected_cloud_services.append(
                LoadBalancingResponse({'resource': lb_resource}))

        print(
            f'** Load Balancing Finished {time.time() - start_time} Seconds **'
        )
        return collected_cloud_services