Example #1
def get_api_status(api_name: str,
                   stage: str,
                   region: str,
                   logger: logging.Logger = None) -> dict:
    """
    Method to return the status of an API Namespace for the specified Stage in a Region.

    :param api_name: The API Namespace to get the status of
    :param stage: The Stage to query for the Namespace status
    :param region: The AWS Region in which the Stage is provisioned
    :return: dict:
        Status: The Status of the API Namespace in the stage
    """
    global log
    if logger is None:
        log = utils.setup_logging()
    else:
        log = logger

    api_metadata_handler = ApiMetadata(region, log)
    s = "Status"
    return {
        s:
        api_metadata_handler.get_api_metadata(api_name=api_name,
                                              stage=stage).get(s)
    }
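A minimal usage sketch; the namespace, stage, and region values are placeholders, and the function's utils and ApiMetadata dependencies are assumed to be importable from the surrounding module:

# Hedged usage sketch: all argument values below are illustrative.
import logging

logging.basicConfig(level=logging.INFO)
status = get_api_status(api_name="MyNamespace",
                        stage="dev",
                        region="eu-west-1",
                        logger=logging.getLogger("status-check"))
print(status.get("Status"))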
Example #2
    def __init__(self, dialect: str):
        if dialect not in [DIALECT_MYSQL, DIALECT_PG]:
            raise exceptions.InvalidArgumentsException(
                f"Unknown Dialect {dialect}")
        self._dialect = dialect

        self._logger = utils.setup_logging()

        # load the sql statement helper
        fragment_path = os.path.join(os.path.dirname(__file__),
                                     f'sql_fragments_{self._dialect}.json')
        with open(fragment_path, 'r') as f:
            self._sql_helper = json.load(f)
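The constructor expects a sql_fragments_<dialect>.json file next to the module. A sketch of what such a file might contain, shown as the dict json.load would produce; the keys and statements are hypothetical, not taken from the source:

# Hypothetical contents of sql_fragments_mysql.json; keys and statement
# templates are illustrative only.
sql_fragments = {
    "select_by_id": "select * from {table} where {pk} = %s",
    "delete_by_id": "delete from {table} where {pk} = %s"
}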
Example #3
    def __init__(self, logger, region):
        if region is None:
            raise Exception(
                "Cannot instantiate DynamoTableUtils without Region")

        self._dynamo_client = boto3.client('dynamodb', region_name=region)
        self._dynamo_resource = boto3.resource('dynamodb',
                                               region_name=region)

        self._control_table = self._dynamo_resource.Table(params.CONTROL_TABLE)
        if logger is not None:
            self._log = logger
        else:
            self._log = utils.setup_logging()
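A construction sketch, assuming the class is named DynamoTableUtils as its own error message indicates; the region value is a placeholder:

# Hedged usage sketch: passing logger=None falls back to utils.setup_logging().
table_utils = DynamoTableUtils(logger=None, region="us-east-1")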
Example #4
    def __init__(self, deployment_stage, search_config):
        # use a stage-qualified logger name outside of prod
        if deployment_stage.lower() != 'prod':
            streams_integration_logger = f'SearchIntegration-{deployment_stage}'
        else:
            streams_integration_logger = 'SearchIntegration'
        self._logger = utils.setup_logging(streams_integration_logger)
        self._deployment_stage = deployment_stage

        if self._es_client is None:
            self._logger.info("Setting up new ElasticSearch and Firehose clients")
            self._es_client = boto3.client('es', region_name=REGION)
            self._fh_client = boto3.client('firehose', region_name=REGION)

            # create a reference to the api control table in ddb so we can pull metadata
            dynamo_helper, self._api_control_table = utils.get_api_control_table(REGION, self._logger)

            # create the source/dest mapping table
            for t in [params.RESOURCE, params.METADATA]:
                config = search_config.get('DeliveryStreams')[t]
                self._dest_mapping[config['SourceStreamARN']] = config['DestinationDeliveryStreamARN']
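A sketch of the SearchConfig shape the loop above appears to expect; the structure is inferred from the DeliveryStreams reads, the ARNs are placeholders, and the exact values of params.RESOURCE and params.METADATA come from the surrounding module:

# Hypothetical configuration shape; all ARN values are placeholders.
search_config = {
    "DeliveryStreams": {
        params.RESOURCE: {
            "SourceStreamARN": "arn:aws:dynamodb:us-east-1:123456789012:table/MyTable/stream/...",
            "DestinationDeliveryStreamARN": "arn:aws:firehose:us-east-1:123456789012:deliverystream/resource-dest"
        },
        params.METADATA: {
            "SourceStreamARN": "arn:aws:dynamodb:us-east-1:123456789012:table/MyTable_Metadata/stream/...",
            "DestinationDeliveryStreamARN": "arn:aws:firehose:us-east-1:123456789012:deliverystream/metadata-dest"
        }
    }
}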
Example #5
        cognito_authorizer = CognitoUserPoolAuthorizer(cog_pool_name, provider_arns=cog_provider_arns.split(','))
    else:
        print("Unable to configure Cognito Authorizer without %s and %s configuration items" % params.COGNITO_POOL_NAME,
              params.COGNITO_PROVIDER_ARNS)
elif set_authorizer == params.AUTHORIZER_CUSTOM:
    # custom authorizers are not wired up here, so fall back to no authorizer
    use_authorizer = None
else:
    use_authorizer = None

if use_authorizer is None:
    print("Stage deployed without Authorizer")
else:
    print("Using Authorizer %s" % set_authorizer.__name__)

# setup class logger
log = utils.setup_logging()

# create an API Metadata Handler
api_metadata_handler = ApiMetadata(REGION, log)

# create a cache of all API references tracked by this deployment stage
api_cache = DataApiCache(app=app, stage=STAGE, region=REGION, logger=log)

# create the streams integration handler, which is used by the lambda function embedded at the end of this app
es_indexer = None

# module level settings used as flags for lazy initialisers in functions
search_flow_verified = False

# load the cors config
cors_config = None
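Downstream, the resolved use_authorizer would typically be attached to individual routes. A hedged sketch with Chalice, whose app object is referenced by DataApiCache above; the route path and handler are illustrative:

# Hypothetical route registration; Chalice accepts authorizer=None,
# which matches the "deployed without Authorizer" case above.
@app.route('/status', methods=['GET'], authorizer=use_authorizer)
def get_status():
    return {"Status": "Online"}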
Example #6
    def __init__(self, **kwargs):
        self._region = kwargs.get(params.REGION, os.getenv('AWS_REGION'))

        self._full_config = kwargs
        self._api_name = kwargs.get(params.API_NAME_PARAM)
        self._table_name = kwargs.get(params.STORAGE_TABLE)

        # setup instance logger
        self._logger = utils.setup_logging(params.AWS_DATA_API_NAME)
        global log
        log = self._logger

        self._logger.debug("Constructing new Data API with Args")
        self._logger.debug(kwargs)

        # create the API metadata handler
        self._api_metadata_handler = ApiMetadata(
            self._region, self._logger, kwargs.get(params.KMS_KEY_ARN))

        # Load class properties from any supplied metadata. These will be populated when hydrating an existing API
        # namespace from DynamoDB
        self._app = kwargs.get(params.APP, None)
        self._deployment_stage = kwargs.get(params.STAGE)
        self._pk_name = kwargs.get(params.PRIMARY_KEY, None)
        self._delete_mode = kwargs.get(params.DELETE_MODE,
                                       params.DEFAULT_DELETE_MODE)
        self._allow_runtime_delete_mode_change = kwargs.get(
            params.ALLOW_RUNTIME_DELETE_MODE_CHANGE,
            params.DEFAULT_ALLOW_RUNTIME_DELETE_MODE_CHANGE)
        self._crawler_rolename = kwargs.get(params.CRAWLER_ROLENAME, None)
        self._table_indexes = kwargs.get(params.TABLE_INDEXES, None)
        self._metadata_indexes = kwargs.get(params.METADATA_INDEXES, None)
        self._schema_validation_refresh_hitcount = kwargs.get(
            params.SCHEMA_VALIDATION_REFRESH_HITCOUNT,
            params.DEFAULT_SCHEMA_VALIDATION_REFRESH_HITCOUNT)
        self._gremlin_address = kwargs.get(params.GREMLIN_ADDRESS, None)
        self._allow_non_itemmaster_writes = kwargs.get(
            params.NON_ITEM_MASTER_WRITES_ALLOWED,
            params.DEFAULT_NON_ITEM_MASTER_WRITE_ALLOWED)
        self._strict_occv = kwargs.get(params.STRICT_OCCV,
                                       params.DEFAULT_STRICT_OCCV)
        self._catalog_database = kwargs.get(params.CATALOG_DATABASE,
                                            params.DEFAULT_CATALOG_DATABASE)

        # setup the storage handler which implements the backend data api functionality
        storage_args = kwargs

        resource_schema = self._api_metadata_handler.get_schema(
            api_name=self._api_name,
            stage=self._deployment_stage,
            schema_type=params.RESOURCE)
        if resource_schema is not None:
            storage_args[params.CONTROL_TYPE_RESOURCE_SCHEMA] = resource_schema

        metadata_schema = self._api_metadata_handler.get_schema(
            api_name=self._api_name,
            stage=self._deployment_stage,
            schema_type=params.METADATA)
        if metadata_schema is not None:
            storage_args[params.CONTROL_TYPE_METADATA_SCHEMA] = metadata_schema

        storage_args["table_name"] = self._table_name
        storage_args["primary_key_attribute"] = self._pk_name
        storage_args["region"] = self._region
        storage_args["delete_mode"] = self._delete_mode
        storage_args[
            "allow_runtime_delete_mode_change"] = self._allow_runtime_delete_mode_change
        storage_args["table_indexes"] = self._table_indexes
        storage_args["metadata_indexes"] = self._metadata_indexes
        storage_args[
            "schema_validation_refresh_hitcount"] = self._schema_validation_refresh_hitcount
        storage_args["crawler_rolename"] = self._crawler_rolename
        storage_args["catalog_database"] = self._catalog_database
        storage_args[
            "allow_non_itemmaster_writes"] = self._allow_non_itemmaster_writes
        storage_args["strict_occv"] = self._strict_occv
        storage_args["deployed_account"] = kwargs.get(params.DEPLOYED_ACCOUNT,
                                                      None)
        storage_args["handler_name"] = kwargs[params.STORAGE_HANDLER]
        storage_args["pitr_enabled"] = utils.strtobool(
            kwargs.get(params.PITR_ENABLED, params.DEFAULT_PITR_ENABLED))
        storage_args["kms_key_arn"] = kwargs.get(params.STORAGE_CRYPTO_KEY_ARN,
                                                 None)

        self._storage_handler = self._get_storage_handler(**storage_args)

        # setup the gremlin integration if one has been provided
        if self._gremlin_address is not None:
            log.info(
                f"Binding new Gremlin Handler to address {self._gremlin_address}"
            )
            # the gremlin address is expected in host:port form
            tokens = self._gremlin_address.split(":")
            self._gremlin_endpoint = GremlinHandler(url=tokens[0],
                                                    port=tokens[1])

        if "SearchConfig" in kwargs:
            self._search_config = kwargs.get("SearchConfig")

        log.info(
            f"AWS Data API for {self._catalog_database}.{self._table_name} Online."
        )
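A hedged construction sketch for this class; the name DataAPI is an assumption, the dictionary keys mirror the params constants read above, and every value is a placeholder:

# Hypothetical instantiation; params.STORAGE_HANDLER is required because the
# constructor indexes kwargs[params.STORAGE_HANDLER] directly.
data_api = DataAPI(**{
    params.REGION: "us-east-1",
    params.API_NAME_PARAM: "MyNamespace",
    params.STAGE: "dev",
    params.STORAGE_TABLE: "mynamespace_dev",
    params.PRIMARY_KEY: "id",
    params.STORAGE_HANDLER: "dynamo"
})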
Example #7
    def __init__(self,
                 table_name,
                 primary_key_attribute,
                 region,
                 delete_mode,
                 allow_runtime_delete_mode_change,
                 table_indexes,
                 metadata_indexes,
                 schema_validation_refresh_hitcount,
                 crawler_rolename,
                 catalog_database,
                 allow_non_itemmaster_writes,
                 strict_occv,
                 deployed_account,
                 pitr_enabled=None,
                 kms_key_arn=None,
                 logger=None,
                 **kwargs):
        # setup class logger
        if logger is None:
            self._logger = utils.setup_logging()
        else:
            self._logger = logger

        self._logger.debug(
            "Creating new RDBMS Storage Handler with Properties:")
        self._logger.debug(kwargs)

        global log
        log = self._logger

        validate_params(**kwargs)

        # validate engine type
        self._engine_type = RdbmsEngineType(kwargs.get(params.RDBMS_DIALECT))

        # setup foundation properties
        self._region = region
        self._resource_table_name = table_name.lower()
        self._logger.debug(f"Resource Table {self._resource_table_name}")

        # allow override of the metadata table name
        if params.OVERRIDE_METADATA_TABLENAME in kwargs:
            self._metadata_table_name = kwargs.get(
                params.OVERRIDE_METADATA_TABLENAME)
        else:
            self._metadata_table_name = f"{self._resource_table_name}_{params.METADATA}".lower(
            )

        self._pk_name = primary_key_attribute
        self._logger.debug(f"Primary Key {self._pk_name}")
        self._deployed_account = deployed_account
        self._crawler_rolename = crawler_rolename
        self._catalog_database = catalog_database
        self._delete_mode = delete_mode

        # resolve connection details
        self._cluster_address = kwargs.get(params.CLUSTER_ADDRESS)
        self._cluster_port = kwargs.get(params.CLUSTER_PORT)
        self._cluster_user = kwargs.get(params.DB_USERNAME)
        self._cluster_db = kwargs.get(params.DB_NAME)
        self._cluster_pstore = kwargs.get(params.DB_USERNAME_PSTORE_ARN)
        self._ssl = kwargs.get(params.DB_USE_SSL)

        # pick up schemas to push table structure
        self._resource_schema = kwargs.get(params.CONTROL_TYPE_RESOURCE_SCHEMA)
        self._metadata_schema = kwargs.get(params.CONTROL_TYPE_METADATA_SCHEMA)

        # create schema validators
        if self._resource_schema is not None:
            self._resource_validator = fastjsonschema.compile(
                self._resource_schema)
        else:
            raise exceptions.InvalidArgumentsException(
                "Relational Storage Handler requires a JSON Schema to initialise"
            )

        if self._metadata_schema is not None:
            self._metadata_validator = fastjsonschema.compile(
                self._metadata_schema)
        else:
            # ensure the attribute exists for the metadata verification check below
            self._metadata_validator = None

        if self._cluster_pstore is None:
            raise exceptions.InvalidArgumentsException(
                "Unable to connect to Target Cluster Database without SSM Parameter Store Password ARN"
            )

        # extract the password from ssm
        _pwd = utils.get_encrypted_parameter(
            parameter_name=self._cluster_pstore, region=self._region)

        # connect to the database
        self._db_conn = self._engine_type.get_connection(
            cluster_user=self._cluster_user,
            cluster_address=self._cluster_address,
            cluster_port=self._cluster_port,
            database=self._cluster_db,
            pwd=_pwd,
            ssl=self._ssl)

        self._logger.info(
            f"Connected to {self._cluster_address}:{self._cluster_port} as {self._cluster_user}"
        )

        # verify that the resource table, indexes, and catalog registry exist
        self._engine_type.verify_table(conn=self._db_conn,
                                       table_ref=self._resource_table_name,
                                       table_schema=self._resource_schema,
                                       pk_name=self._pk_name)
        self._engine_type.verify_indexes(self._db_conn,
                                         self._resource_table_name,
                                         table_indexes)
        self._verify_catalog(self._resource_table_name, **kwargs)

        # verify that the metadata table, indexes, and catalog registry exist
        if self._metadata_validator is not None:
            self._logger.debug(f"Metadata Table {self._metadata_table_name}")
            self._engine_type.verify_table(conn=self._db_conn,
                                           table_ref=self._metadata_table_name,
                                           table_schema=self._metadata_schema,
                                           pk_name=self._pk_name)
            self._engine_type.verify_indexes(self._db_conn,
                                             self._metadata_table_name,
                                             metadata_indexes)
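A hedged construction sketch for this handler; the class name RdbmsStorageHandler is an assumption and every value below is a placeholder. Note that the constructor raises InvalidArgumentsException without a resource JSON Schema and an SSM Parameter Store password ARN:

# Hypothetical instantiation with placeholder values throughout.
handler = RdbmsStorageHandler(
    table_name="MyNamespace",
    primary_key_attribute="id",
    region="us-east-1",
    delete_mode=params.DEFAULT_DELETE_MODE,
    allow_runtime_delete_mode_change=False,
    table_indexes=None,
    metadata_indexes=None,
    schema_validation_refresh_hitcount=10,
    crawler_rolename=None,
    catalog_database=params.DEFAULT_CATALOG_DATABASE,
    allow_non_itemmaster_writes=False,
    strict_occv=False,
    deployed_account=None,
    **{
        params.RDBMS_DIALECT: DIALECT_PG,
        params.CLUSTER_ADDRESS: "cluster.example.us-east-1.rds.amazonaws.com",
        params.CLUSTER_PORT: 5432,
        params.DB_USERNAME: "api_user",
        params.DB_NAME: "mydb",
        params.DB_USE_SSL: True,
        params.DB_USERNAME_PSTORE_ARN: "arn:aws:ssm:us-east-1:123456789012:parameter/db-pwd",
        params.CONTROL_TYPE_RESOURCE_SCHEMA: {"type": "object"}
    })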