Example #1
 def __post_init_post_parse__(self):
     self._callback_fn = None
     if self.callback in (ROLLBACK_HANDLER, CONFIGURE_HANDLER):
         raise ConfigurationError(
             f'`{self.callback}` callback name is reserved')
     if self.callback and self.callback != pascal_to_snake(self.callback):
         raise ConfigurationError(
             '`callback` field must conform to snake_case naming style')
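
Note: `pascal_to_snake` is referenced above but not shown in this excerpt. A minimal sketch of what such a helper typically looks like (an assumption, not necessarily this project's exact implementation):

import re

def pascal_to_snake(name: str) -> str:
    # Hypothetical helper: insert an underscore before each inner capital, then lowercase
    return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower()

assert pascal_to_snake('OnRollback') == 'on_rollback'
assert pascal_to_snake('on_rollback') == 'on_rollback'  # snake_case input passes through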
Example #2
 def get_template(self, name: str) -> IndexConfigTemplateT:
     if not self.templates:
         raise ConfigurationError('`templates` section is missing')
     try:
         return self.templates[name]
     except KeyError as e:
         raise ConfigurationError(
             f'Template `{name}` not found in `templates` config section'
         ) from e
Example #3
 def get_datasource(self, name: str) -> DatasourceConfigT:
     try:
         return self.datasources[name]
     except KeyError as e:
         raise ConfigurationError(
             f'Datasource `{name}` not found in `datasources` config section'
         ) from e
Example #4
 def get_contract(self, name: str) -> ContractConfig:
     try:
         return self.contracts[name]
     except KeyError as e:
         raise ConfigurationError(
             f'Contract `{name}` not found in `contracts` config section'
         ) from e
Example #5
async def cli(ctx, config: List[str], logging_config: str):
    try:
        path = join(os.getcwd(), logging_config)
        _logging_config = LoggingConfig.load(path)
    except FileNotFoundError:
        path = join(dirname(__file__), 'configs', logging_config)
        _logging_config = LoggingConfig.load(path)
    _logging_config.apply()

    _config = DipDupConfig.load(config)
    if _config.spec_version not in spec_version_to_version:
        raise ConfigurationError('Unknown `spec_version`')
    if _config.spec_version != __spec_version__ and ctx.invoked_subcommand != 'migrate':
        migration_required(_config.spec_version, __spec_version__)

    if _config.sentry:
        sentry_sdk.init(
            dsn=_config.sentry.dsn,
            integrations=[AioHttpIntegration()],
        )

    ctx.obj = CLIContext(
        config_paths=config,
        config=_config,
        logging_config=_logging_config,
    )
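
Note: the decorators are omitted from this excerpt; `ctx.invoked_subcommand` implies a Click group callback. A minimal wiring sketch, assuming an async-capable Click wrapper such as `asyncclick` (the option names are illustrative guesses, not the project's actual flags):

import asyncclick as click

@click.group()
@click.option('--config', '-c', multiple=True, default=['dipdup.yml'])  # hypothetical flag
@click.option('--logging-config', '-l', default='logging.yml')          # hypothetical flag
@click.pass_context
async def cli(ctx, config, logging_config):
    ...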
Example #6
 def __post_init_post_parse__(self):
     StorageTypeMixin.__post_init_post_parse__(self)
     ParameterTypeMixin.__post_init_post_parse__(self)
     TransactionIdMixin.__post_init_post_parse__(self)
     if self.entrypoint and not self.destination:
         raise ConfigurationError(
             'Transactions with entrypoint must also have destination')
Example #7
    def load(
        cls,
        filenames: List[str],
    ) -> 'DipDupConfig':

        current_workdir = os.getcwd()

        json_config: Dict[str, Any] = {}
        for filename in filenames:
            filename = os.path.join(current_workdir, filename)

            _logger.info('Loading config from %s', filename)
            with open(filename) as file:
                raw_config = file.read()

            _logger.info('Substituting environment variables')
            for match in re.finditer(ENV_VARIABLE_REGEX, raw_config):
                # Treat a missing default group as an empty string to avoid
                # concatenating None when rebuilding the placeholder below
                variable, default_value = match.group(1), match.group(2) or ''
                value = env.get(variable)
                if not value and not default_value:
                    raise ConfigurationError(
                        f'Environment variable `{variable}` is not set')
                placeholder = '${' + variable + ':-' + default_value + '}'
                raw_config = raw_config.replace(placeholder, value or default_value)

            json_config = {
                **json_config,
                **YAML(typ='base').load(raw_config),
            }

        config = cls(**json_config)
        return config
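
Note: `ENV_VARIABLE_REGEX` is not defined in this excerpt. Judging by the placeholder reconstruction above (`'${' + variable + ':-' + default_value + '}'`), a compatible pattern could look like this (an assumption, shown for illustration only):

import re

ENV_VARIABLE_REGEX = r'\$\{(\w+):-(.*?)\}'  # hypothetical: matches `${VAR:-default}`

match = re.search(ENV_VARIABLE_REGEX, 'url: ${TZKT_URL:-https://api.tzkt.io}')
assert match.group(1) == 'TZKT_URL'
assert match.group(2) == 'https://api.tzkt.io'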
Example #8
    async def _execute_sql_scripts(self, reindex: bool) -> None:
        """Execute SQL included with project"""
        sql_path = join(self._config.package_path, 'sql')
        if not exists(sql_path):
            return
        if any(p not in ('on_reindex', 'on_restart') for p in listdir(sql_path)):
            raise ConfigurationError(
                f'SQL scripts must be placed either to `{self._config.package}/sql/on_restart` or to `{self._config.package}/sql/on_reindex` directory'
            )
        if not isinstance(self._config.database, PostgresDatabaseConfig):
            self._logger.warning(
                'Execution of user SQL scripts is supported on PostgreSQL only, skipping'
            )
            return

        sql_path = join(sql_path, 'on_reindex' if reindex else 'on_restart')
        if not exists(sql_path):
            return
        self._logger.info('Executing SQL scripts from `%s`', sql_path)
        for filename in sorted(listdir(sql_path)):
            if not filename.endswith('.sql'):
                continue

            with open(join(sql_path, filename)) as file:
                sql = file.read()

            self._logger.info('Executing `%s`', filename)
            await get_connection(None).execute_script(sql)
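
Reconstructed from the checks above, the expected on-disk layout is:

<package>/
└── sql/
    ├── on_restart/    # *.sql scripts, run when `reindex` is False
    └── on_reindex/    # *.sql scripts, run when `reindex` is True

Scripts are executed in lexicographic order, and only against PostgreSQL.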
Example #9
 def add_contract(self, name: str, address: str, typename: Optional[str] = None) -> None:
     if name in self.config.contracts:
         raise ConfigurationError(f'Contract `{name}` already exists')
     self.config.contracts[name] = ContractConfig(
         address=address,
         typename=typename,
     )
     self._updated = True
Example #10
 def add_index(self, name: str, template: str, values: Dict[str, Any]) -> None:
     if name in self.config.indexes:
         raise ConfigurationError(f'Index `{name}` already exists')
     self.config.get_template(template)
     self.config.indexes[name] = StaticTemplateConfig(
         template=template,
         values=values,
     )
     self._updated = True
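
A usage sketch for the two helpers above (the `ctx` object and all argument values are illustrative, not from the project):

# Hypothetical: register a contract, then spawn an index from a template
ctx.add_contract(name='registry', address='KT1...', typename='registry')
ctx.add_index(
    name='registry_index',
    template='registry_template',
    values={'contract': 'registry'},
)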
Example #11
    def _process_storage(
        self,
        storage_type: Type[StorageType],
        storage: Dict[str, Any],
        prefix: Optional[str] = None,
    ) -> Dict[str, Any]:
        for key, field in storage_type.__fields__.items():

            if key == '__root__':
                continue

            if field.alias:
                key = field.alias

            bigmap_name = key if prefix is None else f'{prefix}.{key}'

            # NOTE: TzKT can return big maps either as an object or as an array of key-value objects; we need to guess which from the storage.
            # TODO: This code should be a part of datasource module.
            try:
                value = storage[key]
            except KeyError as e:
                if not field.required:
                    continue
                raise ConfigurationError(
                    f'Type `{storage_type.__name__}` is invalid: `{key}` field does not exist'
                ) from e

            # FIXME: Pydantic bug. `BaseModel.type_` returns incorrect value when annotation is Dict[str, bool]
            if field.type_ != field.outer_type_ and field.type_ == bool:
                annotation = field.outer_type_
            else:
                annotation = field.type_

            if annotation not in (int, bool) and isinstance(value, int):
                # NOTE: A bare integer here is a big map pointer; replace it with
                # an empty container of the matching shape before merging diffs
                is_array = (
                    hasattr(annotation, '__fields__')
                    and 'key' in annotation.__fields__
                    and 'value' in annotation.__fields__
                )
                storage[key] = [] if is_array else {}
                if self.diffs:
                    self._merge_bigmapdiffs(storage, bigmap_name, array=is_array)
            elif hasattr(annotation, '__fields__') and isinstance(storage[key], dict):
                storage[key] = self._process_storage(annotation, storage[key], bigmap_name)

        return storage
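
To make the guessing logic concrete: TzKT may serialize a big map as a bare integer pointer, which `_process_storage` replaces with an empty container before merging diffs. An illustration of the data shapes (not real TzKT output):

# Big map arrives as a pointer:
storage = {'ledger': 123}
# After processing, `ledger` becomes {} (or [] when the annotation has both
# `key` and `value` fields), and big map diffs are merged into it.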
Example #12
async def migrate(ctx):
    def _bump_spec_version(spec_version: str):
        for config_path in ctx.obj.config_paths:
            for line in fileinput.input(config_path, inplace=True):
                if 'spec_version' in line:
                    print(f'spec_version: {spec_version}')
                else:
                    print(line.rstrip())

    config: DipDupConfig = ctx.obj.config
    config.pre_initialize()

    if config.spec_version == __spec_version__:
        _logger.error('Project is already at latest version')
    elif config.spec_version == '0.1':
        await DipDup(config).migrate_to_v10()
        _bump_spec_version('1.0')
    elif config.spec_version == '1.0':
        await DipDup(config).migrate_to_v11()
        _bump_spec_version('1.1')
    else:
        raise ConfigurationError('Unknown `spec_version`')
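
Note: `fileinput.input(..., inplace=True)` redirects `print` output back into the file being read, which is what lets `_bump_spec_version` rewrite config files line by line. The same pattern in isolation (the file name is illustrative):

import fileinput

for line in fileinput.input('dipdup.yml', inplace=True):
    if 'spec_version' in line:
        print('spec_version: 1.1')  # replaces the matched line
    else:
        print(line.rstrip())        # everything else is written back unchanged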
Example #13
 def get_tzkt_datasource(self, name: str) -> TzktDatasourceConfig:
     datasource = self.get_datasource(name)
     if not isinstance(datasource, TzktDatasourceConfig):
         raise ConfigurationError(
             '`datasource` field must refer to TzKT datasource')
     return datasource
Example #14
 def valid_url(cls, v):
     parsed_url = urlparse(v)
     if not (parsed_url.scheme and parsed_url.netloc):
         raise ConfigurationError(f'`{v}` is not a valid Hasura URL')
     return v
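
The validator relies on `urllib.parse.urlparse` populating both `scheme` and `netloc`, which only happens when the URL includes `scheme://`:

from urllib.parse import urlparse

assert urlparse('http://hasura:8080').scheme == 'http'
assert urlparse('http://hasura:8080').netloc == 'hasura:8080'
# Without a scheme the whole string lands in `path`, so validation fails:
assert urlparse('hasura').netloc == ''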
Example #15
    async def fetch_schemas(self) -> None:
        """Fetch JSONSchemas for all contracts used in config"""
        self._logger.info('Creating `schemas` package')
        schemas_path = join(self._config.package_path, 'schemas')
        with suppress(FileExistsError):
            mkdir(schemas_path)

        for index_config in self._config.indexes.values():

            if isinstance(index_config, OperationIndexConfig):
                for operation_handler_config in index_config.handlers:
                    for operation_pattern_config in operation_handler_config.pattern:

                        if (isinstance(
                                operation_pattern_config,
                                OperationHandlerTransactionPatternConfig)
                                and operation_pattern_config.entrypoint):
                            contract_config = operation_pattern_config.destination_contract_config
                            originated = False
                        elif isinstance(
                                operation_pattern_config,
                                OperationHandlerOriginationPatternConfig):
                            contract_config = operation_pattern_config.contract_config
                            originated = bool(operation_pattern_config.source)
                        else:
                            continue

                        self._logger.debug(contract_config)
                        contract_schemas = await self._get_schema(
                            index_config.datasource_config, contract_config,
                            originated)

                        contract_schemas_path = join(
                            schemas_path, contract_config.module_name)
                        with suppress(FileExistsError):
                            mkdir(contract_schemas_path)

                        storage_schema_path = join(contract_schemas_path,
                                                   'storage.json')

                        storage_schema = resolve_big_maps(
                            contract_schemas['storageSchema'])
                        if not exists(storage_schema_path):
                            with open(storage_schema_path, 'w') as file:
                                file.write(
                                    json.dumps(storage_schema,
                                               indent=4,
                                               sort_keys=True))

                        if not isinstance(
                                operation_pattern_config,
                                OperationHandlerTransactionPatternConfig):
                            continue

                        parameter_schemas_path = join(contract_schemas_path,
                                                      'parameter')
                        with suppress(FileExistsError):
                            mkdir(parameter_schemas_path)

                        try:
                            entrypoint_schema = next(
                                ep['parameterSchema']
                                for ep in contract_schemas['entrypoints']
                                if ep['name'] ==
                                operation_pattern_config.entrypoint)
                        except StopIteration as e:
                            raise ConfigurationError(
                                f'Contract `{contract_config.address}` has no entrypoint `{operation_pattern_config.entrypoint}`'
                            ) from e

                        entrypoint_schema_path = join(
                            parameter_schemas_path,
                            f'{operation_pattern_config.entrypoint}.json')

                        if not exists(entrypoint_schema_path):
                            with open(entrypoint_schema_path, 'w') as file:
                                file.write(
                                    json.dumps(entrypoint_schema, indent=4))
                        elif contract_config.typename is not None:
                            with open(entrypoint_schema_path, 'r') as file:
                                existing_schema = json.loads(file.read())
                            if entrypoint_schema != existing_schema:
                                self._logger.warning(
                                    'Contract "%s" falsely claims to be a "%s"',
                                    contract_config.address,
                                    contract_config.typename)

            elif isinstance(index_config, BigMapIndexConfig):
                for big_map_handler_config in index_config.handlers:
                    contract_config = big_map_handler_config.contract_config

                    contract_schemas = await self._get_schema(
                        index_config.datasource_config, contract_config, False)

                    contract_schemas_path = join(schemas_path,
                                                 contract_config.module_name)
                    with suppress(FileExistsError):
                        mkdir(contract_schemas_path)

                    big_map_schemas_path = join(contract_schemas_path,
                                                'big_map')
                    with suppress(FileExistsError):
                        mkdir(big_map_schemas_path)

                    try:
                        big_map_schema = next(
                            ep for ep in contract_schemas['bigMaps']
                            if ep['path'] == big_map_handler_config.path)
                    except StopIteration as e:
                        raise ConfigurationError(
                            f'Contract `{contract_config.address}` has no big map path `{big_map_handler_config.path}`'
                        ) from e
                    big_map_key_schema = big_map_schema['keySchema']
                    big_map_key_schema_path = join(
                        big_map_schemas_path,
                        f'{big_map_handler_config.path}.key.json')

                    if not exists(big_map_key_schema_path):
                        with open(big_map_key_schema_path, 'w') as file:
                            file.write(json.dumps(big_map_key_schema,
                                                  indent=4))

                    big_map_value_schema = big_map_schema['valueSchema']
                    big_map_value_schema_path = join(
                        big_map_schemas_path,
                        f'{big_map_handler_config.path}.value.json')

                    if not exists(big_map_value_schema_path):
                        with open(big_map_value_schema_path, 'w') as file:
                            file.write(
                                json.dumps(big_map_value_schema, indent=4))

            elif isinstance(index_config, StaticTemplateConfig):
                raise RuntimeError('Config is not pre-initialized')

            else:
                raise NotImplementedError(
                    f'Index kind `{index_config.kind}` is not supported')
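
The resulting `schemas` layout, reconstructed from the paths built above:

<package>/schemas/
└── <contract module>/
    ├── storage.json
    ├── parameter/
    │   └── <entrypoint>.json
    └── big_map/
        ├── <path>.key.json
        └── <path>.value.json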
Example #16
 def validate(self) -> None:
     if isinstance(self.database, SqliteDatabaseConfig) and self.hasura:
         raise ConfigurationError(
             'SQLite DB engine is not supported by Hasura')
Example #17
async def configure_hasura(config: DipDupConfig):
    """Generate Hasura metadata and apply to instance with credentials from `hasura` config section."""

    if config.hasura is None:
        raise ConfigurationError('`hasura` config section is missing')
    if not isinstance(config.database, PostgresDatabaseConfig):
        raise RuntimeError

    _logger.info('Configuring Hasura')
    url = config.hasura.url.rstrip("/")
    views = [
        row[0] for row in (await get_connection(None).execute_query(
            f"SELECT table_name FROM information_schema.views WHERE table_schema = '{config.database.schema_name}'"
        ))[1]
    ]

    hasura_metadata = await generate_hasura_metadata(config, views)

    async with aiohttp.ClientSession() as session:
        _logger.info('Waiting for Hasura instance to be healthy')
        for _ in range(60):
            with suppress(ClientConnectorError, ClientOSError):
                response = await session.get(f'{url}/healthz')
                if response.status == 200:
                    break
            await asyncio.sleep(1)
        else:
            raise HasuraError('Hasura instance not responding for 60 seconds')

        headers = {}
        if config.hasura.admin_secret:
            headers['X-Hasura-Admin-Secret'] = config.hasura.admin_secret

        _logger.info('Fetching existing metadata')
        existing_hasura_metadata = await http_request(
            session,
            'post',
            url=f'{url}/v1/query',
            data=json.dumps({
                "type": "export_metadata",
                "args": {},
            }),
            headers=headers,
        )

        _logger.info('Merging existing metadata')
        hasura_metadata_tables = [
            table['table'] for table in hasura_metadata['tables']
        ]
        for table in existing_hasura_metadata['tables']:
            if table['table'] not in hasura_metadata_tables:
                hasura_metadata['tables'].append(table)

        _logger.info('Sending replace metadata request')
        result = await http_request(
            session,
            'post',
            url=f'{url}/v1/query',
            data=json.dumps({
                "type": "replace_metadata",
                "args": hasura_metadata,
            }),
            headers=headers,
        )
        if result.get('message') != 'success':
            raise HasuraError('Can\'t configure Hasura instance', result)

        _logger.info('Hasura instance has been configured')
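
Note: the health check above uses Python's `for ... else`: the `else` branch runs only when the loop finishes without `break`, i.e. after all 60 attempts fail. The same pattern in isolation (`check` is a hypothetical predicate):

for attempt in range(60):
    if check():
        break
else:
    raise TimeoutError('no success after 60 attempts')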
Example #18
 def valid_address(cls, v):
     # NOTE: Wallet addresses are allowed for debugging purposes (source field). Do we need a separate section?
     if not v.startswith(('KT1', 'tz1')) or len(v) != 36:
         raise ConfigurationError(f'`{v}` is not a valid contract address')
     return v
Example #19
 def valid_immune_tables(cls, v):
     if v and 'dipdup_state' in v:
         raise ConfigurationError('`dipdup_state` table can\'t be immune')
     return v