async def close_connnection_pool(self, **kwargs) -> None:
    # Note: the misspelt name is intentional - this is the deprecated alias.
    colored_warning(
        "`close_connnection_pool` is a typo - please change it to "
        "`close_connection_pool`.",
        category=DeprecationWarning,
    )
    return await self.close_connection_pool()
def get_piccolo_conf_module(
    self, module_name: t.Optional[str] = None
) -> t.Optional[PiccoloConfModule]:
    """
    Searches the path for a 'piccolo_conf.py' module to import. The
    location searched can be overridden by:

    * Explicitly passing a module name into this method.
    * Setting the PICCOLO_CONF environment variable.

    An example override is 'my_folder.piccolo_conf'.
    """
    env_module_name = os.environ.get(ENVIRONMENT_VARIABLE, None)

    if not module_name and env_module_name:
        module_name = env_module_name

    if not module_name:
        module_name = DEFAULT_MODULE_NAME

    try:
        module = t.cast(PiccoloConfModule, import_module(module_name))
    except ModuleNotFoundError:
        if self.diagnose:
            colored_warning(
                (
                    f"{module_name} either doesn't exist or the import "
                    "failed. Traceback:"
                ),
                level=Level.high,
            )
            print(traceback.format_exc())
        return None
    else:
        return module
def set_length(self, column: t.Union[str, Varchar], length: int) -> Alter:
    """
    Change the max length of a varchar column. Unfortunately, this isn't
    supported by SQLite, but SQLite also doesn't enforce any length
    limits on varchar columns anyway.

    Band.alter().set_length('name', 512)
    """
    if self.engine_type == "sqlite":
        colored_warning(
            (
                "SQLite doesn't support changes in length. It also "
                "doesn't enforce any length limits, so your code will "
                "still work as expected. Skipping."
            ),
            level=Level.medium,
        )
        return self

    if not isinstance(column, (str, Varchar)):
        raise ValueError(
            "Only Varchar columns can have their length changed."
        )

    self._set_length.append(SetLength(column, length))
    return self
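
# Usage sketch for `set_length` - not from the original source. It
# assumes a `Band` table with a `Varchar` column called `name`, as in
# the docstring above.
from piccolo.columns import Varchar
from piccolo.table import Table


class Band(Table):
    name = Varchar(length=100)


async def widen_name_column():
    # Widen the column to 512 characters. On SQLite this logs a warning
    # and is skipped; on Postgres it alters the column.
    await Band.alter().set_length(Band.name, 512).run()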
async def start_connection_pool(self, **kwargs) -> None:
    if self.pool:
        colored_warning(
            "A pool already exists - close it first if you want to "
            "create a new pool.",
        )
    else:
        config = dict(self.config)
        config.update(**kwargs)
        self.pool = await asyncpg.create_pool(**config)
def ilike(self, value: str) -> Where:
    if "%" not in value:
        raise ValueError("% is required for ilike operators")

    if self._meta.engine_type == "postgres":
        operator: t.Type[ComparisonOperator] = ILike
    else:
        colored_warning(
            "SQLite doesn't support ILIKE currently, falling back to "
            "LIKE."
        )
        operator = Like

    return Where(column=self, value=value, operator=operator)
async def prep_database(self):
    for extension in self.extensions:
        try:
            await self._run_in_new_connection(
                f'CREATE EXTENSION IF NOT EXISTS "{extension}"',
            )
        except asyncpg.exceptions.InsufficientPrivilegeError:
            colored_warning(
                f"=> Unable to create {extension} extension - some "
                "functionality may not behave as expected. Make sure "
                "your database user has permission to create "
                "extensions, or add it manually using "
                f'`CREATE EXTENSION "{extension}";`',
                level=Level.medium,
            )
def ilike(self, value: str) -> Where:
    """
    Only Postgres supports ILIKE. It's used for case insensitive
    matching. For SQLite, it's just proxied to a LIKE query instead.
    """
    if self._meta.engine_type == "postgres":
        operator: t.Type[ComparisonOperator] = ILike
    else:
        colored_warning(
            "SQLite doesn't support ILIKE, falling back to LIKE."
        )
        operator = Like

    return Where(column=self, value=value, operator=operator)
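
# Usage sketch (an assumption, reusing the hypothetical `Band` table
# from above): on Postgres this compiles to ILIKE, on SQLite it falls
# back to LIKE with a warning.
async def find_bands():
    return await Band.select().where(Band.name.ilike("%python%")).run()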
async def get_version(self) -> float:
    """
    Returns the version of Postgres being run.
    """
    try:
        response: t.Sequence[t.Dict] = await self._run_in_new_connection(
            "SHOW server_version"
        )
    except ConnectionRefusedError as exception:
        # Suppressing the exception, otherwise importing piccolo_conf.py
        # containing an engine will raise an ImportError.
        colored_warning(f"Unable to connect to database - {exception}")
        return 0.0
    else:
        version_string = response[0]["server_version"]
        return self._parse_raw_version_string(
            version_string=version_string
        )
def get_piccolo_conf_module(
    self, module_name: t.Optional[str] = None
) -> t.Optional[PiccoloConfModule]:
    """
    Searches the path for a 'piccolo_conf.py' module to import. The
    location searched can be overridden by:

    * Explicitly passing a module name into this method.
    * Setting the PICCOLO_CONF environment variable.

    An example override is 'my_folder.piccolo_conf'.
    """
    env_module_name = os.environ.get(ENVIRONMENT_VARIABLE, None)

    if not module_name and env_module_name:
        module_name = env_module_name

    if not module_name:
        module_name = DEFAULT_MODULE_NAME

    try:
        module = t.cast(PiccoloConfModule, import_module(module_name))
    except ModuleNotFoundError as exc:
        if self.diagnose:
            colored_warning(
                (
                    f"{module_name} either doesn't exist or the import "
                    "failed. Traceback:"
                ),
                level=Level.high,
            )
            print(traceback.format_exc())

        if str(exc) == "No module named 'asyncpg'":
            raise ModuleNotFoundError(
                "PostgreSQL driver not found. "
                "Try running `pip install 'piccolo[postgres]'`"
            )
        elif str(exc) == "No module named 'aiosqlite'":
            raise ModuleNotFoundError(
                "SQLite driver not found. "
                "Try running `pip install 'piccolo[sqlite]'`"
            )
        else:
            raise exc
    else:
        return module
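
# Both override mechanisms from the docstring, sketched out. `Finder`
# is the class these methods belong to; the module path is made up.
import os

os.environ["PICCOLO_CONF"] = "my_folder.piccolo_conf"

# Or pass the module name in explicitly:
# Finder().get_piccolo_conf_module(module_name="my_folder.piccolo_conf")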
def get_engine(
    self, module_name: t.Optional[str] = None
) -> t.Optional[Engine]:
    piccolo_conf = self.get_piccolo_conf_module(module_name=module_name)
    engine: t.Optional[Engine] = getattr(piccolo_conf, ENGINE_VAR, None)

    if not engine:
        colored_warning(
            f"{module_name} doesn't define a {ENGINE_VAR} variable.",
            level=Level.high,
        )
    elif not isinstance(engine, Engine):
        colored_warning(
            f"{module_name} contains a {ENGINE_VAR} variable of the "
            "wrong type - it should be an Engine subclass.",
            level=Level.high,
        )

    return engine
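
# A minimal `piccolo_conf.py` sketch which would satisfy `get_engine` -
# the database credentials and app path are placeholders.
from piccolo.engine.postgres import PostgresEngine
from piccolo.conf.apps import AppRegistry

DB = PostgresEngine(
    config={
        "database": "my_db",
        "user": "postgres",
        "password": "",
        "host": "localhost",
        "port": 5432,
    }
)

APP_REGISTRY = AppRegistry(apps=["my_app.piccolo_app"])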
def check_version(self):
    """
    Warn if the database version isn't supported.
    """
    try:
        version_number = self.get_version()
    except Exception as exception:
        colored_warning(
            f"Unable to fetch server version: {exception}",
            level=Level.high,
        )
        return

    engine_type = self.engine_type.capitalize()
    print(f"Running {engine_type} version {version_number}")

    if version_number < self.min_version_number:
        message = (
            f"This version of {self.engine_type} isn't supported "
            f"(< {self.min_version_number}) - some features might not "
            "be available. For instructions on installing databases, "
            "see the Piccolo docs."
        )
        colored_warning(message, stacklevel=3)
def __post_init__(self):
    self.app_configs: t.Dict[str, AppConfig] = {}
    app_names = []

    for app in self.apps:
        try:
            app_conf_module = import_module(app)
            app_config: AppConfig = getattr(app_conf_module, "APP_CONFIG")
        except (ImportError, AttributeError) as e:
            if app.endswith(".piccolo_app"):
                raise e

            app += ".piccolo_app"
            app_conf_module = import_module(app)
            app_config = getattr(app_conf_module, "APP_CONFIG")
            colored_warning(
                f"App {app[:-12]} should end with `.piccolo_app`",
                level=Level.medium,
            )

        self.app_configs[app_config.app_name] = app_config
        app_names.append(app_config.app_name)

    self._validate_app_names(app_names)
def get_version(self) -> float:
    """
    Returns the version of Postgres being run.
    """
    loop = asyncio.new_event_loop()

    with ThreadPoolExecutor(max_workers=1) as executor:
        future = executor.submit(
            loop.run_until_complete,
            self._run_in_new_connection("SHOW server_version"),
        )
        try:
            response: t.Sequence[t.Dict] = future.result()  # type: ignore
        except ConnectionRefusedError as exception:
            # Suppressing the exception, otherwise importing
            # piccolo_conf.py containing an engine will raise an
            # ImportError.
            colored_warning("Unable to connect to database")
            print(exception)
            return 0.0
        else:
            version_string = response[0]["server_version"]
            return self._parse_raw_version_string(
                version_string=version_string
            )
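
# The run-a-coroutine-from-sync-code pattern above, in isolation - a
# stdlib-only sketch, with `fetch_value` standing in for the database
# call.
import asyncio
from concurrent.futures import ThreadPoolExecutor


async def fetch_value() -> str:
    await asyncio.sleep(0)  # pretend to do async I/O
    return "14.1"


def fetch_value_sync() -> str:
    # A fresh event loop, run to completion on a worker thread, so we
    # don't clash with any loop already running in the calling thread.
    loop = asyncio.new_event_loop()
    with ThreadPoolExecutor(max_workers=1) as executor:
        future = executor.submit(loop.run_until_complete, fetch_value())
        try:
            return future.result()
        finally:
            loop.close()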
def test_colored_warning(self):
    """
    Just make sure no errors are raised.
    """
    colored_warning(message="TESTING!")
def __init__(
    self,
    *tables: t.Union[t.Type[Table], TableConfig],
    forms: t.List[FormConfig] = [],
    auth_table: t.Type[BaseUser] = BaseUser,
    session_table: t.Type[SessionsBase] = SessionsBase,
    session_expiry: timedelta = timedelta(hours=1),
    max_session_expiry: timedelta = timedelta(days=7),
    increase_expiry: t.Optional[timedelta] = timedelta(minutes=20),
    page_size: int = 15,
    read_only: bool = False,
    rate_limit_provider: t.Optional[RateLimitProvider] = None,
    production: bool = False,
    site_name: str = "Piccolo Admin",
) -> None:
    super().__init__(
        title=site_name, description="Piccolo API documentation"
    )

    ###########################################################################
    # Convert any table arguments which are plain ``Table`` classes into
    # ``TableConfig`` instances.

    table_configs: t.List[TableConfig] = []

    for table in tables:
        if isinstance(table, TableConfig):
            table_configs.append(table)
        else:
            table_configs.append(TableConfig(table_class=table))

    self.table_configs = table_configs

    for table_config in table_configs:
        table_class = table_config.table_class
        for column in table_class._meta.columns:
            if column._meta.secret and column._meta.required:
                message = (
                    f"{table_class._meta.tablename}."
                    f"{column._meta._name} is using `secret` and "
                    f"`required` column args which are incompatible. "
                    f"You may encounter unexpected behavior when using "
                    f"this table within Piccolo Admin."
                )
                colored_warning(message, level=Level.high)

    ###########################################################################

    self.auth_table = auth_table
    self.site_name = site_name
    self.forms = forms
    self.form_config_map = {form.slug: form for form in self.forms}

    with open(os.path.join(ASSET_PATH, "index.html")) as f:
        self.template = f.read()

    ###########################################################################

    api_app = FastAPI(docs_url=None)
    api_app.mount("/docs/", swagger_ui(schema_url="../openapi.json"))

    for table_config in table_configs:
        table_class = table_config.table_class
        visible_column_names = table_config.get_visible_column_names()
        visible_filter_names = table_config.get_visible_filter_names()
        rich_text_columns_names = (
            table_config.get_rich_text_columns_names()
        )
        FastAPIWrapper(
            root_url=f"/tables/{table_class._meta.tablename}/",
            fastapi_app=api_app,
            piccolo_crud=PiccoloCRUD(
                table=table_class,
                read_only=read_only,
                page_size=page_size,
                schema_extra={
                    "visible_column_names": visible_column_names,
                    "visible_filter_names": visible_filter_names,
                    "rich_text_columns": rich_text_columns_names,
                },
            ),
            fastapi_kwargs=FastAPIKwargs(
                all_routes={
                    "tags": [f"{table_class._meta.tablename.capitalize()}"]
                },
            ),
        )

    api_app.add_api_route(
        path="/tables/",
        endpoint=self.get_table_list,  # type: ignore
        methods=["GET"],
        response_model=t.List[str],
        tags=["Tables"],
    )

    api_app.add_api_route(
        path="/meta/",
        endpoint=self.get_meta,  # type: ignore
        methods=["GET"],
        tags=["Meta"],
        response_model=MetaResponseModel,
    )

    api_app.add_api_route(
        path="/forms/",
        endpoint=self.get_forms,  # type: ignore
        methods=["GET"],
        tags=["Forms"],
        response_model=t.List[FormConfigResponseModel],
    )

    api_app.add_api_route(
        path="/forms/{form_slug:str}/",
        endpoint=self.get_single_form,  # type: ignore
        methods=["GET"],
        tags=["Forms"],
    )

    api_app.add_api_route(
        path="/forms/{form_slug:str}/schema/",
        endpoint=self.get_single_form_schema,  # type: ignore
        methods=["GET"],
        tags=["Forms"],
    )

    api_app.add_api_route(
        path="/forms/{form_slug:str}/",
        endpoint=self.post_single_form,  # type: ignore
        methods=["POST"],
        tags=["Forms"],
    )

    api_app.add_api_route(
        path="/user/",
        endpoint=self.get_user,  # type: ignore
        methods=["GET"],
        tags=["User"],
        response_model=UserResponseModel,
    )

    ###########################################################################

    auth_app = FastAPI()

    if not rate_limit_provider:
        rate_limit_provider = InMemoryLimitProvider(
            limit=1000, timespan=300
        )

    auth_app.mount(
        path="/login/",
        app=RateLimitingMiddleware(
            app=session_login(
                auth_table=self.auth_table,
                session_table=session_table,
                session_expiry=session_expiry,
                max_session_expiry=max_session_expiry,
                redirect_to=None,
                production=production,
            ),
            provider=rate_limit_provider,
        ),
    )

    auth_app.add_route(
        path="/logout/",
        route=session_logout(session_table=session_table),
        methods=["POST"],
    )

    ###########################################################################

    self.router.add_route(
        path="/", endpoint=self.get_root, methods=["GET"]
    )

    self.mount(
        path="/css",
        app=StaticFiles(directory=os.path.join(ASSET_PATH, "css")),
    )

    self.mount(
        path="/js",
        app=StaticFiles(directory=os.path.join(ASSET_PATH, "js")),
    )

    auth_middleware = partial(
        AuthenticationMiddleware,
        backend=SessionsAuthBackend(
            auth_table=auth_table,
            session_table=session_table,
            admin_only=True,
            increase_expiry=increase_expiry,
        ),
        on_error=handle_auth_exception,
    )

    self.mount(path="/api", app=auth_middleware(api_app))
    self.mount(path="/auth", app=auth_app)

    # We make the meta endpoint available without auth, because it
    # contains the site name.
    self.add_api_route("/meta/", endpoint=self.get_meta)  # type: ignore
def main():
    """
    The entrypoint to the Piccolo CLI.
    """
    # In case it's run from an entrypoint:
    sys.path.insert(0, os.getcwd())

    ###########################################################################
    # Run in diagnose mode if requested.

    diagnose = get_diagnose_flag()
    if diagnose:
        print("Diagnosis...")
        if Finder(diagnose=True).get_app_registry():
            print("Everything OK")
        return

    ###########################################################################

    cli = CLI(description="Piccolo CLI")

    ###########################################################################
    # Register the base apps.

    for _app_config in [
        app_config,
        asgi_config,
        fixtures_config,
        meta_config,
        migrations_config,
        playground_config,
        project_config,
        schema_config,
        shell_config,
        sql_shell_config,
        tester_config,
        user_config,
    ]:
        for command in _app_config.commands:
            cli.register(
                command.callable,
                group_name=_app_config.app_name,
                aliases=command.aliases,
            )

    ###########################################################################
    # Get user defined apps.

    try:
        APP_REGISTRY: AppRegistry = Finder().get_app_registry()
    except (ImportError, AttributeError):
        print(
            "Can't import the APP_REGISTRY from piccolo_conf - some "
            "commands may be missing. If this is a new project don't "
            f"worry. To see a full traceback use `piccolo {DIAGNOSE_FLAG}`"
        )
    else:
        for app_name, _app_config in APP_REGISTRY.app_configs.items():
            for command in _app_config.commands:
                if cli.command_exists(
                    group_name=app_name,
                    command_name=command.callable.__name__,
                ):
                    # Skipping - already registered.
                    continue
                cli.register(
                    command.callable,
                    group_name=app_name,
                    aliases=command.aliases,
                )

        if "migrations" not in sys.argv:
            # Show a warning if any migrations haven't been run. Don't
            # run it if it looks like the user is running a migration
            # command, as this information is redundant.
            try:
                havent_ran_count = run_sync(
                    CheckMigrationManager(
                        app_name="all"
                    ).havent_ran_count()
                )
                if havent_ran_count:
                    message = (
                        f"{havent_ran_count} migration hasn't"
                        if havent_ran_count == 1
                        else f"{havent_ran_count} migrations haven't"
                    )
                    colored_warning(
                        message=(
                            "=> {} been run - the app "
                            "might not behave as expected.\n"
                            "To check which use:\n"
                            "    piccolo migrations check\n"
                            "To run all migrations:\n"
                            "    piccolo migrations forwards all\n"
                        ).format(message),
                        level=Level.high,
                    )
            except Exception:
                pass

    ###########################################################################

    cli.run()
async def close_connection_pool(self) -> None:
    if self.pool:
        await self.pool.close()
        self.pool = None
    else:
        colored_warning("No pool is running.")
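
# Pool lifecycle sketch (assumes an engine instance called `DB`, e.g.
# from piccolo_conf.py). Extra kwargs to `start_connection_pool` are
# forwarded to `asyncpg.create_pool`, so pool sizing can be tuned here.
async def main():
    await DB.start_connection_pool(max_size=20)
    try:
        ...  # run queries - they use the pool automatically
    finally:
        await DB.close_connection_pool()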
def _connection_pool_warning(self):
    message = (
        f"Connection pooling is not supported for {self.engine_type}."
    )
    logger.warning(message)
    colored_warning(message, stacklevel=3)
async def _run_alter_columns(self, backwards=False):
    for table_class_name in self.alter_columns.table_class_names:
        alter_columns = self.alter_columns.for_table_class_name(
            table_class_name
        )

        if not alter_columns:
            continue

        _Table: t.Type[Table] = create_table_class(
            class_name=table_class_name,
            class_kwargs={"tablename": alter_columns[0].tablename},
        )

        for alter_column in alter_columns:
            params = (
                alter_column.old_params
                if backwards
                else alter_column.params
            )
            old_params = (
                alter_column.params
                if backwards
                else alter_column.old_params
            )

            ###################################################################
            # Change the column type if possible.

            column_class = (
                alter_column.old_column_class
                if backwards
                else alter_column.column_class
            )
            old_column_class = (
                alter_column.column_class
                if backwards
                else alter_column.old_column_class
            )

            if (old_column_class is not None) and (
                column_class is not None
            ):
                if old_column_class != column_class:
                    old_column = old_column_class(**old_params)
                    old_column._meta._table = _Table
                    old_column._meta._name = alter_column.column_name
                    old_column._meta.db_column_name = (
                        alter_column.db_column_name
                    )

                    new_column = column_class(**params)
                    new_column._meta._table = _Table
                    new_column._meta._name = alter_column.column_name
                    new_column._meta.db_column_name = (
                        alter_column.db_column_name
                    )

                    using_expression: t.Optional[str] = None

                    # Postgres won't automatically cast some types to
                    # others. We may as well try, as it will definitely
                    # fail otherwise.
                    if new_column.value_type != old_column.value_type:
                        if old_params.get("default", ...) is not None:
                            # Unless the column's default value is also
                            # something which can be cast to the new
                            # type, it will also fail. Drop the default
                            # value for now - the proper default is set
                            # later on.
                            await _Table.alter().drop_default(
                                old_column
                            ).run()

                        using_expression = "{}::{}".format(
                            alter_column.db_column_name,
                            new_column.column_type,
                        )

                    # We can't migrate a SERIAL to a BIGSERIAL or vice
                    # versa, as SERIAL isn't a true type, just an alias
                    # to other commands.
                    if issubclass(column_class, Serial) and issubclass(
                        old_column_class, Serial
                    ):
                        colored_warning(
                            "Unable to migrate Serial to BigSerial and "
                            "vice versa. This must be done manually."
                        )
                    else:
                        await _Table.alter().set_column_type(
                            old_column=old_column,
                            new_column=new_column,
                            using_expression=using_expression,
                        ).run()

            ###################################################################

            null = params.get("null")
            if null is not None:
                await _Table.alter().set_null(
                    column=alter_column.db_column_name, boolean=null
                ).run()

            length = params.get("length")
            if length is not None:
                await _Table.alter().set_length(
                    column=alter_column.db_column_name, length=length
                ).run()

            unique = params.get("unique")
            if unique is not None:
                # When modifying unique constraints, we need to pass in
                # a column type, and not just the column name.
                column = Column()
                column._meta._table = _Table
                column._meta._name = alter_column.column_name
                column._meta.db_column_name = alter_column.db_column_name
                await _Table.alter().set_unique(
                    column=column, boolean=unique
                ).run()

            index = params.get("index")
            index_method = params.get("index_method")
            if index is None:
                if index_method is not None:
                    # If the index value hasn't changed, but the
                    # index_method value has, this indicates we need to
                    # change the index type.
                    column = Column()
                    column._meta._table = _Table
                    column._meta._name = alter_column.column_name
                    column._meta.db_column_name = (
                        alter_column.db_column_name
                    )
                    await _Table.drop_index([column]).run()
                    await _Table.create_index(
                        [column],
                        method=index_method,
                        if_not_exists=True,
                    ).run()
            else:
                # If the index value has changed, then we are either
                # dropping, or creating an index.
                column = Column()
                column._meta._table = _Table
                column._meta._name = alter_column.column_name
                column._meta.db_column_name = alter_column.db_column_name

                if index is True:
                    kwargs = (
                        {"method": index_method} if index_method else {}
                    )
                    await _Table.create_index(
                        [column], if_not_exists=True, **kwargs
                    ).run()
                else:
                    await _Table.drop_index([column]).run()

            # None is a valid value, so retrieve ellipsis if not found.
            default = params.get("default", ...)
            if default is not ...:
                column = Column()
                column._meta._table = _Table
                column._meta._name = alter_column.column_name
                column._meta.db_column_name = alter_column.db_column_name
                if default is None:
                    await _Table.alter().drop_default(column=column).run()
                else:
                    column.default = default
                    await _Table.alter().set_default(
                        column=column, value=column.get_default_value()
                    ).run()

            # None is a valid value, so retrieve ellipsis if not found.
            digits = params.get("digits", ...)
            if digits is not ...:
                await _Table.alter().set_digits(
                    column=alter_column.db_column_name,
                    digits=digits,
                ).run()
async def response_handler(self, response):
    m2m_selects = [
        i
        for i in self.columns_delegate.selected_columns
        if isinstance(i, M2MSelect)
    ]
    for m2m_select in m2m_selects:
        m2m_name = m2m_select.m2m._meta.name
        secondary_table = m2m_select.m2m._meta.secondary_table
        secondary_table_pk = secondary_table._meta.primary_key

        if self.engine_type == "sqlite":
            # With M2M queries in SQLite, we always get the value back
            # as a list of strings, so we need to do some type
            # conversion.
            value_type = (
                m2m_select.columns[0].__class__.value_type
                if m2m_select.as_list and m2m_select.serialisation_safe
                else secondary_table_pk.value_type
            )
            try:
                for row in response:
                    data = row[m2m_name]
                    row[m2m_name] = (
                        [value_type(i) for i in row[m2m_name]]
                        if data
                        else []
                    )
            except ValueError:
                colored_warning(
                    "Unable to do type conversion for the "
                    f"{m2m_name} relation"
                )

            # If the user requested a single column, we just return
            # that from the database. Otherwise we request the primary
            # key value, so we can fetch the rest of the data in a
            # subsequent SQL query - see below.
            if m2m_select.as_list:
                if m2m_select.serialisation_safe:
                    pass
                else:
                    response = await self._splice_m2m_rows(
                        response,
                        secondary_table,
                        secondary_table_pk,
                        m2m_name,
                        m2m_select,
                        as_list=True,
                    )
            else:
                if (
                    len(m2m_select.columns) == 1
                    and m2m_select.serialisation_safe
                ):
                    column_name = m2m_select.columns[0]._meta.name
                    for row in response:
                        row[m2m_name] = [
                            {column_name: i} for i in row[m2m_name]
                        ]
                else:
                    response = await self._splice_m2m_rows(
                        response,
                        secondary_table,
                        secondary_table_pk,
                        m2m_name,
                        m2m_select,
                    )

        elif self.engine_type == "postgres":
            if m2m_select.as_list:
                # We get the data back as an array, and can just return
                # it, unless it's JSON.
                if (
                    type(m2m_select.columns[0]) in (JSON, JSONB)
                    and m2m_select.load_json
                ):
                    for row in response:
                        data = row[m2m_name]
                        row[m2m_name] = [load_json(i) for i in data]
            elif m2m_select.serialisation_safe:
                # If the columns requested can be safely serialised,
                # they are returned as a JSON string, so we need to
                # deserialise it.
                for row in response:
                    data = row[m2m_name]
                    row[m2m_name] = load_json(data) if data else []
            else:
                # If the data can't be safely serialised as JSON, we
                # get back an array of primary key values, and need to
                # splice in the correct values using Python.
                response = await self._splice_m2m_rows(
                    response,
                    secondary_table,
                    secondary_table_pk,
                    m2m_name,
                    m2m_select,
                )

    ###########################################################################

    # If no columns were specified, it's a select *, so we know that
    # no columns were selected from related tables.
    was_select_star = len(self.columns_delegate.selected_columns) == 0

    if self.limit_delegate._first:
        if len(response) == 0:
            return None
        if self.output_delegate._output.nested and not was_select_star:
            return make_nested(response[0])
        else:
            return response[0]
    elif self.output_delegate._output.nested and not was_select_star:
        return [make_nested(i) for i in response]
    else:
        return response