Example #1
async def render_model_view(request: Request, model_id: Text) -> HTTPResponse:
    """ render model data view """
    model_data = cfg.models[model_id]
    columns_names = model_data["columns_names"]
    table_name = get_table_name(model_id)
    model = cfg.app.db.tables[table_name]
    query = cfg.app.db.select([model])
    columns_data = model_data["columns_data"]
    try:
        rows = await query.gino.all()
    except asyncpg.exceptions.UndefinedTableError:
        # the table may not exist yet on first access; create it and retry
        await cfg.app.db.gino.create_all(tables=[model])
        rows = await query.gino.all()
    output = []
    for row in rows:
        # turn the positional row into a {column_name: value} dict
        row = {columns_names[num]: field for num, field in enumerate(row)}
        row["_id"] = create_obj_id_for_query(
            get_obj_id_from_row(model_data, row))
        # mask hashed columns so their raw values never reach the UI
        for index in cfg.models[model_id]["hashed_indexes"]:
            row[columns_names[index]] = "*************"
        output.append(row)
    output = output[::-1]  # reverse the row order for display

    _response = cfg.jinja.render(
        "model_view.html",
        request,
        model=model_id,
        columns=columns_data_for_ui(columns_data, model_data),
        model_data=output,
        unique=cfg.models[model_id]["identity"],
    )
    return _response
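The view expects a raw Sanic request plus the model id taken from the URL. A minimal sketch of how it could be wired up as a route, assuming a Sanic app (the app name and URL pattern here are illustrative, not the project's actual routing):

from sanic import Sanic

app = Sanic("admin_example")

@app.route("/admin/<model_id>")
async def model_view(request, model_id):
    # delegate to the admin view above
    return await render_model_view(request, model_id)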
Example #2
def extract_column_data(model_id: Text) -> Dict:
    """ extract data about column """
    _hash = "_hash"
    columns_data, hashed_indexes = {}, []
    table_name = get_table_name(model_id)
    for num, column in enumerate(cfg.app.db.tables[table_name].columns):
        if _hash in column.name:
            # "<name>_hash" columns are exposed as masked hash fields
            name = column.name.split(_hash)[0]
            type_ = HashColumn
            hashed_indexes.append(num)
        else:
            name = column.name
            type_ = types_map.get(str(column.type).split("(")[0])
            if not type_:
                logger.error(f"{column.type} was not found in types_map")
                type_ = str
        # pull the length out of types such as VARCHAR(255), if one is set
        type_str = str(column.type)
        if "(" in type_str:
            len_ = int(type_str.split("(")[1].split(")")[0])
        else:
            len_ = None
        columns_data[name] = {
            "type": type_,
            "len": len_,
            "nullable": column.nullable,
            "unique": column.unique,
            "primary": column.primary_key,
            "foreign_keys": column.foreign_keys,
            "db_type": column.type,
            "sequence": isinstance(column.default, sqlalchemy.sql.schema.Sequence),
        }
    required = [
        key
        for key, value in columns_data.items()
        if value["nullable"] is False or value["primary"]
    ]
    unique_keys = [
        key for key, value in columns_data.items() if value["unique"] is True
    ]
    foreign_keys = {}
    for column_name, data in columns_data.items():
        for key in data["foreign_keys"]:
            # _colspec is "referenced_table.referenced_column"; map the
            # referenced table to (local column, referenced column)
            colspec = key._colspec.split(".")
            foreign_keys[colspec[0]] = (column_name, colspec[1])

    primary_keys = [
        key for key, value in columns_data.items() if value["primary"] is True
    ]
    table_details = {
        "unique_columns": unique_keys,
        "required_columns": required,
        "columns_data": columns_data,
        "primary_keys": primary_keys,
        "columns_names": list(columns_data.keys()),
        "hashed_indexes": hashed_indexes,
        "foreign_keys": foreign_keys,
        "identity": primary_keys,
    }
    return table_details
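The type lookup above goes through a types_map keyed by the SQLAlchemy type name, i.e. the part of str(column.type) before any parenthesised length. A minimal sketch of what such a mapping looks like (the entries are illustrative; the project ships its own):

types_map = {
    "VARCHAR": str,
    "TEXT": str,
    "INTEGER": int,
    "BIGINT": int,
    "NUMERIC": float,
    "BOOLEAN": bool,
}

With this map, a VARCHAR(255) column comes back as type_ = str with len_ = 255, while a BOOLEAN column yields len_ = None.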
Example #3
async def deepcopy_recursive(
    model: Model,
    object_id: Dict,  # maps primary-key column name -> value
    model_data: Dict,
    *,
    new_fk_link_id: Optional[str] = None,
    fk_column: Optional[Column] = None,
    new_id: Optional[str] = None,
):
    logger.debug(
        f"Making a deepcopy of {model} with id {object_id} linking to foreign key"
        f" {fk_column} with id {new_fk_link_id}"
    )
    new_obj_key = await create_object_copy(
        model.__tablename__,
        object_id,
        model_data,
        fk_column=fk_column,
        new_fk_link_id=new_fk_link_id,
        new_id=new_id,
    )
    if len(identity(model)) == 0:
        # tables without a primary key cannot be deep-copied yet
        return (
            "Deepcopy is not available for tables without primary keys right now",
            "error",
        )
    primary_key_col = identity(model)[0]

    dependent_models = {}
    # TODO(ehborisov): check how it works in the case of composite key
    for m_id, data in cfg.models.items():
        table_name = get_table_name(m_id)
        for column in cfg.app.db.tables[table_name].columns:
            if column.references(primary_key_col):
                dependent_models[data["model"]] = column
    for dep_model, fk_column in dependent_models.items():
        all_referencing_instance_ids = (
            await dep_model.select(identity(dep_model)[0].name)
            .where(fk_column == object_id[primary_key_col.name])
            .gino.all()
        )
        # TODO(ehborisov): can gather be used there? Only if we have a connection pool?
        for inst_id in all_referencing_instance_ids:
            result = await deepcopy_recursive(
                dep_model,
                {identity(dep_model)[0].name: inst_id[0]},
                model_data,
                new_fk_link_id=new_obj_key[identity(model)[0].name],
                fk_column=fk_column,
            )
            if isinstance(result, tuple):
                return result
    logger.debug(f"Finished copying, returning newly created object's id {new_obj_key}")
    return new_obj_key
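A hypothetical top-level call, assuming a registered model whose single primary-key column is named id (all identifiers here are illustrative):

result = await deepcopy_recursive(
    model=cfg.models["users"]["model"],
    object_id={"id": "u-123"},  # keyed by the primary-key column name
    model_data=cfg.models["users"],
)
if isinstance(result, tuple):  # (message, "error") signals failure
    logger.error(result[0])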
Example #4
async def count_elements_in_db():
    """Return the number of rows in every registered model's table."""
    data = {}
    for model_id in cfg.models:
        try:
            table_name = get_table_name(model_id)
            # table names come from the internal model registry, not user input
            sql_query = f"SELECT COUNT(*) FROM {table_name}"
            status = await cfg.app.db.status(cfg.app.db.text(sql_query))
            data[model_id] = status[1][0][0]  # first column of the first row
        except asyncpg.exceptions.UndefinedTableError:
            data[model_id] = "Table does not exist"
    return data
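A quick sketch of what the function returns, assuming two registered models (names and counts are made up):

counts = await count_elements_in_db()
# e.g. {"users": 42, "orders": "Table does not exist"}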