def post(self, request, schema, table):
    table_obj = actions._get_table(schema=schema, table=table)
    raw_input = request.data
    metadata, error = actions.try_parse_metadata(raw_input)
    if metadata is not None:
        compiler = JSONCompiler()
        table_obj.comment = json.dumps(compiler.visit(metadata))
        cursor = actions.load_cursor_from_context(request.data)
        # Surprisingly, SQLAlchemy does not seem to escape comment strings
        # properly. Certain strings cause database errors.
        # This MAY be a security issue. Therefore, we do not use
        # SQLAlchemy's compiler here but do it manually.
        sql = "COMMENT ON TABLE {schema}.{table} IS %s".format(
            schema=table_obj.schema, table=table_obj.name
        )
        cursor.execute(sql, (table_obj.comment,))
        return JsonResponse(raw_input)
    else:
        raise APIError(error)
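# Illustrative sketch, not used by the views in this file: the manual
# COMMENT ON TABLE workaround from post() in isolation. The comment body
# travels as a bound parameter, so escaping is delegated to the DB-API
# driver; the schema/table identifiers are formatted into the statement
# directly and must therefore already be trusted. The names below are
# hypothetical.
def _set_table_comment_sketch(cursor, comment_text):
    sql = "COMMENT ON TABLE {schema}.{table} IS %s".format(
        schema="example_schema", table="example_table"  # hypothetical identifiers
    )
    # %s binding lets the driver (e.g. psycopg2) escape the value, avoiding
    # the SQLAlchemy comment-escaping issue noted above.
    cursor.execute(sql, (comment_text,))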
def __get_rows(self, request, data):
    table = actions._get_table(data["schema"], table=data["table"])
    columns = data.get("columns")
    if not columns:
        query = table.select()
    else:
        columns = [actions.get_column_obj(table, c) for c in columns]
        query = sqla.select(columns=columns)
    where_clauses = data.get("where")
    if where_clauses:
        query = query.where(parser.parse_condition(where_clauses))
    orderby = data.get("orderby")
    if orderby:
        if isinstance(orderby, list):
            query = query.order_by(*map(parser.parse_expression, orderby))
        elif isinstance(orderby, str):
            query = query.order_by(orderby)
        else:
            raise APIError("Unknown order_by clause: {}".format(orderby))
    limit = data.get("limit")
    if limit and limit.isdigit():
        query = query.limit(int(limit))
    offset = data.get("offset")
    if offset and offset.isdigit():
        query = query.offset(int(offset))
    cursor = sessions.load_cursor_from_context(request.data)
    # get() below consumes the result dict, so the result must be returned.
    return actions._execute_sqla(query, cursor)
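# Example of the data dict __get_rows consumes, matching what get() below
# assembles. Schema/table names are hypothetical; the "where" structure
# mirrors the row_id clause built in get(), and the exact grammar accepted
# is defined by parser.parse_condition.
_EXAMPLE_ROWS_QUERY = {
    "schema": "model_draft",    # hypothetical schema
    "table": "example_points",  # hypothetical table
    "columns": ["id", "name"],
    "where": {
        "type": "operator",
        "operator": "EQUALS",
        "operands": [{"type": "column", "column": "id"}, "42"],
    },
    "orderby": ["id"],
    "limit": "100",             # limits arrive as strings, hence the isdigit() checks
    "offset": "0",
}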
def get(self, request, schema, table):
    table_obj = actions._get_table(schema=schema, table=table)
    comment = table_obj.comment
    return JsonResponse(json.loads(comment) if comment else {})
def get(self, request, schema, table, row_id=None):
    schema, table = actions.get_table_name(schema, table, restrict_schemas=False)
    columns = request.GET.getlist("column")
    where = request.GET.getlist("where")
    if row_id and where:
        raise actions.APIError(
            "Where clauses and row id are not allowed in the same query"
        )
    orderby = request.GET.getlist("orderby")
    if row_id and orderby:
        raise actions.APIError(
            "Order by clauses and row id are not allowed in the same query"
        )
    limit = request.GET.get("limit")
    if row_id and limit:
        raise actions.APIError(
            "Limit clauses and row id are not allowed in the same query"
        )
    offset = request.GET.get("offset")
    if row_id and offset:
        raise actions.APIError(
            "Offset clauses and row id are not allowed in the same query"
        )
    format = request.GET.get("form")
    if offset is not None and not offset.isdigit():
        raise actions.APIError("Offset must be integer")
    if limit is not None and not limit.isdigit():
        raise actions.APIError("Limit must be integer")
    if not all(parser.is_pg_qual(c) for c in columns):
        raise actions.APIError("Columns are no postgres qualifiers")
    if not all(parser.is_pg_qual(c) for c in orderby):
        raise actions.APIError(
            "Columns in orderby clause are no postgres qualifiers"
        )

    # OPERATORS can be EQUALS, GREATER, LOWER, NOTEQUAL, NOTGREATER, NOTLOWER.
    # CONNECTORS can be AND, OR.
    # Note that a + in the query string is decoded as a space.
    where_clauses = self.__read_where_clause(where)
    if row_id:
        clause = {
            "operands": [{"type": "column", "column": "id"}, row_id],
            "operator": "EQUALS",
            "type": "operator",
        }
        if where_clauses:
            where_clauses = conjunction(clause, where_clauses)
        else:
            where_clauses = clause

    # TODO: Validate where_clauses. Should not be vulnerable.
    data = {
        "schema": schema,
        "table": table,
        "columns": columns,
        "where": where_clauses,
        "orderby": orderby,
        "limit": limit,
        "offset": offset,
    }

    return_obj = self.__get_rows(request, data)
    session = (
        sessions.load_session_from_context(return_obj.pop("context"))
        if "context" in return_obj
        else None
    )

    # Extract column names from the cursor description.
    if "description" in return_obj:
        cols = [col[0] for col in return_obj["description"]]
    else:
        cols = []
        return_obj["data"] = []
        return_obj["rowcount"] = 0

    if format == "csv":
        pseudo_buffer = Echo()
        writer = csv.writer(pseudo_buffer, quoting=csv.QUOTE_ALL)
        response = OEPStream(
            (writer.writerow(x) for x in itertools.chain([cols], return_obj["data"])),
            content_type="text/csv",
            session=session,
        )
        response["Content-Disposition"] = (
            'attachment; filename="{schema}__{table}.csv"'.format(
                schema=schema, table=table
            )
        )
        return response
    elif format == "datapackage":
        pseudo_buffer = Echo()
        writer = csv.writer(pseudo_buffer, quoting=csv.QUOTE_ALL)
        zf = zipstream.ZipFile(mode="w", compression=zipstream.ZIP_DEFLATED)
        csv_name = "{schema}__{table}.csv".format(schema=schema, table=table)
        zf.write_iter(
            csv_name,
            (
                writer.writerow(x).encode("utf-8")
                for x in itertools.chain([cols], return_obj["data"])
            ),
        )
        table_obj = actions._get_table(schema=schema, table=table)
        if table_obj.comment:
            zf.writestr("datapackage.json", table_obj.comment.encode("utf-8"))
        else:
            zf.writestr(
                "datapackage.json",
                json.dumps(JSONCompiler().visit(OEPMetadata())).encode("utf-8"),
            )
        response = OEPStream(
            (chunk for chunk in zf),
            content_type="application/zip",
            session=session,
        )
        response["Content-Disposition"] = (
            'attachment; filename="{schema}__{table}.zip"'.format(
                schema=schema, table=table
            )
        )
        return response
    else:
        if row_id:
            dict_list = [dict(zip(cols, row)) for row in return_obj["data"]]
            if dict_list:
                dict_list = dict_list[0]
            else:
                raise Http404
            # TODO: Figure out what JsonResponse does differently.
            return JsonResponse(dict_list, safe=False)
        return stream(
            (dict(zip(cols, row)) for row in return_obj["data"]),
            session=session,
        )
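# Sketch of the Echo pseudo-buffer used in the csv/datapackage branches above
# (the actual class is defined elsewhere in this module or imported). It
# follows the streaming-CSV pattern from the Django documentation: write()
# hands each value straight back instead of buffering it, which is why
# writer.writerow(x) above yields the serialized row for the streaming
# response.
class Echo:
    def write(self, value):
        # Return the value to the caller instead of storing it.
        return value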