def put(self, request, schema, table, row_id=None):
    """Insert or update a single row addressed by its id.

    If a row with ``row_id`` already exists in ``schema.table`` it is
    updated, otherwise a new row is inserted.  Either way the pending
    changes are applied afterwards.

    :param request: incoming HTTP request; the row payload is expected
        under ``request.data['query']``.
    :param schema: target schema name (resolved via ``get_table_name``).
    :param table: target table name.
    :param row_id: id of the row to write; required for this method.
    :return: ``JsonResponse`` with the action result (201 on insert).
    :raises actions.APIError: if the id in the payload disagrees with
        the id in the URL.
    """
    schema, table = actions.get_table_name(schema, table)
    if not row_id:
        return JsonResponse(
            actions._response_error('This methods requires an id'),
            status=status.HTTP_400_BAD_REQUEST)

    column_data = request.data['query']
    # A PUT may not move the row to a different id.
    if row_id and column_data.get('id', int(row_id)) != int(row_id):
        raise actions.APIError(
            'Id in URL and query do not match. Ids may not change.',
            status=status.HTTP_409_CONFLICT)

    engine = actions._get_engine()
    conn = engine.connect()
    try:
        # Check whether the id is already in use.  int() coercion hardens
        # the interpolated id against SQL injection; schema/table are
        # presumably sanitised by get_table_name above — TODO confirm.
        # (The old trailing "if row_id else False" was dead code: we
        # already returned above when row_id is falsy.)
        exists = conn.execute(
            'select count(*) '
            'from {schema}.{table} '
            'where id = {id};'.format(schema=schema, table=table,
                                      id=int(row_id))).first()[0] > 0
    finally:
        # Release the connection even if the query raises.
        conn.close()

    if exists:
        response = self.__update_rows(request, schema, table, column_data,
                                      row_id)
        actions.apply_changes(schema, table)
        return JsonResponse(response)
    else:
        result = self.__insert_row(request, schema, table, column_data,
                                   row_id)
        actions.apply_changes(schema, table)
        return JsonResponse(result, status=status.HTTP_201_CREATED)
def get(self, request, schema, table, column=None):
    """Describe the columns of a table.

    :param request: incoming HTTP request (unused beyond routing).
    :param schema: schema name (resolved without schema restrictions).
    :param table: table name.
    :param column: optional single column name; when given, only that
        column's description is returned.
    :return: ``JsonResponse`` with the description mapping.
    :raises actions.APIError: if ``column`` is not part of the table.
    """
    schema, table = actions.get_table_name(schema, table,
                                           restrict_schemas=False)
    description = actions.describe_columns(schema, table)
    if column is None:
        return JsonResponse(description)
    try:
        return JsonResponse(description[column])
    except KeyError:
        raise actions.APIError('The column specified is not part of '
                               'this table.')
def put(self, request, schema, table):
    """
    Every request to unsave http methods have to contain a "csrftoken".
    This token is used to deny cross site reference forwarding.
    In every request the header had to contain "X-CSRFToken" with the actual
    csrftoken.
    The token can be requested at / and will be returned as cookie.

    :param request:
    :return:
    """
    # Guard clauses: writable schemas only, no internal schemas,
    # authenticated users only, and the table must not exist yet.
    if schema not in PLAYGROUNDS and schema not in UNVERSIONED_SCHEMAS:
        raise PermissionDenied
    if schema.startswith("_"):
        raise PermissionDenied
    if request.user.is_anonymous:
        raise PermissionDenied
    if actions.has_table(dict(schema=schema, table=table), {}):
        raise APIError("Table already exists")

    json_data = request.data["query"]

    # Tag every constraint with its target location and the ADD action.
    constraint_definitions = []
    for constraint in json_data.get("constraints", []):
        constraint.update(action="ADD", c_table=table, c_schema=schema)
        constraint_definitions.append(constraint)

    if "columns" not in json_data:
        raise actions.APIError("Table contains no columns")

    # Tag every column with its target location as well.
    column_definitions = []
    for column in json_data["columns"]:
        column.update(c_table=table, c_schema=schema)
        column_definitions.append(column)

    self.__create_table(request, schema, table, column_definitions,
                        constraint_definitions,
                        metadata=json_data.get("metadata"))

    # The creating user becomes administrator of the new table.
    permission, _ = login_models.UserPermission.objects.get_or_create(
        table=DBTable.load(schema, table), holder=request.user)
    permission.level = login_models.ADMIN_PERM
    permission.save()
    request.user.save()
    return JsonResponse({}, status=status.HTTP_201_CREATED)
def put(self, request, schema, table):
    """
    Every request to unsave http methods have to contain a "csrftoken".
    This token is used to deny cross site reference forwarding.
    In every request the header had to contain "X-CSRFToken" with the actual
    csrftoken.
    The token can be requested at / and will be returned as cookie.

    :param request:
    :return:
    """
    # Only these sandbox-style schemas may receive new tables; internal
    # (underscore-prefixed) schemas are off limits.
    if schema not in ['model_draft', 'sandbox', 'test']:
        raise PermissionDenied
    if schema.startswith('_'):
        raise PermissionDenied
    # Fixed: is_anonymous is a property on the Django versions this file
    # targets (the sibling handler already uses the property form);
    # calling it raises/breaks on modern Django.
    if request.user.is_anonymous:
        raise PermissionDenied
    if actions.has_table(dict(schema=schema, table=table), {}):
        raise APIError('Table already exists')

    json_data = request.data['query']

    # Tag every constraint with its target table/schema and ADD action.
    constraint_definitions = []
    for constraint_definiton in json_data.get('constraints', []):
        constraint_definiton.update({
            "action": "ADD",
            "c_table": table,
            "c_schema": schema
        })
        constraint_definitions.append(constraint_definiton)

    if 'columns' not in json_data:
        raise actions.APIError("Table contains no columns")

    # Tag every column with its target table/schema.
    column_definitions = []
    for column_definition in json_data['columns']:
        column_definition.update({"c_table": table, "c_schema": schema})
        column_definitions.append(column_definition)

    result = actions.table_create(schema, table, column_definitions,
                                  constraint_definitions)

    # The creating user becomes administrator of the new table.
    perm, _ = login_models.UserPermission.objects.get_or_create(
        table=DBTable.load(schema, table), holder=request.user)
    perm.level = login_models.ADMIN_PERM
    perm.save()
    request.user.save()
    return JsonResponse(result, status=status.HTTP_201_CREATED)
def put(self, request, schema, table, row_id=None, action=None):
    """Insert or update a single row addressed by its id.

    If a row with ``row_id`` already exists in ``schema.table`` it is
    updated, otherwise a new row is inserted; pending changes are applied
    afterwards.

    :param request: incoming HTTP request; payload under
        ``request.data['query']``.
    :param schema: target schema name (resolved via ``get_table_name``).
    :param table: target table name.
    :param row_id: id of the row to write; required for this method.
    :param action: must be falsy — the 'new' action only exists for POST.
    :return: ``JsonResponse`` (201 on insert).
    :raises APIError: if ``action`` is given, or the payload id disagrees
        with the URL id.
    """
    if action:
        raise APIError(
            "This request type (PUT) is not supported. The "
            "'new' statement is only possible in POST requests."
        )
    schema, table = actions.get_table_name(schema, table)
    if not row_id:
        return JsonResponse(
            actions._response_error("This methods requires an id"),
            status=status.HTTP_400_BAD_REQUEST,
        )
    column_data = request.data["query"]
    # A PUT may not move the row to a different id.
    if row_id and column_data.get("id", int(row_id)) != int(row_id):
        raise actions.APIError(
            "Id in URL and query do not match. Ids may not change.",
            status=status.HTTP_409_CONFLICT,
        )
    engine = actions._get_engine()
    # Check whether the id is already in use.  int() coercion hardens the
    # interpolated id against SQL injection; schema/table are presumably
    # sanitised by get_table_name above — TODO confirm.  (The former
    # "if row_id else False" tail was dead: we already returned above
    # when row_id is falsy.)
    exists = (
        engine.execute(
            "select count(*) "
            "from {schema}.{table} "
            "where id = {id};".format(schema=schema, table=table,
                                      id=int(row_id))
        ).first()[0]
        > 0
    )
    if exists:
        response = self.__update_rows(request, schema, table, column_data,
                                      row_id)
        actions.apply_changes(schema, table)
        return JsonResponse(response)
    else:
        result = self.__insert_row(request, schema, table, column_data,
                                   row_id)
        actions.apply_changes(schema, table)
        return JsonResponse(result, status=status.HTTP_201_CREATED)
def get(self, request, schema, table, row_id=None):
    """Read rows from ``schema.table``.

    Supports column projection, where/orderby/limit/offset query
    parameters, a single-row lookup via ``row_id``, and CSV streaming
    when ``?form=csv`` is passed.  ``row_id`` is mutually exclusive with
    where/orderby/limit/offset.

    :param request: incoming HTTP request carrying the query parameters.
    :param schema: schema name (resolved without schema restrictions).
    :param table: table name.
    :param row_id: optional id for a single-row lookup (404 if absent).
    :return: streaming CSV response, ``JsonResponse`` for a single row,
        or a streamed JSON row iterator.
    :raises actions.APIError: on conflicting or malformed parameters.
    """
    schema, table = actions.get_table_name(schema, table,
                                           restrict_schemas=False)
    columns = request.GET.getlist("column")
    where = request.GET.getlist("where")
    if row_id and where:
        raise actions.APIError(
            "Where clauses and row id are not allowed in the same query"
        )
    orderby = request.GET.getlist("orderby")
    if row_id and orderby:
        raise actions.APIError(
            "Order by clauses and row id are not allowed in the same query"
        )
    limit = request.GET.get("limit")
    if row_id and limit:
        raise actions.APIError(
            "Limit by clauses and row id are not allowed in the same query"
        )
    offset = request.GET.get("offset")
    if row_id and offset:
        # Bug fix: this message previously repeated the "Order by" text.
        raise actions.APIError(
            "Offset clauses and row id are not allowed in the same query"
        )
    # NOTE(review): the query parameter is spelled "form", not "format" —
    # confirm against the public API docs.  Renamed the local so it no
    # longer shadows the builtin format().
    output_format = request.GET.get("form")
    if offset is not None and not offset.isdigit():
        raise actions.APIError("Offset must be integer")
    if limit is not None and not limit.isdigit():
        raise actions.APIError("Limit must be integer")
    if not all(parser.is_pg_qual(c) for c in columns):
        raise actions.APIError("Columns are no postgres qualifiers")
    if not all(parser.is_pg_qual(c) for c in orderby):
        raise actions.APIError(
            "Columns in groupby-clause are no postgres qualifiers"
        )
    # OPERATORS could be EQUALS, GREATER, LOWER, NOTEQUAL, NOTGREATER, NOTLOWER
    # CONNECTORS could be AND, OR
    # If you connect two values with an +, it will convert the + to a space.
    # Whatever.
    where_clauses = self.__read_where_clause(where)
    if row_id:
        # Translate the row id into an additional "id = row_id" clause.
        clause = {
            "operands": [{"type": "column", "column": "id"}, row_id],
            "operator": "EQUALS",
            "type": "operator",
        }
        if where_clauses:
            where_clauses = conjunction(clause, where_clauses)
        else:
            where_clauses = clause

    # TODO: Validate where_clauses. Should not be vulnerable
    data = {
        "schema": schema,
        "table": table,
        "columns": columns,
        "where": where_clauses,
        "orderby": orderby,
        "limit": limit,
        "offset": offset,
    }

    return_obj = self.__get_rows(request, data)
    # Detach the DB session (if any) so it can be closed with the stream.
    session = (
        sessions.load_session_from_context(return_obj.pop("context"))
        if "context" in return_obj
        else None
    )
    # Extract column names from description
    if "description" in return_obj:
        cols = [col[0] for col in return_obj["description"]]
    else:
        cols = []
        return_obj["data"] = []
        return_obj["rowcount"] = 0
    if output_format == "csv":
        pseudo_buffer = Echo()
        writer = csv.writer(pseudo_buffer, quoting=csv.QUOTE_ALL)
        response = OEPStream(
            (
                writer.writerow(x)
                for x in itertools.chain([cols], return_obj["data"])
            ),
            content_type="text/csv",
            session=session,
        )
        response[
            "Content-Disposition"
        ] = 'attachment; filename="{schema}__{table}.csv"'.format(
            schema=schema, table=table
        )
        return response
    else:
        if row_id:
            dict_list = [dict(zip(cols, row)) for row in return_obj["data"]]
            if dict_list:
                dict_list = dict_list[0]
            else:
                raise Http404
            # TODO: Figure out what JsonResponse does different.
            return JsonResponse(dict_list, safe=False)
        return stream(
            (dict(zip(cols, row)) for row in return_obj["data"]),
            session=session,
        )
def get(self, request, schema, table, row_id=None):
    """Read rows from ``schema.table`` as JSON.

    Supports column projection, where/orderby/limit/offset query
    parameters, and a single-row lookup via ``row_id`` (404 if absent).
    ``row_id`` is mutually exclusive with where/orderby/limit/offset.

    :param request: incoming HTTP request carrying the query parameters.
    :param schema: schema name (resolved without schema restrictions).
    :param table: table name.
    :param row_id: optional id for a single-row lookup.
    :return: ``JsonResponse`` with a list of row dicts, or a single dict
        when ``row_id`` is given.
    :raises actions.APIError: on conflicting or malformed parameters.
    """
    schema, table = actions.get_table_name(schema, table,
                                           restrict_schemas=False)
    columns = request.GET.getlist('column')
    # NOTE(review): 'where' uses GET.get (single value) here while sibling
    # handlers use getlist — confirm __read_where_clause expects a scalar.
    where = request.GET.get('where')
    if row_id and where:
        raise actions.APIError(
            'Where clauses and row id are not allowed in the same query')
    orderby = request.GET.getlist('orderby')
    if row_id and orderby:
        raise actions.APIError(
            'Order by clauses and row id are not allowed in the same query')
    limit = request.GET.get('limit')
    if row_id and limit:
        raise actions.APIError(
            'Limit by clauses and row id are not allowed in the same query')
    offset = request.GET.get('offset')
    if row_id and offset:
        # Bug fix: this message previously repeated the "Order by" text.
        raise actions.APIError(
            'Offset clauses and row id are not allowed in the same query')
    if offset is not None and not offset.isdigit():
        raise actions.APIError("Offset must be integer")
    if limit is not None and not limit.isdigit():
        raise actions.APIError("Limit must be integer")
    if not all(parser.is_pg_qual(c) for c in columns):
        raise actions.APIError("Columns are no postgres qualifiers")
    if not all(parser.is_pg_qual(c) for c in orderby):
        raise actions.APIError(
            "Columns in groupby-clause are no postgres qualifiers")
    # OPERATORS could be EQUALS, GREATER, LOWER, NOTEQUAL, NOTGREATER, NOTLOWER
    # CONNECTORS could be AND, OR
    # If you connect two values with an +, it will convert the + to a space.
    # Whatever.
    where_clauses = self.__read_where_clause(where)
    if row_id:
        # Restrict the query to the requested row.
        where_clauses.append({'left': {'type': 'column', 'column': 'id'},
                              'operator': 'EQUALS',
                              'right': row_id,
                              'type': 'operator_binary'})

    # TODO: Validate where_clauses. Should not be vulnerable
    data = {'schema': schema,
            'table': table,
            'columns': columns,
            'where': where_clauses,
            'orderby': orderby,
            'limit': limit,
            'offset': offset
            }

    return_obj = self.__get_rows(request, data)
    # Extract column names from description
    cols = [col[0] for col in return_obj['description']]
    dict_list = [dict(zip(cols, row)) for row in return_obj['data']]
    if row_id:
        if dict_list:
            dict_list = dict_list[0]
        else:
            raise Http404
    # TODO: Figure out what JsonResponse does different.
    return JsonResponse(dict_list, safe=False)
def get(self, request, schema, table, row_id=None):
    """Read rows from ``schema.table``.

    Supports column projection, where/orderby/limit/offset query
    parameters, a single-row lookup via ``row_id``, and CSV streaming
    when ``?form=csv`` is passed.  ``row_id`` is mutually exclusive with
    where/orderby/limit/offset.

    :param request: incoming HTTP request carrying the query parameters.
    :param schema: schema name (resolved without schema restrictions).
    :param table: table name.
    :param row_id: optional id for a single-row lookup (404 if absent).
    :return: streaming CSV response, ``JsonResponse`` for a single row,
        or a streamed JSON row iterator.
    :raises actions.APIError: on conflicting or malformed parameters.
    """
    schema, table = actions.get_table_name(schema, table,
                                           restrict_schemas=False)
    columns = request.GET.getlist('column')
    where = request.GET.getlist('where')
    if row_id and where:
        raise actions.APIError(
            'Where clauses and row id are not allowed in the same query')
    orderby = request.GET.getlist('orderby')
    if row_id and orderby:
        raise actions.APIError(
            'Order by clauses and row id are not allowed in the same query'
        )
    limit = request.GET.get('limit')
    if row_id and limit:
        raise actions.APIError(
            'Limit by clauses and row id are not allowed in the same query'
        )
    offset = request.GET.get('offset')
    if row_id and offset:
        # Bug fix: this message previously repeated the "Order by" text.
        raise actions.APIError(
            'Offset clauses and row id are not allowed in the same query'
        )
    # NOTE(review): the query parameter is spelled 'form', not 'format' —
    # confirm against the public API docs.  Renamed the local so it no
    # longer shadows the builtin format().
    output_format = request.GET.get('form')
    if offset is not None and not offset.isdigit():
        raise actions.APIError("Offset must be integer")
    if limit is not None and not limit.isdigit():
        raise actions.APIError("Limit must be integer")
    if not all(parser.is_pg_qual(c) for c in columns):
        raise actions.APIError("Columns are no postgres qualifiers")
    if not all(parser.is_pg_qual(c) for c in orderby):
        raise actions.APIError(
            "Columns in groupby-clause are no postgres qualifiers")
    # OPERATORS could be EQUALS, GREATER, LOWER, NOTEQUAL, NOTGREATER, NOTLOWER
    # CONNECTORS could be AND, OR
    # If you connect two values with an +, it will convert the + to a space.
    # Whatever.
    where_clauses = self.__read_where_clause(where)
    if row_id:
        # Translate the row id into an additional "id = row_id" clause.
        clause = {
            'operands': [{
                'type': 'column',
                'column': 'id'
            }, row_id],
            'operator': 'EQUALS',
            'type': 'operator'
        }
        if where_clauses:
            where_clauses = conjunction(clause, where_clauses)
        else:
            where_clauses = clause

    # TODO: Validate where_clauses. Should not be vulnerable
    data = {
        'schema': schema,
        'table': table,
        'columns': columns,
        'where': where_clauses,
        'orderby': orderby,
        'limit': limit,
        'offset': offset
    }

    return_obj = self.__get_rows(request, data)
    # Extract column names from description
    cols = [col[0] for col in return_obj['description']]
    if output_format == 'csv':
        pseudo_buffer = Echo()
        writer = csv.writer(pseudo_buffer, quoting=csv.QUOTE_ALL)
        response = StreamingHttpResponse(
            (writer.writerow(x)
             for x in itertools.chain([cols], return_obj['data'])),
            content_type="text/csv")
        response[
            'Content-Disposition'] = 'attachment; filename="{schema}__{table}.csv"'.format(
                schema=schema, table=table)
        return response
    else:
        if row_id:
            dict_list = [
                dict(zip(cols, row)) for row in return_obj['data']
            ]
            if dict_list:
                dict_list = dict_list[0]
            else:
                raise Http404
            # TODO: Figure out what JsonResponse does different.
            return JsonResponse(dict_list, safe=False)
        return stream((dict(zip(cols, row)) for row in return_obj['data']))