def post(self, request):
    """Execute a stored procedure named by the request body.

    Expects ``spName`` in ``request.data``, validates the parameters with the
    matching ``<SpName>Form`` class found on ``self.forms``, runs the procedure
    through SQLExecutorService, and returns the camelCased result rows.

    Returns 400 when ``spName`` is missing, has no matching form, or the
    submitted parameters fail form validation.
    """
    sp_name = request.data.get('spName', None)
    # guard: a missing spName would otherwise raise TypeError on the '_' membership test below
    if not sp_name:
        return Response(
            {'errorMessage': self.ErrorMessages.INVALID_STORED_PROCEDURE.format(sp_name)},
            status=status.HTTP_400_BAD_REQUEST)
    # get form based on sp_name -- snake_case procedure names map to TitleCase form classes
    if '_' in sp_name:
        format_sp_name = Mapper.underscore_to_titlecase(sp_name)
        form_name = format_sp_name + 'Form'
    else:
        form_name = sp_name + 'Form'
    if hasattr(self.forms, form_name):
        form_cls = getattr(self.forms, form_name)
    else:
        # form hasn't been created which means we throw an error until we create the form
        return Response(
            {'errorMessage': self.ErrorMessages.INVALID_STORED_PROCEDURE.format(sp_name)},
            status=status.HTTP_400_BAD_REQUEST)
    form = form_cls(request.data)
    # validate the model that was sent in
    if form.is_valid():
        # call stored procedure
        sp_service = SQLExecutorService(
            self.connection_data['server'],
            self.connection_data['database'],
            self.connection_data['username'],
            self.connection_data['password'],
            port=self.connection_data['port'],
            engine=self.connection_data['engine'])
        sp_params = form.get_params()
        result = sp_service.call_stored_procedure(sp_name, sp_params)
        camel_cased_results = Mapper().underscore_to_camelcase(result)
        # may need some type of serialization
        return Response(camel_cased_results, status=status.HTTP_200_OK)
    # not valid sp params
    return Response(
        {'errorMessage': self.ErrorMessages.INVALID_PARAMS.format(sp_name)},
        status=status.HTTP_400_BAD_REQUEST)
def add_includes_to_view_model(self, field_obj, include_items, view_model):
    """Attach requested include fields from ``field_obj`` onto ``view_model``.

    Does similar grouping to what serialize_one does in order to avoid
    overwriting objects, i.e. ``resident__person`` and ``resident__unit`` both
    land under the single ``resident`` key.
    """
    # group nested include paths by their top-level field, e.g.
    # ['resident__person', 'resident__unit', 'count'] ->
    # {'resident': ['person', 'unit'], 'count': []}
    includes = {}
    for include in include_items:
        field = Mapper.camelcase_to_underscore(include.split('__', 1)[0])
        if '__' in include:
            item = Mapper.camelcase_to_underscore(include.split('__', 1)[1])
            includes.setdefault(field, []).append(item)
        else:
            includes[field] = []
    for field, related_items in includes.items():
        # defaulted getattr so a missing attribute falls through to the None
        # handling below instead of raising AttributeError (the original only
        # defaulted in the else-branch)
        related_obj = getattr(field_obj, field, None)
        if related_items and related_obj is not None:
            # nested include: serialize the related object with its sub-includes
            view_model[field] = self.serialize_related(related_obj, related_items)
        elif related_obj is not None:
            # do we check if they are basic types and then not try and serialize?
            if type(related_obj) in [int, bool, str, float, dict]:
                view_model[field] = related_obj
            else:
                view_model[field] = self.model_to_dict(related_obj)
        else:
            view_model[field] = None
def call_stored_procedure(self, procedure_name, params):
    """Run a SQL Server stored procedure with pre-built params via pymssql.

    Returns the result rows as a list of snake_cased dicts, or [] when the
    procedure produces no result set.
    """
    sp_command = self.build_sp_command(procedure_name, params)
    # execute command
    with pymssql.connect(self.connection_data['server'],
                         self.connection_data['username'],
                         self.connection_data['password'],
                         self.connection_data['database']) as conn:
        with conn.cursor() as cursor:
            cursor.execute(sp_command)
            try:
                result = cursor.fetchall()
            except Exception:
                # fetchall raises if the stored procedure doesn't return a value;
                # narrowed from a bare except so KeyboardInterrupt etc. are not swallowed
                conn.commit()
                return []
            # commit so any inserts / updates are actually executed
            conn.commit()
            # grab column names back from the description -- could also possibly use as_dict
            column_names = [field[0] for field in cursor.description]
            mapped_result = [
                dict(zip(column_names, row)) for row in result
            ]
            # now untitle case them, camel case them and finally snake_case them
            camel_cased_results = Mapper().titlecase_to_camelcase(mapped_result)
            snake_case_results = Mapper().camelcase_to_underscore(camel_cased_results)
            return snake_case_results
def create_response(self, body=None, response_status=None, error_message=None,
                    content_type='application/json', serialize=False, exclude=None,
                    include=None, fields=None, count=None, using_cache=False,
                    cache_key=None):
    """Build the DRF Response for a request.

    Handles cache hits, error bodies, serialization through ``self.serializer``,
    count wrapping for paged responses, and opt-in caching of successful bodies.
    Error responses default to 400; everything else defaults to 200.
    """
    # avoid the shared-mutable-default-argument pitfall: normalize the former
    # `=[]` defaults here instead of in the signature
    exclude = [] if exclude is None else exclude
    include = [] if include is None else include
    fields = [] if fields is None else fields
    if using_cache:
        # cache hits are always 200s; flag the hit for clients / diagnostics
        response = Response(body, status=status.HTTP_200_OK, content_type=content_type)
        response['Hit'] = 1
        return response
    if body is None and error_message:
        body = {'errorMessage': error_message}
        # error responses default to 400 unless the caller chose a status
        if not response_status:
            response_status = status.HTTP_400_BAD_REQUEST
    if not response_status:
        response_status = status.HTTP_200_OK
    if serialize:
        if body is None:
            body = {}
        else:
            serializer = self.serializer(exclude=exclude, include=include, fields=fields)
            body = serializer.serialize(body)
        body = Mapper.underscore_to_camelcase(body)
    if count is not None:
        # paged responses wrap the payload so clients get the total row count
        body = {'count': count, 'data': body}
    if cache_key and response_status == status.HTTP_200_OK:
        # only cache successful responses, and only for models opting in via CACHE_TIME
        if hasattr(self.model, 'CACHE_TIME'):
            cache_time = self.model.CACHE_TIME
            cache.set(cache_key, body, cache_time)
    return Response(body, status=response_status, content_type=content_type)
def call_stored_procedure(self, procedure_name, params_formatter):
    """Call a Postgres function, introspecting its declared argument names.

    The argument names are handed to ``params_formatter`` so it can build the
    positional argument list in declaration order. Returns the snake_cased
    result rows.
    """
    connection, is_managed = self.get_connection()
    try:
        with connection.cursor() as cursor:
            # look up the declared argument list for the function; the name is
            # passed as a bound parameter instead of being %-formatted into the
            # SQL string, which removes the injection risk of the old version
            cursor.execute('''
                SELECT pg_catalog.pg_get_function_arguments(p.oid)
                FROM pg_catalog.pg_proc p
                WHERE p.proname ~ %s
                AND pg_catalog.pg_function_is_visible(p.oid)
                LIMIT 1;
            ''', ['^({0})$'.format(procedure_name.lower())])
            result = cursor.fetchall()
            params_result = []
            if len(result) == 0:
                # no matching function found -- signal "unknown" to the formatter
                params_result = None
            else:
                # each entry looks like "name type"; keep just the name
                for arg_spec in result[0][0].split(','):
                    params_result.append(arg_spec.lstrip().split(' ')[0])
                if len(params_result) == 1 and params_result[0] == '':
                    # zero-argument functions report a single empty string
                    params_result = []
            cursor.callproc(procedure_name, params_formatter(params_result))
            result = self.dictfetchall(cursor)
    finally:
        # close even on error, but only connections we own; managed connections
        # belong to the framework
        if not is_managed:
            connection.close()
    return Mapper.camelcase_to_underscore(result)
def test_underscore_to_camelcase_array_of_mixed_types(self):
    """Lists mixing scalars, dicts and nested lists convert their dict keys only."""
    int_val = random.randint(1, 10)
    str_val = str(uuid.uuid4())[:4]
    bool_val = False
    nested_dict = {'camel_case': 1}
    nested_list = [int_val, nested_dict]
    source = {
        'camel_case': [int_val, str_val, nested_dict, nested_list, bool_val]
    }
    # scalars pass through untouched; dict keys are camelCased at every depth
    expected = {
        'camelCase': [
            int_val,
            str_val,
            {'camelCase': 1},
            [int_val, {'camelCase': 1}],
            bool_val,
        ]
    }
    self.assertEqual(Mapper.underscore_to_camelcase(source), expected)
def one_model_to_dict(self, model):
    """Serialize a single model instance into a plain dict.

    Foreign keys are emitted as ``<name>_id``, decimals are coerced to float,
    binary columns are decoded to strings, and any model-declared excludes are
    stripped from the result.
    """
    fk_fields, dec_fields, bin_fields, all_fields = self.get_fields_by_type(type(model))

    if self.fields:
        def _requested(name):
            # a field is requested if it, its camelCase form, or either _id
            # variant appears in the requested field list
            return (name in self.fields
                    or Mapper.string_underscore_to_camelcase(name) in self.fields
                    or '{}_id'.format(name) in self.fields
                    or Mapper.string_underscore_to_camelcase('{}_id'.format(name)) in self.fields)
        field_names = [name for name in all_fields if _requested(name)]
    else:
        field_names = all_fields

    result = {}
    for name in field_names:
        if name in fk_fields:
            # foreign keys are exposed as their raw id column
            id_key = '{}_id'.format(name)
            result[id_key] = getattr(model, id_key)
            continue
        value = getattr(model, name)
        if value is not None and name in dec_fields:
            value = float(value)
        elif value is not None and name in bin_fields:
            # this is a bad solution as we get more binary types we could run
            # into problems as this returns an unencoded string rather than the
            # actual byte array -- if we need the actual byte array we are going
            # to need to fix up the serializer
            value = binary_string_to_string(bytes(value))
        result[name] = value

    if hasattr(model, 'get_excludes'):
        for excluded in model.get_excludes():
            result.pop(excluded, None)
    return result
def call_stored_procedure(self, procedure_name, params):
    """Call a Postgres function via a raw psycopg2 connection.

    Returns the snake_cased result rows. The connection is now closed in a
    ``finally`` block -- the original leaked it on every call (and on errors),
    while redundantly closing the cursor inside its own ``with`` block.
    """
    conn = psycopg2.connect(
        'dbname={0} user={1} password={2} host={3} port={4}'.format(
            self.connection_data['database'],
            self.connection_data['username'],
            self.connection_data['password'],
            self.connection_data['server'],
            self.connection_data['port']))
    try:
        with conn.cursor() as c:
            c.callproc(procedure_name, params)
            res = self.dictfetchall(c)
    finally:
        conn.close()
    # NOTE(review): no commit is issued, matching the original -- presumably
    # these procedures are read-only; confirm before using for writes
    snake_case_results = Mapper().camelcase_to_underscore(res)
    return snake_case_results
def call_stored_procedure(self, procedure_name, params_formatter):
    """Run a SQL Server stored procedure, introspecting its IN parameters.

    The parameter names are looked up from INFORMATION_SCHEMA and handed to
    ``params_formatter`` to build the argument list. Returns the result rows
    as snake_cased dicts, or [] when the procedure produces no result set.
    """
    with pymssql.connect(self.connection_data['server'],
                         self.connection_data['username'],
                         self.connection_data['password'],
                         self.connection_data['database']) as conn:
        with conn.cursor() as cursor:
            # this may need to change to only get IN params not out params -- we will see
            cursor.execute(
                'SELECT PARAMETER_NAME FROM INFORMATION_SCHEMA.PARAMETERS WHERE SPECIFIC_NAME=%s AND PARAMETER_MODE=%s ORDER BY ORDINAL_POSITION',
                (procedure_name, 'IN'))
            result = cursor.fetchall()
            params = [row[0] for row in result]
            sp_command = self.build_sp_command(procedure_name, params_formatter(params), params)
            cursor.execute(sp_command)
            try:
                result = cursor.fetchall()
            except Exception:
                # fetchall raises if the stored procedure doesn't return a value;
                # narrowed from a bare except so KeyboardInterrupt etc. are not swallowed
                conn.commit()
                return []
            # commit so any inserts / updates are actually executed
            conn.commit()
            # grab column names back from the description -- could also possibly use as_dict
            column_names = [field[0] for field in cursor.description]
            mapped_result = [
                dict(zip(column_names, row)) for row in result
            ]
            # now untitle case them, camel case them and finally snake_case them
            camel_cased_results = Mapper.titlecase_to_camelcase(mapped_result)
            snake_case_results = Mapper.camelcase_to_underscore(camel_cased_results)
            return snake_case_results
def post(self, request):
    """Execute a stored procedure through its validated form.

    Expects ``spName`` in ``request.data``, resolves the matching
    ``<SpName>Form`` on ``self.forms``, validates, then delegates execution to
    ``form.execute_sp()`` and returns the camelCased results.

    Returns 400 when ``spName`` is missing, has no matching form, or the
    submitted parameters fail validation.
    """
    sp_name = request.data.get('spName', None)
    # guard: a missing spName would otherwise raise TypeError on the '_' membership test below
    if not sp_name:
        return Response(
            {
                'errorMessage':
                self.ErrorMessages.INVALID_STORED_PROCEDURE.format(sp_name)
            },
            status=status.HTTP_400_BAD_REQUEST)
    # get form based on sp_name -- snake_case procedure names map to TitleCase form classes
    if '_' in sp_name:
        format_sp_name = Mapper.underscore_to_titlecase(sp_name)
        form_name = format_sp_name + 'Form'
    else:
        form_name = sp_name + 'Form'
    if hasattr(self.forms, form_name):
        form_cls = getattr(self.forms, form_name)
    else:
        # form hasn't been created which means we throw an error until we create the form
        return Response(
            {
                'errorMessage':
                self.ErrorMessages.INVALID_STORED_PROCEDURE.format(sp_name)
            },
            status=status.HTTP_400_BAD_REQUEST)
    form = form_cls(request.data)
    # validate the model that was sent in
    if form.is_valid():
        result = form.execute_sp()
        camel_cased_results = Mapper().underscore_to_camelcase(result)
        # may need some type of serialization
        return Response(camel_cased_results, status=status.HTTP_200_OK)
    # not valid sp params
    return Response(
        {
            'errorMessage':
            self.ErrorMessages.INVALID_PARAMS.format(sp_name)
        },
        status=status.HTTP_400_BAD_REQUEST)
def test_camelcase_to_underscore_array_of_bools(self):
    """Keys are snake_cased while boolean list elements pass through untouched."""
    given = {'camelCase': [True, False]}
    expected = {'camel_case': [True, False]}
    self.assertEqual(Mapper.camelcase_to_underscore(given), expected)
def test_camelcase_to_underscore_array_of_strings(self):
    """Only dict keys convert -- string values inside lists are left alone."""
    given = {'camelCase': ['camelCase']}
    expected = {'camel_case': ['camelCase']}
    self.assertEqual(Mapper.camelcase_to_underscore(given), expected)
def test_underscore_t0_titlecase(self):
    """snake_case input becomes TitleCase output."""
    # NOTE(review): method name typo 't0' kept -- renaming would change which
    # tests the runner discovers
    self.assertEqual(Mapper.underscore_to_titlecase('sum_charges'), 'SumCharges')
def test_title_case_mixed_bag(self):
    """Runs of capitals inside a TitleCase word collapse (PMSystemID -> pmSystemId)."""
    self.assertEqual(Mapper.titlecase_to_camelcase('PMSystemID'), 'pmSystemId')
def test_title_case_full_upper(self):
    """An all-caps acronym lowercases entirely (SSN -> ssn)."""
    self.assertEqual(Mapper.titlecase_to_camelcase('SSN'), 'ssn')
def test_underscore_to_camelcase_embedded(self):
    """Conversion recurses through nested lists and dicts."""
    given = [{'camel_case': [{'more_camel_case': 5}]}]
    expected = [{'camelCase': [{'moreCamelCase': 5}]}]
    self.assertEqual(Mapper.underscore_to_camelcase(given), expected)
def serialize_one(self, obj):
    """Serialize one model instance into a view-model dict, resolving includes.

    Starts from model_to_dict(obj), then walks self.include: nested paths like
    'address__state' are grouped under their top-level field and serialized via
    serialize_related; plain includes are read off the object directly (basic
    types pass through, lists and models are recursively converted). Finally
    self.exclude keys are stripped.
    """
    view_model = self.model_to_dict(obj)
    # group include paths by top-level field name (snake_cased)
    includes = {}
    for include in self.include:
        # when an explicit field list was requested, skip includes not in it
        if self.fields and include not in self.fields:
            continue
        field = Mapper.camelcase_to_underscore(include.split('__', 1)[0])
        if '__' in include:
            item = Mapper.camelcase_to_underscore(include.split('__', 1)[1])
            if field in includes.keys():
                includes[field].append(item)
            else:
                includes[field] = [item]
        else:
            includes[field] = []
    # make a new list that groups related items together i.e. address__state and address__address_type
    # maybe have it be a dictionary { address: [state, address_type], num_communities: None }
    for field, related_items in includes.items():
        if related_items:
            # nested include: fetch the related object, tolerating missing attrs
            try:
                related_obj = getattr(obj, field)
                related_obj_exists = True
            except AttributeError:
                related_obj_exists = False
            if related_obj_exists and related_obj is not None:
                # NOTE(review): this inner None-check is redundant with the
                # condition above; kept as-is to preserve behavior
                if related_obj is not None:
                    view_model[field] = self.serialize_related(related_obj, related_items)
                else:
                    view_model[field] = None
            else:
                view_model[field] = None
        else:
            # plain include: read the attribute straight off the object
            field = Mapper.camelcase_to_underscore(field)
            try:
                related_obj = getattr(obj, field)
                related_obj_exists = True
            except AttributeError:
                related_obj_exists = False
            if related_obj_exists and related_obj is not None:
                if type(related_obj) is list:
                    # serialize each element; basic types pass through untouched
                    related_obj_result = []
                    for related_obj_item in related_obj:
                        if type(related_obj_item) in [int, bool, str, float, dict]:
                            related_obj_result.append(related_obj_item)
                        else:
                            related_view_model = self.model_to_dict(related_obj_item)
                            related_obj_result.append(related_view_model)
                    view_model[field] = related_obj_result
                else:
                    if type(related_obj) in [int, bool, str, float, dict]:
                        view_model[field] = related_obj
                    else:
                        related_view_model = self.model_to_dict(related_obj)
                        view_model[field] = related_view_model
            else:
                view_model[field] = None
    # strip excluded keys last so includes cannot resurrect them
    if self.exclude:
        for exclude_item in self.exclude:
            if exclude_item in view_model.keys():
                del view_model[exclude_item]
    return view_model
def test_underscore_to_camelcase(self):
    """A plain snake_case string becomes camelCase."""
    self.assertEqual(Mapper.underscore_to_camelcase('camel_case'), 'camelCase')
def get(self, request, pk=None, parent_resource=None, parent_pk=None):
    """Optimized GET handler for single objects, lists, and sub-resources.

    Supports ?meta, caching, ?include, ?fields, ?filters, ?distinct, ?orderBy,
    paging (?page/?pageSize/?countOnly/?noCount). When the request can be
    served with a flat .values() query ("simple" path) it avoids full model
    instantiation; otherwise it falls back to evaluating the queryset.
    Raises self.DoesNotExist for a missing single result when empty_is_error.
    """
    meta_request = request.query_params.get('meta', False)
    if meta_request:
        # grab models fields and return them as a dict
        meta_data = self.model.get_meta_data()
        return self.create_response(body=meta_data)
    # handle caching
    cache_key = None
    if hasattr(self.model, 'CACHE'):
        cache_key = request.get_full_path()
        result = cache.get(cache_key, None)
        if result:
            return self.create_response(body=result, using_cache=True, cache_key=cache_key)
    is_single_result = False
    empty_is_error = False
    # simple == eligible for the optimized flat .values() serialization path
    simple = True
    obj = None
    # if we have a primary key we are returning one result
    if pk:
        if not parent_resource and not parent_pk and 'GET' not in self.supported_methods:
            return self.create_response(
                error_message=ErrorMessages.GET_NOT_SUPPORTED.format(self.model.__name__))
        # try to get the sub resource of a parent -- this is to ensure you can only get access to sub items if you
        # have access to their parent item
        if parent_resource and parent_pk and self.linked_objects:
            if 'GET_SUB' not in self.supported_methods:
                return self.create_response(
                    error_message=ErrorMessages.GET_SUB_NOT_SUPPORTED.format(self.model.__name__))
            obj = self.get_obj_from_linked_objects(pk, parent_resource, parent_pk)
            is_single_result = True
            empty_is_error = True
        # try to get the obj from db with no parent resource
        else:
            obj = self.model.objects.using(self.read_db).filter(pk=pk)
            is_single_result = True
            empty_is_error = True
    else:
        # we could be a sub resource so we need to check if a parent_resource was passed in
        if parent_pk and parent_resource and self.linked_objects:
            snake_cased_url_tail = Mapper.camelcase_to_underscore(
                request.get_full_path().split('/')[-1])
            # linked objects whose value lives on the parent row itself
            lives_on_parent_results = [
                i for i in self.linked_objects
                if 'lives_on_parent' in i and i['lives_on_parent']
                and i['sub_resource_name'] == snake_cased_url_tail
            ]
            if len(lives_on_parent_results) > 0:
                if 'GET_SUB' not in self.supported_methods:
                    return self.create_response(
                        error_message=ErrorMessages.GET_SUB_NOT_SUPPORTED.format(self.model.__name__))
                linked_object = lives_on_parent_results[0]
                # resolve the child id through the parent row, then fetch the child
                child_id_field_name = linked_object['sub_resource_name'] + '_id'
                child_id = linked_object['parent_cls'].objects.using(
                    self.read_db).values(child_id_field_name).get(
                        pk=parent_pk)[child_id_field_name]
                obj = self.model.objects.using(self.read_db).filter(pk=child_id)
                is_single_result = True
                # there is no empty_is_error here to preserve legacy behavior of empty object in this specific case
            else:
                # check if this method has authorized sub resources
                if 'GET_LIST_SUB' not in self.supported_methods:
                    return self.create_response(
                        error_message=ErrorMessages.GET_LIST_SUB_NOT_SUPPORTED.format(self.model.__name__))
                # find the resource that this request is looking for
                obj = self.get_obj_from_linked_objects(pk, parent_resource, parent_pk)
                if pk is not None:
                    is_single_result = True
                    empty_is_error = True
        else:
            # trying to get ALL items in DB
            if 'GET_LIST' not in self.supported_methods:
                return self.create_response(
                    error_message=ErrorMessages.GET_LIST_NOT_SUPPORTED.format(self.model.__name__))
            obj = self.model.objects.using(self.read_db).all()
    # handle includes
    req_includes = request.query_params.get('include', [])
    if req_includes:
        req_includes = req_includes.split(',')
    if type(req_includes) is not list:
        req_includes = [req_includes]
    # only includes the model declares are honored
    model_includes = self.model.get_includes() if hasattr(self.model, 'get_includes') else []
    include = [
        Mapper.camelcase_to_underscore(include.strip())
        for include in req_includes
        if Mapper.camelcase_to_underscore(include.strip()) in model_includes
    ]
    # handle fields
    # if they explicitly ask for the field do we need to make them pass includes as well? currently yes
    req_fields = request.query_params.get('fields', [])
    if req_fields:
        req_fields = req_fields.split(',')
    if type(req_fields) is not list:
        req_fields = [req_fields]
    # todo: mucky and should be cleaned up a tid
    model_fields = []
    foreign_key_ids = []
    for field in self.model._meta.get_fields():
        model_fields.append(field.name)
        if type(field) in [OneToOneField, ForeignKey]:
            foreign_key_ids.append(field.name + '_id')
    # keep only requested fields that actually exist on the model / includes / fk ids
    requested_fields = [
        Mapper.camelcase_to_underscore(field.strip())
        for field in req_fields
        if Mapper.camelcase_to_underscore(field.strip()) in model_fields
        or field.strip() in include
        or Mapper.camelcase_to_underscore(field.strip()) in foreign_key_ids
    ]
    fields = requested_fields
    if len(fields) > 0:
        # the optimized path needs the pk for row grouping; without it, fall back
        model_primary_key_name = [
            field.attname for field in self.model._meta.get_fields()
            if hasattr(field, 'primary_key') and field.primary_key
        ][0]
        if model_primary_key_name not in fields:
            simple = False
    if simple and len(req_fields) == 0:
        # no explicit field list -> select every concrete column
        fields = [
            field.attname for field in self.model._meta.get_fields()
            if not field.auto_created and field.concrete
        ]
    full_includes = []
    multi_field = []
    if len(include) > 0:
        includes_on_model = []
        for field_name in include:
            try:
                includes_on_model.append(self.get_field_tree(field_name))
            except:
                # handling property includes is insanity. Give up on optimizations and use the old way
                simple = False
        # nested includes are not currently supported in the happy path
        if len([include for include in includes_on_model if len(include) > 2]) > 0:
            simple = False
        if simple:
            # flatten each include's concrete columns into the .values() field list
            for include_field_tree in includes_on_model:
                for include_field in include_field_tree[1:]:
                    if hasattr(include_field, 'related_model'):
                        exclude_fields = include_field.related_model.get_excludes(
                        ) if hasattr(include_field.related_model, 'get_excludes') else []
                        include_fields = [
                            include_field.name + '__' + field.attname
                            for field in include_field.related_model._meta.get_fields()
                            if not field.auto_created and field.concrete
                            and field.name not in exclude_fields
                        ]
                        fields.extend(include_fields)
                        full_includes.append(include_field.name)
                        if hasattr(include_field, 'multiple') and include_field.multiple:
                            multi_field.append(include_field.name)
        else:
            # non-simple path: at least avoid N+1 queries for the includes
            for include_field_tree in includes_on_model:
                if len(include_field_tree) == 2 and type(include_field_tree[1]) == ForeignKey:
                    obj = obj.select_related(include_field_tree[0])
                elif type(include_field_tree[1]) in (ForeignKey, ManyToManyRel, ManyToOneRel, OneToOneRel):
                    obj = obj.prefetch_related(include_field_tree[0])
    # gefilter fish
    filters = request.query_params.get('filters', [])
    if filters:
        filter_kwargs = {}
        exclude_filter_kwargs = {}
        isolated_filter_kwargs = {}
        filterable_properties = {}
        filterable_property_kwargs = {}
        filters = filters.split('|')
        # todo: rename this
        for filter in filters:
            exclude_filter = False
            isolate_filter = False
            filter_array = filter.split('=')
            filter_name = filter_array[0]
            filter_value = filter_array[1] if len(filter_array) > 1 else None
            # check if this is a filterable property
            if hasattr(self.model, 'get_filterable_properties'):
                filterable_property = filter_name in self.model.get_filterable_properties().keys()
            else:
                filterable_property = False
            # snake case the name
            filter_name = Mapper.camelcase_to_underscore(filter_name)
            # leading '!' means negate the filter
            if filter_name[0] == '!':
                if filter_value:
                    exclude_filter = True
                filter_name = filter_name[1:]
            # __contains_all filters are applied one value at a time (AND semantics)
            if '__contains_all' in filter_name:
                isolate_filter = True
            # if filter is in model filters then add it to the kwargs
            model_filters = self.model.get_filters()
            if filter_name in model_filters.keys():
                if model_filters[filter_name]['list']:
                    filter_value = [
                        self.format_filter(filter_name, item, model_filters)
                        for item in filter_value.split(',')
                    ]
                # if its a not we need to add it to excluded filters
                if exclude_filter:
                    exclude_filter_kwargs[filter_name] = filter_value
                elif isolate_filter:
                    isolated_filter_kwargs[filter_name] = filter_value
                elif filterable_property:
                    filterable_property_query = self.model.get_filterable_properties()[filter_name]['query']
                    # todo: i don't like this but don't have the time to make this better
                    # todo: if we are annotating we should automatically account for the __in
                    filterable_properties[filter_name.rstrip('__in')] = filterable_property_query
                    filterable_property_kwargs[filter_name] = filter_value
                else:
                    filter_kwargs[filter_name] = filter_value
            else:
                if exclude_filter:
                    exclude_filter_kwargs[filter_name] = self.format_filter(
                        filter_name, filter_value, model_filters)
                elif isolate_filter:
                    isolated_filter_kwargs[filter_name] = self.format_filter(
                        filter_name, filter_value, model_filters)
                else:
                    if filterable_property:
                        filterable_property_query = self.model.get_filterable_properties()[filter_name]['query']
                        filterable_properties[filter_name] = filterable_property_query
                        filterable_property_kwargs[filter_name] = self.format_filter(
                            filter_name, filter_value, model_filters)
                    elif 'revicontains' in filter_name:
                        # create an annotation that is the field name + _rev and pass that to filter
                        # with an F function to query each row in the db to see if it contains a substr
                        # of the passed in filter
                        field_name = filter_name.split('__')[0]
                        field_rev = field_name + '_rev'
                        annotate_kwargs = {
                            field_rev: Value(filter_value, output_field=CharField())
                        }
                        obj = obj.using(self.read_db).annotate(**annotate_kwargs)
                        filter_kwargs[field_rev + '__icontains'] = F(field_name)
                    else:
                        filter_kwargs[filter_name] = self.format_filter(
                            filter_name, filter_value, model_filters)
        # narrow down items with the filters
        obj = obj.using(self.read_db).filter(**filter_kwargs)
        # filter out filterable properties
        if filterable_properties:
            obj = obj.using(self.read_db).annotate(**filterable_properties).filter(
                **filterable_property_kwargs)
        # apply each __contains_all value as its own filter() call (AND semantics)
        for filter_name, filter_value in isolated_filter_kwargs.items():
            filter_name = filter_name.replace('__contains_all', '')
            for x in range(0, len(filter_value)):
                kwargs = {'{0}'.format(filter_name): filter_value[x]}
                obj = obj.using(self.read_db).filter(**kwargs)
        # exclude any items that shouldnt be in the final list
        obj = obj.using(self.read_db).exclude(**exclude_filter_kwargs)
    # handle distinct
    distinct = request.query_params.get('distinct', False)
    if distinct:
        obj = obj.using(self.read_db).distinct()
    # handle ordering
    order_by = request.query_params.get('orderBy', None)
    if order_by:
        order_by = Mapper.camelcase_to_underscore(order_by)
        obj = obj.using(self.read_db).order_by(order_by)
    # handle paging Mr. Herman
    page = request.query_params.get('page', None)
    page_size = request.query_params.get('pageSize', None)
    count_only = request.query_params.get('countOnly', None)
    data_only = request.query_params.get('noCount', None)
    total_items = None
    # countOnly (or pageSize=0) short-circuits: return just the row count
    if count_only or (page_size and int(page_size) == 0):
        total_items = obj.using(self.read_db).count()
        return self.create_response(body=[],
                                    serialize=True,
                                    include=None,
                                    exclude=None,
                                    fields=None,
                                    count=total_items,
                                    using_cache=False,
                                    cache_key=None,
                                    optimized_serialize=True)
    if page and page_size:
        # todo: if they didnt pass in an order_by and there is paging use default models paging if that doesnt
        # todo: exist use id -- if that doesnt exist dont order
        # need to get total items for response if paging
        if data_only:
            # noCount requested: skip the expensive count() and signal -1
            total_items = -1
        else:
            total_items = obj.using(self.read_db).count()
        page = int(page)
        page_size = int(page_size)
        start = (page - 1) * page_size
        end = start + page_size
        obj = obj[start:end]
    # setup excludes
    excludes = self.model.get_excludes() if hasattr(self.model, 'get_excludes') else []
    if simple:
        # optimized path: flat .values() rows, grouped by pk and stitched back together
        fields = [field for field in fields if field not in excludes]
        for field_name in fields:
            try:
                field = self.model._meta.get_field(field_name)
                if hasattr(field, 'multiple') and field.multiple:
                    multi_field.append(field.name)
            except:
                pass
        body = list(obj.values(*fields))
        # preserve the requested ordering when grouping rows by pk
        if order_by:
            body_by_primary_key = OrderedDict()
        else:
            body_by_primary_key = {}
        # pk is not always id
        model_primary_key_name = [
            field.attname for field in self.model._meta.get_fields()
            if hasattr(field, 'primary_key') and field.primary_key
        ][0]
        for body_item in body:
            if body_item[model_primary_key_name] not in body_by_primary_key:
                body_by_primary_key[body_item[model_primary_key_name]] = []
            body_by_primary_key[body_item[model_primary_key_name]].append(body_item)
        for primary_key in body_by_primary_key:
            body_items = body_by_primary_key[primary_key]
            # process full includes
            if full_includes:
                # fold the flattened include__field columns into nested dicts
                for body_item in body_items:
                    for include_field in full_includes:
                        field_names = [
                            field for field in body_item
                            if include_field + '__' in field
                        ]
                        field_names_to_remove = [
                            field for field in field_names
                            if field not in include
                        ]
                        body_item[include_field] = {
                            field_name.replace(include_field + "__", ''): body_item[field_name]
                            for field_name in field_names
                        }
                        # an all-None nested dict means the relation was absent
                        if all(val == None for val in body_item[include_field].values()):
                            if include_field in multi_field:
                                body_item[include_field] = []
                            else:
                                body_item[include_field] = None
                        for field_name in field_names_to_remove:
                            del body_item[field_name]
            # handle possible many to many relationships
            if len(body_items) > 1:
                # multiple rows per pk: merge columns that differ into lists
                keys = [key for key in body_items[0]]
                checked_values = {}
                differences = set()
                for item in body_items:
                    for key in keys:
                        if key not in checked_values:
                            checked_values[key] = item[key]
                        if checked_values[key] != item[key]:
                            differences.add(key)
                if len(differences) > 0:
                    item = body_items[0]
                    for difference in differences:
                        all_items = [
                            body_item[difference] for body_item in body_items
                        ]
                        if all(type(item) is dict for item in all_items):
                            # a little uniquefying magic, courtesy of stack overflow https://stackoverflow.com/a/7090833
                            item[difference] = [
                                dict(tupleized) for tupleized in set(
                                    tuple(item.items()) for item in all_items)
                            ]
                        else:
                            item[difference] = all_items
                    body_by_primary_key[primary_key] = [item]
                else:
                    raise Exception('duplicate object for key')
            # at this point it should be one item
            for field_name in multi_field:
                # to-many fields always serialize as lists, even with one element
                if not type(body_by_primary_key[primary_key][0][field_name]) is list:
                    body_by_primary_key[primary_key][0][field_name] = [
                        body_by_primary_key[primary_key][0][field_name]
                    ]
        body = [
            body_by_primary_key[primary_key][0]
            for primary_key in body_by_primary_key
        ]
        for item in body:
            handle_bytes_decoding(item)
        if is_single_result:
            if len(body) == 0:
                if empty_is_error:
                    raise self.DoesNotExist(
                        ErrorMessages.DOES_NOT_EXIST.format(self.model.__name__, pk))
                body = {}
            elif len(body) == 1:
                body = body[0]
            else:
                raise Exception('duplicate object for key')
        return self.create_response(body=body,
                                    serialize=True,
                                    include=include,
                                    exclude=excludes,
                                    fields=fields,
                                    count=total_items,
                                    using_cache=False,
                                    cache_key=cache_key,
                                    optimized_serialize=True)
    else:
        # evaluate the query
        body = list(obj)
        if is_single_result:
            if len(body) == 0:
                if empty_is_error:
                    raise self.DoesNotExist(
                        ErrorMessages.DOES_NOT_EXIST.format(self.model.__name__, pk))
                body = {}
            elif len(body) == 1:
                body = body[0]
            else:
                raise Exception('duplicate object for key')
        return self.create_response(body=body,
                                    serialize=True,
                                    include=include,
                                    exclude=excludes,
                                    fields=requested_fields,
                                    count=total_items,
                                    using_cache=False,
                                    cache_key=cache_key)
def get(self, request, pk=None, parent_resource=None, parent_pk=None):
    """Legacy GET handler for single objects, lists, and sub-resources.

    Supports ?meta, caching, ?include, ?fields, ?filters, ?distinct, ?orderBy
    and ?page/?pageSize paging. Always serializes through create_response with
    full model instances (no .values() optimization).
    """
    meta_request = request.query_params.get('meta', False)
    if meta_request:
        # grab models fields and return them as a dict
        meta_data = self.model.get_meta_data()
        return self.create_response(body=meta_data)
    # handle caching
    cache_key = None
    if hasattr(self.model, 'CACHE'):
        cache_key = request.get_full_path()
        result = cache.get(cache_key, None)
        if result:
            return self.create_response(body=result, using_cache=True, cache_key=cache_key)
    obj = None
    # if we have a primary key we are returning one result
    if pk:
        if 'GET' not in self.supported_methods:
            return self.create_response(error_message=ErrorMessages.GET_NOT_SUPPORTED.format(self.model.__name__))
        # try to get the obj from db
        try:
            obj = self.model.objects.using(self.read_db).get(pk=pk)
        except self.DoesNotExist:
            return self.create_response(error_message=ErrorMessages.DOES_NOT_EXIST.format(self.model.__name__, pk))
    else:
        # we could be a sub resource so we need to check if a parent_resource was passed in
        if parent_pk and parent_resource and self.linked_objects:
            snake_cased_url_tail = Mapper.camelcase_to_underscore(request.get_full_path().split('/')[-1])
            # linked objects whose value lives on the parent row itself
            lives_on_parent_results = [i for i in self.linked_objects if 'lives_on_parent' in i and i['lives_on_parent'] and i['sub_resource_name'] == snake_cased_url_tail]
            if len(lives_on_parent_results) > 0:
                if 'GET_SUB' not in self.supported_methods:
                    return self.create_response(error_message=ErrorMessages.GET_SUB_NOT_SUPPORTED.format(self.model.__name__))
                linked_object = lives_on_parent_results[0]
                # read the sub-resource straight off the parent instance
                parent_obj = linked_object['parent_cls'].objects.using(self.read_db).get(pk=parent_pk)
                obj = getattr(parent_obj, linked_object['sub_resource_name'])
            else:
                # check if this method has authorized sub resources
                if 'GET_LIST_SUB' not in self.supported_methods:
                    return self.create_response(error_message=ErrorMessages.GET_LIST_SUB_NOT_SUPPORTED.format(self.model.__name__))
                # find the resource that this request is looking for
                for linked_object in self.linked_objects:
                    if linked_object['parent_resource'] == parent_resource:
                        if linked_object['parent_cls']:
                            # get the resource
                            parent_obj = linked_object['parent_cls'].objects.using(self.read_db).get(pk=parent_pk)
                        else:
                            parent_obj = parent_pk
                        # setup kwargs for django's orm to query
                        kwargs = {
                            linked_object['parent_name']: parent_obj
                        }
                        # if there is a linking table do that logic
                        if linked_object['linking_cls']:
                            # get the linking table items
                            linked_objs = linked_object['linking_cls'].objects.using(self.read_db).filter(**kwargs)
                            # go through linking table items and get the sub resources from each entry into a list
                            linked_obj_ids = [getattr(linked_obj, linked_object['sub_resource_name']).id for linked_obj in linked_objs]
                            obj = self.model.objects.using(self.read_db).filter(pk__in=linked_obj_ids)
                        # no linking table and the link is on this obj itself
                        else:
                            obj = self.model.objects.using(self.read_db).filter(**kwargs)
        else:
            # trying to get ALL items in DB
            if 'GET_LIST' not in self.supported_methods:
                return self.create_response(error_message=ErrorMessages.GET_LIST_NOT_SUPPORTED.format(self.model.__name__))
            obj = self.model.objects.using(self.read_db).all()
    # handle includes
    req_includes = request.query_params.get('include', [])
    if req_includes:
        req_includes = req_includes.split(',')
    if type(req_includes) is not list:
        req_includes = [req_includes]
    # only includes the model declares are honored
    model_includes = self.model.get_includes() if hasattr(self.model, 'get_includes') else []
    include = [include.strip() for include in req_includes if Mapper.camelcase_to_underscore(include.strip()) in model_includes]
    # handle fields
    # if they explicitly ask for the field do we need to make them pass includes as well? currently yes
    req_fields = request.query_params.get('fields', [])
    if req_fields:
        req_fields = req_fields.split(',')
    if type(req_fields) is not list:
        req_fields = [req_fields]
    # todo: mucky and should be cleaned up a tid
    model_fields = []
    foreign_key_ids = []
    for field in self.model._meta.get_fields():
        model_fields.append(field.name)
        if type(field) in [OneToOneField, ForeignKey]:
            foreign_key_ids.append(field.name + '_id')
    # keep only requested fields that actually exist on the model / includes / fk ids
    fields = [field.strip() for field in req_fields if Mapper.camelcase_to_underscore(field.strip()) in model_fields or field.strip() in include or Mapper.camelcase_to_underscore(field.strip()) in foreign_key_ids]
    # gefilter fish
    filters = request.query_params.get('filters', [])
    if filters:
        filter_kwargs = {}
        exclude_filter_kwargs = {}
        isolated_filter_kwargs = {}
        filters = filters.split('|')
        for filter in filters:
            exclude_filter = False
            isolate_filter = False
            filter_array = filter.split('=')
            filter_name = filter_array[0]
            filter_value = filter_array[1] if len(filter_array) > 1 else None
            # snake case the name
            filter_name = Mapper.camelcase_to_underscore(filter_name)
            # leading '!' means negate the filter
            if filter_name[0] == '!':
                if filter_value:
                    exclude_filter = True
                filter_name = filter_name[1:]
            # __contains_all filters are applied one value at a time (AND semantics)
            if '__contains_all' in filter_name:
                isolate_filter = True
                #filter_name = filter_name.replace('__contains_all', '')
            # if filter is in model filters then add it to the kwargs
            model_filters = self.model.get_filters()
            if filter_name in model_filters.keys():
                if model_filters[filter_name]['list']:
                    filter_value = [self.format_filter(filter_name, item, model_filters) for item in filter_value.split(',')]
                # if its a not we need to add it to excluded filters
                if exclude_filter:
                    exclude_filter_kwargs[filter_name] = filter_value
                elif isolate_filter:
                    isolated_filter_kwargs[filter_name] = filter_value
                else:
                    filter_kwargs[filter_name] = filter_value
            else:
                if exclude_filter:
                    exclude_filter_kwargs[filter_name] = self.format_filter(filter_name, filter_value, model_filters)
                elif isolate_filter:
                    isolated_filter_kwargs[filter_name] = self.format_filter(filter_name, filter_value, model_filters)
                else:
                    filter_kwargs[filter_name] = self.format_filter(filter_name, filter_value, model_filters)
        # narrow down items with the filters
        obj = obj.using(self.read_db).filter(**filter_kwargs)
        # apply each __contains_all value as its own filter() call (AND semantics)
        for filter_name, filter_value in isolated_filter_kwargs.items():
            filter_name = filter_name.replace('__contains_all', '')
            for x in range(0, len(filter_value)):
                kwargs = {
                    '{0}'.format(filter_name): filter_value[x]
                }
                obj = obj.using(self.read_db).filter(**kwargs)
        # exclude any items that shouldnt be in the final list
        obj = obj.using(self.read_db).exclude(**exclude_filter_kwargs)
    # handle distinct
    distinct = request.query_params.get('distinct', False)
    if distinct:
        obj = obj.using(self.read_db).distinct()
    # handle ordering
    order_by = request.query_params.get('orderBy', None)
    if order_by:
        order_by = Mapper.camelcase_to_underscore(order_by)
        obj = obj.using(self.read_db).order_by(order_by)
    # handle paging Mr. Herman
    page = request.query_params.get('page', None)
    page_size = request.query_params.get('pageSize', None)
    total_items = None
    if page and page_size:
        # todo: if they didnt pass in an order_by and there is paging use default models paging if that doesnt
        # todo: exist use id -- if that doesnt exist dont order
        # need to get total items for response if paging
        total_items = obj.using(self.read_db).count()
        page = int(page)
        page_size = int(page_size)
        start = (page - 1) * page_size
        end = start + page_size
        obj = obj[start:end]
    # setup excludes
    excludes = self.model.get_excludes() if hasattr(self.model, 'get_excludes') else []
    return self.create_response(body=obj, serialize=True, include=include, exclude=excludes, fields=fields, count=total_items, using_cache=False, cache_key=cache_key)
def model_to_dict(self, obj):
    """Serialize a model instance, queryset, list, or related manager into a
    plain dict (or list of dicts).

    Post-processing applied to the serializer output:
    - foreign key / one-to-one values are re-keyed as ``<field>_id``
    - ``DecimalField`` values are cast to ``float``
    - ``BinaryField`` values are run through
      ``convert_serialized_binary_to_string``
    When ``self.fields`` is set, the result is pruned to just those fields
    (matched in snake_case or camelCase form).
    """
    # if its a related manager field (many to one or many to many) -- get all objs
    if issubclass(type(obj), Manager):
        obj = obj.all()

    if type(obj) == DjangoQuerySet or type(obj) == list:
        json_data_str = serializers.serialize('json', obj)
        plain_dict = json.loads(json_data_str)
        model_dict = [
            self.format_data(item['fields'], item['pk'])
            for item in plain_dict
        ]
        if len(model_dict) == 0:
            return []
        cls = type(obj[0])
    # serializers take a query set so if it isn't we need to make it a list
    else:
        json_data_str = serializers.serialize('json', [obj])
        plain_dict = json.loads(json_data_str)
        model_dict = plain_dict[0]['fields']
        # the serializer keeps the pk outside 'fields'; splice it back in
        # under its real attribute name (or 'id')
        if obj._meta.pk.attname != 'id':
            model_dict[obj._meta.pk.attname] = plain_dict[0]['pk']
        else:
            model_dict['id'] = plain_dict[0]['pk']
        cls = type(obj)

    # field groups that need post-processing after serialization
    foreign_key_fields = [
        field.name for field in cls._meta.get_fields()
        if type(field) in [DjangoForeignKey, OneToOneField]
    ]
    decimal_fields = [
        field.name for field in cls._meta.get_fields()
        if type(field) is DecimalField
    ]
    binary_fields = [
        field.name for field in cls._meta.get_fields()
        if type(field) is BinaryField
    ]

    if type(model_dict) is list:
        for item in model_dict:
            self._convert_serialized_fields(item, foreign_key_fields,
                                            decimal_fields, binary_fields)
    else:
        self._convert_serialized_fields(model_dict, foreign_key_fields,
                                        decimal_fields, binary_fields)

    if self.fields:
        if type(model_dict) is list:
            # bug fix: this branch previously returned only the LAST pruned
            # item instead of the full pruned list
            return [self._prune_to_fields(item) for item in model_dict]
        return self._prune_to_fields(model_dict)
    return model_dict

def _convert_serialized_fields(self, item, foreign_key_fields,
                               decimal_fields, binary_fields):
    """Mutate ``item`` in place: re-key FKs as ``<key>_id``, cast decimals to
    float and decode serialized binary values."""
    updated_fields = {}
    fields_to_remove = []
    for key in item.keys():
        if key in foreign_key_fields:
            # the serialized value is the related pk -- expose it as <key>_id
            updated_fields[key + '_id'] = item[key]
            fields_to_remove.append(key)
        if key in decimal_fields and item[key] is not None:
            updated_fields[key] = float(item[key])
        if key in binary_fields and item[key] is not None:
            # this is because the serializer is serializing from the db and
            # adding on bytearray(b\ and also )' onto the end. this is a bad
            # solution as we get more binary types we could run into problems
            # as this returns and unencoded string rather than the actual byte
            # array -- if we need the actual byte array we are going to need
            # to fix up the serializer
            updated_fields[key] = convert_serialized_binary_to_string(
                item[key])
    # apply updates after the key loop to avoid mutating the dict while
    # iterating over it
    for updated_field, value in updated_fields.items():
        item[updated_field] = value
    for field_to_remove in fields_to_remove:
        item.pop(field_to_remove, None)

def _prune_to_fields(self, model_dict):
    """Return a copy of ``model_dict`` restricted to the keys requested in
    ``self.fields`` (snake_case or camelCase match)."""
    return {
        key: value
        for key, value in model_dict.items()
        if key in self.fields
        or Mapper.underscore_to_camelcase(key) in self.fields
    }
def test_camelcase_to_underscore_empty_array(self): camel_case = {'camelCase': []} underscore = {'camel_case': []} val = Mapper.camelcase_to_underscore(camel_case) self.assertEqual(val, underscore)
def test_underscore_to_backwards_compatible(self): underscore = 'address_line_1' camel_case = 'addressLine_1' val = Mapper.underscore_to_camelcase(underscore) self.assertEqual(val, camel_case)
def test_camelcase_to_underscore_array_of_objects(self): camel_case = {'camelCase': [{'camelCase': 1}]} underscore = {'camel_case': [{'camel_case': 1}]} val = Mapper.camelcase_to_underscore(camel_case) self.assertEqual(val, underscore)
def post(self, request, parent_resource=None, parent_pk=None):
    """Create a new ``self.model`` instance from the request body.

    When invoked as a sub-resource (``parent_resource``/``parent_pk`` set),
    the new object is attached to its parent via the model's
    ``resource_mapping``, a direct fk field on the request data, or a
    linking class from ``self.linked_objects``.

    Returns 201 with the serialized object on success, 400 with an error
    message when the operation is unsupported or parsing fails.
    """
    # check we are authorized to POST
    if not parent_resource and not parent_pk and 'POST' not in self.supported_methods:
        return self.create_response(
            error_message=ErrorMessages.POST_NOT_SUPPORTED.format(self.model.__name__))

    reference_fields = None
    # figure out the parent_resource to obj mapping
    # i.e. /resource -> 'field_name' attr on the model
    if parent_resource and parent_pk and self.model.resource_mapping \
            and parent_resource in self.model.resource_mapping.keys():
        reference_fields = {
            self.model.resource_mapping[parent_resource]: parent_pk
        }
    # this is if we are using some id vs fk for parent
    elif parent_resource and parent_pk:
        # find the right linked obj
        for linked_object in self.linked_objects:
            if linked_object['parent_resource'] == parent_resource:
                # should only do this if there is no parent_cls; if there is a
                # parent_cls there should be a resource mapping!
                if not linked_object['parent_cls']:
                    request.data[linked_object['parent_name']] = parent_pk

    # check if they are passing an id in to just create a linking class
    id = request.data.get('id', None)
    # an id with no linked_objects attribute means there is nothing to link
    if id and not hasattr(self, 'linked_objects'):
        return self.create_response(
            error_message=ErrorMessages.POST_SUB_WITH_ID_AND_NO_LINKING_CLASS.format(
                self.model.__name__))
    # im safe to assume i have linked_objects now; check we have a linking class
    if id:
        linked_classes = [linked_obj for linked_obj in self.linked_objects
                          if linked_obj['parent_resource'] == parent_resource
                          and linked_obj['linking_cls']]
        if len(linked_classes) < 1:
            return self.create_response(
                error_message=ErrorMessages.POST_SUB_WITH_ID_AND_NO_LINKING_CLASS.format(
                    self.model.__name__))

    # occasionally we may need to transform the request to fit the model --
    # this is generally an unrestful call
    if hasattr(self.model, 'transform_request'):
        try:
            transformed_request = self.model.transform_request(request)
        except Exception as e:
            # args[0] is the message passed along in the exception
            return self.create_response(error_message=e.args[0])
        try:
            obj = self.model.parse(transformed_request, existing_id=id,
                                   reference_fields=reference_fields)
        except ParseException as ex:
            return self.create_response(error_message=ex.args[0],
                                        response_status=status.HTTP_400_BAD_REQUEST)
    else:
        # check for reference fields to parse them into the model
        try:
            obj = self.model.parse(request.data, existing_id=id,
                                   reference_fields=reference_fields)
        except ParseException as ex:
            return self.create_response(error_message=ex.args[0],
                                        response_status=status.HTTP_400_BAD_REQUEST)

    obj.cascade_save(write_db=self.write_db)

    # save linking table items -- todo: move this into cascade_save?
    if parent_pk and parent_resource and self.linked_objects:
        if 'POST_SUB' not in self.supported_methods:
            # bug fix: POST_SUB_NOT_SUPPORTED is a format string like the
            # other ErrorMessages constants used here -- calling it raised
            # TypeError instead of returning this 400
            return self.create_response(
                error_message=ErrorMessages.POST_SUB_NOT_SUPPORTED.format(
                    self.model.__name__))
        snake_cased_url_tail = Mapper.camelcase_to_underscore(
            request.get_full_path().split('/')[-1])
        self.execute_on_linked_object(obj, self.linked_objects,
                                      parent_resource, parent_pk,
                                      snake_cased_url_tail, self.write_db)

    return self.create_response(obj, response_status=status.HTTP_201_CREATED,
                                serialize=True)
def test_camelcase_to_underscore_not_capitalized(self): camel_case = 'camelCase' underscore = 'camel_case' val = Mapper.camelcase_to_underscore(camel_case) self.assertEqual(val, underscore)
def post(self, request, parent_resource=None, parent_pk=None):
    """Create a new ``self.model`` instance from the request body.

    Supports sub-resource creation (``parent_resource``/``parent_pk``)
    through resource mappings, direct fk fields, or linking classes, plus a
    ``?links=model__fieldId=<pk>|...`` query param for creating extra
    linking rows after the save.

    Returns 201 with the serialized object on success, 400 with an error
    message when the operation is unsupported or parsing fails.
    """
    # check we are authorized to POST
    if not parent_resource and not parent_pk and 'POST' not in self.supported_methods:
        return self.create_response(
            error_message=ErrorMessages.POST_NOT_SUPPORTED.format(
                self.model.__name__))

    reference_fields = None
    # figure out the parent_resource to obj mapping
    # i.e. /resource -> 'field_name' attr on the model
    if parent_resource and parent_pk and self.model.resource_mapping \
            and parent_resource in self.model.resource_mapping.keys():
        reference_fields = {
            self.model.resource_mapping[parent_resource]: parent_pk
        }
    # this is if we are using some id vs fk for parent
    elif parent_resource and parent_pk:
        # find the right linked obj
        for linked_object in self.linked_objects:
            if linked_object['parent_resource'] == parent_resource:
                # should only do this if there is no parent_cls; if there is a
                # parent_cls there should be a resource mapping!
                if not linked_object['parent_cls']:
                    request.data[linked_object['parent_name']] = parent_pk

    # check if they are passing an id in to just create a linking class
    # (renamed from `id` to avoid shadowing the builtin)
    existing_id = request.data.get('id', None)
    # an id with no linked_objects attribute means there is nothing to link
    if existing_id and not hasattr(self, 'linked_objects'):
        return self.create_response(
            error_message=ErrorMessages.POST_SUB_WITH_ID_AND_NO_LINKING_CLASS.format(
                self.model.__name__))
    # im safe to assume i have linked_objects now; check we have a linking class
    if existing_id:
        linked_classes = [
            linked_obj for linked_obj in self.linked_objects
            if linked_obj['parent_resource'] == parent_resource
            and linked_obj['linking_cls']
        ]
        if len(linked_classes) < 1:
            return self.create_response(
                error_message=ErrorMessages.POST_SUB_WITH_ID_AND_NO_LINKING_CLASS.format(
                    self.model.__name__))

    # occasionally we may need to transform the request to fit the model --
    # this is generally an unrestful call
    # consistency fix: wrap transform/parse like the sibling post handler so
    # parse failures come back as 400s instead of unhandled 500s
    if hasattr(self.model, 'transform_request'):
        try:
            transformed_request = self.model.transform_request(request)
        except Exception as e:
            # args[0] is the message passed along in the exception
            return self.create_response(error_message=e.args[0])
        try:
            obj = self.model.parse(transformed_request, existing_id=existing_id,
                                   reference_fields=reference_fields,
                                   request=request)
        except ParseException as ex:
            return self.create_response(error_message=ex.args[0],
                                        response_status=status.HTTP_400_BAD_REQUEST)
    else:
        # check for reference fields to parse them into the model
        try:
            obj = self.model.parse(request.data, existing_id=existing_id,
                                   reference_fields=reference_fields,
                                   request=request)
        except ParseException as ex:
            return self.create_response(error_message=ex.args[0],
                                        response_status=status.HTTP_400_BAD_REQUEST)

    obj.cascade_save(write_db=self.write_db)

    # save linking table items -- todo: move this into cascade_save?
    if parent_pk and parent_resource and self.linked_objects:
        if 'POST_SUB' not in self.supported_methods:
            # bug fix: POST_SUB_NOT_SUPPORTED is a format string like the
            # other ErrorMessages constants used here -- calling it raised
            # TypeError instead of returning this 400
            return self.create_response(
                error_message=ErrorMessages.POST_SUB_NOT_SUPPORTED.format(
                    self.model.__name__))
        snake_cased_url_tail = Mapper.camelcase_to_underscore(
            request.get_full_path().split('/')[-1])
        self.execute_on_linked_object(obj, self.linked_objects,
                                      parent_resource, parent_pk,
                                      snake_cased_url_tail, self.write_db)

    # save extra linkers, e.g. ?links=relatedModel__relatedId=4|other__otherId=7
    links = request.query_params.get('links', None)
    if links:
        for link in links.split('|'):
            link_reference, link_id = link.split('=')
            # NOTE(review): assumes exactly one '__' separator in the
            # reference -- a field name containing '__' would break this
            model, link_id_field_name = link_reference.split('__')
            link_model = obj.__class__._meta.get_field(
                Mapper.camelcase_to_underscore(model)).related_model
            # find the fk on the linking model that points back at us
            back_reference_field_name = next(
                x for x in link_model._meta.get_fields()
                if hasattr(x, 'related_model')
                and x.related_model == obj.__class__).attname
            link_model(**{
                back_reference_field_name: obj.id,
                Mapper.camelcase_to_underscore(link_id_field_name): link_id
            }).save()

    return self.create_response(obj, response_status=status.HTTP_201_CREATED,
                                serialize=True)