Exemplo n.º 1
0
def log_request_body(info, response_or_error):
    """
        Logs the raw GraphQL request body together with a summary of the response:
        mutations log the returned instance, queries log result counts/ids.
    :param info: Graphene ResolveInfo whose context carries the raw request body and user
    :param response_or_error: The GraphQL response object, model instance, paginated
    result, queryset-like result, or error
    :return: None. Logging side effects only
    """
    body = info.context._body.decode('utf-8')
    try:
        json_body = json.loads(body)
        # Errors log at error level, successful responses at debug level
        log = logger.error if isinstance(response_or_error, ErrorType) else logger.debug
        log(f" User: {info.context.user} \n Action: {json_body['operationName']} \n Variables: {json_body['variables']} \n Body:  {json_body['query']}")
        if hasattr(response_or_error, '_meta') and isinstance(
                response_or_error._meta, MutationOptions):
            # Just log top level types
            if isinstance(response_or_error, Model):
                mutation_response = json.dumps(
                    # _state is Django bookkeeping, not model data
                    R.omit(['_state'], response_or_error.__dict__),
                    sort_keys=True,
                    indent=1,
                    cls=MyDjangoJSONEncoder)
                logger.debug(f'Mutation returned {mutation_response}')
            elif isinstance(response_or_error, BaseType):
                try:
                    mutation_response = json.dumps(
                        R.omit(['_state'], response_or_error.__dict__),
                        sort_keys=True,
                        indent=1,
                    )
                    logger.debug(f'Mutation returned {mutation_response}')
                # Fixed bare except: some BaseType values aren't JSON serializable,
                # so fall back to logging the class
                except Exception:
                    logger.debug(
                        f'Mutation returned {response_or_error.__class__}')
        else:
            if hasattr(response_or_error, 'objects'):
                count = response_or_error.objects.count()
                # Log up to 100 ids, don't log if it's a larger set because it might be a paging query
                ids = R.join(' ', [
                    '', 'having ids:',
                    R.join(
                        ', ',
                        R.map(R.prop("id"),
                              response_or_error.objects.values('id')))
                ]) if count < 100 else ""
                logger.debug(
                    f'Paginated Query Page {response_or_error.page} of page size {response_or_error.page_size} out of total pages {response_or_error.pages} returned {count} results{ids}'
                )
            elif hasattr(response_or_error, 'count'):
                count = response_or_error.count()
                # Log up to 100 ids, don't log if it's a larger set because it might be a paging query
                ids = R.join(' ', [
                    '', 'having ids:',
                    R.join(', ',
                           R.map(R.prop("id"), response_or_error.values('id')))
                ]) if count < 100 else ""
                logger.debug(f'Query returned {count} results{ids}')
            else:
                # Renamed from id to avoid shadowing the builtin
                result_id = R.prop('id', response_or_error)
                logger.debug(f'Query returned single result {result_id}')

    except Exception:
        # Use the module logger (not the root logging module) and include the traceback
        logger.exception(body)
Exemplo n.º 2
0
    def accumulate_nodes(accum, raw_node, i):
        """
            Accumulates each node into a dict keyed by the node's stage key.
            Since nodes share stage keys, each entry is a list of node features
        :param accum: The accumulated dict of stage key to list of node features
        :param raw_node: The raw node dict currently being processed
        :param i: Index of the node, passed through for coordinate resolution
        :return: accum merged with the new node appended under its stage key
        """
        location_obj = resolve_coordinates(
            default_location, R.prop_or(None, location_key, raw_node), i)
        location = R.prop('location', location_obj)
        is_generalized = R.prop('isGeneralized', location_obj)
        # Nodes are stored under their stage key
        node_stage = raw_node[stage_key]
        # Resolve the stage name to its key, or assume it's already a key
        key = R.prop('key', R.prop_or(dict(key=node_stage), node_stage, stage_by_name))

        # Combine the resource's data (minus settings and rawData) with the raw node's
        # own properties. These arbitrary key/values are exposed as parallel
        # properties/propertyValues lists because graphql hates arbitrary key/values
        properties = R.merge(
            R.omit(['settings', 'rawData'], R.prop('data', resource)),
            raw_node
        )
        properties[node_name_key] = humanize(properties[node_name_key])

        # Build the GeoJSON-style feature for this node
        feature = dict(
            value=string_to_float(R.prop(value_key, raw_node)),
            type='Feature',
            geometry=dict(
                type='Point',
                coordinates=location
            ),
            name=R.prop(node_name_key, raw_node),
            isGeneralized=is_generalized,
            properties=list(R.keys(properties)),
            propertyValues=list(R.values(properties))
        )
        # Drop the stale accum[key] and re-add it with the new feature concatenated,
        # so nodes sharing a stage key collect into one list
        return R.merge(
            R.omit([key], accum),
            {key: R.concat(R.prop_or([], key, accum), [feature])}
        )
Exemplo n.º 3
0
 def test_create(self):
     """
         Creates a sample 'Candy' Resource via graphql_update_or_create_resource
         using raw_data rows of the form 'name;address;lat, lon;stage;value',
         asserts the mutation returned no errors, and snapshot-tests the created
         resource with nondeterminate props (omit_props) removed.
     """
     values = dict(
         name='Candy',
         region=dict(id=R.head(self.regions).id),
         data=R.merge(
             sample_settings,
             dict(
                 material='Candy',
                 raw_data=[
                     'Other Global Imports;Shipments, location generalized;51.309933, 3.055030;Source;22,469,843',
                     'Knauf (Danilith) BE;Waregemseweg 156-142 9790 Wortegem-Petegem, Belgium;50.864762, 3.479308;Conversion;657,245',
                     "MPRO Bruxelles;Avenue du Port 67 1000 Bruxelles, Belgium;50.867486, 4.352543;Distribution;18,632",
                     'Residential Buildings (all typologies);Everywhere in Brussels;NA;Demand;3,882,735',
                     'Duplex House Typology;Everywhere in Brussels;NA;Demand;13,544',
                     'Apartment Building Typology;Everywhere in Brussels;NA;Demand;34,643',
                     'New West Gypsum Recycling;9130 Beveren, Sint-Jansweg 9 Haven 1602, Kallo, Belgium;51.270229, 4.261048;Reconversion;87,565',
                     'Residential Buildings (all typologies);Everywhere in Brussels;NA;Sink;120,000',
                     'RecyPark South;1190 Forest, Belgium;50.810799, 4.314789;Sink;3,130',
                     'RecyPark Nord;Rue du Rupel, 1000 Bruxelles, Belgium;50.880181, 4.377136;Sink;1,162'
                 ]
             )
         )
     )
     result = graphql_update_or_create_resource(self.client, values)
     dump_errors(result)
     assert not R.has('errors', result), R.dump_json(R.prop('errors', result))
     # look at the users added and omit the non-determinant dateJoined
     result_path_partial = R.item_path(['data', 'createResource', 'resource'])
     self.assertMatchSnapshot(R.omit(omit_props, result_path_partial(result)))
Exemplo n.º 4
0
def get_paginator(qs, page_size, page, paginated_type, order_by, **kwargs):
    """
    Adapted from https://gist.github.com/mbrochh/f92594ab8188393bd83c892ef2af25e6
    Creates a pagination_type based on the paginated_type function
    :param qs: The queryset to paginate
    :param page_size: Number of items per page
    :param page: The 1-based page number to fetch
    :param paginated_type: Factory producing the paginated graphene type
    :param order_by default id. Optional kwarg to order by in django format as a string, e.g. '-key,+name'
    :param kwargs: Additional kwargs to pass paginated_type function, usually unneeded
    :return: The instantiated paginated_type
    """
    # Default to ordering by id when no order_by is supplied
    ordering_fields = (order_by or 'id').split(',')
    paginator = Paginator(qs.order_by(*ordering_fields), page_size)
    try:
        page_obj = paginator.page(page)
    except PageNotAnInteger:
        # Non-integer page values fall back to the first page
        page_obj = paginator.page(1)
    except EmptyPage:
        # Out-of-range page values fall back to the last page
        page_obj = paginator.page(paginator.num_pages)
    return paginated_type(
        page=page_obj.number,
        pages=paginator.num_pages,
        page_size=page_size,
        has_next=page_obj.has_next(),
        has_prev=page_obj.has_previous(),
        objects=page_obj.object_list,
        # order_by is consumed here, not by paginated_type
        **R.omit(['order_by'], kwargs)
    )
Exemplo n.º 5
0
def form_sample_user_state_data(regions, projects, data):
    """
    Given data in the form dict(region_keys=[...], ...), converts region_keys to
    regions=[{id:x}, {id:y}, ...] by resolving the regions
    :param regions: Persisted regions
    :param projects: Persisted projects
    :param {dict} data: Sample data in the form:
    dict(
        userRegions=[
            dict(
                region=dict(key='belgium'),  # key is converted to persisted Region's id
                mapbox=dict(viewport=dict(
                    latitude=50.5915,
                    longitude=2.0165,
                    zoom=7
                )),
            )
        ]
    ),
    :return: Data in the form dict(userRegions=[dict(region=dict(id=x), mapbox=..., ...), ...])
    """
    # Resolve the persisted scope instances for each scope type
    resolved_scopes = dict(
        userRegions=user_state_scope_instances(
            'region', 'userRegions', regions, data),
        userProjects=user_state_scope_instances(
            'project', 'userProjects', projects, data)
    )
    # Keep everything in data that isn't scope data, then merge in the resolved scopes
    return R.merge(
        R.omit(['userRegions', 'userProjects'], data),
        resolved_scopes
    )
Exemplo n.º 6
0
def create_default_schema(class_config=None):
    """
        Merges the default graphene types defined in this schema with an application using this library
        that has its own graphene types. The latter can define overrides for all the default graphene types
        defined in this file. UserState and GroupState are created based on a merger of the types
    :param class_config: Optional dict of graphene type overrides. Defaults to no overrides
    :return: The created schema
    """
    # Fixed mutable default argument: a shared {} default could be mutated across calls
    class_config = class_config or {}

    # Merge the incoming class_config with our defaults
    merged_class_config = R.merge(default_class_config(), class_config)

    # We use region, project, and location to create user_state and group_state
    # This is because user_state and group_state store settings for a user or group about those enties
    # For instance, what regions does a group have access to or what location is selected by a user
    user_state = create_user_state_query_and_mutation_classes(
        merged_class_config)
    group_state = create_group_state_query_and_mutation_classes(
        merged_class_config)

    # Note that user_search is a data class, not a model class, so isn't queried/mutated directly, but via user_state
    # additional_user_scope_schemas and additional_django_model_user_scopes are used for configured
    # UserStateSchema
    query_and_mutation_class_lookups = R.merge(
        R.omit([
            'user_search', 'additional_user_scope_schemas',
            'additional_django_model_user_scopes'
        ], merged_class_config),
        dict(user_state=user_state, group_state=group_state))
    return create_schema(query_and_mutation_class_lookups)
Exemplo n.º 7
0
def reverse_relationships():
    """
        Models the reverse relationship from locations back to projects so that
        the projects of a location can be queried
    :return: dict keyed by 'projects' containing the graphene type, its fields,
    and a type modifier that wraps the type in a List
    """
    from rescape_region.schema_models.scope.project.project_schema import ProjectType, project_fields
    # Omit locations from the project fields to avoid a circular reference
    project_config = dict(
        graphene_type=ProjectType,
        fields=R.omit(['locations'], project_fields),
        type_modifier=lambda *type_and_args: List(*type_and_args)
    )
    return dict(projects=project_config)
Exemplo n.º 8
0
def create_sample_location(cls, location_dict):
    """
        Persists a sample location from the given values
    :param cls: The location model class
    :param location_dict: The location values, optionally containing a unique 'key'
    :return: The persisted location instance
    """
    if not R.has('key', location_dict):
        # other implementors should delete duplicates first
        location = cls(**location_dict)
        location.save()
        return location
    # rescape_region uses a key for uniqueness
    instance, _ = cls.objects.update_or_create(
        key=R.prop('key', location_dict),
        defaults=R.omit(['key'], location_dict))
    return instance
Exemplo n.º 9
0
def user_state_scope_instances(scope_key, user_scope_key, scope_instances,
                               data):
    """
        Creates scope instance dicts for the given instances. New scope instances can be
        passed as well for Project, which instructs the server to create the Project when
        creating/updating the userProject
    :param scope_key: 'region', 'project', etc
    :param user_scope_key: 'userRegions', 'userProjects', etc
    :param scope_instances: regions or projects or ...
    :param data: The userState data to put the instances in. E.g. data.userRegions gets mapped to include
    the resolved regions
    :return: The list of user scope instance dicts with each scope resolved to dict(id=...)
    when persisted, or left as the full dict so the server can create it
    """

    # Index the persisted instances by their unique key for O(1) lookup below
    scope_instances_by_key = R.map_prop_value_as_index('key', scope_instances)

    def resolve_scope_instance(scope_key, user_scope_instance):
        # Replace key with id. The composed steps are curried, so each returns a
        # function applied to the previous step's result
        id = R.compose(
            # third get the id if it exists
            R.prop_or(None, 'id'),
            # second resolve the scope instance if it exists
            lambda k: R.prop_or(None, k, scope_instances_by_key),
            # first get the key
            R.item_str_path(f'{scope_key}.key'))(user_scope_instance)
        return {
            scope_key:
            R.compact_dict(
                dict(
                    # Resolve the persisted Scope instance by key
                    id=id
                ) if id else dict(
                    # Otherwise pass everything so the server can create the instance
                    # (Currently only supported for projects)
                    user_scope_instance[scope_key]))
        }

    return R.map(
        # Find the id of the scope instance that matches,
        # returning dict(id=scope_instance_id). We can't return the whole scope instance
        # because we are saving within json data, not the Django ORM
        # If the scope instance is new and doesn't match anything, create the user scope instance
        # without an id so that the server saves it (Only implemented for Project, not Region thus far)
        lambda user_scope_instance: R.merge(
            # Other stuff like mapbox
            R.omit([scope_key], user_scope_instance),
            # The project or region
            resolve_scope_instance(scope_key, user_scope_instance)),
        R.prop(user_scope_key, data))
    def test_update_fields_for_create_or_update(self):
        """
            Snapshot-tests input_type_parameters_for_update_or_create with
            user values, then with foo values that reference a user by id,
            omitting the password since its hash is nondeterminate in snapshots.
        """
        values = dict(
            email="*****@*****.**",
            username="******",
            first_name='T',
            last_name='Rex',
            # Normally we'd use make_password here
            password=make_password("rrrrhhh", salt='not_random'))
        self.assertMatchSnapshot(
            input_type_parameters_for_update_or_create(user_fields, values))

        foo_values = dict(
            key='fooKey',
            name='Foo Name',
            # Pretend this is a saved user id
            user=dict(id=5),
            data=dict(example=2.2))
        self.assertMatchSnapshot(
            R.omit(['password'],
                   input_type_parameters_for_update_or_create(
                       foo_fields, foo_values)))
Exemplo n.º 11
0
def resolver_for_feature_collection(resource, context, **kwargs):
    """
        Like resolver but takes care of converting the geos value stored in the field to a dict that
        has the values we want to resolve, namely type and features.
    :param {string} resource: The instance whose json field data is being resolved
    :param {ResolveInfo} context: Graphene context which contains the fields queried in field_asts
    :return: {DataTuple} Standard resolver return value
    """

    # Take the camelized keys. We don't store data fields slugified. We leave them camelized
    selections = R.map(lambda sel: sel.name.value, context.field_asts[0].selection_set.selections)
    # Recover the json by parsing the string provided by GeometryCollection and mapping
    # the geometries property to features.
    # Renamed local from json to geojson so it no longer shadows the json module
    geojson = R.compose(
        # Map the value GeometryCollection to FeatureCollection for the type property
        R.map_with_obj(lambda k, v: R.if_else(
            R.equals('type'),
            R.always('FeatureCollection'),
            R.always(v)
        )(k)),
        # Map geometries to features: [{type: Feature, geometry: geometry}]
        lambda dct: R.merge(
            # Remove geometries
            R.omit(['geometries'], dct),
            # Add features containing the geometries
            dict(features=R.map(
                lambda geometry: dict(type='Feature', geometry=geometry),
                R.prop_or([], 'geometries', dct))
            )
        ),
    )(ast.literal_eval(R.prop(context.field_name, resource).json))
    # Identify the keys that are actually in resource[json_field_name]
    all_selections = R.filter(
        lambda key: key in geojson,
        selections
    )
    # Pick out the values that we want
    result = R.pick(all_selections, geojson)

    # Return in the standard Graphene DataTuple
    return namedtuple('DataTuple', R.keys(result))(*R.values(result))
    def mutate(self, info, search_location_data=None):
        """
            Creates, updates, or deletes a SearchLocation from the given data,
            including creating/updating any nested SearchJurisdictions.
        :param info: Graphene ResolveInfo
        :param search_location_data: dict of SearchLocation values, optionally
        containing 'jurisdictions' dicts and/or a deletion marker
        :return: UpsertSearchLocation wrapping the resulting search_location,
        or the deletion response when marked for delete
        """
        deleted_search_location_response = delete_if_marked_for_delete(
            SearchLocation, UpsertSearchLocation, 'search_location',
            search_location_data)
        if deleted_search_location_response:
            return deleted_search_location_response

        modified_search_location_data = R.compose(
            # Make sure that all props are unique that must be, either by modifying values or erring.
            lambda data: enforce_unique_props(search_location_fields, data),
            # Remove the many to many values. They are saved separately
            lambda data: R.omit(['jurisdictions'], data))(search_location_data)

        update_or_create_values = input_type_parameters_for_update_or_create(
            search_location_fields, modified_search_location_data)
        search_location, created = update_or_create_with_revision(
            SearchLocation, update_or_create_values)

        # SearchJurisdictions can be created during the creation of search_locations
        if R.prop_or(False, 'jurisdictions', search_location_data):
            existing_search_intersections_by_id = R.index_by(
                R.prop('id'), search_location.jurisdictions.all())
            # Bug fix: iterate the 'jurisdictions' key that the guard above checked,
            # not the nonexistent 'intersections' key
            for search_jurisdiction_unsaved in R.prop('jurisdictions',
                                                      search_location_data):
                # existing instances have an id
                search_jursidiction_id = R.prop_or(
                    None, 'id', search_jurisdiction_unsaved)
                # Merge the unsaved values over the existing instance values, if any
                search_jurisdiction, created = update_or_create_with_revision(
                    SearchJurisdiction,
                    R.merge(
                        R.prop(search_jursidiction_id,
                               existing_search_intersections_by_id)
                        if search_jursidiction_id else {},
                        search_jurisdiction_unsaved))
                # Once saved, add it to the search location.
                # Bug fix: use add(), not set(). set() expects an iterable and would
                # replace the entire relation on every loop iteration
                search_location.jurisdictions.add(search_jurisdiction)

        return UpsertSearchLocation(search_location=search_location)
Exemplo n.º 13
0
def create_sample_user_state(cls, regions, projects, user_state_dict):
    """
    Persists sample user state data into a UserState
    :param cls: The UserState class
    :param {[Region]} regions: Persisted sample regions
    :param {[Projects]} projects: Persisted sample projects
    :param user_state_dict: Sample data in the form: dict(
        username="******",  # This will be mapped to the User id in create_sample_user_state
        data=dict(
            userRegions=[
                dict(
                    region=dict(key='belgium'),  # key is converted to persisted Region's id
                    mapbox=dict(viewport=dict(
                        latitude=50.5915,
                        longitude=2.0165,
                        zoom=7
                    )),
                )
            ]
        )
    ),
    :return: The persisted UserState instance
    """
    user = get_user_model().objects.get(username=user_state_dict['username'])
    user_state_values = R.merge_deep(
        # Skip username and data, they are handled above and below
        R.omit(['username', 'data'], user_state_dict),
        # Convert data.region_keys to data.user_region ids
        dict(user=user,
             data=form_sample_user_state_data(regions, projects,
                                              R.prop('data',
                                                     user_state_dict))))
    # Save the user_state with the complete data
    user_state = cls(**user_state_values)
    user_state.save()
    return user_state
def quiz_model_paginated_query(client,
                               model_class,
                               paginated_query,
                               result_name,
                               page_count_expected,
                               props,
                               omit_props,
                               order_by=None,
                               page_size=1):
    """
        Tests a pagination query for a model with variables
    :param client: Apollo client
    :param model_class: Model class
    :param paginated_query: Model's pagination query
    :param page_count_expected: The number of pages expected when the page_size is 1, in other words the
    number of items in the database that match props
    :param result_name: The name of the results in data.[result_name].objects
    :param props: The props to query, not including pagination
    :param omit_props: Props to omit from assertions because they are nondeterminate
    :param order_by: Order by page-level prop. Defaults to ordering by 'id'
    :param page_size: Default 1
    :return the first result (first page) and final result (last page) for further testing:
    """
    result = paginated_query(client,
                             variables=dict(page=1,
                                            page_size=page_size,
                                            order_by=order_by,
                                            objects=R.to_array_if_not(props)))

    # Check against errors
    assert not R.has('errors', result), R.dump_json(
        R.map(lambda e: format_error(e), R.prop('errors', result)))
    first_page_objects = R.item_path(['data', result_name, 'objects'], result)
    # Assert we got page_size results on the first page
    assert page_size == R.compose(
        R.length,
        R.map(R.omit(omit_props)),
    )(first_page_objects)

    # Collect the ids not on the first page by querying the model directly.
    # Bug fix: guard order_by with the same (order_by or 'id') default used by
    # get_paginator, so the default order_by=None no longer raises AttributeError
    remaining_ids = list(
        set(
            R.map(
                R.prop('id'),
                model_class.objects.filter(*process_filter_kwargs(
                    model_class, **R.map_keys(underscore, props))).order_by(
                        *(order_by or 'id').split(',')))) -
        set(R.map(R.compose(int, R.prop('id')), first_page_objects)))

    page_info = R.item_path(['data', result_name], result)
    # We have page_size pages so there should be a total number of pages
    # of what we specified for page_count_expected
    assert page_info['pages'] == page_count_expected
    assert page_info['hasNext']
    assert not page_info['hasPrev']
    # Get the final page
    new_result = paginated_query(client,
                                 variables=dict(
                                     page=page_count_expected,
                                     page_size=page_info['pageSize'],
                                     order_by=order_by,
                                     objects=R.to_array_if_not(props)))
    # Make sure the new_result matches one of the remaining ids
    assert R.contains(
        R.item_path(['data', result_name, 'objects', 0, 'id'], new_result),
        remaining_ids)

    new_page_info = R.item_path(['data', result_name], new_result)
    # Still expect the same page count
    assert new_page_info['pages'] == page_count_expected
    # Make sure it's the last page
    assert not new_page_info['hasNext']
    assert new_page_info['hasPrev']
    return [result, new_result]
 def omit_to_many(scope_obj):
     """
         Strips the model's many-to-many attributes from scope_obj, since
         to-many values are saved separately after the instance exists
     :param scope_obj: dict of scope object values
     :return: scope_obj without any many-to-many keys
     """
     to_many_attnames = R.map(R.prop('attname'), model._meta.many_to_many)
     return R.omit(to_many_attnames, scope_obj)
def handle_can_mutate_related(model, related_model_scope_config, data,
                              validated_scope_objs_instances_and_ids):
    """
        Mutates the given related models of an instance if permitted
        See rescape-region's UserState for a working usage
    :param model: The related model
    :param related_model_scope_config: Configuration of the related model relative to the referencing instance
    :param data: The data containing the related models dicts to possibly mutate with
    :param validated_scope_objs_instances_and_ids: Config of the related objects that have been validated as
    existing in the database for objects not being created
    :return: Possibly mutates instances, returns data with newly created ids set
    """
    def make_fields_unique_if_needed(scope_obj):
        # If a field needs to be unique, like a key, call its unique_with method
        # NOTE(review): the resolved unique_with function is applied to the whole
        # scope_obj for every key rather than to each value — confirm that is intended
        return R.map_with_obj(
            lambda key, value: R.item_str_path_or(
                R.identity, f'field_config.{key}.unique_with',
                related_model_scope_config)(scope_obj), scope_obj)

    def convert_foreign_key_to_id(scope_obj):
        # Find ForeignKey attributes and map the class field name to the foreign key id field
        # E.g. region to region_id, user to user_id, etc
        converters = R.compose(
            R.from_pairs, R.map(lambda field: [field.name, field.attname]),
            R.filter(lambda field: R.isinstance(ForeignKey, field)))(
                model._meta.fields)
        # Convert scope_obj[related_field] = {id: x} to scope_obj[related_field_id] = x
        return R.from_pairs(
            R.map_with_obj_to_values(
                lambda key, value: [converters[key],
                                    R.prop('id', value)]
                if R.has(key, converters) else [key, value], scope_obj))

    def omit_to_many(scope_obj):
        # Strip many-to-many values; they are set after the instance is saved below
        return R.omit(R.map(R.prop('attname'), model._meta.many_to_many),
                      scope_obj)

    # This indicates that scope_objs were submitted that didn't have ids
    # This is allowed if those scope_objs can be created/updated when the userState is mutated
    if R.prop_or(False, 'can_mutate_related', related_model_scope_config):
        for scope_obj_key_value in validated_scope_objs_instances_and_ids[
                'scope_objs']:

            scope_obj = scope_obj_key_value['value']
            scope_obj_path = scope_obj_key_value['key']
            # Only mutate if the scope_obj has more than just an id
            if R.length(R.keys(R.omit(['id'], scope_obj))):
                modified_scope_obj = R.compose(
                    convert_foreign_key_to_id, omit_to_many,
                    make_fields_unique_if_needed)(scope_obj)
                if R.prop_or(False, 'id', scope_obj):
                    # Update, we don't need the result since it's already in user_state.data
                    instance, created = model.objects.update_or_create(
                        defaults=R.omit(['id'], modified_scope_obj),
                        **R.pick(['id'], scope_obj))
                else:
                    # Create
                    instance = model(**modified_scope_obj)
                    instance.save()
                    # We need to replace the object
                    # passed in with an object containing the id of the instance
                    data = R.fake_lens_path_set(scope_obj_path.split('.'),
                                                R.pick(['id'], instance), data)

                for to_many in model._meta.many_to_many:
                    if to_many.attname in R.keys(scope_obj):
                        # Set existing related values to the created/updated instances
                        getattr(instance, to_many.attname).set(
                            R.map(R.prop('id'), scope_obj[to_many.attname]))
    return data