Exemplo n.º 1
0
def create_default_schema(class_config=None):
    """
        Merges the default graphene types defined in this schema with an application using this library
        that has its own graphene types. The latter can define overrides for all the default graphene types
        defined in this file. UserState and GroupState are created based on a merger of the types
    :param class_config: Optional dict of graphene type overrides keyed by type name. Defaults to an empty dict
    :return: The schema created from the merged query and mutation classes
    """
    # Use None rather than a mutable {} default so the default can't be shared and mutated across calls
    class_config = {} if class_config is None else class_config

    # Merge the incoming class_config with our defaults
    merged_class_config = R.merge(default_class_config(), class_config)

    # We use region, project, and location to create user_state and group_state
    # This is because user_state and group_state store settings for a user or group about those entities
    # For instance, what regions does a group have access to or what location is selected by a user
    user_state = create_user_state_query_and_mutation_classes(
        merged_class_config)
    group_state = create_group_state_query_and_mutation_classes(
        merged_class_config)

    # Note that user_search is a data class, not a model class, so isn't queried/mutated directly, but via user_state
    # additional_user_scope_schemas and additional_django_model_user_scopes are used for configuring
    # UserStateSchema, so they are omitted from the top-level schema lookups here
    query_and_mutation_class_lookups = R.merge(
        R.omit([
            'user_search', 'additional_user_scope_schemas',
            'additional_django_model_user_scopes'
        ], merged_class_config),
        dict(user_state=user_state, group_state=group_state))
    return create_schema(query_and_mutation_class_lookups)
Exemplo n.º 2
0
def create_sample_resource(region, resource_dict):
    """
        Create and persist a sample Resource for the given region, first computing
        and embedding its Sankey graph data.
    :param region: The Region the resource belongs to
    :param resource_dict: Dict of resource fields including a 'data' dict
    :return: The saved Resource instance
    """
    # Compute the Sankey graph and fold it into the resource's data dict
    sankey_graph = generate_sankey_data(resource_dict)
    complete_data = R.merge(R.prop('data', resource_dict), dict(graph=sankey_graph))
    # Save the resource with the region and the completed data
    instance_values = R.merge(resource_dict, dict(region=region, data=complete_data))
    resource = Resource(**instance_values)
    resource.save()
    return resource
Exemplo n.º 3
0
def increment_prop_until_unique(django_class, strategy, prop, additional_filter_props, django_instance_data):
    """
        Increments the given prop of the given django instance data as given by data[prop] until it matches
        nothing in the database. Note that this includes checks against soft deleted instances where the
        deleted prop is non-null (assumes the use of SafeDeleteModel on the model class)
    :param django_class: Django class to query
    :param strategy: function to try to make a value unique. Expects all potential matching values--all values
    that begin with the value of the property--the prop value, and the current index. It's called for each matching
    value to guarantee the strategy will eventually get a unique value. For instance, if prop is key and it equals
    'foo', and 'foo', 'foo1', 'foo2', and 'foo3' are in the db, strategy will be called with an array of 4 values 4
    times, with index 0 through 3. If strategy is None the default strategy is to append index+1 to the duplicate name
    :param prop: The prop to ensure uniqueness
    :param additional_filter_props: Other props, such as user id, to filter by. This allows incrementing a name
    dependent on the current user, for instance. This can be a dict or a function expecting the django_instance_data
    and returning a dict
    :param django_instance_data: The data containing the prop
    :return: The data merged with the uniquely named prop
    :raise Exception: If no unique value could be found after trying every matching value
    """
    prop_value = R.prop(prop, django_instance_data)
    # pk is only present for update operations
    pk = R.prop_or(None, 'id', django_instance_data)

    strategy = strategy or default_strategy
    # Include deleted objects here. It's up to additional_filter_props to deal with the deleted=date|None property
    all_objects = django_class.all_objects if R.has('all_objects', django_class) else django_class.objects
    matching_values = all_objects.filter(
        # Ignore value matching the pk if this is an update operation.
        # In other words we can update the key to what it already is, aka do nothing
        *R.compact([
            ~Q(id=pk) if pk else None,
        ]),
        **R.merge(
            {'%s__startswith' % prop: prop_value},
            # Give the filter props the instance if they are a function
            R.when(
                lambda f: inspect.isfunction(f),
                lambda f: f(django_instance_data)
            )(additional_filter_props or {})
        )
    ).values_list(prop, flat=True).order_by(prop)

    # If nothing matched at all, the original prop_value is already unique
    success = prop_value
    for i, matching_key in enumerate(matching_values):
        success = None
        attempt = strategy(matching_values, prop_value, i)
        if attempt not in matching_values:
            success = attempt
            break
    if not success:
        raise Exception("Could not generate unique prop value %s. The following matching ones exist %s" % (
            prop_value, matching_values))
    return R.merge(django_instance_data, {prop: success})
Exemplo n.º 4
0
    def accumulate_nodes(accum, raw_node, i):
        """
            Accumulate each node, keying by the name of the node's stage key
            Since nodes share stage keys each result is an array of nodes
        :param accum: Dict accumulated so far, mapping stage key to a list of node feature dicts
        :param raw_node: The raw node dict to accumulate
        :param i: The node's index, passed along for coordinate resolution
        :return: accum with raw_node added (as a geojson-Feature-shaped dict) under its stage key
        """
        # NOTE: resolve_coordinates, default_location, location_key, stage_key, stage_by_name,
        # node_name_key, value_key and resource all come from the enclosing scope
        location_obj = resolve_coordinates(default_location, R.prop_or(None, location_key, raw_node), i)
        location = R.prop('location', location_obj)
        is_generalized = R.prop('isGeneralized', location_obj)
        # The key where the node is stored is the stage key
        node_stage = raw_node[stage_key]
        # Get key from name or it's already a key
        key = R.prop('key', R.prop_or(dict(key=node_stage), node_stage, stage_by_name))

        # Copy all properties from resource.data except settings and rawData
        # Also grab raw_node properties
        # This is for arbitrary properties defined in the data
        # We put them in properties and propertyValues since graphql hates arbitrary key/values
        properties = R.merge(
            R.omit(['settings', 'rawData'], R.prop('data', resource)),
            raw_node
        )
        properties[node_name_key] = humanize(properties[node_name_key])
        return R.merge(
            # Omit accum[key] since we'll concat it with the new node
            R.omit([key], accum),
            {
                # concat accum[key] or [] with the new node
                key: R.concat(
                    R.prop_or([], key, accum),
                    # Note that the value is an array so we can combine nodes with the same stage key
                    [
                        dict(
                            value=string_to_float(R.prop(value_key, raw_node)),
                            type='Feature',
                            geometry=dict(
                                type='Point',
                                coordinates=location
                            ),
                            name=R.prop(node_name_key, raw_node),
                            isGeneralized=is_generalized,
                            properties=list(R.keys(properties)),
                            propertyValues=list(R.values(properties))
                        )
                    ]
                )
            }
        )
 def test_update(self):
     """
         Test that updating a Resource via graphql creates a new revision: create a
         'candy' resource, update it to 'popcorn'/'Popcorn', then assert two Versions
         exist for the updated instance (one for the create, one for the update).
     """
     # quiz_model_mutation_update performs a create followed by an update and
     # returns both graphql results
     result, update_result = quiz_model_mutation_update(
         self.client, graphql_update_or_create_resource,
         'createResource.resource', 'updateResource.resource',
         dict(
             key='candy',
             name='Candy',
             region=dict(id=self.region.id),
             data=R.merge(
                 sample_settings,
                 dict(
                     material='Candy',
                     rawData=[
                         'Other Global Imports;Shipments, location generalized;51.309933, 3.055030;Source;22,469,843',
                         'Knauf (Danilith) BE;Waregemseweg 156-142 9790 Wortegem-Petegem, Belgium;50.864762, 3.479308;Conversion;657,245',
                         "MPRO Bruxelles;Avenue du Port 67 1000 Bruxelles, Belgium;50.867486, 4.352543;Distribution;18,632",
                         'Residential Buildings (all typologies);Everywhere in Brussels;NA;Demand;3,882,735',
                         'Duplex House Typology;Everywhere in Brussels;NA;Demand;13,544',
                         'Apartment Building Typology;Everywhere in Brussels;NA;Demand;34,643',
                         'New West Gypsum Recycling;9130 Beveren, Sint-Jansweg 9 Haven 1602, Kallo, Belgium;51.270229, 4.261048;Reconversion;87,565',
                         'Residential Buildings (all typologies);Everywhere in Brussels;NA;Sink;120,000',
                         'RecyPark South;1190 Forest, Belgium;50.810799, 4.314789;Sink;3,130',
                         'RecyPark Nord;Rue du Rupel, 1000 Bruxelles, Belgium;50.880181, 4.377136;Sink;1,162'
                     ]))), dict(key='popcorn', name='Popcorn'))
     # The create and the update should each have produced one revision
     versions = Version.objects.get_for_object(
         Resource.objects.get(id=R.item_str_path(
             'data.updateResource.resource.id', update_result)))
     assert len(versions) == 2
Exemplo n.º 6
0
 def test_create(self):
     """
         Test creating a Resource via graphql, asserting no errors and snapshot-matching
         the created resource (minus non-deterministic props).
     """
     # NOTE(review): this uses raw_data whereas test_update uses rawData — confirm
     # which key the schema actually expects
     values = dict(
         name='Candy',
         region=dict(id=R.head(self.regions).id),
         data=R.merge(
             sample_settings,
             dict(
                 material='Candy',
                 raw_data=[
                     'Other Global Imports;Shipments, location generalized;51.309933, 3.055030;Source;22,469,843',
                     'Knauf (Danilith) BE;Waregemseweg 156-142 9790 Wortegem-Petegem, Belgium;50.864762, 3.479308;Conversion;657,245',
                     "MPRO Bruxelles;Avenue du Port 67 1000 Bruxelles, Belgium;50.867486, 4.352543;Distribution;18,632",
                     'Residential Buildings (all typologies);Everywhere in Brussels;NA;Demand;3,882,735',
                     'Duplex House Typology;Everywhere in Brussels;NA;Demand;13,544',
                     'Apartment Building Typology;Everywhere in Brussels;NA;Demand;34,643',
                     'New West Gypsum Recycling;9130 Beveren, Sint-Jansweg 9 Haven 1602, Kallo, Belgium;51.270229, 4.261048;Reconversion;87,565',
                     'Residential Buildings (all typologies);Everywhere in Brussels;NA;Sink;120,000',
                     'RecyPark South;1190 Forest, Belgium;50.810799, 4.314789;Sink;3,130',
                     'RecyPark Nord;Rue du Rupel, 1000 Bruxelles, Belgium;50.880181, 4.377136;Sink;1,162'
                 ]
             )
         )
     )
     result = graphql_update_or_create_resource(self.client, values)
     dump_errors(result)
     assert not R.has('errors', result), R.dump_json(R.prop('errors', result))
     # look at the users added and omit the non-determinant dateJoined
     result_path_partial = R.item_path(['data', 'createResource', 'resource'])
     self.assertMatchSnapshot(R.omit(omit_props, result_path_partial(result)))
Exemplo n.º 7
0
    def mutate(self, info, resource_data=None):
        """
            Create or update a Resource, deep-merging any existing stored data on update
            and computing the derived Sankey graph data when the data field is saved.
        :param info: Graphene resolve info
        :param resource_data: Dict of Resource fields; presence of 'id' indicates an update
        :return: UpsertResource containing the created/updated resource
        """
        # We must merge in existing resource.data if we are updating
        if R.has('id', resource_data):
            # New data gets priority, but this is a deep merge.
            resource_data['data'] = R.merge_deep(
                Resource.objects.get(id=resource_data['id']).data,
                R.prop_or({}, 'data', resource_data))
            # Modifies defaults value to add .data.graph
            # We could decide in the future to generate this derived data on the client, but it's easy enough to do here

        # Make sure that all props are unique that must be, either by modifying values or erring.
        modified_resource_data = enforce_unique_props(resource_fields,
                                                      resource_data)
        update_or_create_values = input_type_parameters_for_update_or_create(
            resource_fields, modified_resource_data)

        # Add the sankey data unless we are updating the instance without updating instance.data
        update_or_create_values_with_sankey_data = R.merge(
            update_or_create_values,
            dict(defaults=add_sankey_graph_to_resource_dict(
                update_or_create_values['defaults']))) if R.has(
                    'defaults',
                    update_or_create_values) else update_or_create_values

        resource, created = update_or_create_with_revision(
            Resource, update_or_create_values_with_sankey_data)
        return UpsertResource(resource=resource)
Exemplo n.º 8
0
def form_sample_user_state_data(regions, projects, data):
    """
    Given data in the form dict(region_keys=[...], ...), converts region_keys to
    regions=[{id:x}, {id:y}, ...] by resolving the regions
    :param regions: Persisted regions
    :param projects: Persisted projects
    :param {dict} data: Sample data in the form:
    dict(
        userRegions=[
            dict(
                region=dict(key='belgium'),  # key is converted to persisted Region's id
                mapbox=dict(viewport=dict(
                    latitude=50.5915,
                    longitude=2.0165,
                    zoom=7
                )),
            )
        ]
    ),
    :return: Data in the form dict(userRegions=[dict(region=dict(id=x), mapbox=..., ...), ...])
    """
    # Everything in data other than the user scope collections passes through untouched
    non_scope_data = R.omit(['userRegions', 'userProjects'], data)
    # Resolve each user scope collection against the persisted instances
    user_regions = user_state_scope_instances('region', 'userRegions', regions, data)
    user_projects = user_state_scope_instances('project', 'userProjects', projects, data)
    return R.merge(
        non_scope_data,
        dict(userRegions=user_regions, userProjects=user_projects))
Exemplo n.º 9
0
 def format_error(error):
     """
         Format an exception for a GraphQL error response, always including the stack
         trace, which the default formatter omits.
     :param error: The exception raised while executing the request
     :return: For a GraphQLError, the graphql-formatted error merged with a 'trace' key;
     otherwise a dict with 'message' and 'trace'
     """
     # Pass the exception info positionally: the etype/value/tb keyword names were removed
     # from traceback.format_exception in Python 3.10, while positional arguments work on
     # all supported versions
     trace = traceback.format_exception(type(error), error, error.__traceback__)
     if isinstance(error, GraphQLError):
         return R.merge(dict(trace=trace), format_graphql_error(error))
     return {"message": six.text_type(error), "trace": trace}
Exemplo n.º 10
0
            def fetch_and_merge(modified_user_state_data, props):
                """
                    If a UserState matching props exists, merge its data fields into the
                    incoming data; otherwise return the incoming data unchanged.
                :param modified_user_state_data: The incoming user state data
                :param props: Filter props (e.g. id and/or user_id) identifying an existing UserState
                :return: The merged data, or modified_user_state_data if no instance matched
                """
                existing = UserState.objects.filter(**props)
                # If the user doesn't have a user state yet
                if not R.length(existing):
                    return modified_user_state_data

                return merge_data_fields_on_update(
                    ['data'],
                    R.head(existing),
                    # Merge existing's id in case it wasn't in user_state_data
                    # NOTE(review): existing is a queryset here — confirm R.pick(['id'], ...)
                    # resolves the id from it as intended
                    R.merge(modified_user_state_data, R.pick(['id'], existing))
                )
Exemplo n.º 11
0
def region_resolver(manager_method, **kwargs):
    """
        Resolves regions for the model returned by get_region_model()
    :param manager_method: 'filter', 'get', or 'count'
    :param kwargs: Filter arguments for the Region; deleted__isnull=True is applied unless overridden
    :return: The result of calling the manager method with the computed filter expressions
    """
    region_model = get_region_model()
    # Exclude soft-deleted regions by default; explicit kwargs take precedence
    filter_kwargs = R.merge(dict(deleted__isnull=True), kwargs)
    q_expressions = process_filter_kwargs(region_model, **filter_kwargs)
    manager = getattr(region_model.objects, manager_method)
    return manager(*q_expressions)
Exemplo n.º 12
0
 def mutate(self, info, group_data=None):
     """
         Create or update a Group from the given group_data.
     :param info: Graphene resolve info
     :param group_data: Dict of group fields; may include a password to (re)set
     :return: UpsertGroup containing the created/updated group
     """
     # NOTE(review): Group() creates an instance, while the analogous user mutate passes
     # the model class (get_user_model()) to update_or_create_with_revision — confirm an
     # instance is intended here
     group_model = Group()
     data = R.merge(
         group_data,
         # Hash the incoming password if one was supplied
         # NOTE(review): a fixed salt ('not_random') makes hashes deterministic —
         # presumably for tests; verify this never runs in production
         dict(password=make_password(R.prop('password', group_data),
                                     salt='not_random'))
         if R.prop_or(False, 'password', group_data) else {})
     update_or_create_values = input_type_parameters_for_update_or_create(
         group_fields, data)
     group, created = update_or_create_with_revision(
         group_model, update_or_create_values)
     return UpsertGroup(group=group)
Exemplo n.º 13
0
 def mutate(self, info, user_data=None):
     """
         Create or update a User from the given user_data.
     :param info: Graphene resolve info
     :param user_data: Dict of user fields; may include a password to (re)set
     :return: UpsertUser containing the created/updated user
     """
     user_model = get_user_model()
     data = R.merge(
         user_data,
         # Hash the incoming password if one was supplied
         # NOTE(review): the fixed salt 'not_random' makes hashes deterministic —
         # presumably for tests; verify this never runs in production
         dict(password=make_password(R.prop('password', user_data),
                                     salt='not_random'))
         if R.prop_or(False, 'password', user_data) else {})
     update_or_create_values = input_type_parameters_for_update_or_create(
         user_fields, data)
     user, created = update_or_create_with_revision(
         user_model, update_or_create_values)
     return UpsertUser(user=user)
Exemplo n.º 14
0
def user_state_scope_instances(scope_key, user_scope_key, scope_instances,
                               data):
    """
        Creates scope instance dicts for the given instances. New scope instances can be
        passed as well for Project, which instructs the server to create the Project when
        creating/updating the userProject
    :param scope_key: 'region', 'project', etc
    :param user_scope_key: 'userRegions', 'userProjects', etc
    :param scope_instances: regions or projects or ...
    :param data: The userState data to put the instances in. E.g. data.userRegions gets mapped to include
    the resolved regions
    :return: The user scope instances from data[user_scope_key], each with its scope instance resolved
    to dict(id=...) when persisted, or passed through whole so the server can create it
    """

    # Index the persisted instances by their key for constant-time lookup below
    scope_instances_by_key = R.map_prop_value_as_index('key', scope_instances)

    def resolve_scope_instance(scope_key, user_scope_instance):
        # Replace key with id
        # NOTE: the local name id shadows the builtin; left unchanged here
        id = R.compose(
            # third get the id if it exists
            R.prop_or(None, 'id'),
            # second resolve the scope instance if it exists
            lambda k: R.prop_or(None, k, scope_instances_by_key),
            # first get the key
            R.item_str_path(f'{scope_key}.key'))(user_scope_instance)
        return {
            scope_key:
            R.compact_dict(
                dict(
                    # Resolve the persisted Scope instance by key
                    id=id
                ) if id else dict(
                    # Otherwise pass everything so the server can create the instance
                    # (Currently only supported for projects)
                    user_scope_instance[scope_key]))
        }

    return R.map(
        # Find the id of the scope instance that matches,
        # returning dict(id=scope_instance_id). We can't return the whole scope instance
        # because we are saving within json data, not the Django ORM
        # If the scope instance is new and doesn't match anything, create the user scope instance
        # without an id so that the server saves it (Only implemented for Project, not Region thus far)
        lambda user_scope_instance: R.merge(
            # Other stuff like mapbox
            R.omit([scope_key], user_scope_instance),
            # The project or region
            resolve_scope_instance(scope_key, user_scope_instance)),
        R.prop(user_scope_key, data))
def validate_and_mutate_scope_instances(scope_instances_config, data):
    """
        Inspect the data and find all scope instances within data
        For UserState, for instance, this includes userRegions[*].region, userProject[*].project and within
        userRegions and userProjects userSearch.userSearchLocations[*].search_location and whatever the implementing
        libraries define in addition
    :param scope_instances_config: See user_state_schema.user_state_scope_instances_config for an example
    :param data: The instance data field containing the scope instances
    :return: The updated data with scope instances possibly created/updated if allowed. If creates occur
    then the scope instance will now have an id. Otherwise no changes are visible
    :raise Exception: If any scope instance referenced by id in data does not exist in the database
    """

    # For each configured scope, collect the instances and ids referenced in data
    validated_scope_objs_instances_and_ids_sets = R.map(
        lambda scope_instance_config: find_scope_instances(
            scope_instance_config, data), scope_instances_config)

    # Some scope instances can be created or modified when embedded in the data. This helps
    # make mutation of the instance, such as UserState,
    # a one step process, so that new Projects, SearchLocations, etc. can
    # be created without having to call mutation for them separately ahead of time, which would create
    # a series of mutations that weren't failure-protected as a single transaction
    for i, validated_scope_objs_instances_and_ids in enumerate(
            validated_scope_objs_instances_and_ids_sets):
        # Replace the model class with its name so the scope can be serialized in error messages
        scope = R.merge(
            scope_instances_config[i],
            dict(model=scope_instances_config[i]['model'].__name__))

        # If any scope instances with an id specified in new_data don't exist, throw an error
        if R.length(validated_scope_objs_instances_and_ids['scope_ids']
                    ) != R.length(
                        validated_scope_objs_instances_and_ids['instances']):
            ids = R.join(', ',
                         validated_scope_objs_instances_and_ids['scope_ids'])
            instances_string = R.join(
                ', ',
                R.map(lambda instance: str(instance),
                      validated_scope_objs_instances_and_ids['instances']))
            raise Exception(
                f"For scope {dumps(scope)} Some scope ids among ids:[{ids}] being saved in user state do not exist. Found the following instances in the database: {instances_string or 'None'}. UserState.data is {dumps(data)}"
            )

        # Create/Update any scope instances that permit it
        model = scope_instances_config[i]['model']
        data = handle_can_mutate_related(
            model, scope, data, validated_scope_objs_instances_and_ids)
    return data
Exemplo n.º 16
0
 def mutate(self, info, foo_data=None):
     """
         Create or update a Foo from the given foo_data, converting any geojson
         FeatureCollection into both a GEOS geometry collection and a raw geojson field.
     :param info: Graphene resolve info
     :param foo_data: Dict of Foo fields, optionally containing 'geojson'
     :return: UpsertFoo containing the created/updated foo
     """
     modified_foo_data = R.merge(
         # Make sure unique fields are enforced, here by incrementing foo.key
         enforce_unique_props(foo_fields, foo_data),
         dict(
             # Force the FeatureCollection geojson into the GEOSGeometryCollection. This is just Geometry
             geo_collection=ewkt_from_feature_collection(
                 foo_data['geojson'])
             if R.prop('geojson', foo_data) else {},
             # Put the full FeatureCollection geojson into the geojson field.
             geojson=foo_data['geojson']
             if R.prop('geojson', foo_data) else {}))
     update_or_create_values = input_type_parameters_for_update_or_create(
         foo_fields, modified_foo_data)
     foo, created = update_or_create_with_revision(Foo,
                                                   update_or_create_values)
     return UpsertFoo(foo=foo)
Exemplo n.º 17
0
def resolver_for_feature_collection(resource, context, **kwargs):
    """
        Like resolver but takes care of converting the geos value stored in the field to a dict that
        has the values we want to resolve, namely type and features.
    :param {string} resource: The instance whose json field data is being resolved
    :param {ResolveInfo} context: Graphene context which contains the fields queried in field_asts
    :return: {DataTuple} Standard resolver return value
    """

    # Take the camelized keys. We don't store data fields slugified. We leave them camelized
    selections = R.map(lambda sel: sel.name.value, context.field_asts[0].selection_set.selections)
    # Recover the json by parsing the string provided by GeometryCollection and mapping the geometries property to features
    # NOTE(review): ast.literal_eval is used rather than json.loads — presumably because the
    # GEOS .json string isn't guaranteed strict JSON; confirm
    json = R.compose(
        # Map the value GeometryCollection to FeatureCollection for the type property
        R.map_with_obj(lambda k, v: R.if_else(
            R.equals('type'),
            R.always('FeatureCollection'),
            R.always(v)
        )(k)),
        # Map geometries to features: [{type: Feature, geometry: geometry}]
        lambda dct: R.merge(
            # Remove geometries
            R.omit(['geometries'], dct),
            # Add features containing the geometries
            dict(features=R.map(
                lambda geometry: dict(type='Feature', geometry=geometry),
                R.prop_or([], 'geometries', dct))
            )
        ),
    )(ast.literal_eval(R.prop(context.field_name, resource).json))
    # Identify the keys that are actually in resource[json_field_name]
    all_selections = R.filter(
        lambda key: key in json,
        selections
    )
    # Pick out the values that we want
    result = R.pick(all_selections, json)

    # Return in the standard Graphene DataTuple
    return namedtuple('DataTuple', R.keys(result))(*R.values(result))
Exemplo n.º 18
0
class RescapeTokenQuery(graphene.ObjectType):
    """
        Query type exposing the current JWT token payload for the requesting user.
    """
    # Conditionally expose token/refresh_token fields based on the graphql_jwt settings
    # NOTE(review): these add the fields when JWT_HIDE_TOKEN_FIELDS is True — presumably
    # re-adding fields that ObtainJSONWebToken hides in that mode; confirm the conditions
    # aren't inverted
    current_token = graphene.Field(
        graphql_jwt.ObtainJSONWebToken,
        payload=GenericScalar(required=True),
        refresh_expires_in=graphene.Int(required=True),
        **R.merge(
            dict(token=graphene.Field(graphene.String, required=True))
            if jwt_settings.JWT_HIDE_TOKEN_FIELDS else {},
            dict(refresh_token=graphene.Field(graphene.String, required=True))
            if jwt_settings.JWT_HIDE_TOKEN_FIELDS
            and jwt_settings.JWT_LONG_RUNNING_REFRESH_TOKEN else {}))

    def resolve_current_token(self, info):
        """
            Resolve the current user or return None if there isn't one
        :param self:
        :param info: Graphene resolve info whose context carries the (possibly anonymous) user
        :return: The current user or None (an AnonymousUser resolves to None)
        """
        context = info.context
        user = R.prop_or(None, 'user', context)
        return user if not isinstance(user, AnonymousUser) else None
    def mutate(self, info, search_location_data=None):
        """
            Create, update, or delete a SearchLocation, including any embedded
            SearchJurisdictions, which can be created/updated in the same mutation.
        :param info: Graphene resolve info
        :param search_location_data: Dict of SearchLocation fields; may be marked for delete,
        and may contain embedded jurisdiction data
        :return: UpsertSearchLocation containing the created/updated (or deleted) search_location
        """
        # Handle the delete case first and return immediately if it applies
        deleted_search_location_response = delete_if_marked_for_delete(
            SearchLocation, UpsertSearchLocation, 'search_location',
            search_location_data)
        if deleted_search_location_response:
            return deleted_search_location_response

        modified_search_location_data = R.compose(
            # Make sure that all props are unique that must be, either by modifying values or erring.
            lambda data: enforce_unique_props(search_location_fields, data),
            # Remove the many to many values. They are saved separately
            lambda data: R.omit(['jurisdictions'], data))(search_location_data)

        update_or_create_values = input_type_parameters_for_update_or_create(
            search_location_fields, modified_search_location_data)
        search_location, created = update_or_create_with_revision(
            SearchLocation, update_or_create_values)

        # SearchJurisdictions can be created during the creation of search_locations
        if R.prop_or(False, 'jurisdictions', search_location_data):
            existing_search_intersections_by_id = R.index_by(
                R.prop('id'), search_location.jurisdictions.all())
            # NOTE(review): the guard above checks 'jurisdictions' but this iterates
            # 'intersections' — confirm which key the incoming data actually uses
            for search_jurisdiction_unsaved in R.prop('intersections',
                                                      search_location_data):
                # existing instances have an id
                search_jursidiction_id = R.prop_or(
                    None, 'id', search_jurisdiction_unsaved)
                search_jurisdiction, created = update_or_create_with_revision(
                    SearchJurisdiction,
                    R.merge(
                        R.prop(search_jursidiction_id,
                               existing_search_intersections_by_id)
                        if search_jursidiction_id else {},
                        search_jurisdiction_unsaved))
                # Once saved, add it to the search location. Use add(), not set():
                # set() expects an iterable and replaces the entire relation, which
                # would leave only the last jurisdiction of the loop attached
                search_location.jurisdictions.add(search_jurisdiction)

        return UpsertSearchLocation(search_location=search_location)
Exemplo n.º 20
0
 def sample_user_state_with_search_locations_and_additional_scope_instances(
         user_scope_name, sample_user_state):
     """
         Return sample_user_state with each user scope instance (e.g. each userRegion)
         augmented with a userSearch.userSearchLocations list referencing the persisted
         search_locations by id, plus any application-defined additional scope instance
         properties.
     :param user_scope_name: 'userRegions' or 'userProjects' — the data path segment to augment
     :param sample_user_state: The sample user state dict containing data.{user_scope_name}
     :return: The user state with augmented user scope instances
     """
     # NOTE: search_locations comes from the enclosing scope
     return R.fake_lens_path_set(
         f'data.{user_scope_name}'.split('.'),
         R.map(
             lambda user_scope: R.compose(
                 # Gives applications a chance to add the needed additional scope instances,
                 # e.g. userDesignFeatures
                 lambda user_scope:
                 create_additional_scope_instance_properties(user_scope),
                 lambda user_scope: R.merge(
                     user_scope,
                     dict(userSearch=dict(userSearchLocations=R.map(
                         lambda i_search_location: dict(
                             # Just return with the id since the full data is in the database
                             searchLocation=R.pick(['id'],
                                                   i_search_location[1]),
                             # Set the first search_location to active
                             activity=dict(isActive=i_search_location[0] ==
                                           0)),
                         enumerate(search_locations))))))(user_scope),
             R.item_str_path(f'data.{user_scope_name}', sample_user_state)),
         sample_user_state)
Exemplo n.º 21
0
        def mutate(self, info, user_state_data=None):
            """
                Update or create the user state
            :param info: Graphene resolve info
            :param user_state_data: The UserState data; 'id' or 'user.id' identifies an existing instance
            :return: UpsertUserState containing the created/updated user_state
            :raise Exception: If any scope instance referenced by id in the data does not exist
            """

            # Check that all the scope instances in user_state.data exist. We permit deleted instances for now.
            new_data = R.prop_or({}, 'data', user_state_data)
            # Copy since Graphene reuses this data
            copied_new_data = copy.deepcopy(new_data)
            old_user_state_data = UserState.objects.get(
                id=user_state_data['id']
            ).data if R.prop_or(None, 'id', user_state_data) else None

            # Inspect the data and find all scope instances within UserState.data
            # This includes userRegions[*].region, userProject[*].project and within userRegions and userProjects
            # userSearch.userSearchLocations[*].search_location and whatever the implementing libraries define
            # in addition
            updated_new_data = validate_and_mutate_scope_instances(
                user_state_scope_instances_config,
                copied_new_data
            )

            # If either userProjects or userRegions are null, it means those scope instances aren't part
            # of the update, so merge in the old values
            if R.prop_or(None, 'id', user_state_data) and R.any_satisfy(
                    lambda user_scope_key: not R.prop_or(None, user_scope_key, updated_new_data),
                    ['userProjects', 'userRegions']
            ):
                # The special update case where one userScope collection is null,
                # indicates that we are only updating one userScope object. The rest
                # should remain the same and not be removed
                for user_scope_key in ['userProjects', 'userRegions']:
                    # Database values
                    old_user_scopes_by_id = user_scope_instances_by_id(
                        user_scope_key,
                        old_user_state_data
                    )
                    # New values with updates applied
                    new_user_scopes_by_id = user_scope_instances_by_id(
                        user_scope_key,
                        updated_new_data
                    )
                    # Prefer the old over the new, merging all objects but overriding lists
                    # We override lists because a non-null list always replaces the old list in the database
                    updated_new_data[user_scope_key] = R.values(R.merge_deep(
                        old_user_scopes_by_id,
                        new_user_scopes_by_id,
                        MyMerger(
                            # pass in a list of tuples, with the
                            # strategies you are looking to apply
                            # to each type.
                            [
                                (list, ["override_non_null"]),
                                (dict, ["merge"])
                            ],
                            # next, choose the fallback strategies,
                            # applied to all other types:
                            ["override"],
                            # finally, choose the strategies in
                            # the case where the types conflict:
                            ["override"]
                        )
                    ))

            # Update user_state_data with the updated data
            modified_user_state_data = R.merge(user_state_data, dict(data=updated_new_data))

            # id or user.id can be used to identify the existing instance
            id_props = R.compact_dict(
                dict(
                    id=R.prop_or(None, 'id', modified_user_state_data),
                    user_id=R.item_str_path_or(None, 'user.id', modified_user_state_data)
                )
            )

            # Merge the existing instance's data fields in if one matches the id props
            def fetch_and_merge(modified_user_state_data, props):
                existing = UserState.objects.filter(**props)
                # If the user doesn't have a user state yet
                if not R.length(existing):
                    return modified_user_state_data

                return merge_data_fields_on_update(
                    ['data'],
                    R.head(existing),
                    # Merge existing's id in case it wasn't in user_state_data
                    R.merge(modified_user_state_data, R.pick(['id'], existing))
                )

            # Only fetch/merge when at least one id prop was given
            modified_data = R.if_else(
                R.compose(R.length, R.keys),
                lambda props: fetch_and_merge(modified_user_state_data, props),
                lambda _: modified_user_state_data
            )(id_props)

            update_or_create_values = input_type_parameters_for_update_or_create(
                user_state_fields,
                # Make sure that all props are unique that must be, either by modifying values or erring.
                enforce_unique_props(
                    user_state_fields,
                    modified_data)
            )

            user_state, created = update_or_create_with_revision(UserState, update_or_create_values)
            return UpsertUserState(user_state=user_state)
Exemplo n.º 22
0
    def resolve_resources(self, info, **kwargs):
        """
            Resolve Resource instances, excluding soft-deleted ones by default.
        :param info: Graphene resolve info
        :param kwargs: Filter arguments for Resource; may override deleted__isnull
        :return: Queryset of matching Resources
        """
        # Default to non-deleted rows; caller kwargs take precedence
        filter_kwargs = R.merge(dict(deleted__isnull=True), kwargs)
        q_expressions = process_filter_kwargs(Resource, **filter_kwargs)
        return Resource.objects.filter(*q_expressions)
Exemplo n.º 23
0
    def resolve_settings(self, info, **kwargs):
        """
            Resolve Settings instances, excluding soft-deleted ones by default.
        :param info: Graphene resolve info
        :param kwargs: Filter arguments for Settings; may override deleted__isnull
        :return: Queryset of matching Settings
        """
        # Default to non-deleted rows; caller kwargs take precedence
        filter_kwargs = R.merge(dict(deleted__isnull=True), kwargs)
        q_expressions = process_filter_kwargs(Settings, **filter_kwargs)
        return Settings.objects.filter(*q_expressions)
Exemplo n.º 24
0
        def resolve_group_states(self, info, **kwargs):
            """
                Resolve GroupState instances, excluding soft-deleted ones by default.
            :param info: Graphene resolve info
            :param kwargs: Filter arguments for GroupState; may override deleted__isnull
            :return: Queryset of matching GroupStates from the configured model class
            """
            # Default to non-deleted rows; caller kwargs take precedence
            filter_kwargs = R.merge(dict(deleted__isnull=True), kwargs)
            q_expressions = process_filter_kwargs(GroupState, **filter_kwargs)
            model_class = R.prop('model_class', group_state_config)
            return model_class.objects.filter(*q_expressions)
Exemplo n.º 25
0
 def _resolve_locations(info, **kwargs):
     """
         Resolve Location instances with filtering and ordering applied.
     :param info: Graphene resolve info
     :param kwargs: Filter/order arguments for Location; may override deleted__isnull
     :return: The query result for the matching Locations
     """
     # Default to not deleted; it can be overridden by kwargs
     filter_kwargs = R.merge(dict(deleted__isnull=True), kwargs)
     return query_with_filter_and_order_kwargs(Location, **filter_kwargs)