def _model_resolver_for_dict_field(resource, context, **kwargs):
        # Don't underscore field_name. field_name points at a Django model, but the object holding
        # the field is by definition json, or we wouldn't be using this resolver
        field_name = context.field_name
        # resource is either a DataTuple or a dict
        field_value = R.prop_or(dict(), field_name, resource) \
            if isinstance(resource, dict) \
            else getattr(resource, field_name)
        id = R.prop_or(None, 'id', field_value)
        # If no instance id is assigned to this data, we can't resolve it
        if not id:
            return None

        # Now filter based on any query arguments beyond id. If it doesn't match we also return None
        found = first(model_class.objects.filter(
            **dict(
                # These are Q expressions
                *flatten_query_kwargs(model_class, kwargs),
                id=id
            )
        ), None)

        def no_instance_error(_):
            raise Exception(f'For model {model_class.__name__} and id {id}, no instance was found, deleted or otherwise')

        # If we didn't find the instance, search for deleted instances if safedelete is implemented
        return found or (issubclass(model_class, SafeDeleteModel) and R.if_else(lambda q: q.count(), first, no_instance_error)(model_class.objects.all(force_visibility=True).filter(
            **dict(
                # These are Q expressions
                *flatten_query_kwargs(model_class, kwargs),
                id=id
            )
        )))
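# For context: this resolver closes over model_class, so it is presumably produced by a factory,
# e.g. the model_resolver_for_dict_field referenced further below. A minimal sketch of that
# factory (an assumption; the real signature may differ):
# def model_resolver_for_dict_field(model_class):
#     def _model_resolver_for_dict_field(resource, context, **kwargs):
#         ...  # the body above, closing over model_class
#     return _model_resolver_for_dict_field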
    def accumulate_nodes(accum, raw_node, i):
        """
            Accumulate each node, keying by the name of the node's stage key.
            Since nodes share stage keys, each result is an array of nodes
        :param accum:
        :param raw_node:
        :param i:
        :return:
        """
        location_obj = resolve_coordinates(default_location, R.prop_or(None, location_key, raw_node), i)
        location = R.prop('location', location_obj)
        is_generalized = R.prop('isGeneralized', location_obj)
        # The key where the node is stored is the stage key
        node_stage = raw_node[stage_key]
        # Get the key from the name, or it's already a key
        key = R.prop('key', R.prop_or(dict(key=node_stage), node_stage, stage_by_name))

        # Copy all properties from resource.data except settings and rawData
        # Also grab raw_node properties
        # This is for arbitrary properties defined in the data
        # We put them in properties and propertyValues since graphql hates arbitrary key/values
        properties = R.merge(
            R.omit(['settings', 'rawData'], R.prop('data', resource)),
            raw_node
        )
        properties[node_name_key] = humanize(properties[node_name_key])
        return R.merge(
            # Omit accum[key] since we'll concat it with the new node
            R.omit([key], accum),
            {
                # concat accum[key] or [] with the new node
                key: R.concat(
                    R.prop_or([], key, accum),
                    # Note that the value is an array so we can combine nodes with the same stage key
                    [
                        dict(
                            value=string_to_float(R.prop(value_key, raw_node)),
                            type='Feature',
                            geometry=dict(
                                type='Point',
                                coordinates=location
                            ),
                            name=R.prop(node_name_key, raw_node),
                            isGeneralized=is_generalized,
                            properties=list(R.keys(properties)),
                            propertyValues=list(R.values(properties))
                        )
                    ]
                )
            }
        )
def find_scope_instances(user_state_scope, new_data):
    """
        Retrieve the scope instances to verify the Ids.
        Scope instances must have ids unless they are allowed to be created/updated
        during the userState mutation (such as searchLocations)
    :param user_state_scope: Dict with 'pick' in the shape of the instances we are looking for in new_data,
    e.g. dict(userRegions={region: True}) to search new_data.userRegions[] for all occurrences of {region:...},
    and 'key', which indicates the actual key of the instance (e.g. 'region' for regions)
    :param new_data: The data to search
    :return: dict(
        instances=Instances actually in the database,
    )
    """
    def until(key, value):
        return key != R.prop('key', user_state_scope)

    return R.compose(
        lambda scope_dict: dict(
            # See which instances with ids are actually in the database
            # If any are missing we have an invalid update or need to create those instances if permitted
            instances=list(
                find_scope_instances_by_id(R.prop('model', user_state_scope),
                                           scope_dict['scope_ids'])),
            # The path from userRegions or userProjects to the scope instances, used to replace
            # a null update value with the existing values
            user_scope_path=list(R.keys(R.flatten_dct(user_state_scope, '.')))[
                0],
            **scope_dict),
        lambda scope_objs: dict(
            # Unique by id, or accept if there is no id. This loses data, but it's just for validation
            scope_objs=R.unique_by(
                lambda obj: R.prop_or(str(now()), 'id', obj['value']),
                scope_objs),
            scope_ids=R.unique_by(
                R.identity,
                compact(
                    R.map(
                        lambda scope_obj: R.prop_or(None, 'id', scope_obj[
                            'value']), scope_objs)))),
        # Use the pick key property to find the scope instances in the data
        # If we don't match anything we can get null or an empty item. Filter/compact these out
        R.filter(lambda obj: obj['value'] and (not isinstance(
            obj['value'], list) or R.length(obj['value']) != 0)),
        R.map(lambda pair: dict(key=pair[0], value=pair[1])),
        lambda flattened_data: R.to_pairs(flattened_data),
        lambda data: R.flatten_dct_until(
            R.pick_deep_all_array_items(R.prop('pick', user_state_scope), data
                                        ), until, '.'))(new_data)
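# Usage sketch with a hypothetical region scope, matching the scope configs shown further below:
# find_scope_instances(
#     dict(pick=dict(userRegions=dict(region=True)), key='region', model=get_region_model()),
#     dict(userRegions=[dict(region=dict(id=1)), dict(region=dict(id=2))])
# )
# collects scope_objs for both {region: ...} occurrences, extracts scope_ids [1, 2], and returns
# in 'instances' the Region instances that actually exist in the database.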
Example #4
def dump_graphql_keys(dct):
    """
        Convert a dict to graphql input parameter keys in the form below.
        Also camelizes keys if they are slugs and handles complex types. If a value has read=IGNORE it is omitted
        key1
        key2
        key3
        key4 {
            subkey1
            ...
        }
        ...
    :param dct: keyed by field
    :return:
    """
    from rescape_graphene.graphql_helpers.schema_helpers import IGNORE, DENY
    return R.join('\n', R.values(R.map_with_obj(
        dump_graphene_type,
        R.filter_dict(
            lambda key_value: not R.compose(
                lambda v: R.contains(v, [IGNORE, DENY]),
                lambda v: R.prop_or(None, 'read', v)
            )(key_value[1]),
            dct
        )
    )))
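# Usage sketch (hypothetical field dict; dump_graphene_type, defined elsewhere, handles nesting):
# dump_graphql_keys(dict(
#     id=dict(type=Int),
#     key=dict(type=String),
#     secret=dict(type=String, read=IGNORE),  # omitted from the output
#     data=dict(type=DataType, graphene_type=DataType, fields=dict(foo=dict(type=Int)))
# ))
# would yield something like:
# id
# key
# data {
#     foo
# }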
Example #5
    def mutate(self, info, resource_data=None):
        # We must merge in existing resource.data if we are updating
        if R.has('id', resource_data):
            # New data gets priority, but this is a deep merge.
            resource_data['data'] = R.merge_deep(
                Resource.objects.get(id=resource_data['id']).data,
                R.prop_or({}, 'data', resource_data))

        # Make sure that all props are unique that must be, either by modifying values or erring
        modified_resource_data = enforce_unique_props(resource_fields,
                                                      resource_data)
        update_or_create_values = input_type_parameters_for_update_or_create(
            resource_fields, modified_resource_data)

        # Add the sankey data unless we are updating the instance without updating instance.data.
        # This modifies the defaults value to add .data.graph. We could decide in the future to
        # generate this derived data on the client, but it's easy enough to do here
        update_or_create_values_with_sankey_data = R.merge(
            update_or_create_values,
            dict(defaults=add_sankey_graph_to_resource_dict(
                update_or_create_values['defaults']))) if R.has(
                    'defaults',
                    update_or_create_values) else update_or_create_values

        resource, created = update_or_create_with_revision(
            Resource, update_or_create_values_with_sankey_data)
        return UpsertResource(resource=resource)
def accumulate_sankey_graph(accumulated_graph, resource):
    """
        Given an accumulated graph and a current Resource object, process the resource object
        and add the results to the accumulated graph
    :param accumulated_graph:
    :param resource: A Resource
    :return:
    """

    links = R.item_path(['graph', 'links'], resource.data)
    nodes = R.item_path(['graph', 'nodes'], resource.data)

    # Combine the nodes and links with the previous accumulated_graph nodes and links
    return dict(
        nodes=R.concat(R.prop_or([], 'nodes', accumulated_graph), nodes),
        # Likewise combine the links, which were naively created between nodes of consecutive stages
        links=R.concat(R.prop_or([], 'links', accumulated_graph), links))
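# accumulate_sankey_graph is a reducer, so building one combined graph from many resources is a
# fold. A minimal sketch, assuming resources is an iterable of Resource instances whose data
# contains graph.nodes and graph.links:
# combined_graph = R.reduce(accumulate_sankey_graph, dict(nodes=[], links=[]), resources)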
Example #7
 def resolve(self, next, root, info, **kwargs):
     # Block introspection in PROD to save time, unless the param forceIntrospection=true is passed
     block_introspection = settings.PROD and not R.prop_or(False, 'forceIntrospection', info.context.GET)
     if block_introspection and info.field_name.lower() in ['__schema', '_introspection']:
         query = GraphQLObjectType(
             "Query", lambda: {"Introspection": GraphQLField(GraphQLString, resolver=lambda *_: "Disabled")}
         )
         info.schema = GraphQLSchema(query=query)
         return next(root, info, **kwargs)
     return next(root, info, **kwargs)
def assert_no_errors(result):
    """
        Assert no graphql request errors
    :param result: The request Result
    :return: None
    """
    assert not R.prop_or(False, 'errors', result), R.dump_json(
        R.map(lambda e: format_error(e),
              R.prop('errors', result)))
 def resolve_current_token(self, info):
     """
         Resolve the current user or return None if there isn't one
     :param self:
     :param info:
     :return: The current user or None
     """
     context = info.context
     user = R.prop_or(None, 'user', context)
     return user if not isinstance(user, AnonymousUser) else None
 def resolve_scope_instance(scope_key, user_scope_instance):
     # Replace key with id
     id = R.compose(
         # third get the id if it exists
         R.prop_or(None, 'id'),
         # second resolve the scope instance if it exists
         lambda k: R.prop_or(None, k, scope_instances_by_key),
         # first get the key
         R.item_str_path(f'{scope_key}.key'))(user_scope_instance)
     return {
         scope_key:
         R.compact_dict(
             dict(
                 # Resolve the persisted Scope instance by key
                 id=id
             ) if id else dict(
                 # Otherwise pass everything so the server can create the instance
                 # (Currently only supported for projects)
                 user_scope_instance[scope_key]))
     }
Example #11
def user_scope_instances_by_id(user_scope_key, user_state_data):
    # Resolve the user scope instances
    return R.from_pairs(
        R.map(
            lambda user_scope: [
                R.item_path_or(None, [scope_key_lookup[user_scope_key], 'id'], user_scope),
                user_scope
            ],
            R.prop_or([], user_scope_key, user_state_data)
        )
    )
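# Usage sketch, assuming scope_key_lookup maps e.g. 'userRegions' to 'region':
# user_scope_instances_by_id(
#     'userRegions',
#     dict(userRegions=[dict(region=dict(id=1), activity=dict(isActive=True))])
# )
# returns {1: dict(region=dict(id=1), activity=dict(isActive=True))}, keying each userScope
# object by the id of its scope instance so it can be matched against database values.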
 def mutate(self, info, group_data=None):
     group_model = Group
     data = R.merge(
         group_data,
         dict(password=make_password(R.prop('password', group_data),
                                     salt='not_random'))
         if R.prop_or(False, 'password', group_data) else {})
     update_or_create_values = input_type_parameters_for_update_or_create(
         group_fields, data)
     group, created = update_or_create_with_revision(
         group_model, update_or_create_values)
     return UpsertGroup(group=group)
 def mutate(self, info, user_data=None):
     user_model = get_user_model()
     data = R.merge(
         user_data,
         dict(password=make_password(R.prop('password', user_data),
                                     salt='not_random'))
         if R.prop_or(False, 'password', user_data) else {})
     update_or_create_values = input_type_parameters_for_update_or_create(
         user_fields, data)
     user, created = update_or_create_with_revision(
         user_model, update_or_create_values)
     return UpsertUser(user=user)
Example #14
def resolver_for_data_field(resource, context, **kwargs):
    """
        Like resolver_for_dict_field, but defaults the data property to {userRegions: [], userProjects: []}
        in case a mutation messes it up (This should be a temporary problem until we fix the mutation).
    :param resource:
    :param context:
    :param kwargs: Arguments to filter with
    :return:
    """
    selections = resolve_selections(context)
    field_name = context.field_name
    data = getattr(resource, field_name) if (hasattr(resource, field_name) and R.prop(field_name, resource)) else {}
    data['userRegions'] = R.prop_or([], 'userRegions', data)
    data['userProjects'] = R.prop_or([], 'userProjects', data)

    # We only let this value through if it matches the kwargs
    # TODO data doesn't include full values for embedded model values, rather just {id: ...}. So if kwargs have
    # searches on other values of the model this will fail. The solution is to load the model values, but I
    # need some way to figure out where they are in data
    passes = R.dict_matches_params_deep(kwargs, data)
    # Pick the selections from our resource json field value default to {} if resource[field_name] is null
    return pick_selections(selections, data) if passes else namedtuple('DataTuple', [])()
Example #15
def increment_prop_until_unique(django_class, strategy, prop, additional_filter_props, django_instance_data):
    """
        Increments the given prop of the given Django instance data, as given by data[prop], until it matches nothing in
        the database. Note that this includes checks against soft-deleted instances where the deleted prop is non-null
        (assumes the use of SafeDeleteModel on the model class)
    :param django_class: Django class to query
    :param strategy: Function used to try to make a value unique. Expects all potential matching values--all values
    that begin with the value of the property--the prop value, and the current index. It's called for each matching
    value to guarantee the strategy will eventually produce a unique value. For instance, if prop is key and it equals
    'foo', and 'foo', 'foo1', 'foo2', and 'foo3' are in the db, strategy will be called with an array of 4 values 4
    times, with index 0 through 3. If strategy is None the default strategy is to append index+1 to the duplicate name
    :param prop: The prop to ensure uniqueness of
    :param additional_filter_props: Other props, such as user id, to filter by. This allows incrementing a name
    dependent on the current user, for instance. This can be a dict or a function expecting the django_instance_data
    and returning a dict
    :param django_instance_data: The data containing the prop
    :return: The data merged with the uniquely named prop
    """
    prop_value = R.prop(prop, django_instance_data)
    pk = R.prop_or(None, 'id', django_instance_data)

    strategy = strategy or default_strategy
    # Include deleted objects here. It's up to additional_filter_props to deal with the deleted=date|None property
    all_objects = django_class.all_objects if R.has('all_objects', django_class) else django_class.objects
    matching_values = all_objects.filter(
        # Ignore value matching the pk if this is an update operation.
        # In other words we can update the key to what it already is, aka do nothing
        *R.compact([
            ~Q(id=pk) if pk else None,
        ]),
        **R.merge(
            {'%s__startswith' % prop: prop_value},
            # Give the filter props the instance data if they are a function
            R.when(
                lambda f: inspect.isfunction(f),
                lambda f: f(django_instance_data)
            )(additional_filter_props or {})
        )
    ).values_list(prop, flat=True).order_by(prop)

    success = prop_value
    for i, matching_key in enumerate(matching_values):
        success = None
        attempt = strategy(matching_values, prop_value, i)
        if attempt not in matching_values:
            success = attempt
            break
    if not success:
        raise Exception("Could not generate unique prop value %s. The following matching ones exist %s" % (
            prop_value, matching_values))
    return R.merge(django_instance_data, {prop: success})
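# default_strategy, referenced above, isn't shown in this excerpt. A minimal version consistent
# with the docstring ("append index+1 to the duplicate name") would be:
def default_strategy(matching_values, prop_value, index):
    # e.g. 'foo' -> 'foo1', then 'foo2', ..., until the attempt collides with nothing
    return '%s%s' % (prop_value, index + 1)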
    def mutate(self, info, search_location_data=None):
        deleted_search_location_response = delete_if_marked_for_delete(
            SearchLocation, UpsertSearchLocation, 'search_location',
            search_location_data)
        if deleted_search_location_response:
            return deleted_search_location_response

        modified_search_location_data = R.compose(
            # Make sure that all props are unique that must be, either by modifying values or erring.
            lambda data: enforce_unique_props(search_location_fields, data),
            # Remove the many to many values. They are saved separately
            lambda data: R.omit(['jurisdictions'], data))(search_location_data)

        update_or_create_values = input_type_parameters_for_update_or_create(
            search_location_fields, modified_search_location_data)
        search_location, created = update_or_create_with_revision(
            SearchLocation, update_or_create_values)

        # SearchJurisdictions can be created during the creation of search_locations
        if R.prop_or(False, 'jurisdictions', search_location_data):
            existing_search_intersections_by_id = R.index_by(
                R.prop('id'), search_location.jurisdictions.all())
            for search_jurisdiction_unsaved in R.prop('jurisdictions',
                                                      search_location_data):
                # Existing instances have an id
                search_jurisdiction_id = R.prop_or(
                    None, 'id', search_jurisdiction_unsaved)
                search_jurisdiction, created = update_or_create_with_revision(
                    SearchJurisdiction,
                    R.merge(
                        R.prop(search_jurisdiction_id,
                               existing_search_intersections_by_id)
                        if search_jurisdiction_id else {},
                        search_jurisdiction_unsaved))
                # Once saved, add it to the search location
                search_location.jurisdictions.add(search_jurisdiction)

        return UpsertSearchLocation(search_location=search_location)
 def mutate(self, info, resource_data=None):
     """
         In addition to creating the correct default and update values, this also adds the sankey graph
         data to resource.data['graph']
     :param info:
     :param resource_data:
     :return:
     """
     update_or_create_values = input_type_parameters_for_update_or_create(resource_fields, resource_data)
     # Modifies defaults value to add .data.graph
     # We could decide in the future to generate this derived data on the client, but it's easy enough to do here
     add_sankey_graph_to_resource_dict(update_or_create_values['defaults'])
     if R.prop_or(False, 'id', update_or_create_values):
         resource, created = Resource.objects.update_or_create(**update_or_create_values)
     else:
         resource = Resource(**update_or_create_values['defaults'])
         resource.save()
     return UpsertResource(resource=resource)
Example #18
def user_project_data_fields(class_config):
    project_class_config = R.prop('project', class_config)
    location_class_config = R.prop('location', class_config)
    additional_user_scope_schemas = R.prop('additional_user_scope_schemas', class_config)\
        if R.prop_or(None, 'additional_user_scope_schemas', class_config) else {}

    return dict(
        # References a Project
        project=dict(type=R.prop('graphene_class', project_class_config),
                     graphene_type=R.prop('graphene_class',
                                          project_class_config),
                     fields=R.prop('graphene_fields', project_class_config),
                     type_modifier=lambda *type_and_args: Field(
                         *type_and_args,
                         resolver=model_resolver_for_dict_field(
                             R.prop('model_class', project_class_config)))),
        # The mapbox state for the user's use of this Project
        mapbox=dict(
            type=MapboxDataType,
            graphene_type=MapboxDataType,
            fields=mapbox_data_fields,
            type_modifier=lambda *type_and_args: Field(
                *type_and_args, resolver=resolver_for_dict_field),
        ),
        locations=dict(
            type=R.prop('graphene_class', location_class_config),
            graphene_type=R.prop('graphene_class', location_class_config),
            fields=R.prop('graphene_fields', location_class_config),
            type_modifier=lambda *type_and_args: List(*type_and_args)),
        # Whether the project is active for the user, and similar activity state
        activity=dict(
            type=ActivityDataType,
            graphene_type=ActivityDataType,
            fields=activity_data_fields,
            type_modifier=lambda *type_and_args: Field(
                *type_and_args, resolver=resolver_for_dict_field),
        ),
        # A list of user_searches that reference application specific classes
        userSearch=user_search_field_for_user_state_scopes(
            *R.props(['graphene_class', 'graphene_fields'],
                     R.prop('user_search', class_config))),
        **additional_user_scope_schemas)
Example #19
def enforce_unique_props(property_fields, django_instance_data):
    """
        Called in the mutate function of the Graphene Type class. Ensures that all properties marked
        as unique_with are made unique
    :param property_fields: The Graphene Type property fields dict. This is checked for unique_with,
    which when present points at a function that expects the django_instance_data and returns the django_instance_data
    modified so that the property in question has a unique value.
    :param django_instance_data: dict of an instance to be created or updated
    :return: The modified django_instance_data for any property that needs to have a unique value
    """

    # If any prop needs to be unique then run its unique_with function, which updates it to a unique value
    # by querying the database for duplicates. This is mainly for non-pk fields like a key
    return R.reduce(
        lambda reduced, prop_field_tup: prop_field_tup[1]['unique_with'](reduced) if
        R.has(prop_field_tup[0], reduced) and R.prop_or(False, 'unique_with', prop_field_tup[1]) else
        reduced,
        django_instance_data,
        property_fields.items()
    )
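# Usage sketch: wiring a unique_with function into a fields dict (hypothetical Region model;
# increment_prop_until_unique is defined above):
# region_fields = dict(
#     key=dict(
#         type=String,
#         unique_with=lambda instance_data: increment_prop_until_unique(
#             Region, None, 'key', {}, instance_data)
#     )
# )
# enforce_unique_props(region_fields, dict(key='foo')) then returns dict(key='foo') unchanged,
# or e.g. dict(key='foo1') if 'foo' is already taken in the database.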
def create_raw_links(delineator, resource):
    """
        Creates links from the csv if present
    :param delineator: The delimiter used to split each line of the raw data
    :param resource: The Resource object
    :return: Raw link data, or the raw node data if no explicit links are present
    """
    columns = R.item_path(['data', 'settings', 'columns'], resource)
    raw_data = R.item_path(['data', 'rawData'], resource)
    # Sometimes we split nodes and edges in the raw data into
    # dict(nodes=..., edges=...). Sometimes the raw data is just nodes
    # and we get the edges from the node data
    raw_links = R.prop_or(None, 'links', raw_data)
    return R.map(
        lambda line: R.from_pairs(
            zip(
                columns,
                R.map(lambda s: s.strip(), line.split(delineator))
            )
        ),
        raw_links
    ) if raw_links else raw_data
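# Worked example with hypothetical data: given columns ['source', 'target', 'value'] in
# data.settings.columns and a delineator of ';', an explicit rawData.links line 'a; b; 10'
# becomes dict(source='a', target='b', value='10'). If rawData has no 'links' key, the raw
# data is returned as-is and the edges are derived from the node data elsewhere.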
Example #21
def resolve_paginated_for_type(paginated_type, type_resolver, **kwargs):
    """
        Resolver for paginated types
    :param paginated_type: The paginated Type, e.g. LocationPaginationType
    :param type_resolver: The resolver for the non-paginated type, e.g. location_resolver
    :param kwargs: Contains 'objects', an array of prop sets for the non-paginated objects.
    Normally it's just a 1-item array.
    The other kwargs required for pagination are page_size and page; order_by is optional
    :return: The paginated query
    """
    def reduce_or(q_expressions):
        return R.reduce(lambda qs, q: qs | q if qs else q, None, q_expressions)

    objects = R.prop_or({}, 'objects', kwargs)

    instances = reduce_or(
        R.map(lambda obj: type_resolver('filter', **obj), objects))

    return get_paginator(instances, R.prop('page_size', kwargs),
                         R.prop('page', kwargs), paginated_type,
                         R.prop('order_by', kwargs))
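# Usage sketch with the names from the docstring (LocationPaginationType, location_resolver):
# resolve_paginated_for_type(
#     LocationPaginationType,
#     location_resolver,
#     objects=[dict(name__contains='Oakland')],
#     page_size=10,
#     page=1,
#     order_by='id'
# )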
def resolver_for_feature_collection(resource, context, **kwargs):
    """
        Like resolver but takes care of converting the geos value stored in the field to a dict that
        has the values we want to resolve, namely type and features.
    :param {string} resource: The instance whose json field data is being resolved
    :param {ResolveInfo} context: Graphene context which contains the fields queried in field_asts
    :return: {DataTuple} Standard resolver return value
    """

    # Take the camelized keys. We don't store data fields slugified. We leave them camelized
    selections = R.map(lambda sel: sel.name.value, context.field_asts[0].selection_set.selections)
    # Recover the json by parsing the string provided by GeometryCollection and mapping the geometries property to features
    json = R.compose(
        # Map the value GeometryCollection to FeatureCollection for the type property
        R.map_with_obj(lambda k, v: R.if_else(
            R.equals('type'),
            R.always('FeatureCollection'),
            R.always(v)
        )(k)),
        # Map geometries to features: [{type: Feature, geometry: geometry}]
        lambda dct: R.merge(
            # Remove geometries
            R.omit(['geometries'], dct),
            # Add features containing the geometries
            dict(features=R.map(
                lambda geometry: dict(type='Feature', geometry=geometry),
                R.prop_or([], 'geometries', dct))
            )
        ),
    )(ast.literal_eval(R.prop(context.field_name, resource).json))
    # Identify the keys that are actually in resource[json_field_name]
    all_selections = R.filter(
        lambda key: key in json,
        selections
    )
    # Pick out the values that we want
    result = R.pick(all_selections, json)

    # Return in the standard Graphene DataTuple
    return namedtuple('DataTuple', R.keys(result))(*R.values(result))
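# Illustration of the reshaping above on plain data (no geos involved): a stored
# GeometryCollection such as
# dict(type='GeometryCollection', geometries=[dict(type='Point', coordinates=[0, 0])])
# resolves to
# dict(type='FeatureCollection',
#      features=[dict(type='Feature', geometry=dict(type='Point', coordinates=[0, 0]))])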
Example #23
def resolve_version_instance(model_versioned_type, resolver, **kwargs):
    """
        Queries for the version instance for the given model type using the given resolver
        The kwargs must contain objects: [{id: the id}]
    :param model_versioned_type: Graphene model class created by create_version_container_type
    :param resolver: Resolver for the model
    :param kwargs: Must contain objects: [{id: the id}] to resolve the versions of the instance given by id
    :return:
    """
    # We technically receive an array but never accept more than the first item
    obj = R.head(R.prop('objects', kwargs))
    if not R.item_str_path_or(None, 'instance.id', obj):
        raise Exception(
            f"id required in kwargs.objects.instance for revisions query, but got: {kwargs}"
        )

    # Create the filter that only returns 1 location
    objs = resolver('filter', **R.prop_or({}, 'instance', obj)).order_by('id')
    return get_versioner(
        objs,
        model_versioned_type,
    )
def quiz_model_mutation_update(client, graphql_update_or_create_function,
                               create_path, update_path, values,
                               update_values):
    """
        Tests an update mutation for a model by calling a create with the given values then an update
        with the given update_values (plus the create id)
    :param client: The Apollo Client
    :param graphql_update_or_create_function: The update or create mutation function for the model. Expects client and input values
    :param create_path: The path to the result of the create in the data object (e.g. createRegion.region)
    :param update_path: The path to the result of the update in the data object (e.g. updateRegion.region)
    :param values: The input values to use for the create
    :param update_values: The input values to use for the update. This can be as little as one key value
    :return:
    """
    result = graphql_update_or_create_function(client, values=values)
    assert not R.has('errors', result), R.dump_json(
        R.map(lambda e: format_error(e), R.prop('errors', result)))
    # Extract the result and map the graphql keys to match the python keys
    created = R.compose(
        lambda r: R.map_keys(lambda key: underscore(key), r),
        lambda r: R.item_str_path(f'data.{create_path}', r))(result)
    # Check that the created instance contains all the input values
    assert values == pick_deep(created, values)
    # Update with the id and optionally key if there is one + update_values
    update_result = graphql_update_or_create_function(
        client,
        R.merge_all([
            dict(id=created['id']),
            dict(key=created['key'])
            if R.prop_or(False, 'key', created) else {}, update_values
        ]))
    assert not R.has('errors', update_result), R.dump_json(
        R.map(lambda e: format_error(e), R.prop('errors', update_result)))
    updated = R.item_str_path(f'data.{update_path}', update_result)
    assert created['id'] == updated['id']
    assert update_values == pick_deep(update_values, updated)
    return result, update_result
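# Usage sketch, following the docstring's region example (hypothetical mutation helper):
# quiz_model_mutation_update(
#     client,
#     graphql_update_or_create_region,
#     'createRegion.region',
#     'updateRegion.region',
#     values=dict(key='oakland', name='Oakland'),
#     update_values=dict(name='Oakland Updated')
# )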
Example #25
        def resolve_user_states(self, info, **kwargs):
            """
                Resolves only the user_state of the current user. If the user is_staff or is_superuser
                then the user id will not be passed implicitly to the query
            :param info:
            :param kwargs:
            :return:
            """
            context = info.context
            user = R.prop_or(None, 'user', context)
            admin = user.is_staff or user.is_superuser

            q_expressions = process_filter_kwargs(
                UserState,
                **R.merge_all([
                    dict(deleted__isnull=True),
                    kwargs,
                    dict(user__id=user.id) if not admin else {}
                ])
            )

            return UserState.objects.filter(
                *q_expressions
            )
    def process_stage(stage, i):
        # Get the current stage's nodes as the sources if there are any in nodes_by_stages
        sources = R.prop_or(None, R.prop('key', stage), nodes_by_stages)
        if not sources:
            return []
        # Iterate through the stages until one with nodes is found
        target_stage = None
        try:
            target_stage = R.find(
                # Try to find nodes matching this potential target stage. There might not be any
                lambda stage: nodes_by_stages[R.prop('key', stage)]
                if R.has(R.prop('key', stage), nodes_by_stages) else None,
                stages[i + 1:R.length(stages)])
        except ValueError:
            # That's fine; R.find errors if none is found. We really need an R.first that returns None
            pass

        # If no more stages contain nodes, we're done
        if not target_stage:
            return []
        targets = nodes_by_stages[R.prop('key', target_stage)]

        def prop_lookup(node, prop):
            return R.prop(
                prop, dict(zip(node['properties'], node['property_values'])))

        # Create the link with the source_node and target_node. Later we'll add
        # in source and target that points to the nodes overall index in the graph,
        # but we don't want to compute the overall indices yet
        return R.chain(
            lambda source: R.map(
                lambda target: dict(source_node=source,
                                    target_node=target,
                                    value=string_to_float(
                                        prop_lookup(source, value_key))),
                targets), sources)
Example #27
        def mutate(self, info, user_state_data=None):
            """
                Update or create the user state
            :param info:
            :param user_state_data:
            :return:
            """

            # Check that all the scope instances in user_state.data exist. We permit deleted instances for now.
            new_data = R.prop_or({}, 'data', user_state_data)
            # Copy since Graphene reuses this data
            copied_new_data = copy.deepcopy(new_data)
            old_user_state_data = UserState.objects.get(
                id=user_state_data['id']
            ).data if R.prop_or(None, 'id', user_state_data) else None

            # Inspect the data and find all scope instances within UserState.data
            # This includes userRegions[*].region, userProjects[*].project and within userRegions and userProjects
            # userSearch.userSearchLocations[*].search_location and whatever the implementing libraries define
            # in addition
            updated_new_data = validate_and_mutate_scope_instances(
                user_state_scope_instances_config,
                copied_new_data
            )

            # If either userProjects or userRegions are null, it means those scope instances aren't part
            # of the update, so merge in the old values
            if R.prop_or(None, 'id', user_state_data) and R.any_satisfy(
                    lambda user_scope_key: not R.prop_or(None, user_scope_key, updated_new_data),
                    ['userProjects', 'userRegions']
            ):
                # The special update case where one userScope collection is null
                # indicates that we are only updating the other userScope collection. The rest
                # should remain the same and not be removed
                for user_scope_key in ['userProjects', 'userRegions']:
                    # Database values
                    old_user_scopes_by_id = user_scope_instances_by_id(
                        user_scope_key,
                        old_user_state_data
                    )
                    # New values with updates applied
                    new_user_scopes_by_id = user_scope_instances_by_id(
                        user_scope_key,
                        updated_new_data
                    )
                    # Prefer the old over the new, merging all objects but overriding lists
                    # We override lists because a non-null list always replaces the old list in the database
                    updated_new_data[user_scope_key] = R.values(R.merge_deep(
                        old_user_scopes_by_id,
                        new_user_scopes_by_id,
                        MyMerger(
                            # pass in a list of tuples, with the
                            # strategies you are looking to apply
                            # to each type.
                            [
                                (list, ["override_non_null"]),
                                (dict, ["merge"])
                            ],
                            # next, choose the fallback strategies,
                            # applied to all other types:
                            ["override"],
                            # finally, choose the strategies in
                            # the case where the types conflict:
                            ["override"]
                        )
                    ))

            # Update user_state_data with the updated data
            modified_user_state_data = R.merge(user_state_data, dict(data=updated_new_data))

            # id or user.id can be used to identify the existing instance
            id_props = R.compact_dict(
                dict(
                    id=R.prop_or(None, 'id', modified_user_state_data),
                    user_id=R.item_str_path_or(None, 'user.id', modified_user_state_data)
                )
            )

            def fetch_and_merge(modified_user_state_data, props):
                existing = UserState.objects.filter(**props)
                # If the user doesn't have a user state yet
                if not R.length(existing):
                    return modified_user_state_data

                return merge_data_fields_on_update(
                    ['data'],
                    R.head(existing),
                    # Merge the existing instance's id in case it wasn't in user_state_data
                    R.merge(modified_user_state_data, R.pick(['id'], R.head(existing)))
                )

            modified_data = R.if_else(
                R.compose(R.length, R.keys),
                lambda props: fetch_and_merge(modified_user_state_data, props),
                lambda _: modified_user_state_data
            )(id_props)

            update_or_create_values = input_type_parameters_for_update_or_create(
                user_state_fields,
                # Make sure that all props are unique that must be, either by modifying values or erring.
                enforce_unique_props(
                    user_state_fields,
                    modified_data)
            )

            user_state, created = update_or_create_with_revision(UserState, update_or_create_values)
            return UpsertUserState(user_state=user_state)
Example #28
def create_user_state_config(class_config):
    """
        Creates the UserStateType based on specific class_config
    :param class_config: A dict containing class configurations. The default is:
    dict(
        settings=dict(
            model_class=Settings,
            graphene_class=SettingsType,
            graphene_fields=settings_fields,
            query=SettingsQuery,
            mutation=SettingsMutation
        ),
        region=dict(
            model_class=Region,
            graphene_class=RegionType,
            graphene_fields=region_fields,
            query=RegionQuery,
            mutation=RegionMutation
        ),
        project=dict(
            model_class=Project,
            graphene_class=ProjectType,
            graphene_fields=project_fields,
            query=ProjectQuery,
            mutation=ProjectMutation
        ),
        resource=dict(
            model_class=Resource,
            graphene_class=ResourceType,
            graphene_fields=resource_fields,
            query=ResourceQuery,
            mutation=ResourceMutation
        ),
        location=get_location_schema(),
        user_search=get_user_search_data_schema(),
        search_location=get_search_location_schema()
        # additional_user_scope_schemas and additional_user_scopes
        # are passed in from a calling app.
        # These are a dict of properties that need to go on user_regions and user_projects
        # at the same level as userSearch. For instance, a user's saved app selections could go here:
        # additional_user_scope_schemas = dict(
        #     userDesignFeatureLayers=dict(
        #         graphene_class=UserDesignFeatureDataType,
        #         graphene_fields=user_design_feature_data_fields
        #     )
        # )
        # additional_django_model_user_scopes explains the path to Django models within additional_user_scope_schemas:
        # additional_django_model_user_scopes = dict(
        #     userDesignFeatureLayers=dict(
        #         designFeature=True
        #     )
        # )
        # This would match a list of some Django DesignFeature model instances
    )
    :return:
    """

    class UserStateType(DjangoObjectType, DjangoObjectTypeRevisionedMixin):
        """
            UserStateType models UserState, which represents the settings both imposed upon and chosen by the user
        """
        id = graphene.Int(source='pk')

        class Meta:
            model = UserState

    # Modify data field to use the resolver.
    # I guess there's no way to specify a resolver upon field creation, since graphene just reads the underlying
    # Django model to generate the fields
    UserStateType._meta.fields['data'] = Field(
        UserStateDataType(class_config),
        resolver=resolver_for_data_field
    )

    user_state_fields = merge_with_django_properties(UserStateType, dict(
        id=dict(create=DENY, update=REQUIRE),
        # This is a Foreign Key. Graphene generates these relationships for us, but we need it here to
        # support our Mutation subclasses and query_argument generation
        # For simplicity we limit fields to id. Mutations can only use id, and a query doesn't need other
        # details of the User--it can query separately for that
        user=dict(graphene_type=UserType, fields=user_fields),
        # This refers to the UserState, which is a representation of all the json fields of UserState.data
        data=dict(graphene_type=UserStateDataType(class_config), fields=user_state_data_fields(class_config),
                  default=lambda: dict()),
        **reversion_and_safe_delete_types
    ))

    user_state_mutation_config = dict(
        class_name='UserState',
        crud={
            CREATE: 'createUserState',
            UPDATE: 'updateUserState'
        },
        resolve=guess_update_or_create
    )

    additional_django_model_user_scopes = R.prop('additional_django_model_user_scopes', class_config) \
        if R.prop_or(None, 'additional_django_model_user_scopes', class_config) else {}
    additional_user_scope_schemas = R.prop('additional_user_scope_schemas', class_config) \
        if R.prop_or(None, 'additional_user_scope_schemas', class_config) else {}

    # The scope instance types expected in user_state.data
    user_state_scope_instances_config = R.concat([
        # dict(region=True) means search all userRegions for that dict
        dict(pick=dict(userRegions=dict(region=True)),
             key='region',
             model=get_region_model()
             ),
        # dict(project=True) means search all userProjects for that dict
        dict(
            pick=dict(userProjects=dict(project=True)),
            key='project',
            model=get_project_model(),
            # This is currently just needed for the field key's unique_with function
            field_config=project_fields,
            # Projects can be modified when userState is mutated
            can_mutate_related=True
        ),
        dict(
            pick=dict(
                userRegions=[
                    dict(
                        userSearch=dict(
                            # dict(searchLocation=True) means search all userSearchLocations for that dict
                            userSearchLocations=dict(
                                searchLocation=True,
                            )
                        )
                    )
                ],
                userProjects=[
                    dict(
                        userSearch=dict(
                            # dict(searchLocation=True) means search all userSearchLocations for that dict
                            userSearchLocations=dict(searchLocation=True)
                        )
                    )
                ]
            ),
            key='searchLocation',
            model=get_search_location_schema()['model_class'],
            # These can be modified when userState is mutated
            can_mutate_related=True
        ),
    ],
        # Map each additional_django_model_user_scopes to a scope config
        R.map_with_obj_to_values(
            lambda field_name, additional_django_model_user_scope: dict(
                pick=dict(
                    userRegions=[
                        {field_name: additional_django_model_user_scope}
                    ],
                    userProjects=[
                        {field_name: additional_django_model_user_scope}
                    ]
                ),
                # Assume the scope object is the deepest field
                key=list(R.keys(R.flatten_dct(additional_django_model_user_scope, '.')))[0].split('.')[-1],
                # model isn't needed unless can_mutate_related is true
                model=additional_user_scope_schemas[field_name]['model'],
                # These can be modified when userState is mutated
                can_mutate_related=R.prop_or(False, 'can_mutate_related', additional_django_model_user_scope)
            ),
            additional_django_model_user_scopes
        )
    )

    class UpsertUserState(Mutation):
        """
            Abstract base class for mutation
        """
        user_state = Field(UserStateType)

        def mutate(self, info, user_state_data=None):
            """
                Update or create the user state
            :param info:
            :param user_state_data:
            :return:
            """

            # Check that all the scope instances in user_state.data exist. We permit deleted instances for now.
            new_data = R.prop_or({}, 'data', user_state_data)
            # Copy since Graphene reuses this data
            copied_new_data = copy.deepcopy(new_data)
            old_user_state_data = UserState.objects.get(
                id=user_state_data['id']
            ).data if R.prop_or(None, 'id', user_state_data) else None

            # Inspect the data and find all scope instances within UserState.data
            # This includes userRegions[*].region, userProjects[*].project and within userRegions and userProjects
            # userSearch.userSearchLocations[*].search_location and whatever the implementing libraries define
            # in addition
            updated_new_data = validate_and_mutate_scope_instances(
                user_state_scope_instances_config,
                copied_new_data
            )

            # If either userProjects or userRegions are null, it means those scope instances aren't part
            # of the update, so merge in the old values
            if R.prop_or(None, 'id', user_state_data) and R.any_satisfy(
                    lambda user_scope_key: not R.prop_or(None, user_scope_key, updated_new_data),
                    ['userProjects', 'userRegions']
            ):
                # The special update case where one userScope collection is null
                # indicates that we are only updating the other userScope collection. The rest
                # should remain the same and not be removed
                for user_scope_key in ['userProjects', 'userRegions']:
                    # Database values
                    old_user_scopes_by_id = user_scope_instances_by_id(
                        user_scope_key,
                        old_user_state_data
                    )
                    # New values with updates applied
                    new_user_scopes_by_id = user_scope_instances_by_id(
                        user_scope_key,
                        updated_new_data
                    )
                    # Prefer the old over the new, merging all objects but overriding lists
                    # We override lists because a non-null list always replaces the old list in the database
                    updated_new_data[user_scope_key] = R.values(R.merge_deep(
                        old_user_scopes_by_id,
                        new_user_scopes_by_id,
                        MyMerger(
                            # pass in a list of tuples, with the
                            # strategies you are looking to apply
                            # to each type.
                            [
                                (list, ["override_non_null"]),
                                (dict, ["merge"])
                            ],
                            # next, choose the fallback strategies,
                            # applied to all other types:
                            ["override"],
                            # finally, choose the strategies in
                            # the case where the types conflict:
                            ["override"]
                        )
                    ))

            # Update user_state_data with the updated data
            modified_user_state_data = R.merge(user_state_data, dict(data=updated_new_data))

            # id or user.id can be used to identify the existing instance
            id_props = R.compact_dict(
                dict(
                    id=R.prop_or(None, 'id', modified_user_state_data),
                    user_id=R.item_str_path_or(None, 'user.id', modified_user_state_data)
                )
            )

            def fetch_and_merge(modified_user_state_data, props):
                existing = UserState.objects.filter(**props)
                # If the user doesn't have a user state yet
                if not R.length(existing):
                    return modified_user_state_data

                return merge_data_fields_on_update(
                    ['data'],
                    R.head(existing),
                    # Merge the existing instance's id in case it wasn't in user_state_data
                    R.merge(modified_user_state_data, R.pick(['id'], R.head(existing)))
                )

            modified_data = R.if_else(
                R.compose(R.length, R.keys),
                lambda props: fetch_and_merge(modified_user_state_data, props),
                lambda _: modified_user_state_data
            )(id_props)

            update_or_create_values = input_type_parameters_for_update_or_create(
                user_state_fields,
                # Make sure that all props are unique that must be, either by modifying values or erring.
                enforce_unique_props(
                    user_state_fields,
                    modified_data)
            )

            user_state, created = update_or_create_with_revision(UserState, update_or_create_values)
            return UpsertUserState(user_state=user_state)

    class CreateUserState(UpsertUserState):
        """
            Create UserState mutation class
        """

        class Arguments:
            user_state_data = type('CreateUserStateInputType', (InputObjectType,),
                                   input_type_fields(user_state_fields, CREATE, UserStateType)
                                   )(required=True)

    class UpdateUserState(UpsertUserState):
        """
            Update UserState mutation class
        """

        class Arguments:
            user_state_data = type('UpdateUserStateInputType', (InputObjectType,),
                                   input_type_fields(user_state_fields, UPDATE, UserStateType))(required=True)

    graphql_update_or_create_user_state = graphql_update_or_create(user_state_mutation_config, user_state_fields)
    graphql_query_user_states = graphql_query(UserStateType, user_state_fields, 'userStates')

    return dict(
        model_class=UserState,
        graphene_class=UserStateType,
        graphene_fields=user_state_fields,
        create_mutation_class=CreateUserState,
        update_mutation_class=UpdateUserState,
        graphql_mutation=graphql_update_or_create_user_state,
        graphql_query=graphql_query_user_states
    )
Example #29
from rescape_python_helpers import ramda as R
from rescape_graphene import resolver_for_dict_field, resolver_for_dict_list
from graphene import ObjectType, String, Float, List, Field, Int, Boolean

stage_data_fields = dict(key=dict(type=String),
                         name=dict(type=String),
                         targets=dict(type=String,
                                      type_modifier=lambda typ: List(typ)))

StageDataType = type(
    'StageDataType',
    (ObjectType, ),
    R.map_with_obj(
        # If we have a type_modifier function, pass the type to it, otherwise simply construct the type
        lambda k, v: R.prop_or(lambda typ: typ(), 'type_modifier', v)
        (R.prop('type', v)),
        stage_data_fields))

resource_settings_data_fields = dict(
    defaultLocation=dict(type=Float, type_modifier=lambda typ: List(typ)),
    unit=dict(type=String),
    columns=dict(type=String, type_modifier=lambda typ: List(typ)),
    stageKey=dict(type=String),
    valueKey=dict(type=String),
    locationKey=dict(type=String),
    nodeNameKey=dict(type=String),
    nodeColorKey=dict(type=String),
    linkColorKey=dict(type=String),
    stages=dict(
        type=StageDataType,
        graphene_type=StageDataType,
Example #30
def resolve_field_type(field_config):
    field_type = R.prop_or(R.prop_or(None, 'graphene_type', field_config), 'type', field_config)
    if not field_type:
        raise Exception(f'field_config {json.dumps(field_config)} lacks a type or graphene_type')
    return field_type
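# Usage sketch: 'type' wins, 'graphene_type' is the fallback
# resolve_field_type(dict(type=String))                      # -> String
# resolve_field_type(dict(graphene_type=StageDataType))      # -> StageDataType
# resolve_field_type(dict())                                 # raises Exception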