def quiz_model_query(client,
                     model_query_function,
                     result_name,
                     variables,
                     expect_length=1):
    """
        Tests a query for a model with variables that produce exactly one result
    :param client: Apollo client
    :param model_query_function: Query function expecting the client and variables
    :param result_name: The name of the result object in the data object
    :param variables: key value variables for the query
    :param expect_length: Default 1. Optional number items to expect
    :return: returns the result for further assertions
    """
    all_result = model_query_function(client)
    assert not R.has('errors', all_result), R.dump_json(
        R.map(lambda e: format_error(e), R.prop('errors', all_result)))
    result = model_query_function(client, variables=variables)
    # Check against errors
    assert not R.has('errors', result), R.dump_json(
        R.map(lambda e: format_error(e), R.prop('errors', result)))
    # Simple assertion that the query looks good
    assert expect_length == R.length(R.item_path(['data', result_name],
                                                 result))
    return result
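# A minimal usage sketch (hypothetical: graphql_query_regions is assumed to be a
# query function with the same signature as the others in this file):
#
#   def test_query_region(self):
#       quiz_model_query(self.client, graphql_query_regions, 'regions',
#                        variables=dict(key='belgium'))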
    def test_query_foo_with_null_geojson(self):
        # Query for foos by key
        foo_results = graphql_query_foos(self.client,
                                         variables=dict(key='fookit'))
        assert not R.prop('errors', foo_results), R.dump_json(
            R.map(lambda e: format_error(e), R.prop('errors', foo_results)))
        assert 1 == R.length(
            R.map(R.omit_deep(omit_props), R.item_path(['data', 'foos'], foo_results)))
def log_request_body(info, response_or_error):
    body = info.context._body.decode('utf-8')
    try:
        json_body = json.loads(body)
        log = logger.error if isinstance(response_or_error, ErrorType) else logger.debug
        log(f" User: {info.context.user} \n Action: {json_body['operationName']} \n Variables: {json_body['variables']} \n Body:  {json_body['query']}")
        if hasattr(response_or_error, '_meta') and isinstance(
                response_or_error._meta, MutationOptions):
            # Just log top level types
            if isinstance(response_or_error, Model):
                mutation_response = json.dumps(R.omit(
                    ['_state'], response_or_error.__dict__),
                                               sort_keys=True,
                                               indent=1,
                                               cls=MyDjangoJSONEncoder)
                logger.debug(f'Mutation returned {mutation_response}')
            elif isinstance(response_or_error, BaseType):
                try:
                    mutation_response = json.dumps(
                        R.omit(['_state'], response_or_error.__dict__),
                        sort_keys=True,
                        indent=1,
                    )
                    logger.debug(f'Mutation returned {mutation_response}')
                except Exception:
                    logger.debug(
                        f'Mutation returned {response_or_error.__class__}')
        else:
            if hasattr(response_or_error, 'objects'):
                count = response_or_error.objects.count()
                # Log up to 100 ids, don't log if it's a larger set because it might be a paging query
                ids = R.join(' ', [
                    '', 'having ids:',
                    R.join(
                        ', ',
                        R.map(R.prop("id"),
                              response_or_error.objects.values('id')))
                ]) if count < 100 else ""
                logger.debug(
                    f'Paginated Query Page {response_or_error.page} of page size {response_or_error.page_size} out of total pages {response_or_error.pages} returned {count} results{ids}'
                )
            elif hasattr(response_or_error, 'count'):
                count = response_or_error.count()
                # Log up to 100 ids, don't log if it's a larger set because it might be a paging query
                ids = R.join(' ', [
                    '', 'having ids:',
                    R.join(', ',
                           R.map(R.prop("id"), response_or_error.values('id')))
                ]) if count < 100 else ""
                logger.debug(f'Query returned {count} results{ids}')
            else:
                id = R.prop('id', response_or_error)
                logger.debug(f'Query returned single result {id}')

    except Exception as e:
        # If the body can't be parsed or inspected, log it raw
        logger.error(f'{e}: {body}')
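# One hypothetical way to invoke log_request_body is from Graphene middleware;
# this is a sketch, not necessarily the wiring this project uses:
#
#   class LoggingMiddleware(object):
#       def resolve(self, next, root, info, **kwargs):
#           response_or_error = next(root, info, **kwargs)
#           log_request_body(info, response_or_error)
#           return response_or_error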
def find_scope_instances(user_state_scope, new_data):
    """
        Retrieve the scope instances to verify the Ids.
        Scope instances must have ids unless they are allowed to be created/updated
        during the userState mutation (such as searchLocations)
    :param new_data: The data to search
    :param user_state_scope Dict with 'pick' in the shape of the instances we are looking for in new_data,
    e.g. dict(userRegions={region: True}) to search new_data.userRegions[] for all occurrences of {region:...}
     and 'key' which indicates the actually key of the instance (e.g. 'region' for regions)
    :return: dict(
        instances=Instances actually in the database,
    )
    """
    def until(key, value):
        return key != R.prop('key', user_state_scope)

    return R.compose(
        lambda scope_dict: dict(
            # See which instances with ids are actually in the database
            # If any are missing we have an invalid update or need to create those instances if permitted
            instances=list(
                find_scope_instances_by_id(R.prop('model', user_state_scope),
                                           scope_dict['scope_ids'])),
            # The path from userRegions or userProjects to the scope instances, used to replace
            # a null update value with the existing values
            user_scope_path=list(R.keys(R.flatten_dct(user_state_scope, '.')))[
                0],
            **scope_dict),
        lambda scope_objs: dict(
            # Unique by id or accept if there is no id, this loses data, but it's just for validation
            scope_objs=R.unique_by(
                lambda obj: R.prop_or(str(now()), 'id', obj['value']),
                scope_objs),
            scope_ids=R.unique_by(
                R.identity,
                compact(
                    R.map(
                        lambda scope_obj: R.prop_or(None, 'id', scope_obj['value']),
                        scope_objs)))),
        # Use the pick key property to find the scope instances in the data
        # If we don't match anything we can get null or an empty item. Filter/compact these out
        R.filter(lambda obj: obj['value'] and (not isinstance(
            obj['value'], list) or R.length(obj['value']) != 0)),
        R.map(lambda pair: dict(key=pair[0], value=pair[1])),
        lambda flattened_data: R.to_pairs(flattened_data),
        lambda data: R.flatten_dct_until(
            R.pick_deep_all_array_items(R.prop('pick', user_state_scope), data
                                        ), until, '.'))(new_data)
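# A sketch of the user_state_scope shape described in the docstring above, for a
# hypothetical Region scope (the real configs live in user_state_scope_instances_config):
#
#   user_state_scope = dict(
#       model=get_region_model(),
#       # Find every {region: ...} under data.userRegions[*]
#       pick=dict(userRegions=dict(region=True)),
#       key='region')
#   find_scope_instances(user_state_scope, user_state.data)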
def resolver_for_dict_list(resource, context, **kwargs):
    """
        Resolver for the data field that is a list. This extracts the desired json fields from the context
        and creates a tuple of the field values. Graphene has no built in way for drilling into json types.
        The property value must be a list or null. Null values will return null, list values will be processed
        in turn by graphene
    :param resource:
    :param context:
    :param kwargs: Arguments to filter with
    :return:
    """
    selections = resolve_selections(context)
    field_name = context.field_name
    # Value defaults to None. Empty is not the same as None
    value = R.prop(field_name, resource) if R.has(field_name, resource) else None

    return R.map(
        lambda data: pick_selections(selections, data),
        R.filter(
            # We only let this value through if it matches the kwargs
            # TODO data doesn't include full values for embedded model values, rather just {id: ...}. So if kwargs have
            # searches on other values of the model this will fail. The solution is to load the model values, but I
            # need some way to figure out where they are in data
            lambda data: R.dict_matches_params_deep(kwargs, data),
            value
        )
    ) if value else value
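# A sketch of the filtering with hypothetical data: if the resource's data field
# holds [{'id': 1, 'size': 'big'}, {'id': 2, 'size': 'small'}] and kwargs is
# dict(size='big'), only the first item passes R.dict_matches_params_deep and is
# then trimmed to the queried selections by pick_selections.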
    def handle_floats(v):
        if hasattr(v, 'values'):
            # Multiple floats
            return R.map(lambda fv: float(fv.value), v.values)
        else:
            # Single float
            return float(v.value)
def dump_graphql_data_object(dct):
    """
        Stringify a dict to a graphql input parameter key values in the form
        Also camelizes keys if the are slugs
        {"key1": "string value1", "key2": "number2", ...}
    :param dct:
    :return:
    """

    if dct == None:
        return 'null'
    elif isinstance(dct, dict):
        return '{%s}' % R.join(
            ', ',
            R.map(
                lambda key_value: R.join(
                    ': ',
                    [
                        camelize(quote(key_value[0]), False),
                        dump_graphql_data_object(key_value[1])
                    ]
                ),
                dct.items()
            )
        )
    elif isinstance(dct, list):
        return f"[{R.join(', ', R.map(lambda item: dump_graphql_data_object(item), dct))}]"
    else:
        return quote(dct)
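# A sketch of the expected output with hypothetical values (assuming quote leaves
# bare keys unquoted and double-quotes string values):
#
#   dump_graphql_data_object(dict(name='Carre', tree_id=8, tags=['a', 'b']))
#   # -> something like: {name: "Carre", treeId: 8, tags: ["a", "b"]}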
    def parse_value(cls, value):
        return geometrycollection_from_feature_collection(
            dict(type='FeatureCollection',
                 features=R.map(
                     lambda geometry: dict(type='Feature', geometry=geometry),
                     value['geometries']))
        )
def resolve_selections(context):
    """
        Returns the query fields for the current context.
    :param {ResolveInfo} context: The graphene resolution context
    :return: {[String]} The field names that are in the query
    """
    return R.map(lambda sel: sel.name.value, context.field_asts[0].selection_set.selections)
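# For a hypothetical query such as:
#
#   query { regions { id key name } }
#
# a resolver for the regions field would get ['id', 'key', 'name'] from this function.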
    def test_create(self):
        result, new_result = quiz_model_mutation_create(
            self.client, graphql_update_or_create_project, 'createProject.project',
            dict(
                name='Carre',
                key='carre',
                geojson={
                    'type': 'FeatureCollection',
                    'features': [{
                        "type": "Feature",
                        "geometry": {
                            "type": "Polygon",
                            "coordinates": [
                                [[49.5294835476, 2.51357303225], [51.4750237087, 2.51357303225],
                                 [51.4750237087, 6.15665815596],
                                 [49.5294835476, 6.15665815596], [49.5294835476, 2.51357303225]]]
                        }
                    }]
                },
                data=dict(),
                locations=R.map(R.compose(R.pick(['id']), lambda l: l.__dict__), self.locations),
                user=R.pick(['id'], R.head(self.users).__dict__),
            ),
            dict(key=r'carre.+'))
        versions = Version.objects.get_for_object(get_project_model().objects.get(
            id=R.item_str_path('data.createProject.project.id', result)
        ))
        assert len(versions) == 1
    def test_update(self):
        result, update_result = quiz_model_mutation_update(
            self.client,
            graphql_update_or_create_project,
            'createProject.project',
            'updateProject.project',
            dict(
                name='Carre',
                key='carre',
                geojson={
                    'type': 'FeatureCollection',
                    'features': [{
                        "type": "Feature",
                        "geometry": {
                            "type": "Polygon",
                            "coordinates": [
                                [[49.4426671413, 5.67405195478], [50.1280516628, 5.67405195478],
                                 [50.1280516628, 6.24275109216],
                                 [49.4426671413, 6.24275109216], [49.4426671413, 5.67405195478]]]
                        }
                    }]
                },
                data=dict(),
                locations=R.map(R.compose(R.pick(['id']), lambda l: l.__dict__), self.locations),
                user=R.pick(['id'], R.head(self.users).__dict__),
            ),
            # Update the coords and limit to one location
            dict(
                geojson={
                    'features': [{
                        "type": "Feature",
                        "geometry": {
                            "type": "Polygon",
                            "coordinates": [
                                [[49.5294835476, 2.51357303225], [51.4750237087, 2.51357303225],
                                 [51.4750237087, 6.15665815596],
                                 [49.5294835476, 6.15665815596], [49.5294835476, 2.51357303225]]]
                        }
                    }]
                },
                locations=R.map(R.compose(R.pick(['id']), lambda l: l.__dict__), [R.head(self.locations)])
            )
        )
        versions = Version.objects.get_for_object(get_project_model().objects.get(
            id=R.item_str_path('data.updateProject.project.id', update_result)
        ))
        assert len(versions) == 2
    def setUp(self):
        self.client = Client(schema)
        delete_sample_resources()
        self.resources = create_sample_resources()
        self.regions = list(set(R.map(lambda resource: resource.region, self.resources)))
        # Create a graph for all resources
        # This modifies each resource
        self.graph = create_sankey_graph_from_resources(self.resources)
    def test_create_user(self):
        values = dict(username="******", firstName='T', lastName='Rex',
                      password=make_password("rrrrhhh", salt='not_random'))
        result = graphql_update_or_create_user(self.client, values)
        assert not R.prop('errors', result), R.dump_json(
            R.map(lambda e: format_error(e), R.prop('errors', result)))
        # Look at the user added and omit the non-deterministic values
        self.assertMatchSnapshot(
            R.omit_deep(omit_props, R.item_path(['data', 'createUser', 'user'], result)))
def validate_and_mutate_scope_instances(scope_instances_config, data):
    """
        Inspect the data and find all scope instances within data
        For UserState, for instance, this includes userRegions[*].region, userProject[*].project and within
        userRegions and userProjects userSearch.userSearchLocations[*].search_location and whatever the implementing
        libraries define in addition
    :param scope_instances_config: See user_state_schema.user_state_scope_instances_config for an example
    :param data: The instance data field containing the scope instances
    :return: The updated data with scope instances possibly created/updated if allowed. If creates occur
    then the scope instance will now have an id. Otherwise no changes are visible
    """

    validated_scope_objs_instances_and_ids_sets = R.map(
        lambda scope_instance_config: find_scope_instances(
            scope_instance_config, data), scope_instances_config)

    # Some scope instances can be created or modified when embedded in the data. This makes
    # mutating the containing instance, such as UserState, a one-step process: new Projects,
    # SearchLocations, etc. can be created without having to call their mutations separately
    # ahead of time, which would produce a series of mutations that aren't failure-protected
    # as a single transaction
    for i, validated_scope_objs_instances_and_ids in enumerate(
            validated_scope_objs_instances_and_ids_sets):
        scope = R.merge(
            scope_instances_config[i],
            dict(model=scope_instances_config[i]['model'].__name__))

        # If any scope instances with an id specified in new_data don't exist, throw an error
        if R.length(validated_scope_objs_instances_and_ids['scope_ids']
                    ) != R.length(
                        validated_scope_objs_instances_and_ids['instances']):
            ids = R.join(', ',
                         validated_scope_objs_instances_and_ids['scope_ids'])
            instances_string = R.join(
                ', ',
                R.map(lambda instance: str(instance),
                      validated_scope_objs_instances_and_ids['instances']))
            raise Exception(
                f"For scope {dumps(scope)} Some scope ids among ids:[{ids}] being saved in user state do not exist. Found the following instances in the database: {instances_string or 'None'}. UserState.data is {dumps(data)}"
            )

        # Create/Update any scope instances that permit it
        model = scope_instances_config[i]['model']
        data = handle_can_mutate_related(
            model, scope, data, validated_scope_objs_instances_and_ids)
    return data
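# A sketch of what a scope_instances_config entry might look like, following the
# 'pick'/'key'/'model' shape that find_scope_instances expects (hypothetical;
# see user_state_schema.user_state_scope_instances_config for the real thing):
#
#   scope_instances_config = [
#       dict(model=get_region_model(),
#            pick=dict(userRegions=dict(region=True)),
#            key='region'),
#       dict(model=get_project_model(),
#            pick=dict(userProjects=dict(project=True)),
#            key='project')
#   ]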
def quiz_model_mutation_create(client,
                               graphql_update_or_create_function,
                               result_path,
                               values,
                               second_create_results=None,
                               second_create_does_update=False):
    """
        Tests a create mutation for a model
    :param client: The Apollo Client
    :param graphql_update_or_create_function: The update or create mutation function for the model. Expects client and input values
    :param result_path: The path to the result of the create in the data object (e.g. createRegion.region)
    :param values: The input values to use for the create
    :param second_create_results: Object, tests a second create if specified. Use to make sure that create with the same values
    creates a new instance or updates, depending on what you expect it to do.
    The values of this should be regexes that match the created instance
    :param second_create_does_update: Default False. If True expects a second create with the same value to update rather than create a new instance
    :return: Tuple with two return values. The second is None unless second_create_results is specified
    """
    result = graphql_update_or_create_function(client, values=values)

    result_path_partial = R.item_str_path(f'data.{result_path}')
    assert not R.has('errors', result), R.dump_json(
        R.map(lambda e: format_error(e), R.prop('errors', result)))
    # Get the created value, using underscore to make the camelcase keys match python keys
    created = R.map_keys(lambda key: underscore(key),
                         result_path_partial(result))
    # get all the keys in values that are in created. This should match values if created has everything we expect
    assert values == pick_deep(created, values)
    # Try creating with the same values again; depending on unique constraints this either
    # creates a new instance or updates the existing one
    if second_create_results:
        new_result = graphql_update_or_create_function(client, values)
        assert not R.has('errors', new_result), R.dump_json(
            R.map(lambda e: format_error(e), R.prop('errors', new_result)))
        created_too = result_path_partial(new_result)
        if second_create_does_update:
            assert created['id'] == created_too['id']
        else:
            assert created['id'] != created_too['id']
        for path, value in R.flatten_dct(second_create_results, '.').items():
            assert re.match(value, R.item_str_path_or(None, path, created_too))
    else:
        new_result = None

    return result, new_result
def assert_no_errors(result):
    """
        Assert no graphql request errors
    :param result: The request Result
    :return: None
    """
    assert not R.prop_or(False, 'errors', result), R.dump_json(
        R.map(lambda e: format_error(e), R.prop('errors', result)))
def create_local_sample_search_locations(cls, sample_locations):
    """
        Create a sample search location that matches each location by name
    :param cls:
    :param sample_locations:
    :return:
    """
    delete_sample_search_locations(cls)
    return R.map(lambda kv: create_sample_search_location(cls, kv[1]),
                 enumerate(sample_locations))
def create_raw_nodes(resource):
    """
        Creates nodes for each column from the csv
    :param resource: The Resource object
    :return: Raw node data
    """
    columns = R.item_path(['data', 'settings', 'columns'], resource)
    raw_data = R.item_path(['data', 'raw_data'], resource)
    return R.map(lambda line: R.from_pairs(zip(columns, line.split(';'))),
                 raw_data)
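# A minimal sketch with hypothetical resource data:
#
#   resource = dict(data=dict(
#       settings=dict(columns=['siteName', 'sector']),
#       raw_data=['Plant A;Energy', 'Plant B;Water']))
#   create_raw_nodes(resource)
#   # -> [{'siteName': 'Plant A', 'sector': 'Energy'},
#   #     {'siteName': 'Plant B', 'sector': 'Water'}]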
def create_sample_regions(cls):
    """
        Create sample regions
    :param cls: The Region class
    :return:
    """
    delete_sample_regions(cls)
    # Convert all sample region dicts to persisted Region instances
    # Give each region an owner
    return R.map(lambda kv: create_sample_region(cls, kv[1]),
                 enumerate(sample_regions))
    def test_query(self):
        user_results = graphql_query_users(self.client)
        assert not R.prop('errors', user_results), R.dump_json(R.map(lambda e: format_error(e), R.prop('errors', user_results)))
        assert 2 == R.length(R.map(R.omit_deep(omit_props), R.item_path(['data', 'users'], user_results)))

        # Query for foos based on the related User
        foo_results = graphql_query_foos(
            self.client,
            variables=dict(
                user=R.pick(['id'], self.lion.__dict__),
                # Test filters
                name_contains='oo',
                name_contains_not='jaberwaki'
            )
        )
        assert not R.prop('errors', foo_results), R.dump_json(R.map(lambda e: format_error(e), R.prop('errors', foo_results)))
        assert 1 == R.length(R.map(R.omit_deep(omit_props), R.item_path(['data', 'foos'], foo_results)))
        # Make sure the Django instance in the json blob was resolved
        assert self.cat.id == R.item_path(['data', 'foos', 0, 'data', 'friend', 'id'], foo_results)
def user_scope_instances_by_id(user_scope_key, user_state_data):
    # Resolve the user scope instances
    return R.from_pairs(
        R.map(
            lambda user_scope: [
                R.item_path_or(None, [scope_key_lookup[user_scope_key], 'id'], user_scope),
                user_scope
            ],
            R.prop_or([], user_scope_key, user_state_data)
        )
    )
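# A sketch with hypothetical data, assuming scope_key_lookup maps 'userRegions'
# to 'region':
#
#   user_scope_instances_by_id(
#       'userRegions',
#       dict(userRegions=[dict(region=dict(id=1), activity=dict(isActive=True))]))
#   # -> {1: dict(region=dict(id=1), activity=dict(isActive=True))}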
def create_raw_links(delineator, resource):
    """
        Creates links from the csv if present
    :param resource: The Resource object
    :return: Raw node data
    """
    columns = R.item_path(['data', 'settings', 'columns'], resource)
    raw_data = R.item_path(['data', 'rawData'], resource)
    # Sometimes we split nodes and links in the raw data into
    # dict(nodes=..., links=...). Sometimes the raw data is just nodes
    # and we get the links from the node data
    raw_links = R.prop_or(None, 'links', raw_data)
    return R.map(
        lambda line: R.from_pairs(
            zip(
                columns,
                R.map(lambda s: s.strip(), line.split(delineator))
            )
        ),
        raw_links
    ) if raw_links else raw_data
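# A sketch with hypothetical raw data: given columns ['source', 'target', 'value'],
# a ';' delineator and raw_data = dict(links=['Plant A; Plant B; 10']), each line becomes
#   {'source': 'Plant A', 'target': 'Plant B', 'value': '10'}
# whereas raw data without a links property is returned unchanged.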
def resolver_for_feature_collection(resource, context, **kwargs):
    """
        Like resolver but takes care of converting the geos value stored in the field to a dict that
        has the values we want to resolve, namely type and features.
    :param {string} resource: The instance whose json field data is being resolved
    :param {ResolveInfo} context: Graphene context which contains the fields queried in field_asts
    :return: {DataTuple} Standard resolver return value
    """

    # Take the camelized keys. We don't store data fields slugified. We leave them camelized
    selections = R.map(lambda sel: sel.name.value, context.field_asts[0].selection_set.selections)
    # Recover the json by parsing the string provided by GeometryCollection and mapping the geometries property to features
    json = R.compose(
        # Map the value GeometryCollection to FeatureCollection for the type property
        R.map_with_obj(lambda k, v: R.if_else(
            R.equals('type'),
            R.always('FeatureCollection'),
            R.always(v)
        )(k)),
        # Map geometries to features: [{type: Feature, geometry: geometry}]
        lambda dct: R.merge(
            # Remove geometries
            R.omit(['geometries'], dct),
            # Add features containing the geometries
            dict(features=R.map(
                lambda geometry: dict(type='Feature', geometry=geometry),
                R.prop_or([], 'geometries', dct))
            )
        ),
    )(ast.literal_eval(R.prop(context.field_name, resource).json))
    # Identify the keys that are actually in resource[json_field_name]
    all_selections = R.filter(
        lambda key: key in json,
        selections
    )
    # Pick out the values that we want
    result = R.pick(all_selections, json)

    # Return in the standard Graphene DataTuple
    return namedtuple('DataTuple', R.keys(result))(*R.values(result))
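# A sketch of the conversion this resolver performs: a stored GeometryCollection like
#   {'type': 'GeometryCollection', 'geometries': [{'type': 'Point', 'coordinates': [0, 0]}]}
# resolves as a FeatureCollection:
#   {'type': 'FeatureCollection',
#    'features': [{'type': 'Feature', 'geometry': {'type': 'Point', 'coordinates': [0, 0]}}]}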
    def convert_foreign_key_to_id(scope_obj):
        # Find ForeignKey attributes and map the class field name to the foreign key id field
        # E.g. region to region_id, user to user_id, etc
        converters = R.compose(
            R.from_pairs, R.map(lambda field: [field.name, field.attname]),
            R.filter(lambda field: R.isinstance(ForeignKey, field)))(
                model._meta.fields)
        # Convert scope_obj[related_field] = {id: x} to scope_obj[related_field_id] = x
        return R.from_pairs(
            R.map_with_obj_to_values(
                lambda key, value: [converters[key], R.prop('id', value)]
                if R.has(key, converters) else [key, value], scope_obj))
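# A sketch of the conversion with hypothetical values, assuming model has a
# region ForeignKey:
#
#   convert_foreign_key_to_id(dict(region=dict(id=5), name='A'))
#   # -> {'region_id': 5, 'name': 'A'}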
    def setUp(self):
        delete_sample_user_states()
        self.user_state_schema = create_user_state_config(
            default_class_config())
        self.user_states = create_sample_user_states(
            UserState, get_region_model(), get_project_model(),
            get_location_schema()['model_class'],
            get_search_location_schema()['model_class'])
        # Gather all unique sample users
        self.users = list(
            set(R.map(lambda user_state: user_state.user, self.user_states)))
        self.client = client_for_testing(schema(), self.users[0])
        # Gather all unique sample regions
        self.regions = R.compose(
            # Fourth, resolve persisted Regions
            R.map(lambda id: get_region_model().objects.get(id=id)),
            # Third make ids unique
            lambda ids: list(set(ids)),
            # Second map each to the region id
            R.map(R.item_str_path('region.id')),
            # First flat map the user regions of all user_states
            R.chain(lambda user_state: R.item_str_path(
                'data.userRegions', user_state.__dict__)))(self.user_states)
        # Gather all unique sample projects
        self.projects = R.compose(
            # Fourth, resolve persisted Projects
            R.map(lambda id: get_project_model().objects.get(id=id)),
            # Third make ids unique
            lambda ids: list(set(ids)),
            # Second map each to the project id
            R.map(R.item_str_path('project.id')),
            # First flat map the user projects of all user_states
            R.chain(lambda user_state: R.item_str_path(
                'data.userProjects', user_state.__dict__)))(self.user_states)
        self.locations = create_local_sample_locations(
            get_location_schema()['model_class'])

        def extract_search_location_ids(user_regions):
            return R.map(
                R.item_str_path('searchLocation.id'),
                R.chain(R.item_str_path('userSearch.userSearchLocations'),
                        user_regions))

        # Gather all unique searches locations from userRegions.
        # user searches could also be in userProjects, but we'll ignore that
        self.search_locations = R.compose(
            # Fourth, resolve persisted SearchLocations
            lambda ids: R.map(
                lambda id: get_search_location_schema()['model_class'].objects.get(id=id),
                ids),
            # Third make ids unique
            lambda ids: list(set(ids)),
            # Second chain to a flat list of user search location ids
            lambda user_regions: extract_search_location_ids(user_regions),
            # First flat map the user regions of all user_states
            R.chain(lambda user_state: R.item_str_path(
                'data.userRegions', user_state.__dict__)))(self.user_states)
        # Can be set by inheritors
        self.additional_user_scope_data = {}
def create_local_sample_locations(cls,
                                  sample_locations=local_sample_locations):
    """
        Create sample locations
    :param cls: THe Location class
    :param sample_locations Defaults to _sample_locations defined in this file. Apps using this can pass their own
    :return:
    """
    delete_sample_locations(cls)
    # Convert all sample location dicts to persisted Location instances
    # Give each reach an owner
    return R.map(lambda kv: create_sample_location(cls, kv[1]),
                 enumerate(sample_locations))
def create_sample_settings_sets(cls):
    """
        Create sample settings
    :param cls: The Settings class
    :return:
    """
    delete_sample_settings(cls)
    # Convert all sample settings dicts to persisted Settings instances
    # Give each one an owner
    return R.map(
        lambda kv: create_sample_settings(cls, kv[1]),
        enumerate(sample_settings)
    )
    def sample_user_state_with_search_locations_and_additional_scope_instances(
            user_scope_name, sample_user_state):
        return R.fake_lens_path_set(
            f'data.{user_scope_name}'.split('.'),
            R.map(
                lambda user_scope: R.compose(
                    # Gives applications a chance to add the needed additional scope instances,
                    # e.g. userDesignFeatures
                    lambda user_scope: create_additional_scope_instance_properties(user_scope),
                    lambda user_scope: R.merge(
                        user_scope,
                        dict(userSearch=dict(userSearchLocations=R.map(
                            lambda i_search_location: dict(
                                # Just return with the id since the full data is in the database
                                searchLocation=R.pick(['id'], i_search_location[1]),
                                # Set the first search_location to active
                                activity=dict(isActive=i_search_location[0] == 0)),
                            enumerate(search_locations))))))(user_scope),
                R.item_str_path(f'data.{user_scope_name}', sample_user_state)),
            sample_user_state)
def quote_list(lst, tab):
    """
        Recursively quotes list values
    :param lst
    :return:
    """
    t = '\t' * tab

    return '[\n{0}{1}\n]'.format(
        t,
        '\n{0}'.format(t).join(
            R.map(lambda item: str(quote(item, tab)), lst)
        )
    )
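# A sketch of the output with hypothetical values, assuming quote returns these
# scalars unchanged:
#
#   quote_list(['a', 'b'], 1)
#   # -> '[\n\ta\n\tb\n]'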
def quiz_model_mutation_update(client, graphql_update_or_create_function,
                               create_path, update_path, values,
                               update_values):
    """
        Tests an update mutation for a model by calling a create with the given values then an update
        with the given update_values (plus the create id)
    :param client: The Apollo Client
    :param graphql_update_or_create_function: The update or create mutation function for the model. Expects client and input values
    :param create_path: The path to the result of the create in the data object (e.g. createRegion.region)
    :param update_path: The path to the result of the update in the data object (e.g. updateRegion.region)
    :param values: The input values to use for the create
    :param update_values: The input values to use for the update. This can be as little as one key value
    :return:
    """
    result = graphql_update_or_create_function(client, values=values)
    assert not R.has('errors', result), R.dump_json(
        R.map(lambda e: format_error(e), R.prop('errors', result)))
    # Extract the result and map the graphql keys to match the python keys
    created = R.compose(
        lambda r: R.map_keys(lambda key: underscore(key), r),
        lambda r: R.item_str_path(f'data.{create_path}', r))(result)
    # Check that the created instance contains the input values
    assert values == pick_deep(created, values)
    # Update with the id and optionally key if there is one + update_values
    update_result = graphql_update_or_create_function(
        client,
        R.merge_all([
            dict(id=created['id']),
            dict(key=created['key'])
            if R.prop_or(False, 'key', created) else {}, update_values
        ]))
    assert not R.has('errors', update_result), R.dump_json(
        R.map(lambda e: format_error(e), R.prop('errors', update_result)))
    updated = R.item_str_path(f'data.{update_path}', update_result)
    assert created['id'] == updated['id']
    assert update_values == pick_deep(update_values, updated)
    return result, update_result