def quiz_model_query(client,
                     model_query_function,
                     result_name,
                     variables,
                     expect_length=1):
    """
        Tests a query for a model with variables that produce exactly one result
    :param client: Apollo client
    :param model_query_function: Query function expecting the client and variables
    :param result_name: The name of the result object in the data object
    :param variables: key value variables for the query
    :param expect_length: Default 1. Optional number items to expect
    :return: returns the result for further assertions
    """
    all_result = model_query_function(client)
    assert not R.has('errors', all_result), R.dump_json(
        R.map(lambda e: format_error(e), R.prop('errors', all_result)))
    result = model_query_function(client, variables=variables)
    # Check against errors
    assert not R.has('errors', result), R.dump_json(
        R.map(lambda e: format_error(e), R.prop('errors', result)))
    # Simple assertion that the query looks good
    assert expect_length == R.length(R.item_path(['data', result_name],
                                                 result))
    return result
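
A hedged usage sketch: graphql_query_regions and the 'regions' result name below are illustrative assumptions, not names confirmed by this code.

regions_result = quiz_model_query(
    client,
    graphql_query_regions,
    'regions',
    variables=dict(key='earth')
)
# regions_result['data']['regions'] now holds exactly one region for further assertions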
Example #2
 def test_query(self):
     all_result = graphql_query_resources(self.client)
     assert not R.has('errors', all_result), R.dump_json(R.prop('errors', all_result))
     results = graphql_query_resources(self.client, dict(name='String'), variable_values=dict(name='Minerals'))
     # Check against errors
     assert not R.has('errors', results), R.dump_json(R.prop('errors', results))
     assert 1 == R.length(R.item_path(['data', 'resources'], results))
Example #3
    def mutate(self, info, resource_data=None):
        # We must merge in existing resource.data if we are updating
        if R.has('id', resource_data):
            # New data gets priority, but this is a deep merge.
            resource_data['data'] = R.merge_deep(
                Resource.objects.get(id=resource_data['id']).data,
                R.prop_or({}, 'data', resource_data))

        # Make sure that all props are unique that must be, either by modifying values or erring
        modified_resource_data = enforce_unique_props(resource_fields,
                                                      resource_data)
        update_or_create_values = input_type_parameters_for_update_or_create(
            resource_fields, modified_resource_data)

        # Add the sankey data unless we are updating the instance without updating instance.data.
        # This modifies the defaults value to add .data.graph. We could decide in the future to
        # generate this derived data on the client, but it's easy enough to do here
        update_or_create_values_with_sankey_data = R.merge(
            update_or_create_values,
            dict(defaults=add_sankey_graph_to_resource_dict(
                update_or_create_values['defaults']))) if R.has(
                    'defaults',
                    update_or_create_values) else update_or_create_values

        resource, created = update_or_create_with_revision(
            Resource, update_or_create_values_with_sankey_data)
        return UpsertResource(resource=resource)
def resolver_for_dict_list(resource, context, **kwargs):
    """
        Resolver for the data field that is a list. This extracts the desired json fields from the context
        and creates a tuple of the field values. Graphene has no built in way for drilling into json types.
        The property value must be a list or null. Null values will return null, list values will be processed
        in turn by graphene
    :param resource:
    :param context:
    :params kwargs: Arguments to filter with
    :return:
    """
    selections = resolve_selections(context)
    field_name = context.field_name
    # Value defaults to None. Empty is not the same as None
    value = R.prop(field_name, resource) if R.has(field_name, resource) else None

    return R.map(
        lambda data: pick_selections(selections, data),
        R.filter(
            # We only let this value through if it matches the kwargs
            # TODO data doesn't include full values for embedded model values, rather just {id: ...}. So if kwargs have
            # searches on other values of the model this will fail. The solution is to load the model values, but I
            # need some way to figure out where they are in data
            lambda data: R.dict_matches_params_deep(kwargs, data),
            value
        )
    ) if value else value
Example #5
 def instance_version(self):
     """
         Uses self._version if defined, else assumes the latest version
     :return:
     """
     return self._version if R.has('_version',
                                   self) else self.latest_version
Example #6
 def test_create(self):
     values = dict(
         name='Candy',
         region=dict(id=R.head(self.regions).id),
         data=R.merge(
             sample_settings,
             dict(
                 material='Candy',
                 raw_data=[
                     'Other Global Imports;Shipments, location generalized;51.309933, 3.055030;Source;22,469,843',
                     'Knauf (Danilith) BE;Waregemseweg 156-142 9790 Wortegem-Petegem, Belgium;50.864762, 3.479308;Conversion;657,245',
                     "MPRO Bruxelles;Avenue du Port 67 1000 Bruxelles, Belgium;50.867486, 4.352543;Distribution;18,632",
                     'Residential Buildings (all typologies);Everywhere in Brussels;NA;Demand;3,882,735',
                     'Duplex House Typology;Everywhere in Brussels;NA;Demand;13,544',
                     'Apartment Building Typology;Everywhere in Brussels;NA;Demand;34,643',
                     'New West Gypsum Recycling;9130 Beveren, Sint-Jansweg 9 Haven 1602, Kallo, Belgium;51.270229, 4.261048;Reconversion;87,565',
                     'Residential Buildings (all typologies);Everywhere in Brussels;NA;Sink;120,000',
                     'RecyPark South;1190 Forest, Belgium;50.810799, 4.314789;Sink;3,130',
                     'RecyPark Nord;Rue du Rupel, 1000 Bruxelles, Belgium;50.880181, 4.377136;Sink;1,162'
                 ]
             )
         )
     )
     result = graphql_update_or_create_resource(self.client, values)
     dump_errors(result)
     assert not R.has('errors', result), R.dump_json(R.prop('errors', result))
      # Check the created resource, omitting nondeterministic props
     result_path_partial = R.item_path(['data', 'createResource', 'resource'])
     self.assertMatchSnapshot(R.omit(omit_props, result_path_partial(result)))
Example #7
    def mutate(self, info, settings_data=None):
        # We must merge in existing settings.data if we are updating data
        if R.has('id', settings_data) and R.has('data', settings_data):
            # New data gets priority, but this is a deep merge.
            settings_data['data'] = R.merge_deep(
                Settings.objects.get(id=settings_data['id']).data,
                settings_data['data'])

        # Make sure that all props are unique that must be, either by modifying values or erring.
        modified_settings_data = enforce_unique_props(settings_fields,
                                                      settings_data)
        update_or_create_values = input_type_parameters_for_update_or_create(
            settings_fields, modified_settings_data)

        settings, created = update_or_create_with_revision(
            Settings, update_or_create_values)
        return UpsertSettings(settings=settings)
def quiz_model_mutation_create(client,
                               graphql_update_or_create_function,
                               result_path,
                               values,
                               second_create_results=None,
                               second_create_does_update=False):
    """
        Tests a create mutation for a model
    :param client: The Apollo Client
    :param graphql_update_or_create_function: The update or create mutation function for the model. Expects client and input values
    :param result_path: The path to the result of the create in the data object (e.g. createRegion.region)
    :param values: The input values to use for the create
    :param second_create_results: Optional dict. If specified, tests a second create with the same values
    to make sure it either creates a new instance or updates the existing one, depending on what you expect it to do.
    The values of this dict should be regexes that match the created instance
    :param second_create_does_update: Default False. If True, expects a second create with the same values to update rather than create a new instance
    :return: Tuple of two results. The second is None unless second_create_results is specified
    """
    result = graphql_update_or_create_function(client, values=values)

    result_path_partial = R.item_str_path(f'data.{result_path}')
    assert not R.has('errors', result), R.dump_json(
        R.map(lambda e: format_error(e), R.prop('errors', result)))
    # Get the created value, using underscore to make the camelcase keys match python keys
    created = R.map_keys(lambda key: underscore(key),
                         result_path_partial(result))
    # get all the keys in values that are in created. This should match values if created has everything we expect
    assert values == pick_deep(created, values)
    # Try creating with the same values again; unique constraints will either force a new create or cause an update
    if second_create_results:
        new_result = graphql_update_or_create_function(client, values=values)
        assert not R.has('errors', new_result), R.dump_json(
            R.map(lambda e: format_error(e), R.prop('errors', new_result)))
        created_too = result_path_partial(new_result)
        if second_create_does_update:
            assert created['id'] == created_too['id']
        else:
            assert created['id'] != created_too['id']
        for path, value in R.flatten_dct(second_create_results, '.').items():
            assert re.match(value, R.item_str_path_or(None, path, created_too))
    else:
        new_result = None

    return result, new_result
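
A hedged usage sketch: graphql_update_or_create_region and the result path below are illustrative assumptions.

result, second_result = quiz_model_mutation_create(
    client,
    graphql_update_or_create_region,
    'createRegion.region',
    dict(key='earth', name='Earth'),
    # Expect the second create to make a new instance whose key was incremented for uniqueness
    second_create_results=dict(key=r'earth.+'),
    second_create_does_update=False
)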
Example #9
def dump_errors(result):
    """
        Dump any errors in the result to stderr
    :param result:
    :return:
    """
    if R.has('errors', result):
        for error in result['errors']:
            logger.error(format_error(error))
            if 'stack' in error:
                traceback.print_tb(error['stack'], limit=10, file=sys.stderr)
Example #10
    def mutate(self, info, location_data=None):
        with transaction.atomic():
            deleted_location_response = delete_if_marked_for_delete(Location, UpsertLocation, 'location', location_data)
            if deleted_location_response:
                return deleted_location_response

            # We must merge in existing location.data if we are updating data
            if R.has('id', location_data) and R.has('data', location_data):
                # New data gets priority, but this is a deep merge.
                location_data['data'] = R.merge_deep(
                    Location.objects.get(id=location_data['id']).data,
                    location_data['data']
                )

            # Make sure that all props are unique that must be, either by modifying values or erring.
            modified_location_data = enforce_unique_props(location_fields, location_data)
            update_or_create_values = input_type_parameters_for_update_or_create(location_fields, modified_location_data)

            location, created = update_or_create_with_revision(Location, update_or_create_values)
            return UpsertLocation(location=location)
def create_sample_location(cls, location_dict):
    # Save the location with the complete data
    if R.has('key', location_dict):
        # rescape_region uses a key for uniqueness
        return cls.objects.update_or_create(key=R.prop('key', location_dict),
                                            defaults=R.omit(['key'],
                                                            location_dict))[0]
    else:
        # other implementors should delete duplicates first
        location = cls(**location_dict)
        location.save()
        return location
 def convert_foreign_key_to_id(scope_obj):
     # Find ForeignKey attributes and map the class field name to the foreign key id field
     # E.g. region to region_id, user to user_id, etc
     converters = R.compose(
         R.from_pairs, R.map(lambda field: [field.name, field.attname]),
         R.filter(lambda field: R.isinstance(ForeignKey, field)))(
             model._meta.fields)
     # Convert scope_obj[related_field] = {id: x} to scope_obj[related_field_id] = x
     return R.from_pairs(
         R.map_with_obj_to_values(
             lambda key, value: [converters[key],
                                 R.prop('id', value)]
             if R.has(key, converters) else [key, value], scope_obj))
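
To make the transformation concrete, a minimal hypothetical illustration, assuming model has a ForeignKey field named region:

scope_obj = dict(region=dict(id=5), name='Belgium')
# convert_foreign_key_to_id(scope_obj) would then return:
# dict(region_id=5, name='Belgium')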
Example #13
def increment_prop_until_unique(django_class, strategy, prop, additional_filter_props, django_instance_data):
    """
        Increments the given prop of the given django as given by data['prop'] until it matches nothing in
        the database. Note that this includes checks against soft deleted instances where the deleted prop is non-null
        (assumes the use of SafeDeleteModel on the model class)
    :param django_class: Django class to query
    :param prop: The prop to ensure uniqueness
    :param additional_filter_props: Other props, such as user id, to filter by. This allows incrementing a name
    dependent on the current user, for instance. This can be a dict or a function expecting the django_instance_data
    and returning a dict
    :param strategy: function to try to make a value unique. Expects all potential matching values--all values
    that begin with the value of the property--the prop value, and the current index. It's called for each matching
    value to guarentee the strategy will eventually get a unique value. For instance, if prop is key and it equals
    'foo', and 'foo', 'foo1', 'foo2', and 'foo3' are in the db, strategy will be called with an array of 4 values 4
    times, with index 0 through 3. If strategy is None the default strategy is to append index+1 to the duplicate name
    :param django_instance_data: The data containing the prop
    :return: The data merged with the uniquely named prop
    """
    prop_value = R.prop(prop, django_instance_data)
    pk = R.prop_or(None, 'id', django_instance_data)

    strategy = strategy or default_strategy
    # Include deleted objects here. It's up to additional_filter_props to deal with the deleted=date|None property
    all_objects = django_class.all_objects if R.has('all_objects', django_class) else django_class.objects
    matching_values = all_objects.filter(
        # Ignore value matching the pk if this is an update operation.
        # In other words we can update the key to what it already is, aka do nothing
        *R.compact([
            ~Q(id=pk) if pk else None,
        ]),
        **R.merge(
            {'%s__startswith' % prop: prop_value},
            # Give the filter props the instance if they are a function
            R.when(
                lambda f: inspect.isfunction(f),
                lambda f: f(django_instance_data)
            )(additional_filter_props or {})
        )
    ).values_list(prop, flat=True).order_by(prop)

    success = prop_value
    for i, matching_key in enumerate(matching_values):
        success = None
        attempt = strategy(matching_values, prop_value, i)
        if attempt not in matching_values:
            success = attempt
            break
    if not success:
        raise Exception("Could not generate unique prop value %s. The following matching ones exist %s" % (
            prop_value, matching_values))
    return R.merge(django_instance_data, {prop: success})
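
The default_strategy referenced above is not shown here; a minimal sketch consistent with the docstring (append index+1 to the duplicate value) might look like:

def default_strategy(matching_values, prop_value, index):
    # e.g. 'foo' -> 'foo1', 'foo2', ... one candidate per matching value,
    # so some candidate is guaranteed to miss the finite list of matches
    return '%s%s' % (prop_value, index + 1)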
def quiz_model_mutation_update(client, graphql_update_or_create_function,
                               create_path, update_path, values,
                               update_values):
    """
        Tests an update mutation for a model by calling a create with the given values then an update
        with the given update_values (plus the create id)
    :param client: The Apollo Client
    :param graphql_update_or_create_function: The update or create mutation function for the model. Expects client and input values
    :param create_path: The path to the result of the create in the data object (e.g. createRegion.region)
    :param update_path: The path to the result of the update in the data object (e.g. updateRegion.region)
    :param values: The input values to use for the create
    :param update_values: The input values to use for the update. This can be as little as one key value
    :return:
    """
    result = graphql_update_or_create_function(client, values=values)
    assert not R.has('errors', result), R.dump_json(
        R.map(lambda e: format_error(e), R.prop('errors', result)))
    # Extract the result and map the graphql keys to match the python keys
    created = R.compose(
        lambda r: R.map_keys(lambda key: underscore(key), r),
        lambda r: R.item_str_path(f'data.{create_path}', r))(result)
    # Assert that the created instance contains all the input values
    assert values == pick_deep(created, values)
    # Update with the id and optionally key if there is one + update_values
    update_result = graphql_update_or_create_function(
        client,
        R.merge_all([
            dict(id=created['id']),
            dict(key=created['key'])
            if R.prop_or(False, 'key', created) else {}, update_values
        ]))
    assert not R.has('errors', update_result), R.dump_json(
        R.map(lambda e: format_error(e), R.prop('errors', update_result)))
    updated = R.item_str_path(f'data.{update_path}', update_result)
    assert created['id'] == updated['id']
    assert update_values == pick_deep(update_values, updated)
    return result, update_result
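
A hedged usage sketch: graphql_update_or_create_region and the result paths below are illustrative assumptions.

create_result, update_result = quiz_model_mutation_update(
    client,
    graphql_update_or_create_region,
    'createRegion.region',
    'updateRegion.region',
    dict(key='earth', name='Earth'),
    # One changed prop suffices; the helper merges in the created id (and key) automatically
    dict(name='Earth 2.0')
)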
Example #15
def dump_errors(result):
    """
        Dump any errors in the result to stderr
    :param result:
    :return:
    """
    if R.has('errors', result):
        for error in result['errors']:
            if hasattr(error, 'stack'):
                # Synchronous calls
                # See https://github.com/graphql-python/graphql-core/issues/237
                tb = error.stack
            else:
                # Promises
                tb = error.__traceback__
            logger.error(''.join(traceback.format_tb(tb)))
            # This includes the formatted error message
            logger.exception(format_error(error))
def resolver_for_dict_field(resource, context, **kwargs):
    """
        Resolver for the data field. This extracts the desired json fields from the context
        and creates a tuple of the field values. Graphene has no built in way for drilling into json types
    :param resource:
    :param context:
    :params kwargs: Arguments to filter with
    :return:
    """
    selections = resolve_selections(context)
    field_name = context.field_name
    # Get the value, even if non truthy if the attribute exists
    data = R.prop(field_name, resource) if R.has(field_name, resource) else {}
    # We only let this value through if it matches the kwargs
    # TODO data doesn't include full values for embedded model values, rather just {id: ...}. So if kwargs have
    # searches on other values of the model this will fail. The solution is to load the model values, but I
    # need some way to figure out where they are in data
    passes = R.dict_matches_params_deep(kwargs, data)
    # Pick the selections from our resource json field value default to {} if resource[field_name] is null
    return pick_selections(selections, data) if passes else namedtuple('DataTuple', [])()
Example #17
def enforce_unique_props(property_fields, django_instance_data):
    """
        Called in the mutate function of the Graphene Type class. Ensures that all properties marked
        as unique_with are given unique values
    :param property_fields: The Graphene Type property fields dict. This is checked for unique_with,
    which when present points at a function that expects the django_instance_data and returns the django_instance_data
    modified so that the property in question has a unique value.
    :param django_instance_data: dict of an instance to be created or updated
    :return: The modified django_instance_data for any property that needs to have a unique value
    """

    # If any prop needs to be unique, run its unique_with function, which updates it to a unique value
    # by querying the database for duplicates. This is mainly for non-pk fields like a key
    return R.reduce(
        lambda reduced, prop_field_tup: prop_field_tup[1]['unique_with'](reduced) if
        R.has(prop_field_tup[0], reduced) and R.prop_or(False, 'unique_with', prop_field_tup[1]) else
        reduced,
        django_instance_data,
        property_fields.items()
    )
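
For context, a hypothetical property_fields shape this function consumes, wiring unique_with to the increment_prop_until_unique helper shown earlier (Region and the field names are assumptions):

region_fields = dict(
    # key must be unique, so unique_with rewrites the instance data when a duplicate exists
    key=dict(unique_with=lambda instance_data: increment_prop_until_unique(
        Region, None, 'key', {}, instance_data)),
    # name carries no uniqueness requirement, so no unique_with function
    name=dict()
)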
def quiz_model_versioned_query(client, model_class, model_query, result_name,
                               version_count_expected, props, omit_props):
    """
        Tests a versioned query for a model with variables
    :param client: Apollo client
    :param model_class: Model class
    :param model_query: Model's query that should return one result (as a filter)
    number of items in the database that match props
    :param result_name: The name of the results in data.[result_name].objects
    :param version_count_expected The number of versions of the instance we expect
    :param props: The props to query to find a single instance. Should just be {id:...}
    :param omit_props: Props to omit from assertions because they are nondeterminate
    :return:
    """
    result = model_query(
        client,
        variables=dict(objects=R.to_array_if_not(dict(instance=props))))
    # Check against errors
    assert not R.has('errors', result), R.dump_json(
        R.map(lambda e: format_error(e), R.prop('errors', result)))
    assert R.compose(
        R.length, R.item_str_path_or(
            [],
            f'data.{result_name}.objects'))(result) == version_count_expected
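
A hedged usage sketch: graphql_query_locations_versioned, the result name, and created_location are illustrative assumptions.

# Assume created_location was saved and then updated once, yielding two versions
quiz_model_versioned_query(
    client,
    Location,
    graphql_query_locations_versioned,
    'locationsVersioned',
    version_count_expected=2,
    props=dict(id=created_location.id),
    omit_props=[]
)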
    def process_stage(stage, i):
        # Get the nodes of the current stage as sources, if there are any in nodes_by_stages
        sources = R.prop_or(None, R.prop('key', stage), nodes_by_stages)
        if not sources:
            return []
        # Iterate through the stages until one with nodes is found
        target_stage = None
        try:
            target_stage = R.find(
                # Try to find nodes matching this potential target stage. There might not be any
                lambda stage: nodes_by_stages[R.prop('key', stage)]
                if R.has(R.prop('key', stage), nodes_by_stages) else None,
                stages[i + 1:R.length(stages)])
        except ValueError:
            # That's fine; R.find raises if nothing is found. We really need R.first
            pass

        # If no more stages contain nodes, we're done
        if not target_stage:
            return []
        targets = nodes_by_stages[R.prop('key', target_stage)]

        def prop_lookup(node, prop):
            return R.prop(
                prop, dict(zip(node['properties'], node['property_values'])))

        # Create the link with the source_node and target_node. Later we'll add
        # in source and target that points to the nodes overall index in the graph,
        # but we don't want to compute the overall indices yet
        return R.chain(
            lambda source: R.map(
                lambda target: dict(source_node=source,
                                    target_node=target,
                                    value=string_to_float(
                                        prop_lookup(source, value_key))),
                targets), sources)
def quiz_model_paginated_query(client,
                               model_class,
                               paginated_query,
                               result_name,
                               page_count_expected,
                               props,
                               omit_props,
                               order_by=None,
                               page_size=1):
    """
        Tests a pagination query for a model with variables
    :param client: Apollo client
    :param model_class: Model class
    :param paginated_query: Model's pagination query
    :param page_count_expected: The number of pages expected when the page_size is 1, in other words the
    number of items in the database that match props
    :param result_name: The name of the results in data.[result_name].objects
    :param props: The props to query, not including pagination
    :param omit_props: Props to omit from assertions because they are nondeterminate
    :param order_by: Order by page-level prop
    :param page_size: Default 1
    :return the first result (first page) and final result (last page) for further testing:
    """
    result = paginated_query(client,
                             variables=dict(page=1,
                                            page_size=page_size,
                                            order_by=order_by,
                                            objects=R.to_array_if_not(props)))

    # Check against errors
    assert not R.has('errors', result), R.dump_json(
        R.map(lambda e: format_error(e), R.prop('errors', result)))
    first_page_objects = R.item_path(['data', result_name, 'objects'], result)
    # Assert we got page_size results on the first page
    assert page_size == R.compose(
        R.length,
        R.map(R.omit(omit_props)),
    )(first_page_objects)

    remaining_ids = list(
        set(
            R.map(
                R.prop('id'),
                model_class.objects.filter(*process_filter_kwargs(
                    model_class, **R.map_keys(underscore, props))).order_by(
                        *order_by.split(',')))) -
        set(R.map(R.compose(int, R.prop('id')), first_page_objects)))

    page_info = R.item_path(['data', result_name], result)
    # With the given page_size, the total number of pages should be what we
    # specified for page_count_expected
    assert page_info['pages'] == page_count_expected
    assert page_info['hasNext'] is True
    assert page_info['hasPrev'] is False
    # Get the final page
    new_result = paginated_query(client,
                                 variables=dict(
                                     page=page_count_expected,
                                     page_size=page_info['pageSize'],
                                     order_by=order_by,
                                     objects=R.to_array_if_not(props)))
    # Make sure the new_result matches one of the remaining ids
    assert R.contains(
        R.item_path(['data', result_name, 'objects', 0, 'id'], new_result),
        remaining_ids)

    new_page_info = R.item_path(['data', result_name], new_result)
    # Still expect the same page count
    assert new_page_info['pages'] == page_count_expected
    # Make sure it's the last page
    assert new_page_info['hasNext'] is False
    assert new_page_info['hasPrev'] is True
    return [result, new_result]
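
A hedged usage sketch: graphql_query_locations_paginated, the result name, and the filter prop are illustrative assumptions.

first_page, last_page = quiz_model_paginated_query(
    client,
    Location,
    graphql_query_locations_paginated,
    'locationsPaginated',
    page_count_expected=3,
    props=dict(nameContains='RecyPark'),
    omit_props=['createdAt', 'updatedAt'],
    order_by='id',
    page_size=1
)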
Example #21
 def test_query_current_user(self):
     result = user_schema.graphql_query_current_user(
         self.client,
     )
     assert not R.has('errors', result), R.dump_json(R.map(lambda e: format_error(e), R.prop('errors', result)))
Example #22
 def test_query_current_user_no_auth(self):
     result = user_schema.graphql_query_current_user(
         client_for_testing(schema, None)
     )
     assert not R.has('errors', result), R.dump_json(R.map(lambda e: format_error(e), R.prop('errors', result)))