def generate_model(model, size, shuffle=None):
    """ Generate model

    Generates 'size' sample instances of the given model and stores them in a
    temporary database.

    Args :
        model : A reference to the class of the given model.
        size : An integer, the number of sample instances to be generated.
        shuffle : An optional boolean that determines whether the sample
                  input will be shuffled. Defaults to True.

    Returns :
        A tuple that contains a reference to the class of the given model,
        and a list of the fields that were not computed.
    """
    unique_fields = [(field.name,) for field in list_of_fields(model)
                     if (hasattr(field, 'unique') and field.unique
                         and not is_auto_field(field))]
    unique_together = []
    if hasattr(model._meta, 'unique_together'):
        unique_together = list(model._meta.unique_together)
    unique = unique_together + unique_fields
    unique = sort_unique_tuples(unique, model)
    unique_constraints = [unique_items(un_tuple) for un_tuple in unique]
    constraints = []
    if hasattr(model, 'Constraints'):
        constraints = model.Constraints.constraints
    constraints += unique_constraints
    if shuffle is None:
        shuffle = True
    to_be_computed = []
    # 'dfs.size' acts as a shared countdown of instances left to generate.
    dfs.size = size
    dfs([], 0, to_be_computed, constraints, model, shuffle)
    return model, to_be_computed
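

# A minimal sketch (hypothetical model): how a model can restrict generation
# through an inner Constraints class, whose 'constraints' list of predicates
# is picked up by generate_model above.
#
#     class Event(Model):
#         start_date = DateField()
#         end_date = DateField()
#
#         class Constraints:
#             constraints = [dates_ordered]   # predicates, see the dfs docstring
#
#     generate_model(Event, 50)               # generate 50 sample Event rows
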
def field_sample_values(field):
    """ Field sample values

    Retrieves the list of sample values for a given field.

    Args :
        field : a reference to the class of the field.

    Returns :
        A list of sample values for the given field.
    """
    list_field_values = []
    if not is_auto_field(field):
        if is_reverse_related(field):
            # TODO(mostafa-mahmoud): Check if this case needs to be handled.
            pass
        elif is_related(field):
            model = field.rel.to
            list_field_values = list(model.objects.all())
            if 'ManyToMany' in relation_type(field) and list_field_values:
                # For many-to-many relations, offer a single random subset of
                # the existing related objects as the candidate value.
                siz = random.randint(1, len(list_field_values))
                list_field_values = [random.sample(list_field_values, siz)]
        else:
            found = False
            if hasattr(field.model, 'TestData'):
                # Walk up the inheritance chain until a TestData class that
                # declares this field is found, or the chain is exhausted.
                model = field.model
                while (model.__base__ != Model
                       and not hasattr(model.TestData, field.name)):
                    model = model.__base__
                if field.name in model.TestData.__dict__:
                    found = True
                    input_method = model.TestData.__dict__[field.name]
                    if isinstance(input_method, str):
                        # A string names a sample file under the app's
                        # TestTemplates directory, one value per line.
                        app_name = field.model._meta.app_label
                        path = '%s/TestTemplates/%s' % (app_name, input_method)
                        with open(path, 'r') as input_file:
                            list_field_values = [line.rstrip('\n')
                                                 for line in input_file]
                    elif isinstance(input_method, (list, tuple)):
                        list_field_values = input_method
                    elif inspect.isfunction(input_method):
                        list_field_values = input_method()
            if not found:
                # Fall back to a per-field sample file named
                # sample__<ModelName>__<field_name> under TestTemplates.
                app_name = field.model._meta.app_label
                path = '%s/TestTemplates/sample__%s__%s' % (
                    app_name, field.model.__name__, field.name)
                if os.path.exists(path):
                    with open(path, 'r') as input_file:
                        list_field_values = [line.rstrip('\n')
                                             for line in input_file]
                else:
                    list_field_values = generate_random_values(field)
    return list(list_field_values)
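

# A minimal sketch (hypothetical model, file and function names): the three
# forms of sample data an inner TestData class may declare, matching the
# lookup in field_sample_values above. Fields without a TestData entry fall
# back to '<app>/TestTemplates/sample__<Model>__<field>' and finally to
# generate_random_values.
#
#     class Person(Model):
#         name = CharField(max_length=50)
#         age = PositiveIntegerField()
#         bio = TextField()
#
#         class TestData:
#             name = 'sample_names'      # file under <app>/TestTemplates/
#             age = [20, 25, 30, 35]     # explicit list (or tuple) of values
#             bio = make_sample_bios     # plain function returning a list
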
def dfs(instances, cur_tuple, index, to_be_computed, constraints,
        model, to_be_shuffled):
    """
    Value generator for the fields of a given model by simulating
    a depth first search. The model will be saved in a (temporary) database.

    The interface of the predicate should be:
        boolean predicate(cur_tuple, model, field)
         - cur_tuple: List of tuples of the filled values of the field being
                      filled, in the format (str:field_name , field_value).
         - model: A reference to the class of the given model.
         - field: A reference to the class of the field being generated

         The predicate should handle the case where the given tuple is not
         yet complete, may rely on the previously generated models being
         stored in the temporary database, and should return True only if
         the required constraint is satisfied.

    :param int instances:
        The target number of generated instances of the model.
    :param cur_tuple:
        A list of pairs str:field_name, field_value of the values of
        the filled fields.
    :type cur_tuple: List(pair(str, .))
    :param int index:
        The index of the field being filled in the list of fields.
    :param List to_be_computed:
        A list used for accumulation of the ignored fields.
    :param List constraints:
        A list of predicate functions that will constrain the output.
    :param DjangoModel model: A reference to the class of the given model.
    :param boolean to_be_shuffled:
        A boolean variable that will determine if the sample data
        will be shuffled or not.
    :return: The number of instances of the model generated in this branch.
    :rtype: int
    """
    fields = list_of_fields(model)
    if index >= len(fields):
        dfs.total += 1
        create_model(model, cur_tuple)
        return 1
    else:
        list_field_values = field_sample_values(fields[index])
        if not list_field_values:
            many_to_many_related = (is_related(fields[index]) and 'ManyToMany'
                                    in relation_type(fields[index]))
            optional_field = not is_required(fields[index])
            auto_fld = is_auto_field(fields[index])
            if many_to_many_related or optional_field or auto_fld:
                if not is_auto_field(fields[index]):
                    to_be_computed.append(fields[index])
                return dfs(instances, cur_tuple, index + 1, to_be_computed,
                           constraints, model, to_be_shuffled)
            # A required field with no sample values: nothing can be
            # generated along this branch.
            return 0
        else:
            if to_be_shuffled:
                random.shuffle(list_field_values)
            instances_so_far = 0
            for field_id, nxt_field in enumerate(list_field_values):
                new_tuple = cur_tuple[:]
                new_tuple.append((fields[index].name, nxt_field))
                are_constraints_satisfied = True
                for cons in constraints:
                    if not cons(new_tuple, model, fields[index]):
                        are_constraints_satisfied = False
                        break
                if are_constraints_satisfied:
                    instances_remaining = instances - instances_so_far
                    remaining_values = len(list_field_values) - field_id
                    value_instances = ((instances_remaining - 1 +
                                        remaining_values) // remaining_values)
                    new_instances = dfs(value_instances, new_tuple, index + 1,
                                        to_be_computed, constraints, model,
                                        to_be_shuffled)
                    instances_so_far += new_instances
                    if instances_so_far >= instances or dfs.total >= dfs.size:
                        return instances_so_far
            return instances_so_far
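

# Illustrative example (hypothetical field names): a constraint predicate
# with the interface documented above. It must tolerate partial tuples, so
# it only rejects a combination once both fields have been filled. Such
# predicates can be listed in a model's Constraints.constraints.
def example_dates_ordered(cur_tuple, model, field):
    filled = dict(cur_tuple)
    if 'start_date' in filled and 'end_date' in filled:
        return filled['start_date'] <= filled['end_date']
    return True
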
def dfs(cur_tuple, index, to_be_computed, constraints, model, to_be_shuffled):
    """ Depth first search

    Generates values for the fields of a given model by simulating
    a depth first search.

    Args :
        cur_tuple : A list of (field_name, field_value) pairs for the fields
                    filled so far.
        index : the index of the field being filled in the list of fields.
        to_be_computed : A list used for accumulation of the ignored fields.
        constraints : A list of predicate functions that will constrain
                      the output.
        model : a reference to the class of the given model.
        to_be_shuffled : A boolean variable that will determine if the sample
                         data will be shuffled or not.

    Returns:
        True once 'dfs.size' instances have been generated, None otherwise.

    The model will be saved in a temporary database.

    The interface of the predicate should be :
        predicate(cur_tuple, model, field)
            where:
                - cur_tuple : list of the values filled so far, as
                              (field name, field value) tuples.

                - model : a reference to the class of the given model.

                - field : A reference to the class of the field being generated

         The predicate should handle the case where the given tuple is not
         yet complete, may rely on the previously generated models being
         stored in the temporary database, and should return True only if
         the required constraint is satisfied.

    """
    fields = list_of_fields(model)
    if dfs.size <= 0:
        return True
    if index >= len(fields):
        dfs.size -= 1
        create_model(model, cur_tuple)
    else:
        list_field_values = field_sample_values(fields[index])
        if not list_field_values:
            many_to_many_related = (is_related(fields[index]) and 'ManyToMany'
                                    in relation_type(fields[index]))
            optional_field = not is_required(fields[index])
            auto_fld = is_auto_field(fields[index])
            if many_to_many_related or optional_field or auto_fld:
                if not is_auto_field(fields[index]):
                    to_be_computed.append(fields[index])
                return dfs(cur_tuple, index + 1, to_be_computed,
                           constraints, model, to_be_shuffled)
        else:
            if to_be_shuffled:
                random.shuffle(list_field_values)
            for nxt_field in list_field_values:
                new_tuple = cur_tuple[:]
                new_tuple.append((fields[index].name, nxt_field))
                are_constraints_satisfied = True
                for cons in constraints:
                    if not cons(new_tuple, model, fields[index]):
                        are_constraints_satisfied = False
                        break
                if are_constraints_satisfied:
                    is_done = dfs(new_tuple, index + 1, to_be_computed,
                                  constraints, model, to_be_shuffled)
                    if is_done:
                        return True
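

# Illustrative note: when the recursion reaches the end of the field list,
# cur_tuple holds one (field_name, value) pair per generated field, e.g.
#
#     [('name', 'Alice'), ('age', 25), ('bio', 'short bio')]
#
# (hypothetical values), and this is what create_model receives to build and
# save the instance in the temporary database.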