def collate_individual_user_results():
    # A list to hold the per-project results
    project_by_project_results = []

    # loop over the projects in the project_configuration above
    for project in project_configuration:

        # A list to hold the per-user results for this project
        individual_project_results = []

        # Get the basic details we need from the project
        project_short_name = project["project_short_name"]

        # Get a list of all potential include ids for this project
        include_ids = project["include_user_ids"]

        # Create a list of user_ids for this project that have not completed the required number of tasks
        exclude_id_based_on_task_count = create_list_of_users_not_completing_req_no_of_tasks(project_short_name, min_no_tasks=324)

        # Filter the include_ids removing any that should be excluded based on task count
        include_ids = [id for id in include_ids if id not in exclude_id_based_on_task_count]

        # Create a list of user_ids to exclude based on marginal distribution
        exclude_id_based_on_marginal_distribution = calculate_marginal_distribution_for_each_user(project_short_name)

        # Filter the include_ids removing any that should be excluded based on marginal distributions
        include_ids = [id for id in include_ids if id not in exclude_id_based_on_marginal_distribution]

        # Select the correct gold standard data set and associated expert_ids for each project
        gold_standard_data = define_gold_standard_data(project_short_name=project_short_name)
        expert_ids = define_gold_standard_ids(project_short_name=project_short_name)

        # Build the combined dict using the filtered ids and relevant gold standard data
        combined_dict = build_combined_dict_keyed_on_composite_key(project_short_name=project_short_name, user_ids_to_include=include_ids, expert_project_short_name=gold_standard_data, expert_user_ids_to_include=expert_ids)
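        # N.b. the composite keys are expected to start with the image number followed by ":"
        # (the image-by-image and leave-one-out code further down splits each key on ":" and
        # takes the first element as the image number)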


        # Run the analysis on the combined dict
        create_user_agreement_pre_processing_dict(combined_dict)
        individual_user_results = create_user_agreement_dict(combined_dict)


        # Compile a results row for each user in this project
        for user_id, results_dict in individual_user_results.items():
            project_results = {
                "project_name": project_short_name.split("-")[-1],
                "user_id": user_id,
                "accuracy": results_dict["accuracy_based_on_excluding_tied"],
                "sensitivity": results_dict["sensitivity_excluding_tied"],
                "specificity": results_dict["specificity_excluding_tied"],
                "precision": results_dict["precision_excluding_tied"],
                "f-measure": results_dict["f_measure_excluding_tied"],
                "kappa": results_dict["inter_rater_agreement_excluding_tied"]
            }
            individual_project_results.append(project_results)

        project_by_project_results.append(individual_project_results)

    return project_by_project_results
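

# Example usage of the function above, as a minimal sketch: flatten the nested
# per-project results into one row per user and write them out as a CSV file.
# The filename, helper name and CSV output are illustrative choices, not part of
# the original pipeline.
import csv

def write_individual_user_results_to_csv(output_path="individual_user_results.csv"):
    project_by_project_results = collate_individual_user_results()

    # Flatten the list of per-project lists into a single list of row dicts
    flat_results = [row for project_results in project_by_project_results for row in project_results]

    # Column order matches the keys built in collate_individual_user_results
    fieldnames = ["project_name", "user_id", "accuracy", "sensitivity",
                  "specificity", "precision", "f-measure", "kappa"]

    with open(output_path, "w", newline="") as csv_file:
        writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(flat_results)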


# Image-by-image breakdown: for each image in a project's combined_dict (built as in
# collate_individual_user_results above), average the per-user agreement stats and
# print them out
def print_image_by_image_averages(combined_dict):

    # The image number forms the first component of each composite key
    images = sorted({composite_key.split(":")[0] for composite_key in combined_dict.keys()})

    # Loop over the images, calculating the average stats for each one in turn
    for image in images:

        # A dict to hold just this image's entries, and lists to hold the per-user stats
        image_dict = {}
        sensitivity_list = []
        specificity_list = []
        f_measure_list = []

        for composite_key, list_of_values in combined_dict.items():

            # Split the composite key to obtain the image number
            split_comp_ind = composite_key.split(":")
            image_ck = split_comp_ind[0]

            # Keep only the entries belonging to the current image
            if image == image_ck:
                image_dict.update({composite_key:list_of_values})
            else:
                continue

        # Run the analysis on this image's entries only
        user_pp_agreement_dict = create_user_agreement_pre_processing_dict(image_dict)
        # N.b. you may have a zero divisor for some images if there are no true positives or
        # false negatives, i.e. an image has no cancer in it according to the experts.
        # If this is the case, use "if denominator else 0" statements for sensitivity and
        # f-measure in the create_user_agreement_dict code (see the sketch after this function)
        user_agreement_dict = create_user_agreement_dict(image_dict)

        # Collect each user's stats for this image
        for user_id, dict_of_classifications in user_agreement_dict.items():
            sensitivity_list.append(dict_of_classifications['sensitivity_excluding_tied'])
            specificity_list.append(dict_of_classifications['specificity_excluding_tied'])
            f_measure_list.append(dict_of_classifications['f_measure_excluding_tied'])
        # print(image, f_measure_list, sensitivity_list, specificity_list)

        # Average the stats across users and report them for this image
        average_sensitivity = round(float(sum(sensitivity_list)/len(sensitivity_list)), 2)
        average_specificity = round(float(sum(specificity_list)/len(specificity_list)), 2)
        average_f_measure = round(float(sum(f_measure_list)/len(f_measure_list)), 2)
        print(image, "sensitivity:", average_sensitivity, "specificity:", average_specificity, "f-measure:", average_f_measure)
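

# A minimal sketch of the "if denominator else 0" guards referred to in the note above.
# The function and argument names here are illustrative, not the actual variables used
# inside create_user_agreement_dict.
def safe_sensitivity(true_positives, false_negatives):
    # Sensitivity = TP / (TP + FN); fall back to 0 when an image has no positives at all
    denominator = true_positives + false_negatives
    return true_positives / denominator if denominator else 0

def safe_f_measure(precision, sensitivity):
    # F-measure = 2 * precision * sensitivity / (precision + sensitivity)
    denominator = precision + sensitivity
    return (2 * precision * sensitivity) / denominator if denominator else 0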

# Leave-one-out user weighting: leave each image out in turn, calculate a weighting for
# every user on the remaining n-1 images, then average each user's weightings across the runs
def collate_leave_one_out_user_weightings():
    # Loop over the projects, rebuilding the filtered combined dict for each one as in collate_individual_user_results above
    for project in project_configuration:

        project_short_name = project["project_short_name"]
        include_ids = project["include_user_ids"]
        exclude_id_based_on_task_count = create_list_of_users_not_completing_req_no_of_tasks(project_short_name, min_no_tasks=324)
        include_ids = [id for id in include_ids if id not in exclude_id_based_on_task_count]
        exclude_id_based_on_marginal_distribution = calculate_marginal_distribution_for_each_user(project_short_name)
        include_ids = [id for id in include_ids if id not in exclude_id_based_on_marginal_distribution]
        gold_standard_data = define_gold_standard_data(project_short_name=project_short_name)
        expert_ids = define_gold_standard_ids(project_short_name=project_short_name)
        combined_dict = build_combined_dict_keyed_on_composite_key(project_short_name=project_short_name, user_ids_to_include=include_ids, expert_project_short_name=gold_standard_data, expert_user_ids_to_include=expert_ids)

        # A dict to hold every user's weighting from each leave-one-out run
        dict_with_all_weightings = {}

        # The image number forms the first component of each composite key
        images = sorted({composite_key.split(":")[0] for composite_key in combined_dict.keys()})

        # Leave each image out in turn
        for leave_one_out in images:
            combined_dict_without_one_image = {}
            combined_dict_with_one_image = {}

            for composite_key, list_of_values in combined_dict.items():

                # split the composite key to obtain the image number
                split_comp_ind = composite_key.split(":")
                image_ck = split_comp_ind[0]

                # if the image chosen to be left out above is not equal to the image for this composite key
                if leave_one_out != image_ck:
                    # store the information in the dict excluding the image left out
                    combined_dict_without_one_image.update({composite_key:list_of_values})
                # else if the image chosen to be left out above is equal to the image for this composite key
                elif leave_one_out == image_ck:
                    # store the information in the dict including the image left out
                    combined_dict_with_one_image.update({composite_key:list_of_values})

            # create user expert agreement dict on the basis of the n-1 images
            user_pp_agreement_dict = create_user_agreement_pre_processing_dict(combined_dict_without_one_image)

            # calculate weighting for each user
            dict_with_weightings = calculate_user_weighting(combined_dict_without_one_image)

            for user_id, weighting in dict_with_weightings.items():

                if user_id not in dict_with_all_weightings.keys():
                    dict_with_all_weightings[user_id] = []
                dict_with_all_weightings[user_id].append(weighting)

        # average each user's weightings across the leave-one-out runs
        for user_id, list_of_weightings in dict_with_all_weightings.items():
            dict_with_all_weightings[user_id] = float(sum(list_of_weightings))/len(list_of_weightings)

        weightings_list = list()