Example #1
0
def main(filename, systemname, print_us, print_ont, statistics, link, prolog,
         json, per_role, threshold, base, weights, spacy_nlp):
    """General class to run the entire program
	"""

    start_nlp_time = timeit.default_timer()
    nlp = spacy_nlp
    nlp_time = timeit.default_timer() - start_nlp_time

    start_parse_time = timeit.default_timer()
    miner = StoryMiner()

    # Read the input file
    stories = Reader.parse(filename)
    us_id = 1

    # Keep track of all errors
    success = 0
    fail = 0
    errors = ""
    c = Counter()

    # Keep track of all successfully created user story objects
    us_instances = []
    failed_stories = []
    success_stories = []

    # Parse every user story (remove punctuation and mine)
    for s in stories:
        try:
            user_story = parse(s, us_id, systemname, nlp, miner)
            user_story = c.count(user_story)
            success += 1
            us_instances.append(user_story)
            success_stories.append(s)
        except ValueError as err:
            failed_stories.append([us_id, s, err.args])
            errors += f"\n[User Story {us_id} ERROR] {err.args[0]}! (\"{' '.join(s.split())}\")"
            fail += 1
        us_id += 1

    # Print errors (if found)
    if errors:
        Printer.print_head("PARSING ERRORS")
        print(errors)

    parse_time = timeit.default_timer() - start_parse_time

    # Generate the term-by-user story matrix (m), and additional data in two other matrices
    start_matr_time = timeit.default_timer()

    matrix = Matrix(base, weights)
    matrices = matrix.generate(us_instances,
                               ' '.join([u.sentence for u in us_instances]),
                               nlp)
    m, count_matrix, stories_list, rme = matrices
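    # m is the term-by-user-story weight matrix (its 'sum' column is used for
    # filtering and the report); count_matrix holds the per-type counts;
    # stories_list and rme are not used further in this function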

    matr_time = timeit.default_timer() - start_matr_time

    # Print details per user story, if argument '-u'/'--print_us' is chosen
    if print_us:
        print("Details:\n")
        for us in us_instances:
            Printer.print_us_data(us)

    # Generate the ontology
    start_gen_time = timeit.default_timer()

    patterns = Constructor(nlp, us_instances, m)
    out = patterns.make(systemname, threshold, link)
    output_ontology, output_prolog, output_ontobj, output_prologobj, onto_per_role = out
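    # output_ontology/output_prolog are the rendered text outputs, while
    # output_ontobj and output_prologobj expose .classes and .relationships
    # for the report; onto_per_role pairs each role with its own ontology
    # (only written out when --per_role is chosen)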

    # Print out the ontology in the terminal, if argument '-o'/'--print_ont' is chosen
    if print_ont:
        Printer.print_head("MANCHESTER OWL")
        print(output_ontology)

    gen_time = timeit.default_timer() - start_gen_time

    # Gather statistics and print the results
    stats_time = 0
    if statistics:
        start_stats_time = timeit.default_timer()

        statsarr = Statistics.to_stats_array(us_instances)

        Printer.print_head("USER STORY STATISTICS")
        Printer.print_stats(statsarr[0], True)
        Printer.print_stats(statsarr[1], True)
        Printer.print_subhead(
            "Term-by-User Story Matrix (terms with total weight 0 hidden)")
        hide_zero = m[(m['sum'] > 0)]
        print(hide_zero)

        stats_time = timeit.default_timer() - start_stats_time

    # Write output files
    w = Writer()

    folder = "output/" + str(systemname)
    reports_folder = folder + "/reports"
    stats_folder = reports_folder + "/stats"

    outputfile = w.make_file(folder + "/ontology", str(systemname), "omn",
                             output_ontology)
    files = [["Manchester Ontology", outputfile]]

    outputcsv = ""
    sent_outputcsv = ""
    matrixcsv = ""

    if statistics:
        files.append([
            "General statistics",
            w.make_file(stats_folder, str(systemname), "csv", statsarr[0])
        ])
        files.append([
            "Term-by-User Story matrix",
            w.make_file(stats_folder,
                        str(systemname) + "-term_by_US_matrix", "csv", m)
        ])
        files.append([
            "Sentence statistics",
            w.make_file(stats_folder,
                        str(systemname) + "-sentences", "csv", statsarr[1])
        ])
    if prolog:
        files.append([
            "Prolog",
            w.make_file(folder + "/prolog", str(systemname), "pl",
                        output_prolog)
        ])
    if json:
        output_json_li = [str(us.toJSON()) for us in us_instances]
        output_json = "\n".join(output_json_li)
        files.append([
            "JSON",
            w.make_file(folder + "/json",
                        str(systemname) + "-user_stories", "json", output_json)
        ])
    if per_role:
        for o in onto_per_role:
            files.append([
                "Individual Ontology for '" + str(o[0]) + "'",
                w.make_file(folder + "/ontology",
                            str(systemname) + "-" + str(o[0]), "omn", o[1])
            ])

    # Print the used ontology generation settings
    Printer.print_gen_settings(matrix, base, threshold)

    # Print details of the generation
    Printer.print_details(fail, success, nlp_time, parse_time, matr_time,
                          gen_time, stats_time)

    report_dict = {
        "stories": us_instances,
        "failed_stories": failed_stories,
        "systemname": systemname,
        "us_success": success,
        "us_fail": fail,
        "times": [["Initializing Natural Language Processor (<em>spaCy</em> v" +
                   pkg_resources.get_distribution("spacy").version + ")", nlp_time],
                  ["Mining User Stories", parse_time],
                  ["Creating Factor Matrix", matr_time],
                  ["Generating Manchester Ontology", gen_time],
                  ["Gathering statistics", stats_time]],
        "dir": os.path.dirname(os.path.realpath(__file__)),
        "inputfile": filename,
        "inputfile_lines": len(stories),
        "outputfiles": files,
        "threshold": threshold,
        "base": base,
        "matrix": matrix,
        "weights": m['sum'].copy().reset_index().sort_values(
            ['sum'], ascending=False).values.tolist(),
        "counts": count_matrix.reset_index().values.tolist(),
        "classes": output_ontobj.classes,
        "relationships": output_prologobj.relationships,
        "types": list(count_matrix.columns.values),
        "ontology": multiline(output_ontology)
    }

    # Finally, generate a report
    report = w.make_file(reports_folder,
                         str(systemname) + "_REPORT", "html",
                         generate_report(report_dict))
    files.append(["Report", report])

    # Print the location and name of all output files
    for file in files:
        if str(file[1]) != "":
            print(f"{file[0]} file successfully created at: \"{file[1]}\"")

    # Return objects so that they can be used as input for other tools
    return {
        'us_instances': us_instances,
        'output_ontobj': output_ontobj,
        'output_prologobj': output_prologobj,
        'matrix': m
    }
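# Aside: a minimal, self-contained sketch of the timing pattern used in
# main() above (timeit.default_timer() deltas around each phase). The
# timed() helper and the sample workload are illustrative additions, not
# part of the original program.
import timeit

def timed(work):
    """Run a zero-argument callable, returning (result, elapsed_seconds)."""
    start = timeit.default_timer()
    result = work()
    return result, timeit.default_timer() - start

# usage: total, elapsed = timed(lambda: sum(range(10**6)))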
Example #2
0
def main(*args):
    """Main CLI entry-point

    Args:
        *args: Manually supplied arguments
	"""
    p = ArgumentParser(usage=f'''python -m vn <INPUT FILE> [<args>]

    ///////////////////////////////////////////
    //              Visual Narrator          //
    ///////////////////////////////////////////
    {description}

    This program has multiple functionalities:
        (1) Mine user story information
        (2) Generate an ontology from a user story set
        (3) Generate Prolog from a user story set (including links to 'role', 'means' and 'ends')
        (4) Get statistics for a user story set
    ''',
                       epilog=f'''(*) Utrecht University.
                {author}, 2015-2019''')

    # Positional arguments (required; p)
    if "--return-args" not in args:
        p.add_argument("filename",
                       help="input file with user stories",
                       metavar="INPUT FILE",
                       type=lambda x: _is_valid_file(p, x))
    p.add_argument('--version',
                   action='version',
                   version=f'%(prog)s v{__version__} by {author}')

    # General arguments (g_p)
    g_p = p.add_argument_group("general arguments (optional)")
    g_p.add_argument(
        "-n",
        "--name",
        dest="system_name",
        help=
        "your system name, as used in ontology and output file(s) generation",
        required=False)
    g_p.add_argument("-u",
                     "--print_us",
                     dest="print_us",
                     help="print data per user story in the console",
                     action="store_true",
                     default=False)
    g_p.add_argument("-o",
                     "--print_ont",
                     dest="print_ont",
                     help="print ontology in the console",
                     action="store_true",
                     default=False)
    g_p.add_argument(
        "-l",
        "--link",
        dest="link",
        help="link ontology classes to user story they originate from",
        action="store_true",
        default=False)
    g_p.add_argument("--prolog",
                     dest="prolog",
                     help="generate prolog output (.pl)",
                     action="store_true",
                     default=False)
    g_p.add_argument("--return-args",
                     dest="return_args",
                     help="return arguments instead of call VN",
                     action="store_true",
                     default=False)
    g_p.add_argument("--json",
                     dest="json",
                     help="export user stories as json (.json)",
                     action="store_true",
                     default=False)

    # Statistics arguments (s_p)
    s_p = p.add_argument_group("statistics arguments (optional)")
    s_p.add_argument(
        "-s",
        "--statistics",
        dest="statistics",
        help="show user story set statistics and output these to a .csv file",
        action="store_true",
        default=False)

    # Generation tuning (w_p)
    w_p = p.add_argument_group("conceptual model generation tuning (optional)")
    w_p.add_argument("-p",
                     "--per_role",
                     dest="per_role",
                     help="create an additional conceptual model per role",
                     action="store_true",
                     default=False)
    w_p.add_argument(
        "-t",
        dest="threshold",
        help=
        f"set threshold for conceptual model generation (INT, default = {DEFAULT_THRESHOLD:.2f})",
        type=float,
        default=DEFAULT_THRESHOLD)
    w_p.add_argument(
        "-b",
        dest="base_weight",
        help=f"set the base weight (INT, default = {DEFAULT_BASE})",
        type=int,
        default=DEFAULT_BASE)
    w_p.add_argument(
        "-wfr",
        dest="weight_func_role",
        help=
        f"weight of functional role (FLOAT, default = {DEFAULT_WEIGHTS['func_role']:.2f})",
        type=float,
        default=DEFAULT_WEIGHTS['func_role'])
    w_p.add_argument(
        "-wdo",
        dest="weight_main_obj",
        help=
        f"weight of main object (FLOAT, default = {DEFAULT_WEIGHTS['main_obj']:.2f})",
        type=float,
        default=DEFAULT_WEIGHTS['main_obj'])
    w_p.add_argument(
        "-wffm",
        dest="weight_ff_means",
        help=
        f"weight of noun in free form means (FLOAT, default = {DEFAULT_WEIGHTS['ff_means']:.2f})",
        type=float,
        default=DEFAULT_WEIGHTS['ff_means'])
    w_p.add_argument(
        "-wffe",
        dest="weight_ff_ends",
        help=
        f"weight of noun in free form ends (FLOAT, default = {DEFAULT_WEIGHTS['ff_ends']:.2f})",
        type=float,
        default=DEFAULT_WEIGHTS['ff_ends'])
    w_p.add_argument(
        "-wcompound",
        dest="weight_compound",
        help=
        f"weight of nouns in compound compared to head (FLOAT, default = {DEFAULT_WEIGHTS['compound']:.2f})",
        type=float,
        default=DEFAULT_WEIGHTS['compound'])

    if len(args) < 1:
        args = p.parse_args()
    else:
        args = p.parse_args(args)

    weights = {
        'func_role': args.weight_func_role,
        'main_obj': args.weight_main_obj,
        'ff_means': args.weight_ff_means,
        'ff_ends': args.weight_ff_ends,
        'compound': args.weight_compound
    }

    if not args.system_name or args.system_name == '':
        args.system_name = "System"
    if not args.return_args:
        visualnarrator = VisualNarrator(threshold=args.threshold,
                                        base=args.base_weight,
                                        weights=weights,
                                        stats=args.statistics,
                                        link=args.link,
                                        prolog=args.prolog,
                                        json=args.json,
                                        per_role=args.per_role)
        return visualnarrator.run(args.filename,
                                  args.system_name,
                                  print_us=args.print_us,
                                  print_ont=args.print_ont,
                                  stories=Reader.parse(args.filename))
    else:
        return args
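# Aside: a self-contained sketch of the calling convention main(*args)
# relies on: ArgumentParser.parse_args() accepts either None (falling back
# to sys.argv) or a manually supplied sequence of strings. The parser below
# is illustrative, not the Visual Narrator parser.
from argparse import ArgumentParser

def parse_demo(*args):
    p = ArgumentParser()
    p.add_argument("--json", action="store_true", default=False)
    p.add_argument("-t", dest="threshold", type=float, default=1.0)
    return p.parse_args(args if args else None)

# parse_demo("--json", "-t", "0.5") -> Namespace(json=True, threshold=0.5)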
Example #3
0
    def run(self,
            filename,
            systemname,
            print_us=False,
            print_ont=False,
            stories=None,
            write_local=True):
        """Single run of Visual Narrator

        Args:
            filename (str): File name to read
            systemname (str): Name of System (for output and in model)

            print_us (bool): print data per user story in the console
            print_ont (bool): print ontology in the console

            stories (list): preprocessed stories (from filename)
        Returns:
            dict: dictionary with US objects, Ontology + Prolog + JSON objects, matrix
        """
        if stories is None:
            stories = Reader.parse(filename)

        # Mine stories
        us_instances, failed_stories = self._mine_stories(stories, systemname, log_time=self.time)

        # Generate the term-by-user story matrix (m), and additional data in two other matrices
        m, count_matrix = self._get_matrix(us_instances, log_time=self.time)

        # Print details per user story, if argument '-u'/'--print_us' is chosen
        if print_us:
            print("Details:\n")
            for us in us_instances:
                Printer.print_us_data(us)

        # Generate the outputs
        output_ontology, output_prolog, onto_per_role = \
            self._get_gen(us_instances, m, systemname, print_ont, log_time=self.time)

        # Gather statistics and print the results
        statsarr = self._get_stats(us_instances, m, log_time=self.time)

        # Print the used ontology generation settings
        Printer.print_gen_settings(self.matrix, self.base, self.threshold)

        # Print details of the generation
        fail = len(failed_stories)
        success = len(us_instances)
        time_nlp = self.time['INITIALIZE_NLP']
        time_mine = self.time['_MINE_STORIES']
        time_matr = self.time['_GET_MATRIX']
        time_gen = self.time['_GET_GEN']
        time_stats = self.time['_GET_STATS']

        Printer.print_details(fail, success, time_nlp, time_mine, time_matr, time_gen, time_stats)
        self.time['INITIALIZE_NLP'] = 0

        output_json = json.dumps([us.toJSON() for us in us_instances], indent=4)

        files = []
        if write_local:
            w = Writer
            files, reports_folder = self.write_files(w,
                                                     systemname,
                                                     str(output_ontology),
                                                     str(output_prolog),
                                                     output_json,
                                                     statsarr,
                                                     m,
                                                     onto_per_role)

        report_dict = {
            "stories": us_instances,
            "failed_stories": failed_stories,
            "systemname": systemname,
            "us_success": success,
            "us_fail": fail,
            "times": [["Initializing Natural Language Processor (<em>spaCy</em> v" + pkg_resources.get_distribution("spacy").version + ")" , time_nlp],
                      ["Mining User Stories", time_mine],
                      ["Creating Factor Matrix", time_matr],
                      ["Generating Manchester Ontology / Prolog", time_gen],
                      ["Gathering statistics", time_stats]],
            "dir": sys.path[0],
            "inputfile": filename,
            "inputfile_lines": len(stories),
            "outputfiles": files,
            "threshold": self.threshold,
            "base": self.base,
            "matrix": self.matrix,
            "weights": m['sum'].copy().reset_index().sort_values(['sum'], ascending=False).values.tolist(),
            "counts": count_matrix.reset_index().values.tolist(),
            "classes": output_ontology.classes,
            "relationships": output_ontology.relationships,
            "types": list(count_matrix.columns.values),
            "ontology": multiline(str(output_ontology)),
            "print_prolog": self.prolog,
            "prolog": multiline(str(output_prolog)),
            "write_local": write_local
        }

        # Finally, generate a report
        output_report = self.generate_report(report_dict)

        # Write output files
        if write_local:
            report = w.make_file(reports_folder, str(systemname) + "_REPORT", "html", output_report)
            files.append(["Report", report])

            # Print the location and name of all output files
            for file in files:
                if str(file[1]) != "":
                    print(f"{file[0]} file succesfully created at: \"{file[1]}\"")

        # Return objects so that they can be used as input for other tools
        return {'us_instances': us_instances,
                'output_ontobj': str(output_ontology),
                'output_prologobj': str(output_prolog),
                'output_json': output_json,
                'matrix': m,
                'report': output_report}
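# Aside: run() reads timings from self.time under keys such as
# '_MINE_STORIES', which suggests the helper methods are wrapped by a
# decorator that records elapsed time into the dict passed as log_time.
# A self-contained sketch of that pattern (an assumption inferred from the
# key names, not the project's actual decorator):
import functools
import timeit

def record_time(func):
    @functools.wraps(func)
    def wrapper(*args, log_time=None, **kwargs):
        start = timeit.default_timer()
        result = func(*args, **kwargs)
        if log_time is not None:
            log_time[func.__name__.upper()] = timeit.default_timer() - start
        return result
    return wrapper

@record_time
def _mine_stories(stories):
    return [s for s in stories if s.strip()]

times = {}
mined = _mine_stories(["As a user, I want to log in.", ""], log_time=times)
# times == {'_MINE_STORIES': <elapsed seconds>}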
Example #4
0
def program(*args):
    p = ArgumentParser(usage='''run.py <INPUT FILE> [<args>]

///////////////////////////////////////////
//              Visual Narrator          //
///////////////////////////////////////////

This program has multiple functionalities:
	(1) Mine user story information
	(2) Generate an ontology from a user story set
	(3) Generate Prolog from a user story set (including links to 'role', 'means' and 'ends')
	(4) Get statistics for a user story set
''',
                       epilog='''(*) Utrecht University.
			M.J. Robeer, 2015-2017''')

    if "--return-args" not in args:
        p.add_argument("filename",
                       help="input file with user stories",
                       metavar="INPUT FILE",
                       type=lambda x: is_valid_file(p, x))
    p.add_argument('--version',
                   action='version',
                   version='Visual Narrator v0.9 BETA by M.J. Robeer')

    g_p = p.add_argument_group("general arguments (optional)")
    g_p.add_argument(
        "-n",
        "--name",
        dest="system_name",
        help=
        "your system name, as used in ontology and output file(s) generation",
        required=False)
    g_p.add_argument("-u",
                     "--print_us",
                     dest="print_us",
                     help="print data per user story in the console",
                     action="store_true",
                     default=False)
    g_p.add_argument("-o",
                     "--print_ont",
                     dest="print_ont",
                     help="print ontology in the console",
                     action="store_true",
                     default=False)
    g_p.add_argument(
        "-l",
        "--link",
        dest="link",
        help="link ontology classes to user story they originate from",
        action="store_true",
        default=False)
    g_p.add_argument("--prolog",
                     dest="prolog",
                     help="generate prolog output (.pl)",
                     action="store_true",
                     default=False)
    g_p.add_argument("--return-args",
                     dest="return_args",
                     help="return arguments instead of call VN",
                     action="store_true",
                     default=False)
    g_p.add_argument("--json",
                     dest="json",
                     help="export user stories as json (.json)",
                     action="store_true",
                     default=False)
    g_p.add_argument("--split",
                     dest="split",
                     help="Process the stories one by one",
                     action="store_true",
                     default=False)
    s_p = p.add_argument_group("statistics arguments (optional)")
    s_p.add_argument(
        "-s",
        "--statistics",
        dest="statistics",
        help="show user story set statistics and output these to a .csv file",
        action="store_true",
        default=False)

    w_p = p.add_argument_group("conceptual model generation tuning (optional)")
    w_p.add_argument("-p",
                     "--per_role",
                     dest="per_role",
                     help="create an additional conceptual model per role",
                     action="store_true",
                     default=False)
    w_p.add_argument(
        "-t",
        dest="threshold",
        help=
        "set threshold for conceptual model generation (INT, default = 1.0)",
        type=float,
        default=1.0)
    w_p.add_argument("-b",
                     dest="base_weight",
                     help="set the base weight (INT, default = 1)",
                     type=int,
                     default=1)
    w_p.add_argument("-wfr",
                     dest="weight_func_role",
                     help="weight of functional role (FLOAT, default = 1.0)",
                     type=float,
                     default=1.0)
    w_p.add_argument("-wdo",
                     dest="weight_main_obj",
                     help="weight of main object (FLOAT, default = 1.0)",
                     type=float,
                     default=1.0)
    w_p.add_argument(
        "-wffm",
        dest="weight_ff_means",
        help="weight of noun in free form means (FLOAT, default = 0.7)",
        type=float,
        default=0.7)
    w_p.add_argument(
        "-wffe",
        dest="weight_ff_ends",
        help="weight of noun in free form ends (FLOAT, default = 0.5)",
        type=float,
        default=0.5)
    w_p.add_argument(
        "-wcompound",
        dest="weight_compound",
        help=
        "weight of nouns in compound compared to head (FLOAT, default = 0.66)",
        type=float,
        default=0.66)

    if len(args) < 1:
        args = p.parse_args()
    else:
        args = p.parse_args(args)

    weights = [
        args.weight_func_role, args.weight_main_obj, args.weight_ff_means,
        args.weight_ff_ends, args.weight_compound
    ]

    if not args.system_name or args.system_name == '':
        args.system_name = "System"
    if not args.return_args:
        spacy_nlp = initialize_nlp()
        if args.split:
            stories = Reader.parse(args.filename)
            for s in stories:
                # Write each story to a temporary file, then mine it on its own
                with open('./tmp.txt', 'w') as tmp_file:
                    tmp_file.write(s)
                with open('./tmp.txt', 'r') as tmp_file:
                    main(tmp_file, args.system_name, args.print_us,
                         args.print_ont, args.statistics, args.link,
                         args.prolog, args.json, args.per_role, args.threshold,
                         args.base_weight, weights, spacy_nlp)
            return
        else:
            return main(args.filename, args.system_name, args.print_us,
                        args.print_ont, args.statistics, args.link,
                        args.prolog, args.json, args.per_role, args.threshold,
                        args.base_weight, weights, spacy_nlp)
    else:
        return args
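# Aside: the --split branch above reuses './tmp.txt' for every story; a
# sketch of the same idea with the standard tempfile module, which avoids
# collisions between concurrent runs (a hypothetical refactor, not the
# original behavior):
import os
import tempfile

def story_to_tempfile(story):
    """Write one story to a unique temporary file and return its path."""
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as fh:
        fh.write(story)
        return fh.name

# path = story_to_tempfile("As a user, I want to log in, so that ...")
# ... mine the file, then clean up: os.remove(path)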
Example #5
0
def main(filename, systemname, print_us, print_ont, statistics, link, prolog,
         json, per_role, threshold, base, weights, spacy_nlp):
    """General class to run the entire program
	"""

    start_nlp_time = timeit.default_timer()
    nlp = spacy_nlp
    nlp_time = timeit.default_timer() - start_nlp_time

    start_parse_time = timeit.default_timer()
    miner = StoryMiner()

    # Read the input file
    stories = Reader.parse(filename)
    us_id = 1

    # Keep track of all errors
    success = 0
    fail = 0
    errors = ""
    c = Counter()

    # Keep track of all successfully created user story objects
    us_instances = []
    failed_stories = []
    success_stories = []

    # Parse every user story (remove punctuation and mine)
    for s in stories:
        try:
            user_story = parse(s, us_id, systemname, nlp, miner)
            user_story = c.count(user_story)
            success += 1
            us_instances.append(user_story)
            success_stories.append(s)
        except ValueError as err:
            failed_stories.append([us_id, s, err.args])
            errors += f"\n[User Story {us_id} ERROR] {err.args[0]}! (\"{' '.join(s.split())}\")"
            fail += 1
        us_id += 1

    # Print errors (if found)
    if errors:
        Printer.print_head("PARSING ERRORS")
        print(errors)

    parse_time = timeit.default_timer() - start_parse_time

    # Generate the term-by-user story matrix (m), and additional data in two other matrices
    start_matr_time = timeit.default_timer()

    matrix = Matrix(base, weights)
    matrices = matrix.generate(us_instances,
                               ' '.join([u.sentence for u in us_instances]),
                               nlp)
    m, count_matrix, stories_list, rme = matrices

    matr_time = timeit.default_timer() - start_matr_time

    # Generate the ontology
    start_gen_time = timeit.default_timer()

    patterns = Constructor(nlp, us_instances, m)
    out = patterns.make(systemname, threshold, link)
    output_ontology, output_prolog, output_ontobj, output_prologobj, onto_per_role = out

    all_classes_list = []
    for i, class_vn in enumerate(output_ontobj.classes):
        one_concept = {
            'id': i,
            'class_name': class_vn.name,
            'parent_name': class_vn.parent,
            'occurs_in': occurence_list(class_vn.stories),
            'weight': '0',
            'group': class_vn.is_role
        }
        all_classes_list.append(one_concept)
    nodes = [{
        "id": cl["id"],
        "label": cl["class_name"],
        "weight": cl["weight"]
    } for cl in all_classes_list]
    relationships_query = output_prologobj.relationships

    all_relationships_list = []
    for relationship in relationships_query:
        one_concept = {
            'relationship_domain': relationship.domain,
            'relationship_name': relationship.name,
            'relationship_range': relationship.range
        }
        all_relationships_list.append(one_concept)

    edges_id_list = []
    concepts_query = []
    concepts_dict = {}
    concepts_dict_list = []
    relationshipslist = []
    for i, class_vn in enumerate(all_classes_list):
        one_concept = {
            'class_id': i,
            'class_name': class_vn['class_name'],
            'parent_name': class_vn['parent_name'],
            'weight': '0',
            'group': class_vn['group']
        }
        concepts_query.append(one_concept)

    for concept in concepts_query:
        concepts_dict[concept['class_id']] = concept['class_name']
        concepts_dict_list.append([concept['class_id'], concept['class_name']])

    for rel in all_relationships_list:
        relationshipslist.append([
            rel['relationship_domain'], rel['relationship_range'],
            rel['relationship_name']
        ])

        # Resolve the domain/range names to class ids; skip the relationship
        # if either endpoint is missing (previously a missing match silently
        # reused ids from the preceding iteration, or raised NameError)
        x = y = None
        for concept in concepts_dict_list:
            if rel['relationship_domain'] == concept[1]:
                x = concept[0]
            if rel['relationship_range'] == concept[1]:
                y = concept[0]
        if x is None or y is None:
            continue

        edges_id_dict = {
            'from': x,
            'to': y,
            'label': rel['relationship_name']
        }
        if rel['relationship_name'] == 'isa':
            edges_id_dict['dashes'] = "true"
        edges_id_list.append(edges_id_dict)

    graph = {'nodes': nodes, 'edges': edges_id_list}
    print(graph)
    return graph
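# Aside: the endpoint resolution above rescans concepts_dict_list for every
# relationship; the same mapping can be built once as a dict. A
# self-contained sketch with illustrative data:
concepts = [(0, "User"), (1, "Account")]
name_to_id = {name: cid for cid, name in concepts}

rel = {'relationship_domain': "User",
       'relationship_name': "has",
       'relationship_range': "Account"}
edge = {'from': name_to_id[rel['relationship_domain']],
        'to': name_to_id[rel['relationship_range']],
        'label': rel['relationship_name']}
# edge == {'from': 0, 'to': 1, 'label': 'has'}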
Example #6
0
def main(filename, systemname, print_us, print_ont, statistics, link, prolog,
         json, per_role, threshold, base, weights, spacy_nlp):
    """General class to run the entire program
	"""

    start_nlp_time = timeit.default_timer()
    nlp = spacy_nlp
    nlp_time = timeit.default_timer() - start_nlp_time

    start_parse_time = timeit.default_timer()
    miner = StoryMiner()

    # Read the input file
    stories = Reader.parse(filename)
    us_id = 1

    # Keep track of all errors
    success = 0
    fail = 0
    errors = ""
    c = Counter()

    # Keep track of all successfully created user story objects
    us_instances = []
    failed_stories = []
    success_stories = []

    # Parse every user story (remove punctuation and mine)
    for s in stories:
        try:
            user_story = parse(s, us_id, systemname, nlp, miner)
            user_story = c.count(user_story)
            success += 1
            us_instances.append(user_story)
            success_stories.append(s)
        except ValueError as err:
            failed_stories.append([us_id, s, err.args])
            errors += f"\n[User Story {us_id} ERROR] {err.args[0]}! (\"{' '.join(s.split())}\")"
            fail += 1
        us_id += 1

    # Print errors (if found)
    if errors:
        Printer.print_head("PARSING ERRORS")
        print(errors)

    parse_time = timeit.default_timer() - start_parse_time

    # Generate the term-by-user story matrix (m), and additional data in two other matrices
    start_matr_time = timeit.default_timer()

    matrix = Matrix(base, weights)
    matrices = matrix.generate(us_instances,
                               ' '.join([u.sentence for u in us_instances]),
                               nlp)
    m, count_matrix, stories_list, rme = matrices

    matr_time = timeit.default_timer() - start_matr_time

    # Print details per user story, if argument '-u'/'--print_us' is chosen
    if print_us:
        print("Details:\n")
        for us in us_instances:
            Printer.print_us_data(us)

    # Generate the ontology
    start_gen_time = timeit.default_timer()

    patterns = Constructor(nlp, us_instances, m)
    out = patterns.make(systemname, threshold, link)
    output_ontology, output_prolog, output_ontobj, output_prologobj, onto_per_role = out

    print("HEY THIS IS THE OUTPUT_ONTOBJ WITH THE CLASSES APPARENTLY???")
    print(output_ontobj.classes)
    all_classes_list = []
    for class_vn in output_ontobj.classes:
        one_concept = {
            'class_name': class_vn.name,
            'parent_name': class_vn.parent,
            'occurs_in': occurence_list(class_vn.stories),
            'weight': '0',
            'group': class_vn.is_role
        }
        all_classes_list.append(one_concept)

    print(all_classes_list)
    # nodes = []
    # for cl in all_classes_list:
    # 	print(cl)
    # 	nodes.append({"label": cl['class_name']})
    # taking out class_id from the nodes. idk if this will bite me later.
    nodes = [{
        "label": cl["class_name"],
        "weight": cl["weight"]
    } for cl in all_classes_list]
    # print(nodes)
    print('IDK WHAT THIS IS BUT IMMA PRINT IT OUT TOO')
    relationships_query = output_prologobj.relationships

    all_relationships_list = []
    for relationship in relationships_query:
        one_concept = {
            'relationship_domain': relationship.domain,
            'relationship_name': relationship.name,
            'relationship_range': relationship.range
        }
        all_relationships_list.append(one_concept)

    edges_id_list = []
    concepts_query = []
    concepts_dict = {}
    concepts_dict_list = []
    relationshipslist = []
    for i, class_vn in enumerate(all_classes_list):
        one_concept = {
            'class_id': i,
            'class_name': class_vn['class_name'],
            'parent_name': class_vn['parent_name'],
            'weight': '0',
            'group': class_vn['group']
        }
        concepts_query.append(one_concept)

    for concept in concepts_query:
        concepts_dict[concept['class_id']] = concept['class_name']
        concepts_dict_list.append([concept['class_id'], concept['class_name']])

    for i, rel in enumerate(all_relationships_list):
        relationshipslist.append([
            rel['relationship_domain'], rel['relationship_range'],
            rel['relationship_name']
        ])

        # Resolve the domain/range names to class ids; skip the relationship
        # if either endpoint is missing (previously a missing match silently
        # reused ids from the preceding iteration, or raised NameError)
        x = y = None
        for concept in concepts_dict_list:
            if rel['relationship_domain'] == concept[1]:
                x = concept[0]
            if rel['relationship_range'] == concept[1]:
                y = concept[0]
        if x is None or y is None:
            continue

        edges_id_dict = {
            'id': i,
            'from': x,
            'to': y,
            'label': rel['relationship_name']
        }
        if rel['relationship_name'] == 'isa':
            edges_id_dict['dashes'] = "true"
        edges_id_list.append(edges_id_dict)

    # Print out the ontology in the terminal, if argument '-o'/'--print_ont' is chosen
    if print_ont:
        Printer.print_head("MANCHESTER OWL")
        print(output_ontology)

    gen_time = timeit.default_timer() - start_gen_time

    # Gather statistics and print the results
    stats_time = 0
    if statistics:
        start_stats_time = timeit.default_timer()

        statsarr = Statistics.to_stats_array(us_instances)

        Printer.print_head("USER STORY STATISTICS")
        Printer.print_stats(statsarr[0], True)
        Printer.print_stats(statsarr[1], True)
        Printer.print_subhead(
            "Term-by-User Story Matrix (terms with total weight 0 hidden)")
        hide_zero = m[(m['sum'] > 0)]
        print(hide_zero)

        stats_time = timeit.default_timer() - start_stats_time

    # Write output files
    w = Writer()

    folder = "output/" + str(systemname)
    reports_folder = folder + "/reports"
    stats_folder = reports_folder + "/stats"

    outputfile = w.make_file(folder + "/ontology", str(systemname), "omn",
                             output_ontology)
    files = [["Manchester Ontology", outputfile]]

    outputcsv = ""
    sent_outputcsv = ""
    matrixcsv = ""

    if statistics:
        files.append([
            "General statistics",
            w.make_file(stats_folder, str(systemname), "csv", statsarr[0])
        ])
        files.append([
            "Term-by-User Story matrix",
            w.make_file(stats_folder,
                        str(systemname) + "-term_by_US_matrix", "csv", m)
        ])
        files.append([
            "Sentence statistics",
            w.make_file(stats_folder,
                        str(systemname) + "-sentences", "csv", statsarr[1])
        ])
    if prolog:
        files.append([
            "Prolog",
            w.make_file(folder + "/prolog", str(systemname), "pl",
                        output_prolog)
        ])
    if json:
        output_json_li = [str(us.toJSON()) for us in us_instances]
        output_json = "\n".join(output_json_li)
        files.append([
            "JSON",
            w.make_file(folder + "/json",
                        str(systemname) + "-user_stories", "json", output_json)
        ])
    if per_role:
        for o in onto_per_role:
            files.append([
                "Individual Ontology for '" + str(o[0]) + "'",
                w.make_file(folder + "/ontology",
                            str(systemname) + "-" + str(o[0]), "omn", o[1])
            ])

    # Print the used ontology generation settings
    Printer.print_gen_settings(matrix, base, threshold)

    # Print details of the generation
    Printer.print_details(fail, success, nlp_time, parse_time, matr_time,
                          gen_time, stats_time)

    report_dict = {
        "stories": us_instances,
        "failed_stories": failed_stories,
        "systemname": systemname,
        "us_success": success,
        "us_fail": fail,
        "times": [["Initializing Natural Language Processor (<em>spaCy</em> v" +
                   pkg_resources.get_distribution("spacy").version + ")", nlp_time],
                  ["Mining User Stories", parse_time],
                  ["Creating Factor Matrix", matr_time],
                  ["Generating Manchester Ontology", gen_time],
                  ["Gathering statistics", stats_time]],
        "dir": os.path.dirname(os.path.realpath(__file__)),
        "inputfile": filename,
        "inputfile_lines": len(stories),
        "outputfiles": files,
        "threshold": threshold,
        "base": base,
        "matrix": matrix,
        "weights": m['sum'].copy().reset_index().sort_values(
            ['sum'], ascending=False).values.tolist(),
        "counts": count_matrix.reset_index().values.tolist(),
        "classes": output_ontobj.classes,
        "relationships": output_prologobj.relationships,
        "types": list(count_matrix.columns.values),
        "ontology": multiline(output_ontology)
    }

    # Finally, generate a report
    report = w.make_file(reports_folder,
                         str(systemname) + "_REPORT", "html",
                         generate_report(report_dict))
    files.append(["Report", report])

    # Print the location and name of all output files
    for file in files:
        if str(file[1]) != "":
            print(f"{file[0]} file successfully created at: \"{file[1]}\"")

    # Return objects so that they can be used as input for other tools
    return {
        'us_instances': us_instances,
        'output_ontobj': output_ontobj,
        'output_prologobj': output_prologobj,
        'matrix': m
    }
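# Aside: the {'nodes': ..., 'edges': ...} structure returned by Examples #5
# and #6 matches the input shape of network visualisation libraries such as
# vis.js; a self-contained sketch of serialising it for a front-end (the
# file name is illustrative). The stdlib json module is aliased because the
# surrounding code already uses 'json' as a flag name.
import json as json_lib

graph = {
    "nodes": [{"id": 0, "label": "User", "weight": "0"},
              {"id": 1, "label": "Account", "weight": "0"}],
    "edges": [{"id": 0, "from": 0, "to": 1, "label": "has"}],
}
with open("graph.json", "w") as fh:
    json_lib.dump(graph, fh, indent=2)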