Example #1
def rebase(
        log_obj: Union[EventLog, EventStream, pd.DataFrame],
        case_id: str = constants.CASE_CONCEPT_NAME,
        activity_key: str = xes_constants.DEFAULT_NAME_KEY,
        timestamp_key: str = xes_constants.DEFAULT_TIMESTAMP_KEY,
        start_timestamp_key: str = xes_constants.DEFAULT_START_TIMESTAMP_KEY):
    """
    Re-base the log object, changing the case ID, activity and timestamp attributes.

    Parameters
    -----------------
    log_obj
        Log object
    case_id
        Attribute to use as the case identifier
    activity_key
        Attribute to use as the activity
    timestamp_key
        Attribute to use as the (completion) timestamp
    start_timestamp_key
        Attribute to use as the start timestamp

    Returns
    -----------------
    rebased_log_obj
        Rebased log object
    """
    import pm4py

    if isinstance(log_obj, pd.DataFrame):
        return format_dataframe(log_obj,
                                case_id=case_id,
                                activity_key=activity_key,
                                timestamp_key=timestamp_key,
                                start_timestamp_key=start_timestamp_key)
    elif isinstance(log_obj, EventLog):
        log_obj = pm4py.convert_to_dataframe(log_obj)
        log_obj = format_dataframe(log_obj,
                                   case_id=case_id,
                                   activity_key=activity_key,
                                   timestamp_key=timestamp_key,
                                   start_timestamp_key=start_timestamp_key)
        from pm4py.objects.conversion.log import converter
        return converter.apply(log_obj,
                               variant=converter.Variants.TO_EVENT_LOG)
    elif isinstance(log_obj, EventStream):
        log_obj = pm4py.convert_to_dataframe(log_obj)
        log_obj = format_dataframe(log_obj,
                                   case_id=case_id,
                                   activity_key=activity_key,
                                   timestamp_key=timestamp_key,
                                   start_timestamp_key=start_timestamp_key)
        return pm4py.convert_to_event_stream(log_obj)
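
A minimal usage sketch for the function above (the CSV path and column names are hypothetical): a raw dataframe is loaded with pandas and re-based so that downstream pm4py calls find the standard case, activity and timestamp attributes.

import pandas as pd

# hypothetical CSV whose columns do not follow the pm4py defaults
df = pd.read_csv("orders.csv")
df = rebase(df,
            case_id="order_id",
            activity_key="event_name",
            timestamp_key="completed_at")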
Example #2
def execute_script():
    # loads an XES event log
    event_log = pm4py.read_xes("../tests/input_data/receipt.xes")
    # gets the dataframe out of the event log (through conversion)
    dataframe = pm4py.convert_to_dataframe(event_log)
    # discovers the log skeleton model
    log_skeleton = log_skeleton_discovery.apply(
        event_log,
        parameters={
            log_skeleton_discovery.Variants.CLASSIC.value.Parameters.NOISE_THRESHOLD:
            0.03
        })
    # applies conformance checking against the log skeleton
    conf_result = log_skeleton_conformance.apply(event_log, log_skeleton)
    # converts the conformance checking results into a diagnostics dataframe
    diagnostics = log_skeleton_conformance.get_diagnostics_dataframe(
        event_log, conf_result)
    # merges the dataframe containing the events with the diagnostics dataframe, using pd.merge
    merged_df = pd.merge(dataframe,
                         diagnostics,
                         how="left",
                         left_on="case:concept:name",
                         right_on="case_id",
                         suffixes=('', '_diagn'))
    print(merged_df)
Example #3
def test_csv(self):
    df = pd.read_csv("input_data/running-example.csv")
    df = pm4py.format_dataframe(df, case_id="case:concept:name", activity_key="concept:name",
                                timestamp_key="time:timestamp")
    log2 = pm4py.convert_to_event_log(df)
    stream1 = pm4py.convert_to_event_stream(log2)
    df2 = pm4py.convert_to_dataframe(log2)
    pm4py.write_xes(log2, "test_output_data/log.xes")
    os.remove("test_output_data/log.xes")
Example #4
def execute_script():
    log_path = os.path.join("..", "tests", "input_data",
                            "interval_event_log.csv")
    dataframe = pm4py.read_csv(log_path)
    log_path = os.path.join("..", "tests", "input_data", "reviewing.xes")
    log = pm4py.read_xes(log_path)
    dataframe = pm4py.convert_to_dataframe(log)
    parameters = {}
    #parameters[constants.PARAMETER_CONSTANT_START_TIMESTAMP_KEY] = "start_timestamp"
    parameters[constants.PARAMETER_CONSTANT_TIMESTAMP_KEY] = "time:timestamp"
    parameters[constants.PARAMETER_CONSTANT_ACTIVITY_KEY] = "concept:name"
    parameters[constants.PARAMETER_CONSTANT_CASEID_KEY] = "case:concept:name"
    parameters["strict"] = True
    parameters["format"] = "svg"
    start_activities = sa_get.get_start_activities(dataframe,
                                                   parameters=parameters)
    end_activities = ea_get.get_end_activities(dataframe,
                                               parameters=parameters)
    att_count = att_get.get_attribute_values(dataframe,
                                             "concept:name",
                                             parameters=parameters)
    parameters["start_activities"] = start_activities
    parameters["end_activities"] = end_activities
    soj_time = soj_time_get.apply(dataframe, parameters=parameters)
    print("soj_time")
    print(soj_time)
    conc_act = conc_act_get.apply(dataframe, parameters=parameters)
    print("conc_act")
    print(conc_act)
    efg = efg_get.apply(dataframe, parameters=parameters)
    print("efg")
    print(efg)
    dfg_freq, dfg_perf = df_statistics.get_dfg_graph(
        dataframe, measure="both", start_timestamp_key="start_timestamp")
    dfg_gv_freq = dfg_vis_fact.apply(dfg_freq,
                                     activities_count=att_count,
                                     variant=dfg_vis_fact.Variants.FREQUENCY,
                                     soj_time=soj_time,
                                     parameters=parameters)
    dfg_vis_fact.view(dfg_gv_freq)
    dfg_gv_perf = dfg_vis_fact.apply(dfg_perf,
                                     activities_count=att_count,
                                     variant=dfg_vis_fact.Variants.PERFORMANCE,
                                     soj_time=soj_time,
                                     parameters=parameters)
    dfg_vis_fact.view(dfg_gv_perf)
    net, im, fm = dfg_conv.apply(dfg_freq)
    gviz = pn_vis.apply(net, im, fm, parameters=parameters)
    pn_vis.view(gviz)
Example #5
def execute_script():
    ENABLE_VISUALIZATION = True

    # reads an XES file into an event log
    log1 = pm4py.read_xes("../tests/input_data/running-example.xes")

    # reads a CSV into a dataframe
    df = pd.read_csv("../tests/input_data/running-example.csv")
    # formats the dataframe with the mandatory columns for process mining purposes
    df = pm4py.format_dataframe(df,
                                case_id="case:concept:name",
                                activity_key="concept:name",
                                timestamp_key="time:timestamp")
    # converts the dataframe to an event log
    log2 = pm4py.convert_to_event_log(df)

    # converts the log read from XES into a stream and dataframe respectively
    stream1 = pm4py.convert_to_event_stream(log1)
    df2 = pm4py.convert_to_dataframe(log1)

    # writes log1 to an XES file
    pm4py.write_xes(log1, "ru1.xes")

    dfg, dfg_sa, dfg_ea = pm4py.discover_dfg(log1)
    petri_alpha, im_alpha, fm_alpha = pm4py.discover_petri_net_alpha(log1)
    petri_inductive, im_inductive, fm_inductive = pm4py.discover_petri_net_inductive(
        log1)
    petri_heuristics, im_heuristics, fm_heuristics = pm4py.discover_petri_net_heuristics(
        log1)
    tree_inductive = pm4py.discover_tree_inductive(log1)
    heu_net = pm4py.discover_heuristics_net(log1)

    pm4py.write_dfg(dfg, dfg_sa, dfg_ea, "ru_dfg.dfg")
    pm4py.write_petri_net(petri_alpha, im_alpha, fm_alpha, "ru_alpha.pnml")
    pm4py.write_petri_net(petri_inductive, im_inductive, fm_inductive,
                          "ru_inductive.pnml")
    pm4py.write_petri_net(petri_heuristics, im_heuristics, fm_heuristics,
                          "ru_heuristics.pnml")
    pm4py.write_process_tree(tree_inductive, "ru_inductive.ptml")

    dfg, dfg_sa, dfg_ea = pm4py.read_dfg("ru_dfg.dfg")
    petri_alpha, im_alpha, fm_alpha = pm4py.read_petri_net("ru_alpha.pnml")
    petri_inductive, im_inductive, fm_inductive = pm4py.read_petri_net(
        "ru_inductive.pnml")
    petri_heuristics, im_heuristics, fm_heuristics = pm4py.read_petri_net(
        "ru_heuristics.pnml")
    tree_inductive = pm4py.read_process_tree("ru_inductive.ptml")

    pm4py.save_vis_petri_net(petri_alpha, im_alpha, fm_alpha, "ru_alpha.png")
    pm4py.save_vis_petri_net(petri_inductive, im_inductive, fm_inductive,
                             "ru_inductive.png")
    pm4py.save_vis_petri_net(petri_heuristics, im_heuristics, fm_heuristics,
                             "ru_heuristics.png")
    pm4py.save_vis_process_tree(tree_inductive, "ru_inductive_tree.png")
    pm4py.save_vis_heuristics_net(heu_net, "ru_heunet.png")
    pm4py.save_vis_dfg(dfg, dfg_sa, dfg_ea, "ru_dfg.png")

    if ENABLE_VISUALIZATION:
        pm4py.view_petri_net(petri_alpha, im_alpha, fm_alpha, format="svg")
        pm4py.view_petri_net(petri_inductive,
                             im_inductive,
                             fm_inductive,
                             format="svg")
        pm4py.view_petri_net(petri_heuristics,
                             im_heuristics,
                             fm_heuristics,
                             format="svg")
        pm4py.view_process_tree(tree_inductive, format="svg")
        pm4py.view_heuristics_net(heu_net, format="svg")
        pm4py.view_dfg(dfg, dfg_sa, dfg_ea, format="svg")

    aligned_traces = pm4py.conformance_alignments(log1, petri_inductive,
                                                  im_inductive, fm_inductive)
    replayed_traces = pm4py.conformance_tbr(log1, petri_inductive,
                                            im_inductive, fm_inductive)

    fitness_tbr = pm4py.evaluate_fitness_tbr(log1, petri_inductive,
                                             im_inductive, fm_inductive)
    print("fitness_tbr", fitness_tbr)
    fitness_align = pm4py.evaluate_fitness_alignments(log1, petri_inductive,
                                                      im_inductive,
                                                      fm_inductive)
    print("fitness_align", fitness_align)
    precision_tbr = pm4py.evaluate_precision_tbr(log1, petri_inductive,
                                                 im_inductive, fm_inductive)
    print("precision_tbr", precision_tbr)
    precision_align = pm4py.evaluate_precision_alignments(
        log1, petri_inductive, im_inductive, fm_inductive)
    print("precision_align", precision_align)

    print("log start activities = ", pm4py.get_start_activities(log2))
    print("df start activities = ", pm4py.get_start_activities(df2))
    print("log end activities = ", pm4py.get_end_activities(log2))
    print("df end activities = ", pm4py.get_end_activities(df2))
    print("log attributes = ", pm4py.get_attributes(log2))
    print("df attributes = ", pm4py.get_attributes(df2))
    print("log org:resource values = ",
          pm4py.get_attribute_values(log2, "org:resource"))
    print("df org:resource values = ",
          pm4py.get_attribute_values(df2, "org:resource"))

    print("start_activities len(filt_log) = ",
          len(pm4py.filter_start_activities(log2, ["register request"])))
    print("start_activities len(filt_df) = ",
          len(pm4py.filter_start_activities(df2, ["register request"])))
    print("end_activities len(filt_log) = ",
          len(pm4py.filter_end_activities(log2, ["pay compensation"])))
    print("end_activities len(filt_df) = ",
          len(pm4py.filter_end_activities(df2, ["pay compensation"])))
    print(
        "attributes org:resource len(filt_log) (cases) cases = ",
        len(
            pm4py.filter_attribute_values(log2,
                                          "org:resource", ["Ellen"],
                                          level="case")))
    print(
        "attributes org:resource len(filt_log) (cases)  events = ",
        len(
            pm4py.filter_attribute_values(log2,
                                          "org:resource", ["Ellen"],
                                          level="event")))
    print(
        "attributes org:resource len(filt_df) (events) cases = ",
        len(
            pm4py.filter_attribute_values(df2,
                                          "org:resource", ["Ellen"],
                                          level="case")))
    print(
        "attributes org:resource len(filt_df) (events) events = ",
        len(
            pm4py.filter_attribute_values(df2,
                                          "org:resource", ["Ellen"],
                                          level="event")))
    print(
        "attributes org:resource len(filt_df) (events) events notpositive = ",
        len(
            pm4py.filter_attribute_values(df2,
                                          "org:resource", ["Ellen"],
                                          level="event",
                                          retain=False)))

    print("variants log = ", pm4py.get_variants(log2))
    print("variants df = ", pm4py.get_variants(df2))
    print(
        "variants filter log = ",
        len(
            pm4py.filter_variants(log2, [[
                "register request", "examine thoroughly", "check ticket",
                "decide", "reject request"
            ]])))
    print(
        "variants filter df = ",
        len(
            pm4py.filter_variants(df2, [[
                "register request", "examine thoroughly", "check ticket",
                "decide", "reject request"
            ]])))
    print("variants filter percentage = ",
          len(pm4py.filter_variants_percentage(log2, threshold=0.8)))

    print(
        "paths filter log len = ",
        len(
            pm4py.filter_directly_follows_relation(
                log2, [("register request", "examine casually")])))
    print(
        "paths filter dataframe len = ",
        len(
            pm4py.filter_directly_follows_relation(
                df2, [("register request", "examine casually")])))

    print(
        "timeframe filter log events len = ",
        len(
            pm4py.filter_time_range(log2,
                                    "2011-01-01 00:00:00",
                                    "2011-02-01 00:00:00",
                                    mode="events")))
    print(
        "timeframe filter log traces_contained len = ",
        len(
            pm4py.filter_time_range(log2,
                                    "2011-01-01 00:00:00",
                                    "2011-02-01 00:00:00",
                                    mode="traces_contained")))
    print(
        "timeframe filter log traces_intersecting len = ",
        len(
            pm4py.filter_time_range(log2,
                                    "2011-01-01 00:00:00",
                                    "2011-02-01 00:00:00",
                                    mode="traces_intersecting")))
    print(
        "timeframe filter df events len = ",
        len(
            pm4py.filter_time_range(df2,
                                    "2011-01-01 00:00:00",
                                    "2011-02-01 00:00:00",
                                    mode="events")))
    print(
        "timeframe filter df traces_contained len = ",
        len(
            pm4py.filter_time_range(df2,
                                    "2011-01-01 00:00:00",
                                    "2011-02-01 00:00:00",
                                    mode="traces_contained")))
    print(
        "timeframe filter df traces_intersecting len = ",
        len(
            pm4py.filter_time_range(df2,
                                    "2011-01-01 00:00:00",
                                    "2011-02-01 00:00:00",
                                    mode="traces_intersecting")))

    # remove the temporary files
    os.remove("ru1.xes")
    os.remove("ru_dfg.dfg")
    os.remove("ru_alpha.pnml")
    os.remove("ru_inductive.pnml")
    os.remove("ru_heuristics.pnml")
    os.remove("ru_inductive.ptml")
    os.remove("ru_alpha.png")
    os.remove("ru_inductive.png")
    os.remove("ru_heuristics.png")
    os.remove("ru_inductive_tree.png")
    os.remove("ru_heunet.png")
    os.remove("ru_dfg.png")
Example #6
def create_plotting_data(log, file_format, log_information):
    """
    Transforms a log so that it can be used more easily
    for plotting: removes unnecessary data, creates a
    dataframe from XES data, and renames columns

    input: XES/CSV log,
           file_format str,
           log_information django session dict

    output: pandas df with pm4py default names for attributes, in a two-timestamp (interval) format
    """
    if file_format == "csv":

        # Select only the Relevant columns of the Dataframe
        if log_information["log_type"] == "noninterval":

            # Keep only the columns required for plotting

            log = log[[
                "case:concept:name",
                xes.DEFAULT_TIMESTAMP_KEY,
                xes.DEFAULT_TRACEID_KEY,
            ]]

        elif log_information["log_type"] == "lifecycle":

            log = log[[
                "case:concept:name",
                xes.DEFAULT_TIMESTAMP_KEY,
                xes.DEFAULT_TRACEID_KEY,
                xes.DEFAULT_TRANSITION_KEY,
            ]]
            log = data_transform.transform_lifecycle_csv_to_interval_csv(log)

        elif log_information["log_type"] == "timestamp":

            log = log[[
                "case:concept:name",
                xes.DEFAULT_TIMESTAMP_KEY,
                xes.DEFAULT_TRACEID_KEY,
                xes.DEFAULT_START_TIMESTAMP_KEY,
            ]]
            log = log.rename(
                {
                    log_information["start_timestamp"]:
                    xes.DEFAULT_START_TIMESTAMP_KEY,
                    log_information["end_timestamp"]:
                    xes.DEFAULT_TIMESTAMP_KEY,
                },
                axis=1,
            )

    # XES input: convert the log object into a dataframe
    elif file_format == "xes":

        log = convert_to_dataframe(log)

        if log_information["log_type"] == "noninterval":

            log[log_information["timestamp"]] = pd.to_datetime(
                log[log_information["timestamp"]], utc=True)
            log = log[[
                "case:concept:name",
                xes.DEFAULT_TIMESTAMP_KEY,
                xes.DEFAULT_TRACEID_KEY,
            ]]

        # Transform the Timestamp to Datetime, and rename the lifecycle:transition column
        elif log_information["log_type"] == "lifecycle":

            # Convert the Timestamps to Datetime
            log[log_information["timestamp"]] = pd.to_datetime(
                log[log_information["timestamp"]], utc=True)

            # Rename the Columns to the XES defaults
            log = log.rename(
                {log_information["lifecycle"]: xes.DEFAULT_TRANSITION_KEY},
                axis=1)
            log = log[[
                "case:concept:name",
                xes.DEFAULT_TIMESTAMP_KEY,
                xes.DEFAULT_TRACEID_KEY,
                xes.DEFAULT_TRANSITION_KEY,
            ]]
            log = data_transform.transform_lifecycle_csv_to_interval_csv(log)

        elif log_information["log_type"] == "timestamp":

            # Convert the Timestamps to Datetime
            log[log_information["end_timestamp"]] = pd.to_datetime(
                log[log_information["end_timestamp"]], utc=True)
            log[log_information["start_timestamp"]] = pd.to_datetime(
                log[log_information["start_timestamp"]], utc=True)

            log = log[[
                "case:concept:name",
                xes.DEFAULT_TIMESTAMP_KEY,
                xes.DEFAULT_TRACEID_KEY,
                xes.DEFAULT_START_TIMESTAMP_KEY,
            ]]
            log = log.rename(
                {
                    log_information["start_timestamp"]:
                    xes.DEFAULT_START_TIMESTAMP_KEY,
                    log_information["end_timestamp"]:
                    xes.DEFAULT_TIMESTAMP_KEY,
                },
                axis=1,
            )

    return log
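
A hedged usage sketch of create_plotting_data (both csv_df and the session dictionary are hypothetical; csv_df is assumed to already carry the pm4py default column names, as expected by the "csv"/"noninterval" branch above).

# hypothetical pre-formatted dataframe and Django session information
log_information = {"log_type": "noninterval"}
plot_df = create_plotting_data(csv_df, "csv", log_information)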
Example #7
def lstm_algorithm(index, len_of_points, resample_dataset, LSTM_CELLS):

    #print('Current path: ',os.getcwd())
    if not os.path.exists('results'):
        os.makedirs('results')

    COLOR_CYCLE = ["#4286f4", "#f44174"]

    split_percentage = 0.8
    """                        
    answer = input('Give me the length of window: ')
    if answer == 'max':
        len_of_points = 0
    else:
        len_of_points = int(answer)
       
    #len_of_points = 3
    """

    fileinfo = {
        0: {
            'filename': 'base_kasteren.csv',
            'separator': ' ',
            'columns':
            ['date', 'time', 'attr1', 'attr2', 'state', 'concept:name']
        },
        1: {
            'filename':
            'activity3.csv',
            'separator':
            ',',
            'columns': [
                'id', 'case:concept:name', 'subjectID', 'attr_starttime',
                'time:timestamp', 'concept:name', 'label_subactivity'
            ]
        },
        2: {
            'filename': 'activitylog_uci_detailed_labour.xes',
            'separator': '',
            'columns': []
        },
        3: {
            'filename': 'atmo1.csv',
            'separator': ' ',
            'columns': ['date', 'time', 'concept:name', 'state', 'activity']
        },
        4: {
            'filename':
            'activity1.csv',
            'separator':
            ',',
            'columns': [
                'id', 'case:concept:name', 'subjectID', 'attr_starttime',
                'time:timestamp', 'concept:name', 'label_subactivity'
            ]
        },
        5: {
            'filename':
            'activity2.csv',
            'separator':
            ',',
            'columns': [
                'id', 'case:concept:name', 'subjectID', 'attr_starttime',
                'time:timestamp', 'concept:name', 'label_subactivity'
            ]
        },
        6: {
            'filename': 'espa.xes',
            'separator': ';',
            'columns': []
        },
        7: {
            'filename':
            'activity3.csv',
            'separator':
            ',',
            'columns': [
                'id', 'case:concept:name', 'subjectID', 'attr_starttime',
                'time:timestamp', 'concept:name', 'label_subactivity'
            ]
        },
        8: {
            'filename':
            'activity4.csv',
            'separator':
            ',',
            'columns': [
                'id', 'case:concept:name', 'subjectID', 'attr_starttime',
                'time:timestamp', 'concept:name', 'label_subactivity'
            ]
        },
        9: {
            'filename':
            'activity5.csv',
            'separator':
            ',',
            'columns': [
                'id', 'case:concept:name', 'subjectID', 'attr_starttime',
                'time:timestamp', 'concept:name', 'label_subactivity'
            ]
        },
        10: {
            'filename':
            'activity6.csv',
            'separator':
            ',',
            'columns': [
                'id', 'case:concept:name', 'subjectID', 'attr_starttime',
                'time:timestamp', 'concept:name', 'label_subactivity'
            ]
        },
        11: {
            'filename':
            'activity7.csv',
            'separator':
            ',',
            'columns': [
                'id', 'case:concept:name', 'subjectID', 'attr_starttime',
                'time:timestamp', 'concept:name', 'label_subactivity'
            ]
        },
        12: {
            'filename': 'BPI_Challenge_2017.xes',
            'separator': '',
            'columns': []
        },
    }

    #choose file

    filename = fileinfo[index]['filename']
    filepath = '../datasets/' + fileinfo[index]['filename']

    dataframe = pd.DataFrame()
    if not os.path.exists('results/' + filename):
        os.makedirs('results/' + filename)
    if not os.path.exists('results/' + filename + '/' + str(len_of_points) +
                          '/'):
        os.makedirs('results/' + filename + '/' + str(len_of_points) + '/')

    #if it is a csv file
    if (filename.find('.csv') != -1):
        #load file to dataframe
        dataframe = pd.read_csv(filepath,
                                sep=fileinfo[index]['separator'],
                                names=fileinfo[index]['columns'],
                                low_memory=False)
        #for the Kasteren dataset, prepare the mandatory columns
        if index in [0, 3, 12]:
            dataframe[
                'time:timestamp'] = dataframe['date'] + ' ' + dataframe['time']
            dataframe['case:concept:name'] = dataframe['date']
            #dataframe = dataframe[dataframe['concept:name']!='None']

        #print ("file is csv ")
        #print(dataframe.head(20))

        #drop nan

        #convert the dataframe to an event log
        log = pm4py.convert_to_event_log(dataframe)

    else:
        #the file is xes
        #import log
        #xes_importer.iterparse.Parameters.MAX_TRACES = 10
        #parameters = {xes_importer.iterparse.Parameters.MAX_TRACES: 50}
        #log = xes_importer.apply('datasets/BPI Challenge 2018.xes.gz', parameters=parameters)
        log = pm4py.read_xes(filepath)
        print(log)
        #convert to dataframe
        dataframe = pm4py.convert_to_dataframe(log)
        print(dataframe)
        #print(dataframe['time:timestamp'][0].replace(tzinfo=timezone.utc).astimezone(tz=None))
        #dataframe['time:timestamp'] = dataframe['time:timestamp'].dt.tz_convert(None)

    if index in [2, 12]:
        #process time:timestamp remove zone information
        dataframe['time:timestamp'] = dataframe[
            'time:timestamp'].dt.tz_convert(None)

    #del log
    print('Dataframe print\n', dataframe)
    #keep only 'complete' events based on lifecycle:transition; if the column does not exist, create it
    if 'lifecycle:transition' in dataframe.columns:

        dataframe = dataframe[dataframe['lifecycle:transition'] == 'complete']

    else:
        dataframe['lifecycle:transition'] = 'complete'

    #remove Start and End events
    dataframe = dataframe[dataframe['concept:name'] != 'Start']
    dataframe = dataframe[dataframe['concept:name'] != 'End']

    #sort by time
    if 'time:timestamp' in dataframe.columns:
        dataframe = dataframe.sort_values('time:timestamp')
    else:
        print('Error: no column time:timestamp in event log')

    #print('Sorted dataframe\n',dataframe)

    #plot time vs activity
    #fig, axes = plt.subplots(1, 1, figsize=(100, 100))
    #fig = dataframe.plot(x='time:timestamp', y='concept:name', kind="scatter").get_figure()
    #fig.savefig('results/'+filename+'/'+str(len_of_points) +'/conceptname.png', bbox_inches='tight')

    #plot time vs trace id
    #df.plot(x='col_name_1', y='col_name_2', style='o')
    #fig = dataframe.plot(x='time:timestamp', y='case:concept:name', kind="scatter").get_figure()
    #fig.savefig('results/'+filename+'/'+str(len_of_points) +'/caseconceptname.png', bbox_inches='tight')

    #keep only mandatory columns
    dataframe = dataframe[[
        'case:concept:name', 'concept:name', 'time:timestamp'
    ]]
    #convert sorted dataframe to log
    log = pm4py.convert_to_event_log(dataframe)

    #initial_df = dataframe.copy()
    #print('Initial dataframe\n',initial_df)
    #-----------------------------------------------------------------
    ############################################################
    #-------------- Resample -----------------------------------
    ###########################################################
    if resample_dataset:
        #preprocess timestamp to be prepared for resample
        #make time:timestamp datetime
        dataframe.loc[:, 'time:timestamp'] = pd.to_datetime(
            dataframe['time:timestamp'])
        #set time:timestamp as index
        dataframe = dataframe.set_index(["time:timestamp"])
        #remove duplicates
        #print('Duplicated\n')
        #print(dataframe[dataframe.index.duplicated()])
        dataframe = dataframe[~dataframe.index.duplicated(keep='first')]

        #------Resample dataframe every 5 minutes, backfilling missing values-------------
        dataframe = dataframe.resample("5T").fillna("backfill")
        print('Resample', dataframe)
        #print( dataframe.last())

    #save resampled dataframe to csv
    dataframe.to_csv('../datasets/resampled_sorted_df.csv')

    #dataframe is the initial event log sorted by time ('complete' events only)
    #convert sorted by time dataframe back to log (xes)
    #log = pm4py.convert_to_event_log(dataframe)

    #-----------------------------------save to csv-------------------------------------
    #uncomment only if you need it
    #dataframe.to_csv('datasets/activitylog_uci_detailed_labour.csv')

    #print('\nDataframe LOG\n',dataframe)

    #--------------- Concat activities of a trace in one row ---------------
    #concat events with same case:concept:name (space separated)
    print('dataframe\n', dataframe)
    df = dataframe.groupby('case:concept:name', sort=False).agg(
        {'concept:name': lambda x: ' '.join(x)})
    print('df\n', df)
    df = df.reset_index()
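    # Illustration (hypothetical trace): three events A, B and C sharing the same
    # case:concept:name collapse into a single row whose concept:name is "A B C"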
    if len_of_points:
        print('--------------------------------------')
        df['concept:name'] = df['concept:name'].apply(
            lambda x: list(x.split(' ')))
        df['concept:name'] = df['concept:name'].apply(
            lambda x:
            [x[i:i + len_of_points] for i in range(0, len(x), len_of_points)])
        df = df.set_index('case:concept:name')['concept:name'].apply(
            pd.Series).stack().reset_index(level=0).rename(
                columns={0: 'concept:name'})
        df['concept:name'] = df['concept:name'].apply(lambda x: ' '.join(x))
        print('\ndftest\n', df)
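        # Illustration (assumed len_of_points = 2): a concatenated trace
        # "A B C D E" is split into the sub-traces "A B", "C D" and "E",
        # each of which becomes its own row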

    df = df.reset_index()  #check here

    #del dataframe

    #print the activities of the log
    activities = pm4py.get_attribute_values(log, 'concept:name')
    print('\nActivities:\n', activities)

    #split data from event log - 80% for train and 20% for test

    #shuffle before split
    #df = shuffle(df)
    #print('df', df,'\n')

    #----------------------- Split Train and Test data ----------------------
    #split rows depending on percentage
    split_rows = int(df.shape[0] * split_percentage)
    print('Split Rows', split_rows, '\n')

    #train dataframe
    train_df = df[:split_rows]
    train_df.to_csv('train.csv')
    print('Train Rows', train_df, '\n')

    #test dataframe
    test_df = df[split_rows:]
    test_df.to_csv('test.csv')
    #print('Test Rows', test_df,'\n')

    # --------------------------------------------------------------

    #data = df['concept:name'].copy().to_list()
    data = train_df['concept:name'].copy().to_list()
    #Just for Eating/Drinking
    #data = data.replace('Eating/Drinking','EatDrink')
    #print('Data\n',data)

    tokenizer = Tokenizer()
    #reads the words in data and assigns an index to every word based on frequency
    tokenizer.fit_on_texts([data])
    print('Word index: ')
    print(tokenizer.word_index)

    #replace every word in the text with its corresponding word index - returns a list of lists with one element, so use [0] to get the first (and only) list
    encoded = tokenizer.texts_to_sequences([data])[0]
    #print('encoded: \n')
    #print(encoded)
    vocab_size = len(tokenizer.word_index) + 1
    print('Vocabulary Size: %d' % vocab_size)
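    # Illustration (hypothetical single-activity rows): with
    # data = ['cooking', 'sleeping', 'cooking'], fit_on_texts builds
    # word_index = {'cooking': 1, 'sleeping': 2} (most frequent first) and
    # texts_to_sequences([data])[0] yields the encoding [1, 2, 1]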
    #print('list\n',[e for e in encoded])
    #print('Min ',min([len(e) for e in encoded]))

    # LSTM 3 timesteps - prepare data - encode 2 words -> 1 word
    sequences = list()
    for i in range(n_input, len(encoded)):
        sequence = encoded[i - n_input:i + 1]
        sequences.append(sequence)
    print('Total Sequences: %d' % len(sequences))
    print('Sequences: \n')
    print(sequences)
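    # Illustration (assumed n_input = 2): encoded = [5, 3, 8, 2] yields the
    # windows [5, 3, 8] and [3, 8, 2]; in each window the first n_input
    # entries are the model inputs and the last entry is the word to predict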

    max_length = max([len(seq) for seq in sequences])  #max_length is 3
    # Pad sequence to be of the same length
    # length of sequence must be 3 (maximum)
    # 'pre' or 'post': pad either before or after each sequence
    sequences = pad_sequences(sequences, maxlen=max_length, padding='pre')
    print('Max Sequence Length: %d' % max_length)

    #convert list to array to get X,y train
    sequences = array(sequences)
    X, y = sequences[:, :-1], sequences[:, -1]
    print('X: \n')
    print(X)
    print('y: \n')
    print(y)

    #convert y to binary vectors
    y = to_categorical(y, num_classes=vocab_size)
    print('y: \n')
    print(y)

    #test data
    test_data = test_df['concept:name'].copy().to_list()

    test_encoded = tokenizer.texts_to_sequences([test_data])[0]

    test_sequences = list()

    for i in range(n_input, len(test_encoded)):
        test_sequence = test_encoded[i - n_input:i + 1]
        test_sequences.append(test_sequence)
    max_length = max([len(seq) for seq in test_sequences])
    test_sequences = pad_sequences(test_sequences,
                                   maxlen=max_length,
                                   padding='pre')

    test_sequences = array(test_sequences)
    test_X, test_y = test_sequences[:, :-1], test_sequences[:, -1]

    #convert y to binary vectors
    test_yl = to_categorical(test_y, num_classes=vocab_size)

    model = Sequential()
    #the first layer
    # - the largest integer (i.e. word index) in the input should be no larger than vocabulary size
    # - The Embedding layer is initialized with random weights and will learn an embedding for all of the words in the training dataset.
    # - output_dim (50): This is the size of the vector space in which words will be embedded (size of the embedding vectors). It defines the size of the output vectors from this layer for each word. For example, it could be 32 or 100 or even larger. Test different values for your problem.
    # - input_length: This is the length of the input sequences (here, max_length - 1)
    # The Embedding layer has weights that are learned. If you save your model to file, this will include weights for the Embedding layer.
    # The output of the Embedding layer is a 2D vector with one embedding for each word in the input sequence of words (input document).
    # If you wish to connect a Dense layer directly to an Embedding layer, you must first flatten the 2D output matrix to a 1D vector using the Flatten layer.

    model.add(
        Embedding(vocab_size + 1, LSTM_CELLS, input_length=max_length - 1))
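    # Shape illustration (assumed sizes): with vocab_size = 10, LSTM_CELLS = 50
    # and max_length - 1 = 2, a batch of word-index sequences of shape
    # (batch, 2) is embedded into a tensor of shape (batch, 2, 50), which the
    # LSTM layer below then consumes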

    model.add(LSTM(vocab_size))
    model.add(Dropout(0.1))
    model.add(Dense(vocab_size, activation='softmax'))
    opt = Adam(learning_rate=0.001)
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    history = model.fit(X,
                        y,
                        epochs=500,
                        verbose=0,
                        batch_size=20,
                        validation_data=(test_X, test_yl))

    print(model.summary())
    model.save('lstm_model.h5')  # creates a HDF5 file

    #del model  # deletes the existing model

    #predict sequence of n_words activities
    def generate_seq(model, tokenizer, max_length, seed_text, n_words):
        #get input activity
        in_text = seed_text
        #print('in_text',in_text,'\n')
        #for the number of activities in the sequence you want to predict
        for _ in range(n_words):
            encoded = tokenizer.texts_to_sequences([in_text])[0]
            #pad if less than max text length
            encoded = pad_sequences([encoded],
                                    maxlen=max_length,
                                    padding='pre')
            #print('in text ',in_text)
            #predict one activity
            #yhat = model.predict_classes(encoded, verbose=0)
            yhat = np.argmax(model.predict(encoded), axis=-1)
            out_word = ''
            for word, index in tokenizer.word_index.items():
                #convert predicted activity to word
                if index == yhat:
                    #print('Word',word,'\n')
                    out_word = word
                    break
            #feed the next input with the sequence of activities
            in_text += ' ' + out_word

        return in_text
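    # Usage illustration (hypothetical seed activity): generate_seq(model,
    # tokenizer, max_length - 1, 'cooking', 2) returns the seed string
    # extended with the two activities the model predicts next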

    #load trained model
    #model = load_model('lstm_model.h5')

    # Evaluate network
    print('LSTM Network Evaluation:\n')
    train_score = model.evaluate(X, y, verbose=0)
    print('Train Score\n', train_score)
    score = model.evaluate(test_X, test_yl, verbose=0)
    print('Test Score\n')
    print(score)

    print('History\n')
    print(history.history.keys())
    # plot loss during training
    fig = plt.figure()
    plt.subplot(211)
    plt.title('Loss')
    plt.plot(history.history['loss'], label='train')
    plt.plot(history.history['val_loss'], label='test')
    plt.legend()
    fig.savefig('results/' + filename + '/' + str(len_of_points) + '/Loss.png',
                bbox_inches='tight')
    # plot accuracy during training
    fig = plt.figure()
    plt.subplot(212)
    plt.title('Accuracy')
    plt.plot(history.history['accuracy'], label='train')
    plt.plot(history.history['val_accuracy'], label='test')
    plt.legend()
    plt.show()
    fig.savefig('results/' + filename + '/' + str(len_of_points) +
                '/Accuracy.png',
                bbox_inches='tight')

    print('LSTM Results: ')
    print('\n')
    #generated_text = ''
    #sequence prediction
    for i in tokenizer.word_index:
        #print(tokenizer.index_word)
        w = generate_seq(model, tokenizer, max_length - 1, i, n_input + 1)
        #generated_text = generated_text.join('\n'+w)
        print(w)

    print('LSTM Results: ')
    print('\n')
    #for i in tokenizer.word_index:
    #	print(generate_seq(model, tokenizer, max_length-1, i , 1))
    all_data = df['concept:name'].copy().to_list()

    all_encoded = tokenizer.texts_to_sequences([all_data])[0]

    all_sequences = list()

    for i in range(n_input, len(all_encoded)):
        all_sequence = all_encoded[i - n_input:i + 1]
        all_sequences.append(all_sequence)
    max_length = max([len(seq) for seq in all_sequences])
    all_sequences = pad_sequences(all_sequences,
                                  maxlen=max_length,
                                  padding='pre')

    all_sequences = array(all_sequences)
    all_X, all_y = all_sequences[:, :-1], all_sequences[:, -1]

    #convert y to binary vectors
    all_yl = to_categorical(all_y, num_classes=vocab_size)

    #load trained model
    #model = load_model('lstm_model.h5')

    #print('Tokenizer \n',tokenizer)
    print('Tokenizer word index\n', tokenizer.word_index)

    np.set_printoptions(suppress=True)
    cnt = 0
    for i in range(len(all_X)):
        #yhat = model.predict_classes(all_X[i].reshape(1,2,1), verbose=0)
        yhat = np.argmax(model.predict(all_X[i].reshape(1, n_input, 1)),
                         axis=-1)
        df.loc[i, 'X_input'] = str(all_X[i])
        df.loc[i, 'Expected'] = all_y[i]
        df.loc[i, 'predicted'] = yhat

        #print('Expected:', all_y[i] , 'Predicted', yhat)
        prob = model.predict(all_X[i].reshape(1, n_input, 1))[0]
        df.loc[i,
               'probabilities'] = ' '.join([str(elem) for elem in list(prob)])
        if (all_y[i] == yhat):
            df.loc[i, 'result'] = 'ok'
            cnt += 1
        else:
            df.loc[i, 'result'] = 'Error'

    #print(df['predicted'].replace(tokenizer.word_index))
    df.to_csv('results/' + filename + '/' + str(len_of_points) + '/resample_' +
              str(resample_dataset) + '_lstm.csv')
    print('Total successful: ', cnt, ' out of ', len(all_X), 'Percentage: ',
          cnt / len(all_X))

    # predict probabilities for test set
    yhat_probs = model.predict(test_X, verbose=0)
    # predict crisp classes for test set
    yhat_classes = np.argmax(model.predict(test_X, verbose=0), axis=-1)
    print('yhat_classes\n', yhat_classes)
    # reduce to 1d array
    #yhat_probs = yhat_probs[:, 0]
    #yhat_classes = yhat_classes[:, 0]

    # accuracy: (tp + tn) / (p + n)
    accuracy = accuracy_score(test_y, yhat_classes)
    print('Accuracy: %f' % accuracy)
    # precision tp / (tp + fp)
    precision = precision_score(test_y, yhat_classes, average='weighted')
    print('Precision: %f' % precision)
    # recall: tp / (tp + fn)
    recall = recall_score(test_y, yhat_classes, average='weighted')
    print('Recall: %f' % recall)
    # f1: 2 tp / (2 tp + fp + fn)
    f1 = f1_score(test_y, yhat_classes, average='weighted')
    print('F1 score: %f' % f1)

    # kappa
    kappa = cohen_kappa_score(test_y, yhat_classes)
    print('Cohens kappa: %f' % kappa)
    # ROC AUC
    #auc = roc_auc_score(test_y, yhat_probs,multi_class='ovr')
    #print('ROC AUC: %f' % auc)
    # confusion matrix
    matrix = confusion_matrix(test_y, yhat_classes)
    print(matrix)
    fig = plt.figure()
    sns.heatmap(matrix, center=True)
    plt.show()
    fig.savefig('results/' + filename + '/' + str(len_of_points) +
                '/ConfusionMatrix.png',
                bbox_inches='tight')

    #headers
    #filename - resample - len of points - train loss + Accuracy - test score
    #write results to csv
    fd = open("total_results.csv", "a+")
    row = filename + '\t' + str(resample_dataset) + '\t' + str(
        len_of_points
    ) + '\t' + str(train_score[0]) + '\t' + str(train_score[1]) + '\t' + str(
        score[0]) + '\t' + str(score[1]) + '\t' + str(accuracy) + '\t' + str(
            precision) + '\t' + str(recall) + '\t' + str(f1) + '\t' + str(
                kappa) + '\t' + '' + '\t' + json.dumps(
                    tokenizer.word_index) + '\n'
    fd.write(row)
    fd.close()