Esempio n. 1
0
def plotdraw(mode, case, writeoutput):
    """Draw plots for every graph/weight-method/scenario combination on disk.

    Walks the weightdata directory tree (significance type, then embedding
    type) and calls drawplot for each data directory found.
    """
    graphdata = GraphData(mode, case)

    # Make sure the top-level graphs output directory exists
    config_setup.ensure_existence(os.path.join(graphdata.saveloc, "graphs"), make=True)

    for graph in graphdata.graphs:
        graphdata.graphdetails(graph)
        for weight_method in graphdata.weight_methods:
            print(weight_method)
            for scenario in graphdata.scenarios:
                print(scenario)
                basedir = os.path.join(
                    graphdata.saveloc, "weightdata", case, scenario, weight_method
                )

                # First directory level below basedir: significance test types
                _, sigtypes, _ = next(os.walk(basedir))
                relevant_sigtypes = [
                    sigtype
                    for sigtype in sigtypes
                    if sigtype in graphdata.significance_cases
                ]

                for sigtype in relevant_sigtypes:
                    print(sigtype)
                    embedtypesdir = os.path.join(basedir, sigtype)
                    # Second directory level: embedding types
                    _, embedtypes, _ = next(os.walk(embedtypesdir))
                    for embedtype in embedtypes:
                        print(embedtype)
                        datadir = os.path.join(embedtypesdir, embedtype)
                        # Actual plot drawing execution starts here
                        drawplot(graphdata, scenario, datadir, graph, writeoutput)

    return None
Esempio n. 2
0
def drawplot(graphdata, scenario, datadir, graph, writeoutput):
    """Render a single plot into the mirrored 'graphs' directory tree.

    The save directory is derived from datadir by swapping its 'weightdata'
    path component for 'graphs'; the actual plotting routine is looked up by
    name (graphdata.plot_type) on the figtypes module.
    """
    # Rebuild the path with 'weightdata' replaced by 'graphs'
    dirparts = data_processing.getfolders(datadir)
    dirparts[dirparts.index("weightdata")] = "graphs"
    # os.path.join(*parts) replaces the manual incremental-join loop
    savedir = os.path.join(*dirparts)

    config_setup.ensure_existence(savedir)
    # Dispatch to the plotting function named by the configured plot type
    getattr(figtypes, graphdata.plot_type)(graphdata, graph, scenario, savedir)

    return None
Esempio n. 3
0
def drawplot(graphdata, scenario, datadir, graph, writeoutput):
    """Plot a single graph, saving output under the mirrored 'graphs' tree."""
    # Swap the 'weightdata' path component for 'graphs' to obtain the
    # save location corresponding to this data directory
    folders = data_processing.getfolders(datadir)
    folders[folders.index('weightdata')] = 'graphs'
    savedir = os.path.join(*folders)

    config_setup.ensure_existence(savedir)
    # Look up the plotting routine by its configured name and invoke it
    plotfunction = getattr(figtypes, graphdata.plot_type)
    plotfunction(graphdata, graph, scenario, savedir)

    return None
Esempio n. 4
0
    def filename(weightname, boxindex, causevar):
        """Build the CSV path for one causevar's weights inside a box directory."""
        # Boxes use zero-padded three-digit directory names
        boxdir = config_setup.ensure_existence(
            os.path.join(weightstoredir, weightname, 'box{:03d}'.format(boxindex)),
            make=True)

        return os.path.join(boxdir, '{}.csv'.format(causevar))
Esempio n. 5
0
    def filename(weightname, boxindex, causevar):
        """Return the full path of the weights CSV for causevar in boxindex."""
        boxstring = "box{:03d}".format(boxindex)
        csvname = "{}.csv".format(causevar)

        # Ensure the per-weight, per-box directory exists before returning a path
        filedir = config_setup.ensure_existence(
            os.path.join(weightstoredir, weightname, boxstring), make=True
        )

        return os.path.join(filedir, csvname)
Esempio n. 6
0
def calc_weights(weightcalcdata, method, scenario, writeoutput):
    """Determines the maximum weight between two variables by searching through
    a specified set of delays.

    Parameters
    ----------
        method : str
        Can be one of the following:
        'cross_correlation'
        'partial_correlation' -- does not support time delays
        'transfer_entropy_kernel'
        'transfer_entropy_kraskov'

    Raises
    ------
        ValueError
            If the method string is not recognized.

    TODO: Fix partial correlation method to make use of time delays

    """

    # Select the weight calculator implementation for the requested method
    if method == "cross_correlation":
        weightcalculator = CorrWeightcalc(weightcalcdata)
    elif method == "transfer_entropy_kernel":
        weightcalculator = TransentWeightcalc(weightcalcdata, "kernel")
    elif method == "transfer_entropy_kraskov":
        weightcalculator = TransentWeightcalc(weightcalcdata, "kraskov")
    elif method == "transfer_entropy_discrete":
        weightcalculator = TransentWeightcalc(weightcalcdata, "discrete")
    # elif method == 'partial_correlation':
    #     weightcalculator = PartialCorrWeightcalc(weightcalcdata)
    else:
        raise ValueError("Method not recognized")

    # sigtest is used as a boolean flag; a plain conditional expression
    # replaces the redundant if/elif pair
    sigstatus = "sigtested" if weightcalcdata.sigtest else "nosigtest"

    # Auto-embedding is only relevant for the Kraskov transfer entropy method
    embedstatus = "naive"
    if method == "transfer_entropy_kraskov":
        if weightcalcdata.additional_parameters["auto_embed"]:
            embedstatus = "autoembedding"

    vardims = len(weightcalcdata.variables)
    startindex = weightcalcdata.startindex
    size = weightcalcdata.testsize

    # Indexes excluded as causes (columns) or affected variables (rows)
    cause_dellist = []
    affected_dellist = []
    for index in range(vardims):
        if index not in weightcalcdata.causevarindexes:
            cause_dellist.append(index)
            logging.info("Deleted column " + str(index))
        if index not in weightcalcdata.affectedvarindexes:
            affected_dellist.append(index)
            logging.info("Deleted row " + str(index))

    if weightcalcdata.connections_used:
        # Copy before zeroing entries below; previously the zeroing loops
        # mutated weightcalcdata.connectionmatrix in place (bug fix)
        newconnectionmatrix = np.array(weightcalcdata.connectionmatrix)
    else:
        newconnectionmatrix = np.ones((vardims, vardims))
    # Substitute columns not used with zeros in connectionmatrix
    for cause_delindex in cause_dellist:
        newconnectionmatrix[:, cause_delindex] = np.zeros(vardims)
    # Substitute rows not used with zeros in connectionmatrix
    for affected_delindex in affected_dellist:
        newconnectionmatrix[affected_delindex, :] = np.zeros(vardims)

    # Initiate headerline for weightstore file
    # Create "Delay" as header for first row
    headerline = ["Delay"]
    for affectedvarindex in weightcalcdata.affectedvarindexes:
        headerline.append(weightcalcdata.variables[affectedvarindex])

    # Store the weight calculation results in similar format as original data

    # Define weightstoredir up to the method level
    weightstoredir = config_setup.ensure_existence(
        os.path.join(
            weightcalcdata.saveloc,
            "weightdata",
            weightcalcdata.casename,
            scenario,
            method,
            sigstatus,
            embedstatus,
        ),
        make=True,
    )

    # Define filename structure for CSV file containing weights between
    # a specific causevar and all the subsequent affectedvars
    def filename(weightname, boxindex, causevar):
        boxstring = "box{:03d}".format(boxindex)
        filedir = config_setup.ensure_existence(
            os.path.join(weightstoredir, weightname, boxstring), make=True
        )
        return os.path.join(filedir, "{}.csv".format(causevar))

    if weightcalcdata.single_entropies:
        # Initiate headerline for single signal entropies storage file
        signalent_headerline = weightcalcdata.variables

        signalentstoredir = config_setup.ensure_existence(
            os.path.join(weightcalcdata.saveloc, "signal_entropies"), make=True
        )
        signalent_filename_template = os.path.join(
            signalentstoredir, "{}_{}_{}_box{:03d}.csv"
        )

        # Define filename structure for single-entropy CSV files
        def signalent_filename(name, boxindex):
            return signalent_filename_template.format(
                weightcalcdata.casename, scenario, name, boxindex
            )

    for boxindex in weightcalcdata.boxindexes:
        box = weightcalcdata.boxes[boxindex]

        # Calculate single signal entropies - do not worry about
        # delays, but still do it according to different boxes
        if weightcalcdata.single_entropies:
            # Calculate single signal entropies of all variables
            # and save output in similar format to
            # standard weight calculation results
            signalentlist = []
            for varindex, _ in enumerate(weightcalcdata.variables):
                vardata = box[:, varindex][startindex : startindex + size]
                entropy = data_processing.calc_signalent(vardata, weightcalcdata)
                signalentlist.append(entropy)

            # One file per box, each with a single row, since no delays are
            # involved for single-signal entropies. Add an axis so that
            # writecsv_weightcalc receives a sequence of rows.
            signalentarray = np.asarray(signalentlist)[np.newaxis, :]

            writecsv_weightcalc(
                signalent_filename("signal_entropy", boxindex + 1),
                signalentarray,
                signalent_headerline,
            )

        # Start parallelising code here
        # Create one process for each causevarindex

        ###########################################################

        non_iter_args = [
            weightcalcdata,
            weightcalculator,
            box,
            startindex,
            size,
            newconnectionmatrix,
            method,
            boxindex,
            filename,
            headerline,
            writeoutput,
        ]

        # Run the script that will handle multiprocessing
        gaincalc_oneset.run(non_iter_args, weightcalcdata.do_multiprocessing)

        ########################################################

    return None
Esempio n. 7
0
def dorankcalc(noderankdata, scenario, datadir, typename, rank_method,
               writeoutput, preprocessing):
    """Rank nodes for each box gain matrix and store the results.

    Results mirror the weightdata directory structure under the noderank
    directory. When more than one box is present, difference ("dif_") gain
    matrices are ranked as well.

    Raises
    ------
        ValueError
            If noderankdata.datatype is neither 'file' nor 'function'.
    """
    if noderankdata.datatype == 'file':
        noderankdata.get_boxes(scenario, datadir, typename)
        gainmatrices = get_gainmatrices(noderankdata, datadir, typename)
        delaymatrices = get_delaymatrices(noderankdata, datadir, typename)
    elif noderankdata.datatype == 'function':
        gainmatrices = [noderankdata.gainmatrix]
        delaymatrices = [np.zeros_like(noderankdata.gainmatrix)]
        noderankdata.boxes = [0]
    else:
        # Previously an unrecognised datatype fell through to a NameError
        raise ValueError(
            'Unrecognised datatype: {}'.format(noderankdata.datatype))

    # Difference rankings are only meaningful with more than one box
    generate_diffs = len(noderankdata.boxes) > 1

    rankinglist_name = 'rankinglist_{}.csv'
    modgainmatrix_name = 'modgainmatrix.csv'
    originalgainmatrix_name = 'originalgainmatrix.csv'
    graphfile_name = 'graph_{}.gml'
    transientdict_name = 'transientdict_{}.json'
    basevaldict_name = 'basevaldict_{}.json'
    boxrankdict_name = 'boxrankdict_{}.json'
    rel_boxrankdict_name = 'rel_boxrankdict_{}.json'

    if generate_diffs:
        dif_rankinglist_name = 'dif_rankinglist_{}.csv'
        dif_transientdict_name = 'dif_transientdict_{}.json'
        dif_basevaldict_name = 'dif_basevaldict_{}.json'
        dif_boxrankdict_name = 'dif_boxrankdict_{}.json'
        dif_rel_boxrankdict_name = 'dif_rel_boxrankdict_{}.json'
        dif_typename = 'dif_' + typename
        dif_gainmatrices = get_gainmatrices(noderankdata, datadir, dif_typename)

    # Backward ranking dictionaries accumulated over boxes; the transient
    # importance calculation below re-runs on the history collected so far
    backward_rankingdicts = []
    dif_backward_rankingdicts = []

    for index, gainmatrix in enumerate(gainmatrices):
        if preprocessing:
            modgainmatrix, _ = \
                gainmatrix_preprocessing(gainmatrix)
        else:
            modgainmatrix = gainmatrix

        if generate_diffs:
            dif_gainmatrix = dif_gainmatrices[index]
            # Take only positive values for now due to convergence issues
            # TODO: Investigate proper handling of negative edge changes
            mod_dif_gainmatrix = dif_gainmatrix_preprocessing(dif_gainmatrix)

        delays = delaymatrices[index]

        # Fixed dummy weight used by the ranking algorithm
        dummyweight = 10

        # This is where the actual ranking calculation happens
        rankingdict, rankinglist, connections, variables, gains = \
            calc_gainrank(modgainmatrix, noderankdata,
                          rank_method, dummyweight)
        backward_rankingdicts.append(rankingdict)

        if generate_diffs:
            # TODO: Review effect of positive differences only
            dif_rankingdict, dif_rankinglist, _, _, _ = \
                calc_gainrank(mod_dif_gainmatrix, noderankdata,
                              rank_method, dummyweight)
            dif_backward_rankingdicts.append(dif_rankingdict)

        # The rest of the loop stores the results in the desired formats.

        # Take datadir and swop out 'weightdata' for 'noderank'.
        # NOTE: savedir is needed below for savepath regardless of
        # writeoutput, so it is computed unconditionally (it was previously
        # only assigned when writeoutput was True, raising NameError
        # for writeoutput=False).
        savedir = data_processing.change_dirtype(
            datadir, 'weightdata', 'noderank')
        # typename[:-7] strips the trailing '_arrays' suffix
        config_setup.ensure_existence(os.path.join(savedir, typename[:-7]))

        if writeoutput and preprocessing:
            # Save the modified gainmatrix
            writecsv_looprank(
                os.path.join(savedir, typename[:-7],
                             modgainmatrix_name),
                modgainmatrix)
            # Save the original gainmatrix
            writecsv_looprank(
                os.path.join(savedir, typename[:-7],
                             originalgainmatrix_name),
                gainmatrix)

        if noderankdata.dummies:
            dummystatus = 'withdummies'
        else:
            dummystatus = 'nodummies'

        # Save the ranking list for each box
        savepath = config_setup.ensure_existence(
            os.path.join(savedir, typename[:-7],
                         'box{:03d}'.format(
                             noderankdata.boxes[index] + 1), dummystatus))

        writecsv_looprank(
            os.path.join(savepath,
                         rankinglist_name.format(rank_method)),
            rankinglist)

        if generate_diffs:
            writecsv_looprank(
                os.path.join(savepath,
                             dif_rankinglist_name.format(rank_method)),
                dif_rankinglist)

        # Export graph files with dummy variables included
        # in backward rankings if available
        graph, _ = \
            create_importance_graph(noderankdata,
                variables, connections, connections, gains, delays,
                rankingdict)
        graph_filename = \
            os.path.join(savepath,
                         graphfile_name.format(rank_method))

        # Decided to keep connections natural, will rotate hierarchical layout
        # in post-processing
        nx.readwrite.write_gml(graph, graph_filename)

        # Get ranking dictionaries (recomputed each box over the history)
        transientdict, basevaldict, boxrankdict, rel_boxrankdict = \
            calc_transient_importancediffs(
                backward_rankingdicts,
                noderankdata.variablelist)

        if generate_diffs:
            # Get ranking dictionaries for difference gain arrays
            dif_transientdict, dif_basevaldict, dif_boxrankdict, \
                dif_rel_boxrankdict = calc_transient_importancediffs(
                    dif_backward_rankingdicts,
                    noderankdata.variablelist)

        # Store dictionaries using JSON

        # Normal dictionaries
        data_processing.write_dictionary(
            os.path.join(savepath, boxrankdict_name.format(rank_method)),
            boxrankdict)
        data_processing.write_dictionary(
            os.path.join(savepath, rel_boxrankdict_name.format(rank_method)),
            rel_boxrankdict)
        data_processing.write_dictionary(
            os.path.join(savepath, transientdict_name.format(rank_method)),
            transientdict)
        data_processing.write_dictionary(
            os.path.join(savepath, basevaldict_name.format(rank_method)),
            basevaldict)

        if generate_diffs:
            # Difference dictionaries
            data_processing.write_dictionary(
                os.path.join(savepath, dif_boxrankdict_name.format(rank_method)),
                dif_boxrankdict)
            data_processing.write_dictionary(
                os.path.join(savepath, dif_rel_boxrankdict_name.format(rank_method)),
                dif_rel_boxrankdict)
            data_processing.write_dictionary(
                os.path.join(savepath, dif_transientdict_name.format(rank_method)),
                dif_transientdict)
            data_processing.write_dictionary(
                os.path.join(savepath, dif_basevaldict_name.format(rank_method)),
                dif_basevaldict)

    return None
Esempio n. 8
0
    """Ranks the nodes in a network based on gain matrices already generated
    for different weight types.

    The results are stored in the noderank directory but retains the structure
    of the weightdata directory

    Notes
    -----
        Preprocessing is experimental and should always be set to False

    """
    noderankdata = NoderankData(mode, case)

    # Create output directory
    config_setup.ensure_existence(
        os.path.join(noderankdata.saveloc,
                     'noderank'), make=True)

    for scenario in noderankdata.scenarios:

        logging.info("Running scenario {}".format(scenario))
        # Update scenario-specific fields of noderankdata object
        noderankdata.scenariodata(scenario)

        for rank_method in noderankdata.rank_methods:
            for weight_method in noderankdata.weight_methods:
                
                basedir = os.path.join(noderankdata.saveloc, 'weightdata',
                       case, scenario, weight_method)

                if noderankdata.datatype == 'file':
Esempio n. 9
0
def calc_weights(weightcalcdata, method, scenario, writeoutput):
    """Determines the maximum weight between two variables by searching through
    a specified set of delays.

    Parameters
    ----------
        method : str
        Can be one of the following:
        'cross_correlation'
        'partial_correlation' -- does not support time delays
        'transfer_entropy_kernel'
        'transfer_entropy_kraskov'

    TODO: Fix partial correlation method to make use of time delays

    """

    # Select the weight calculator implementation for the requested method
    if method == 'cross_correlation':
        weightcalculator = CorrWeightcalc(weightcalcdata)
    elif method == 'transfer_entropy_kernel':
        weightcalculator = TransentWeightcalc(weightcalcdata, 'kernel')
    elif method == 'transfer_entropy_kraskov':
        weightcalculator = TransentWeightcalc(weightcalcdata, 'kraskov')
    elif method == 'transfer_entropy_discrete':
        weightcalculator = TransentWeightcalc(weightcalcdata, 'discrete')
    elif method == 'partial_correlation':
        weightcalculator = PartialCorrWeightcalc(weightcalcdata)
    # NOTE(review): no final else branch -- an unrecognised method string
    # leaves weightcalculator unbound and fails later with NameError;
    # consider raising ValueError here as the newer variant of this
    # function does.

    # sigstatus becomes a component of the output directory path below
    if weightcalcdata.sigtest:
        sigstatus = 'sigtested'
    elif not weightcalcdata.sigtest:
        sigstatus = 'nosigtest'

    # Auto-embedding only applies to the Kraskov transfer entropy method
    if method == 'transfer_entropy_kraskov':
        if weightcalcdata.additional_parameters['auto_embed']:
            embedstatus = 'autoembedding'
        else:
            embedstatus = 'naive'
    else:
        embedstatus = 'naive'

    vardims = len(weightcalcdata.variables)
    startindex = weightcalcdata.startindex
    size = weightcalcdata.testsize

    # Collect indexes excluded as causes (columns) or effects (rows)
    cause_dellist = []
    affected_dellist = []
    for index in range(vardims):
        if index not in weightcalcdata.causevarindexes:
            cause_dellist.append(index)
            logging.info("Deleted column " + str(index))
        if index not in weightcalcdata.affectedvarindexes:
            affected_dellist.append(index)
            logging.info("Deleted row " + str(index))

    # NOTE(review): the zeroing loops below modify this matrix in place;
    # when connections_used is True this mutates
    # weightcalcdata.connectionmatrix itself -- consider copying first.
    if weightcalcdata.connections_used:
        newconnectionmatrix = weightcalcdata.connectionmatrix
    else:
        newconnectionmatrix = np.ones((vardims, vardims))
    # Substitute columns not used with zeros in connectionmatrix
    for cause_delindex in cause_dellist:
        newconnectionmatrix[:, cause_delindex] = np.zeros(vardims)
    # Substitute rows not used with zeros in connectionmatrix
    for affected_delindex in affected_dellist:
        newconnectionmatrix[affected_delindex, :] = np.zeros(vardims)

    # Initiate headerline for weightstore file
    # Create "Delay" as header for first row
    headerline = ['Delay']
    for affectedvarindex in weightcalcdata.affectedvarindexes:
        affectedvarname = weightcalcdata.variables[affectedvarindex]
        headerline.append(affectedvarname)

    # Define filename structure for CSV file containing weights between
    # a specific causevar and all the subsequent affectedvars
    # (weightstoredir is bound below, before this closure is first called)
    def filename(weightname, boxindex, causevar):
        boxstring = 'box{:03d}'.format(boxindex)

        filedir = config_setup.ensure_existence(os.path.join(
            weightstoredir, weightname, boxstring),
                                                make=True)

        filename = '{}.csv'.format(causevar)

        return os.path.join(filedir, filename)

    # Store the weight calculation results in similar format as original data

    # Define weightstoredir up to the method level
    weightstoredir = config_setup.ensure_existence(os.path.join(
        weightcalcdata.saveloc, 'weightdata', weightcalcdata.casename,
        scenario, method, sigstatus, embedstatus),
                                                   make=True)

    if weightcalcdata.single_entropies:
        # Initiate headerline for single signal entropies storage file
        signalent_headerline = weightcalcdata.variables

        # Define filename structure for CSV file
        # (signalent_filename_template is bound below, before first call)

        def signalent_filename(name, boxindex):
            return signalent_filename_template.format(weightcalcdata.casename,
                                                      scenario, name, boxindex)

        signalentstoredir = config_setup.ensure_existence(os.path.join(
            weightcalcdata.saveloc, 'signal_entropies'),
                                                          make=True)

        signalent_filename_template = \
            os.path.join(signalentstoredir, '{}_{}_{}_box{:03d}.csv')

    for boxindex in weightcalcdata.boxindexes:
        box = weightcalcdata.boxes[boxindex]

        # Calculate single signal entropies - do not worry about
        # delays, but still do it according to different boxes
        if weightcalcdata.single_entropies:
            # Calculate single signal entropies of all variables
            # and save output in similar format to
            # standard weight calculation results
            signalentlist = []
            for varindex, _ in enumerate(weightcalcdata.variables):
                vardata = box[:, varindex][startindex:startindex + size]
                entropy = data_processing.calc_signalent(
                    vardata, weightcalcdata)
                signalentlist.append(entropy)

            # Write the signal entropies to file - one file for each box
            # Each file will only have one line as we are not
            # calculating for different delays as is done for the case of
            # variable pairs.

            # Need to add another axis to signalentlist in order to make
            # it a sequence so that it can work with writecsv_weightcalc
            signalentlist = np.asarray(signalentlist)
            signalentlist = \
                signalentlist[np.newaxis, :]

            writecsv_weightcalc(
                signalent_filename('signal_entropy', boxindex + 1),
                signalentlist, signalent_headerline)

        # Start parallelising code here
        # Create one process for each causevarindex

        ###########################################################

        non_iter_args = [
            weightcalcdata, weightcalculator, box, startindex, size,
            newconnectionmatrix, method, boxindex, filename, headerline,
            writeoutput
        ]

        # Run the script that will handle multiprocessing
        gaincalc_oneset.run(non_iter_args, weightcalcdata.do_multiprocessing)

        ########################################################

    return None
Esempio n. 10
0
    savedir = dirparts[0]
    for pathpart in dirparts[1:]:
        savedir = os.path.join(savedir, pathpart)

    config_setup.ensure_existence(os.path.join(savedir))
    getattr(figtypes, graphdata.plot_type)(graphdata, graph, scenario, savedir)

    return None


def plotdraw(mode, case, writeoutput):

    graphdata = GraphData(mode, case)

    # Create output directory
    config_setup.ensure_existence(os.path.join(graphdata.saveloc, 'graphs'),
                                  make=True)

    for graph in graphdata.graphs:
        graphdata.graphdetails(graph)
        for weight_method in graphdata.weight_methods:
            print(weight_method)
            for scenario in graphdata.scenarios:
                print(scenario)
                basedir = os.path.join(graphdata.saveloc, 'weightdata', case,
                                       scenario, weight_method)

                sigtypes = next(os.walk(basedir))[1]

                for sigtype in sigtypes:
                    if sigtype in graphdata.significance_cases:
                        print(sigtype)
Esempio n. 11
0
def noderankcalc(mode, case, writeoutput, preprocessing=False):
    """Ranks the nodes in a network based on gain matrices already generated
    for different weight types.

    The results are stored in the noderank directory but retains the structure
    of the weightdata directory.

    Notes
    -----
        Preprocessing is experimental and should always be set to False

    Raises
    ------
        ValueError
            If noderankdata.datatype is neither 'file' nor 'function'.
    """
    noderankdata = NoderankData(mode, case)

    # Create output directory
    config_setup.ensure_existence(
        os.path.join(noderankdata.saveloc, "noderank"), make=True
    )

    for scenario in noderankdata.scenarios:

        logging.info("Running scenario {}".format(scenario))
        # Update scenario-specific fields of noderankdata object
        noderankdata.scenariodata(scenario)

        for rank_method in noderankdata.rank_methods:
            for weight_method in noderankdata.weight_methods:

                basedir = os.path.join(
                    noderankdata.saveloc, "weightdata", case, scenario, weight_method
                )

                if noderankdata.datatype == "file":
                    sigtypes = next(os.walk(basedir))[1]
                elif noderankdata.datatype == "function":
                    sigtypes = ["test_nosig"]
                else:
                    # Previously an unknown datatype left sigtypes unbound
                    raise ValueError(
                        "Unrecognised datatype: {}".format(noderankdata.datatype)
                    )

                for sigtype in sigtypes:
                    print(sigtype)
                    embedtypesdir = os.path.join(basedir, sigtype)

                    if noderankdata.datatype == "file":
                        embedtypes = next(os.walk(embedtypesdir))[1]
                    else:
                        embedtypes = ["test_noembed"]

                    for embedtype in embedtypes:
                        print(embedtype)
                        datadir = os.path.join(embedtypesdir, embedtype)

                        # Transfer entropy methods produce absolute as well
                        # as directional weight arrays; startswith replaces
                        # the fragile fixed-length slice comparison
                        if weight_method.startswith("transfer_entropy"):
                            typenames = [
                                "weight_absolute_arrays",
                                "weight_directional_arrays",
                            ]
                            if sigtype == "sigtest":
                                typenames.append("sigweight_absolute_arrays")
                                typenames.append("sigweight_directional_arrays")
                        else:
                            typenames = ["weight_arrays"]
                            if sigtype == "sigtest":
                                typenames.append("sigweight_arrays")

                        for typename in typenames:
                            # Start the methods here
                            dorankcalc(
                                noderankdata,
                                scenario,
                                datadir,
                                typename,
                                rank_method,
                                writeoutput,
                                preprocessing,
                            )

    return None
Esempio n. 12
0
def dorankcalc(
    noderankdata, scenario, datadir, typename, rank_method, writeoutput, preprocessing
):
    """Compute node importance rankings for each boxed gain matrix and,
    when requested, write the results to disk.

    For every box a ranking is calculated with ``calc_gainrank``; the per-box
    results are accumulated so that transient importance differences across
    boxes can be derived.  Output (CSV ranking lists, a GML importance graph
    and JSON ranking dictionaries) is written to a "noderank" directory tree
    mirroring ``datadir``.

    Parameters
    ----------
    noderankdata : object
        Ranking configuration/data holder; must expose ``datatype`` ("file"
        or "function"), ``boxes``, ``variablelist`` and ``dummies``.  For the
        "function" datatype it must also carry a ``gainmatrix`` attribute.
    scenario : str
        Scenario identifier used when loading boxed weight data.
    datadir : str
        Directory containing the weight data arrays.
    typename : str
        Weight array type name (e.g. "weight_absolute_arrays"); the trailing
        "_arrays" suffix is stripped when building output paths.
    rank_method : str
        Ranking method name, interpolated into output file names.
    writeoutput : bool
        When True, results are written to disk; when False nothing is
        written.
    preprocessing : bool
        When True the gain matrix is converted to binary form before ranking
        and both the original and modified matrices are saved.

    Returns
    -------
    None

    Raises
    ------
    ValueError
        If ``noderankdata.datatype`` is neither "file" nor "function".
    """

    # Load the gain/delay matrices for every box, depending on the source.
    if noderankdata.datatype == "file":
        noderankdata.get_boxes(scenario, datadir, typename)
        gainmatrices = get_gainmatrices(noderankdata, datadir, typename)
        delaymatrices = get_delaymatrices(noderankdata, datadir, typename)
    elif noderankdata.datatype == "function":
        gainmatrices = [noderankdata.gainmatrix]
        delaymatrices = [np.zeros_like(noderankdata.gainmatrix)]
        noderankdata.boxes = [0]
    else:
        # Previously an unknown datatype fell through to a NameError on
        # ``gainmatrices``; fail with a clear message instead.
        raise ValueError(
            "Unknown noderankdata.datatype: {}".format(noderankdata.datatype)
        )

    # Box-to-box difference rankings are only meaningful when there is more
    # than one box.
    generate_diffs = len(noderankdata.boxes) > 1

    # Output file name templates ("{}" is filled with rank_method).
    rankinglist_name = "rankinglist_{}.csv"
    modgainmatrix_name = "modgainmatrix.csv"
    originalgainmatrix_name = "originalgainmatrix.csv"
    graphfile_name = "graph_{}.gml"
    transientdict_name = "transientdict_{}.json"
    basevaldict_name = "basevaldict_{}.json"
    boxrankdict_name = "boxrankdict_{}.json"
    rel_boxrankdict_name = "rel_boxrankdict_{}.json"

    if generate_diffs:
        dif_rankinglist_name = "dif_rankinglist_{}.csv"
        dif_transientdict_name = "dif_transientdict_{}.json"
        dif_basevaldict_name = "dif_basevaldict_{}.json"
        dif_boxrankdict_name = "dif_boxrankdict_{}.json"
        dif_rel_boxrankdict_name = "dif_rel_boxrankdict_{}.json"
        dif_typename = "dif_" + typename
        dif_gainmatrices = get_gainmatrices(noderankdata, datadir, dif_typename)

    # Accumulate the per-box ranking dictionaries (and their difference
    # counterparts) for the transient importance calculations below.
    backward_rankingdicts = []
    dif_backward_rankingdicts = []

    for index, gainmatrix in enumerate(gainmatrices):
        if preprocessing:
            modgainmatrix = gainmatrix_tobinary(gainmatrix)
        else:
            modgainmatrix = gainmatrix

        if generate_diffs:
            dif_gainmatrix = dif_gainmatrices[index]
            # Take only positive values for now due to convergence issues
            # TODO: Investigate proper handling of negative edge changes
            mod_dif_gainmatrix = dif_gainmatrix_preprocessing(
                dif_gainmatrix, method="floor"
            )

        delays = delaymatrices[index]

        # Fixed dummy weight (previously derived from
        # gainmatrix_preprocessing).
        dummyweight = 10

        # This is where the actual ranking calculation happens
        rankingdict, rankinglist, connections, variables, gains = calc_gainrank(
            modgainmatrix, noderankdata, rank_method, dummyweight
        )
        backward_rankingdicts.append(rankingdict)

        if generate_diffs:
            # TODO: Review effect of positive differences only
            # Take only positive for now due to convergence issues, but
            # investigate proper handling of negative edge changes
            dif_rankingdict, dif_rankinglist, _, _, _ = calc_gainrank(
                mod_dif_gainmatrix, noderankdata, rank_method, dummyweight
            )
            dif_backward_rankingdicts.append(dif_rankingdict)

        # The rest of the loop body only stores results on disk; skip it
        # entirely when output is disabled.  (Previously only the gain matrix
        # dumps were guarded by ``writeoutput``, so the later writes raised a
        # NameError on the undefined ``savedir`` whenever writeoutput was
        # False.)
        if not writeoutput:
            continue

        # Take datadir and swop out 'weightdata' for 'noderank'
        savedir = data_processing.change_dirtype(datadir, "weightdata", "noderank")
        # typename[:-7] strips the trailing "_arrays" suffix.
        config_setup.ensure_existence(os.path.join(savedir, typename[:-7]))

        if preprocessing:
            # Save the modified gainmatrix
            writecsv_looprank(
                os.path.join(savedir, typename[:-7], modgainmatrix_name),
                modgainmatrix,
            )
            # Save the original gainmatrix
            writecsv_looprank(
                os.path.join(savedir, typename[:-7], originalgainmatrix_name),
                gainmatrix,
            )

        dummystatus = "withdummies" if noderankdata.dummies else "nodummies"

        # Per-box output directory, e.g. .../box001/withdummies
        savepath = config_setup.ensure_existence(
            os.path.join(
                savedir,
                typename[:-7],
                "box{:03d}".format(noderankdata.boxes[index] + 1),
                dummystatus,
            )
        )

        # Save the ranking list for each box
        writecsv_looprank(
            os.path.join(savepath, rankinglist_name.format(rank_method)), rankinglist
        )
        if generate_diffs:
            writecsv_looprank(
                os.path.join(savepath, dif_rankinglist_name.format(rank_method)),
                dif_rankinglist,
            )

        # Save the importance graph to file.  Connections are kept natural;
        # the hierarchical layout is rotated in post-processing.
        graph, _ = create_importance_graph(
            noderankdata,
            variables,
            connections,
            connections,
            gains,
            delays,
            rankingdict,
        )
        nx.readwrite.write_gml(
            graph, os.path.join(savepath, graphfile_name.format(rank_method))
        )

        # Ranking dictionaries over all boxes processed so far.
        transientdict, basevaldict, boxrankdict, rel_boxrankdict = calc_transient_importancediffs(
            backward_rankingdicts, noderankdata.variablelist
        )

        if generate_diffs:
            # Ranking dictionaries for the difference gain arrays.
            dif_transientdict, dif_basevaldict, dif_boxrankdict, dif_rel_boxrankdict = calc_transient_importancediffs(
                dif_backward_rankingdicts, noderankdata.variablelist
            )

        # Store dictionaries using JSON

        # Normal dictionaries
        data_processing.write_dictionary(
            os.path.join(savepath, boxrankdict_name.format(rank_method)), boxrankdict
        )
        data_processing.write_dictionary(
            os.path.join(savepath, rel_boxrankdict_name.format(rank_method)),
            rel_boxrankdict,
        )
        data_processing.write_dictionary(
            os.path.join(savepath, transientdict_name.format(rank_method)),
            transientdict,
        )
        data_processing.write_dictionary(
            os.path.join(savepath, basevaldict_name.format(rank_method)), basevaldict
        )

        if generate_diffs:
            # Difference dictionaries
            data_processing.write_dictionary(
                os.path.join(savepath, dif_boxrankdict_name.format(rank_method)),
                dif_boxrankdict,
            )
            data_processing.write_dictionary(
                os.path.join(savepath, dif_rel_boxrankdict_name.format(rank_method)),
                dif_rel_boxrankdict,
            )
            data_processing.write_dictionary(
                os.path.join(savepath, dif_transientdict_name.format(rank_method)),
                dif_transientdict,
            )
            data_processing.write_dictionary(
                os.path.join(savepath, dif_basevaldict_name.format(rank_method)),
                dif_basevaldict,
            )

    return None