    def _fit(self) -> None:
        from src.netgan.fit import fit
        sparse_adj = nx.to_scipy_sparse_matrix(self.input_graph)
        try:
            scores, tg_sum = fit(sparse_adj)
        except Exception as e:
            CP.print_orange(f'NetGAN fit failed\n{e}')
            scores, tg_sum = None, None

        self.params['scores'] = scores
        self.params['tg_sum'] = tg_sum

        return
    def _gen(self, gname: str, gen_id: int) -> nx.Graph:
        from src.netgan.netgan.utils import graph_from_scores
        assert 'scores' in self.params
        assert 'tg_sum' in self.params
        if self.params['scores'] is None or self.params['tg_sum'] is None:
            CP.print_orange('NetGAN gen failed')
            raise Exception('Generation failed!')
        else:
            gen_mat = graph_from_scores(self.params['scores'],
                                        self.params['tg_sum'])
            g = nx.from_numpy_array(gen_mat, create_using=nx.Graph())
            g.name = gname
        g.gen_id = gen_id
        return g
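The score-matrix-to-graph step in _gen above is handled by NetGAN's graph_from_scores helper (not shown here), which turns the score matrix and target edge count tg_sum into an adjacency matrix. A minimal standalone sketch of the same conversion idea, using a simple threshold instead of NetGAN's sampling; the toy matrix and the 0.5 threshold are assumptions:

import numpy as np
import networkx as nx

# Hypothetical symmetric edge-score matrix for a 4-node graph.
scores = np.array([[0.0, 0.9, 0.1, 0.7],
                   [0.9, 0.0, 0.8, 0.2],
                   [0.1, 0.8, 0.0, 0.6],
                   [0.7, 0.2, 0.6, 0.0]])

# Keep edges whose score exceeds an assumed threshold of 0.5 (no self-loops).
adj = (scores > 0.5).astype(int)
np.fill_diagonal(adj, 0)

g = nx.from_numpy_array(adj, create_using=nx.Graph())
g.name = 'netgan_sketch'
print(g.number_of_nodes(), g.number_of_edges())  # 4 nodes, 4 edges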
Example #3
    def laplacian_eigenvalues(self) -> np.ndarray:
        """
        Returns the eigenvalues of the graph Laplacian
        :return: array of Laplacian eigenvalues
        """
        CP.print_none('Calculating Laplacian Eigenvalues')
        if self.graph.order() == 0 or self.graph.size() == 0:
            CP.print_orange(
                f'Graph has {self.graph.order()} nodes and {self.graph.size()} edges!'
            )
            laplacian_eigs = []
        else:
            laplacian_eigs = nx.laplacian_spectrum(self.graph)
        self.stats['laplacian_eigenvalues'] = laplacian_eigs

        return laplacian_eigs
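For reference, nx.laplacian_spectrum is a standard NetworkX call that returns the eigenvalues of the graph Laplacian as a NumPy array; a minimal standalone use on an assumed toy graph:

import numpy as np
import networkx as nx

g = nx.path_graph(4)                 # toy 4-node path graph (an assumption)
eigs = nx.laplacian_spectrum(g)      # NumPy array of Laplacian eigenvalues
print(np.round(np.sort(eigs), 3))    # the smallest eigenvalue of a graph Laplacian is always 0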
Example #4
    def _calculate_all_stats(self):
        """
        Calculate all stats
        """
        CP.print_orange('Calculating all stats')

        object_methods = [
            method_name for method_name in dir(self)
            if callable(getattr(self, method_name))
            and not method_name.startswith('_')
        ]

        for method_name in object_methods:
            method = getattr(self, method_name)
            try:
                method()
            except NotImplementedError:
                pass
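The loop above relies on a simple reflection pattern: collect every public callable with dir()/getattr() and invoke it, skipping methods that raise NotImplementedError. A self-contained sketch of the same pattern on a made-up class:

class ToyStats:
    def __init__(self):
        self.stats = {}

    def node_count(self):
        self.stats['node_count'] = 42

    def not_ready(self):
        raise NotImplementedError

toy = ToyStats()
public_methods = [name for name in dir(toy)
                  if callable(getattr(toy, name)) and not name.startswith('_')]
for name in public_methods:
    try:
        getattr(toy, name)()          # call each public method
    except NotImplementedError:
        pass                          # unimplemented stats are skipped silently
print(toy.stats)                      # {'node_count': 42}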
Example #5
    def __getitem__(self, item):
        """
        Allows square bracket indexing for stats - allow for some fuzzy matching
        """
        if item in self.stats:  # the stat has already been calculated
            return self.stats[item]

        # try some fuzzy matching to figure out the function to call based on the item
        object_methods = [
            method_name for method_name in dir(self)
            if callable(getattr(self, method_name))
            and not method_name.startswith('_')
        ]

        best_match_func = ''
        best_match_score = float('inf')

        for method in object_methods:
            dist = ed.eval(method, item)
            if dist == 0:
                best_match_score = dist
                best_match_func = method
                break

            if dist < best_match_score:
                best_match_score = dist
                best_match_func = method

        assert best_match_func != '', 'edit distance did not work'
        if best_match_score != 0:
            CP.print_orange(
                f'Best matching function found for "{item}": "{best_match_func}()", edit distance: {best_match_score}'
            )
        item = best_match_func

        if best_match_func not in self.stats:
            best_match_func = getattr(
                self, best_match_func
            )  # translates best_match_func from string to a function object
            best_match_func()  # call the best match function

        assert item in self.stats, f'stat: {item} is not updated after function call'
        return self.stats[item]
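A standalone sketch of the fuzzy lookup used in __getitem__, assuming ed is the editdistance package (its eval function returns the Levenshtein distance between two strings); the candidate method names and the query are made up:

import editdistance as ed

methods = ['laplacian_eigenvalues', 'degree_dist', 'clustering_coefficients']
query = 'degree_distribution'

best_match_func, best_match_score = '', float('inf')
for method in methods:
    dist = ed.eval(method, query)
    if dist < best_match_score:
        best_match_func, best_match_score = method, dist
    if dist == 0:
        break                                  # exact match, stop early

print(best_match_func, best_match_score)       # 'degree_dist' is the closest candidate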
Example #6
def stats_computation(dataset, model, trial, filename, stats):
    path = Path(
        get_imt_output_directory()) / 'pickles' / dataset / model / filename
    graph_list = load_pickle(path)
    assert isinstance(
        graph_list,
        list), f'Expected type "list" and got type {type(graph_list)}.'
    assert all(isinstance(g, nx.Graph) for g in graph_list
               ), f'Expected a list of nx.Graph and got disappointed instead.'

    ColorPrint.print_orange(f'{filename} has length {len(graph_list)}')

    for idx, G in enumerate(graph_list):
        gs_obj = GraphStats(graph=G,
                            dataset=dataset,
                            model=model,
                            trial=trial,
                            iteration=idx)
        gs_obj.write_stats_jsons(stats=stats)

    return None
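get_imt_output_directory and load_pickle are helpers from the infinity-mirror project and are not shown here; a hedged sketch of the same load-and-validate step using the standard pickle module, with a placeholder dataset, model, and filename:

import pickle
from pathlib import Path
import networkx as nx

# Assumed layout: <output_dir>/pickles/<dataset>/<model>/<filename>
path = Path('output') / 'pickles' / 'some_dataset' / 'some_model' / 'graphs_trial_1.pkl'

with open(path, 'rb') as f:
    graph_list = pickle.load(f)       # the project helper may also handle compression

assert isinstance(graph_list, list), f'Expected type "list" and got type {type(graph_list)}.'
assert all(isinstance(g, nx.Graph) for g in graph_list)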
Example #7
def sublevel_parallel_computation(input_path, dataset, model, idx):
    # dac cluster output path (overridden by the dsg path below)
    output_path = f'/afs/crc.nd.edu/user/t/tford5/infinity-mirror/output/pgd/{model}/'
    # dsg cluster output path
    output_path = f'/afs/crc.nd.edu/user/t/tford5/infinity-mirror/infinity-mirror/output/pgd/{model}/'
    verify_dir(output_path)
    trial = get_trial_id(input_path, dataset, model, idx)

    output_filename = f'{output_path}/{dataset}_{model}_{trial}_pgd_full.csv'

    # don't do repeat work
    if verify_file(output_filename):
        ColorPrint.print_orange(f'{output_filename} Already Exists!')
        return dataset + model

    graph_list, trial = load_data(input_path, dataset, model, idx)

    pgds = []
    trials = []
    gens = []

    n_threads = 28

    graph_stats_list = compute_graph_stats(graph_list)
    pgds += compute_pgd(graph_stats_list, n_threads)
    trials += [trial for _ in graph_stats_list]
    gens += list(range(len(graph_stats_list)))

    df_full = construct_full_table(pgds, trials, gens, model)
    df_full.to_csv(output_filename,
                   float_format='%.7f',
                   sep='\t',
                   index=False,
                   na_rep='nan')

    return dataset + model
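compute_graph_stats, compute_pgd, and construct_full_table are project helpers that are not shown here; a hedged sketch of the final assemble-and-write step with pandas, using made-up PGD counts, trial ids, generation numbers, and model name:

import pandas as pd

pgds = [{'total_wedges': 10, 'total_triangles': 3},
        {'total_wedges': 12, 'total_triangles': 2}]    # hypothetical per-generation graphlet counts
trials = [1, 1]
gens = [0, 1]

df_full = pd.concat([pd.DataFrame({'model': 'BTER', 'trial': trials, 'gen': gens}),
                     pd.DataFrame(pgds)], axis=1)
df_full.to_csv('BTER_example_pgd_full.csv', float_format='%.7f',
               sep='\t', index=False, na_rep='nan')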