def test_intersection_size(self):
    """Check that _intersection returns the expected shared-edge count.

    Takes the first network from each of the three test folders and
    intersects them with a 0.6 size threshold, respecting edge signs.
    """
    graphs = [networks[key][0] for key in ('a', 'b', 'c')]
    shared = _intersection(graphs, size=0.6, sign=True)
    self.assertEqual(shared, 5)
def test_intersection_1_network(self):
    """Check the intersection size for a single folder of networks.

    With a full (size=1) unsigned intersection over the 'a' networks,
    the returned set size should be 0.
    """
    shared = _intersection(networks['a'], size=1, sign=False)
    self.assertEqual(shared, 0)
def model_calcs(networks, args):
    """
    Function for generating null models and carrying out calculations.

    Exports intersection graphs per group and size threshold, generates
    randomized and degree-preserving null models, computes set sizes,
    set-size differences, centralities, graph properties and subsampled
    set sizes (depending on the flags in args), writes each result to a
    CSV file prefixed with args['fp'], and optionally runs statistics
    and draws figures.

    :param networks: Dictionary with folder name as key and values as tuples (name, network object).
    :param args: Settings for running anuran
    :return: Centrality statistics DataFrame if computed, otherwise None.
    """
    # Multiprocessing needs at least one worker.
    if args['core'] < 1:
        args['core'] = 1
        logger.info("Setting cores for multiprocessing to 1.")
    # export intersections
    for size in args['size']:
        for group in networks:
            shared_edges = _intersection(networks[group], float(size),
                                         sign=args['sign'], edgelist=True)
            g = _construct_intersection(networks[group], shared_edges)
            nx.write_graphml(g, args['fp'] + '_' + group + '_' +
                             str(size) + '_intersection.graphml')
    # first generate null models
    try:
        random, degree = generate_null(networks, n=args['perm'], npos=args['gperm'],
                                       core=args['core'], fraction=args['cs'],
                                       prev=args['prev'])
    except Exception:
        logger.error('Could not generate null models!', exc_info=True)
        sys.exit()
    set_sizes = None
    try:
        set_sizes = generate_sizes(networks, random, degree, core=args['core'],
                                   sign=args['sign'], fractions=args['cs'],
                                   prev=args['prev'], perm=args['nperm'],
                                   sizes=args['size'])
        set_sizes.to_csv(args['fp'] + '_sets.csv')
        set_differences = generate_size_differences(set_sizes, sizes=args['size'])
        set_differences.to_csv(args['fp'] + '_set_differences.csv')
        logger.info('Set sizes exported to: ' + args['fp'] + '_sets.csv')
    except Exception:
        logger.error('Failed to calculate set sizes!', exc_info=True)
        sys.exit()
    centralities = None
    if args['centrality']:
        try:
            centralities = generate_ci_frame(networks, random, degree,
                                             fractions=args['cs'], prev=args['prev'],
                                             perm=args['nperm'], core=args['core'])
            centralities.to_csv(args['fp'] + '_centralities.csv')
            logger.info('Centralities exported to: ' + args['fp'] + '_centralities.csv')
        except Exception:
            logger.error('Could not rank centralities!', exc_info=True)
            sys.exit()
    if args['network']:
        try:
            # BUGFIX: the prevalence setting was previously passed as the
            # core count (core=args['prev']); pass core and prev separately,
            # consistent with the other generate_* calls above.
            graph_properties = generate_graph_frame(networks, random, degree,
                                                    fractions=args['cs'],
                                                    core=args['core'],
                                                    prev=args['prev'],
                                                    perm=args['nperm'])
            graph_properties.to_csv(args['fp'] + '_graph_properties.csv')
            logger.info('Graph properties exported to: ' +
                        args['fp'] + '_graph_properties.csv')
        except Exception:
            logger.error('Could not estimate graph properties!', exc_info=True)
            sys.exit()
    samples = None
    if args['sample']:
        try:
            samples = generate_sample_sizes(networks, random, degree,
                                            sign=args['sign'], core=args['core'],
                                            fractions=args['cs'], perm=args['nperm'],
                                            prev=args['prev'], sizes=args['size'],
                                            limit=args['sample'], number=args['number'])
            samples.to_csv(args['fp'] + '_subsampled_sets.csv')
            logger.info('Subsampled set sizes exported to: ' +
                        args['fp'] + '_subsampled_sets.csv')
        except Exception:
            logger.error('Failed to subsample networks!', exc_info=True)
            sys.exit()
    central_stats = None
    if args['stats']:
        # Normalize a string 'True' (e.g. from the CLI) to a boolean.
        if args['stats'] == 'True':
            args['stats'] = True
        # add code for pvalue estimation
        set_stats = compare_set_sizes(set_sizes)
        set_stats.to_csv(args['fp'] + '_set_stats.csv')
        difference_stats = compare_set_sizes(set_differences)
        difference_stats.to_csv(args['fp'] + '_difference_stats.csv')
        if args['centrality'] and centralities is not None:
            central_stats = compare_centralities(centralities, mc=args['stats'])
            central_stats.to_csv(args['fp'] + '_centrality_stats.csv')
        if args['network']:
            graph_stats = compare_graph_properties(graph_properties)
            graph_stats.to_csv(args['fp'] + '_graph_stats.csv')
        # check if there is an order in the filenames
        for group in networks:
            prefixes = [x[0].split('_')[0] for x in networks[group]]
            try:
                prefixes = [int(x) for x in prefixes]
            except ValueError:
                # Non-numeric prefixes: no ordering, skip correlations.
                pass
            if all(isinstance(x, int) for x in prefixes):
                # BUGFIX: only correlate results that were actually computed;
                # previously this crashed when centralities was None or
                # graph_properties was never assigned.
                if args['centrality'] and centralities is not None:
                    centrality_correlation = correlate_centralities(group, centralities,
                                                                    mc=args['stats'])
                    centrality_correlation.to_csv(args['fp'] +
                                                  '_centrality_correlation.csv')
                if args['network']:
                    graph_correlation = correlate_graph_properties(group,
                                                                   graph_properties)
                    # BUGFIX: was written to '_centrality_correlation.csv',
                    # clobbering the centrality correlation output.
                    graph_correlation.to_csv(args['fp'] + '_graph_correlation.csv')
    if args['draw']:
        try:
            for x in networks:
                subset_sizes = set_sizes[set_sizes['Group'] == x]
                draw_sets(subset_sizes, args['fp'] + '_' + x)
                subset_differences = set_differences[set_differences['Group'] == x]
                draw_set_differences(subset_differences, args['fp'] + '_' + x)
                if args['centrality']:
                    subset_centralities = centralities[centralities['Group'] == x]
                    draw_centralities(subset_centralities, args['fp'] + '_' + x)
                if args['sample']:
                    subset_samples = samples[samples['Group'] == x]
                    draw_samples(subset_samples, args['fp'] + '_' + x)
                if args['network']:
                    subset_graphs = graph_properties[graph_properties['Group'] == x]
                    draw_graphs(subset_graphs, args['fp'] + '_' + x)
        except Exception:
            logger.error('Could not draw data!', exc_info=True)
            sys.exit()
    if central_stats is not None:
        return central_stats