Example #1
 def read_file(self, path):
     if self.folder:
         # `path` is a directory: read and concatenate every .g6 file in it
         # (note: extend() assumes each file holds several graphs; see the note below)
         nx_graph = []
         for file in listdir(path):
             nx_graph.extend(nx.read_graph6(os.path.join(path, file)))
         return nx_graph
     return nx.read_graph6(path)
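Note: the branch above depends on a NetworkX quirk: nx.read_graph6 returns a single Graph for a one-line file but a list of Graphs for a multi-line file, so extend() only behaves as intended for multi-graph files. A minimal normalizing sketch (read_graph6_as_list is a hypothetical helper, not part of the example):

import networkx as nx

def read_graph6_as_list(path):
    # read_graph6 yields one Graph for a single-line file and a list otherwise;
    # wrap the single-graph case so callers always receive a list
    graphs = nx.read_graph6(path)
    return graphs if isinstance(graphs, list) else [graphs]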
Example #2
def get_number_of_total_sequences(numberNodes, numberBlocks):
    # each block has generated an "optimalSequences_n{n}_{block}.g6.txt" file that contains the optimal sequences for that block,
    # e.g. "optimalSequences_n10_0.g6.txt" holds 120052 optimal sequences and
    # "optimalSequences_n10_99.g6.txt" holds 120020 optimal sequences
    Graphs = nx.read_graph6(f'{PATH_TO_GRAPHS_DATASETS}/n{numberNodes}_blocks/n{numberNodes}_0.g6')
    total = (numberBlocks - 1) * len(Graphs)
    # numberBlocks - 1: every block except the last has the same number of graphs, hence the same number of sequences;
    # with only 1 block, total = 0 at this point
    Graphs = nx.read_graph6(f'{PATH_TO_GRAPHS_DATASETS}/n{numberNodes}_blocks/n{numberNodes}_{numberBlocks - 1}.g6')  # block indices are 0-based, so the last block is numberBlocks - 1
    total += len(Graphs)
    return total
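Worked example using the figures from the comments above: with numberNodes = 10 and numberBlocks = 100, total = 99 * 120052 + 120020 = 12,005,168 optimal sequences.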
Example #3
def build_dataset(num_files, show):
    rows = num_files * 120052  # 120052 rows from each file
    cols = 100 + 10  # 10x10 adj list + 10 optimal labels
    data = np.zeros((rows, cols))
    row = 0
    processed = 0
    for idx in range(num_files):
        # get Y
        target_file_path = '{}opt_seq_n10_{}.g6.txt'.format(
            RESULT_DATA_PATH, idx)
        df = pd.read_csv(target_file_path,
                         sep=';',
                         dtype=int,
                         header=None,
                         skiprows=1,
                         usecols=list(range(1, 11)))
        Y = df.values

        # get x and store it in data
        data_file_path = '{}n10_{}.g6'.format(G6_GRAPH_PATH, idx)
        G = nx.read_graph6(data_file_path)
        for i, graph in enumerate(G):
            x = nx.to_numpy_array(graph).ravel()
            data[row] = np.concatenate((x, Y[i]))
            row += 1
        if idx % show == 0:
            processed += show
            print('{} files processed.'.format(processed))

    np.save(TARGET, data)
Example #4
def load_graph(fname):
    if fname.endswith('.g6'):
        return nx.read_graph6(fname)
    elif not os.path.exists(fname) and fname.endswith('.json'):
        return load_graph(fname.replace('.json', '.g6'))
    with open(fname, 'r') as f:
        return deserialize_graph(f.read())
Example #5
def vrcholovoBimagickyGrafTest(n):
    # Tests every n-vertex graph in the file for being a candidate
    # "vrcholovo bimagicky graf" (Slovak: vertex-bimagic graph).
    G = read_graph6("zoznam grafov/graph" + str(n) + "c.g6")

    for i in range(len(G)):
        susedia = [set() for _ in range(n)]  # susedia = neighbour sets, one per vertex

        for hrana in G[i].edges():  # hrana = edge
            susedia[hrana[0]].add(hrana[1])
            susedia[hrana[1]].add(hrana[0])

        vyhovuje = True  # vyhovuje = "the graph satisfies the conditions"

        for v1, v2 in combinations(range(n), r=2):
            x = len(susedia[v1].difference(susedia[v2]))
            y = len(susedia[v2].difference(susedia[v1]))
            if (x * y == 0 and x + y > 0) or x == 1 or y == 1 or (x == 2
                                                                  and y == 2):
                vyhovuje = False
                break

        if vyhovuje:
            # vypisRiesenie = "print solution"; the message means
            # "potential vertex-bimagic graph with the following edges",
            # and "pocet vrcholov" means "number of vertices"
            vypisRiesenie(
                "potencialne vrcholovo bimagicky graf s nasledovnymi hranami",
                G[i].edges(), {"pocet vrcholov: " + str(n)})
Example #6
 def test_read_graph6(self):
     data = """DF{"""
     G = nx.parse_graph6(data)
     fh = StringIO(data)
     Gin = nx.read_graph6(fh)
     assert_equal(sorted(G.nodes()), sorted(Gin.nodes()))
     assert_equal(sorted(G.edges()), sorted(Gin.edges()))
Example #7
 def test_read_equals_from_bytes(self):
     data = b"DF{"
     G = nx.from_graph6_bytes(data)
     fh = BytesIO(data)
     Gin = nx.read_graph6(fh)
     assert nodes_equal(G.nodes(), Gin.nodes())
     assert edges_equal(G.edges(), Gin.edges())
Example #8
def execute_graphs_file(file_name):  # read a graph file and write each graph's string representation to a text file
    file = open('bfs_tree_of_' + file_name + '.txt', "w+")
    for graph in nx.read_graph6(file_name + ".g6"):
        graph_str = find_graph_string(graph)
        file.write(graph_str + '\n')  # write the graph string to the file
    file.close()
    return 'bfs_tree_of_' + file_name + '.txt'
Example #9
 def test_read_graph6(self):
     data = """DF{"""
     G = nx.parse_graph6(data)
     fh = StringIO(data)
     Gin = nx.read_graph6(fh)
     assert_nodes_equal(G.nodes(), Gin.nodes())
     assert_edges_equal(G.edges(), Gin.edges())
Example #10
 def test_read_equals_from_bytes(self):
     data = b'DF{'
     G = nx.from_graph6_bytes(data)
     fh = BytesIO(data)
     Gin = nx.read_graph6(fh)
     assert_nodes_equal(G.nodes(), Gin.nodes())
     assert_edges_equal(G.edges(), Gin.edges())
Example #11
 def test_read_many_graph6(self):
     # Read many graphs into a list
     data = """DF{\nD`{\nDqK\nD~{\n"""
     fh = StringIO(data)
     glist = nx.read_graph6(fh)
     assert_equal(len(glist), 4)
     for G in glist:
         assert_equal(sorted(G.nodes()), [0, 1, 2, 3, 4])
Example #12
def seed_graph(M, connected=False):

    if connected: suffix = "c"
    else: suffix = "d1"

    for idx, seed in enumerate(
            nx.read_graph6(f"data/undirected/{M}{suffix}.g6")):
        yield nx.convert_node_labels_to_integers(nx.line_graph(seed))
Example #13
 def test_read_many_graph6(self):
     # Read many graphs into a list
     data = """DF{\nD`{\nDqK\nD~{\n"""
     fh = StringIO(data)
     glist = nx.read_graph6(fh)
     assert_equal(len(glist), 4)
     for G in glist:
         assert_equal(sorted(G.nodes()), [0, 1, 2, 3, 4])
Example #14
 def test_read_many_graph6(self):
     """Test for reading many graphs from a file into a list."""
     data = b"DF{\nD`{\nDqK\nD~{\n"
     fh = BytesIO(data)
     glist = nx.read_graph6(fh)
     assert len(glist) == 4
     for G in glist:
         assert sorted(G) == list(range(5))
Example #15
 def test_read_many_graph6(self):
     """Test for reading many graphs from a file into a list."""
     data = b'DF{\nD`{\nDqK\nD~{\n'
     fh = BytesIO(data)
     glist = nx.read_graph6(fh)
     assert_equal(len(glist), 4)
     for G in glist:
         assert_equal(sorted(G), list(range(5)))
Example #16
 def test_roundtrip(self):
     for i in list(range(13)) + [31, 47, 62, 63, 64, 72]:
         G = nx.random_graphs.gnm_random_graph(i, i * i // 4, seed=i)
         f = BytesIO()
         nx.write_graph6(G, f)
         f.seek(0)
         H = nx.read_graph6(f)
         assert_nodes_equal(G.nodes(), H.nodes())
         assert_edges_equal(G.edges(), H.edges())
Example #17
def read_graph6(file_path):
    """
    Reads the file and returns a list of nx.Graph objects, one for each
    line of the input file.
    :param file_path: path to a graph6 (.g6) file
    :return: list of nx.Graph objects
    """
    G = nx.read_graph6(file_path)
    return G
Example #18
 def test_roundtrip(self):
     for i in list(range(13)) + [31, 47, 62, 63, 64, 72]:
         G = nx.random_graphs.gnm_random_graph(i, i * i // 4, seed=i)
         f = BytesIO()
         nx.write_graph6(G, f)
         f.seek(0)
         H = nx.read_graph6(f)
         assert nodes_equal(G.nodes(), H.nodes())
         assert edges_equal(G.edges(), H.edges())
Example #19
 def test_read_graph6(self):
     data = """DF{"""
     G = nx.parse_graph6(data)
     (fd, fname) = tempfile.mkstemp()
     fh = open(fname, 'w')
     fh.write(data)
     fh.close()
     Gin = nx.read_graph6(fname)
     assert_equal(sorted(G.nodes()), sorted(Gin.nodes()))
     assert_equal(sorted(G.edges()), sorted(Gin.edges()))
     os.close(fd)
     os.unlink(fname)
Example #21
def load_stdgraphs(size: int) -> List[nx.Graph]:
    """Load standard graph validation sets

    For each size (from 6 to 32 graph nodes) the dataset consists of
    100 graphs drawn from the Erdős-Rényi ensemble with edge
    probability 50%.
    """
    from pkg_resources import resource_stream

    if size < 6 or size > 32:
        raise ValueError('Size out of range.')

    filename = 'datasets/data/graph{}er100.g6'.format(size)
    fdata = resource_stream('quantumflow', filename)
    return nx.read_graph6(fdata)
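A hedged usage sketch for load_stdgraphs, assuming the packaged data files match the docstring (100 Erdős-Rényi graphs per size):

graphs = load_stdgraphs(10)
assert len(graphs) == 100                              # per the docstring
assert all(g.number_of_nodes() == 10 for g in graphs)  # requested size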
Example #22

def build_dataset(num_files, show):
    rows = num_files * 120052  # 120052 rows from each file, because
    # each opt_seq_n10_{idx}.g6.txt has 120052 optimal sequences
    #cols = 100 + 1 + 10  # 10x10 adj list + optimal_band + 10 optimal labels
    cols = 45 + 1 + 10  # reduced adj list + optimal_band + 10 optimal labels
    data = np.zeros((rows, cols))
    row = 0
    processed = 0
    for idx in range(num_files):
        # get Y
        target_file_path = '{}opt_seq_n10_{}.g6.txt'.format(
            RESULT_DATA_PATH, idx)
        df = pd.read_csv(target_file_path,
                         sep=';',
                         dtype=int,
                         header=None,
                         skiprows=1,
                         usecols=list(range(1, 11)))
        Y = df.values
        for i, permutation in enumerate(Y):
            p = get_cp_fixed(permutation)
            Y[i] = p.copy()

        # get x and store it in data
        data_file_path = '{}n10_{}.g6'.format(G6_GRAPH_PATH, idx)
        G = nx.read_graph6(data_file_path)
        for i, graph in enumerate(G):
            opt_band = get_bandwidth(graph, Y[i])
            #x = nx.to_numpy_array(graph).ravel()
            x = nx.to_numpy_array(graph)
        x_ravel = np.zeros(
            45)  # 10x10 matrix = 100 entries; minus the diagonal = 90; upper triangle = 90 / 2 = 45
            j = 0
            k = 1
            for u in range(9):
                for v in range(k, 10):
                    x_ravel[j] = x[u][v]
                    j += 1
                k += 1
            #data[row] = np.concatenate((x,np.array([opt_band]),Y[i]))
            data[row] = np.concatenate(
                (x_ravel.copy(), np.array([opt_band]), Y[i]))
            row += 1
        if idx % show == 0:
            processed += show
            print('{} files processed.'.format(processed))

    np.save(TARGET, data)
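Aside: the nested u/v loop above extracts the strict upper triangle in row-major order; assuming standard NumPy indexing semantics, the one-liner below is equivalent:

x_ravel = x[np.triu_indices(10, k=1)]  # strict upper triangle, row-major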
Example #23
def load_g6_graphs(path, name):

    ### Code used to load the SR graphs obtained from http://users.cecs.anu.edu.au/~bdm/data/graphs.html
    ### We don't split the data, because no training is performed (the network is used with random weights for the SR experiment)

    dataset = nx.read_graph6(os.path.join(path, name + '.g6'))
    Graph = namedtuple('Graph', ['node_features', 'edge_mat', 'label'])
    graph_list = list()
    for i, datum in enumerate(dataset):
        x = torch.ones(datum.number_of_nodes(), 1)
        edge_index = to_undirected(
            torch.tensor(list(datum.edges())).transpose(1, 0))
        graph = Graph(x, edge_index, torch.tensor(i).long())
        graph_list.append(graph)
    num_classes = len(dataset)

    return graph_list, num_classes
Example #24
def find_graph_with_probability_in_file(file_path, target_probability,
                                        num_phases, infection_rate):
    graphs = nx.read_graph6(file_path)
    best_diff = 10000  # large sentinel; any real difference will be smaller
    best_graph = None
    for G in graphs:
        vertices = G.nodes()
        edges = G.edges()
        for v in vertices:
            # relabel the edge list so that vertex v and vertex 0 swap places,
            # using -1 as a temporary placeholder for v
            edges_copied = [[e[0], e[1]] for e in edges]
            for index, edge in enumerate(edges_copied):
                if edge[0] == v:
                    edge[0] = -1
                if edge[1] == v:
                    edge[1] = -1
                if edge[0] == 0:
                    edge[0] = v
                if edge[1] == 0:
                    edge[1] = v
                if edge[0] == -1:
                    edge[0] = 0
                if edge[1] == -1:
                    edge[1] = 0
                edges_copied[index] = (edge[0], edge[1])
            diff = abs(
                calculate_with_markov(vertices, edges_copied, num_phases,
                                      infection_rate) - target_probability)
            if diff < best_diff:
                best_diff = diff
                best_graph = edges_copied
    print(best_graph)
    for row in vertices:
        line = ''
        for column in vertices:
            line += '1' if (row, column) in best_graph or (
                column, row) in best_graph else '0'
        print(line)
    print(
        calculate_with_markov(vertices, best_graph, num_phases,
                              infection_rate))
Example #25
def writeOptimalSequenceTextFileForBlock(block, numberNodes):
    # get a block of graphs and write the optimal-sequences file for that block under the "opt_results" folder;
    # the "optimal sequences" file contains all optimal sequences for each graph in that block
    Graphs = nx.read_graph6(f'{PATH_TO_GRAPHS_DATASETS}/n{numberNodes}_blocks/n{numberNodes}_{block}.g6')
    print(f"There are {len(Graphs)} non-isomorphic graphs of {numberNodes} nodes in the block {block} (.g6 file)")
    for i in range(len(Graphs)):
        writeGraphAsTextFile(Graphs[i], i)

    result_file = f'optimalSequences_n{numberNodes}_{block}.g6.txt'
    saveAllGraphsOptimalBandsInOneTextFile(len(Graphs), result_file, numberNodes, block)

    optimal_sequence_dict = load_opt_seq(result_file, numberNodes)

    larger,same,smaller = test_result(Graphs, optimal_sequence_dict)

    # Write the test results into the 150 blank spaces that were reserved
    arr = [len(larger),same,smaller]
    s = ';'.join(list(map(str, arr)))
    path = f'./opt_results/n{numberNodes}_blocks/{result_file}'
    with open(path, 'r+') as file:
        file.seek(0) # move the write cursor to position 0
        file.write(s)
Example #26
def get_custom_edge_list(ks, substructure_type=None, filename=None):
    '''
        Instantiates a list of `edge_list`s representing substructures
        of type `substructure_type` with sizes specified by `ks`.
    '''
    if substructure_type is None and filename is None:
        raise ValueError(
            'You must specify either a type or a filename where to read substructures from.'
        )
    edge_lists = []
    for k in ks:
        if substructure_type is not None:
            graphs_nx = getattr(nx, substructure_type)(k)
        else:
            graphs_nx = nx.read_graph6(
                os.path.join(filename, 'graph{}c.g6'.format(k)))
        if isinstance(graphs_nx, (list, types.GeneratorType)):
            edge_lists += [list(graph_nx.edges) for graph_nx in graphs_nx]
        else:
            edge_lists.append(list(graphs_nx.edges))
    return edge_lists
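A hedged usage sketch: substructure_type is looked up as an attribute of networkx, so any graph constructor such as nx.cycle_graph works; a call like the following would return two edge lists, a triangle and a 4-cycle:

edge_lists = get_custom_edge_list(ks=[3, 4], substructure_type='cycle_graph')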
Example #27
def load_graphs(mode, num):
    '''
    Loads a random sample of graphs from the graphs file. Returns a mapping of
    the graph to its chromatic number.
    '''
    log('loading graphs...')

    if 's' in mode:
        fname = str(GRAPH_DIR / SML_NAME)
    elif 'm' in mode:
        fname = str(GRAPH_DIR / MED_NAME)
    elif 'l' in mode:
        fname = str(GRAPH_DIR / LRG_NAME)
    else:
        raise Exception(f'Could not recognize mode: {mode}.')

    with open(fname, 'r', newline='\n', encoding='ISO-8859-1') as f:
        length = sum(1 for _ in f)

    if num >= length:
        num = length - 1
        log(f'WARNING: Can only load up to {length} graphs from this file.')

    with open(fname, 'r') as f:
        lines = f.readlines()
    raw_graphs = random.sample(lines, num)

    # create temporary file so that nx can read it
    temp_path = 'temp'
    with open(temp_path, 'w', newline='\n') as f:
        f.writelines(raw_graphs)

    graphs = nx.read_graph6(temp_path)
    os.remove(temp_path)

    log(f'...{len(graphs)} graphs loaded.')

    return graphs
Example #28
def build_dataset(*args):
    numberBlocks, numberNodes, numberTotalSequences, verbose = args

    rows = numberTotalSequences
    numberDigitsAdjcencyMatrix = (numberNodes * numberNodes - numberNodes) // 2
    # cols = 45 + 1 + 10
    # upper triangle of the 10x10 adjacency matrix + optimal_band (value) + 10 optimal labels (nodelist)
    # the adjacency matrices are symmetric, so we keep only the upper triangle above the main diagonal
    cols = numberDigitsAdjcencyMatrix + 1 + numberNodes
    data = np.zeros((rows,cols))
    row = 0
    for block in range(numberBlocks):
        optimalSequence_i_file = f'./opt_results/n{numberNodes}_blocks/optimalSequences_n{numberNodes}_{block}.g6.txt'
        optimalSequences = pd.read_csv(optimalSequence_i_file, sep=';', dtype=int, header=None, skiprows=1, usecols=list(range(1, numberNodes + 1))).values
        optimalSequences = list(map(get_cp_fixed, optimalSequences))
        # optimalSequences = np.array(optimalSequences)
        # optimalSequences is a matrix: row 0 corresponds to graph 0 and holds
        # that graph's optimal sequence nodelist, taken from optimalSequence_n{NUMBER_NODES}_g6.txt.
        # Another approach would be to use the load_opt_seq function, but rather
        # than the dict load_opt_seq returns, this time we get an array

        # each row holds the upper triangle flattened, optimal bandwidth and optimal nodelist as columns
        # this will be the dataset to be passed into the neural network, stored as a ".csv" matrix
        Graphs = nx.read_graph6(f'{PATH_TO_GRAPHS_DATASETS}/n{numberNodes}_blocks/n{numberNodes}_{block}.g6')
        for i, graph in enumerate(Graphs):
            opt_band = get_optimal_bandwidth(graph, optimalSequences[i])
            floatAdjMatrix = nx.to_numpy_array(graph)
            # "nx.to_numpy_array" is the same as "nx.adjacency matrix", but later we'll
            # use pytorch, a neural network works better with floats, since we have lot of 'wx + b' operations
        upperTriangleFlatten = np.array([floatAdjMatrix[r][c] for r in range(numberNodes - 1) for c in range(r + 1, numberNodes)])
            data[row] = np.concatenate((np.array(upperTriangleFlatten), np.array([opt_band]), optimalSequences[i]))
            row += 1
        if verbose and block % 4 == 0:
            print(f'{block + 1} blocks processed, total of {len(Graphs)} optimal sequences in the block just executed.')
    # np.save(target, data)
    buildCSVDataset(data, numberNodes)
Example #29
def main(args):
	
	file_name = ' '.join(args.graph_file)
	
	# read graph file to networkx.DiGraph object
	ext = file_name.split('.')[-1]
	if ext == 'graphml':
		graph = nx.read_graphml(file_name)
	elif ext == 'gml':
		graph = nx.read_gml(file_name)
	elif ext == 'gexf':
		graph = nx.read_gexf(file_name)
	elif ext == 'g6':
		graph = nx.read_graph6(file_name)
	elif ext == 's6':
		graph = nx.read_sparse6(file_name)
	elif ext == 'gpickle' or ext == 'p':
		graph = nx.read_gpickle(file_name)
	elif ext == 'yaml':
		graph = nx.read_yaml(file_name)
	else:
		print "Graph file format not supported. Supported fileformats: graphml (recommended), gexf, gml, g6, s6, gpickle, yaml"

	for n in graph.nodes():
		# from nicholas' tulip output
		if not 'x' in graph.node[n].keys():
			if 'graphics' in graph.node[n].keys():
				g = graph.node[n].pop('graphics')
				graph.node[n]['x'] = g['x']
				graph.node[n]['y'] = g['y']
				if 'h' in g.keys() and not 'node_type' in graph.node[n].keys():
					if g['h'] == 1:
						graph.node[n]['node_type'] = 'reaction'
					elif g['h'] == 2.5:
						graph.node[n]['node_type'] = 'species'
					else:
						graph.node[n]['node_type'] = 'ignore'
		if not 'label' in graph.node[n].keys():
			graph.node[n]['label'] = n

	if compatible_graph(graph):
		# get dictionary with layout info
		d = read_graph(graph)

		# get font
		font = ImageFont.truetype(args.font_file, 1000)

		# add cofactors
		if args.add_cofactors_from_sbml:
			sbml_file = ' '.join(args.add_cofactors_from_sbml)
			cofactors = get_cofactors_from_sbml(d, sbml_file)
			for r in cofactors:
				for s in cofactors[r]:
					d['edge_type'][(r,s)]=cofactors[r][s]['role']
		else:
			cofactors = None
		
		# get the data to assemble the svg file (editable version)
		svg_data = get_svgdata(
			d= d,
			font=font, 
			font_size= args.font_size, 
			scale=args.scale,
			padding=args.padding,
			padding_labels= args.padding_labels,
			normalize=args.normalize,
			overlap=args.overlap,
			defdir = args.r_direction,
			cofactors = cofactors,
			reverse_cof = args.reverse_cof)
		
		# assemble svg file and save (editable version)
		doc = get_svgdoc(**svg_data)
		doc.save(args.svg_name)
		print('output svg saved in ' + args.svg_name)
Example #30
import more_itertools as miter
import itertools  # imported under its own name to avoid shadowing the builtin `iter`
import networkx as nx
import string
# import matplotlib.pyplot as plt

G = nx.read_graph6('G.g6')
H = nx.read_graph6('H.g6')
# G = nx.petersen_graph()
# H = nx.complete_graph(3)

G_nodes = G.nodes()
H_nodes = H.nodes()

iterable = string.ascii_lowercase[0:len(G_nodes)]
is_homomorphism_found = False
for part in miter.set_partitions(iterable, len(H_nodes)):
    is_correct_homomorphism = True
    if is_homomorphism_found:
        break
    for p in part:
        if not is_correct_homomorphism:
            break
        if len(p) == 1:
            continue
        all_combs = itertools.combinations(p, 2)
        for a_comb in all_combs:
            v1 = ord(a_comb[0]) - 97  # map letter back to a vertex index ('a' -> 0)
            v2 = ord(a_comb[1]) - 97
            if G.has_edge(v1, v2):
                # two adjacent vertices of G landed in the same class, so this
                # partition cannot induce a homomorphism into H
                is_correct_homomorphism = False
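The excerpt above only rejects partitions that place two adjacent vertices of G in the same class; it stops before verifying that the induced quotient maps onto edges of H. A minimal sketch of that remaining check (partition_respects_H is a hypothetical helper; it fixes the assignment class k -> H vertex k, which is well-defined because read_graph6 labels H's nodes 0..len(H)-1, though a complete search would also permute that assignment):

def partition_respects_H(part, G, H):
    # class_of[v] = index of the partition class containing vertex v
    class_of = {ord(ch) - 97: k for k, p in enumerate(part) for ch in p}
    # homomorphism condition: every edge of G must map to an edge of H
    return all(H.has_edge(class_of[u], class_of[v]) for u, v in G.edges())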
Example #31
def _cli():

    parser = argparse.ArgumentParser(
        description=__description__)

    parser.add_argument('--version', action='version', version=__version__)

    parser.add_argument('-v', '--verbose', action='store_true')

    parser.add_argument('-i', '--fin', action='store', dest='fin',
                        default='', metavar='FILE',
                        help='Read model from file')

    parser.add_argument('-o', '--fout', action='store', dest='fout',
                        default='', metavar='FILE',
                        help='Write model to file')

    parser.add_argument('-N', '--nodes', type=int, dest='nodes',
                        default=DEFAULT_NODES)

    parser.add_argument('-P', '--steps', type=int, dest='steps',
                        default=DEFAULT_STEPS)

    parser.add_argument('--epochs', type=int, dest='epochs',
                        default=EPOCHS)

    parser.add_argument('--lr', type=float, dest='learning_rate',
                        default=LEARNING_RATE)

    parser.add_argument('-T', '--train', action='store', dest='ftrain',
                        default='', metavar='FILE',
                        help='Collection of graphs to train on')

    parser.add_argument('-V', '--validation',
                        action='store', dest='fvalidation',
                        default='', metavar='FILE',
                        help='Validation graph dataset')

    opts = vars(parser.parse_args())

    verbose = opts.pop('verbose')
    epochs = opts.pop('epochs')
    steps = opts.pop('steps')
    nodes = opts.pop('nodes')
    learning_rate = opts.pop('learning_rate')

    fin = opts.pop('fin')
    fout = opts.pop('fout')

    ftrain = opts.pop('ftrain')
    fvalidation = opts.pop('fvalidation')

    if ftrain:
        graphs = nx.read_graph6(ftrain)
    else:
        graphs = [nx.gnp_random_graph(nodes, 0.5)
                  for _ in range(TRAINING_GRAPHS)]

    if fvalidation:
        validation = nx.read_graph6(fvalidation)
    else:
        validation = qf.datasets.load_stdgraphs(nodes)

    init_beta = None
    init_gamma = None
    if fin:
        with open(fin) as f:
            data = json.load(f)
        init_beta = np.asarray(data['beta'])
        init_gamma = np.asarray(data['gamma'])

    model = QAOAMaxcutModel(nodes, steps, init_beta, init_gamma)
    model.train(graphs, validation, epochs, learning_rate, verbose)

    if fout:
        with open(fout, 'w') as f:
            model.dump(f)
Example #32
def read_one_graph(file_name, graph_number):
    return nx.read_graph6(file_name + '.g6')[graph_number]
Example #33
"""okay so my idea is to create a new hashtable for all these subgraphs and generate corpus and then destroy them"""
def create_partial_vocab(neigbourhood,hash_table,key_mid,d,vocab):
    """take 2*len(neigbourhood) such samples"""
    for i in range(1,2*len(neigbourhood)):
        choices = [np.random.choice(hash_table.values()) for i in range(1,d)]
        choices.insert(int(math.floor(d/2)),hash_table[key_mid])
        choices = map(str,choices)
        vocab.append(" ".join(choices))
    print vocab



if __name__ == "__main__":
    i = 3
    file_name = "/home/kris/Desktop/ML_DeppWalk/Data/all_graph10/graph4.g6"
    full_hash_table = []
    print file_name
    file_list = nx.read_graph6(file_name)
    d = 5 #context window size
    vocab = [] #vocab """we migh have to save it to the disk and start again if size too big"""
    ret_start_val = 0
    total_neigh = []
    present_neighbourhood = gen_neigbourhood(i)
    start_val = max(0,ret_start_val)
    total_neigh.append(present_neighbourhood)
    ret_hash_table,key_mid = populate_table(i,start_val,present_neighbourhood)



Example #34
from kuramoto import *
from scipy.sparse import csr_matrix
from math import floor
from scipy.sparse.csgraph import reverse_cuthill_mckee

#path = 'C:/Users/hbass/Desktop/fca/FCA-ML/adjacency-dynamics/'
path = "/mnt/l/home/fca30/"
path = "/mnt/l/home/kura30/"  # the last assignment wins; earlier paths kept for reference

# read initial coloring, labels, indices and graph6
coloring = pd.read_csv(path + 'color.csv', header=None).to_numpy()
# dataout = [i for i in np.load(path+'labels (4).npy',
#     allow_pickle=True)]
dataout = [i for i in pd.read_csv(path + 'sync.csv', header=None).to_numpy()]
indices = [i for i in pd.read_csv(path + 'ind.csv', header=None).to_numpy()]
graphs = nx.read_graph6(path + 'tag.csv')
count = True


def width(colors, kappa):
    """
    Computes the width from a color list.
    """
    ordered = list(set(colors))
    lordered = len(ordered)
    threshold = floor(kappa / 2)
    if lordered == 0:
        # `assert` on a non-empty string is always true, so raise instead
        raise ValueError("Empty array or logic error.")
    elif lordered == 1:
        return 0
Example #35

def create_isomorphism_database(db_out,
                                pkls_out,
                                boxes,
                                sizes,
                                path_geng=None,
                                path_RI=None):
    conn = sqlite3.connect(db_out)
    cursor = conn.cursor()

    cursor.execute('''DROP TABLE IF EXISTS subgraphs''')
    cursor.execute('''CREATE TABLE subgraphs (
                          id_pkl INTEGER,
                          n_graphs INTEGER,
                          graph6 TEXT,
                          k INTEGER,
                          k_partite TEXT,
                          k_valences TEXT,
                          nodes_valences TEXT,
                          n_nodes INTEGER,
                          n_edges INTEGER,
                          PRIMARY KEY (graph6, k_partite, nodes_valences)
                   );''')
    conn.commit()

    id_pkl = 0

    for G, p in calculate_complete_multipartite_graphs(sizes, boxes):

        print([path_geng, str(G.number_of_nodes()), "-d1", "-D2", "-q"])
        proc = subprocess.Popen(
            [path_geng, str(len(G.nodes)), "-d1", "-D2", "-q"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        geng_out, err = proc.communicate()

        proc.stdout.close()
        proc.stderr.close()

        for i, line_geng in enumerate(geng_out.split()):

            print(line_geng)

            sG = nx.read_graph6(BytesIO(line_geng))

            k_gfu = tempfile.NamedTemporaryFile(mode="w", delete=False)
            k_gfu.write(graph_to_ri(G, "k_graph"))
            k_gfu.seek(0)

            s_gfu = tempfile.NamedTemporaryFile(mode="w", delete=False)
            s_gfu.write(graph_to_ri(sG, "subgraph"))
            s_gfu.seek(0)

            proc = subprocess.Popen(
                [path_RI, "mono", "geu", k_gfu.name, s_gfu.name],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            RI_out, err = proc.communicate()

            k_gfu.close()
            s_gfu.close()

            mappings = []
            subgraphs = {}

            for line in RI_out.decode("utf-8").splitlines():
                if line[0] == "{":
                    mappings.append(eval(line))

                if len(mappings) == 20000:
                    gi = graph_info(
                        p,
                        sG,
                        mappings,
                    )

                    for vn in gi[0]:

                        if vn not in subgraphs:
                            subgraphs[vn] = gi[0][vn]
                            # print vn, result[0][vn], result[1][0], result[1][1], len(result[1][1])
                        else:

                            before = len(subgraphs[vn])
                            for es in gi[0][vn]:
                                if es not in subgraphs[vn]:
                                    subgraphs[vn].append(es)
                                    # print vn, es, result[1][0], result[1][1], len(result[1][1])
                            after = len(subgraphs[vn])
                            print(before, after)

                    mappings = []

            if len(mappings) > 0:
                gi = graph_info(
                    p,
                    sG,
                    mappings,
                )
                # job = job_server.submit(graphInfo, (p, sG, mappings, ), (valences,), modules=(), globals=globals())
                # jobs.append(job)

                for vn in gi[0]:

                    if vn not in subgraphs:
                        subgraphs[vn] = gi[0][vn]
                        # print vn, result[0][vn], result[1][0], result[1][1], len(result[1][1])
                    else:

                        before = len(subgraphs[vn])
                        for es in gi[0][vn]:
                            if es not in subgraphs[vn]:
                                subgraphs[vn].append(es)
                                # print vn, es, result[1][0], result[1][1], len(result[1][1])
                        after = len(subgraphs[vn])
                        print(before, after)

            if len(subgraphs) > 0:

                for vn in subgraphs:

                    root = {}
                    for fr in subgraphs[vn]:
                        parent = root
                        for e in fr:
                            parent = parent.setdefault(e, {})

                    vt = tuple([sum(v) for v in eval(vn)])
                    print("INSERT:", i, line_geng.decode("utf-8"),
                          len(subgraphs[vn]), len(p), str(p), vt, vn,
                          sG.number_of_nodes(), sG.number_of_edges())

                    id_pkl += 1
                    cursor.execute(
                        '''INSERT INTO subgraphs (id_pkl, 
                                      n_graphs, 
                                      graph6,
                                      k,
                                      k_partite,
                                      k_valences,
                                      nodes_valences,
                                      n_nodes, n_edges) 
                                      values (?, ?, ?, ?, ?, ?, ?, ?, ?)''',
                        (id_pkl, len(
                            subgraphs[vn]), line_geng, len(p), str(p), str(vt),
                         str(vn), sG.number_of_nodes(), sG.number_of_edges()))
                    pickle.dump(
                        root,
                        open(os.path.join(pkls_out, "{}.pkl".format(id_pkl)),
                             "wb"))
            conn.commit()
    conn.close()