Example #1
    def test_counterexample_bipartite(self):
        # This is a counterexample showing that the hashing algorithm is not
        # perfectly identifiable (i.e. there are non-isomorphic graphs with
        # the same hash). If this test fails, the algorithm must have been
        # changed in some way that lets it identify these graphs as
        # non-isomorphic.
        matrix1 = np.array([[0, 1, 1, 1, 1, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
                            [0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])

        matrix2 = np.array([[0, 1, 1, 1, 1, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 1, 1, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
                            [0, 0, 0, 0, 0, 1, 0, 0, 1, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])

        labels = [-1, 1, 1, 1, 1, 2, 2, 2, 2, -2]

        # This takes far too long to run, so it is commented out. The graphs
        # are fairly obviously non-isomorphic from visual inspection.
        # self.assertFalse(graph_util.is_isomorphic((matrix1, labels),
        #                                           (matrix2, labels)))
        self.assertEqual(graph_util.hash_module(matrix1, labels),
                         graph_util.hash_module(matrix2, labels))
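For reference, a minimal sketch of calling hash_module directly, assuming graph_util is importable from the NASBench codebase (nasbench.lib.graph_util) as in the examples on this page:

import numpy as np
from nasbench.lib import graph_util  # assumed import path

# Three-vertex chain: input (-1) -> one op (label 1) -> output (-2).
matrix = np.array([[0, 1, 0],
                   [0, 0, 1],
                   [0, 0, 0]])
labels = [-1, 1, -2]
print(graph_util.hash_module(matrix, labels))  # stable hex digest string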
Example #2
def generate_buckets(vertices, bits, max_edges, num_ops, verify_isomorphism):
    """Maps each graph fingerprint to a canonical (matrix, labeling) pair.

    Returns None if the matrix encoded by `bits` is not a full DAG or exceeds
    max_edges; otherwise a dict of fingerprint -> (matrix, labeling).
    """
    buckets = {}
    matrix = np.fromfunction(graph_util.gen_is_edge_fn(bits),
                             (vertices, vertices),
                             dtype=np.int8)
    if (not graph_util.is_full_dag(matrix)
            or graph_util.num_edges(matrix) > max_edges):
        return

    # Iterate through all possible labelings
    for labeling in itertools.product(
            *[range(num_ops) for _ in range(vertices - 2)]):
        labeling = [-1] + list(labeling) + [-2]
        fingerprint = graph_util.hash_module(matrix, labeling)

        if fingerprint not in buckets:
            buckets[fingerprint] = (matrix.tolist(), labeling)

        # This catches the "false positive" case of two non-isomorphic models
        # hashing to the same bucket.
        elif verify_isomorphism:
            canonical_graph = buckets[fingerprint]
            if not graph_util.is_isomorphic(
                (matrix.tolist(), labeling), canonical_graph):
                logging.fatal(
                    'Matrix:\n%s\nLabel: %s\nis not isomorphic to'
                    ' canonical matrix:\n%s\nLabel: %s', str(matrix),
                    str(labeling), str(canonical_graph[0]),
                    str(canonical_graph[1]))
                sys.exit()
    return buckets
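A hypothetical invocation sketch (the bit string and budgets are made-up values; vertices=5 means 5*4/2 = 10 upper-triangular adjacency bits):

buckets = generate_buckets(vertices=5, bits=0b1010010110, max_edges=9,
                           num_ops=3, verify_isomorphism=True)
if buckets:  # None when the matrix was pruned or exceeded max_edges
    print(len(buckets), 'unique fingerprints')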
Example #3
  def test_random_isomorphism_hashing(self):
    # Tests that hash_module always provides the same hash for randomly
    # generated isomorphic graphs.
    for _ in range(1000):
      # Generate a random graph. Note: the algorithm guarantees that
      # isomorphic graphs receive the same hash for any directed graph with
      # coloring; it does not require the graph to be a DAG. (The converse can
      # fail, as the bipartite counterexample in Example #1 shows.)
      size = random.randint(3, 20)
      matrix = np.random.randint(0, 2, [size, size])
      labels = [random.randint(0, 10) for _ in range(size)]

      # Generate permutation of matrix and labels.
      perm = np.random.permutation(size).tolist()
      pmatrix, plabels = graph_util.permute_graph(matrix, labels, perm)

      # Hashes should be identical.
      hash1 = graph_util.hash_module(matrix, labels)
      hash2 = graph_util.hash_module(pmatrix, plabels)
      self.assertEqual(hash1, hash2)
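The permutation step can be pictured with plain numpy indexing. A minimal self-contained sketch, assuming perm[v] gives the new index of original vertex v (the exact convention belongs to graph_util.permute_graph):

import numpy as np

matrix = np.array([[0, 1],
                   [1, 0]])
labels = [7, 9]
perm = [1, 0]                       # perm[v]: new index of original vertex v
inv = np.argsort(perm)              # inverse permutation
pmatrix = matrix[np.ix_(inv, inv)]  # reorder rows and columns together
plabels = [labels[i] for i in inv]  # relabel vertices in the new order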
Example #4
def generate_graph(max_vertices, max_edges, num_ops, verify_isomorphism,
                   output_file):
    # Bundle the arguments into an absl-FLAGS-style namespace.
    FLAGS = Namespace(max_vertices=max_vertices,
                      num_ops=num_ops,
                      max_edges=max_edges,
                      verify_isomorphism=verify_isomorphism,
                      output_file=output_file)

    total_graphs = 0  # Total number of graphs (including isomorphisms)
    # hash --> (matrix, label) for the canonical graph associated with each hash
    buckets = {}

    logging.info('Using %d vertices, %d op labels, max %d edges',
                 FLAGS.max_vertices, FLAGS.num_ops, FLAGS.max_edges)
    for vertices in range(2, FLAGS.max_vertices + 1):
        for bits in range(2**(vertices * (vertices - 1) // 2)):
            # Construct adj matrix from bit string
            matrix = np.fromfunction(graph_util.gen_is_edge_fn(bits),
                                     (vertices, vertices),
                                     dtype=np.int8)

            # Discard any graphs which can be pruned or exceed constraints
            if (not graph_util.is_full_dag(matrix)
                    or graph_util.num_edges(matrix) > FLAGS.max_edges):
                continue

            # Iterate through all possible labelings
            for labeling in itertools.product(
                    *[range(FLAGS.num_ops) for _ in range(vertices - 2)]):
                total_graphs += 1
                labeling = [-1] + list(labeling) + [-2]
                fingerprint = graph_util.hash_module(matrix, labeling)

                if fingerprint not in buckets:
                    buckets[fingerprint] = (matrix.tolist(), labeling)

                # This catches the "false positive" case of two non-isomorphic
                # models hashing to the same bucket.
                elif FLAGS.verify_isomorphism:
                    canonical_graph = buckets[fingerprint]
                    if not graph_util.is_isomorphic(
                        (matrix.tolist(), labeling), canonical_graph):
                        logging.fatal(
                            'Matrix:\n%s\nLabel: %s\nis not isomorphic to'
                            ' canonical matrix:\n%s\nLabel: %s', str(matrix),
                            str(labeling), str(canonical_graph[0]),
                            str(canonical_graph[1]))
                        sys.exit()

        logging.info('Up to %d vertices: %d graphs (%d without hashing)',
                     vertices, len(buckets), total_graphs)

    with open(FLAGS.output_file, 'w') as f:
        json.dump(buckets, f, sort_keys=True)
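A hypothetical call with NASBench-101-like constraints (the values and file name are illustrative only):

generate_graph(max_vertices=6, max_edges=9, num_ops=3,
               verify_isomorphism=True, output_file='generated_graphs.json')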
Example #5
  def hash_spec(self, canonical_ops):
    """Computes the isomorphism-invariant graph hash of this spec.

    Args:
      canonical_ops: list of operations in the canonical order in which they
        were assigned (i.e. the order provided in config['available_ops']).

    Returns:
      MD5 hash of this spec which can be used to query the dataset.
    """
    # Invert the operations back to integer label indices used in graph gen.
    labeling = [-1] + [canonical_ops.index(op) for op in self.ops[1:-1]] + [-2]
    return graph_util.hash_module(self.matrix, labeling)
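A hedged usage sketch; the op names mirror NASBench-101's available_ops and spec is assumed to be a ModelSpec-like object:

canonical_ops = ['conv3x3-bn-relu', 'conv1x1-bn-relu', 'maxpool3x3']
fingerprint = spec.hash_spec(canonical_ops)  # MD5 hex digest for dataset lookup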
Example #6
def make_graphs(vertices, bits):
    matrix = np.fromfunction(graph_util.gen_is_edge_fn(bits),
                             (vertices, vertices),
                             dtype=np.int8)

    if graph_util.num_edges(matrix) > max_edges:
        return []

    if not graph_util.is_full_dag(matrix):
        return []

    out = []
    for labeling in itertools.product(
            *[range(num_ops) for _ in range(vertices - 2)]):
        labeling = [-1] + list(labeling) + [-2]

        out.append({
            "hash": graph_util.hash_module(matrix, labeling),
            "adj": matrix.tolist(),
            "labeling": labeling,
        })

    return out
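Since make_graphs reads max_edges and num_ops from module scope, a driver sketch (hypothetical values) must define them first:

max_edges, num_ops = 9, 3  # module-level globals consumed by make_graphs
records = []
for bits in range(2 ** (5 * 4 // 2)):  # all 10-bit strings for 5 vertices
    records.extend(make_graphs(vertices=5, bits=bits))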
Example #7
  def test_hash_module(self):
    # Diamond graph with label permutation
    matrix1 = np.array(
        [[0, 1, 1, 0],
         [0, 0, 0, 1],
         [0, 0, 0, 1],
         [0, 0, 0, 0]])
    label1 = [-1, 1, 2, -2]
    label2 = [-1, 2, 1, -2]

    hash1 = graph_util.hash_module(matrix1, label1)
    hash2 = graph_util.hash_module(matrix1, label2)
    self.assertEqual(hash1, hash2)

    # Simple graph with edge permutation
    matrix1 = np.array(
        [[0, 1, 1, 0, 0],
         [0, 0, 0, 0, 1],
         [0, 0, 0, 1, 0],
         [0, 0, 0, 0, 1],
         [0, 0, 0, 0, 0]])
    label1 = [-1, 1, 2, 3, -2]

    matrix2 = np.array(
        [[0, 1, 0, 1, 0],
         [0, 0, 1, 0, 0],
         [0, 0, 0, 0, 1],
         [0, 0, 0, 0, 1],
         [0, 0, 0, 0, 0]])
    label2 = [-1, 2, 3, 1, -2]

    matrix3 = np.array(
        [[0, 1, 1, 0, 0],
         [0, 0, 0, 1, 0],
         [0, 0, 0, 0, 1],
         [0, 0, 0, 0, 1],
         [0, 0, 0, 0, 0]])
    label3 = [-1, 2, 1, 3, -2]

    hash1 = graph_util.hash_module(matrix1, label1)
    hash2 = graph_util.hash_module(matrix2, label2)
    hash3 = graph_util.hash_module(matrix3, label3)
    self.assertEqual(hash1, hash2)
    self.assertEqual(hash2, hash3)

    hash4 = graph_util.hash_module(matrix1, label2)
    self.assertNotEqual(hash4, hash1)

    hash5 = graph_util.hash_module(matrix1, label3)
    self.assertNotEqual(hash5, hash1)

    # Connected non-isomorphic regular graphs on 6 interior vertices (8 total)
    matrix1 = np.array(
        [[0, 1, 0, 0, 0, 0, 0, 0],
         [0, 0, 1, 1, 0, 0, 1, 0],
         [0, 0, 0, 0, 1, 1, 0, 0],
         [0, 0, 0, 0, 1, 1, 0, 0],
         [0, 0, 0, 0, 0, 0, 1, 0],
         [0, 0, 0, 0, 0, 0, 1, 0],
         [0, 0, 0, 0, 0, 0, 0, 1],
         [0, 0, 0, 0, 0, 0, 0, 0]])
    matrix2 = np.array(
        [[0, 1, 0, 0, 0, 0, 0, 0],
         [0, 0, 1, 1, 0, 1, 0, 0],
         [0, 0, 0, 0, 1, 0, 1, 0],
         [0, 0, 0, 0, 1, 1, 0, 0],
         [0, 0, 0, 0, 0, 0, 1, 0],
         [0, 0, 0, 0, 0, 0, 1, 0],
         [0, 0, 0, 0, 0, 0, 0, 1],
         [0, 0, 0, 0, 0, 0, 0, 0]])
    label1 = [-1, 1, 1, 1, 1, 1, 1, -2]

    hash1 = graph_util.hash_module(matrix1, label1)
    hash2 = graph_util.hash_module(matrix2, label1)
    self.assertNotEqual(hash1, hash2)

    # Non-isomorphic tricky case (breaks if you don't include self)
    hash1 = graph_util.hash_module(
        np.array([[0, 1, 0, 0, 0],
                  [0, 0, 1, 0, 0],
                  [0, 0, 0, 1, 0],
                  [0, 0, 0, 0, 1],
                  [0, 0, 0, 0, 0]]),
        [-1, 1, 0, 0, -2])

    hash2 = graph_util.hash_module(
        np.array([[0, 1, 0, 0, 0],
                  [0, 0, 1, 0, 0],
                  [0, 0, 0, 1, 0],
                  [0, 0, 0, 0, 1],
                  [0, 0, 0, 0, 0]]),
        [-1, 0, 0, 1, -2])
    self.assertNotEqual(hash1, hash2)

    # Non-isomorphic tricky case (breaks if you don't use directed edges)
    hash1 = graph_util.hash_module(
        np.array([[0, 1, 0, 1],
                  [0, 0, 1, 0],
                  [0, 0, 0, 1],
                  [0, 0, 0, 0]]),
        [-1, 1, 0, -2])

    hash2 = graph_util.hash_module(
        np.array([[0, 1, 0, 1],
                  [0, 0, 1, 0],
                  [0, 0, 0, 1],
                  [0, 0, 0, 0]]),
        [-1, 0, 1, -2])
    self.assertNotEqual(hash1, hash2)

    # Non-isomorphic tricky case (breaks if you only use out-neighbors and self)
    hash1 = graph_util.hash_module(np.array([[0, 1, 1, 1, 1, 0, 0],
                                             [0, 0, 1, 0, 0, 0, 0],
                                             [0, 0, 0, 0, 0, 0, 1],
                                             [0, 0, 0, 0, 0, 1, 0],
                                             [0, 0, 0, 0, 0, 1, 0],
                                             [0, 0, 0, 0, 0, 0, 1],
                                             [0, 0, 0, 0, 0, 0, 0]]),
                                   [-1, 1, 0, 0, 0, 0, -2])
    hash2 = graph_util.hash_module(np.array([[0, 1, 1, 1, 1, 0, 0],
                                             [0, 0, 1, 0, 0, 0, 0],
                                             [0, 0, 0, 0, 0, 0, 1],
                                             [0, 0, 0, 0, 0, 1, 0],
                                             [0, 0, 0, 0, 0, 1, 0],
                                             [0, 0, 0, 0, 0, 0, 1],
                                             [0, 0, 0, 0, 0, 0, 0]]),
                                   [-1, 0, 0, 0, 1, 0, -2])
    self.assertNotEqual(hash1, hash2)
Example #8
def pretraining_gae(dataset, cfg):
    """Implementation of VGAE pretraining on the DARTS search space."""
    X_adj, X_ops, indices, X_adj_val, X_ops_val, indices_val = _build_dataset(dataset)
    print('train set size: {}, validation set size: {}'.format(indices.shape[0], indices_val.shape[0]))
    model = Model(input_dim=args.input_dim, hidden_dim=args.hidden_dim, latent_dim=args.dim,
                   num_hops=args.hops, num_mlp_layers=args.mlps, dropout=args.dropout, **cfg['GAE']).cuda()
    optimizer = optim.Adam(model.parameters(), lr=1e-3, betas=(0.9, 0.999), eps=1e-08)
    epochs = args.epochs
    bs = args.bs
    loss_total = []
    best_graph_acc = 0
    for epoch in range(0, epochs):
        model.train()  # restore train mode; model.eval() below would otherwise persist
        chunks = X_adj.shape[0] // bs
        if X_adj.shape[0] % bs > 0:
            chunks += 1
        X_adj_split = torch.split(X_adj, bs, dim=0)
        X_ops_split = torch.split(X_ops, bs, dim=0)
        indices_split = torch.split(indices, bs, dim=0)
        loss_epoch = []
        Z = []
        for i, (adj, ops, ind) in enumerate(zip(X_adj_split, X_ops_split, indices_split)):
            optimizer.zero_grad()
            adj, ops = adj.cuda(), ops.cuda()
            # preprocessing
            adj, ops, prep_reverse = preprocessing(adj, ops, **cfg['prep'])
            # forward
            ops_recon, adj_recon, mu, logvar = model(ops, adj)
            Z.append(mu)
            adj_recon, ops_recon = prep_reverse(adj_recon, ops_recon)
            adj, ops = prep_reverse(adj, ops)
            loss = VAEReconstructed_Loss(**cfg['loss'])((ops_recon, adj_recon), (ops, adj), mu, logvar)
            loss.backward()
            nn.utils.clip_grad_norm_(model.parameters(), 5)
            optimizer.step()
            loss_epoch.append(loss.item())
            if i % 500 == 0:
                print('epoch {}: batch {} / {}: loss: {:.5f}'.format(epoch, i, chunks, loss.item()))
        Z = torch.cat(Z, dim=0)
        z_mean, z_std = Z.mean(0), Z.std(0)
        validity_counter = 0
        buckets = {}
        model.eval()
        for _ in range(args.latent_points):
            z = torch.randn(11, args.dim).cuda()
            z = z * z_std + z_mean
            op, ad = model.decoder(z.unsqueeze(0))
            op = op.squeeze(0).cpu()
            ad = ad.squeeze(0).cpu()
            max_idx = torch.argmax(op, dim=-1)
            one_hot = torch.zeros_like(op)
            for row in range(one_hot.shape[0]):
                one_hot[row][max_idx[row]] = 1
            op_decode = to_ops_darts(max_idx)
            ad_decode = (ad > 0.5).int().triu(1).numpy()  # binarize; keep strict upper triangle
            ad_decode = ad_decode.tolist()
            if is_valid_darts(ad_decode, op_decode):
                validity_counter += 1
                fingerprint = graph_util.hash_module(np.array(ad_decode), one_hot.numpy().tolist())
                if fingerprint not in buckets:
                    buckets[fingerprint] = (ad_decode, one_hot.numpy().astype('int8').tolist())
        validity = validity_counter / args.latent_points
        print('Ratio of valid decodings from the prior: {:.4f}'.format(validity))
        print('Ratio of unique decodings from the prior: {:.4f}'.format(len(buckets) / (validity_counter+1e-8)))

        acc_ops_val, mean_corr_adj_val, mean_fal_pos_adj_val, acc_adj_val = get_val_acc_vae(
            model, cfg, X_adj_val, X_ops_val, indices_val)
        print('validation set: acc_ops:{0:.2f}, mean_corr_adj:{1:.2f}, mean_fal_pos_adj:{2:.2f}, acc_adj:{3:.2f}'.format(
                acc_ops_val, mean_corr_adj_val, mean_fal_pos_adj_val, acc_adj_val))

        #print("reconstructed adj matrix:", adj_recon[1])
        #print("original adj matrix:", adj[1])
        #print("reconstructed ops matrix:", ops_recon[1])
        #print("original ops matrix:", ops[1])

        print('epoch {}: average loss {:.5f}'.format(epoch, sum(loss_epoch)/len(loss_epoch)))
        loss_total.append(sum(loss_epoch) / len(loss_epoch))
        print('loss for epochs: \n', loss_total)
        save_checkpoint_vae(model, optimizer, epoch, sum(loss_epoch) / len(loss_epoch), args.dim, args.name, args.dropout, args.seed)


    print('loss for epochs: ', loss_total)
Example #9
def pretraining_model(dataset, cfg, args):
    nasbench = api.NASBench('data/nasbench_only108.tfrecord')
    train_ind_list, val_ind_list = range(int(len(dataset)*0.9)), range(int(len(dataset)*0.9), len(dataset))
    X_adj_train, X_ops_train, indices_train = _build_dataset(dataset, train_ind_list)
    X_adj_val, X_ops_val, indices_val = _build_dataset(dataset, val_ind_list)
    model = Model(input_dim=args.input_dim, hidden_dim=args.hidden_dim, latent_dim=args.dim,
                   num_hops=args.hops, num_mlp_layers=args.mlps, dropout=args.dropout, **cfg['GAE']).cuda()
    optimizer = optim.Adam(model.parameters(), lr=1e-3, betas=(0.9, 0.999), eps=1e-08)
    epochs = args.epochs
    bs = args.bs
    loss_total = []
    for epoch in range(0, epochs):
        model.train()  # restore train mode; model.eval() below would otherwise persist
        chunks = len(train_ind_list) // bs
        if len(train_ind_list) % bs > 0:
            chunks += 1
        X_adj_split = torch.split(X_adj_train, bs, dim=0)
        X_ops_split = torch.split(X_ops_train, bs, dim=0)
        indices_split = torch.split(indices_train, bs, dim=0)
        loss_epoch = []
        Z = []
        for i, (adj, ops, ind) in enumerate(zip(X_adj_split, X_ops_split, indices_split)):
            optimizer.zero_grad()
            adj, ops = adj.cuda(), ops.cuda()
            # preprocessing
            adj, ops, prep_reverse = preprocessing(adj, ops, **cfg['prep'])
            # forward
            ops_recon, adj_recon, mu, logvar = model(ops, adj.to(torch.long))
            Z.append(mu)
            adj_recon, ops_recon = prep_reverse(adj_recon, ops_recon)
            adj, ops = prep_reverse(adj, ops)
            loss = VAEReconstructed_Loss(**cfg['loss'])((ops_recon, adj_recon), (ops, adj), mu, logvar)
            loss.backward()
            nn.utils.clip_grad_norm_(model.parameters(), 5)
            optimizer.step()
            loss_epoch.append(loss.item())
            if i % 1000 == 0:
                print('epoch {}: batch {} / {}: loss: {:.5f}'.format(epoch, i, chunks, loss.item()))
        Z = torch.cat(Z, dim=0)
        z_mean, z_std = Z.mean(0), Z.std(0)
        validity_counter = 0
        buckets = {}
        model.eval()
        for _ in range(args.latent_points):
            z = torch.randn(7, args.dim).cuda()
            z = z * z_std + z_mean
            op, ad = model.decoder(z.unsqueeze(0))
            op = op.squeeze(0).cpu()
            ad = ad.squeeze(0).cpu()
            max_idx = torch.argmax(op, dim=-1)
            one_hot = torch.zeros_like(op)
            for row in range(one_hot.shape[0]):
                one_hot[row][max_idx[row]] = 1
            op_decode = transform_operations(max_idx)
            ad_decode = (ad > 0.5).int().triu(1).numpy()  # binarize; keep strict upper triangle
            ad_decode = ad_decode.tolist()
            spec = api.ModelSpec(matrix=ad_decode, ops=op_decode)
            if nasbench.is_valid(spec):
                validity_counter += 1
                fingerprint = graph_util.hash_module(np.array(ad_decode), one_hot.numpy().tolist())
                if fingerprint not in buckets:
                    buckets[fingerprint] = (ad_decode, one_hot.numpy().astype('int8').tolist())
        validity = validity_counter / args.latent_points
        print('Ratio of valid decodings from the prior: {:.4f}'.format(validity))
        print('Ratio of unique decodings from the prior: {:.4f}'.format(len(buckets) / (validity_counter+1e-8)))
        acc_ops_val, mean_corr_adj_val, mean_fal_pos_adj_val, acc_adj_val = get_val_acc_vae(model, cfg, X_adj_val, X_ops_val, indices_val)
        print('validation set: acc_ops:{0:.4f}, mean_corr_adj:{1:.4f}, mean_fal_pos_adj:{2:.4f}, acc_adj:{3:.4f}'.format(
                acc_ops_val, mean_corr_adj_val, mean_fal_pos_adj_val, acc_adj_val))
        print('epoch {}: average loss {:.5f}'.format(epoch, sum(loss_epoch)/len(loss_epoch)))
        loss_total.append(sum(loss_epoch) / len(loss_epoch))
        save_checkpoint_vae(model, optimizer, epoch, sum(loss_epoch) / len(loss_epoch), args.dim, args.name, args.dropout, args.seed)
    print('loss for epochs: \n', loss_total)
Example #10
def main(_):
    total_graphs = 0  # Total number of graphs (including isomorphisms)
    total_unlabeled_graphs = 0  # Total number of unlabeled graphs
    # hash --> (matrix, label) for the canonical graph associated with each hash
    buckets = {}

    logging.info('Using %d vertices, %d op labels, min %d max %d edges',
                 FLAGS.max_vertices, FLAGS.num_ops, FLAGS.min_edges,
                 FLAGS.max_edges)
    for vertices in range(FLAGS.min_vertices, FLAGS.max_vertices + 1):
        for bits in range(2**(vertices * (vertices - 1) // 2)):
            if bits % 100000 == 0:
                print('bits:', bits)

            # Construct adj matrix from bit string
            matrix = np.fromfunction(graph_util.gen_is_edge_fn(bits),
                                     (vertices, vertices),
                                     dtype=np.int8)

            # Discard any graphs which can be pruned or exceed constraints
            if (not graph_util.is_full_dag(matrix)
                    or graph_util.num_edges(matrix) > FLAGS.max_edges
                    or graph_util.num_edges(matrix) < FLAGS.min_edges):
                continue

            # this step should be redundant with is_full_dag()
            if graph_util.hanging_edge(matrix):
                print(np.array(matrix))
                continue

            print('found valid unlabeled graph')
            print(matrix)
            total_unlabeled_graphs += 1

            # Iterate through all possible labelings
            for labeling in itertools.product(
                    *[range(FLAGS.num_ops) for _ in range(vertices - 2)]):
                total_graphs += 1
                labeling = [-1] + list(labeling) + [-2]
                fingerprint = graph_util.hash_module(matrix, labeling)

                # TODO: check if hash is in nasbench
                if fingerprint not in buckets:
                    buckets[fingerprint] = (matrix.tolist(), labeling)

                # This catches the "false positive" case of two non-isomorphic
                # models hashing to the same bucket.
                elif FLAGS.verify_isomorphism:
                    canonical_graph = buckets[fingerprint]
                    if not graph_util.is_isomorphic(
                        (matrix.tolist(), labeling), canonical_graph):
                        logging.fatal(
                            'Matrix:\n%s\nLabel: %s\nis not isomorphic to'
                            ' canonical matrix:\n%s\nLabel: %s', str(matrix),
                            str(labeling), str(canonical_graph[0]),
                            str(canonical_graph[1]))
                        sys.exit()

        logging.info('Up to %d vertices: %d graphs (%d without hashing)',
                     vertices, len(buckets), total_graphs)
        logging.info('%d unlabeled graphs', total_unlabeled_graphs)

    print('finished')

    with tf.io.gfile.GFile(FLAGS.output_file, 'w') as f:
        print('outputting now to ', FLAGS.output_file)
        json.dump(buckets, f, sort_keys=True)
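main has the absl-style signature main(_), so a hypothetical entry point would be:

from absl import app

if __name__ == '__main__':
    app.run(main)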
Example #11
import sys
import json
from copy import copy, deepcopy

import numpy as np

from nasbench.lib import graph_util  # assumed import path

# num_ops is assumed to be defined above (e.g. parsed from the command line).
for line in sys.stdin:
    arch = json.loads(line)

    hash_ = arch['hash']
    matrix = np.vstack(arch['adj'])
    labeling = deepcopy(arch['labeling'])

    # --
    # Operator exchanges

    for i in range(1, len(labeling) - 1):
        for op in range(num_ops):
            if labeling[i] == op:
                continue

            new_labeling = copy(labeling)
            new_labeling[i] = op

            print(hash_, graph_util.hash_module(matrix, new_labeling))

    # --
    # Edge exchanges

    for i in range(matrix.shape[0]):
        for j in range(i + 1, matrix.shape[1]):
            matrix[i, j] = 1 - matrix[i, j]  # flip edge (i, j)
            print(hash_, graph_util.hash_module(matrix, labeling))
            matrix[i, j] = 1 - matrix[i, j]  # restore the edge

    sys.stdout.flush()
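A hedged driver sketch: the loop above expects one JSON record per stdin line, shaped like the dicts emitted by make_graphs in Example #6 (the script name and values here are hypothetical):

import json
import subprocess

record = {'hash': 'deadbeef',
          'adj': [[0, 1, 0], [0, 0, 1], [0, 0, 0]],
          'labeling': [-1, 1, -2]}
subprocess.run(['python', 'neighbors.py'], input=json.dumps(record), text=True)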