Example #1
    def _layer_initialize(self):
        agg_layers = []
        adversarial_layers = []
        for i in range(self.model_config['layer_depth']):
            # initialize the two aggregators
            agg_one = GCN(self.model_config['attr_one_dim'],
                          self.model_config['attr_two_dim']).to(
                              self.model_config['device'])
            agg_two = GCN(self.model_config['attr_two_dim'],
                          self.model_config['attr_one_dim']).to(
                              self.model_config['device'])
            agg_layers.append([agg_one, agg_two])

            # initialize the adversarial operations
            adv_one = AdversarialLearning(agg_one,
                                          self.model_config['attr_two_dim'],
                                          self.model_config['disc_hid_dim'],
                                          self.model_config['learning_rate'],
                                          self.model_config['weight_decay'],
                                          self.model_config['dropout'],
                                          self.model_config['device'],
                                          outfeat=1)
            adv_two = AdversarialLearning(agg_two,
                                          self.model_config['attr_one_dim'],
                                          self.model_config['disc_hid_dim'],
                                          self.model_config['learning_rate'],
                                          self.model_config['weight_decay'],
                                          self.model_config['dropout'],
                                          self.model_config['device'],
                                          outfeat=1)
            adversarial_layers.append([adv_one, adv_two])
        return agg_layers, adversarial_layers
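None of these snippets include the GCN module itself, and two different constructors appear across the examples: a positional GCN(in_dim, out_dim) in Examples #1, #2, #6 and #8, and a keyword-style GCN(nfeat=..., nhid=..., nclass=..., dropout=...) elsewhere. A minimal sketch compatible with the positional variant, assuming a single graph-convolution layer with a forward(x, adj) signature and ReLU activation (both assumptions, not shown in the source):

import torch
import torch.nn as nn
import torch.nn.functional as F

class GCN(nn.Module):
    # Sketch only: one graph-convolution layer mapping in_dim -> out_dim.
    def __init__(self, in_dim, out_dim):
        super(GCN, self).__init__()
        self.weight = nn.Parameter(torch.empty(in_dim, out_dim))
        nn.init.xavier_uniform_(self.weight)

    def forward(self, x, adj):
        # x: node features (N, in_dim); adj: normalized adjacency (N, N)
        support = torch.mm(x, self.weight)
        if adj.is_sparse:
            return F.relu(torch.spmm(adj, support))
        return F.relu(torch.mm(adj, support))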
Example #2
    def __layer_initialize(self):
        gcn_layers = []
        adversarial_layers = []
        for i in range(self.layer_depth):
            if i % 2 == 0:
                one_gcn_layer = GCN(self.v_attr_dimensions,
                                    self.u_attr_dimensions).to(self.device)
                gcn_layers.append(one_gcn_layer)
                adversarial_layers.append(
                    AdversarialLearning(one_gcn_layer,
                                        self.u_attr_dimensions,
                                        self.v_attr_dimensions,
                                        self.dis_hidden_dim,
                                        self.learning_rate,
                                        self.weight_decay,
                                        self.dropout,
                                        self.device,
                                        outfeat=1))
            else:
                one_gcn_layer = GCN(self.u_attr_dimensions,
                                    self.v_attr_dimensions).to(self.device)
                gcn_layers.append(one_gcn_layer)
                adversarial_layers.append(
                    AdversarialLearning(one_gcn_layer,
                                        self.v_attr_dimensions,
                                        self.u_attr_dimensions,
                                        self.dis_hidden_dim,
                                        self.learning_rate,
                                        self.weight_decay,
                                        self.dropout,
                                        self.device,
                                        outfeat=1))
        return gcn_layers, adversarial_layers
Example #3
    def _get_models(self):
        # bow_feat = self.loader.bow_mx
        topo_feat = self.loader.topo_mx

        # model1 = GCN(nfeat=bow_feat.shape[1],
        #              hlayers=self._conf["kipf"]["hidden"],
        #              nclass=self.loader.num_labels,
        #              dropout=self._conf["kipf"]["dropout"])
        # opt1 = optim.Adam(model1.parameters(), lr=self._conf["kipf"]["lr"],
        #                   weight_decay=self._conf["kipf"]["weight_decay"])
        #
        # model2 = GCNCombined(nbow=bow_feat.shape[1],
        #                      nfeat=topo_feat.shape[1],
        #                      hlayers=self._conf["hidden_layers"],
        #                      nclass=self.loader.num_labels,
        #                      dropout=self._conf["dropout"])
        # opt2 = optim.Adam(model2.parameters(), lr=self._conf["lr"], weight_decay=self._conf["weight_decay"])
        #
        # model3 = GCN(nfeat=topo_feat.shape[1],
        #              hlayers=self._conf["multi_hidden_layers"],
        #              nclass=self.loader.num_labels,
        #              dropout=self._conf["dropout"],
        #              layer_type=None,
        #              is_regression=self.loader.regression)
        # opt3 = optim.Adam(model3.parameters(), lr=self._conf["lr"], weight_decay=self._conf["weight_decay"])
        #
        model4 = GCN(nfeat=topo_feat.shape[1],
                     hlayers=self._conf["multi_hidden_layers"],
                     nclass=self.loader.num_labels,
                     dropout=self._conf["dropout"],
                     layer_type=AsymmetricGCN,
                     is_regression=self.loader.regression)
        opt4 = optim.Adam(model4.parameters(), lr=self._conf["lr"], weight_decay=self._conf["weight_decay"])

        return {
            # "kipf": {
            #     "model": model1, "optimizer": opt1,
            #     "arguments": [self.loader.bow_mx, self.loader.adj_mx],
            #     "labels": self.loader.labels,
            # },
            # "our_combined": {
            #     "model": model2, "optimizer": opt2,
            #     "arguments": [self.loader.bow_mx, self.loader.topo_mx, self.loader.adj_rt_mx],
            #     "labels": self.loader.labels,
            # },
            # "our_topo_sym": {
            #     "model": model3, "optimizer": opt3,
            #     "arguments": [self.loader.topo_mx, self.loader.adj_mx],
            #     "labels": self.loader.labels,
            # },
            "our_topo_asymm": {
                "model": model4, "optimizer": opt4,
                "arguments": [self.loader.topo_mx, self.loader.adj_rt_mx],
                "labels": self.loader.labels,
            },
        }
Example #4
    def __init__(self,
                 args,
                 batch_size=128,
                 target='mnistm',
                 learning_rate=0.0002,
                 interval=10,
                 optimizer='adam',
                 checkpoint_dir=None,
                 save_epoch=10):
        self.batch_size = batch_size
        self.target = target
        self.checkpoint_dir = checkpoint_dir
        self.save_epoch = save_epoch
        self.interval = interval
        self.lr = learning_rate
        self.best_correct = 0
        self.args = args
        if self.args.use_target:
            self.ndomain = self.args.ndomain
        else:
            self.ndomain = self.args.ndomain - 1

        # load source and target domains
        self.datasets, self.dataset_test, self.dataset_size = dataset_read(
            target, self.batch_size)
        self.niter = self.dataset_size / self.batch_size
        print('Dataset loaded!')

        # define the feature extractor and GCN-based classifier
        self.G = Generator(self.args.net)
        self.GCN = GCN(nfeat=args.nfeat, nclasses=args.nclasses)
        self.G.cuda()
        self.GCN.cuda()
        print('Model initialized!')

        if self.args.load_checkpoint is not None:
            self.state = torch.load(self.args.load_checkpoint)
            self.G.load_state_dict(self.state['G'])
            self.GCN.load_state_dict(self.state['GCN'])
            print('Model loaded from:', self.args.load_checkpoint)

        # initialize statistics (prototypes and adjacency matrix)
        if self.args.load_checkpoint is None:
            self.mean = torch.zeros(args.nclasses * self.ndomain,
                                    args.nfeat).cuda()
            self.adj = torch.zeros(args.nclasses * self.ndomain,
                                   args.nclasses * self.ndomain).cuda()
            print('Statistics initialized!')
        else:
            self.mean = self.state['mean'].cuda()
            self.adj = self.state['adj'].cuda()
            print('Statistics loaded!')

        # define the optimizer
        self.set_optimizer(which_opt=optimizer, lr=self.lr)
        print('Optimizer defined!')
Example #5
    def __init__(self, name, params, method_name, graph, file_tags):
        super(GCNModel, self).__init__(params["dimension"], method_name, graph)
        # Training settings
        self._adj, self._features, self._labels, self._idx_train, self._idx_val, self._idx_test = \
            new_load_data(graph, file_tags, len(graph.nodes()), name=name)
        self._seed = 42
        self._epochs = params["epochs"]
        self._lr = params["lr"]
        self._weight_decay = params["weight_decay"]
        self._hidden = params["hidden"]
        self._dropout = params["dropout"]
        self._model = GCN(nfeat=self._features.shape[1], nhid=self._hidden, nclass=self._d, dropout=self._dropout)
        self._optimizer = optim.Adam(self._model.parameters(), lr=self._lr, weight_decay=self._weight_decay)
Example #6
    def __init__(self,
                 bipartite_graph_data_loader,
                 args,
                 device,
                 layer_depth=3,
                 rank=-1,
                 dataset="cora"):
        self.rank = rank
        self.dataset = dataset
        self.epochs = args.epochs
        self.dis_hidden_dim = args.dis_hidden
        self.learning_rate = args.lr
        self.weight_decay = args.weight_decay
        self.dropout = args.dropout
        self.u_attr_dimensions = bipartite_graph_data_loader.get_u_attr_dimensions()
        self.v_attr_dimensions = bipartite_graph_data_loader.get_v_attr_dimensions()

        self.layer_depth = layer_depth
        self.bipartite_graph_data_loader = bipartite_graph_data_loader
        self.batch_size = bipartite_graph_data_loader.batch_size
        self.device = device

        self.batch_num_u = bipartite_graph_data_loader.get_batch_num_u()
        self.batch_num_v = bipartite_graph_data_loader.get_batch_num_v()
        self.u_attr = bipartite_graph_data_loader.get_u_attr_array()
        self.v_attr = bipartite_graph_data_loader.get_v_attr_array()
        self.u_adj = bipartite_graph_data_loader.get_u_adj()
        self.v_adj = bipartite_graph_data_loader.get_v_adj()
        self.u_num = len(self.u_attr)
        self.v_num = len(self.v_attr)
        self.f_loss = open("BGNN-Adv-loss.txt", "a")

        self.gcn_explicit = GCN(self.v_attr_dimensions,
                                self.u_attr_dimensions).to(device)
        self.gcn_implicit = GCN(self.u_attr_dimensions,
                                self.v_attr_dimensions).to(device)
        self.gcn_merge = GCN(self.v_attr_dimensions,
                             self.u_attr_dimensions).to(device)

        self.learning_type = args.learning_type
Example #7
def get_model(adj, features, labels, idxs, args):
    # Model and optimizer
    model = GCN(nfeat=features.shape[1],
                nhid=args.hidden,
                nclass=labels.max().item() + 1,
                dropout=args.dropout)
    if args.use_gpu:
        model = model.cuda()

    optimizer = optim.Adam(model.parameters(),
                           lr=args.lr,
                           weight_decay=args.weight_decay)

    adj_norm = normalize_adj(adj, args)

    # train
    for epoch in range(args.epochs):
        train(epoch, model, optimizer, adj_norm, features, labels, idxs, args)

    # test
    acc_test = test(model, adj_norm, features, labels, idxs, args)
    return model, acc_test
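A hedged invocation sketch for get_model; the Namespace fields mirror exactly the attributes the function reads (hidden, dropout, use_gpu, lr, weight_decay, epochs), while the graph data is assumed to come from the project's own loader:

from argparse import Namespace

args = Namespace(hidden=16, dropout=0.5, use_gpu=False,
                 lr=0.01, weight_decay=5e-4, epochs=200)
# adj, features, labels, idxs are assumed outputs of the project's data loader
model, acc_test = get_model(adj, features, labels, idxs, args)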
Example #8
    def __init__(self, bipartite_graph_data_loader, args, device, rank=-1, dataset="cora"):
        self.rank = rank
        self.dataset = dataset
        u_attr_dimensions = bipartite_graph_data_loader.get_u_attr_dimensions()
        v_attr_dimensions = bipartite_graph_data_loader.get_v_attr_dimensions()
        decoder_hidfeat = args.decoder_hidfeat
        learning_rate = args.lr
        weight_decay = args.weight_decay
        dropout = args.dropout
        gcn_output_dim = args.gcn_output_dim

        self.gcn_explicit = GCN(v_attr_dimensions, gcn_output_dim).to(device)
        self.gcn_implicit = GCN(u_attr_dimensions, gcn_output_dim).to(device)
        self.gcn_merge = GCN(v_attr_dimensions, gcn_output_dim).to(device)
        self.gcn_opposite = GCN(u_attr_dimensions, gcn_output_dim).to(device)

        self.mlp_explicit = MLPLearning(self.gcn_explicit, gcn_output_dim, u_attr_dimensions, decoder_hidfeat,
                                        learning_rate, weight_decay, dropout, device)
        self.mlp_implicit = MLPLearning(self.gcn_implicit, gcn_output_dim, v_attr_dimensions, decoder_hidfeat,
                                        learning_rate, weight_decay, dropout, device)
        self.mlp_merge = MLPLearning(self.gcn_merge, gcn_output_dim, u_attr_dimensions, decoder_hidfeat,
                                     learning_rate, weight_decay, dropout, device)
        self.mlp_opposite = MLPLearning(self.gcn_opposite, gcn_output_dim, v_attr_dimensions, decoder_hidfeat,
                                        learning_rate, weight_decay, dropout, device)

        self.bipartite_graph_data_loader = bipartite_graph_data_loader
        self.batch_size = bipartite_graph_data_loader.batch_size
        self.device = device
        self.epochs = args.epochs

        self.batch_num_u = bipartite_graph_data_loader.get_batch_num_u()
        self.batch_num_v = bipartite_graph_data_loader.get_batch_num_v()
        self.u_attr = bipartite_graph_data_loader.get_u_attr_array()
        self.v_attr = bipartite_graph_data_loader.get_v_attr_array()
        self.u_adj = bipartite_graph_data_loader.get_u_adj()
        self.v_adj = bipartite_graph_data_loader.get_v_adj()
        self.u_num = len(self.u_attr)
        self.v_num = len(self.v_attr)
Example #9
def main(sample_name, epochs=200, get_probs=False):
    # Training settings
    valid = False
    no_cuda = False
    seed = 42
    lr = 1e-2
    weight_decay = 1e-5
    hidden = 32
    dropout = 0.5

    cuda = not no_cuda and torch.cuda.is_available()

    np.random.seed(seed)
    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed(seed)

    # Load data
    adj, features, labels, y_test, idx_train, idx_val, idx_test = load_data()

    # Model and optimizer
    model = GCN(nfeat=features.shape[1],
                nhid=hidden,
                nclass=1,
                dropout=dropout)
    optimizer = optim.Adam(model.parameters(),
                           lr=lr, weight_decay=weight_decay)

    if cuda:
        model.cuda()
        features = features.cuda()
        adj = adj.cuda()
        labels = labels.cuda()
        idx_train = idx_train.cuda()
        idx_val = idx_val.cuda()
        idx_test = idx_test.cuda()
        y_test = y_test.cuda()

    # Train the model
    torch.set_grad_enabled(True)
    t_total = time.time()
    model.train()
    print("------- Training GCN")
    for epoch in range(epochs):
        if epoch == epochs - 1:
            valid = True
        train(model, optimizer, epoch, adj, features, labels, idx_train, idx_val, valid)
    print("Optimization Finished!")
    print("Total time elapsed: {:.4f}s".format(time.time() - t_total))
    # Testing
    info = gcn_inference(sample_name, model, adj, features, y_test, idx_test, get_probs=get_probs)
    return info
Example #10
class GCNModel(StaticGraphEmbedding):
    def __init__(self, name, params, method_name, graph, file_tags):
        super(GCNModel, self).__init__(params["dimension"], method_name, graph)
        # Training settings
        self._adj, self._features, self._labels, self._idx_train, self._idx_val, self._idx_test = \
            new_load_data(graph, file_tags, len(graph.nodes()), name=name)
        self._seed = 42
        self._epochs = params["epochs"]
        self._lr = params["lr"]
        self._weight_decay = params["weight_decay"]
        self._hidden = params["hidden"]
        self._dropout = params["dropout"]
        self._model = GCN(nfeat=self._features.shape[1], nhid=self._hidden, nclass=self._d, dropout=self._dropout)
        self._optimizer = optim.Adam(self._model.parameters(), lr=self._lr, weight_decay=self._weight_decay)

    def learn_embedding(self):
        for epoch in range(self._epochs):
            output1 = train_(epoch, self._epochs, self._model, self._optimizer, self._features, self._labels, self._adj, self._idx_train, self._idx_val)
        y = output1.detach().numpy()
        nodes = list(self._graph.nodes())
        self._dict_embedding = {nodes[i]: y[i, :] for i in range(len(nodes))}
        return self._dict_embedding
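A hypothetical usage sketch for GCNModel; the params keys mirror those read in __init__ ("dimension" also sets the embedding size via the parent class), while graph and file_tags are assumed inputs (a networkx graph and a tag file):

params = {"dimension": 128, "epochs": 200, "lr": 0.01,
          "weight_decay": 5e-4, "hidden": 32, "dropout": 0.5}
embedder = GCNModel("cora", params, "gcn", graph, file_tags)  # names are placeholders
embedding = embedder.learn_embedding()  # {node: vector of length params["dimension"]}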
Example #11

    if word_ind_sorted[3] != adj[0].shape[0] - 1:
        all_phrase_val[epoch,
                       int(word_ind_sorted[3] + 1):int(adj[0].shape[0]), 0] = 5

    y_val[epoch, :] = np.float32(label[current_ind, :]).reshape(1, -1)

feed_dict_val = construct_feed_dict_sgc(features_val_feed, eigvec_val, y_val,
                                        val_mask, all_phrase_val, class_weight,
                                        placeholders)

with tf.device('/cpu:0'):

    # Create model
    model = GCN(placeholders, input_dim=features[0].shape[1], logging=False)

    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                            log_device_placement=False))

    # Init variables
    sess.run(tf.global_variables_initializer())

    cost_val_f1 = []
    cost_val_loss = []

    for epoch_h in range(0, 200):
        t = time.time()
        shuffle_ind_per_epoch = np.asarray(
            range(0, int(num_batch / 10 * 9 - 1)))
        np.random.shuffle(shuffle_ind_per_epoch)
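Example #12

The fragment below assumes two PySyft workers (bob and alice) and a pre-sized remote_dataset list already exist; a minimal setup sketch under that assumption, using the PySyft 0.2-era API:

import torch
import syft as sy

hook = sy.TorchHook(torch)                  # patches torch tensors with .send()
bob = sy.VirtualWorker(hook, id="bob")      # simulated remote workers
alice = sy.VirtualWorker(hook, id="alice")
remote_dataset = [None, None]               # one slot per worker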
bob_adj = adj.send(bob)
bob_features = features.send(bob)
bob_labels = labels[idx_bob].send(bob)
bob_idx = idx_bob.send(bob)

alice_adj = adj.send(alice)
alice_features = features.send(alice)
alice_labels = labels[idx_alice].send(alice)
alice_idx = idx_alice.send(alice)

# store each worker's data as a nested list
remote_dataset[0] = [bob_adj, bob_features, bob_labels, bob_idx]
remote_dataset[1] = [alice_adj, alice_features, alice_labels, alice_idx]

# model and optimizer
local_model = GCN(nfeat=features.shape[1],
                  nhid=args.nhid,
                  nclass=args.nclass,
                  dropout=args.dropout)

bob_alone_model = GCN(nfeat=features.shape[1],
                      nhid=args.nhid,
                      nclass=args.nclass,
                      dropout=args.dropout)
alice_alone_model = GCN(nfeat=features.shape[1],
                        nhid=args.nhid,
                        nclass=args.nclass,
                        dropout=args.dropout)

bob_fed_model = GCN(nfeat=features.shape[1],
                    nhid=args.nhid,
                    nclass=args.nclass,
                    dropout=args.dropout)
Example #13
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# Load data
adj, features, labels, idx_train, idx_val, idx_test = load_data()
print(adj)

# Model and optimizer
model = GCN(nfeat=features.shape[1],
            nhid=args.hidden,
            nclass=labels.max().item() + 1,
            dropout=args.dropout)
optimizer = optim.Adam(model.parameters(),
                       lr=args.lr, weight_decay=args.weight_decay)

if args.cuda:
    model.cuda()
    features = features.cuda()
    adj = adj.cuda()
    labels = labels.cuda()
    idx_train = idx_train.cuda()
    idx_val = idx_val.cuda()
    idx_test = idx_test.cuda()


def train(epoch):
Example #14
def train(features, labels, adj):
    # Settings
    flags = tf.app.flags
    FLAGS = flags.FLAGS
    flags.DEFINE_string('dataset', 'cora', 'Dataset string.')  # 'cora', 'citeseer', 'pubmed'
    flags.DEFINE_string('model', 'gcn', 'Model string.')  # 'gcn', 'gcn_cheby', 'dense'
    flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
    flags.DEFINE_integer('epochs', 200, 'Number of epochs to train.')
    flags.DEFINE_integer('hidden1', 16, 'Number of units in hidden layer 1.')
    flags.DEFINE_float('dropout', 0.5, 'Dropout rate (1 - keep probability).')
    flags.DEFINE_float('weight_decay', 5e-4, 'Weight for L2 loss on embedding matrix.')
    flags.DEFINE_integer('early_stopping', 10, 'Tolerance for early stopping (# of epochs).')
    flags.DEFINE_integer('max_degree', 3, 'Maximum Chebyshev polynomial degree.')

    features = sp.lil_matrix(features)
    features = preprocess_features(features)
    support = [preprocess_adj(adj)]
    num_supports = 1
    idx_train = range(len(labels))
    train_mask = sample_mask(idx_train, labels.shape[0])

    # Define placeholders
    placeholders = {
        'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)],
        'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2], dtype=tf.int64)),
        'labels': tf.placeholder(tf.float32, shape=(None, labels.shape[1])),
        'labels_mask': tf.placeholder(tf.int32),
        'dropout': tf.placeholder_with_default(0., shape=()),
        'num_features_nonzero': tf.placeholder(tf.int32)  # helper variable for sparse dropout
    }

    # Create model
    model = GCN(placeholders, input_dim=features[2][1], logging=True)

    # Initialize session
    sess = tf.Session()

    # Init variables
    sess.run(tf.global_variables_initializer())

    # Train model
    epochs = 200
    # early_stopping = 10
    for epoch in range(epochs):

        t = time.time()
        # Construct feed dictionary
        feed_dict = construct_feed_dict(features, support, labels, train_mask, placeholders)
        feed_dict.update({placeholders['dropout']: 0.5})

        # Training step
        outs = sess.run([model.opt_op, model.loss, model.accuracy], feed_dict=feed_dict)

        # Validation
        # cost, acc, duration = evaluate(features, support, y_val, val_mask, placeholders)
        # cost_val.append(cost)

        # Print results
        print("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(outs[1]),
              "train_acc=", "{:.5f}".format(outs[2]), "time=", "{:.5f}".format(time.time() - t))
        # "val_loss=", "{:.5f}".format(cost), "val_acc=", "{:.5f}".format(acc),

        # if epoch > early_stopping and cost_val[-1] > np.mean(cost_val[-(FLAGS.early_stopping + 1):-1]):
        #     print("Early stopping...")
        #     break

    print("Optimization Finished!")
Example #15
        acc_val = validate(model, features, adj, labels, idx_val)
        if acc_val > best_acc:
            best_acc = acc_val
        print('best val accuracy {}'.format(best_acc))
        print(' -- ' * 5)
        print('\n\n')
        train_loss.append(loss)
    test(model, features, adj, labels, idx_test)
    plt.figure()
    plt.plot(train_loss)
    plt.savefig('./train_loss.png')

    # GCN

    adj, features, labels, idx_train, idx_val, idx_test, idx2node = load_nx_adj_data(
        G, label_file, exist_fea=basic_file)
    model = GCN(nfeat=features.shape[1],
                nhid=128,
                nclass=labels.max().item() + 1,
                dropout=0.5)
    optimizer = optim.Adam(model.parameters(),
                           lr=lr,
                           weight_decay=weight_decay)
    for epoch in range(epochs):
        train(epoch, model, optimizer, adj, features, labels, idx_train)
    embedding = model.get_embeddings(features, adj, idx2node)
    print("Optimization Finished!")
    embedding.to_pickle('./Result/' + name + 'GCN_embedding' + str(DIM) +
                        '.pickle')
Example #16
opt.cuda = not opt.no_cuda and torch.cuda.is_available()

np.random.seed(opt.seed)
torch.manual_seed(opt.seed)
if opt.cuda:
    torch.cuda.manual_seed(opt.seed)

# Download data
download_data(opt.dataset)

# Load data
adj, features, labels, idx_train, idx_val, idx_test = load_data(opt.dataset, opt.sub_dataset)

# Model and optimizer
model = GCN(nfeat=features.shape[1],
            nhid=opt.hidden,
            nclass=labels.max().item() + 1,
            dropout=opt.dropout)
optimizer = optim.Adam(model.parameters(), lr=opt.lr, weight_decay=opt.weight_decay)

if opt.cuda:
    model.cuda()
    features = features.cuda()
    adj = adj.cuda()
    labels = labels.cuda()
    idx_train = idx_train.cuda()
    idx_val = idx_val.cuda()
    idx_test = idx_test.cuda()

#########
# Train #
#########
Example #17
    def _get_models(self):
        bow_feat = self.loader.bow_mx
        topo_feat = self.loader.topo_mx

        model1 = GCN(nfeat=bow_feat.shape[1],
                     hlayers=[self.conf["kipf"]["hidden"]],
                     nclass=self.loader.num_labels,
                     dropout=self.conf["kipf"]["dropout"])
        opt1 = optim.Adam(model1.parameters(),
                          lr=self.conf["kipf"]["lr"],
                          weight_decay=self.conf["kipf"]["weight_decay"])

        model2 = GCNCombined(nbow=bow_feat.shape[1],
                             nfeat=topo_feat.shape[1],
                             hlayers=self.conf["hidden_layers"],
                             nclass=self.loader.num_labels,
                             dropout=self.conf["dropout"])
        opt2 = optim.Adam(model2.parameters(),
                          lr=self.conf["lr"],
                          weight_decay=self.conf["weight_decay"])

        model3 = GCN(nfeat=topo_feat.shape[1],
                     hlayers=self.conf["multi_hidden_layers"],
                     nclass=self.loader.num_labels,
                     dropout=self.conf["dropout"],
                     layer_type=None)
        opt3 = optim.Adam(model3.parameters(),
                          lr=self.conf["lr"],
                          weight_decay=self.conf["weight_decay"])

        model4 = GCN(nfeat=topo_feat.shape[1],
                     hlayers=self.conf["multi_hidden_layers"],
                     nclass=self.loader.num_labels,
                     dropout=self.conf["dropout"],
                     layer_type=AsymmetricGCN)
        opt4 = optim.Adam(model4.parameters(),
                          lr=self.conf["lr"],
                          weight_decay=self.conf["weight_decay"])

        return {
            "kipf": {
                "model": model1,
                "optimizer": opt1,
                "arguments": [self.loader.bow_mx, self.loader.adj_mx],
                "labels": self.loader.labels,
            },
            "our_combined": {
                "model": model2,
                "optimizer": opt2,
                "arguments": [self.loader.bow_mx, self.loader.topo_mx,
                              self.loader.adj_rt_mx],
                "labels": self.loader.labels,
            },
            "topo_sym": {
                "model": model3,
                "optimizer": opt3,
                "arguments": [self.loader.topo_mx, self.loader.adj_mx],
                "labels": self.loader.labels,
            },
            "topo_asym": {
                "model": model4,
                "optimizer": opt4,
                "arguments": [self.loader.topo_mx, self.loader.adj_rt_mx],
                "labels": self.loader.labels,
            },
        }
Example #18
cuda = useCuda and torch.cuda.is_available()
weightAdj = True

np.random.seed(randomseed)
torch.manual_seed(randomseed)
if cuda:
    torch.cuda.manual_seed(randomseed)

filePath = '../data/kg_train_edges_weighted.txt'
label, adj, features, numHerbs, numSymps = load_data(filePath,
                                                     weighted=weightAdj)
herbPairRule = getHerPairMatrix('../data/herb_pair.txt')

print(label.shape)

model = GCN(nfeat=features.shape[1], nhid=hidden, dimension=d, dropout=dropout)

optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)

if cuda:
    model.cuda()
    features = features.cuda()
    adj = adj.cuda()


def getoutputHC(output):
    all = []
    outputHC = torch.zeros(numHerbs,
                           numHerbs,
                           dtype=torch.float,
                           requires_grad=True)
Example #19
    def build_architecture(self):

        self.split_train_val_test(train_s=Train, val_s=Val, test_s=Test)
        # self._train_mask = self._data.train_mask
        # self._val_mask = self._data.val_mask
        # self._test_mask = self._data.test_mask

        if self._topo_edges is None:
            self.build_topological_edges()

        if self._params['net'] == 'gcn':
            self._net = GCNNet(self._num_features, self._num_classes,
                               h_layers=self._params['hidden_sizes'],
                               dropout=self._params['dropout_rate'],
                               activation=self._params['activation'])
        elif self._params['net'] == 'siam':
            self._net = SiamNet(self._num_features, self._num_classes,
                                h_layers=self._params['hidden_sizes'],
                                dropout=self._params['dropout_rate'],
                                activation=self._params['activation'])
        elif self._params['net'] == 'gat':
            self._net = GatNet(self._num_features, self._num_classes,
                               h_layers=self._params['hidden_sizes'],
                               dropout=self._params['dropout_rate'],
                               activation=self._params['activation'])
        elif self._params['net'] == 'siam_gat':
            self._net = SiamGAT(self._num_features, self._num_classes,
                                h_layers=self._params['hidden_sizes'],
                                dropout=self._params['dropout_rate'],
                                activation=self._params['activation'],
                                heads=self._params['gat_heads'])
        elif self._params['net'] == 'combined':
            self.build_topo_matrix()
            self._net = GCNCombined(nbow=self._num_features,
                                    nfeat=self._topo_mx.shape[1],
                                    hlayers=self._params['hidden_sizes'],
                                    nclass=self._num_classes,
                                    dropout=self._params['dropout_rate'])
        elif self._params['net'] == 'combined_gcn':
            self.build_topo_matrix()
            self._net = CombinedGCN(self._num_features, self._num_classes,
                                    h_layers=self._params['hidden_sizes'],
                                    dropout=self._params['dropout_rate'],
                                    activation=self._params['activation'],
                                    num_topology=self._topo_mx.shape[1])
        elif self._params['net'] == 'symmetric':
            self.build_topo_matrix()
            self._net = GCN(nfeat=self._topo_mx.shape[1],
                            hlayers=self._params['hidden_sizes'],
                            nclass=self._num_classes,
                            dropout=self._params['dropout_rate'],
                            layer_type=None)
        elif self._params['net'] == 'asymmetric':
            self.build_topo_matrix()
            self._net = GCN(nfeat=self._topo_mx.shape[1],
                            hlayers=self._params['hidden_sizes'],
                            nclass=self._num_classes,
                            dropout=self._params['dropout_rate'],
                            layer_type=AsymmetricGCN)
        else:
            self._net = SiamNet(self._num_features, self._num_classes,
                                h_layers=self._params['hidden_sizes'],
                                dropout=self._params['dropout_rate'],
                                activation=self._params['activation'])
        self._net.to(self._device)

        # self._criterion = nn.CrossEntropyLoss()
        self._criterion = nn.NLLLoss()
        self._optimizer = optim.Adam(self._net.parameters(), lr=self._params['learning_rate'],
                                     weight_decay=self._params['weight_decay'])
Example #20
            A = scipy.linalg.block_diag(*As)

            yield self._transform(x, A, y)

# Define placeholders
placeholders = {
    'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)],
    'features': tf.sparse_placeholder(tf.float32, shape=(None, 2)),
    'labels': tf.placeholder(tf.float32, shape=(None, 2)),
    'labels_mask': tf.placeholder(tf.int32),
    'dropout': tf.placeholder_with_default(0., shape=()),
    'num_features_nonzero': tf.placeholder(tf.int32)  # helper variable for sparse dropout
}

# Create model
model = GCN(placeholders, input_dim=2, logging=True)

# Initialize session
sess = tf.Session()


# Define model evaluation function
def evaluate(features, support, labels, mask, placeholders):
    t_test = time.time()
    feed_dict_val = construct_feed_dict(features, support, labels, mask, placeholders)
    outs_val = sess.run([model.loss, model.accuracy], feed_dict=feed_dict_val)
    return outs_val[0], outs_val[1], (time.time() - t_test)


# Init variables
sess.run(tf.global_variables_initializer())
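For reference, a hedged call to the evaluate function defined above, mirroring the commented-out validation step in Example #14 (y_val and val_mask are assumed to come from the surrounding data pipeline):

cost, acc, duration = evaluate(features, support, y_val, val_mask, placeholders)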
Example #21
  policy_estimator = PolicyEstimator(learning_rate=0.001)
  value_estimator = ValueEstimator(learning_rate=0.001)

  num_supports = 1
  placeholders = {
    'adj': tf.placeholder(tf.float32, shape=(None, None)),  # unnormalized adjacency matrix
    'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)],
    'features': tf.sparse_placeholder(tf.float32),
    'labels': tf.placeholder(tf.float32, shape=(None, 2)),
    'labels_mask': tf.placeholder(tf.int32),
    'dropout': tf.placeholder_with_default(0., shape=()),
    'num_features_nonzero': tf.placeholder(tf.int32),  # helper variable for sparse dropout
    'learning_rate': tf.placeholder(tf.float32)
  }
  gcn = GCN(placeholders, input_dim=feats, logging=True, FLAGS=FLAGS)

  sess.run(tf.global_variables_initializer())
  stats = actor_critic(sess, gcn, placeholders, env, policy_estimator,
       value_estimator, num_episodes=1000, discount_factor=0.99)
Example #22
class Solver(object):
    def __init__(self,
                 args,
                 batch_size=128,
                 target='mnistm',
                 learning_rate=0.0002,
                 interval=10,
                 optimizer='adam',
                 checkpoint_dir=None,
                 save_epoch=10):
        self.batch_size = batch_size
        self.target = target
        self.checkpoint_dir = checkpoint_dir
        self.save_epoch = save_epoch
        self.interval = interval
        self.lr = learning_rate
        self.best_correct = 0
        self.args = args
        if self.args.use_target:
            self.ndomain = self.args.ndomain
        else:
            self.ndomain = self.args.ndomain - 1

        # load source and target domains
        self.datasets, self.dataset_test, self.dataset_size = dataset_read(
            target, self.batch_size)
        self.niter = self.dataset_size / self.batch_size
        print('Dataset loaded!')

        # define the feature extractor and GCN-based classifier
        self.G = Generator(self.args.net)
        self.GCN = GCN(nfeat=args.nfeat, nclasses=args.nclasses)
        self.G.cuda()
        self.GCN.cuda()
        print('Model initialized!')

        if self.args.load_checkpoint is not None:
            self.state = torch.load(self.args.load_checkpoint)
            self.G.load_state_dict(self.state['G'])
            self.GCN.load_state_dict(self.state['GCN'])
            print('Model loaded from:', self.args.load_checkpoint)

        # initialize statistics (prototypes and adjacency matrix)
        if self.args.load_checkpoint is None:
            self.mean = torch.zeros(args.nclasses * self.ndomain,
                                    args.nfeat).cuda()
            self.adj = torch.zeros(args.nclasses * self.ndomain,
                                   args.nclasses * self.ndomain).cuda()
            print('Statistics initialized!')
        else:
            self.mean = self.state['mean'].cuda()
            self.adj = self.state['adj'].cuda()
            print('Statistics loaded!')

        # define the optimizer
        self.set_optimizer(which_opt=optimizer, lr=self.lr)
        print('Optimizer defined!')

    # optimizer definition
    def set_optimizer(self, which_opt='sgd', lr=0.001, momentum=0.9):
        if which_opt == 'sgd':
            self.opt_g = optim.SGD(self.G.parameters(),
                                   lr=lr,
                                   weight_decay=0.0005,
                                   momentum=momentum)
            self.opt_gcn = optim.SGD(self.GCN.parameters(),
                                     lr=lr,
                                     weight_decay=0.0005,
                                     momentum=momentum)
        elif which_opt == 'adam':
            self.opt_g = optim.Adam(self.G.parameters(),
                                    lr=lr,
                                    weight_decay=0.0005)
            self.opt_gcn = optim.Adam(self.GCN.parameters(),
                                      lr=lr,
                                      weight_decay=0.0005)

    # empty gradients
    def reset_grad(self):
        self.opt_g.zero_grad()
        self.opt_gcn.zero_grad()

    # compute the discrepancy between two probability distributions
    def discrepancy(self, out1, out2):
        return torch.mean(torch.abs(F.softmax(out1, dim=1) - F.softmax(out2, dim=1)))

    # compute the Euclidean distance between two tensors
    def euclid_dist(self, x, y):
        x_sq = (x**2).mean(-1)
        x_sq_ = torch.stack([x_sq] * y.size(0), dim=1)
        y_sq = (y**2).mean(-1)
        y_sq_ = torch.stack([y_sq] * x.size(0), dim=0)
        xy = torch.mm(x, y.t()) / x.size(-1)
        dist = x_sq_ + y_sq_ - 2 * xy

        return dist

    # construct the extended adjacency matrix
    def construct_adj(self, feats):
        dist = self.euclid_dist(self.mean, feats)
        sim = torch.exp(-dist / (2 * self.args.sigma**2))
        E = torch.eye(feats.shape[0]).float().cuda()

        A = torch.cat([self.adj, sim], dim=1)
        B = torch.cat([sim.t(), E], dim=1)
        gcn_adj = torch.cat([A, B], dim=0)

        return gcn_adj

    # assign pseudo labels to target samples
    def pseudo_label(self, logit, feat):
        pred = F.softmax(logit, dim=1)
        entropy = (-pred * torch.log(pred)).sum(-1)
        label = torch.argmax(logit, dim=-1).long()

        mask = (entropy < self.args.entropy_thr).float()
        index = torch.nonzero(mask).squeeze(-1)
        feat_ = torch.index_select(feat, 0, index)
        label_ = torch.index_select(label, 0, index)

        return feat_, label_

    # update prototypes and adjacency matrix
    def update_statistics(self, feats, labels, epsilon=1e-5):
        curr_mean = list()
        num_labels = 0

        for domain_idx in range(self.ndomain):
            tmp_feat = feats[domain_idx]
            tmp_label = labels[domain_idx]
            num_labels += tmp_label.shape[0]

            if tmp_label.shape[0] == 0:
                curr_mean.append(
                    torch.zeros((self.args.nclasses, self.args.nfeat)).cuda())
            else:
                onehot_label = torch.zeros(
                    (tmp_label.shape[0], self.args.nclasses)).scatter_(
                        1,
                        tmp_label.unsqueeze(-1).cpu(), 1).float().cuda()
                domain_feature = tmp_feat.unsqueeze(
                    1) * onehot_label.unsqueeze(-1)
                tmp_mean = domain_feature.sum(0) / (
                    onehot_label.unsqueeze(-1).sum(0) + epsilon)

                curr_mean.append(tmp_mean)

        curr_mean = torch.cat(curr_mean, dim=0)
        curr_mask = (curr_mean.sum(-1) != 0).float().unsqueeze(-1)
        self.mean = self.mean.detach() * (
            1 - curr_mask) + (self.mean.detach() * self.args.beta + curr_mean *
                              (1 - self.args.beta)) * curr_mask
        curr_dist = self.euclid_dist(self.mean, self.mean)
        self.adj = torch.exp(-curr_dist / (2 * self.args.sigma**2))

        # compute local relation alignment loss
        loss_local = ((((curr_mean - self.mean) * curr_mask)**
                       2).mean(-1)).sum() / num_labels

        return loss_local

    # compute global relation alignment loss
    def adj_loss(self):
        adj_loss = 0

        for i in range(self.ndomain):
            for j in range(self.ndomain):
                adj_ii = self.adj[i * self.args.nclasses:(i + 1) *
                                  self.args.nclasses,
                                  i * self.args.nclasses:(i + 1) *
                                  self.args.nclasses]
                adj_jj = self.adj[j * self.args.nclasses:(j + 1) *
                                  self.args.nclasses,
                                  j * self.args.nclasses:(j + 1) *
                                  self.args.nclasses]
                adj_ij = self.adj[i * self.args.nclasses:(i + 1) *
                                  self.args.nclasses,
                                  j * self.args.nclasses:(j + 1) *
                                  self.args.nclasses]

                adj_loss += ((adj_ii - adj_jj)**2).mean()
                adj_loss += ((adj_ij - adj_ii)**2).mean()
                adj_loss += ((adj_ij - adj_jj)**2).mean()

        adj_loss /= (self.ndomain * (self.ndomain - 1) / 2 * 3)

        return adj_loss

    # per epoch training in a Domain Generalization setting
    def train_gcn_baseline(self, epoch, record_file=None):
        criterion = nn.CrossEntropyLoss().cuda()
        self.G.train()
        self.GCN.train()

        for batch_idx, data in enumerate(self.datasets):
            # get the source batches
            img_s = list()
            label_s = list()
            stop_iter = False
            for domain_idx in range(self.ndomain):
                tmp_img = data['S' + str(domain_idx + 1)].cuda()
                tmp_label = data['S' + str(domain_idx + 1) +
                                 '_label'].long().cuda()
                img_s.append(tmp_img)
                label_s.append(tmp_label)

                if tmp_img.size()[0] < self.batch_size:
                    stop_iter = True

            if stop_iter:
                break

            self.reset_grad()

            # get feature embeddings
            feats = list()
            for domain_idx in range(self.ndomain):
                tmp_img = img_s[domain_idx]
                tmp_feat = self.G(tmp_img)
                feats.append(tmp_feat)

            # Update the global mean and adjacency matrix
            loss_local = self.update_statistics(feats, label_s)
            feats = torch.cat(feats, dim=0)
            labels = torch.cat(label_s, dim=0)

            # add query samples to the domain graph
            gcn_feats = torch.cat([self.mean, feats], dim=0)
            gcn_adj = self.construct_adj(feats)

            # output classification logit with GCN
            gcn_logit = self.GCN(gcn_feats, gcn_adj)

            # define GCN classification losses
            domain_logit = gcn_logit[:self.mean.shape[0], :]
            domain_label = torch.cat([torch.arange(self.args.nclasses)] *
                                     self.ndomain,
                                     dim=0)
            domain_label = domain_label.long().cuda()
            loss_cls_dom = criterion(domain_logit, domain_label)

            query_logit = gcn_logit[self.mean.shape[0]:, :]
            loss_cls_src = criterion(query_logit, labels)

            loss_cls = loss_cls_src + loss_cls_dom

            # define relation alignment losses
            loss_global = self.adj_loss() * self.args.Lambda_global
            loss_local = loss_local * self.args.Lambda_local
            loss_relation = loss_local + loss_global

            loss = loss_cls + loss_relation

            # back-propagation
            loss.backward()
            self.opt_gcn.step()
            self.opt_g.step()

            # record training information
            if epoch == 0 and batch_idx == 0 and record_file:
                record = open(record_file, 'a')
                record.write(str(self.args))
                record.close()

            if batch_idx % self.interval == 0:
                print(
                    'Train Epoch: {:>3} [{:>3}/{} ({:.2f}%)]\tLoss_cls_domain: {:.5f}\tLoss_cls_source: {:.5f}'
                    '\tLoss_global: {:.5f}\tLoss_local: {:.5f}'.format(
                        epoch, batch_idx + 1, self.niter,
                        (batch_idx + 1.) / self.niter, loss_cls_dom.item(),
                        loss_cls_src.item(), loss_global.item(),
                        loss_local.item()))
                if record_file:
                    record = open(record_file, 'a')
                    record.write(
                        '\nTrain Epoch: {:>3} [{:>3}/{} ({:.2f}%)]\tLoss_cls_domain: {:.5f}\tLoss_cls_source: {:.5f}'
                        '\tLoss_global: {:.5f}\tLoss_local: {:.5f}'.format(
                            epoch, batch_idx + 1, self.niter,
                            (batch_idx + 1.) / self.niter, loss_cls_dom.item(),
                            loss_cls_src.item(), loss_global.item(),
                            loss_local.item()))
                    record.close()

        return batch_idx

    # per epoch training in a Multi-Source Domain Adaptation setting
    def train_gcn_adapt(self, epoch, record_file=None):
        criterion = nn.CrossEntropyLoss().cuda()
        self.G.train()
        self.GCN.train()

        for batch_idx, data in enumerate(self.datasets):
            # get the source batches
            img_s = list()
            label_s = list()
            stop_iter = False
            for domain_idx in range(self.ndomain - 1):
                tmp_img = data['S' + str(domain_idx + 1)].cuda()
                tmp_label = data['S' + str(domain_idx + 1) +
                                 '_label'].long().cuda()
                img_s.append(tmp_img)
                label_s.append(tmp_label)

                if tmp_img.size()[0] < self.batch_size:
                    stop_iter = True

            if stop_iter:
                break

            # get the target batch
            img_t = data['T'].cuda()
            if img_t.size()[0] < self.batch_size:
                break

            self.reset_grad()

            # get feature embeddings
            feat_list = list()
            for domain_idx in range(self.ndomain - 1):
                tmp_img = img_s[domain_idx]
                tmp_feat = self.G(tmp_img)
                feat_list.append(tmp_feat)

            feat_t = self.G(img_t)
            feat_list.append(feat_t)
            feats = torch.cat(feat_list, dim=0)
            labels = torch.cat(label_s, dim=0)

            # add query samples to the domain graph
            gcn_feats = torch.cat([self.mean, feats], dim=0)
            gcn_adj = self.construct_adj(feats)

            # output classification logit with GCN
            gcn_logit = self.GCN(gcn_feats, gcn_adj)

            # predict the pseudo labels for the target domain
            feat_t_, label_t_ = self.pseudo_label(
                gcn_logit[-feat_t.shape[0]:, :], feat_t)
            feat_list.pop()
            feat_list.append(feat_t_)
            label_s.append(label_t_)

            # update the statistics for source and target domains
            loss_local = self.update_statistics(feat_list, label_s)

            # define GCN classification losses
            domain_logit = gcn_logit[:self.mean.shape[0], :]
            domain_label = torch.cat([torch.arange(self.args.nclasses)] *
                                     self.ndomain,
                                     dim=0)
            domain_label = domain_label.long().cuda()
            loss_cls_dom = criterion(domain_logit, domain_label)

            query_logit = gcn_logit[self.mean.shape[0]:, :]
            loss_cls_src = criterion(query_logit[:-feat_t.shape[0]], labels)

            target_logit = query_logit[-feat_t.shape[0]:]
            target_prob = F.softmax(target_logit, dim=1)
            loss_cls_tgt = (-target_prob *
                            torch.log(target_prob + 1e-8)).mean()

            loss_cls = loss_cls_dom + loss_cls_src + loss_cls_tgt

            # define relation alignment losses
            loss_global = self.adj_loss() * self.args.Lambda_global
            loss_local = loss_local * self.args.Lambda_local
            loss_relation = loss_local + loss_global

            loss = loss_cls + loss_relation

            # back-propagation
            loss.backward(retain_graph=True)
            self.opt_gcn.step()
            self.opt_g.step()

            # record training information
            if epoch == 0 and batch_idx == 0 and record_file:
                record = open(record_file, 'a')
                record.write(str(self.args) + '\n')
                record.close()

            if batch_idx % self.interval == 0:
                print(
                    'Train Epoch: {:>3} [{:>3}/{} ({:.2f}%)]\tLoss_cls_domain: {:.5f}\tLoss_cls_source: {:.5f}'
                    '\tLoss_cls_target: {:.5f}\tLoss_global: {:.5f}\tLoss_local: {:.5f}'
                    .format(epoch, batch_idx + 1, self.niter,
                            (batch_idx + 1.) / self.niter, loss_cls_dom.item(),
                            loss_cls_src.item(), loss_cls_tgt.item(),
                            loss_global.item(), loss_local.item()))
                if record_file:
                    record = open(record_file, 'a')
                    record.write(
                        '\nTrain Epoch: {:>3} [{:>3}/{} ({:.2f}%)]\tLoss_cls_domain: {:.5f}\tLoss_cls_source: {:.5f}'
                        '\tLoss_cls_target: {:.5f}\tLoss_global: {:.5f}\tLoss_local: {:.5f}'
                        .format(epoch, batch_idx + 1, self.niter,
                                (batch_idx + 1.) / self.niter,
                                loss_cls_dom.item(), loss_cls_src.item(),
                                loss_cls_tgt.item(), loss_global.item(),
                                loss_local.item()))
                    record.close()

        return batch_idx

    # per epoch test on target domain
    def test(self, epoch, record_file=None, save_model=False):
        self.G.eval()
        self.GCN.eval()

        test_loss = 0
        correct = 0
        size = 0

        for batch_idx, data in enumerate(self.dataset_test):
            img = data['T']
            label = data['T_label']
            img, label = img.cuda(), label.long().cuda()

            feat = self.G(img)

            gcn_feats = torch.cat([self.mean, feat], dim=0)
            gcn_adj = self.construct_adj(feat)
            gcn_logit = self.GCN(gcn_feats, gcn_adj)
            output = gcn_logit[self.mean.shape[0]:, :]

            test_loss += -F.nll_loss(output, label).item()
            pred = output.max(1)[1]
            k = label.size()[0]
            correct += pred.eq(label).cpu().sum()
            size += k

        test_loss = test_loss / size

        if correct > self.best_correct:
            self.best_correct = correct
            if save_model:
                best_state = {
                    'G': self.G.state_dict(),
                    'GCN': self.GCN.state_dict(),
                    'mean': self.mean.cpu(),
                    'adj': self.adj.cpu(),
                    'epoch': epoch
                }
                torch.save(best_state,
                           os.path.join(self.checkpoint_dir, 'best_model.pth'))

        # save checkpoint
        if save_model and epoch % self.save_epoch == 0:
            state = {
                'G': self.G.state_dict(),
                'GCN': self.GCN.state_dict(),
                'mean': self.mean.cpu(),
                'adj': self.adj.cpu()
            }
            torch.save(
                state,
                os.path.join(self.checkpoint_dir,
                             'epoch_' + str(epoch) + '.pth'))

        # record test information
        print(
            '\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%), Best Accuracy: {}/{} ({:.4f}%)  \n'
            .format(test_loss, correct, size, 100. * float(correct) / size,
                    self.best_correct, size,
                    100. * float(self.best_correct) / size))

        if record_file:
            if epoch == 0:
                record = open(record_file, 'a')
                record.write(str(self.args))
                record.close()

            record = open(record_file, 'a')
            print('recording %s' % record_file)
            record.write(
                '\nEpoch {:>3} Average loss: {:.5f}, Accuracy: {:.5f}, Best Accuracy: {:.5f}'
                .format(epoch, test_loss, 100. * float(correct) / size,
                        100. * float(self.best_correct) / size))
            record.close()
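A hypothetical driver loop for Solver; the constructor arguments follow the signature above, while args.max_epoch is an assumed command-line option:

solver = Solver(args, batch_size=128, target='mnistm', checkpoint_dir='./checkpoint')
for epoch in range(args.max_epoch):  # args.max_epoch is an assumption
    solver.train_gcn_adapt(epoch, record_file='train.log')
    solver.test(epoch, record_file='test.log', save_model=True)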