Example #1

import numpy as np
import scipy.stats
import torch
from torch.utils.data import DataLoader

# repo-local helpers assumed in scope: load_dataset, collate, get_model,
# set_seed, plot_corr


def forward_model(best_model, method):
    args = best_model['args']

    torch.cuda.set_device(args.gpu)
    set_seed(args)

    # load and preprocess dataset
    all_data = load_dataset(args)
    training = all_data[:int(len(all_data) * 0.7)]
    validation = all_data[int(len(all_data) * 0.7):int(len(all_data) * 0.8)]
    testing = all_data[int(len(all_data) * 0.8):]

    train_loader = DataLoader(training,
                              batch_size=1000,
                              shuffle=True,
                              collate_fn=collate)
    val_loader = DataLoader(validation,
                            batch_size=1000,
                            shuffle=True,
                            collate_fn=collate)
    test_loader = DataLoader(testing,
                             batch_size=4000,
                             shuffle=False,
                             collate_fn=collate)

    # placeholder of dataset
    dataset = (None, np.zeros((15, 15)),
               np.zeros((1, args.num_factors)), None, None, None, None)
    # create model
    model = get_model(dataset, args, mode='multilabel').cuda()

    # load the trained weights once, before iterating over the test batches
    model.load_state_dict(best_model['model_state_dict'])
    model.eval()

    for step, (g, labels, gt_adjs) in enumerate(test_loader):

        # update the new graph
        model.g = g

        features = g.ndata['feat'].float().cuda()
        labels = labels.cuda()
        # forward pass; only the hidden features it produces are needed here
        logits = model(features)

        hidden = model.get_hidden_feature()
        matrix = hidden[0]  # num_samples x dim
        # pairwise Pearson correlation between hidden dimensions
        correlation = np.zeros((matrix.shape[1], matrix.shape[1]))
        for i in range(matrix.shape[1]):
            for j in range(matrix.shape[1]):
                correlation[i, j] = scipy.stats.pearsonr(matrix[:, i],
                                                         matrix[:, j])[0]

        plot_corr(np.abs(correlation), save=f'{method}.png')
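
# The double loop above computes pairwise Pearson correlations one pair at a
# time. A vectorized equivalent (a sketch, assuming the hidden features are a
# CPU NumPy array of shape num_samples x dim):
def pairwise_pearson(matrix):
    # np.corrcoef treats rows as variables, so transpose so the feature
    # dimensions become the variables
    return np.corrcoef(matrix.T)  # dim x dim correlation matrix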

Example #2

def main(args):
    torch.cuda.set_device(args.gpu)
    set_seed(args)

    log_dir = make_log_dir(args.model_name, args.dataset, args.log_subdir)

    log_file = os.path.join(log_dir, "log.txt")
    sys.stdout = open(log_file, 'w')
    backup_model = f"cp -r ./models {log_dir}"
    os.system(backup_model)

    # load and preprocess dataset
    zinc_data = load_dataset(args)

    train_loader = DataLoader(zinc_data.train,
                              batch_size=1000,
                              shuffle=True,
                              collate_fn=zinc_data.collate,
                              num_workers=4)
    val_loader = DataLoader(zinc_data.val,
                            batch_size=1000,
                            shuffle=False,
                            collate_fn=zinc_data.collate)
    test_loader = DataLoader(zinc_data.test,
                             batch_size=1000,
                             shuffle=False,
                             collate_fn=zinc_data.collate)

    # placeholder of dataset
    dataset = (None, None, None, None, None, None, None)
    # create model
    model = get_model(dataset, args, mode='zinc').cuda()

    print(model)
    # define loss func
    loss_fcn = torch.nn.L1Loss()

    # define optimizer
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           mode='min',
                                                           factor=0.5,
                                                           patience=50,
                                                           verbose=True)

    best_val_loss = sys.maxsize
    best_test_mae = sys.maxsize
    dur = []
    for epoch in range(args.epochs):
        model.train()
        epoch_loss = 0
        epoch_train_mae = 0
        t0 = time.time()
        for batch_idx, (batch_graphs, batch_targets, batch_snorm_n,
                        batch_snorm_e) in enumerate(train_loader):
            batch_x = batch_graphs.ndata['feat'].cuda()  # num x feat
            batch_e = batch_graphs.edata['feat'].cuda()
            batch_snorm_e = batch_snorm_e.cuda()
            batch_targets = batch_targets.cuda()
            batch_snorm_n = batch_snorm_n.cuda()  # num x 1

            optimizer.zero_grad()

            model.g = batch_graphs
            batch_scores = model(batch_x, batch_e, batch_snorm_n,
                                 batch_snorm_e)

            loss = loss_fcn(batch_scores, batch_targets)

            if args.model_name == "FactorGNN" and args.dis_weight > 0.0:
                losses = model.compute_disentangle_loss()
                dis_loss = model.merge_loss(losses) * args.dis_weight
                loss = loss + dis_loss

            loss.backward()
            optimizer.step()

            iter_loss = loss.item()
            iter_mae = F.l1_loss(batch_scores, batch_targets).item()
            epoch_loss += iter_loss
            epoch_train_mae += iter_mae

        dur.append(time.time() - t0)
        epoch_loss /= (batch_idx + 1)
        epoch_train_mae /= (batch_idx + 1)
        val_loss, val_mae = test(model, val_loader)
        test_loss, test_mae = test(model, test_loader)

        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_test_mae = test_mae
            torch.save(
                {
                    'epoch': epoch,
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'loss': best_test_mae,
                    'args': args
                }, os.path.join(log_dir, 'best_model.pt'))

        print(f"time {np.mean(dur):.2f} epoch {epoch:03d} | " +
              f"train ({epoch_loss:.4f}, {epoch_train_mae:.4f}) | " +
              f"val ({val_loss:.4f}, {val_mae:.4f}) | " +
              f"test ({test_loss:.4f}, {test_mae:.4f}) | " +
              f"best: {best_test_mae:.4f}")

        sys.stdout.flush()

        if optimizer.param_groups[0]['lr'] > 1e-5:
            scheduler.step(val_loss)
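
# `test` is called in the loop above but is not part of this snippet. A
# minimal sketch of what it plausibly does, mirroring the training loop
# (per-batch averaging is an assumption):
def test(model, loader):
    model.eval()
    loss_fcn = torch.nn.L1Loss()
    total_loss, total_mae, n_batches = 0.0, 0.0, 0
    with torch.no_grad():
        for batch_graphs, batch_targets, batch_snorm_n, batch_snorm_e in loader:
            batch_x = batch_graphs.ndata['feat'].cuda()
            batch_e = batch_graphs.edata['feat'].cuda()
            batch_targets = batch_targets.cuda()
            model.g = batch_graphs
            batch_scores = model(batch_x, batch_e,
                                 batch_snorm_n.cuda(), batch_snorm_e.cuda())
            total_loss += loss_fcn(batch_scores, batch_targets).item()
            total_mae += F.l1_loss(batch_scores, batch_targets).item()
            n_batches += 1
    return total_loss / n_batches, total_mae / n_batches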

Example #3

def main(args):
    torch.cuda.set_device(args.gpu)
    set_seed(args)

    log_dir = make_log_dir(args.model_name, args.dataset, args.log_subdir)

    log_file = os.path.join(log_dir, "log.txt")
    sys.stdout = open(log_file, 'w')
    backup_model = f"cp -r ./models {log_dir}"
    os.system(backup_model)

    # load and preprocess dataset
    all_data = load_dataset(args)
    training = all_data[:int(len(all_data) * 0.7)]
    validation = all_data[int(len(all_data) * 0.7):int(len(all_data) * 0.8)]
    testing = all_data[int(len(all_data) * 0.8):]

    train_loader = DataLoader(training,
                              batch_size=1000,
                              shuffle=True,
                              collate_fn=collate)
    val_loader = DataLoader(validation,
                            batch_size=1000,
                            shuffle=False,  # evaluation needs no shuffling
                            collate_fn=collate)
    test_loader = DataLoader(testing,
                             batch_size=1000,
                             shuffle=False,
                             collate_fn=collate)

    # placeholder of dataset
    dataset = (None, np.zeros((15, 15)),
               np.zeros((1, args.num_factors)), None, None, None, None)
    # create model
    model = get_model(dataset, args, mode='multilabel').cuda()

    print(model)

    # define loss func
    loss_fcn = torch.nn.BCEWithLogitsLoss()

    # define optimizer
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    best_val_f1 = 0
    best_test_f1 = 0
    dur = []
    for epoch in range(args.epochs):
        for step, (g, labels, gt_adjs) in enumerate(train_loader):
            model.train()

            # update the new graph
            model.g = g

            t0 = time.time()
            features = g.ndata['feat'].float().cuda()
            labels = labels.cuda()
            logits = model(features)
            loss = loss_fcn(logits, labels)

            if args.model_name == 'FactorGNN' and args.dis_weight > 0.0:
                losses = model.compute_disentangle_loss()
                dis_loss = model.merge_loss(losses) * args.dis_weight
                loss = loss + dis_loss

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            dur.append(time.time() - t0)

        val_micro_f1 = test(model, val_loader)
        test_micro_f1 = test(model, test_loader)

        if val_micro_f1 > best_val_f1:
            best_val_f1 = val_micro_f1
            best_test_f1 = test_micro_f1
            torch.save(
                {
                    'epoch': epoch,
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'loss': best_test_f1,
                    'args': args
                }, os.path.join(log_dir, 'best_model.pt'))

        print(f"time {np.mean(dur):.2f} epoch {epoch:03d} | " +
              f"val ({val_micro_f1:.4f}) | " +
              f"test ({test_micro_f1:.4f}) | " + f"best: {best_test_f1:.4f}")

        sys.stdout.flush()
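
# The multilabel `test` helper is likewise not shown in this snippet. A
# minimal sketch, assuming micro-F1 over thresholded sigmoid outputs
# (sklearn.metrics.f1_score is an assumption):
from sklearn.metrics import f1_score

def test(model, loader):
    model.eval()
    all_preds, all_labels = [], []
    with torch.no_grad():
        for g, labels, gt_adjs in loader:
            model.g = g
            logits = model(g.ndata['feat'].float().cuda())
            all_preds.append((torch.sigmoid(logits) > 0.5).long().cpu())
            all_labels.append(labels.long())
    y_pred = torch.cat(all_preds).numpy()
    y_true = torch.cat(all_labels).numpy()
    return f1_score(y_true, y_pred, average='micro')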

Example #4

def forward_model(best_model, method):
    args = best_model['args']

    torch.cuda.set_device(args.gpu)
    set_seed(args)

    # load and preprocess dataset
    all_data = load_dataset(args)
    training = all_data[:int(len(all_data) * 0.7)]
    validation = all_data[int(len(all_data) * 0.7):int(len(all_data) * 0.8)]
    testing = all_data[int(len(all_data) * 0.8):]

    train_loader = DataLoader(training,
                              batch_size=1000,
                              shuffle=True,
                              collate_fn=collate)
    val_loader = DataLoader(validation,
                            batch_size=1000,
                            shuffle=True,
                            collate_fn=collate)
    test_loader = DataLoader(testing,
                             batch_size=4000,
                             shuffle=False,
                             collate_fn=collate)

    # placeholder of dataset
    dataset = (None, np.zeros((15, 15)),
               np.zeros((1, args.num_factors)), None, None, None, None)
    # create model
    model = get_model(dataset, args, mode='multilabel').cuda()

    g, labels, gt_adjs = next(iter(test_loader))
    model.load_state_dict(best_model['model_state_dict'])
    model.eval()

    # update the new graph
    model.g = g

    features = g.ndata['feat'].float().cuda()
    labels = labels.cuda()
    logits = model(features)  # forward pass populates the factor graphs
    factors = model.get_factor()

    batch_g = factors[0]
    unbatch_g = dgl.unbatch(batch_g)

    ged_ins = compute_GED()

    total_ged = []
    total_factor_map = collections.defaultdict(list)
    sample_n = 0
    for gt_list, pred_g in tqdm.tqdm(zip(gt_adjs, unbatch_g)):
        # dgl graph to adj
        pred_list = generate_adj_factor_graph(pred_g)
        ged, factor_map = ged_ins.hungarian_match(gt_list, pred_list, sample_n)

        for edge_id, hits in factor_map.items():
            total_factor_map[edge_id] += hits

        total_ged.append(ged / len(gt_list))
        sample_n += 1

    c_score = compute_consistant(total_factor_map)

    # raw f-string keeps the LaTeX \pm literal
    print(
        rf"c_score {c_score:.3f} | "
        rf"ged: {np.mean(total_ged):.3f} $\pm$ {np.std(total_ged):.3f}"
    )
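
# `compute_GED().hungarian_match` is repo code that is not shown. The core
# idea is a minimum-cost one-to-one assignment between ground-truth and
# predicted factor graphs; a sketch using scipy.optimize.linear_sum_assignment
# (the `dist` callback is an assumption standing in for the repo's graph edit
# distance):
from scipy.optimize import linear_sum_assignment

def hungarian_match_sketch(gt_adjs, pred_adjs, dist):
    # cost[i][j] = distance between ground-truth graph i and predicted graph j
    cost = np.array([[dist(gt, pred) for pred in pred_adjs] for gt in gt_adjs])
    rows, cols = linear_sum_assignment(cost)  # optimal one-to-one matching
    total_ged = cost[rows, cols].sum()
    factor_map = {int(r): int(c) for r, c in zip(rows, cols)}
    return total_ged, factor_map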

Example #5

# Copyright 2019 ChangyuLiu Authors. All Rights Reserved.
#
#           Licensed under the MIT License.
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#         https://opensource.org/licenses/MIT
# ==============================================================================

from dataset.load_dataset import load_dataset
from eval.cal_loss import cal_loss

train_dataset, _ = load_dataset(6000, 128, 50000, 128)

for image_batch in train_dataset:
    cal_loss(image_batch)

Example #6

# The argparse setup was truncated in the source; a minimal reconstruction
# based on the arguments used below (flag names and defaults are assumptions):
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='mnist',
                    help='Dataset to train on (mnist or cifar).')
parser.add_argument('--epochs', type=int, default=50,
                    help='Epochs for training.')
args = parser.parse_args()
print(args)

# define model save path
save_path = 'training_checkpoint'

# create dir
if not os.path.exists(save_path):
    os.makedirs(save_path)

# define random noise
noise = tf.random.normal([16, 100])

# load dataset
mnist_train_dataset, cifar_train_dataset = load_dataset(60000, 128, 50000, 64)

# load network and optim paras
generator = make_generator_model(args.dataset)
generator_optimizer = generator_optimizer()

discriminator = make_discriminator_model(args.dataset)
discriminator_optimizer = discriminator_optimizer()

checkpoint_dir, checkpoint, checkpoint_prefix = save_checkpoints(
    generator, discriminator, generator_optimizer, discriminator_optimizer,
    save_path)


# This annotation causes the function to be "compiled".
@tf.function
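# The decorated train step was truncated in the source. A minimal sketch of a
# typical tf.function GAN train step (`generator_loss`, `discriminator_loss`,
# and the noise/batch sizes are assumptions, not the original code):
def train_step(images, batch_size=128, noise_dim=100):
    noise = tf.random.normal([batch_size, noise_dim])

    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_images = generator(noise, training=True)
        real_output = discriminator(images, training=True)
        fake_output = discriminator(generated_images, training=True)

        gen_loss = generator_loss(fake_output)
        disc_loss = discriminator_loss(real_output, fake_output)

    gen_grads = gen_tape.gradient(gen_loss, generator.trainable_variables)
    disc_grads = disc_tape.gradient(disc_loss,
                                    discriminator.trainable_variables)

    generator_optimizer.apply_gradients(
        zip(gen_grads, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(
        zip(disc_grads, discriminator.trainable_variables))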

Example #7

import os

from tensorflow import keras
import tensorflow as tf

from model.unet_model import unet_little, unet
from loss.loss import dice_coef, dice_2_coef
from dataset.load_dataset import load_dataset

os.environ["CUDA_VISIBLE_DEVICES"] = "2,3"  # comma-separated, no spaces

# load train dataset
train_ds, val_ds = load_dataset("/home/py36/workspace/deep_binary/dataset/all")
print(type(train_ds))
print(type(val_ds))

# load unet model
model = unet()
model.summary()

# todo: change the file path to google drive
callbacks_list = [
    keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.5, patience=10),
    keras.callbacks.ModelCheckpoint(
        filepath='/home/py36/workspace/deep_binary/save_models/deep_binary_ver0.9_best_loss.h5',
        monitor='loss',
        save_best_only=True),
    keras.callbacks.ModelCheckpoint(
        filepath='/home/py36/workspace/deep_binary/save_models/deep_binary_ver0.9_best_dice.h5',
        # the source is truncated here; the remaining arguments are inferred
        monitor='dice_coef',
        save_best_only=True),
]
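
# A plausible continuation (not in the source): compile with the imported Dice
# metric and train with the callback list above; the optimizer, loss, and
# epoch count are assumptions.
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=[dice_coef])
model.fit(train_ds, validation_data=val_ds, epochs=100,
          callbacks=callbacks_list)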

Example #8

def forward_model(best_model, method):
    args = best_model['args']

    torch.cuda.set_device(args.gpu)
    set_seed(args)

    # load and preprocess dataset
    zinc_data = load_dataset(args)
    train_loader = DataLoader(zinc_data.train,
                              batch_size=1000,
                              shuffle=True,
                              collate_fn=zinc_data.collate,
                              num_workers=4)
    val_loader = DataLoader(zinc_data.val,
                            batch_size=1000,
                            shuffle=False,
                            collate_fn=zinc_data.collate)
    test_loader = DataLoader(zinc_data.test,
                             batch_size=1000,
                             shuffle=False,
                             collate_fn=zinc_data.collate)

    # placeholder of dataset
    dataset = (None, None, None, None, None, None, None)
    # create model
    model = get_model(dataset, args, mode='zinc').cuda()
    model.load_state_dict(best_model['model_state_dict'])
    model.eval()

    batch_graphs, batch_targets, batch_snorm_n, batch_snorm_e = next(
        iter(train_loader))
    batch_x = batch_graphs.ndata['feat'].cuda()  # num x feat
    batch_e = batch_graphs.edata['feat'].cuda()
    batch_snorm_e = batch_snorm_e.cuda()
    batch_targets = batch_targets.cuda()
    batch_snorm_n = batch_snorm_n.cuda()  # num x 1

    model.g = batch_graphs
    batch_scores = model(batch_x, batch_e, batch_snorm_n,
                         batch_snorm_e)

    factors = model.get_factor()

    batch_g = factors[1]
    unbatch_g = dgl.unbatch(batch_g)

    gt_adjs = translate_gt_graph_to_adj(batch_graphs)

    ged_ins = compute_GED()

    total_ged = []
    total_factor_map = collections.defaultdict(list)
    for gt_list, pred_g in tqdm.tqdm(zip(gt_adjs, unbatch_g)):
        # dgl graph to adj
        pred_list = generate_adj_factor_graph(pred_g)
        ged, factor_map = ged_ins.hungarian_match(gt_list, pred_list)

        for edge_id, hits in factor_map.items():
            total_factor_map[edge_id] += hits

        total_ged.append(ged / len(gt_list))

    c_score = compute_consistant(total_factor_map)

    # raw f-string keeps the LaTeX \pm literal
    print(
        rf"{method} | loss {best_model['loss']:.3f} | c_score {c_score:.3f} | "
        rf"ged: {np.mean(total_ged):.2f} $\pm$ {np.std(total_ged):.2f}"
    )
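
# `generate_adj_factor_graph` (used in Examples #4 and #8) is also repo code
# that is not shown. One plausible shape for it, assuming each edge of the
# unbatched DGL factor graph carries per-factor scores in its edge data (the
# key name and threshold are assumptions):
def generate_adj_factor_graph_sketch(g, key='factor', threshold=0.5):
    src, dst = g.edges()
    scores = g.edata[key].detach().cpu().numpy()  # num_edges x num_factors
    src, dst = src.numpy(), dst.numpy()
    n = g.number_of_nodes()
    adjs = []
    for k in range(scores.shape[1]):
        adj = np.zeros((n, n))
        keep = scores[:, k] > threshold  # edges this factor explains
        adj[src[keep], dst[keep]] = 1.0
        adjs.append(adj)
    return adjs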