def main(args):
    # get device (GPU or CPU)
    if torch.cuda.is_available():
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
        torch.backends.cudnn.benchmark = True
        torch.backends.cudnn.enabled = True
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    # fix seed for reproducibility
    torch.manual_seed(7777)

    # load config
    if not args.config.endswith('.yaml'): args.config += '.yaml'
    with open(args.config) as cfg_file:
        config = yaml.safe_load(cfg_file)
        print(config)

    # load dataset
    val_dataset = get_dataset(config, mode=args.mode)
    val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
                                             num_workers=config["num_workers"],
                                             batch_size=1,
                                             shuffle=False,
                                             collate_fn=collate_fn)
    print("... Get COCO Dataloader for evaluation")
    coco = get_coco_api_from_dataset(val_loader.dataset)

    ckp_paths = sorted(glob.glob(os.path.join(args.ckp_dir, "*.tar")))
    for ckp_idx, ckp_path in enumerate(ckp_paths):
        print("[CKP {} / {}]".format(ckp_idx, len(ckp_paths)), "-----" * 10)
        # load model
        model = get_instance_segmentation_model(num_classes=2)
        model.load_state_dict(torch.load(ckp_path))
        model.to(device)

        coco_evaluator = evaluate(coco, model, val_loader, device)

        if args.write_excel:
            os.makedirs(args.excel_save_dir, exist_ok=True)
            # parse the epoch number from a checkpoint named "epoch_<n>.tar"
            epoch = int(os.path.basename(ckp_path)[6:-4])
            coco_to_excel(
                coco_evaluator, epoch, args.excel_save_dir,
                "{}_{}".format(config["dataset"], config["label_type"]))
Example #2
def main(args):
    # get device (GPU or CPU)
    if torch.cuda.is_available():
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
        torch.backends.cudnn.benchmark = True
        torch.backends.cudnn.enabled = True
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    # fix seed for reproducibility
    torch.manual_seed(7777)

    # load config
    if not args.config.endswith('.yaml'): args.config += '.yaml'
    with open(args.config) as cfg_file:
        config = yaml.safe_load(cfg_file)
        print(config)

    # load dataset
    val_dataset = get_dataset(config, mode=args.mode)
    val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
                                             num_workers=config["num_workers"],
                                             batch_size=1,
                                             shuffle=False,
                                             collate_fn=collate_fn)
    print("... Get COCO Dataloader for evaluation")
    coco = get_coco_api_from_dataset(val_loader.dataset)

    # load model
    model = get_instance_segmentation_model(num_classes=2)
    model.load_state_dict(torch.load(args.trained_ckp, map_location=device))
    model.to(device)

    coco_evaluator = evaluate(coco, model, val_loader, device)
Example #3
import matplotlib.pyplot as plt
from tensorflow.keras import models, layers, optimizers
import tensorflow as tf
import bz2
from sklearn.metrics import f1_score, roc_auc_score, accuracy_score
import tensorflow.keras.backend as K

import sys
import os

import loader as ld

sys.setrecursionlimit(2500)

(train_texts, train_labels, test_texts, test_labels, test_ascii,
 embedding_matrix, MAX_LENGTH, MAX_FEATURES) = ld.get_dataset()

#####################
# Execution options #
#####################

TRAIN = True

RECR = False  # recurrent network (RNN/GRU) or a non-recurrent network

ATTN = True  # use an attention layer for global pooling or not
LSTM = False  # use LSTM, otherwise a plain RNN

WEIGHTED = True
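
# --- Hedged sketch (not in the original snippet): one plausible way the flags
# --- above could select an architecture. Layer sizes and the attention form
# --- are illustrative guesses; WEIGHTED would typically map to a class_weight
# --- argument in model.fit().
def build_model():
    inputs = layers.Input(shape=(MAX_LENGTH,))
    x = layers.Embedding(MAX_FEATURES, embedding_matrix.shape[1],
                         weights=[embedding_matrix], trainable=False)(inputs)
    if RECR:
        rnn = layers.LSTM if LSTM else layers.SimpleRNN
        x = rnn(128, return_sequences=ATTN)(x)
        if ATTN:
            # simple additive attention pooling over timesteps
            scores = layers.Dense(1, activation="tanh")(x)
            weights = layers.Softmax(axis=1)(scores)
            x = layers.Flatten()(layers.Dot(axes=1)([weights, x]))
    else:
        x = layers.GlobalAveragePooling1D()(x)
    outputs = layers.Dense(1, activation="sigmoid")(x)
    model = models.Model(inputs, outputs)
    model.compile(optimizer=optimizers.Adam(), loss="binary_crossentropy",
                  metrics=["accuracy"])
    return model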

Example #4
# config for dataset
# (alternative config, kept for reference but overridden below)
# cfg_name = "./config/eval_real_data_MediHard"
# mode = 'test'
# save_dir = "./tmp/UNIMIB_vis/OurReal_MediHard"

cfg_name = "./config/eval_unimib"
mode = 'test'
save_dir = "./tmp/Inference/UNIMIB_Testset_GroundTruth"
os.makedirs(save_dir, exist_ok=True)

if not cfg_name.endswith('.yaml'): cfg_name += '.yaml'
with open(cfg_name) as cfg_file:
    config = yaml.safe_load(cfg_file)
    print(config)

dataset = get_dataset(config, mode=mode)
dataloader = torch.utils.data.DataLoader(dataset=dataset,
                                         num_workers=config["num_workers"],
                                         batch_size=1,
                                         shuffle=False)
unorm = UnNormalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))

print("+++ Visualize {} data".format(len(dataloader)))
for idx, (image, target) in enumerate(tqdm(dataloader)):
    # de-normalize image tensor
    image = unorm(image[0]).cpu().detach().numpy().transpose(1, 2, 0)
    image = np.uint8(image * 255)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)  # RGB -> BGR for OpenCV
    # extract mask
    masks = target["masks"][0].cpu().detach().numpy()
    boxes = target["boxes"][0].cpu().detach().numpy()
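    # --- Hedged continuation (the snippet ends here in the source): draw the
    # --- ground-truth masks and boxes and write the overlay into save_dir.
    # --- Assumes boxes use the torchvision [x1, y1, x2, y2] format.
    overlay = image.copy()
    for mask, box in zip(masks, boxes):
        overlay[mask > 0.5] = (0, 255, 0)  # paint mask pixels green
        x1, y1, x2, y2 = box.astype(int)
        cv2.rectangle(overlay, (x1, y1), (x2, y2), (0, 0, 255), 2)
    blended = cv2.addWeighted(image, 0.6, overlay, 0.4, 0)
    cv2.imwrite(os.path.join(save_dir, "{:05d}.png".format(idx)), blended)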
Example #5
def main():
    images = get_dataset(data_dir, batch_size)
    images1 = tf.image.resize_bilinear(images, [64, 64], align_corners=False)
    tf.image_summary("real", images, max_images=1)

    z = tf.placeholder(tf.float32, [None, z_dim], name='z')

    with tf.variable_scope("generator1") as scope:
        gen1 = generator1(z)
        tf.image_summary("fake1", gen1, max_images=1)
        scope.reuse_variables()
        sampler1 = generator1(z, training=False)
        tf.image_summary("fake1_sampler", sampler1, max_images=1)

    with tf.variable_scope("generator2") as scope:
        gen2 = generator2(gen1)
        tf.image_summary("fake2", gen2, max_images=1)
        scope.reuse_variables()
        sampler2 = generator2(gen1, training=False)
        tf.image_summary("fake2_sampler", sampler2, max_images=1)

    with tf.variable_scope("discriminator1") as scope:
        disc_real1 = discriminator1(images1)
        scope.reuse_variables()
        disc_fake1 = discriminator1(gen1)

    with tf.variable_scope("discriminator2") as scope:
        disc_real2 = discriminator2(images)
        scope.reuse_variables()
        disc_fake2 = discriminator2(gen2)

    d_vars1 = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                "discriminator1")
    g_vars1 = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "generator1")
    d_vars2 = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                "discriminator2")
    g_vars2 = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "generator2")

    disc_real_loss1 = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(disc_real1,
                                                tf.ones(tf.shape(disc_real1))))
    disc_fake_loss1 = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            disc_fake1, tf.fill(tf.shape(disc_fake1), -1.0)))

    disc_real_loss2 = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(disc_real2,
                                                tf.ones(tf.shape(disc_real2))))
    disc_fake_loss2 = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            disc_fake2, tf.fill(tf.shape(disc_fake2), -1.0)))

    d_loss1 = disc_real_loss1 + (disc_fake_loss1 / 2.0)
    d_loss2 = disc_real_loss2 + (disc_fake_loss2 / 2.0)

    g_loss1 = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(disc_fake1,
                                                tf.ones(tf.shape(disc_fake1))))
    g_loss2 = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(disc_fake2,
                                                tf.ones(tf.shape(disc_fake2))))
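
    # NOTE: the loss calls above use the positional (logits, targets) signature
    # from TF <= 0.12. In TF 1.x+ keyword arguments are mandatory; a hedged
    # sketch of the equivalent modern call (same tensors assumed):
    #
    #     g_loss1 = tf.reduce_mean(
    #         tf.nn.sigmoid_cross_entropy_with_logits(
    #             labels=tf.ones_like(disc_fake1), logits=disc_fake1))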

    tf.scalar_summary("Discriminator_loss_real1", disc_real_loss1)
    tf.scalar_summary("Discrimintator_loss_fake1", disc_fake_loss1)
    tf.scalar_summary("Discriminator_loss1", d_loss1)
    tf.scalar_summary("Generator_loss1", g_loss1)
    tf.scalar_summary("Discriminator_loss_real2", disc_real_loss2)
    tf.scalar_summary("Discrimintator_loss_fake2", disc_fake_loss2)
    tf.scalar_summary("Discriminator_loss2", d_loss2)
    tf.scalar_summary("Generator_loss2", g_loss2)

    if WGAN:
        d_optimizer1 = tf.train.RMSPropOptimizer(learning_rate=learning_rate)
        g_optimizer1 = tf.train.RMSPropOptimizer(learning_rate=learning_rate)
        d_optimizer2 = tf.train.RMSPropOptimizer(learning_rate=learning_rate)
        g_optimizer2 = tf.train.RMSPropOptimizer(learning_rate=learning_rate)

    else:
        d_optimizer1 = tf.train.AdamOptimizer(learning_rate=learning_rate)
        g_optimizer1 = tf.train.AdamOptimizer(learning_rate=learning_rate)
        d_optimizer2 = tf.train.AdamOptimizer(learning_rate=learning_rate)
        g_optimizer2 = tf.train.AdamOptimizer(learning_rate=learning_rate)

    d_train_op1 = slim.learning.create_train_op(d_loss1,
                                                d_optimizer1,
                                                variables_to_train=d_vars1)
    g_train_op1 = slim.learning.create_train_op(g_loss1,
                                                g_optimizer1,
                                                variables_to_train=g_vars1)

    d_train_op2 = slim.learning.create_train_op(d_loss2,
                                                d_optimizer2,
                                                variables_to_train=d_vars2)
    g_train_op2 = slim.learning.create_train_op(g_loss2,
                                                g_optimizer2,
                                                variables_to_train=g_vars2)

    # WGAN critic weight clipping: keep discriminator weights inside [-c, c]
    # to enforce the Lipschitz constraint used by WGAN
    clip_critic1 = []
    for var in d_vars1:
        clip_critic1.append(tf.assign(var, tf.clip_by_value(var, -c, c)))

    clip_critic2 = []
    for var in d_vars2:
        clip_critic2.append(tf.assign(var, tf.clip_by_value(var, -c, c)))

    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess, coord)
        summary_op = tf.merge_all_summaries()
        summary_writer = tf.train.SummaryWriter(log_dir, sess.graph)
        saver = tf.train.Saver()

        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        start = 0

        ckpt = tf.train.get_checkpoint_state(log_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print("Model found! Restoring...")
            start = int(ckpt.model_checkpoint_path.split("-")[-1]) + 1
            saver.restore(sess, ckpt.model_checkpoint_path)
            print("Restored!")
        else:
            print("No model found!")

        def make_feed_dict():
            batch_z = np.random.uniform(-1., 1.,
                                        [batch_size, z_dim]).astype(np.float32)
            feed = {z: batch_z}
            return feed

        def visualize(step, image_amount=9):
            images1 = []
            images2 = []
            done1 = False
            done2 = False
            while not done1:
                img = sess.run(sampler1, feed_dict=make_feed_dict())
                img = np.squeeze(img)
                for i in range(batch_size):
                    images1.append(np.reshape(img[i], [64, 64, 3]))
                    if len(images1) == image_amount * image_amount:
                        color_grid_vis(images1,
                                       image_amount,
                                       image_amount,
                                       save_path=os.path.join(
                                           imgs_dir, "test_" + str(step) +
                                           "x1" + ".png"))
                        done1 = True
                        break

            while not done2:
                img = sess.run(sampler2, feed_dict=make_feed_dict())
                img = np.squeeze(img)
                for i in range(batch_size):
                    images2.append(np.reshape(img[i], [128, 128, 3]))
                    if len(images2) == image_amount * image_amount:
                        color_grid_vis(images2,
                                       image_amount,
                                       image_amount,
                                       save_path=os.path.join(
                                           imgs_dir, "test_" + str(step) +
                                           "x2" + ".png"))
                        done2 = True
                        break

        try:
            curr = start
            print("Starting training!")
            r1 = 1
            r2 = 1
            for itr in range(start, max_iterations):
                # start_time = time.time()
                if WGAN:
                    if itr < 25 or (start_2 < itr and itr <
                                    (start_2 + 25)) or itr % 500 == 0:
                        diters = 100
                    else:
                        diters = d_iters
                else:
                    # non-WGAN: the loss-ratio schedule collapses to a single
                    # discriminator step regardless of r1
                    diters = 1

                for i in range(diters):
                    if WGAN:
                        sess.run(clip_critic1)
                    sess.run(d_train_op1, feed_dict=make_feed_dict())

                sess.run(g_train_op1, feed_dict=make_feed_dict())

                if start_2 < itr:
                    if WGAN:
                        if itr < 25 or (start_2 < itr and itr <
                                        (start_2 + 25)) or itr % 500 == 0:
                            diters = 100
                        else:
                            diters = d_iters
                    else:
                        # same placeholder schedule as above: always one step
                        diters = 1

                    for i in range(diters):
                        if WGAN:
                            sess.run(clip_critic2)
                        sess.run(d_train_op2, feed_dict=make_feed_dict())

                    sess.run(g_train_op2, feed_dict=make_feed_dict())

                if itr % sum_per == 0:
                    g_loss_val, d_loss_val, g_loss_val2, d_loss_val2, summary_str = sess.run(
                        [g_loss1, d_loss1, g_loss2, d_loss2, summary_op],
                        feed_dict=make_feed_dict())
                    print(
                        "Step: %d, generator1 loss: %g, discriminator1_loss: %g"
                        % (itr, g_loss_val, d_loss_val))
                    print(
                        "Step: %d, generator2 loss: %g, discriminator2_loss: %g"
                        % (itr, g_loss_val2, d_loss_val2))
                    if not WGAN:
                        r1 = max(g_loss_val, 0.000001) / max(
                            d_loss_val, 0.000001)
                        r2 = max(g_loss_val2, 0.000001) / max(
                            d_loss_val2, 0.000001)
                    summary_writer.add_summary(summary_str, itr)
                    # print("--- %s seconds ---" % (time.time() - start_time))

                if itr % save_per == 0:
                    saver.save(sess,
                               os.path.join(log_dir, "model.ckpt"),
                               global_step=itr)

                if itr % image_per == 0:
                    visualize(itr)

                curr = itr

        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        except KeyboardInterrupt:
            print("Ending Training...")
            saver.save(sess,
                       os.path.join(log_dir, "model.ckpt"),
                       global_step=curr)
        finally:
            coord.request_stop()

        coord.join(threads)
Example #6
import matplotlib.pyplot as plt
import numpy as np
import numpy.linalg as la
import loader as ld
import spirals as sp
from plotting import plot_progression, plot_model, plot_separation
import random as rn

rng = np.random.randn  # shorthand: standard-normal sampler

images, label = ld.get_dataset("training", 3, 6, "../mnist")
images = images / 255

K = 15  # number of layers
d = 784  # number of pixels per image; each image is stacked into a vector of length d
I = 70  # number of images
h = 0.1  # step length in the transformations
Wk = rng(K, d, d)
w = rng(d)
mu = rng(1)
one = np.ones(I)
bk = rng(d, K)
U0 = np.array((Wk, bk, w, mu), dtype=object)  # ragged parameters need an object array
y0, C1 = sp.get_data_spiral_2d(I)
C = np.reshape(C1, I)


# With a matrix argument, the functions act elementwise on the matrix
def eta(x):
    # logistic sigmoid written via tanh: (1 + tanh(x/2)) / 2 == 1 / (1 + e^-x)
    return 1 / 2 * (1 + np.tanh(x / 2))
Example #7
def main(args):
    # get device (GPU or CPU)
    if torch.cuda.is_available():
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
        torch.backends.cudnn.benchmark = True
        torch.backends.cudnn.enabled = True
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    # fix seed for reproducibility
    torch.manual_seed(7777)

    # load config
    if not args.config.endswith('.yaml'): args.config += '.yaml'
    with open(args.config) as cfg_file:
        config = yaml.safe_load(cfg_file)
        print(config)

    # load dataset
    train_dataset = get_dataset(config)
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        num_workers=config["num_workers"],
        batch_size=config["batch_size"],
        shuffle=True,
        collate_fn=collate_fn)
    val_dataset = get_dataset(config, mode="val")
    val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
                                             num_workers=config["num_workers"],
                                             batch_size=1,
                                             shuffle=False,
                                             collate_fn=collate_fn)
    print("... Get COCO Dataloader for evaluation")
    coco = get_coco_api_from_dataset(val_loader.dataset)

    # load model
    model = get_instance_segmentation_model(num_classes=2)
    if args.resume:
        if args.resume_ckp:
            resume_ckp = args.resume_ckp
        elif "resume_ckp" in config:
            resume_ckp = config["resume_ckp"]
        else:
            raise ValueError(
                "Invalid resume setting: no trained checkpoint provided in "
                "config or args")
        model.load_state_dict(torch.load(resume_ckp, map_location=device))
    model.to(device)

    # construct an optimizer
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.Adam(params,
                                 lr=config["lr"],
                                 weight_decay=config["wd"])
    lr_update = config.get("save_interval")  # lr update interval follows the save interval

    # set training epoch
    start_epoch = args.resume_epoch if args.resume_epoch else 0
    if args.max_epoch:
        max_epoch = args.max_epoch
    else:
        max_epoch = config.get("max_epoch", 100)
    assert start_epoch < max_epoch
    save_interval = config.get("save_interval", 1)

    # logging
    output_folder = config["save_dir"]
    os.makedirs(output_folder, exist_ok=True)

    print("+++ Start Training  @start:{} @max: {}".format(
        start_epoch, max_epoch))
    for epoch in range(start_epoch, max_epoch):
        # train
        train_one_epoch(epoch, model, train_loader, optimizer, device,
                        lr_update)
        # validate and write results
        coco_evaluator = evaluate(coco, model, val_loader, device)
        # save weight
        if epoch % save_interval == 0:
            torch.save(model.state_dict(),
                       '{}/epoch_{}.tar'.format(output_folder, epoch))
            if args.write_excel:
                coco_to_excel(
                    coco_evaluator, epoch, output_folder,
                    "{}_{}".format(config["dataset"], config["label_type"]))