Beispiel #1
0
 def test_define(self):
     """Model should construct cleanly for CPU, single-GPU and multi-GPU setups.

     The original built a throwaway ``Model`` before the branch and then
     unconditionally overwrote it in every branch; that dead construction
     is removed here.
     """
     if torch.cuda.device_count() == 0:
         net = Model(self.mode, [], "kaiming", show_structure=False)
     elif torch.cuda.device_count() == 1:
         net = Model(self.mode, [0], "kaiming", show_structure=False)
     else:
         net = Model(self.mode, [0, 1], "kaiming", show_structure=False)
Beispiel #2
0
 def test_check_point(self):
     """check_point should write checkpoint files for any GPU configuration.

     Bug fixed: ``torch.cuda.is_available()`` is True whenever at least one
     device exists, so the original ``elif device_count() > 1`` and ``> 2``
     branches were unreachable. Conditions are now tested most-specific first.
     """
     if torch.cuda.device_count() > 2:
         net = Model(self.mode, [2, 3], "kaiming", show_structure=False)
     elif torch.cuda.device_count() > 1:
         net = Model(self.mode, [0, 1], "kaiming", show_structure=False)
     elif torch.cuda.is_available():
         net = Model(self.mode, [0], "kaiming", show_structure=False)
     else:
         net = Model(self.mode, [], "kaiming", show_structure=False)
     net.check_point("tm", self.epoch, "test_model")
     # Clean up the directory the checkpoint call created
     # (avoid naming a local `dir`, which shadows the builtin).
     shutil.rmtree("test_model/")
Beispiel #3
0
    def test_load_point(self):
        """A checkpoint written by check_point can be read back by load_point."""
        n_gpus = torch.cuda.device_count()
        if n_gpus == 0:
            gpu_ids = []
        elif n_gpus == 1:
            gpu_ids = [0]
        else:
            gpu_ids = [0, 1]
        net = Model(self.mode, gpu_ids, "kaiming", show_structure=False)

        net.check_point("tm", self.epoch, "test_model")
        net.load_point("tm", self.epoch, "test_model")
        shutil.rmtree("test_model/")
Beispiel #4
0
 def test_save_load_weights(self):
     """Weights saved through check_point can be restored with load_weights."""
     print(self.mode)
     available = torch.cuda.device_count()
     if available == 0:
         gpu_ids = []
     elif available == 1:
         gpu_ids = [0]
     else:
         gpu_ids = [0, 1]
     net = Model(self.mode, gpu_ids, "kaiming", show_structure=False)
     net.check_point("tm", self.epoch, "test_model")
     weights_file = "test_model/checkpoint/Weights_tm_%d.pth" % self.epoch
     net.load_weights(weights_file)
     shutil.rmtree("test_model/")
Beispiel #5
0
 def test__weight_init(self):
     """_weight_init should be usable as a ``Module.apply`` hook.

     Bug fixed: ``torch.cuda.is_available()`` is True for any device count
     >= 1, so the original ``elif device_count() > 1`` / ``> 2`` branches
     could never run. Conditions are now ordered most-specific first.
     """
     if torch.cuda.device_count() > 2:
         net = Model(self.mode, [2, 3], "kaiming", show_structure=False)
     elif torch.cuda.device_count() > 1:
         net = Model(self.mode, [0, 1], "kaiming", show_structure=False)
     elif torch.cuda.is_available():
         net = Model(self.mode, [0], "kaiming", show_structure=False)
     else:
         net = Model(self.mode, [], "kaiming", show_structure=False)
     net.init_fc = init.kaiming_normal_
     self.mode.apply(net._weight_init)
Beispiel #6
0
def build_task_trainer(unfixed_params):
    """Build a FashionMNIST classification task like ``FashionClassTrainer``.

    :param unfixed_params: dict carrying the tunable parameters — keys
        ``logdir``, ``gpu_ids_abs``, ``depth`` and ``lr``.
    :return: a fully configured ``FashionClassTrainer``.
    """
    logdir = unfixed_params['logdir']
    gpu_ids_abs = unfixed_params["gpu_ids_abs"]
    depth = unfixed_params["depth"]
    lr = unfixed_params["lr"]

    # Fixed hyper-parameters of this task.
    batch_size = 32
    nepochs = 100
    num_class = 10
    torch.backends.cudnn.benchmark = True
    mnist = FashionMNIST(root="datasets/fashion_data",
                         batch_size=batch_size,
                         num_workers=2)
    net = Model(SimpleModel(depth),
                gpu_ids_abs=gpu_ids_abs,
                init_method="kaiming",
                verbose=False)
    opt = Optimizer(net.parameters(),
                    "RMSprop",
                    0.94,
                    1,
                    position_type="epoch",
                    lr=lr,
                    weight_decay=2e-5,
                    momentum=0)
    return FashionClassTrainer(logdir, nepochs, gpu_ids_abs, net, opt,
                               mnist, num_class)
Beispiel #7
0
 def test_loadModel(self):
     """loadModel should restore a model previously saved via checkPoint."""
     print(self.mode)
     net = Model(self.mode, show_structure=False)
     net.checkPoint("tm", self.epoch, "test_model")
     model_file = "test_model/checkpoint/Model_tm_%d.pth" % self.epoch
     weights_file = "test_model/checkpoint/Weights_tm_%d.pth" % self.epoch
     net.loadModel(model_file, weights_file)
     shutil.rmtree("test_model/")
Beispiel #8
0
 def test_configure(self):
     """configure must expose the expected metadata for the tiny Sequential."""
     visible = torch.cuda.device_count()
     if visible == 0:
         net = Model(self.mode, [], "kaiming", show_structure=False)
     elif visible == 1:
         net = Model(self.mode, [0], "kaiming", show_structure=False)
     else:
         net = Model(self.mode, [0, 1], "kaiming", show_structure=False)
     expected = {
         'model_name': 'Sequential',
         'init_method': 'kaiming',
         'total_params': 91,
         'structure':
             'Sequential(\n  (0): Conv2d(10, 1, kernel_size=(3, 3), '
             'stride=(1, 1))\n)'
     }
     self.assertEqual(net.configure, expected)
     print(net.configure)
     # Every entry must carry a real key and a real value.
     for key, value in net.configure.items():
         assert key is not None
         assert value is not None
Beispiel #9
0
def start_cifarPix2pixGanTrainer(gpus=(),
                                 nepochs=200,
                                 lr=1e-3,
                                 depth_G=32,
                                 depth_D=32,
                                 run_type="train"):
    """Build and run a pix2pix-style GAN on CIFAR-10.

    :param gpus: absolute gpu ids; set ``gpus=[]`` to use the cpu.
    :param nepochs: total number of training epochs.
    :param lr: learning rate shared by both optimizers.
    :param depth_G: base channel depth of the generator.
    :param depth_D: base channel depth of the discriminator.
    :param run_type: ``"train"`` runs full training, ``"debug"`` a dry run.
    """
    # The original no-op self-assignments (``gpus = gpus`` etc.) were removed.
    batch_size = 32
    image_channel = 3

    G_hprams = {
        "optimizer": "Adam",
        "lr_decay": 0.9,
        "decay_position": 10,
        "position_type": "epoch",
        "lr": lr,
        "weight_decay": 2e-5,
        "betas": (0.9, 0.99)
    }
    D_hprams = {
        "optimizer": "RMSprop",
        "lr_decay": 0.9,
        "decay_position": 10,
        "position_type": "epoch",
        "lr": lr,
        "weight_decay": 2e-5,
        "momentum": 0
    }

    print('===> Build dataset')
    cifar10 = Cifar10(root="datasets/cifar10", batch_size=batch_size)
    torch.backends.cudnn.benchmark = True
    print('===> Building model')
    D_net = Discriminator(input_nc=image_channel, depth=depth_D)
    D = Model(D_net,
              gpu_ids_abs=gpus,
              init_method="kaiming",
              check_point_pos=50)
    # -----------------------------------
    G_net = Generator(input_nc=1, output_nc=image_channel, depth=depth_G)
    G = Model(G_net,
              gpu_ids_abs=gpus,
              init_method="kaiming",
              check_point_pos=50)
    print('===> Building optimizer')
    opt_D = Optimizer(D.parameters(), **D_hprams)
    opt_G = Optimizer(G.parameters(), **G_hprams)
    print('===> Training')
    Trainer = CifarPix2pixGanTrainer("log/cifar_p2p", nepochs, gpus, G, D,
                                     opt_G, opt_D, cifar10)
    if run_type == "train":
        Trainer.train()
    elif run_type == "debug":
        Trainer.debug()
Beispiel #10
0
 def test_regist_config(self):
     """regist_config should record optimizer and model configs per step."""
     log = Loger()
     # NOTE(review): the list repeats the SAME tensor object five times,
     # matching the original; presumably only the shapes matter here.
     params = [torch.ones(3, 3, requires_grad=True)] * 5
     opt = Optimizer(params,
                     lr=0.999,
                     weight_decay=0.03,
                     momentum=0.5,
                     betas=(0.1, 0.4),
                     opt_name="RMSprop")
     for step in (1, 2, 3):
         log.regist_config(step, opt)
         print(log.__dict__["Optimizer"])
         if step == 1:
             # Decay once so the step-2 registration sees a changed lr.
             opt.do_lr_decay()
     net_G = Model(Inception3(4))
     log.regist_config(1, net_G)
Beispiel #11
0
def start_fashionGenerateGanTrainer(gpus=(), nepochs=50, lr=1e-3, depth_G=32, depth_D=32, latent_shape=(256, 1, 1),
                                    run_type="train"):
    """Build and run a generative GAN on FashionMNIST.

    :param gpus: absolute gpu ids; set ``gpus=[]`` to use the cpu.
    :param nepochs: total number of training epochs.
    :param lr: learning rate shared by both optimizers.
    :param depth_G: base channel depth of the generator.
    :param depth_D: base channel depth of the discriminator.
    :param latent_shape: shape of the noise input fed to the generator.
    :param run_type: ``"train"`` runs full training, ``"debug"`` a dry run.
    """
    # The original no-op self-assignments (``gpus = gpus`` etc.) were removed.
    batch_size = 64
    image_channel = 1

    G_hprams = {"optimizer": "Adam", "lr_decay": 0.94,
                "decay_position": 2, "position_type": "epoch",
                "lr": lr, "weight_decay": 2e-5,
                "betas": (0.9, 0.99)
                }
    D_hprams = {"optimizer": "RMSprop", "lr_decay": 0.94,
                "decay_position": 2, "position_type": "epoch",
                "lr": lr, "weight_decay": 2e-5,
                "momentum": 0
                }

    print('===> Build dataset')
    mnist = FashionMNIST(batch_size=batch_size)
    torch.backends.cudnn.benchmark = True
    print('===> Building model')
    D_net = Discriminator(input_nc=image_channel, depth=depth_D)
    D = Model(D_net, gpu_ids_abs=gpus, init_method="kaiming", check_point_pos=10)
    # -----------------------------------
    G_net = Generator(input_nc=latent_shape[0], output_nc=image_channel, depth=depth_G)
    G = Model(G_net, gpu_ids_abs=gpus, init_method="kaiming", check_point_pos=10)
    print('===> Building optimizer')
    opt_D = Optimizer(D.parameters(), **D_hprams)
    opt_G = Optimizer(G.parameters(), **G_hprams)
    print('===> Training')
    print("using `tensorboard --logdir=log` to see learning curves and net structure."
          "training and valid_epoch data, configures info and checkpoint were save in `log` directory.")
    Trainer = FashionGenerateGanTrainer("log/fashion_generate", nepochs, gpus, G, D, opt_G, opt_D, mnist,
                                        latent_shape)
    if run_type == "train":
        Trainer.train()
    elif run_type == "debug":
        Trainer.debug()
Beispiel #12
0
    lr = 1e-3
    lr_decay = 0.9  # 0.94
    weight_decay = 2e-5  # 2e-5
    momentum = 0
    betas = (0.9, 0.999)

    opt_D_name = "RMSprop"
    depth_D = 16

    latent_shape = (16, 4, 4)
    image_channel = 3
    mid_channel = 4
    print('===> Build dataset')
    cifar10 = Cifar10(batch_size=batchSize)
    torch.backends.cudnn.benchmark = True
    print('===> Building model')
    D_net = NThickLayer_D(input_nc=image_channel, mid_channels=mid_channel, depth=depth_D, norm_type=None,
                          active_type="ReLU")
    D = Model(D_net, gpu_ids_abs=gpus, init_method="kaiming")
    # -----------------------------------
    G_net = TWnet_G(input_nc=latent_shape[0], output_nc=image_channel, depth=depth_G, norm_type="batch",
                    active_type="LeakyReLU")
    G = Model(G_net, gpu_ids_abs=gpus, init_method="kaiming")
    print('===> Building optimizer')
    opt_D = Optimizer(D.parameters(), lr, lr_decay, weight_decay, momentum, betas, opt_D_name)
    opt_G = Optimizer(G.parameters(), lr, lr_decay, weight_decay, momentum, betas, opt_G_name)

    print('===> Training')
    Trainer = GenerateGanTrainer("log", nepochs, gpus, G, D, opt_G, opt_D, cifar10, latent_shape)
    Trainer.train()
Beispiel #13
0
        return out


def resunet18(depth=64):
    """Construct a ResUNet with the ResNet-18 block layout ``[2, 2, 2, 2]``.

    The previous docstring was a copy-paste from torchvision and documented
    a nonexistent ``pretrained`` argument; no pretrained weights are loaded.

    Args:
        depth (int): base channel depth of the network.

    Returns:
        ResUNet: the constructed model.
    """
    return ResUNet([2, 2, 2, 2], depth=depth)


def nlayer(depth=64):
    """Construct an ``NLayer_D`` discriminator.

    The previous docstring was a copy-paste from torchvision's ResNet-18
    and documented a nonexistent ``pretrained`` argument.

    Args:
        depth (int): base channel depth of the discriminator.

    Returns:
        NLayer_D: the constructed model.
    """
    return NLayer_D(depth=depth)


if __name__ == '__main__':
    from jdit.model import Model

    # Smoke check: build a small ResUNet and report its parameter count.
    resunet = resunet18(32)
    wrapped = Model(resunet)
    print(wrapped.num_params)
    # d = nlayer(8)
    # x = torch.randn(1, 1, 256, 256)
    # y = model(x)
Beispiel #14
0
 def test_checkPoint(self):
     """checkPoint should write its files without raising."""
     net = Model(self.mode, show_structure=False)
     net.checkPoint("tm", self.epoch, "test_model")
     # Remove the directory the checkpoint created.
     shutil.rmtree("test_model/")
Beispiel #15
0
 def test_configure(self):
     """Every configure entry must carry a non-None key and value."""
     net = Model(self.mode)
     for key, value in net.configure.items():
         assert key is not None
         assert value is not None
Beispiel #16
0
    lr_decay = 0.92
    weight_decay = 2e-5
    momentum = 0
    betas = (0.9, 0.999)

    d_opt_name = "Adam"
    g_opt_name = "RMSprop"

    torch.backends.cudnn.benchmark = True
    print('===> Build dataset')
    trainLoader, testLoader = get_fashion_mnist_dataloaders(
        batch_size=batchSize)

    print('===> Building model')
    model_g_net = TWnet_G(depth=g_depth, norm_type="switch")
    # model_d_net = NLayer_D(depth=d_depth, norm_type="instance", use_sigmoid=False, use_liner=False)
    model_d_net = NThickLayer_D(depth=d_depth)
    net_G = Model(model_g_net, gpu_ids=gpus, init_method=True)

    net_D = Model(model_d_net, gpu_ids=gpus, init_method=True)

    print('===> Building optimizer')
    optG = Optimizer(net_G.parameters(), lr, lr_decay, weight_decay, momentum,
                     betas, d_opt_name)
    optD = Optimizer(net_D.parameters(), lr, lr_decay, weight_decay, momentum,
                     betas, g_opt_name)
    print('===> Training')
    Trainer = FashingTrainer(nepochs, gpus, net_G, net_D, optG, optD,
                             trainLoader, testLoader, testLoader, d_turn)
    Trainer.train()
Beispiel #17
0
 def test_weightsInit(self):
     """_weight_init should work as an ``apply`` hook with a kaiming init fn."""
     empty_net = Model()
     empty_net.init_fc = init.kaiming_normal_
     self.mode.apply(empty_net._weight_init)
Beispiel #18
0
 def test_print_network(self):
     """Constructing a Model quietly should still populate ``.model``."""
     quiet_net = Model(self.mode, show_structure=False)
     assert quiet_net.model is not None
Beispiel #19
0
 def test_define(self):
     """define() should populate ``.model`` on an initially empty Model."""
     empty = Model()
     assert empty.model is None
     empty.define(self.mode, [], "kaiming", show_structure=False)
     assert empty.model is not None
Beispiel #20
0
    opt_name = "Adam"

    torch.backends.cudnn.benchmark = True
    print('===> Build dataset')
    trainLoader, testLoader, cvLoader = getDataLoader(
        image_dir_path=IMAGE_PATH,
        mask_dir_path=MASK_PATH_DIC["gaussian"],
        batch_size=batchSize,
        test_size=test_size,
        train_size=train_size,
        valid_size=cv_size,
        num_workers=0)

    print('===> Building model')
    net_G = Model(Wnet_G(depth=g_depth, norm_type="switch"),
                  gpu_ids=gpus,
                  use_weights_init=True)

    net_D = Model(NLayer_D(depth=d_depth,
                           norm_type="instance",
                           use_sigmoid=False,
                           use_liner=False),
                  gpu_ids=gpus,
                  use_weights_init=True)

    print('===> Building optimizer')
    optG = Optimizer(net_G.parameters(), lr, lr_decay, weight_decay, momentum,
                     betas, opt_name)
    optD = Optimizer(filter(lambda p: p.requires_grad, net_D.parameters()), lr,
                     lr_decay, weight_decay, momentum, betas, opt_name)
    print('===> Training')