Example #1
def start_cifarPix2pixGanTrainer(gpus=(),
                                 nepochs=200,
                                 lr=1e-3,
                                 depth_G=32,
                                 depth_D=32,
                                 run_type="train"):
    # pass `gpus=()` to run on the CPU
    batch_size = 32
    image_channel = 3

    G_hprams = {
        "optimizer": "Adam",
        "lr_decay": 0.9,
        "decay_position": 10,
        "position_type": "epoch",
        "lr": lr,
        "weight_decay": 2e-5,
        "betas": (0.9, 0.99)
    }
    D_hprams = {
        "optimizer": "RMSprop",
        "lr_decay": 0.9,
        "decay_position": 10,
        "position_type": "epoch",
        "lr": lr,
        "weight_decay": 2e-5,
        "momentum": 0
    }

    print('===> Build dataset')
    cifar10 = Cifar10(root="datasets/cifar10", batch_size=batch_size)
    torch.backends.cudnn.benchmark = True
    print('===> Building model')
    D_net = Discriminator(input_nc=image_channel, depth=depth_D)
    D = Model(D_net,
              gpu_ids_abs=gpus,
              init_method="kaiming",
              check_point_pos=50)
    # -----------------------------------
    G_net = Generator(input_nc=1, output_nc=image_channel, depth=depth_G)
    G = Model(G_net,
              gpu_ids_abs=gpus,
              init_method="kaiming",
              check_point_pos=50)
    print('===> Building optimizer')
    opt_D = Optimizer(D.parameters(), **D_hprams)
    opt_G = Optimizer(G.parameters(), **G_hprams)
    print('===> Training')
    Trainer = CifarPix2pixGanTrainer("log/cifar_p2p", nepochs, gpus, G, D,
                                     opt_G, opt_D, cifar10)
    if run_type == "train":
        Trainer.train()
    elif run_type == "debug":
        Trainer.debug()
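
A minimal way to launch this example as a script; with `run_type="debug"` the function calls `Trainer.debug()` instead of `Trainer.train()`, and `gpus=()` keeps everything on the CPU:

if __name__ == '__main__':
    start_cifarPix2pixGanTrainer(gpus=(), nepochs=200, run_type="debug")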
Example #2
def start_fashionClassTrainer(gpus=(), nepochs=10, run_type="train"):
    """" An example of fashion-mnist classification

    """
    num_class = 10
    depth = 32
    batch_size = 4
    opt_hpm = {"optimizer": "Adam",
               "lr_decay": 0.94,
               "decay_position": 10,
               "position_type": "epoch",
               "lr_reset": {2: 5e-4, 3: 1e-3},
               "lr": 1e-4,
               "weight_decay": 2e-5,
               "betas": (0.9, 0.99)}

    print('===> Build dataset')
    mnist = FashionMNIST(batch_size=batch_size)
    # mnist.dataset_train = mnist.dataset_test
    torch.backends.cudnn.benchmark = True
    print('===> Building model')
    net = Model(SimpleModel(depth=depth), gpu_ids_abs=gpus, init_method="kaiming", check_point_pos=1)
    print('===> Building optimizer')
    opt = Optimizer(net.parameters(), **opt_hpm)
    print('===> Training')
    print("using `tensorboard --logdir=log` to see learning curves and net structure."
          "training and valid_epoch data, configures info and checkpoint were save in `log` directory.")
    Trainer = FashionClassTrainer("log/fashion_classify", nepochs, gpus, net, opt, mnist, num_class)
    if run_type == "train":
        Trainer.train()
    elif run_type == "debug":
        Trainer.debug()
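
jdit also ships this example as a packaged instance, so it can be run with a two-liner (a sketch; the exact import path may differ between jdit versions):

from jdit.trainer.instances.fashionClassification import start_fashionClassTrainer

if __name__ == '__main__':
    start_fashionClassTrainer()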
Example #3
    def test_regist_config(self):
        log = Loger()
        param = [torch.ones(3, 3, requires_grad=True)] * 5
        opt = Optimizer(param,
                        lr=0.999,
                        weight_decay=0.03,
                        momentum=0.5,
                        betas=(0.1, 0.4),
                        opt_name="RMSprop")
        log.regist_config(1, opt)
        print(log.__dict__["Optimizer"])
        opt.do_lr_decay()
        log.regist_config(2, opt)
        print(log.__dict__["Optimizer"])
        log.regist_config(3, opt)
        print(log.__dict__["Optimizer"])
        net_G = Model(Inception3(4))
        log.regist_config(1, net_G)
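
The bookkeeping above revolves around `Optimizer.do_lr_decay()`, which (as the re-registered configs suggest) scales the current `lr` by `lr_decay`. A standalone sketch of that effect, with illustrative values and an assumed import path:

import torch
from jdit.optimizer import Optimizer  # assumed import path

opt = Optimizer(torch.nn.Linear(3, 3).parameters(),
                "RMSprop",
                lr_decay=0.5,
                decay_position=2,
                position_type="step",
                lr=1.0)
opt.do_lr_decay()
print(opt.lr)  # expected: 0.5, i.e. lr * lr_decay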
Example #4
def start_fashionGenerateGanTrainer(gpus=(), nepochs=50, lr=1e-3, depth_G=32, depth_D=32, latent_shape=(256, 1, 1),
                                    run_type="train"):
    # pass `gpus=()` to run on the CPU
    batch_size = 64
    image_channel = 1

    G_hprams = {"optimizer": "Adam", "lr_decay": 0.94,
                "decay_position": 2, "position_type": "epoch",
                "lr": lr, "weight_decay": 2e-5,
                "betas": (0.9, 0.99)
                }
    D_hprams = {"optimizer": "RMSprop", "lr_decay": 0.94,
                "decay_position": 2, "position_type": "epoch",
                "lr": lr, "weight_decay": 2e-5,
                "momentum": 0
                }

    # `latent_shape` is the input shape of the Generator
    print('===> Build dataset')
    mnist = FashionMNIST(batch_size=batch_size)
    torch.backends.cudnn.benchmark = True
    print('===> Building model')
    D_net = Discriminator(input_nc=image_channel, depth=depth_D)
    D = Model(D_net, gpu_ids_abs=gpus, init_method="kaiming", check_point_pos=10)
    # -----------------------------------
    G_net = Generator(input_nc=latent_shape[0], output_nc=image_channel, depth=depth_G)
    G = Model(G_net, gpu_ids_abs=gpus, init_method="kaiming", check_point_pos=10)
    print('===> Building optimizer')
    opt_D = Optimizer(D.parameters(), **D_hprams)
    opt_G = Optimizer(G.parameters(), **G_hprams)
    print('===> Training')
    print("using `tensorboard --logdir=log` to see learning curves and net structure."
          "training and valid_epoch data, configures info and checkpoint were save in `log` directory.")
    Trainer = FashionGenerateGanTrainer("log/fashion_generate", nepochs, gpus, G, D, opt_G, opt_D, mnist,
                                        latent_shape)
    if run_type == "train":
        Trainer.train()
    elif run_type == "debug":
        Trainer.debug()
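
As with the other instance trainers, this one can be launched directly; a minimal sketch (note that `latent_shape[0]` must match the Generator's `input_nc`, as in the code above):

if __name__ == '__main__':
    start_fashionGenerateGanTrainer(gpus=(), nepochs=50, latent_shape=(256, 1, 1))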
Example #5
    def _train_iteration(self,
                         opt: Optimizer,
                         compute_loss_fc: FunctionType,
                         csv_filename: str = "Train"):
        opt.zero_grad()
        loss, var_dic = compute_loss_fc()
        loss.backward()
        opt.step()
        self.watcher.scalars(var_dict=var_dic,
                             global_step=self.step,
                             tag="Train")
        opt_name = list(self._opts.keys())[list(
            self._opts.values()).index(opt)]
        self.watcher.scalars(var_dict={"Learning rate": opt.lr},
                             global_step=self.step,
                             tag=opt_name)
        self.loger.write(self.step,
                         self.current_epoch,
                         var_dic,
                         csv_filename,
                         header=self.step <= 1)
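
`compute_loss_fc` is expected to return a `(loss, var_dic)` pair: a scalar loss to backpropagate and a dict of values for the watcher/loger. A minimal, self-contained sketch of such a callback (all names here are hypothetical):

import torch
import torch.nn as nn

net = nn.Linear(10, 1)
criterion = nn.MSELoss()
x, y = torch.randn(4, 10), torch.randn(4, 1)

def compute_loss_fc():
    # return the loss tensor plus a dict of scalars to log
    loss = criterion(net(x), y)
    return loss, {"MSE": loss.item()}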
Example #6
def build_task_trainer(unfixed_params):
    """build a task just like FashionClassTrainer.

    :param unfixed_params:
    :return:
    """
    logdir = unfixed_params['logdir']
    gpu_ids_abs = unfixed_params["gpu_ids_abs"]
    depth = unfixed_params["depth"]
    lr = unfixed_params["lr"]

    batch_size = 32
    opt_name = "RMSprop"
    lr_decay = 0.94
    decay_position = 1
    position_type = "epoch"
    weight_decay = 2e-5
    momentum = 0
    nepochs = 100
    num_class = 10
    torch.backends.cudnn.benchmark = True
    mnist = FashionMNIST(root="datasets/fashion_data",
                         batch_size=batch_size,
                         num_workers=2)
    net = Model(SimpleModel(depth),
                gpu_ids_abs=gpu_ids_abs,
                init_method="kaiming",
                verbose=False)
    opt = Optimizer(net.parameters(),
                    opt_name,
                    lr_decay,
                    decay_position,
                    position_type=position_type,
                    lr=lr,
                    weight_decay=weight_decay,
                    momentum=momentum)
    Trainer = FashionClassTrainer(logdir, nepochs, gpu_ids_abs, net, opt,
                                  mnist, num_class)
    return Trainer
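
A sketch of how the builder might be called, with illustrative values; the dict keys mirror the ones the builder reads above:

trainer = build_task_trainer({"logdir": "log/task_0",
                              "gpu_ids_abs": [],
                              "depth": 16,
                              "lr": 1e-3})
trainer.train()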
Example #7
    def test_regist_config(self):
        log = Loger()
        param = torch.nn.Linear(10, 1).parameters()
        opt = Optimizer(param,
                        "RMSprop",
                        lr_decay=0.5,
                        decay_position=2,
                        position_type="step",
                        lr=0.999)
        log.regist_config(opt)
        self.assertEqual(
            log.regist_dict["Optimizer"], {
                'opt_name': 'RMSprop',
                'lr': 0.999,
                'momentum': 0,
                'alpha': 0.99,
                'eps': 1e-08,
                'centered': False,
                'weight_decay': 0,
                'lr_decay': '0.5',
                'decay_decay_typeposition': 'step',  # sic: key name as the library emits it
                'decay_position': '2'
            })
Example #8
        """count the total parameters of model.

        :param proto_model: pytorch module
        :return: number of parameters
        """
        num_params = 0
        for param in proto_model.parameters():
            num_params += param.numel()
        return num_params

    @staticmethod
    def _build_dir(dirs: str):
        if not os.path.exists(dirs):
            os.makedirs(dirs)


if __name__ == '__main__':
    import torch.nn as nn

    test_log = Loger('log')
    test_model = nn.Linear(10, 1)
    test_opt = Optimizer(test_model.parameters(),
                         "Adam",
                         lr_decay=2,
                         decay_position=[1, 3])
    test_log.regist_config(test_opt, epoch=1)
    test_opt.do_lr_decay()
    test_log.regist_config(test_opt, epoch=2)
    test_log.regist_config(test_opt, epoch=3)
    test_log.regist_config(test_opt)
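Example #9
    # Note: this excerpt starts mid-script; IMAGE_PATH, MASK_PATH_DIC, batchSize,
    # test_size, train_size, cv_size, gpus, g_depth, d_depth, lr, lr_decay,
    # weight_decay, momentum, betas, opt_name, nepochs and d_turn are defined
    # earlier in the original source.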
    trainLoader, testLoader, cvLoader = getDataLoader(
        image_dir_path=IMAGE_PATH,
        mask_dir_path=MASK_PATH_DIC["gaussian"],
        batch_size=batchSize,
        test_size=test_size,
        train_size=train_size,
        valid_size=cv_size,
        num_workers=0)

    print('===> Building model')
    net_G = Model(Wnet_G(depth=g_depth, norm_type="switch"),
                  gpu_ids=gpus,
                  use_weights_init=True)

    net_D = Model(NLayer_D(depth=d_depth,
                           norm_type="instance",
                           use_sigmoid=False,
                           use_liner=False),
                  gpu_ids=gpus,
                  use_weights_init=True)

    print('===> Building optimizer')
    optG = Optimizer(net_G.parameters(), lr, lr_decay, weight_decay, momentum,
                     betas, opt_name)
    optD = Optimizer(filter(lambda p: p.requires_grad, net_D.parameters()), lr,
                     lr_decay, weight_decay, momentum, betas, opt_name)
    print('===> Training')
    Trainer = AtomGanTrainer(nepochs, gpus, net_G, net_D, optG, optD,
                             trainLoader, testLoader, cvLoader, d_turn)
    Trainer.train()
Example #10
    # Note: this excerpt starts mid-function; batchSize, gpus, nepochs, depth_G
    # and opt_G_name are defined earlier in the original source.
    lr = 1e-3
    lr_decay = 0.9  # alternative: 0.94
    weight_decay = 2e-5
    momentum = 0
    betas = (0.9, 0.999)

    opt_D_name = "RMSprop"
    depth_D = 16

    latent_shape = (16, 4, 4)
    image_channel = 3
    mid_channel = 4
    print('===> Build dataset')
    cifar10 = Cifar10(batch_size=batchSize)
    torch.backends.cudnn.benchmark = True
    print('===> Building model')
    D_net = NThickLayer_D(input_nc=image_channel, mid_channels=mid_channel, depth=depth_D, norm_type=None,
                          active_type="ReLU")
    D = Model(D_net, gpu_ids_abs=gpus, init_method="kaiming")
    # -----------------------------------
    G_net = TWnet_G(input_nc=latent_shape[0], output_nc=image_channel, depth=depth_G, norm_type="batch",
                    active_type="LeakyReLU")
    G = Model(G_net, gpu_ids_abs=gpus, init_method="kaiming")
    print('===> Building optimizer')
    opt_D = Optimizer(D.parameters(), lr, lr_decay, weight_decay, momentum, betas, opt_D_name)
    opt_G = Optimizer(G.parameters(), lr, lr_decay, weight_decay, momentum, betas, opt_G_name)

    print('===> Training')
    Trainer = GenerateGanTrainer("log", nepochs, gpus, G, D, opt_G, opt_D, cifar10, latent_shape)
    Trainer.train()
Example #11
    # Note: this excerpt starts mid-function; batchSize, g_depth, d_depth, gpus,
    # nepochs, d_turn and lr are defined earlier in the original source.
    lr_decay = 0.92
    weight_decay = 2e-5
    momentum = 0
    betas = (0.9, 0.999)

    d_opt_name = "Adam"
    g_opt_name = "RMSprop"

    torch.backends.cudnn.benchmark = True
    print('===> Build dataset')
    trainLoader, testLoader = get_fashion_mnist_dataloaders(
        batch_size=batchSize)

    print('===> Building model')
    model_g_net = TWnet_G(depth=g_depth, norm_type="switch")
    # model_d_net = NLayer_D(depth=d_depth, norm_type="instance", use_sigmoid=False, use_liner=False)
    model_d_net = NThickLayer_D(depth=d_depth)
    net_G = Model(model_g_net, gpu_ids=gpus, init_method=True)

    net_D = Model(model_d_net, gpu_ids=gpus, init_method=True)

    print('===> Building optimizer')
    optG = Optimizer(net_G.parameters(), lr, lr_decay, weight_decay, momentum,
                     betas, g_opt_name)
    optD = Optimizer(net_D.parameters(), lr, lr_decay, weight_decay, momentum,
                     betas, d_opt_name)
    print('===> Training')
    Trainer = FashingTrainer(nepochs, gpus, net_G, net_D, optG, optD,
                             trainLoader, testLoader, testLoader, d_turn)
    Trainer.train()