Example #1
def train_dataloader(self):
    # Build the training dataset from the dataframe and data directory,
    # then wrap it in a shuffling DataLoader.
    dataset = TrainDataset(self.train_df, self.data_dir,
                           self.train_transforms)
    loader = DataLoader(dataset,
                        batch_size=BATCH_SIZE,
                        num_workers=12,
                        shuffle=True)
    return loader
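
The method signature matches the PyTorch Lightning train_dataloader hook. A minimal sketch of a host module, assuming pytorch_lightning is available and that TrainDataset and BATCH_SIZE are defined as in the example:

import pytorch_lightning as pl
from torch.utils.data import DataLoader

class LitModel(pl.LightningModule):  # hypothetical host class
    def __init__(self, train_df, data_dir, train_transforms):
        super().__init__()
        self.train_df = train_df
        self.data_dir = data_dir
        self.train_transforms = train_transforms

    def train_dataloader(self):
        # TrainDataset and BATCH_SIZE as defined in the example above.
        dataset = TrainDataset(self.train_df, self.data_dir,
                               self.train_transforms)
        return DataLoader(dataset, batch_size=BATCH_SIZE,
                          num_workers=12, shuffle=True)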
Example #2

makedirs_(opt.outf)

# Seed Python and PyTorch RNGs for reproducibility.
if opt.manualSeed is None:
    opt.manualSeed = random.randint(1, 10000)
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.cuda:
    torch.cuda.manual_seed_all(opt.manualSeed)

cudnn.benchmark = True  # let cuDNN pick the fastest conv algorithms

###########   DATASET   ###########
train_dataset = TrainDataset(opt.data_path, opt.size_w, opt.size_h)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=opt.batch_size,
                                           shuffle=True,
                                           num_workers=6)


###########   MODEL   ###########
# Custom weights initialization called on netG and netD (DCGAN-style:
# N(0, 0.02) for conv weights, N(1, 0.02) for batch-norm scales).
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
        if m.bias is not None:  # conv layers built with bias=False have no bias
            m.bias.data.fill_(0)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
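
The function is meant to be applied recursively with Module.apply; a minimal usage sketch, where netG stands in for the script's generator:

import torch.nn as nn

# Module.apply walks every submodule, so weights_init runs once per layer.
netG = nn.Sequential(nn.Conv2d(3, 64, 3, padding=1), nn.BatchNorm2d(64))
netG.apply(weights_init)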
Example #3
class Solver():
    def __init__(self, model, cfg):
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.cfg = cfg
        self.refiner = model().to(self.device)

        # Optimize only the parameters that require gradients.
        self.optim = optim.Adam(
            filter(lambda p: p.requires_grad, self.refiner.parameters()),
            cfg.lr)

        self.epoch = 0
        if cfg.ckpt_name != 'None':  # the name arrives as a string, so 'None' means no checkpoint
            self.load(cfg.ckpt_name)
        self.step = self.epoch * cfg.update_every
        learning_rate = self.decay_learning_rate()

        for param_group in self.optim.param_groups:
            param_group["lr"] = learning_rate

        self.loss_fn = nn.L1Loss()

        if cfg.verbose:
            num_params = sum(p.numel() for p in self.refiner.parameters()
                             if p.requires_grad)
            print("# of params:", num_params)

        os.makedirs(cfg.saved_ckpt_dir, exist_ok=True)

    def fit(self):
        cfg = self.cfg
        self.refiner.zero_grad()
        self.refiner.train()
        self.train_loader = TrainDataset(cfg.train_data_path,
                                         size=cfg.patch_size,
                                         scale=cfg.scale,
                                         batch_size=cfg.batch_size)

        while True:  # train indefinitely; progress is tracked via self.step
            # TrainDataset acts as its own sampler: resample() draws a fresh
            # random batch and indexing returns it.
            self.train_loader.resample()
            inputs = self.train_loader[0]

            hr = customTransform(inputs, 0)  # high-resolution target
            lr = customTransform(inputs, 1)  # low-resolution input

            del inputs

            # Forward pass; the loss is scaled so that gradients accumulated
            # over update_every mini-batches average to one effective batch.
            sr = self.refiner(lr)
            l1loss = self.loss_fn(sr, hr) / cfg.update_every
            l1loss.backward()

            if self.step % cfg.update_every == 0:
                # Apply the accumulated gradients, then decay the learning rate.
                nn.utils.clip_grad_norm_(self.refiner.parameters(),
                                         cfg.clip)
                self.optim.step()
                self.refiner.zero_grad()
                self.epoch += 1
                learning_rate = self.decay_learning_rate()
                for param_group in self.optim.param_groups:
                    param_group["lr"] = learning_rate

            # Live preview of the first sample in the batch.
            cv2.imshow('sr', deTransform(sr[:1]))
            cv2.imshow('hr', deTransform(hr[:1]))
            cv2.waitKey(1)

            self.step += 1
            if cfg.verbose and self.step % (cfg.update_every * 10) == 0:
                print('epoch', self.epoch, 'l1_loss', l1loss.item())
            if self.step % (cfg.update_every * 100) == 0:
                self.save()  # checkpoint regardless of verbosity

    def load(self, path):
        # Copy over only the weights whose names match the current model,
        # so partially compatible checkpoints still load.
        states = torch.load(path)
        state_dict = self.refiner.state_dict()
        for k, v in states.items():
            if k in state_dict:
                state_dict[k] = v
        self.refiner.load_state_dict(state_dict)

    def save(self):
        path = os.path.join(self.cfg.saved_ckpt_dir,
                            'checkpoint_e{}.pth'.format(self.epoch))
        torch.save(self.refiner.state_dict(), path)

    def decay_learning_rate(self):
        # Halve the base learning rate every cfg.decay epochs.
        lr = self.cfg.lr * (0.5 ** (self.epoch // self.cfg.decay))
        return lr
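
A minimal driver sketch, assuming a Net model class and a cfg object exposing the fields the Solver reads; the concrete values below are placeholders, not part of the original snippet:

from types import SimpleNamespace

cfg = SimpleNamespace(lr=1e-4, ckpt_name='None', update_every=4,
                      decay=100, clip=10.0, verbose=True,
                      saved_ckpt_dir='ckpt', train_data_path='data/train',
                      patch_size=64, scale=2, batch_size=16)

solver = Solver(Net, cfg)  # Net: any nn.Module subclass taking no ctor args
solver.fit()               # trains until the process is interrupted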
Example #4
def val_dataloader(self):
    # Validation loader: same dataset class, no shuffling.
    dataset = TrainDataset(self.valid_df, self.data_dir,
                           self.valid_transforms)
    loader = DataLoader(dataset, batch_size=BATCH_SIZE, num_workers=12)
    return loader
Example #5
    # Split the three-level category labels into separate lists.
    train_cate1 = [cate[0] for cate in train_cate]
    train_cate2 = [cate[1] for cate in train_cate]
    train_cate3 = [cate[2] for cate in train_cate]

    # Pad titles/descriptions to the longest sequence in the split.
    train_title = padding(train_title, max(train_t_len))
    train_desc = padding(train_desc, max(train_d_len))

    train_t_len = torch.tensor(train_t_len)
    train_d_len = torch.tensor(train_d_len)

    train_cate1 = torch.tensor(train_cate1)
    train_cate2 = torch.tensor(train_cate2)
    train_cate3 = torch.tensor(train_cate3)

    train_set = TrainDataset(train_title, train_desc,
                             train_cate1, train_cate2, train_cate3,
                             train_t_len, train_d_len)
    train_loader = data.DataLoader(train_set,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   num_workers=4)

    with open(valid_feature_path, 'rb') as fp:
        valid_features = pickle.load(fp)
    valid_title = [feature[0] for feature in valid_features]
    valid_desc = [feature[1] for feature in valid_features]
    valid_t_len = [len(feature[0]) for feature in valid_features]
    valid_d_len = [len(feature[1]) for feature in valid_features]

    with open(valid_cate_path, 'rb') as fp:
        valid_cate = pickle.load(fp)
    valid_cate1 = [cate[0] for cate in valid_cate]
    valid_cate2 = [cate[1] for cate in valid_cate]
    valid_cate3 = [cate[2] for cate in valid_cate]
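
padding is a project-local helper that is not shown here; a plausible minimal sketch, assuming the sequences are lists of integer token ids and 0 is the pad id (both assumptions):

def padding(seqs, max_len, pad_id=0):
    # Hypothetical reconstruction: right-pad each token-id sequence with
    # pad_id so every row has length max_len.
    return [seq + [pad_id] * (max_len - len(seq)) for seq in seqs]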
Example #6
    with open(valid_cate_path, 'rb') as fp:
        cate = pickle.load(fp)
    cate1 = [ca[0] for ca in cate]
    cate2 = [ca[1] for ca in cate]
    cate3 = [ca[2] for ca in cate]

    title = padding(title, max(t_len))
    desc = padding(desc, max(d_len))
    t_len = torch.tensor(t_len)
    d_len = torch.tensor(d_len)

    cate1 = torch.tensor(cate1)
    cate2 = torch.tensor(cate2)
    cate3 = torch.tensor(cate3)

    # The TrainDataset class is reused to wrap the validation split.
    valid_set = TrainDataset(title, desc, cate1, cate2, cate3, t_len, d_len)
    valid_loader = data.DataLoader(valid_set,
                                   batch_size=args.batch_size,
                                   num_workers=4)

    # Restore the level-1 classifier from its checkpoint (weights + args).
    clf1_state = torch.load(os.path.join('checkpoint', 'glu_w_cate1.pth'))
    clf1 = Cate1Classifier(WORDS_CNT + 1, clf1_state['args'])
    clf1.load_state_dict(clf1_state['model'])

    # Category masks produced during preprocessing; the level-2 classifier
    # is constructed with mask1.
    with open('./preproc/mask.pkl', 'rb') as fp:
        mask1, mask2 = pickle.load(fp)

    # Restore the level-2 classifier.
    clf2_state = torch.load(os.path.join('checkpoint', 'glu_w_cate2.pth'))
    clf2 = Cate2Classifier(WORDS_CNT + 1, clf2_state['args'], mask1=mask1)
    clf2.load_state_dict(clf2_state['model'])
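
A minimal evaluation sketch over valid_loader; the batch unpacking order and the classifiers' forward signatures are assumptions, since neither is shown in the snippet:

clf1.eval()
clf2.eval()
with torch.no_grad():
    for title, desc, cate1, cate2, cate3, t_len, d_len in valid_loader:
        logits1 = clf1(title, desc, t_len, d_len)  # assumed signature
        pred1 = logits1.argmax(dim=1)              # level-1 predictions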