Example #1
    def load(self):
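        # Restore the generator and discriminator weights from the checkpoint
        # files written by save() in Example #2.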
        save_dir = os.path.join(self.save_dir, self.dataset, self.model_name)

        Generator1.load_state_dict(
            torch.load(os.path.join(save_dir, self.model_name + '_G.pkl')))
        Discriminator1.load_state_dict(
            torch.load(os.path.join(save_dir, self.model_name + '_D.pkl')))
Example #2
    def save(self):
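        # Persist the generator/discriminator state_dicts and the training
        # history under self.save_dir/<dataset>/<model_name>/.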
        save_dir = os.path.join(self.save_dir, self.dataset, self.model_name)

        if not os.path.exists(save_dir):
            os.makedirs(save_dir)

        torch.save(Generator1.state_dict(),
                   os.path.join(save_dir, self.model_name + '_G.pkl'))
        torch.save(Discriminator1.state_dict(),
                   os.path.join(save_dir, self.model_name + '_D.pkl'))

        with open(os.path.join(save_dir, self.model_name + '_history.pkl'),
                  'wb') as f:
            pickle.dump(self.train_hist, f)
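The two methods above are a plain state_dict round trip. A minimal, self-contained sketch of the same pattern, using a placeholder network and hypothetical paths rather than the project's own models:

    import os

    import torch
    import torch.nn as nn

    # Placeholder network standing in for the generator; any nn.Module works.
    net = nn.Sequential(nn.Linear(100, 256), nn.ReLU(), nn.Linear(256, 1))

    save_dir = os.path.join('models', 'mnist', 'cgan')   # hypothetical layout
    os.makedirs(save_dir, exist_ok=True)
    torch.save(net.state_dict(), os.path.join(save_dir, 'cgan_G.pkl'))

    # Later: rebuild the same architecture and restore its weights.
    restored = nn.Sequential(nn.Linear(100, 256), nn.ReLU(), nn.Linear(256, 1))
    restored.load_state_dict(torch.load(os.path.join(save_dir, 'cgan_G.pkl')))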
Example #3
    def visualize_results(self, epoch, fix=True):
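        # Generate a square grid of samples (from fixed or freshly drawn
        # noise) and save it as a PNG for this epoch.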
        Generator1.eval()

        result_dir = os.path.join(self.result_dir, self.dataset,
                                  self.model_name)
        if not os.path.exists(result_dir):
            os.makedirs(result_dir)

        image_frame_dim = int(np.floor(np.sqrt(self.sample_num)))

        if fix:
            """ fixed noise """
            samples = Generator1(self.sample_z_, self.sample_y_)
        else:
            """ random noise """
            # torch.randint's upper bound is exclusive, so pass opt.n_classes
            # (not opt.n_classes - 1) to allow every class to be drawn.
            sample_y_ = torch.zeros(opt.batch_size, opt.n_classes).scatter_(
                1,
                torch.randint(0, opt.n_classes,
                              (opt.batch_size, 1)).type(torch.LongTensor), 1)
            sample_z_ = torch.rand((opt.batch_size, opt.latent_dim))
            if cuda:
                sample_z_, sample_y_ = sample_z_.cuda(), sample_y_.cuda()

            samples = Generator1(sample_z_, sample_y_)

        if cuda:
            samples = samples.cpu().data.numpy().transpose(0, 2, 3, 1)
        else:
            samples = samples.data.numpy().transpose(0, 2, 3, 1)

        samples = (samples + 1) / 2
        utils.save_images(
            samples[:image_frame_dim * image_frame_dim, :, :, :],
            [image_frame_dim, image_frame_dim],
            os.path.join(result_dir,
                         self.model_name + '_epoch%03d.png' % epoch))
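The random-noise branch above pairs a uniform latent vector with a one-hot class label built via scatter_. A self-contained sketch of that construction, with illustrative sizes in place of the opt values:

    import torch

    batch_size, n_classes, latent_dim = 8, 10, 62   # illustrative values only

    labels = torch.randint(0, n_classes, (batch_size, 1))   # one class index per row
    y_onehot = torch.zeros(batch_size, n_classes).scatter_(1, labels, 1)
    z = torch.rand(batch_size, latent_dim)                  # uniform noise in [0, 1)
    # The generator is then called as samples = Generator1(z, y_onehot).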
Example #4
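    # Build the train/validation DataLoaders from DomainData, load a
    # pretrained BERT encoder, and construct the GAN generator/discriminator.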
    train_dataloader = DataLoader(dataset=DomainData(train_examples,
                                                     label_list,
                                                     max_seq_length,
                                                     tokenizer),
                                  batch_size=batch_size,
                                  shuffle=True,
                                  drop_last=False)
    val_dataset = DomainData(val_examples, label_list, max_seq_length,
                             tokenizer)
    val_dataloader = DataLoader(dataset=val_dataset, batch_size=batch_size)

    num_train_steps = int(len(train_examples) / batch_size * total_epoch_num)

    bert = BertModel.from_pretrained(bert_model,
                                     cache_dir=PYTORCH_PRETRAINED_BERT_CACHE)
    generator = Generator1(noise_size=noise_size,
                           output_size=768,
                           hidden_sizes=[768],
                           dropout_rate=0.1)
    discriminator = Discriminator(input_size=768,
                                  hidden_sizes=[768],
                                  num_labels=len(label_list),
                                  dropout_rate=0.1)

    bert.to(device)
    if multi_gpu:
        bert = torch.nn.DataParallel(bert, device_ids=device_ids)

    generator.to(device)
    discriminator.to(device)

    param_optimizer = list(bert.named_parameters())
    no_decay = ['bias', 'gamma', 'beta']
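The `no_decay` list above is normally followed by the standard grouping of BERT parameters into weight-decay and no-decay sets. A sketch of that continuation (an assumption, not necessarily this project's exact code):

    # Hedged sketch: 0.01 is the usual default weight decay, not taken from
    # the excerpt above.
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer
                    if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer
                    if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0},
    ]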
Example #5
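    # Pick the generator/discriminator architecture from the command-line
    # options, move the networks and losses to the GPU, and set per-channel
    # normalization statistics for the data loader.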
    # Initialize generator and discriminator
    if opt.arch == 'mlp':
        if opt.gan_type == 'cgan':
            generator = Generator_cgan(latent_dim=opt.latent_dim,
                                       img_shape=img_shape,
                                       n_classes=opt.n_classes)
            discriminator = Discriminator_cgan(img_shape=img_shape,
                                               n_classes=opt.n_classes)
        elif opt.gan_type == 'acgan':
            generator = Generator2(latent_dim=opt.latent_dim,
                                   img_shape=img_shape,
                                   n_classes=opt.n_classes)
            discriminator = Discriminator2(img_shape=img_shape,
                                           n_classes=opt.n_classes)
    elif opt.arch == 'cnn':
        generator = Generator1(latent_dim=opt.latent_dim)
        discriminator = Discriminator1()

    if cuda:
        generator.cuda()
        discriminator.cuda()
        adversarial_loss.cuda()
        auxiliary_loss.cuda()
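    # Note: adversarial_loss and auxiliary_loss are defined outside this
    # excerpt; in a typical cGAN/ACGAN setup they would be something like
    # torch.nn.BCELoss() and torch.nn.CrossEntropyLoss(), respectively.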

    # Configure data loader
    transform = None
    if opt.centering == 0 and opt.num_per == 1:
        mean = [0.0052, -0.4731, 3.1408]
        std = [0.4781, 0.4572, 0.5331]
    elif opt.centering == 1 and opt.num_per == 1:
        mean = [-0.0049, -0.2049, -0.0329]
Example #6
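    # Build the NTU RGB+D DataLoader, instantiate the generator and
    # discriminator, and create an Adam optimizer for each. (The excerpt
    # opens partway through a NTURGBDData_full(...) call.)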
                                     opt.centering, opt.spherical,
                                     opt.real_per, opt.num_per, 'train',
                                     transform, opt.expt)
    else:
        traindata = NTURGBDData_full(0, None, 0, 'hcn', 'sub', 0, 0, 0,
                                     opt.real_per, opt.num_per, 'train',
                                     opt.expt)
    dataloader = DataLoader(traindata,
                            batch_size=opt.batch_size,
                            shuffle=True,
                            num_workers=opt.n_cpu,
                            pin_memory=True)

    # networks init
    Generator1 = Generator1(input_dim=opt.latent_dim,
                            output_dim=opt.n_channels,
                            input_size=opt.img_width,
                            class_num=opt.n_classes)
    Discriminator1 = Discriminator1(input_dim=opt.n_channels,
                                    output_dim=1,
                                    input_size=opt.img_width,
                                    class_num=opt.n_classes)
    Generator1_optimizer = optim.Adam(Generator1.parameters(),
                                      lr=args.lrG,
                                      betas=(args.beta1, args.beta2))
    Discriminator1_optimizer = optim.Adam(Discriminator1.parameters(),
                                          lr=args.lrD,
                                          betas=(args.beta1, args.beta2))

    if cuda:
        Generator1.cuda()
        Discriminator1.cuda()