Example #1
0
 def build_model(self, lr, EPOCH, loss = 'MSELoss', optim = 'Adam', betas = (0.5, 0.999), cycleLambda = 10):
     """Instantiate the two generator/discriminator pairs, restore any saved
     checkpoints from the working directory, and build the loss functions.

     Hyper-parameters (lr, optim, betas, EPOCH, cycleLambda) are stored on the
     instance for later use.
     """
     # Record training hyper-parameters.
     self.EPOCH = EPOCH
     self.cycleLambda = cycleLambda
     self.lr = lr
     self.optim = optim
     self.betas = betas

     use_cuda = torch.cuda.is_available()
     self.device = torch.device('cuda' if use_cuda else 'cpu')

     # One generator/discriminator pair per image domain (A and B).
     nets = [Generator(), Generator(), Discriminator(), Discriminator()]
     if use_cuda:
         nets = [net.cuda(self.device) for net in nets]
     self.GeneratorA, self.GeneratorB, self.DiscriminatorA, self.DiscriminatorB = nets

     # Resume from checkpoints if they exist in the current directory.
     # NOTE(review): torch.load unpickles arbitrary objects -- only load trusted files.
     saved = os.listdir()
     for attr in ('GeneratorA', 'DiscriminatorA', 'GeneratorB', 'DiscriminatorB'):
         filename = attr + '.pkl'
         if filename in saved:
             setattr(self, attr, torch.load(filename))

     # Adversarial criterion plus an L1 loss (used for the cycle term).
     self.criterion = utils.Loss(loss = loss)
     self.L1Loss = utils.Loss(loss = 'L1Loss')
Example #2
0
    def build_model(self,
                    lr,
                    Epoch,
                    loss='WGANLoss',
                    optim='Adam',
                    betas=(0.5, 0.999)):
        """Construct generator and discriminator, restore checkpoints if
        present, and set up the loss function and per-network optimizers."""
        self.Epoch = Epoch

        #### Build Model ####
        on_gpu = torch.cuda.is_available()
        self.device = torch.device('cuda' if on_gpu else 'cpu')
        self.Generator = Generator()
        self.Discriminator = Discriminator()
        if on_gpu:
            self.Generator = self.Generator.cuda(self.device)
            self.Discriminator = self.Discriminator.cuda(self.device)

        #### Restore checkpoints, if any ####
        # NOTE(review): torch.load unpickles arbitrary objects -- only load trusted files.
        files = os.listdir()
        if 'Generator.pkl' in files:
            self.Generator = torch.load('Generator.pkl')
        if 'Discriminator.pkl' in files:
            self.Discriminator = torch.load('Discriminator.pkl')

        #### Build Loss Function ####
        self.criterion = utils.Loss(loss=loss)

        #### Build optimizing ####
        self.optimG = utils.optim(self.Generator, lr=lr, optim=optim, betas=betas)
        self.optimD = utils.optim(self.Discriminator, lr=lr, optim=optim, betas=betas)
Example #3
0
def main(args):
    """Optimize a synthesized image so that patches of it match the reference
    image under an MMD loss; periodically saves a loss curve and samples.

    `args` is an argparse-style namespace; it is mutated here (args.img,
    args.nc are attached for downstream model construction).
    """
    utils.seedme(args.seed)
    cudnn.benchmark = True
    device = torch.device(
        'cuda' if torch.cuda.is_available() and not args.nocuda else 'cpu')

    # Create the output directory portably (no shell invocation, no quoting issues).
    os.makedirs(args.outf, exist_ok=True)

    img = utils.load_image(
        args.image, resize=args.resize)  # (channel, height, width), [-1,1]
    x0 = torch.from_numpy(img).unsqueeze(0).to(
        device)  # (1, channel, height, width), torch

    args.img = img
    args.nc = img.shape[0]

    x = models.X(image_size=args.syn_size,
                 nc=args.nc,
                 batch_size=args.batch_size).to(device)
    optimizer = optim.Adam(x.parameters(), lr=args.lr)

    netE = models.choose_archE(args).to(device)
    print(netE)

    mmdrq = mmd.MMDrq(nu=args.nu, encoder=netE)
    loss_func = utils.Loss(x0, mmdrq, args.patch_size, args.npatch)

    losses = []
    start_time = time.time()
    for i in range(args.niter):
        optimizer.zero_grad()

        x1 = x()
        loss = loss_func(x1).mean()
        loss.backward()
        optimizer.step()

        losses.append(loss.item())
        if (i + 1) % 500 == 0:
            # Periodic progress report: median-filtered loss curve + samples.
            print('[{}/{}] loss: {}'.format(i + 1, args.niter, loss.item()))
            fig, ax = plt.subplots()
            ax.plot(signal.medfilt(losses, 101)[50:-50])
            ax.set_yscale('symlog')
            fig.tight_layout()
            fig.savefig('{}/loss.png'.format(args.outf))
            plt.close(fig)
            logger.vutils.save_image(x1,
                                     '{}/x_{}.png'.format(args.outf, i + 1),
                                     normalize=True,
                                     nrow=10)
            print('This round took {0} secs'.format(time.time() - start_time))
            start_time = time.time()

    np.save('{}/x1.npy'.format(args.outf), x1.detach().cpu().numpy().squeeze())
Example #4
0
def main(args):
    """Train generator netG so its samples match reference-image patches under
    an MMD loss, minus an entropy bonus (the `kl` objective).

    `args` is an argparse-style namespace; it is mutated here (args.img,
    args.nc are attached for downstream model construction).
    """
    utils.seedme(args.seed)
    cudnn.benchmark = True
    device = torch.device('cuda' if torch.cuda.is_available() and not args.nocuda else 'cpu')

    img = utils.load_image(args.image, resize=args.resize) # (channel, height, width), [-1,1]
    x0 = torch.from_numpy(img).unsqueeze(0).to(device)  # (1, channel, height, width), torch

    args.img = img
    args.nc = img.shape[0]

    netG = models.choose_archG(args).to(device)
    netE = models.choose_archE(args).to(device)
    print(netE)
    print(netG)

    optimizer = optim.Adam(netG.parameters(), lr=args.lr, betas=(args.beta1, args.beta2), amsgrad=True)
    z = torch.randn(args.batch_size, args.nz, 1, 1).to(device)

    mmdrq = mmd.MMDrq(nu=args.nu, encoder=netE)
    loss_func = utils.Loss(x0, mmdrq, args.patch_size, args.npatch)

    log = logger.Logger(args, netG, netE)
    log.save_image(x0, 'ref.png')
    nstart, nend = log.nstart, log.nend

    start_time = time.time()
    for i in range(nstart, nend):
        optimizer.zero_grad()

        # z.normal_() resamples the noise in place each step.
        x1 = netG(z.normal_())
        loss = loss_func(x1).mean()
        ent = utils.sample_entropy(x1.view(x1.shape[0], -1))
        kl = loss - args.alpha * ent

        kl.backward()
        optimizer.step()

        # --- logging
        log.log(loss.item(), ent.item(), kl.item())
        if (i + 1) % 500 == 0:
            print('This round took {0} secs'.format(time.time() - start_time))
            start_time = time.time()
Example #5
0
    def run_solver(iterations,
                   arm,
                   data,
                   test,
                   rng=None,
                   problem='cont',
                   method='5fold',
                   track_valid=None,
                   track_test=None,
                   verbose=False):
        """Evaluate an AdaBoost `arm` for `iterations` rounds, tracking the
        best validation loss and the matching test score.

        :param iterations: number of times to evaluate the arm
        :param arm: dict with 'n_estimators' and 'learning_rate' entries
        :param data: (x, y) training data
        :param test: (x_test, y_test) held-out data
        :param rng: unused; kept for interface compatibility
        :param problem: problem type forwarded to utils.Loss
        :param method: validation method forwarded to utils.Loss
        :param track_valid: running best-validation trace (default np.array([1.]))
        :param track_test: running test trace (default np.array([1.]))
        :param verbose: print per-iteration validation error
        :return: (best_loss, avg_loss, current_track_valid, current_track_test)
        """
        # Avoid mutable (ndarray) default arguments: build the defaults per call.
        if track_valid is None:
            track_valid = np.array([1.])
        if track_test is None:
            track_test = np.array([1.])

        x, y = data
        x_test, y_test = test
        loss = utils.Loss(Ada(),
                          x,
                          y,
                          x_test,
                          y_test,
                          method=method,
                          problem=problem)

        best_loss = 1.
        avg_loss = 0.
        test_score = 1.

        if track_valid.size == 0:
            current_best_valid = 1.
            current_test = 1.
            current_track_valid = np.array([1.])
            current_track_test = np.array([1.])
        else:
            current_best_valid = track_valid[-1]
            current_test = track_test[-1]
            current_track_valid = np.copy(track_valid)
            current_track_test = np.copy(track_test)

        for iteration in range(iterations):
            current_loss, test_error = loss.evaluate_loss(
                n_estimators=arm['n_estimators'],
                learning_rate=arm['learning_rate'])
            # evaluate_loss returns a score; negate so lower is better.
            current_loss = -current_loss
            avg_loss += current_loss

            if verbose:
                print('iteration %i, validation error %f %%' %
                      (iteration, current_loss * 100.))

            if current_loss < best_loss:
                best_loss = current_loss
                test_score = -test_error

            # The traces grow by one entry per iteration whether or not the
            # global best improved (both original branches appended identically).
            if best_loss < current_best_valid:
                current_best_valid = best_loss
                current_test = test_score
            current_track_valid = np.append(current_track_valid,
                                            current_best_valid)
            current_track_test = np.append(current_track_test, current_test)

        avg_loss = avg_loss / iterations

        return best_loss, avg_loss, current_track_valid, current_track_test
Example #6
0
    drop_concl=None,
    drop_hidden=0.1,
    initer_stddev=0.02,
    loss=ks.losses.SparseCategoricalCrossentropy(from_logits=True),
    metric=ks.metrics.SparseCategoricalCrossentropy(from_logits=True),
    num_epochs=2,
    num_heads=3,
    num_rounds=2,
    num_shards=2,
    optimizer=ks.optimizers.Adam(),
    width_dec=40,
    width_enc=50,
)

# Swap the generic Keras loss/metric above for the project-specific wrappers.
params.update(
    loss=qu.Loss(),
    metric=qu.Metric(),
)


def main(ps, fn, root=None, groups=None, count=None):
    """Run training callback `fn` once per (round, group) pair.

    For each of ps.num_rounds rounds and each group, builds a dataset
    (qd.dset_for) and a model (model_for) and hands both to `fn`.
    `root` and `count` are forwarded to the dataset builder.
    """
    # Global flag read elsewhere in the project -- presumably switches
    # layers into training mode; confirm against qu.Config usage.
    qu.Config.runtime.is_training = True
    groups = groups or qs.groups  # falls back to project defaults when falsy
    for r in range(ps.num_rounds):
        for g in groups:
            print(f'\nRound {r + 1}, group {g}...\n=======================')
            fn(ps, qd.dset_for(ps, root, g, count=count), model_for(ps, g))


if __name__ == '__main__':
    ps = qu.Params(**params)