Example #1
def draw(partition='test'):
    h_top = top_4.draw_singletons(bins.items(), partition=partition)
    h_qcd = qcd_0.draw_singletons(bins.items(), partition=partition)

    for h in [h_top, h_qcd]:
        for v in h.values():
            v.scale()

    for k in bins:
        p = utils.Plotter()
        p.add_hist(h_top[k], 'top', 'r')
        p.add_hist(h_qcd[k], 'q/g', 'k')
        p.plot({
            'xlabel': labels[k],
            'ylabel': 'Probability',
            'output': '/home/snarayan/public_html/figs/testplots/%s/' % partition + k
        })
Example #2

f_vars = {
    'tau32': (lambda x: x['singletons'][:, obj.singletons['tau32']],
              np.arange(0, 1.2, 0.01)),
    'msd': (lambda x: x['singletons'][:, obj.singletons['msd']],
            np.arange(0., 400., 10.)),
    'pt': (lambda x: x['singletons'][:, obj.singletons['pt']],
           np.arange(250., 1000., 50.)),
    'dnn': (predict, np.arange(0, 1.2, 0.01)),
}
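Each f_vars entry pairs an extractor lambda with a NumPy binning, so a consumer can histogram any of the variables generically. A minimal sketch of that pattern (fill_histograms is a hypothetical helper, and the layout of data is an assumption matching the lambdas above):

import numpy as np

def fill_histograms(data, f_vars):
    # data: dict of arrays in the layout the extractor lambdas expect
    hists = {}
    for name, (extract, bins) in f_vars.items():
        values = extract(data)  # pull out one flat array per variable
        counts, edges = np.histogram(values, bins=bins)
        hists[name] = (counts, edges)
    return hists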

OUTPUT = '/home/snarayan/public_html/figs/badnet/test_lstm/'
system('mkdir -p ' + OUTPUT)

p = utils.Plotter()
r = utils.Roccer()


# mask on the DNN score (the original mass-window cut is kept below for reference)
def mask(data):
    return predict(data) > 0.8


#     lower = data['singletons'][:,obj.singletons['msd']] > 110
#     higher = data['singletons'][:,obj.singletons['msd']] < 210
#     pt = data['singletons'][:,obj.singletons['pt']] > 400
#     return np.logical_and(pt, np.logical_and(lower,higher))

hists_top = top_4.draw(components=['singletons', 'inclusive'],
                       f_vars=f_vars)
Example #3
def logp(z):  # unnormalized log posterior (up to the constant log p(x))
    x = netG(z)
    lpr = -0.5 * (z**2).view(z.shape[0], -1).sum(-1)  # log prior
    llh = -0.5 * ((x[..., ij[:, 0], ij[:, 1]] - vals)**2).view(
        x.shape[0], -1).sum(-1) / args.alpha  # log likelihood
    return llh + lpr


optimizer = optim.Adam(netI.parameters(),
                       lr=args.lr,
                       amsgrad=True,
                       betas=(0.5, 0.9))
w = torch.FloatTensor(args.batch_size, args.nw).to(device)

history = utils.History(args.outdir)
plotter = utils.Plotter(args.outdir, netG, netI, args.condfile,
                        torch.randn(64, args.nw).to(device))

for i in range(args.niter):

    optimizer.zero_grad()
    w.normal_(0, 1)
    z = netI(w)
    z = z.view(z.shape[0], z.shape[1], 1, 1)
    err = -logp(z).mean()
    ent = utils.sample_entropy(z)
    kl = err - ent
    kl.backward()
    optimizer.step()

    history.dump(KL=kl.item(), nlogp=err.item(), entropy=ent.item())
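The objective above is the standard variational identity: minimizing the KL divergence from the sampler q (the pushforward of w through netI) to the posterior p(z|x) reduces, up to the constant log p(x), to the negative expected log posterior minus the entropy of q, which is exactly kl = err - ent:

KL(q || p(z|x)) = E_{z~q}[log q(z)] - E_{z~q}[log p(z|x)]
                = -E_{z~q}[llh + lpr] - H(q) + log p(x)
                =        err          -  ent + const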
Example #4
fake_labels = Variable(Tensor(batch_size).fill_(0.0), requires_grad=False)

transform = transforms.Compose([  # avoid shadowing the torchvision.transforms module
    transforms.Resize(int(image_size * 1.2), Image.BICUBIC),
    transforms.RandomCrop(image_size),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor()
])

dataset = ImageDataset(dataroot=dataroot, transforms=transform, aligned=True)
dataloader = DataLoader(dataset=dataset,
                        batch_size=batch_size,
                        shuffle=True,
                        num_workers=num_workers)

p = utils.Plotter(['Loss_G', 'Loss_Dx', 'Loss_Dy'])

print('Start training.')
for epoch in range(epochs):
    start_time = time.monotonic()
    for idx, batch in enumerate(dataloader):
        real_X = input_X.copy_(batch['X_trans'])
        real_Y = input_Y.copy_(batch['Y_trans'])

        raw_X = batch['X_raw']
        raw_Y = batch['Y_raw']

        # training generators
        optimizer_G.zero_grad()

        # identity loss
Example #5
for model in rmodels:
    if arguments.model_refresh:
        model.train(
            arguments.bundle,
            arguments.bundle_size,
            arguments.bundle_steps_per_epoch,
            arguments.bundle_epochs,
            arguments.verbose,
        )
    else:
        model.unserialize()
    model.serialize()

for model in rmodels:
    time_start = time.time()
    model.test(
        arguments.bundle,
        arguments.bundle_size,
        arguments.verbose,
    )
    score = model.score(
        arguments.bundle,
        arguments.bundle_size,
        arguments.verbose,
    )
    time_diff = time.time() - time_start
    print(model.name(), time_diff, score)
    if arguments.show_train_plots:
        plotter = utils.Plotter(arguments.bundle, model.name())
        plotter.show(model.history())
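The loop in Example #5 only works if every entry of rmodels implements a common interface. A stub of that contract, inferred from the call sites (only the method names come from the loop above; the class name and bodies are hypothetical placeholders):

class BundleModel:
    # Hypothetical stub of the interface Example #5 exercises.
    def name(self):
        return self.__class__.__name__

    def train(self, bundle, bundle_size, steps_per_epoch, epochs, verbose):
        ...  # fit on the bundle and record per-epoch history

    def unserialize(self):
        ...  # load previously trained weights from disk

    def serialize(self):
        ...  # persist the current weights

    def test(self, bundle, bundle_size, verbose):
        ...  # run inference on the evaluation split

    def score(self, bundle, bundle_size, verbose):
        return 0.0  # aggregate metric printed by the loop

    def history(self):
        return {}  # training curves consumed by utils.Plotter.show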