Example #1
def run(model, trn_loader, crit, optim, epochs, plot_interval=1000):
    losses = []
    for epoch in range(epochs):
        loss = train(model, trn_loader, crit, optim)
        print('Epoch {:d} Loss: {:.4f}'.format(epoch + 1, loss))
        if epoch % plot_interval == 0:
            # periodically visualise a batch of model predictions
            samples = predict_batch(model, trn_loader)
            utils.plot_batch(samples)
        losses.append(loss)
    samples = predict_batch(model, trn_loader)
    utils.plot_batch(samples)
    return losses
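This example leans on project helpers (`train`, `predict_batch`, `utils.plot_batch`) that are not shown on this page. A minimal sketch of a compatible plotting helper, assuming `samples` is an `(N, C, H, W)` image tensor (the names and behaviour here are assumptions, not the original `utils` module):

import matplotlib.pyplot as plt
import torchvision

def plot_batch(samples, nrow=8):
    # samples: (N, C, H, W) tensor of images, e.g. the output of predict_batch
    grid = torchvision.utils.make_grid(samples.detach().cpu(), nrow=nrow)
    plt.imshow(grid.permute(1, 2, 0).numpy())
    plt.axis('off')
    plt.show()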
Example #2
def test_color_gradient(rgb_image,
                        kernel=3,
                        grad_thresh=(20, 100),
                        h_thresh=(0, 255),
                        l_thresh=(0, 255),
                        s_thresh=(170, 255),
                        plot_all=True):

    c_binary = hls_threshold(rgb_image, h_thresh, l_thresh, s_thresh)

    g_binary = abs_sobel_thresh(rgb_image,
                                orient='x',
                                sobel_kernel=kernel,
                                thresh=grad_thresh)

    combo = np.logical_or(c_binary, g_binary)

    plt.interactive(True)

    if plot_all:
        fig = utils.plot_batch(
            [rgb_image, c_binary, g_binary, combo],
            ['Original', 'Color Threshold', 'Gradient Threshold', 'Combo'],
            figsize=(16, 6))

    else:
        fig = plt.figure()
        plt.imshow(combo, cmap='gray')

    fig.tight_layout()
    return fig
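The helpers `hls_threshold` and `abs_sobel_thresh` are not included in the snippet. A plausible minimal implementation of both, using OpenCV, is sketched below; it is an assumption about what the originals do (HLS channel thresholding and a scaled absolute Sobel gradient threshold), not the author's code:

import cv2
import numpy as np

def hls_threshold(rgb_image, h_thresh, l_thresh, s_thresh):
    # Binary mask of pixels whose H, L and S values all fall inside the given ranges.
    hls = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2HLS)
    h, l, s = hls[:, :, 0], hls[:, :, 1], hls[:, :, 2]
    binary = ((h >= h_thresh[0]) & (h <= h_thresh[1]) &
              (l >= l_thresh[0]) & (l <= l_thresh[1]) &
              (s >= s_thresh[0]) & (s <= s_thresh[1]))
    return binary.astype(np.uint8)

def abs_sobel_thresh(rgb_image, orient='x', sobel_kernel=3, thresh=(20, 100)):
    # Binary mask from the scaled absolute Sobel gradient along one axis.
    gray = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2GRAY)
    dx, dy = (1, 0) if orient == 'x' else (0, 1)
    sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, dx, dy, ksize=sobel_kernel))
    scaled = np.uint8(255 * sobel / np.max(sobel))
    return ((scaled >= thresh[0]) & (scaled <= thresh[1])).astype(np.uint8)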
Example #3
def run(model,
        trn_loader,
        tst_loader,
        crit,
        optim,
        epochs,
        plot_interval=1000):
    losses = {'trn': [], 'tst': []}
    for epoch in range(epochs):
        trn_loss = train(model, trn_loader, crit, optim)
        tst_loss = test(model, tst_loader, crit)
        print('Epoch %d, TrnLoss: %.4f, TstLoss: %.4f' %
              (epoch + 1, trn_loss, tst_loss))
        if epoch % plot_interval == 0:
            samples, mu, var = predict_batch(model, tst_loader)
            utils.plot_batch(samples)
        losses['trn'].append(trn_loss)
        losses['tst'].append(tst_loss)
    samples, mean, var = predict_batch(model, trn_loader)
    utils.plot_batch(samples)
    return losses
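Because `run` returns the per-epoch losses, a typical follow-up (assumed usage, with `model`, the loaders, `crit` and `optim` already constructed) is to plot the two curves:

import matplotlib.pyplot as plt

losses = run(model, trn_loader, tst_loader, crit, optim, epochs=50)
plt.plot(losses['trn'], label='train')
plt.plot(losses['tst'], label='test')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()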
Example #4
        for phase in TEST_PHASE_LIST
    }

    nima_dataloaders = {
        phase:
        DataLoader(dataset=nima_datasets[phase],
                   batch_size=DATA_LOADER_CONFIG[phase + "_batch_size"],
                   shuffle=DATA_LOADER_CONFIG[phase + "_shuffle"],
                   num_workers=DATA_LOADER_CONFIG[phase + "_num_workers"])
        for phase in TEST_PHASE_LIST
    }

    batch_images, batch_ground_truths = next(iter(nima_dataloaders[TEST]))
    print(batch_images.shape)
    print(batch_ground_truths.shape)
    plot_batch(batch_images, batch_ground_truths)

    # -- nima network --
    vgg16_net = vgg16_bn(
        pretrained_path="e:/src/jupyter/pytorch/models/vgg16_bn-6c64b313.pth",
        is_init_weights=False)
    vgg16_net_dict = {
        "arch": "vgg",
        "model": vgg16_net,
    }
    nima_net = NIMA(baseline_model_dict=vgg16_net_dict)
    assert os.path.isfile(MODEL_LOAD_PATH), \
        "ERROR: model weights does not exist!"
    nima_net.load_state_dict(torch.load(MODEL_LOAD_PATH))
    print(nima_net)
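With the weights loaded, a forward pass on the batch fetched earlier could look like the sketch below. It assumes the NIMA head returns a softmax distribution over ten score bins, which is the usual NIMA formulation; the exact output format of this `NIMA` class is an assumption:

nima_net.eval()
with torch.no_grad():
    score_dist = nima_net(batch_images)           # assumed shape: (batch, 10)
    bins = torch.arange(1, 11, dtype=score_dist.dtype)
    mean_scores = (score_dist * bins).sum(dim=1)  # expected aesthetic score per image
print(mean_scores)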
Example #5
step = 1
colors = _get_colors(int((len(sys.argv) - 1) / step))
for i, filename in enumerate(sys.argv[1:][::step]):
    header, data = load_batch(filename)

    if not ic:
        ic = header['initial_condition']
    elif ic != header['initial_condition']:
        ic = 'mixed'

    N, K = header['N'], header['K']
    data['chi_r'] = data['chi_r'] * N
    data['chi_psi'] = data['chi_psi'] * N
    print(header)
    plotkwargs = dict(color=colors[i], marker=ms(i), mfc='w', ms=fs * 0.5)
    plot_batch(axes, data, ylog=True, **plotkwargs)
    handles += [Line2D([], [], **plotkwargs)]
    alpha = K / N
    if header['p']:
        title = 'N  $\\alpha$  p'
        tmp = '{:04d}  {:f}  {:f}'.format(header['N'], alpha, header['p'])
    else:
        title = 'N             $\\alpha$'
        tmp = '{:04d}  {:f}'.format(header['N'], alpha)
    labels += [tmp]
plt.figlegend(handles,
              labels,
              fontsize=fs,
              bbox_to_anchor=(1.0, 1.0),
              bbox_transform=fig.transFigure,
              title=title)
Example #6
                test_image_outputs = []
                # first grid row: a blank corner tile followed by the boundary
                # maps of test_image_b
                test_image_outputs.append(
                    np.zeros((3, crop_image_height, crop_image_height)))
                for i in range(display_size):
                    boundary = test_image_b[i].sum(0).unsqueeze(0)
                    test_image_outputs.append(
                        np.tile(boundary.numpy(), (3, 1, 1)))
                for i in range(display_size):
                    test_image_outputs.append(test_image_a[i, ...].numpy())
                    for j in range(display_size):
                        with torch.no_grad():
                            out = trainer.transfer(
                                test_image_a[i].unsqueeze(0).cuda(),
                                test_image_b[j].unsqueeze(0).cuda())[-1]
                        test_image_outputs.append(out[0,
                                                      ...].data.cpu().numpy())
                # for i in range(len(test_image_outputs)):
                #     print(test_image_outputs[i].shape)
                test_image_outputs = np.stack(test_image_outputs, axis=0)
                test_image_outputs = test_image_outputs.transpose((0, 2, 3, 1))
                plot_batch(
                    test_image_outputs,
                    os.path.join(image_directory,
                                 "test_{:08}.png".format(iterations + 1)))
            # Save network weights
            if (iterations + 1) % config['snapshot_save_iter'] == 0:
                trainer.save(checkpoint_directory, iterations)
            iterations += 1
            if iterations >= max_iter:
                sys.exit('Finish training')
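In this example `plot_batch` is called with a stack of HWC numpy images and an output path, so the helper is presumably tiling the batch into a grid and saving it to disk. A minimal sketch under that assumption (not the original implementation):

import math
import matplotlib.pyplot as plt
import numpy as np

def plot_batch(images, out_path):
    # images: (N, H, W, C) array with values roughly in [0, 1]; out_path: file to write
    n = images.shape[0]
    cols = int(math.ceil(math.sqrt(n)))
    rows = int(math.ceil(n / cols))
    fig, axes = plt.subplots(rows, cols, figsize=(2 * cols, 2 * rows))
    for ax in np.ravel(axes):
        ax.axis('off')
    for ax, img in zip(np.ravel(axes), images):
        ax.imshow(np.clip(img, 0, 1))
    fig.savefig(out_path, bbox_inches='tight')
    plt.close(fig)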