Example #1
def test(predictor, shape, batch_size, gpu, to_cpu):

    print('------')

    # build a small synthetic dataset
    n_samples = 10
    dataset = Dataset(n_samples, shape)

    # wrap the predictor for Monte Carlo sampling (mc_iteration stochastic forward passes)
    model = MCSampler(predictor, mc_iteration=5)

    if gpu >= 0:
        chainer.backends.cuda.get_device_from_id(gpu).use()
        model.to_gpu()

    iterator = SerialIterator(dataset, batch_size, repeat=False)

    infer = Inferencer(iterator, model, device=gpu, to_cpu=to_cpu)

    ret = infer.run()

    if isinstance(ret, (list, tuple)):
        for r in ret:
            print(r.shape)
            print(r.__class__)
    else:
        print(ret.shape)
        print(ret.__class__)
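A minimal sketch of how this smoke test might be driven, assuming Dataset, MCSampler, and Inferencer are the project's own classes and that any callable Chainer chain with dropout can serve as the predictor (ToyPredictor and the argument values below are hypothetical):

import chainer
import chainer.functions as F
import chainer.links as L

class ToyPredictor(chainer.Chain):
    # hypothetical two-layer MLP; the dropout layer is what MC sampling exploits
    def __init__(self):
        super().__init__()
        with self.init_scope():
            self.l1 = L.Linear(None, 16)
            self.l2 = L.Linear(16, 2)

    def __call__(self, x):
        h = F.relu(self.l1(x))
        h = F.dropout(h, ratio=0.5)
        return self.l2(h)

# run on the CPU (gpu=-1) and keep the outputs as NumPy arrays
test(ToyPredictor(), shape=(3,), batch_size=2, gpu=-1, to_cpu=True)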
Example #2
def test_phase(predictor, test, args):

    # setup an iterator
    test_iter = chainer.iterators.SerialIterator(test, args.batchsize, repeat=False, shuffle=False)

    # setup an inferencer
    chainer.serializers.load_npz(os.path.join(args.out, 'predictor.npz'), predictor)

    model = MCSampler(predictor,
                      mc_iteration=args.mc_iteration,
                      activation=[F.identity, F.exp],
                      reduce_mean=None,
                      reduce_var=None)

    if args.gpu >= 0:
        chainer.backends.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    infer = Inferencer(test_iter, model, device=args.gpu)

    pred, epistemic_uncert, aleatory_uncert, _ = infer.run()

    # visualize
    x = test.x.ravel()
    t = test.t.ravel()
    pred = pred.ravel()
    epistemic_uncert = epistemic_uncert.ravel()
    aleatory_uncert = aleatory_uncert.ravel()

    plt.rcParams['font.size'] = 18
    plt.figure(figsize=(13,5))
    ax = sns.scatterplot(x=x, y=pred, color='blue', s=75)
    ax.errorbar(x, pred, yerr=epistemic_uncert, fmt='none', capsize=10, ecolor='gray', linewidth=1.5)
    ax.plot(x, t, color='red', linewidth=1.5)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_xlim(-10, 10)
    ax.set_ylim(-15, 15)
    plt.legend(['Ground-truth', 'Prediction', 'Epistemic uncertainty'])
    plt.title('Result on testing data set')
    plt.tight_layout()
    plt.savefig(os.path.join(args.out, 'eval_epistemic.png'))
    plt.close()

    plt.rcParams['font.size'] = 18
    plt.figure(figsize=(13,5))
    ax = sns.scatterplot(x=x, y=pred, color='blue', s=75)
    ax.errorbar(x, pred, yerr=aleatory_uncert, fmt='none', capsize=10, ecolor='gray', linewidth=1.5)
    ax.plot(x, t, color='red', linewidth=1.5)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_xlim(-10, 10)
    ax.set_ylim(-15, 15)
    plt.legend(['Ground-truth', 'Prediction', 'Aleatoric uncertainty'])
    plt.title('Result on testing data set')
    plt.tight_layout()
    plt.savefig(os.path.join(args.out, 'eval_aleatoric.png'))
    plt.close()
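For reference, a minimal NumPy sketch of the decomposition this configuration implies: with activation=[F.identity, F.exp] the predictor is assumed to expose a mean head and a log-variance head, the spread of the MC means yields the epistemic term, and the average predicted variance yields the aleatoric term (shapes below are illustrative):

import numpy as np

def decompose_uncertainty(means, log_vars):
    # means, log_vars: (T, N) arrays collected from T stochastic forward passes
    pred = means.mean(axis=0)                  # predictive mean
    epistemic = means.var(axis=0)              # variance of the MC means
    aleatoric = np.exp(log_vars).mean(axis=0)  # mean of the predicted variances
    return pred, epistemic, aleatoric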
Example #3
def test_phase(predictor, test, args):

    # setup an iterator
    test_iter = chainer.iterators.SerialIterator(test,
                                                 args.batchsize,
                                                 repeat=False,
                                                 shuffle=False)

    # setup an inferencer
    chainer.serializers.load_npz(os.path.join(args.out, 'predictor.npz'),
                                 predictor)

    model = MCSampler(predictor,
                      mc_iteration=args.mc_iteration,
                      activation=partial(F.softmax, axis=1),
                      reduce_mean=partial(F.argmax, axis=1),
                      reduce_var=partial(F.mean, axis=1))

    if args.gpu >= 0:
        chainer.backends.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    infer = Inferencer(test_iter, model, device=args.gpu)

    pred, uncert = infer.run()

    # evaluate
    os.makedirs(args.out, exist_ok=True)

    match = pred == test.labels
    accuracy = np.sum(match) / len(match)

    arr = [uncert[match], uncert[np.logical_not(match)]]

    plt.rcParams['font.size'] = 18
    plt.figure(figsize=(13, 5))
    ax = sns.violinplot(data=arr,
                        inner='quartile',
                        palette='Blues',
                        orient='h',
                        cut=0)
    ax.set_xlabel('Predicted variance')
    ax.set_yticklabels([
        'Correct prediction\n(n=%d)' % len(arr[0]),
        'Wrong prediction\n(n=%d)' % len(arr[1])
    ])
    plt.title('Accuracy=%.3f' % accuracy)
    plt.tight_layout()
    plt.savefig(os.path.join(args.out, 'eval.png'))
    plt.close()
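The reduce_mean/reduce_var partials amount to a simple reduction over the MC and class axes; a NumPy sketch, assuming the raw MC samples of the softmax output have shape (T, N, C):

import numpy as np

def classify_with_uncertainty(probs):
    # probs: (T, N, C) softmax outputs from T MC-dropout passes
    pred = probs.mean(axis=0).argmax(axis=1)   # reduce_mean: argmax over the class axis
    uncert = probs.var(axis=0).mean(axis=1)    # reduce_var: mean MC variance over classes
    return pred, uncert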
Example #4
def main():
    mc_iteration = 10
    mc_samples = np.random.rand(1, 10, 2).astype(np.float32)
    mc_samples = np.repeat(mc_samples, mc_iteration, axis=0)

    _mean, _var = _calc_uncertanty_from_mc_samples(mc_samples)

    print('numpy')
    print(_mean)
    print(_var)
    print('------')

    mean = chainer.functions.mean(mc_samples, axis=0)
    var = mc_samples - mean
    var = chainer.functions.mean(chainer.functions.square(var), axis=0)

    mean = mean.data
    var = var.data

    print('chainer')
    print(mean)
    print(var)

    print((np.abs(mean - _mean)))
    print((np.abs(var - _var)))
    print('------')

    sampler = MCSampler(lambda x: x, mc_iteration, lambda x: x, None, None)
    with configuration.using_config('train', False):
        mean, var = sampler(mc_samples[0])

    print('mc_sampler')
    print(mean)
    print(var)

    print((np.abs(mean - _mean)))
    print((np.abs(var - _var)))
    print('------')
Example #5
def test_phase(predictor, test, args):

    print('# samples:')
    print('-- test:', len(test))

    test_iter = chainer.iterators.SerialIterator(test,
                                                 args.batchsize,
                                                 repeat=False,
                                                 shuffle=False)

    # setup an inferencer
    snapshot_file = find_latest_snapshot(
        'predictor_iter_{.updater.iteration:08}.npz', args.out)
    chainer.serializers.load_npz(snapshot_file, predictor)
    print('Loaded a snapshot:', snapshot_file)

    model = MCSampler(predictor,
                      mc_iteration=args.mc_iteration,
                      activation=partial(F.softmax, axis=1),
                      reduce_mean=partial(F.argmax, axis=1),
                      reduce_var=partial(F.mean, axis=1))

    if args.gpu[0] >= 0:
        chainer.backends.cuda.get_device_from_id(args.gpu[0]).use()
        model.to_gpu()

    infer = Inferencer(test_iter, model, device=args.gpu[0])

    pred, uncert = infer.run()

    # evaluate
    os.makedirs(os.path.join(args.out, 'test'), exist_ok=True)

    acc_values = []
    uncert_values = []

    uncert_clim = (0, np.percentile(uncert, 95))

    files = test.files['image']
    if isinstance(files, np.ndarray): files = files.tolist()
    commonpath = os.path.commonpath(files)

    plt.rcParams['font.size'] = 14

    for i, (p, u, imf, lbf) in enumerate(
            zip(pred, uncert, test.files['image'], test.files['label'])):
        im, _ = load_image(imf)
        im = im[:, :, ::-1]
        lb, _ = load_image(lbf)
        if lb.ndim == 3: lb = lb[:, :, 0]

        acc_values.append(eval_metric(p, lb))
        uncert_values.append(np.mean(u[p == 1]))  # NOTE: instrument class

        plt.figure(figsize=(20, 4))

        for j, (pic, cmap, clim, title) in enumerate(
                zip([im, p, lb, u, (p != lb).astype(np.uint8)],
                    [None, None, None, 'jet', 'jet'],
                    [None, None, None, uncert_clim, None], [
                        'Input image\n%s' % os.path.relpath(imf, commonpath),
                        'Predicted label\n(DC=%.3f)' % acc_values[-1],
                        'Ground-truth label',
                        'Predicted variance\n(PV=%.4f)' % uncert_values[-1],
                        'Error'
                    ])):
            plt.subplot(1, 5, j + 1)
            plt.imshow(pic, cmap=cmap)
            plt.xticks([], [])
            plt.yticks([], [])
            plt.title(title)
            plt.clim(clim)

        plt.tight_layout()
        plt.savefig(os.path.join(args.out, 'test/%03d.png' % i))
        plt.close()

    c = pearsonr(uncert_values, acc_values)

    plt.figure(figsize=(11, 11))
    ax = sns.scatterplot(x=uncert_values, y=acc_values, color='blue', s=50)
    ax.set_xlabel('Predicted variance')
    ax.set_ylabel('Dice coefficient')
    plt.grid()
    plt.title('r=%.3f' % c[0])
    plt.savefig(os.path.join(args.out, 'eval.png'))
    plt.close()
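eval_metric() is not shown in this snippet; the 'DC' annotation suggests a Dice coefficient on the instrument class, for which a hypothetical stand-in could look like this:

import numpy as np

def dice_coefficient(pred, label, target=1):
    # overlap of the target class between prediction and ground truth
    p = (pred == target)
    l = (label == target)
    denom = p.sum() + l.sum()
    return 2.0 * np.logical_and(p, l).sum() / denom if denom > 0 else 1.0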
Example #6
def test_phase(generator, test, args):

    print('# samples:')
    print('-- test:', len(test))

    test_iter = chainer.iterators.SerialIterator(test,
                                                 args.batchsize,
                                                 repeat=False,
                                                 shuffle=False)

    # setup an inferencer
    snapshot_file = find_latest_snapshot(
        'generator_iter_{.updater.iteration:08}.npz', args.out)
    chainer.serializers.load_npz(snapshot_file, generator)
    print('Loaded a snapshot:', snapshot_file)

    model = MCSampler(generator,
                      mc_iteration=args.mc_iteration,
                      activation=F.tanh,
                      reduce_mean=None,
                      reduce_var=partial(F.mean, axis=1))

    if args.gpu[0] >= 0:
        chainer.backends.cuda.get_device_from_id(args.gpu[0]).use()
        model.to_gpu()

    infer = Inferencer(test_iter, model, device=args.gpu[0])

    pred, uncert = infer.run()

    # evaluate
    os.makedirs(os.path.join(args.out, 'test'), exist_ok=True)

    acc_values = []
    uncert_values = []

    uncert_clim = (0, np.percentile(uncert, 95))
    error_clim = (0, 1)

    files = test.files['image']
    if isinstance(files, np.ndarray): files = files.tolist()
    commonpath = os.path.commonpath(files)

    plt.rcParams['font.size'] = 14

    for i, (p, u, imf, lbf) in enumerate(
            zip(pred, uncert, test.files['image'], test.files['label'])):
        im, _ = load_image(imf)
        lb, _ = load_image(lbf)
        im = im.astype(np.float32)
        lb = lb.astype(np.float32)

        p = p.transpose(1, 2, 0)

        im = (im[:, :, ::-1] + 1.) / 2.
        lb = (lb[:, :, ::-1] + 1.) / 2.
        p = (p[:, :, ::-1] + 1.) / 2.

        error = np.mean(np.abs(p - lb), axis=-1)

        acc_values.append(eval_metric(p, lb))
        uncert_values.append(np.mean(u))

        plt.figure(figsize=(20, 4))

        for j, (pic, cmap, clim, title) in enumerate(
                zip([im, p, lb, u, error], [None, None, None, 'jet', 'jet'],
                    [None, None, None, uncert_clim, error_clim], [
                        'Input image\n%s' % os.path.relpath(imf, commonpath),
                        'Predicted label\n(MAE=%.3f)' % acc_values[-1],
                        'Ground-truth label',
                        'Predicted variance\n(PV=%.4f)' % uncert_values[-1],
                        'Error'
                    ])):
            plt.subplot(1, 5, j + 1)
            plt.imshow(pic, cmap=cmap)
            plt.xticks([], [])
            plt.yticks([], [])
            plt.title(title)
            plt.clim(clim)

        plt.tight_layout()
        plt.savefig(os.path.join(args.out, 'test/%03d.png' % i))
        plt.close()