Example #1
import numpy as np

from neon import NervanaObject
from neon.data import HDF5Iterator

from utils import allclose_with_out  # helper from neon's tests/ directory


def test_hdf5meansubtract(backend_default, meansubhdf):
    # Verify that the iterator subtracts the stored mean (per-channel or
    # full-image) from the known arange-pattern inputs in the fixture file.
    NervanaObject.be.bsz = 128
    bsz = 128

    datit = HDF5Iterator(meansubhdf[0])
    datit.allocate()
    typ = meansubhdf[1]
    mn = datit.mean.get()
    assert typ in ['chan_mean', 'full_mean']

    cnt_image = 0
    max_len = datit.ndata
    MAX_CNT = max_len * datit.inp.shape[1]
    for x, t in datit:
        x_ = x.get().flatten()
        x_exp = (np.arange(len(x_)) + cnt_image) % MAX_CNT
        x_exp = x_exp.reshape((-1, np.prod(datit.lshape))).T
        if typ == 'chan_mean':
            x_exp = x_exp.reshape((datit.lshape[0], -1)) - mn
        elif typ == 'full_mean':
            x_exp = x_exp.reshape((-1, bsz)) - mn
        x_exp = x_exp.flatten()
        assert allclose_with_out(x_, x_exp, atol=0.0, rtol=1.0e-7)
        cnt_image += len(x_)

    datit.cleanup()
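
For context, a minimal sketch of an HDF5 file in the layout the test above assumes: 'input' and 'output' datasets, an 'lshape' attribute on 'input' giving the image shape, and a stored mean for the subtraction variants. Only 'lshape' is confirmed by the test code itself (via datit.lshape); the dataset names and the mean layout are assumptions standing in for the meansubhdf fixture, which is not shown here.

import h5py
import numpy as np

lshape = (3, 16, 16)                 # hypothetical (C, H, W) image shape
ndata = 1024                         # number of examples
feat = int(np.prod(lshape))

with h5py.File('meansub_example.h5', 'w') as fh:
    # Inputs follow the arange pattern that the assertions above expect.
    inp = fh.create_dataset(
        'input',
        data=np.arange(ndata * feat, dtype=np.float32).reshape(ndata, feat))
    inp.attrs['lshape'] = lshape
    fh.create_dataset('output',
                      data=np.arange(ndata, dtype=np.int32).reshape(ndata, 1))
    # A (C, 1) mean would correspond to 'chan_mean'; a per-pixel (feat, 1)
    # mean would correspond to 'full_mean'.
    fh.create_dataset('mean',
                      data=np.zeros((lshape[0], 1), dtype=np.float32))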
Example #2
import numpy as np

from neon import NervanaObject
from neon.data import HDF5Iterator


def test_multidimout(backend_default, multidimout):
    # With multi-dimensional targets, each target row in the fixture file
    # mirrors the corresponding input row with the feature axis reversed.
    NervanaObject.be.bsz = 128

    fn = multidimout
    datit = HDF5Iterator(fn)

    max_len = datit.ndata
    MAX_CNT = max_len * datit.inp.shape[1]
    for (x, t) in datit:
        x_ = x.get()
        t_ = t.get()
        assert x_.shape == t_.shape

        x_ = x_.T % MAX_CNT
        t_ = t_.T % MAX_CNT
        assert np.all(x_ == t_[:, ::-1])

    datit.cleanup()
Example #3
import numpy as np

from neon import NervanaObject
from neon.data import HDF5Iterator


def test_reset(backend_default, hdf5datafile):
    # After a partial pass over the data, reset() must rewind the iterator
    # so that the next minibatch equals the very first one.
    NervanaObject.be.bsz = 128

    fn = hdf5datafile
    datit = HDF5Iterator(fn)

    for (x, t) in datit:
        break
    x_1 = x.get()
    t_1 = t.get()

    for cnt_end in range(2):
        cnt = 0
        for (x, t) in datit:
            cnt += 1
            if cnt > cnt_end:
                break
        datit.reset()
        for (x, t) in datit:
            break
        assert np.all(x.get() == x_1)
        assert np.all(t.get() == t_1)
    datit.cleanup()
Example #4
import math

import numpy as np

from neon import NervanaObject
from neon.data import HDF5Iterator, HDF5IteratorOneHot


def test_h5iterator(backend_default, hdf5datafile, onehot):
    # Walk the full dataset and check that inputs and targets follow the
    # arange pattern written by the fixture, with and without one-hot targets.
    NervanaObject.be.bsz = 128
    bsz = 128

    fn = hdf5datafile
    if onehot:
        datit = HDF5IteratorOneHot(fn)
    else:
        datit = HDF5Iterator(fn)
    cnt_image = 0
    cnt_target = 0
    max_len = datit.ndata
    mb_cnt = 0
    MAX_CNT = max_len * datit.inp.shape[1]
    for (x, t) in datit:
        x_ = x.get()
        t_ = t.get()
        assert x_.shape[1] == t_.shape[1]
        assert not np.all(x_ == 0)
        assert not np.all(t_ == 0)

        x_ = x_.T.flatten()
        x_exp = (np.arange(len(x_)) + cnt_image) % MAX_CNT
        assert np.all(x_ == x_exp)
        cnt_image += len(x_)

        if onehot:
            t_ = np.argmax(t_, axis=0).flatten()
        else:
            t_ = t_.flatten()
        t_exp = (np.arange(len(t_)) + cnt_target) % max_len
        assert np.all(t_ == t_exp)
        cnt_target += len(t_)

        mb_cnt += 1
    assert mb_cnt == int(math.ceil(datit.inp.shape[0] / float(bsz)))
    datit.cleanup()
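
Since Example #4 inverts the one-hot targets with argmax, here is a minimal sketch of the expansion a one-hot iterator performs. This is a plain-NumPy illustration, not neon's implementation; note that neon lays minibatches out with one column per example, which is why the test uses axis=0 rather than the axis=1 shown here.

import numpy as np

def onehot_sketch(labels, nclass):
    # One row per label; row i is all zeros except a 1 at column labels[i].
    return np.eye(nclass)[np.asarray(labels, dtype=int)]

labels = np.array([0, 2, 1, 2])
oh = onehot_sketch(labels, 3)
assert np.all(np.argmax(oh, axis=1) == labels)  # argmax undoes the expansion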
Example #5
import numpy as np

from neon.backends import gen_backend
from neon.data import HDF5Iterator
from neon.models import Model
from neon.util.argparser import NeonArgparser, extract_valid_args

# NOTE: args, testFileName and subset are defined by argument-parsing code
# earlier in the original script, which is not part of this excerpt.
print('Using test file: {}'.format(testFileName))

# Next line gets rid of the deterministic warning
args.deterministic = None

if args.rng_seed is None:
    args.rng_seed = 16

print('Batch size = {}'.format(args.batch_size))

# setup backend
be = gen_backend(**extract_valid_args(args, gen_backend))

# We don't need to one-hot encode the targets: there are only two classes,
# and the sklearn evaluation (see the sketch at the end of this example)
# only needs the probability of class 1.
test_set = HDF5Iterator(testFileName)
#test_set = HDF5IteratorOneHot(testFileName)


model_filename = 'LUNA16_resnetHDF_subset{}.prm'.format(subset)
#model_filename= 'LUNA16_resnetHDF_subset0.prm'

print('Using model: {}'.format(model_filename))

lunaModel = Model(model_filename)

prob, target = lunaModel.get_outputs(test_set, return_targets=True) 


np.set_printoptions(precision=3, suppress=True)
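
The comments above allude to handing the class-1 probabilities to sklearn. Below is a minimal sketch of that evaluation; the column layout of prob and the choice of roc_auc_score are assumptions about the downstream code, which is not shown in this excerpt.

from sklearn.metrics import roc_auc_score

# Model.get_outputs returns one row per example; with two output units,
# column 1 is taken here as the probability of class 1 (assumed layout).
class1_prob = prob[:, 1] if prob.ndim > 1 and prob.shape[1] > 1 else prob.ravel()
print('ROC AUC = {:.3f}'.format(roc_auc_score(target.ravel(), class1_prob)))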
Example #6
from neon.data import HDF5Iterator
from neon.initializers import Gaussian
from neon.layers import (GeneralizedCost, Affine, Sequential, MergeMultistream,
                         Linear, Pooling, Conv, Dropout)
from neon.models import Model
from neon.optimizers import GradientDescentMomentum, RMSProp, Adam
from neon.transforms import Rectlin, Logistic, CrossEntropyBinary, SumSquared, ObjectDetection
from neon.callbacks.callbacks import Callbacks
from neon.backends import gen_backend

# parser = NeonArgparser(__doc__)
# args = parser.parse_args(gen_be=False)

be = gen_backend(batch_size=128, backend='gpu')

traindir = 'train'
imwidth = 256

train_set = HDF5Iterator('whale_train.h5')
eval_set = HDF5Iterator('whale_eval.h5')
test_set = HDF5Iterator('whale_test.h5')

# weight initialization
init_norm = Gaussian(loc=0.0, scale=0.01)

# setup model layers
layers = [Conv((7, 7, 16), init=init_norm, activation=Rectlin()),
          Pooling((2, 2)),  
          
          Conv((3, 3, 32), init=init_norm, activation=Rectlin()),
          Conv((3, 3, 32), init=init_norm, activation=Rectlin()),
          Conv((3, 3, 32), init=init_norm, activation=Rectlin()),