Example #1
def test(files_):
    """Evaluate the model on each test batch file and return the averaged result."""
    global jobs
    if use.drop:  # don't use dropout when testing
        drop.p_traj.set_value(float32(0.0))
        drop.p_vid.set_value(float32(0.0))
        drop.p_hidden.set_value(float32(0.0))
    ce = []
    first_test_file = True
    # use.aug=False
    for file in files_:
        # the first test file is loaded without augmentation
        if first_test_file:
            augm = False
            first_test_file = False
        else:
            augm = True
        load_data(file, augm, istest=True)
        ce.append(_batch(test_model, False))

    if use.drop:  # restore the training-time dropout rates
        drop.p_traj.set_value(drop.p_traj_val)
        drop.p_vid.set_value(drop.p_vid_val)
        drop.p_hidden.set_value(drop.p_hidden_val)

    if use.aug:  # resume background loading of augmented training data
        start_load(files.train, augm=use.aug)

    return _avg(ce)
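The dropout bookkeeping above follows a common Theano pattern: the dropout probabilities live in shared variables that the compiled graph reads, so zeroing them switches the network to deterministic evaluation mode without recompiling. A minimal, self-contained sketch of that pattern, assuming Theano is installed; the names p_hidden and p_hidden_val mirror the snippet, while evaluate and eval_fn are placeholders invented for illustration:

import numpy as np
import theano

p_hidden_val = 0.5                                  # training-time dropout rate
p_hidden = theano.shared(np.float32(p_hidden_val))  # rate read by the compiled graph

def evaluate(batches, eval_fn):
    """Run eval_fn over batches with dropout disabled, then restore the rate."""
    p_hidden.set_value(np.float32(0.0))             # turn dropout off for testing
    losses = [eval_fn(b) for b in batches]
    p_hidden.set_value(np.float32(p_hidden_val))    # reset to the training value
    return sum(losses) / len(losses)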
Example #2
    # train = glob(src+'/train/batch_100_*.zip')#+glob(src+'/valid/batch_100_*.zip')
    # valid = glob(src+'/valid/batch_100_*.zip')#[:1]

    n_train = len(train)
    n_valid = len(valid)

    # if use.valid2: valid2 = data_files[n_train+n_valid:]
    # valid2 = glob(src+'_valid/batch_100_*.p')


rng.shuffle(files.train)

# data augmentation: start background loader jobs for the training files
if use.aug:
    jobs, queue = start_load(files.train, augm=use.aug, start=True)

# print data sizes
if use.valid2:
    files.n_test = len(files.valid2)
else:
    files.n_test = 0
write(
    "data: total: %i train: %i valid: %i test: %i"
    % (
        (files.n_test + files.n_train + files.n_valid) * batch_size,
        files.n_train * batch_size,
        files.n_valid * batch_size,
        files.n_test * batch_size,
    )
)

first_report2 = True
epoch = 0


def load_data(path, trans, istest=False):
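The snippet is truncated at the load_data signature, and the start_load helper it calls is not shown. Since it returns (jobs, queue), it presumably spawns worker processes that preload (and optionally augment) batch files for the training loop. A hypothetical sketch of such a helper using multiprocessing; only the call signature start_load(paths, augm, start=True) comes from the code above, and _worker, n_workers, and the queue size are assumptions made for illustration:

import multiprocessing as mp

def _worker(paths, augm, queue):
    # Placeholder loader: real code would read and (optionally) augment
    # each batch file before handing it to the training loop.
    for p in paths:
        queue.put((p, augm))

def start_load(paths, augm, start=True, n_workers=2):
    """Spawn background loader processes; return (jobs, queue)."""
    queue = mp.Queue(maxsize=4)  # bounded queue to cap memory use
    chunks = [paths[i::n_workers] for i in range(n_workers)]
    jobs = [mp.Process(target=_worker, args=(c, augm, queue)) for c in chunks]
    if start:
        for j in jobs:
            j.start()
    return jobs, queue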