Example #1
0
# Alternative source list kept for debugging: load only the first 10 sorted
# training batches instead of the full set.
# _files = glob(src+'_train/batch_100_*.p')
# _files.sort()
# _files = _files[:10]
# Shuffle in place so the train/valid split below is a random partition.
# NOTE(review): `rng` and `_files` are defined earlier in the file — not shown here.
rng.shuffle(_files)
class files:
    """Namespace of dataset file lists: an 80/20 train/valid split of the
    shuffled batch files, plus an optional leftover split ``valid2``."""
    data_files = _files
    # Split sizes; int() truncation may leave a few leftover files.
    n_train = int(.8 * len(data_files))
    n_valid = int(.2 * len(data_files))
    train = data_files[:n_train]
    valid = data_files[n_train:][:n_valid]
    if use.valid2:
        # Remaining files (after truncation) form a second validation set.
        valid2 = data_files[n_train + n_valid:]

# Data augmentation: spawn the background transform loop and seed it with the
# last training file. Helpers are defined elsewhere in this file.
if use.aug:
    remove_aug() # remove data augmentation of previous session if any
    start_transform_loop() # start augmentation loop
    transform(files.train[-1]) # transform last file

# Report dataset sizes in samples (file counts scaled by batch_size).
files.n_test = len(files.valid2) if use.valid2 else 0
total = files.n_test + files.n_train + files.n_valid
write('data: total: %i train: %i valid: %i test: %i' % (
    total * batch_size,
    files.n_train * batch_size,
    files.n_valid * batch_size,
    files.n_test * batch_size))

def load_data(path, trans): 
    global rng, x_,t_,y_
    """ load data into shared variables """
Example #2
0
# Randomise file order so the sequential train/valid split below is a
# random partition of the batch files.
rng.shuffle(_files)


class files:
    """Holds the shuffled batch-file paths and their train/valid(/valid2)
    partition; split sizes are exposed as ``n_train`` / ``n_valid``."""
    data_files = _files
    n_train = int(len(data_files) * .8)  # 80% of files for training
    n_valid = int(len(data_files) * .2)  # 20% for validation
    # Consecutive, non-overlapping slices of the shuffled list.
    train = data_files[:n_train]
    valid = data_files[n_train:][:n_valid]
    if use.valid2:
        # Truncation leftovers become an extra held-out split.
        valid2 = data_files[n_train + n_valid:]


# Data augmentation: restart the background transform loop and prime it with
# the last training file. Helper functions are defined elsewhere in this file.
if use.aug:
    remove_aug()  # remove data augmentation of previous session if any
    start_transform_loop()  # start augmentation loop
    transform(files.train[-1])  # transform last file

# Log how many samples each split contains (file counts * batch_size).
files.n_test = len(files.valid2) if use.valid2 else 0
counts = (files.n_test + files.n_train + files.n_valid,
          files.n_train,
          files.n_valid,
          files.n_test)
write('data: total: %i train: %i valid: %i test: %i'
      % tuple(c * batch_size for c in counts))


def load_data(path, trans):
    global rng, x_, t_, y_