Example #1
def test(files_):
    global jobs
    if use.drop:  # don't use dropout when testing
        drop.p_traj.set_value(float32(0.))
        drop.p_vid.set_value(float32(0.))
        drop.p_hidden.set_value(float32(0.))
    ce = []
    first_test_file = True
    # use.aug=False
    for file in files_:
        # only the first test file is loaded without augmentation
        if first_test_file:
            augm = False
            first_test_file = False
        else:
            augm = True
        load_data(file, augm, istest=True)
        ce.append(_batch(test_model, False))

    if use.drop:  # reset dropout to its training rates
        drop.p_traj.set_value(drop.p_traj_val)
        drop.p_vid.set_value(drop.p_vid_val)
        drop.p_hidden.set_value(drop.p_hidden_val)

    if use.aug:  # resume background loading of augmented training data
        start_load(files.train, augm=use.aug)

    return _avg(ce)  # average cross-entropy over the test batches
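The function leans on a few module-level helpers the excerpt does not show. A rough sketch of what they plausibly look like; the names come from the call sites above, and the implementations are assumptions rather than the project's actual code:

import numpy as np

# assumed: cast to the 32-bit float dtype that the Theano shared
# variables holding the dropout rates expect
float32 = np.float32

# assumed: plain arithmetic mean of the collected per-batch losses
def _avg(values):
    return sum(values) / float(len(values))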
Example #2
def load_data(path, trans):
    """ load data into shared variables """
    global rng, x_, t_, y_, first_report2

    # if trans and use.aug:
    #     transform(path)  # queue up the path for augmentation
    #     vid, traj, lbl = load_aug(path)
    # else:
    #     vid, traj, lbl = load_normal(path)

    # file = GzipFile(path, 'rb')
    # vid, skel, lbl = load(file)
    # file.close()
    # traj,ori,pheight = skel

    # print path
    # import cv2
    # for img in vid[0,0,0]:
    #      cv2.imshow("Video", img)
    #      cv2.waitKey(0)
    # for img in vid[0,0,1]:
    #      cv2.imshow("Video", img)
    #      cv2.waitKey(0)

    # new_vid = empty(in_shape,dtype="uint8")

    # vid_ = vid[:,0,:2,:,::2,::2]
    # vid_ = vid[:,0,:2]
    # zm = 1.*90./128.
    # vid_ = ndimage.zoom(vid_,(1,1,1,zm,zm),order=0)
    # new_vid[:,0] = vid_
    # new_vid[:,1] = vid[:,1,:2]

    # print "loading..."
    start_time = time()
    if not trans:  # not augmenting: queue the validation files for loading
        start_load(files.valid, jobs, False)
    vid, skel, lbl = queue.get()[0]
    traj, ori, pheight = skel

    print "get in", str(time() - start_time)[:3] + "s",

    # shuffle the batch
    ind = rng.permutation(batch_size)
    vid = vid[ind].astype(floatX)
    traj = traj[ind].astype(floatX)
    lbl = lbl[ind].astype(floatX)

    # vid = vid/(255./(scaler*2.))-scaler
    # traj = traj/(255./(scaler_traj*2.))-scaler_traj
    # traj = traj/(255./5.)

    lbl -= 1  # labels arrive 1-based; shift them to 0-based

    if first_report2:  # one-off sanity check of the value ranges
        print "data range:", vid.min(), vid.max()
        print "traj range:", traj.min(), traj.max()
        print "lbl range:", lbl.min(), lbl.max()
        first_report2 = False

    # copy the batch into the Theano shared variables
    # (borrow=True avoids an extra host-side copy)
    x_.set_value(vid, borrow=True)
    t_.set_value(traj, borrow=True)
    y_.set_value(lbl, borrow=True)
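x_, t_ and y_ are not declared in this excerpt; the set_value(..., borrow=True) calls imply they are Theano shared variables. A minimal sketch of how they might be declared elsewhere in the module, with placeholder shapes rather than the project's actual dimensions:

import numpy as np
import theano

floatX = theano.config.floatX  # typically 'float32' on GPU setups

# placeholder shapes; the real code would size these to the video,
# trajectory and label batches
x_ = theano.shared(np.zeros((1,), dtype=floatX), borrow=True)
t_ = theano.shared(np.zeros((1,), dtype=floatX), borrow=True)
y_ = theano.shared(np.zeros((1,), dtype=floatX), borrow=True)

set_value then swaps a new batch into each variable without recompiling the Theano graph.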
Example #3
_files = glob(src+'/batch_100_*.zip')+glob(src+'/valid/batch_100_*.zip')
# _files = glob(src+'_train/batch_100_*.p')
# _files.sort()
# _files = _files[:10]
rng.shuffle(_files)
class files:
    data_files = _files
    n_train = int(len(data_files) * .8)  # 80/20 train/validation split
    n_valid = int(len(data_files) * .2)
    train = data_files[:n_train]
    valid = data_files[n_train:n_train + n_valid]
    if use.valid2:  # any leftover files become a second validation set
        valid2 = data_files[n_train + n_valid:]
    # valid2 = glob(src+'_valid/batch_100_*.p')

# data augmentation
jobs,queue = start_load(files.train,augm=use.aug,start=True)

# print data sizes
if use.valid2:
    files.n_test = len(files.valid2)
else:
    files.n_test = 0
write('data: total: %i train: %i valid: %i test: %i' %
      ((files.n_test + files.n_train + files.n_valid) * batch_size,
       files.n_train * batch_size,
       files.n_valid * batch_size,
       files.n_test * batch_size))

first_report2 = True
def load_data(path, trans):
    """ load data into shared variables """
    global rng, x_, t_, y_, first_report2