def get_data(net_type, batch_size, init_states=(), splite_rate=0.1, small=False):
    if small:
        files = small_files
    else:
        files = all_files

    files = [os.path.join('/home/zijia/HeartDeepLearning/DATA/PK/NEW', f) for f in files]

    from RNN.rnn_load import load_rnn_pk
    imgs, labels = load_rnn_pk(files)

    # split into (train_img, train_label, val_img, val_label)
    data_list = mu.prepare_set(imgs, labels, rate=splite_rate)

    if net_type == 'c':
        # CNN: collapse the sequence axis so every frame is an independent sample
        img_shape = data_list[0].shape[2:]
        data_list = [d.reshape((-1,) + img_shape) for d in data_list]
        train, val = mu.create_iter(*data_list, batch_size=batch_size)
        return train, val
    elif net_type == 'r':
        # RNN: keep the sequence axis and wrap the data in the custom RIter
        from rnn.rnn_iter import RIter
        train = RIter(data_list[0], init_states, label=data_list[1],
                      batch_size=batch_size, last_batch_handle='pad')
        val = RIter(data_list[2], init_states, label=data_list[3],
                    batch_size=batch_size, last_batch_handle='pad')
        return train, val
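# Usage sketch for get_data(); not part of the original module. The init_states
# entries are an assumption: (name, shape) pairs in the usual mxnet RNN-iterator
# style, with num_hidden=1000 borrowed from the RNN train() further down. The
# custom RIter may expect a different layout.
def _example_get_data():
    cnn_train, cnn_val = get_data('c', batch_size=10, small=True)
    init_states = [('init_h', (1, 1000)), ('init_c', (1, 1000))]  # assumed names/shapes
    rnn_train, rnn_val = get_data('r', batch_size=1,
                                  init_states=init_states, small=True)
    return cnn_train, cnn_val, rnn_train, rnn_val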
def train(param=PARAMS, sv=SOLVE, small=False):
    sv['name'] = 'TEST'
    input_var = raw_input('Are you testing now? ')
    if 'no' in input_var:
        sv.pop('name')
    else:
        sv['name'] += input_var

    # out = u.get(6, small=True, aug=True)
    imgs, ll = load_rnn_pk(files)
    imgs = imgs.reshape((-1, 1, 256, 256))
    ll = ll.reshape((-1, 1, 256, 256))
    datas = u.prepare_set(imgs, ll)
    out = u.create_iter(*datas, batch_size=5)

    net = cnn_net(use_logis=True)
    param['eval_data'] = out[1]

    s = Solver(net, out[0], sv, **param)
    s.train()
    s.predict()
    s.all_to_png()
    s.save_best_model()
    s.plot_process()
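# Usage sketch for the CNN train() above; not part of the original script. It
# assumes PARAMS and SOLVE are the project's default config dicts defined
# elsewhere. Copies are passed in because train() mutates both (it sets
# sv['name'] and param['eval_data']), so the module-level defaults stay clean.
def _example_cnn_train():
    train(param=dict(PARAMS), sv=dict(SOLVE), small=True)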
def get_rnn(bs, small=False, aug=False, rate=0.1):
    import RNN.rnn_load as r

    fs = r.f10 if small else r.files
    imgs, labels = r.load_rnn_pk(fs)

    data = list(mu.prepare_set(imgs, labels, rate=rate))

    # reshape the train/val labels (indices 1 and 3 of the split)
    data[1], data[3] = reshape_label(data[1::2])

    # swap the first two axes so the time axis comes first
    for i, a in enumerate(data):
        data[i] = np.transpose(a, axes=(1, 0, 2, 3, 4))

    # hidden size is tied to the label's last dimension squared
    hidden = data[1].shape[-1] ** 2
    train, val = r.create_rnn_iter(*data, batch_size=bs, num_hidden=hidden)

    mark = [1] * imgs.shape[0]  # one mark flag per sequence
    return {'train': train, 'val': val, 'marks': mark}
def get(init_status, bs=1, fs=None, rate=0.1, small=False):
    from RNN.rnn_load import files, f10, load_rnn_pk
    from my_utils import prepare_set, create_iter
    import numpy as np

    if small:
        fs = f10
    elif fs is None:
        fs = files

    imgs, labels = load_rnn_pk(fs)
    print 'IMAGE SHAPE', imgs.shape, labels.shape

    data = list(prepare_set(imgs, labels, rate=rate))
    data.append(init_status)

    train, val = create_iter(*data, batch_size=bs)
    return {'train': train, 'val': val}
def get(bs, small=False, aug=False):
    if small:
        filename = "/home/zijia/HeartDeepLearning/DATA/PK/o1.pk"
    else:
        filename = [
            '/home/zijia/HeartDeepLearning/DATA/PK/online.pk',
            '/home/zijia/HeartDeepLearning/DATA/PK/validate.pk',
        ]

    img, label = mu.load_pk(filename)
    it, lt, iv, lv = mu.prepare_set(img, label)

    if aug:
        it, lt = mu.augment_sunny(it, lt)

    Lt, Lv = reshape_label([lt, lv])

    print 'Data Shape, Train %s, Val %s' % (it.shape, iv.shape)

    train, val = mu.create_iter(it, Lt, iv, Lv, batch_size=bs)
    return {'train': train, 'val': val}
def train(base_model, param=PARAMS, sv=SOLVE, small=False):
    # prepare data
    if small:
        files = rnn_load.f10
        param['ctx'] = mu.gpu(1)
    else:
        files = rnn_load.files

    imgs, labels = rnn_load.load_rnn_pk(files)
    it, lt, iv, lv = mu.prepare_set(imgs, labels)
    N, T = it.shape[:2]

    # cnn process
    model = mx.model.FeedForward.load(*base_model, ctx=mu.gpu(1))
    rnn_input = np.zeros_like(it)
    for n in range(1):
        rnn_input[n], imgs, labels = mu.predict_draw(model, it[n])

    # prepare params
    # datas = [rnn_input, lt, iv, lv]
    datas = [lt, lt, lv, lv]
    for i, d in enumerate(datas):
        # datas[i] = np.transpose(d, axes=(1, 0, 2, 3, 4))
        # make T become one
        datas[i] = d.reshape((-1, 1) + d.shape[2:])

    iters = rnn_load.create_rnn_iter(*datas, batch_size=1, num_hidden=1000)

    param['eval_data'] = iters[1]
    mark = param['marks'] = param['e_marks'] = [1] * T

    rnet = rnn_net(begin=mx.sym.Variable('data'), num_hidden=1000)
    s = Solver(rnet, iters[0], sv, **param)

    # train
    print 'Start Training...'
    s.train()
    s.predict()
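# Hypothetical driver for the RNN train() above. mx.model.FeedForward.load()
# takes a (prefix, epoch) pair, which is what *base_model unpacks into, so a
# checkpoint saved as 'cnn-symbol.json' / 'cnn-0020.params' would be referenced
# like this; the prefix path here is made up for illustration.
if __name__ == '__main__':
    train(('/home/zijia/HeartDeepLearning/models/cnn', 20), small=True)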