def train(param=PARAMS, sv=SOLVE, small=False):

    sv['name'] = __file__.rstrip('.py')
    input_var = raw_input('Are you testing now? ')
    
    if 'no' in input_var:
        sv.pop('name')
    else:
        sv['name'] += input_var

    out = get(1) 
    from my_layer import LSTM
    sym = LSTM(e_net.l3_4, 64*64, 1, 64, 64)
    sym = list(sym)
    sym[0] = mx.sym.LogisticRegressionOutput(data=sym[0], name='softmax')
    sym = mx.symbol.Group(list(sym))

    param['eval_data'] = out['val'] 
    param['marks'] = param['e_marks'] = out['marks'] 
    param['ctx'] = mu.gpu(1)

    print out['train'].label[0][1].shape
  
    s = Solver(sym, out['train'], sv, **param)
    s.train()
    s.predict()
# Пример #2 ("Example #2" -- snippet-separator artifact left by scraping)
# 0
def train(param=PARAMS, sv=SOLVE, small=False):

    sv['name'] = __file__.rstrip('.py')
    input_var = raw_input('Are you testing now? ')

    if 'no' in input_var:
        sv.pop('name')
    else:
        sv['name'] += input_var

    out = get(1)
    from my_layer import LSTM
    sym = LSTM(e_net.l3_4, 64 * 64, 1, 64, 64)
    sym = list(sym)
    sym[0] = mx.sym.LogisticRegressionOutput(data=sym[0], name='softmax')
    sym = mx.symbol.Group(list(sym))

    param['eval_data'] = out['val']
    param['marks'] = param['e_marks'] = out['marks']
    param['ctx'] = mu.gpu(1)

    print out['train'].label[0][1].shape

    s = Solver(sym, out['train'], sv, **param)
    s.train()
    s.predict()
def train(base_model, param=PARAMS, sv=SOLVE, small=False):
    """Run a pretrained CNN over sequence data, then train the RNN on it.

    base_model -- argument tuple forwarded to mx.model.FeedForward.load
                  (presumably (prefix, epoch) -- confirm with callers).
    NOTE: mutates the shared PARAMS/SOLVE dicts passed as defaults.
    """
    # prepare data
    if small:
        files = rnn_load.f10
        param['ctx'] = mu.gpu(1)
    else:
        files = rnn_load.files

    imgs, labels = rnn_load.load_rnn_pk(files)
    it, lt, iv, lv = mu.prepare_set(imgs, labels)
    # First two axes of the training images: sequence count and time steps.
    N, T = it.shape[:2]

    # cnn process
    model = mx.model.FeedForward.load(*base_model, ctx=mu.gpu(1))
    rnn_input = np.zeros_like(it)
    # NOTE(review): only the first sequence is run through the CNN, and
    # rnn_input is never used below (see the commented-out datas line) --
    # confirm this is intentional.
    for n in range(1):
        rnn_input[n], imgs, labels = mu.predict_draw(model, it[n])

    # prepare params
    #datas = [rnn_input, lt, iv, lv]
    # NOTE(review): labels (lt/lv) are fed as both data and label -- verify.
    datas = [ lt, lt, lv, lv]
    for i, d in enumerate(datas):
        #datas[i] = np.transpose(d,axes=(1,0,2,3,4))

        # make T become one
        datas[i] = d.reshape((-1,1)+d.shape[2:])

    iters = rnn_load.create_rnn_iter(*datas, batch_size=1, num_hidden=1000)
    param['eval_data'] = iters[1]
    # One mark per original time step, for both train and eval.
    mark = param['marks'] = param['e_marks'] = [1]*T
    rnet = rnn_net(begin=mx.sym.Variable('data'), num_hidden=1000)
    s = Solver(rnet, iters[0], sv, **param)

    # train
    print 'Start Training...'
    s.train()
    s.predict()
def train(base_model, param=PARAMS, sv=SOLVE, small=False):
    """Run a pretrained CNN over sequence data, then train the RNN on it.

    base_model -- argument tuple forwarded to mx.model.FeedForward.load
                  (presumably (prefix, epoch) -- confirm with callers).
    NOTE: mutates the shared PARAMS/SOLVE dicts passed as defaults.
    """
    # prepare data
    if small:
        files = rnn_load.f10
        param['ctx'] = mu.gpu(1)
    else:
        files = rnn_load.files

    imgs, labels = rnn_load.load_rnn_pk(files)
    it, lt, iv, lv = mu.prepare_set(imgs, labels)
    # First two axes of the training images: sequence count and time steps.
    N, T = it.shape[:2]

    # cnn process
    model = mx.model.FeedForward.load(*base_model, ctx=mu.gpu(1))
    rnn_input = np.zeros_like(it)
    # NOTE(review): only the first sequence is processed and rnn_input is
    # never consumed below -- confirm this is intentional.
    for n in range(1):
        rnn_input[n], imgs, labels = mu.predict_draw(model, it[n])

    # prepare params
    #datas = [rnn_input, lt, iv, lv]
    # NOTE(review): labels (lt/lv) are fed as both data and label -- verify.
    datas = [lt, lt, lv, lv]
    for i, d in enumerate(datas):
        #datas[i] = np.transpose(d,axes=(1,0,2,3,4))

        # make T become one
        datas[i] = d.reshape((-1, 1) + d.shape[2:])

    iters = rnn_load.create_rnn_iter(*datas, batch_size=1, num_hidden=1000)
    param['eval_data'] = iters[1]
    # One mark per original time step, shared between train and eval.
    mark = param['marks'] = param['e_marks'] = [1] * T
    rnet = rnn_net(begin=mx.sym.Variable('data'), num_hidden=1000)
    s = Solver(rnet, iters[0], sv, **param)

    # train
    print 'Start Training...'
    s.train()
    s.predict()
def main():
    """Load a saved CNN and draw its predictions on a few augmented samples."""
    net = cnn_net()  # constructed as in the original; the symbol is unused below

    images, masks = u.load_pk('../DATA/PK/o1.pk')
    aug_images, aug_labels = u.augment_sunny(images[:5], masks[:5])
    val_iter = mx.io.NDArrayIter(aug_images, label=aug_labels)

    load_kwargs = dict(
        ctx=u.gpu(1),
        learning_rate=6,
        num_epoch=10,
        optimizer='sgd',
        initializer=mx.initializer.Xavier(rnd_type='gaussian'),
    )
    model = mx.model.FeedForward.load(*Aug40, **load_kwargs)

    u.predict_draw(model, val_iter, folder='MoveCheck')
# Пример #6 ("Example #6" -- snippet-separator artifact left by scraping)
# 0
    init_h = [('l%d_init_h'%l, (batch_size, num_hidden, 256, 256)) for l in range(num_lstm_layer)]
    init_states = init_c + init_h
    data = get(init_states, bs=batch_size, small=small)
    data_train = data['train']
    data_val   = data['val']
    param['eval_data'] = data_val

    num_time = data_train.data_list[0].shape[1]
    symbol = sym_gen(num_time)
    
    s = Solver(symbol, data_train, sv, **param)
    print 'Start Training...'
    s.train()
    # s.predict()
    
   

if __name__ == '__main__':
    # Override the module-level defaults for this particular run.
    PARAMS.update(num_epoch=30, learning_rate=3, ctx=mu.gpu(2))

    # Checkpoint-resume settings, currently disabled:
    # SOLVE['load'] = False
    # SOLVE['load_perfix'] = '/home/zijia/HeartDeepLearning/RNN/Result/<9-10:28:52>LSTM[E50]/[ACC-0.34549 E49]'
    # SOLVE['load_epoch']  = 49

    SOLVE['name'] = __file__

    train(small=False)

# coding: utf-8

import ipt
import mxnet as mx
from rnn.rnn_solver import Solver
import my_utils as mu
import os
import pickle as pk
import matplotlib.pyplot as plt

# Default training hyper-parameters, unpacked into Solver via **PARAMS.
# The train functions below mutate this dict in place (eval_data, ctx, ...).
PARAMS = {
    'ctx': mu.gpu(2),
    'learning_rate': 1,
    'num_epoch': 10,
    'initializer': mx.initializer.Xavier(rnd_type='gaussian'),
}

# Default Solver configuration; also mutated in place ('name', ...).
SOLVE = {
    'save_best': True,  # presumably keeps the best checkpoint -- confirm in Solver
    'is_rnn': False,
}

from my_net import net
from tools import get_data


def cf_train(sv=SOLVE, param=PARAMS):
    """Train the 'CF' model (snippet is truncated after data loading).

    NOTE: mutates the shared SOLVE dict passed as a default.
    """
    # get_data('c', 2, ...): presumably dataset key 'c' with batch size 2
    # -- confirm against tools.get_data.
    train, val = get_data('c', 2, small=False)

    sv['name'] = 'CF'
# Пример #8 ("Example #8" -- snippet-separator artifact left by scraping)
# 0
if __name__ == '__main__':
    # Run configuration for a single-sample LSTM experiment.
    batch_size = 1
    num_epoch = 25
    small_set = True 


    learning_rate = 0.01
    num_hidden = 4
    num_lstm_layer = 1
    momentum = 0.0

    # dummy data is used to test speed without IO
    dummy_data = False

    contexts = mu.gpu(1)

    def sym_gen(seq_len):
        # Unrolled LSTM symbol for a sequence of the given length.
        return lstm_unroll(num_lstm_layer, seq_len, num_hidden=num_hidden, num_label=1)

    # Initial LSTM cell (c) and hidden (h) states: one (name, shape) pair
    # per layer, each shaped (batch, num_hidden, 256, 256).
    init_c = [('l%d_init_c'%l, (batch_size, num_hidden, 256, 256)) for l in range(num_lstm_layer)]
    init_h = [('l%d_init_h'%l, (batch_size, num_hidden, 256, 256)) for l in range(num_lstm_layer)]
    init_states = init_c + init_h
    data = get(init_states, bs=batch_size, small=small_set)
    data_train = data['train']
    data_val   = data['val']

    # Replace real iterators with dummies to benchmark without disk IO.
    if dummy_data:
        data_train = DummyIter(data_train)
        data_val = DummyIter(data_val)
# Пример #9 ("Example #9" -- snippet-separator artifact left by scraping)
# 0
              for l in range(num_lstm_layer)]
    init_h = [('l%d_init_h' % l, (batch_size, num_hidden, 256, 256))
              for l in range(num_lstm_layer)]
    init_states = init_c + init_h
    data = get(init_states, bs=batch_size, small=small)
    data_train = data['train']
    data_val = data['val']
    param['eval_data'] = data_val

    num_time = data_train.data_list[0].shape[1]
    symbol = sym_gen(num_time)

    s = Solver(symbol, data_train, sv, **param)
    print 'Start Training...'
    s.train()
    # s.predict()


if __name__ == '__main__':
    # Run-specific overrides of the module defaults.
    for key, value in (('num_epoch', 30), ('learning_rate', 3), ('ctx', mu.gpu(2))):
        PARAMS[key] = value

    # Checkpoint-resume settings, currently disabled:
    # SOLVE['load'] = False
    # SOLVE['load_perfix'] = '/home/zijia/HeartDeepLearning/RNN/Result/<9-10:28:52>LSTM[E50]/[ACC-0.34549 E49]'
    # SOLVE['load_epoch']  = 49

    SOLVE['name'] = __file__

    train(small=False)
import ipt
import mxnet as mx
from rnn import rnn_net as rnn
from HeartDeepLearning.solver import Solver
import my_utils as mu
from rnn_load import get

# Default RNN training hyper-parameters, unpacked into Solver via **PARAMS;
# mutated in place by train() below.
PARAMS={
    'ctx':mu.gpu(2),
    'learning_rate':5,
    'num_epoch':15,
    'initializer':mx.initializer.Xavier(rnd_type='gaussian'),
}

# Default Solver configuration for the RNN variant; also mutated in place.
SOLVE = {
    'save_best':True,
    'is_rnn'   :True,  
}


def train(param = PARAMS, sv=SOLVE, small=False):
    """Interactively label this run, or unlabel it for a real (non-test) run.

    An answer containing 'no' removes the solver name; any other answer is
    appended to the default 'TEST' label.
    NOTE: mutates the shared SOLVE dict passed as a default.
    """
    sv['name'] = 'TEST'
    answer = raw_input('Are you testing now? ')
    if 'no' not in answer:
        sv['name'] += answer
    else:
        sv.pop('name')

import ipt, logging
import mxnet as mx
from cnn import cnn_net
import my_utils as u
from solver import Solver
import os

# Default CNN training hyper-parameters, unpacked into Solver via **PARAMS;
# mutated in place by train() below.
PARAMS={
    'ctx':u.gpu(2),
    'learning_rate':3,
    'num_epoch':15,
    #'optimizer':'adam',
    'initializer':mx.initializer.Xavier(rnd_type='gaussian'),
}

# Default Solver configuration; mutated in place ('name', ...).
SOLVE = {
    'save_best':True,
    'is_rnn'   :False,  
}

def train(param = PARAMS, sv=SOLVE, small=False):
    """Ask whether this is a test run and set the solver name accordingly.

    Answers containing 'no' drop the 'TEST' label entirely; anything else
    is appended to it.
    NOTE: mutates the shared SOLVE dict passed as a default.
    """
    sv['name'] = 'TEST'
    reply = raw_input('Are you testing now? ')
    real_run = 'no' in reply
    if real_run:
        sv.pop('name')
    else:
        sv['name'] += reply

if __name__ == '__main__':
    # Run configuration for the LSTM experiment (full data set).
    batch_size = 2
    num_epoch = 25
    small_set = False


    learning_rate = 0.01
    num_hidden = 4
    num_lstm_layer = 1
    momentum = 0.0

    # dummy data is used to test speed without IO
    dummy_data = False

    contexts = mu.gpu(2)

    def sym_gen(seq_len):
        # Unrolled LSTM symbol for a sequence of the given length.
        return lstm_unroll(num_lstm_layer, seq_len, num_hidden=num_hidden, num_label=1)

    # Initial LSTM cell (c) and hidden (h) states: one (name, shape) pair
    # per layer, each shaped (batch, num_hidden, 256, 256).
    init_c = [('l%d_init_c'%l, (batch_size, num_hidden, 256, 256)) for l in range(num_lstm_layer)]
    init_h = [('l%d_init_h'%l, (batch_size, num_hidden, 256, 256)) for l in range(num_lstm_layer)]
    init_states = init_c + init_h
    data = get(init_states, bs=batch_size, small=small_set)
    data_train = data['train']
    data_val   = data['val']

    # Replace real iterators with dummies to benchmark without disk IO.
    if dummy_data:
        data_train = DummyIter(data_train)
        data_val = DummyIter(data_val)
import ipt, logging
import mxnet as mx
from cnn import cnn_net
import my_utils as u
from solver import Solver
import os
from HeartDeepLearning.RNN.rnn_load import load_rnn_pk, files

# Default CNN training hyper-parameters (with weight decay), unpacked into
# Solver via **PARAMS; mutated in place by train() below.
PARAMS={
    'ctx':u.gpu(2),
    'learning_rate':3,
    'num_epoch':15,
    #'optimizer':'adam',
    'initializer':mx.initializer.Xavier(rnd_type='gaussian'),
    'wd':1,
}

# Default Solver configuration; mutated in place ('name', ...).
SOLVE = {
    'save_best':True,
    'is_rnn'   :False,  
}

def train(param=PARAMS, sv=SOLVE, small=False):
    """Prompt for the run kind and label the solver accordingly.

    A reply containing 'no' deletes the name (real run, no test label);
    any other reply is appended to the default 'TEST' label.
    NOTE: mutates the shared SOLVE dict passed as a default.
    """
    sv['name'] = 'TEST'
    reply = raw_input('Are you testing now? ')
    testing = 'no' not in reply
    if testing:
        sv['name'] = sv['name'] + reply
    else:
        del sv['name']

if __name__ == '__main__':
    # Run configuration for the LSTM experiment (full data set).
    batch_size = 2
    num_epoch = 25
    small_set = False

    learning_rate = 0.01
    num_hidden = 4
    num_lstm_layer = 1
    momentum = 0.0

    # dummy data is used to test speed without IO
    dummy_data = False

    contexts = mu.gpu(2)

    def sym_gen(seq_len):
        # Unrolled LSTM symbol for a sequence of the given length.
        return lstm_unroll(num_lstm_layer,
                           seq_len,
                           num_hidden=num_hidden,
                           num_label=1)

    # Initial LSTM cell (c) and hidden (h) states: one (name, shape) pair
    # per layer, each shaped (batch, num_hidden, 256, 256).
    init_c = [('l%d_init_c' % l, (batch_size, num_hidden, 256, 256))
              for l in range(num_lstm_layer)]
    init_h = [('l%d_init_h' % l, (batch_size, num_hidden, 256, 256))
              for l in range(num_lstm_layer)]
    init_states = init_c + init_h
    data = get(init_states, bs=batch_size, small=small_set)
    data_train = data['train']
    data_val = data['val']
# Пример #15 ("Example #15" -- snippet-separator artifact left by scraping)
# 0

if __name__ == '__main__':
    # Run configuration for a single-sample LSTM experiment (small set).
    batch_size = 1
    num_epoch = 25
    small_set = True

    learning_rate = 0.01
    num_hidden = 4
    num_lstm_layer = 1
    momentum = 0.0

    # dummy data is used to test speed without IO
    dummy_data = False

    contexts = mu.gpu(1)

    def sym_gen(seq_len):
        # Unrolled LSTM symbol for a sequence of the given length.
        return lstm_unroll(num_lstm_layer,
                           seq_len,
                           num_hidden=num_hidden,
                           num_label=1)

    # Initial LSTM cell (c) and hidden (h) states: one (name, shape) pair
    # per layer, each shaped (batch, num_hidden, 256, 256).
    init_c = [('l%d_init_c' % l, (batch_size, num_hidden, 256, 256))
              for l in range(num_lstm_layer)]
    init_h = [('l%d_init_h' % l, (batch_size, num_hidden, 256, 256))
              for l in range(num_lstm_layer)]
    init_states = init_c + init_h
    data = get(init_states, bs=batch_size, small=small_set)
    data_train = data['train']
    data_val = data['val']