# fp.write('set term postscript eps enhanced color;set output "tmp/predict.eps"\n') # fp.write('plot "{}" using 2:3 w l t "y","" using 2:1 w l t "yp", "" using 2:($1-$3) w l t "yp-y"\n'.format(fnpred)) # fp.close() # my_misc.myshell('gnuplot tmp/predict.plt;gv tmp/predict.eps&') if net['data_class'] == 'ts': #time-series # test=copy.deepcopy(givendata) # import pdb;pdb.set_trace(); #for debug sim.exec_msp_test(net, givendata, test, args) # elif args.ex!='': #?? # sim.exec_ssp_test(net, givendata, test) elif args.fnl != '': t = 0 n_train = 0 # import pdb;pdb.set_trace(); #for debug test['y'][t, 0], y, test['Y'][t, 0] = my_plinn.calc_output( net, test['x'][t, :], test['x'][t, :k]) with open(net['fnpred'], 'w') as fp: fp.write('%.7e %d %.7e %.7e %.7e %.7e #Y^,t,Y,y,c,e2\n' % (test['Y'][t], t - n_train + 1, givendata['Y'][t], test['y'][t], net['c'][t], (test['e'][t])**2)) ## ########### ########################## ####### Save results ########################## # import pdb;pdb.set_trace(); #for debug print(net['mes']) # fnlst = [] if args.fns != '':
def main():
    """Command-line driver for bagging CAN2 learners.

    Parses options, then either:
      * time-series mode (-t given): builds per-bag training files from a
        scalar series, trains `can2.py` learners in parallel, loads the
        resulting nets and performs recursive multistep prediction by
        ensembling (averaging) the bag outputs; or
      * regression mode (no -t): builds bootstrap bags (optionally
        out-of-bag), runs the learners in parallel and averages their
        prediction files, reporting the MSE.

    Side effects: writes tmp/train*.dat, tmp/test*.dat, prediction files,
    gnuplot scripts, and prints a one-line summary. Always ends in quit().
    """
    # --- argument setup ---
    parser = argparse.ArgumentParser(description='Chainer example: MNIST')
    parser.add_argument('--gpu', '-g', default=-1, type=int,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('-bag', type=str, default='100,0.7,1,4',
                        help='b,a,s,m for bagging; b=n_bags, a=bagsize-ratio,s=seed,m,number-of-parallel')
    parser.add_argument('-fn', type=str, default='',
                        help='fntrain,fntest,IDexe; IDexe:ID of execution for parallel processing')
    parser.add_argument('-k', type=str, default='',
                        help='k1 and k2: number of input channels k=k1,k2=0')
    # init_net() in my_plinn.c
    parser.add_argument('--inet', '-in', type=str, default='',
                        help='n-units,n_compare, v_thresh,vmin,vmin2,v_ratio,width')
    parser.add_argument('-ex', type=str, default='',
                        help='l_mode,gamma0,nentropy_thresh, n-it, n-display,rot_x,rot_y')
    parser.add_argument('--seed', '-s', default='', type=str,
                        help='seed of random number')
    parser.add_argument('-DISP', default='', type=str,
                        help='0 for display no-figures.')
    parser.add_argument('-Tpinv', default='', type=str,
                        help='Tpinv for use pseudo-inverse from t=Tpinv learning iterations.')
    parser.add_argument('-nop', type=int, default=0,
                        help='1 for noprint 0 for print')
    parser.add_argument('--epochs', '-e', type=str, default='',
                        help='number of epochs to learn')
    parser.add_argument('-BIAS', '-B', type=str, default='', help='Bias')
    parser.add_argument('-Lstd', '-Ls', type=str, default='', help='Lstd,Lsdtm')
    parser.add_argument('-ib', '-ib', type=str, default='', help='ib')
    parser.add_argument('-t', type=str, default='',
                        help='null for regression, t:tr0-tr1:tp0-tp1:tpD:Ey for |tpD|-step ahead prediction with recursive tpD>0, non-recursive tpD<0 ')
    # normalize_data(DATA *givendata, NET *net)
    parser.add_argument('--ytrans', '-y', type=str, default='',
                        help='ymin0,ymax0,ymin1,ymax1 to transform y in [ymin0,ymax0] to y in [ymin1,ymax1]')
    parser.add_argument('--xtrans', '-x', type=str, default='',
                        help='xmin0,xmax0,xmin1,xmax1 to transform x in [xmin0,xmax0] to x in [xmin1,xmax1]')
    parser.add_argument('-r', type=str, default='',
                        help='r1,r2 for integers r1 and r2 is the resolution, no digitization if r1=0')
    parser.add_argument('--pinv', '-pI', type=str, default='',
                        help='1 for use pseudo-inverse')
    args = parser.parse_args()

    # Echo the full command line for logging/reproducibility.
    argv = sys.argv
    cmd = ''
    for i, v in enumerate(argv):
        cmd += v + ' '
    print('#start:python {}'.format(cmd))

    # --- initialization (constants kept from the original C port) ---
    MULTIFOLD = 1
    NoBAG = 0
    BAGwithVal = 1
    BAGwithoutVal = 2
    NoBoost = 0
    EmBoost = 1
    GbBoost = 2
    BAGGING = NoBAG  # L177
    Boost = NoBoost
    meannTestData = 0
    nValData = 0
    t_boost = 0  # apply boosting for t_boost>=1, gradient-based for t_boost==-2
    fnsize = 256
    err4propagate = 0
    err4terminate = 0
    nop = 0
    # FIX: safe defaults so the summary line below cannot raise NameError
    # when -ex/-Tpinv/--seed are omitted.
    T, Tpinv, seed = '', '', ''

    # Task type: time-series prediction when -t is given, else regression.
    if args.t == '':
        data_class = 'reg'  # regression or function approximation
    else:
        data_class = 'ts'  # time_series

    fn = args.fn.split(',')
    lfn = len(fn)
    if lfn <= 0:
        print('#specify -fn <fntrain>,<fntest>,<fnpred>')
        quit()
    fntest0, fnpred0 = '/dev/null', 'tmp/predict.dat'  # default
    if lfn >= 1: fntrain0 = fn[0]
    if lfn >= 2: fntest0 = fn[1]
    if lfn >= 3: fnpred0 = fn[2]

    # Bagging parameters: number of bags, bag-size ratio, seed, workers.
    argsbag = args.bag.split(',')
    largsbag = len(argsbag)
    b_bag, a_bag, s_bag, m_bag = 10, 0.7, 1, 0  # default
    m_cpu = m_bag  # FIX: defined even when -bag has fewer than 4 fields
    if largsbag >= 1: b_bag = n_Folds = n_bags = int(argsbag[0])
    if largsbag >= 2: a_bag = float(argsbag[1])
    if largsbag >= 3: s_bag = int(argsbag[2])
    if largsbag >= 4: m_cpu = int(argsbag[3])
    if m_cpu <= 0:
        m_cpu = multi.cpu_count()

    if args.k != '':
        # FIX: map() is a lazy iterator in Python 3; materialize it before
        # calling len() / indexing.
        _k = list(map(int, args.k.split(',')))
        _k2 = 0
        if len(_k) >= 2:
            _k1, _k2 = _k
        else:
            _k1 = _k[0]
        k = _k1 + _k2
        k1 = k + 1
    if args.inet == '':
        print('#specify -in <n-units>,<n_compare>,<v_thresh>,<vmin>,<vmin2>,<v_ratio>,<width>')
        quit()
    else:
        n_units = NC = args.inet.split(',')[0]

    # Options forwarded verbatim to each spawned can2.py learner.
    cmd_can2 = '-k {} -in {} -s {}'.format(k, args.inet, s_bag)
    if args.ex != '':
        cmd_can2 += ' -ex {}'.format(args.ex)
        ex = args.ex.split(',')
        T = ex[3]
    if args.Tpinv != '':
        cmd_can2 += ' -Tpinv {}'.format(args.Tpinv)
        Tpinv = args.Tpinv
    if args.seed != '':
        cmd_can2 += ' -s {}'.format(args.seed)
        seed = int(args.seed)
    if args.nop != '':
        cmd_can2 += ' -nop {}'.format(args.nop)
        nop = args.nop
    if args.DISP != '':
        cmd_can2 += ' -DISP {}'.format(args.DISP)
    if args.BIAS != '':
        cmd_can2 += ' -BIAS {}'.format(args.BIAS)
    if args.Lstd != '':
        cmd_can2 += ' -Ls {}'.format(args.Lstd)
    if args.ib != '':
        cmd_can2 += ' -ib {}'.format(args.ib)
    if args.ytrans != '':
        cmd_can2 += ' -y " {}"'.format(args.ytrans)
        if data_class == 'ts':
            # for time-series, x and y share the same value range
            cmd_can2 += ' -x " {}"'.format(args.ytrans)
    if args.xtrans != '':
        cmd_can2 += ' -x {}'.format(args.xtrans)
    if args.r != '':
        cmd_can2 += ' -r {}'.format(args.r)
    if args.pinv != '':
        cmd_can2 += ' -pI {}'.format(args.pinv)
    if args.gpu != '' and args.gpu >= 0:
        cmd_can2 += ' -g {}'.format(args.gpu)

    set_random_seed(s_bag)
    cmd = []
    if data_class == 'ts':  # time-series
        if 1 == 0 and n_Folds <= 0:  # single CAN2 (disabled branch)
            cmdj = 'python can2.py -fn {} -t {} {}'.format(fntrain0, args.t, cmd_can2)
            cmd.append(cmdj)
        else:  # bagging CAN2
            # -t format: tr0-tr1:tp0-tp1[:tpD[:tpG[:Ey]]]
            argst = args.t.split(':')
            tr, tp = argst[0], argst[1]
            largst = len(argst)
            tpD, tpG, tpEy = 0, 0, 15
            if largst >= 3: tpD = int(argst[2])
            if largst >= 4: tpG = int(argst[3])
            if largst >= 5: tpEy = int(argst[4])
            tr0, tr1 = map(int, tr.split('-'))
            tp0, tp1 = map(int, tp.split('-'))
            # --- learning: load the scalar series (first column) ---
            y0 = xp.array(pd.read_csv(fntrain0, delim_whitespace=True,
                                      dtype=xpfloat, header=1))[:, 0]
            n_train0 = tr1 - tr0 - k
            n_test0 = 1  # tp1-tp0
            # Embed the series: each row is (y(t-1..t-k) reversed, target).
            train0 = xp.empty((n_train0, k1), dtype=object)
            for t in range(n_train0):
                t0 = t + tr0
                # reverse: y(t)=M(0)*y(t-1)+...+M(k-1)*y(t-k)...+Mk*BIAS
                train0[t, :k] = y0[t0:t0 + k][::-1]
                train0[t, k] = y0[t0 + k + tpD - 1]
            fntest0 = 'tmp/test{:03d}.dat'.format(tp0)
            test0 = xp.zeros((1, k1), dtype=object)
            test0[0, :k] = y0[tp0 - k - tpD + 1:tp0 - tpD + 1][::-1]
            test0[0, k] = y0[tp0]
            df = pd.DataFrame(test0.reshape((-1, k1)))
            df.to_csv(fntest0, index=False, sep=' ', header=None, float_format='%.7e')
            Itrain0 = [i for i in range(len(train0))]
            n_train = bagsize = int(n_train0 * a_bag)
            prob = xp.ones((n_train0)) * 1. / n_train0  # same probability
            fnpred = []
            fnnet = []
            for j in range(n_Folds):
                fnnetj = 'tmp/{}+null+b{}a{}s{}N{}k{}j{}.net'.format(
                    fn2dbe(fntrain0)[1], n_Folds, a_bag, s_bag, n_units, k, j)
                fnnet.append(fnnetj)
                # Train bag j only when its saved net does not exist yet.
                if not os.path.exists(fnnetj):
                    if n_Folds == 1:
                        Itrain_Foldj = [i for i in range(n_train0)]
                    else:
                        Itrain_Foldj = xp.random.choice(
                            Itrain0, size=n_train, p=prob)  # functions as walkeralias
                    fntrainj = 'tmp/train{:03d}.dat'.format(j)
                    df = pd.DataFrame(train0[Itrain_Foldj, :].reshape((-1, k1)))
                    df.to_csv(fntrainj, index=False, sep=' ', header=None)
                    fnpredj = 'tmp/tp{}-{}+null+b{}a{}s{}N{}k{}j{}.dat'.format(
                        tp0, fn2dbe(fntrain0)[1], n_Folds, a_bag, s_bag, n_units, k, j)
                    fnpred.append(fnpredj)
                    cmdj = 'python can2.py -fn {},{},{} -fns {} {}'.format(
                        fntrainj, fntest0, fnpredj, fnnetj, cmd_can2)
                    cmd.append(cmdj)
        # --- execution of learning bags (parallel) ---
        starttime = time.time()
        with concurrent.futures.ProcessPoolExecutor(m_cpu) as excuter:
            result_list = list(excuter.map(myshell, cmd))
        elapsed_time = time.time() - starttime
        # --- net-load ---
        net = []
        for j in range(n_Folds):
            net.append(my_plinn.net_load(fnnet[j]))
        # --- execution of ensemble: recursive multistep prediction ---
        n_pred = tp1 - tp0
        ypbag = xp.zeros((n_pred, n_Folds), dtype=xpfloat)
        Ypbag = xp.zeros((n_pred, n_Folds), dtype=xpfloat)
        Y0 = y0.astype(xpfloat)
        # Map the raw series into the nets' normalized range.
        y0 = my_function.moverange(Y0, net[0]['ymin0'], net[0]['ymax0'],
                                   net[0]['ymin'], net[0]['ymax'])
        kD = k + tpD - 1
        Yp = xp.zeros((kD + n_pred), dtype=xpfloat)
        yp = xp.zeros((kD + n_pred), dtype=xpfloat)
        yp[0:kD] = y0[tp0 - kD:tp0]
        x = xp.zeros((1, k1), dtype=xpfloat)
        x[0, k] = 1  # bias input
        for t in range(0, n_pred):
            # reverse: y(t)=M(0)*y(t-1)+...+M(k-1)*y(t-k)...+Mk*BIAS
            x[0, :k] = yp[t:t + k][::-1]
            for j in range(n_Folds):
                ypbag[t, j], y2, Ypbag[t, j] = my_plinn.calc_output(
                    net[j], x[0, :], x[0, :k])
            # Ensemble = mean over bags, fed back for the next step.
            Yp[t + kD] = xp.mean(Ypbag[t, :])
            yp[t + kD] = xp.mean(ypbag[t, :])
        mo = xp.concatenate((Yp[kD:].astype(str).reshape((-1, 1)),
                             xp.array([t for t in range(tp0 + tpD, tp1 + tpD)],
                                      dtype=str).reshape((-1, 1))), axis=1)
        mo = xp.concatenate((mo, Y0[tp0:tp1].astype(str).reshape((-1, 1))), axis=1)
        df = pd.DataFrame(mo)
        df.to_csv(fnpred0, index=False, sep=' ', header=None)
        if 1 == 1:  # net['DISP']>0:
            import my_misc
            # Prediction horizon: first step whose error exceeds tpEy.
            # NOTE(review): index uses k, not kD — confirm intended for tpD>1.
            hpred = n_pred
            for t in range(n_pred):
                if abs(Yp[t + k] - Y0[t + tp0]) > tpEy:
                    hpred = t
                    break
            with open('tmp/msp.plt', 'w') as fp:
                fp.write('set grid;set title "Recursive MultiStep Pred: T={} N={} seed={} Tpinv={} H={}(Ey{})"\n'
                         .format(T, n_units, s_bag, Tpinv, hpred, tpEy))
                fp.write('set term postscript eps enhanced color;set output "tmp/msp.eps"\n')
                fp.write('plot "{}" using 2:3 w l t "y","" using 2:1 w l t "yp", "" using 2:($1-$3) w l t "yp-y"\n'
                         .format(fnpred0))
            my_misc.myshell('gnuplot tmp/msp.plt')
            my_misc.myshell('gv tmp/msp.eps&')
        elapsed_time1 = time.time() - starttime
        mes = '[{},{}]({:.1f}s) #[T,Tpinv] k{} N{} b{}a{}s{}m{} nop{} t{}H{}'.format(
            T, Tpinv, elapsed_time1, k, n_units, b_bag, a_bag, s_bag, m_cpu,
            nop, args.t, hpred)
        print('{}'.format(mes))
        quit()
    elif data_class == 'reg':  # regression / function-approximation
        train0 = xp.array(pd.read_csv(fntrain0, delim_whitespace=True,
                                      dtype=xpfloat, header=None))
        if fntest0 != '/dev/null':
            test0 = xp.array(pd.read_csv(fntest0, delim_whitespace=True,
                                         dtype=xpfloat, header=None))
            n_test0 = test0.shape[0]
        else:
            n_test0 = 0
        n_train0, k1 = train0.shape
        k = k1 - 1
        Itrain0 = [i for i in range(len(train0))]
        n_train = bagsize = int(n_train0 * a_bag)
        prob = xp.ones((n_train0)) * 1. / n_train0  # same probability
        if n_Folds <= 0:  # single CAN2
            cmdj = 'python can2.py -fn {},{},{} {}'.format(
                fntrain0, fntest0, fnpred0, cmd_can2)
            cmd.append(cmdj)
        else:  # bagging CAN2
            # nc[i,j]==1 iff sample i is out-of-bag for fold j.
            nc = xp.zeros((n_train0, n_Folds))
            fnpred = []
            for j in range(n_Folds):
                Itrain_Foldj = xp.random.choice(
                    Itrain0, size=n_train, p=prob)  # functions as walkeralias
                fntrainj = 'tmp/train{:02d}.dat'.format(j)
                df = pd.DataFrame(train0[Itrain_Foldj, :].reshape((-1, k1)))
                df.to_csv(fntrainj, index=False, sep=' ', header=None,
                          float_format='%.7e')
                fntestj = 'tmp/test{:02d}.dat'.format(j)
                if fntest0 == '/dev/null':  # out-of-bag prediction
                    Itest_Foldj = list(set(Itrain0) - set(Itrain_Foldj))
                    testj = train0[Itest_Foldj, :].reshape((-1, k1))
                    nc[Itest_Foldj, j] = 1
                    n_pred = n_train0
                else:
                    testj = test0
                    n_pred = n_test0
                df = pd.DataFrame(testj)
                df.to_csv(fntestj, index=False, sep=' ', header=None,
                          float_format='%.7e')
                fnpredj = 'tmp/{}-{}+{}+b{}a{}s{}N{}k{}j{}.dat'.format(
                    fn2dbe(fnpred0)[1], fn2dbe(fntrain0)[1], fn2dbe(fntest0)[1],
                    n_Folds, a_bag, s_bag, n_units, k, j)
                fnpred.append(fnpredj)
                cmdj = 'python can2.py -fn {},{},{} {}'.format(
                    fntrainj, fntestj, fnpredj, cmd_can2)
                cmd.append(cmdj)
        # --- execution of learning (parallel) ---
        starttime = time.time()
        with concurrent.futures.ProcessPoolExecutor(m_cpu) as excuter:
            result_list = list(excuter.map(myshell, cmd))
        elapsed_time = time.time() - starttime
        # --- execution of ensemble ---
        ypbag = xp.zeros((n_pred, n_Folds), dtype=xpfloat)
        if fntest0 != '/dev/null':  # not out-of-bag: plain mean over folds
            for j, fnpredj in enumerate(fnpred):
                ypbag[:, j] = xp.array(
                    pd.read_csv(fnpredj, delim_whitespace=True, dtype=str,
                                header=None))[:, 0].astype('float64')
            yp = xp.mean(ypbag, axis=1)
            mse = xp.var(yp - test0[:, -1])
        else:  # out-of-bag: average only the folds that held sample i out
            for j, fnpredj in enumerate(fnpred):
                ypj = xp.array(
                    pd.read_csv(fnpredj, delim_whitespace=True, dtype=str,
                                header=None))[:, 0].astype('float64')
                ncj = nc[:, j]
                Ipredj = xp.where(ncj == 1)[0]
                ypbag[Ipredj, j] = ypj
            yp = xp.zeros((n_pred), dtype=xpfloat)
            for i in range(n_pred):
                nci = nc[i, :]
                Ipredi = xp.where(nci == 1)[0]
                yp[i] = xp.mean(ypbag[i, Ipredi], axis=0)
            mse = xp.var(yp - train0[:, -1])
        df = pd.DataFrame(yp)
        df.to_csv(fnpred0, index=False, sep=' ', header=None, float_format='%.7e')
        elapsed_time1 = time.time() - starttime
        mes = '[{},{}]({:.1f}s) {:.3e} #[T,Tpinv] MSE n{},{} k{} N{} b{} a{} s{} m{} seed{} nop{}'.format(
            T, Tpinv, elapsed_time1, mse, n_train0, n_test0, k, n_units,
            b_bag, a_bag, s_bag, m_cpu, seed, nop)
        print('{}'.format(mes))
        quit()
def calc_MSE(t, test, net, givendata):
    """Evaluate the net at step t and return the squared prediction error.

    Stores the normalized and raw outputs in test['y'][t] / test['Y'][t]
    and the signed error (prediction minus target) in test['e'][t].
    """
    test['y'][t], _ymid, test['Y'][t] = my_plinn.calc_output(net, test['x'][t])
    test['e'][t] = test['Y'][t] - givendata['Y'][t]
    return test['e'][t] ** 2
def exec_msp_test(net, givendata,test,args):
    """Run a (recursive) multi-step prediction test over [n_train, n_total).

    Feeds each prediction back as the next input (recursive mode) or uses
    the given data (non-recursive, tpG<0), records per-step outputs and the
    selected unit index, computes the prediction horizon net['hpred'] (first
    step whose absolute error exceeds net['tpEy']), writes "msp.dat", and
    optionally plots via gnuplot when net['DISP']>0.
    NOTE(review): assumes test['x'],['y'],['Y'] are 2-D xp arrays indexed
    [time, channel] — confirm against load_data.
    """
    k=n_channels=net['k']
    k1=k+1 #net['k1']
    #
    n_total=givendata['n_total']
    n_train=givendata['n_train']
    n_test=givendata['n_test']
    # tr0=givendata['tr0']
    tr1=givendata['tr1']
    tp0=givendata['tp0']
    tp1=givendata['tp1']
    tpD=givendata['tpD']
    tpG=givendata['tpG']
    test=copy.deepcopy(givendata)
    t0=n_train=givendata['n_train'] #t0=n_train indicates the first time for prediction
    test['x'][t0,:]=givendata['x'][t0,:] #no-need ? already set in load_data ?
    netc=xp.zeros((n_total),dtype=xp.int32)  # winning unit index per step
    starttime = time.time()
    for t in range(t0,n_total):
        test['y'][t,0],y,test['Y'][t,0]=my_plinn.calc_output(net,test['x'][t,:],test['x'][t,:k])
        netc[t]=net['c'][0] #for calc_output2_
        # prepare the input data for the next time step
        ####before 20190120 from here
        if tpG <0 and t+tpD < n_total: #non recursive one-step ahead prediction
            test['x'][t+tpD,0]=givendata['x'][t+1,0]
        elif t+tpD+1<n_total:#recursive tpD-step ahead prediction
            if t-n_train < tr1-tp0:#use given data if exists
                # import pdb;pdb.set_trace() #for debug
                test['x'][t+tpD,0]=test['y'][t,0] #for check
            else: #elif t+tpD+1<n_total:
                test['x'][t+tpD,0]=test['y'][t,0]
        ####before 20190120 to here
        if t+1 < n_total:
            # shift the delay line: older inputs move one channel down
            test['x'][t+1,1:k]=test['x'][t,0:k-1]
            # test['x'][t+1,1:k1]=BIAS #no-need ? already set in load_data ?
    elapsed_time = time.time() - starttime
    # Prediction horizon: first step whose error exceeds tpEy.
    testerr=abs(test['Y'][n_train:n_total]-givendata['Y'][n_train:n_total])
    net['hpred']=len(testerr)
    for t in range(len(testerr)):
        if testerr[t]>net['tpEy']:
            net['hpred']=t
            break
    net['mes']='{} t{}H{} predTime{:.3f}s'.format(net['mes'],args.t,net['hpred'],elapsed_time)
    # Assemble output columns: Y^, time, Y, y, error, selected unit.
    mo=xp.concatenate((test['Y'][n_train:n_total].astype(str).reshape((-1,1)),xp.array([t for t in range(tp0+tpD,tp1+tpD)],dtype=str).reshape((-1,1))),axis=1)
    mo=xp.concatenate((mo,givendata['Y'][n_train:n_total].astype(str).reshape((-1,1))),axis=1)#pred ts, time
    mo=xp.concatenate((mo,test['y'][n_train:n_total].astype(str).reshape((-1,1))),axis=1) #given ts
    mo=xp.concatenate((mo,(test['Y'][n_train:n_total]-givendata['Y'][n_train:n_total]).astype(str).reshape((-1,1))),axis=1)#err
    mo=xp.concatenate((mo,netc[n_train:n_total].astype(str).reshape((-1,1))),axis=1) #unit number selected
    df=pd.DataFrame(mo)
    df.to_csv("msp.dat",index=False,sep=' ',header=None)
    if net['DISP']>0:
        import my_misc
        with open('tmp/msp.plt','w') as fp:
            fp.write('set grid;set title "Recursive MultiStep Pred: T={} N={} seed={} Tpinv={} H={}(Ey{})"\n'.format(net['i_times'],net['n_cells'],net['seed'],net['Tpinv'],net['hpred'],net['tpEy']))
            fp.write('set term postscript eps enhanced color;set output "tmp/msp.eps"\n')
            fp.write('plot "msp.dat" using 2:3 w l t "y","" using 2:1 w l t "yp", "" using 2:($1-$3) w l t "yp-y"\n')
        my_misc.myshell('gnuplot tmp/msp.plt')
        my_misc.myshell('gv tmp/msp.eps&')
# execution of ensemble ############### n_pred = tp1 - tp0 ypbag = xp.zeros((n_pred, n_Folds), dtype=xpfloat) yp = xp.zeros((k + n_pred), dtype=xpfloat) yp[0:k] = y0[tp0 - k:tp0] #t=0 or tp0 X = xp.zeros((1, k1), dtype=xpfloat) X[0, k] = 1 #yp[t+k+tpD-1] #dummy? for t in range(0, n_pred): X[0, :k] = yp[ t:t + k][:: -1] #reverse y(t)= M(0)*y(t-1)+...+M(k-1)*y(t-k)...+ Mk*BIAS for j in range(n_Folds): y1, y2, ypbag[t, j] = my_plinn.calc_output( net[j], X[0, :], X[0, :k]) yp[t + k] = xp.mean(ypbag[t, :]) # import pdb;pdb.set_trace(); #for debug mo = xp.concatenate((yp[k:].astype(str).reshape((-1, 1)), xp.array([t for t in range(tp0 + tpD, tp1 + tpD)], dtype=str).reshape((-1, 1))), axis=1) mo = xp.concatenate((mo, y0[tp0:tp1].astype(str).reshape((-1, 1))), axis=1) df = pd.DataFrame(mo) df.to_csv(fnpred0, index=False, sep=' ', header=None) # import pdb;pdb.set_trace(); #for debug if 1 == 1: #net['DISP']>0: import my_misc hpred = n_pred for t in range(n_pred):
def exec_msp_test(net, givendata, test, args):
    """Run a (recursive) multi-step prediction test over [n_train, n_total).

    Variant of the function above: here tpG!=0 selects non-recursive
    one-step-ahead prediction and the recursive feedback writes to index
    t+tpD+1 (instead of t+tpD). Records per-step outputs, computes the
    prediction horizon net['hpred'] (first step with error > net['tpEy']),
    and writes the result table to "msp.dat".
    NOTE(review): assumes test['x'],['y'],['Y'] are 2-D xp arrays indexed
    [time, channel] — confirm against load_data.
    """
    k = n_channels = net['k']
    k1 = k + 1  #net['k1']
    #
    n_total = givendata['n_total']
    n_train = givendata['n_train']
    n_test = givendata['n_test']
    # tr0=givendata['tr0']
    tr1 = givendata['tr1']
    tp0 = givendata['tp0']
    tp1 = givendata['tp1']
    tpD = givendata['tpD']
    tpG = givendata['tpG']
    test = copy.deepcopy(givendata)
    t0 = n_train = givendata[
        'n_train']  #t0=n_train indicates the first time for prediction
    test['x'][t0, :] = givendata['x'][
        t0, :]  #no-need ? already set in load_data ?
    netc = xp.zeros((n_total), dtype=xp.int32)  # winning unit index per step
    starttime = time.time()
    for t in range(t0, n_total):
        test['y'][t, 0], y, test['Y'][t, 0] = my_plinn.calc_output(
            net, test['x'][t, :], test['x'][t, :k])
        netc[t] = net['c'][0]  #for calc_output2_
        # prepare the input data for the next time step
        if tpG != 0 and t + 1 < n_total:  #non recursive one-step ahead prediction
            test['x'][t + 1, 0] = givendata['x'][t + 1, 0]
        elif t + tpD + 1 < n_total:  #recursive tpD-step ahead prediction
            if t - n_train < tr1 - tp0:  #use given data if exists
                test['x'][t + tpD + 1, 0] = test['y'][t, 0]  #for check
            else:  #elif t+tpD+1<n_total:
                # NOTE(review): identical to the branch above — the
                # commented-out lines suggest it once used givendata here.
                test['x'][t + tpD + 1, 0] = test['y'][t, 0]
    if t + 1 < n_total:
        # shift the delay line: older inputs move one channel down
        test['x'][t + 1, 1:k] = test['x'][t, 0:k - 1]
        # test['x'][t+1,1:k1]=BIAS #no-need ? already set in load_data ?
    elapsed_time = time.time() - starttime
    # Prediction horizon: first step whose error exceeds tpEy.
    testerr = abs(test['Y'][n_train:n_total] - givendata['Y'][n_train:n_total])
    net['hpred'] = len(testerr)
    for t in range(len(testerr)):
        if testerr[t] > net['tpEy']:
            net['hpred'] = t
            break
    net['mes'] = '{} H{}(Ey{}) predTime{:.3f}s'.format(net['mes'],
                                                       net['hpred'],
                                                       net['tpEy'],
                                                       elapsed_time)
    # Assemble output columns: Y^, time, Y, y, error, selected unit.
    mo = xp.concatenate((test['Y'][n_train:n_total].astype(str).reshape(
        (-1, 1)), xp.array([t for t in range(tp0 + tpD, tp1 + tpD)],
                           dtype=str).reshape((-1, 1))), axis=1)
    mo = xp.concatenate(
        (mo, givendata['Y'][n_train:n_total].astype(str).reshape((-1, 1))),
        axis=1)  #pred ts, time
    mo = xp.concatenate((mo, test['y'][n_train:n_total].astype(str).reshape(
        (-1, 1))), axis=1)  #given ts
    mo = xp.concatenate(
        (mo, (test['Y'][n_train:n_total] -
              givendata['Y'][n_train:n_total]).astype(str).reshape((-1, 1))),
        axis=1)  #err
    mo = xp.concatenate((mo, netc[n_train:n_total].astype(str).reshape(
        (-1, 1))), axis=1)  #unit number selected
    df = pd.DataFrame(mo)
    df.to_csv("msp.dat", index=False, sep=' ', header=None)
def main(args):
    """Single-run CAN2 driver: train-and-test or load-and-predict.

    With no -fnl: loads data, initializes a fresh net, and (if -ex given)
    runs training/testing via sim.exec_sim. With -fnl: loads a saved net
    and either runs a multistep time-series test or a single regression
    prediction. Finally saves the net and its parameter tables (V, w, M, v)
    when -fns is given, and optionally plots the MSE curves.

    Parameters: args — parsed argparse namespace (see the argument setup).
    """
    # --- initialize ---
    set_random_seed(args.seed)
    givendata = {}
    test = {}
    net = {}
    net['seed'] = args.seed
    net['Tpinv'] = args.Tpinv
    net['pinvflag'] = 0
    net['NDS'] = -0.5
    # Batch update rule: recursive least squares, or pseudo-inverse (-pI).
    net['modify_M_batch'] = my_plinn.modify_M_batch_RLS if args.pinv == 0 else my_plinn.modify_M_batch_pinv
    net['nop'] = args.nop
    net['print'] = my_misc.print1 if args.nop == 0 else my_misc.noprint
    # FIX: map() is a lazy iterator in Python 3; materialize before len().
    _k = list(map(int, args.k.split(',')))
    if len(_k) >= 2:
        k1, k2 = _k
    else:
        k1 = _k[0]
        k2 = 0
    k = k1 + k2
    net['k'] = k
    fntest = '/dev/null'
    fnpred = 'tmp/predict.dat'
    fn = args.fn.split(',')
    fntrain = fn[0]
    if len(fn) >= 2: fntest = fn[1]
    if len(fn) >= 3: fnpred = fn[2]
    if args.t == '':
        net['data_class'] = 'reg'  # regression or function approximation
    else:
        net['data_class'] = 'ts'  # time_series
    if args.fnl == '':  # no net to be loaded
        net['fntrain'] = fntrain
        net['fntest'] = fntest
        net['fnpred'] = fnpred
        net['BIAS'] = args.BIAS
        net['r1'], net['r2'], net['r3'] = map(int, args.r.split(','))
        net['DISP'] = args.DISP
        net['ytrans'] = args.ytrans
        net['xtrans'] = args.xtrans
        net['t'] = args.t
        # --- load data (training and test) ---
        givendata, test, net = my_function.load_data(
            givendata, test, net)  # load_data() in my_function.c
        net['print']('Finish load_data.')
        # --- initialize net ---
        my_plinn.init_net(net, args)  # init_net() in my_plinn.c
        net['print']('Finish init_net.')
        # --- execute training-and-test, or single step prediction ---
        if args.ex != '':  # learning only when no net-load
            sim.exec_sim(net, givendata, test, args)
    else:  # net-load: else if args.fnl==''
        net = my_plinn.net_load(args)
        net['fntrain'] = fntrain
        net['fntest'] = fntest
        net['fnpred'] = fnpred
        net['t'] = args.t
        net['mes'] = ''
        k = net['k']
        givendata, test, net = my_function.load_data(
            givendata, test, net)  # load_data() in my_function.c
        if net['data_class'] == 'ts':  # time-series
            sim.exec_msp_test(net, givendata, test, args)
        elif args.fnl != '':  # regression: single prediction at t=0
            t = 0
            n_train = 0
            test['y'][t, 0], y, test['Y'][t, 0] = my_plinn.calc_output(
                net, test['x'][t, :], test['x'][t, :k])
            with open(net['fnpred'], 'w') as fp:
                fp.write('%.7e %d %.7e %.7e %.7e %.7e #Y^,t,Y,y,c,e2\n' %
                         (test['Y'][t], t - n_train + 1, givendata['Y'][t],
                          test['y'][t], net['c'][t], (test['e'][t])**2))
    # --- save results ---
    print(net['mes'])
    fnlst = []
    if args.fns != '':
        fnlst.append(args.fns)
        my_plinn.net_save(net, args.fns)
        fn = 'tmp/V.txt'
        fnlst.append(fn)
        # FIX: context manager instead of bare open()/close();
        # FIX: xrange -> range (xrange does not exist in Python 3).
        with open(fn, 'w') as fp:
            for i in range(net['n_cells']):
                # NOTE(review): no '\n' between entries — confirm intended.
                fp.write('V{} {}'.format(i, net['V']['ij2t'][i]))
        fn = 'tmp/w.csv'
        df = pdDataFrameGpu(net['w'], args.gpu)
        df.to_csv(fn, index=False, sep=' ', header=None, float_format='%.7e')
        fnlst.append(fn)
        fn = 'tmp/M.csv'
        df = pdDataFrameGpu(
            net['am']['M'].reshape((net['n_cells'], net['n_channels'] + 1)),
            args.gpu)
        df.to_csv(fn, index=False, sep=' ', header=None, float_format='%.7e')
        fnlst.append(fn)
        fn = 'tmp/v.csv'
        df = pdDataFrameGpu(net['v'], args.gpu)
        df.to_csv(fn, index=False, sep=' ', header=None)
        fnlst.append(fn)
    if 'MSE' in test:  # if args.ex!='':
        if args.DISP > 0:  # disp result
            fn = 'tmp/mse.csv'
            df = pd.DataFrame(test['MSE'])
            df.to_csv(fn, index=False, sep=' ', header=None,
                      float_format='%.7e')
            fnlst.append(fn)
            with open('tmp/mse.plt', 'w') as fp:
                fp.write('set grid;set title "T={} N={} seed={} Tpinv={}"\n'.format(
                    net['i_times'], net['n_cells'], net['seed'], net['Tpinv']))
                fp.write('set term postscript eps enhanced color;set output "tmp/mse.eps"\n')
                fp.write('set logscale y;set format y "%.1e"\n')
                fp.write('plot "tmp/mse.csv" using 0:1 w lp t "MSEtr","" using 0:2 w lp t "MSE"\n')
                fp.write('set term postscript eps enhanced color;set output "tmp/nmse.eps"\n')
                fp.write('plot "tmp/mse.csv" using 0:3 w lp t "NMSEtr","" using 0:4 w lp t "NMSE";quit\n')
            my_misc.myshell('gnuplot tmp/mse.plt')
            my_misc.myshell('gv tmp/mse.eps&')
    net['print']('#saved in {}'.format(fnlst))