def load_avg_convergence(d, prefix = ''):
    """Average the accuracy histories of several repeated runs.

    Parameters
    ----------
    d : sequence of str
        Result sub-directory names (relative to *prefix*), one per run.
    prefix : str
        Path prepended to every entry of *d*.

    Returns
    -------
    (ndarray, dict)
        Element-wise mean of the runs' 'acc_hist.pkl' arrays, and the
        'context.pkl' of the *last* directory (the runs are assumed to
        share the same experiment context).

    Raises
    ------
    ValueError
        If *d* is empty (the original code raised NameError on the
        unbound `context` instead).
    """
    if not d:
        raise ValueError('need at least one results directory')
    c = []
    context = None
    for d_ in d:
        et.globaldata.directory = prefix + d_
        context = et.load('context.pkl')
        c.append(np.array(et.load('acc_hist.pkl')))
    return np.mean(c, axis = 0), context
def plot_mac():
    """Plot cumulative MACs of the GPU backprop run vs. MNIST accuracy.

    Loads the context and spike counts of spiking run 133 (784-nh-nc
    network) and the GPU BP convergence pickle (bp_100), then plots the
    cumulative GPU MAC count against test accuracy.
    """
    et.globaldata.directory = '/homes/eneftci/work/code/C/auryn_rbp/build/release/experiments/Results/133__17-01-2017/'
    context = et.load('context.pkl')
    convergence_perbp = np.array(et.load('acc_hist.pkl'))
    nh = context['nh']
    nc = context['nc']
    nv = context['nv']
    spk_cnt = et.load('spkcnt.pkl')
    acc_hist = np.array(et.load('acc_hist.pkl'))
    # Per-population spike counts; indices presumably follow the recording
    # order (error2, error1, visible, output, hidden) -- TODO confirm.
    nerr2 = extract_spk_cnt(spk_cnt, 0)
    nerr1 = extract_spk_cnt(spk_cnt, 1)
    nvis = extract_spk_cnt(spk_cnt, 2)
    nout = extract_spk_cnt(spk_cnt, 3)
    nhid = extract_spk_cnt(spk_cnt, 4)
    import pickle
    # Fix: the original opened the same pickle twice (leaking both file
    # handles) to read two keys; load it once and close deterministically.
    with open('/home/eneftci/Projects/code/python/dtp/Results/bp_100.pkl', 'r') as fh:
        bp = pickle.load(fh)
    cgpubp = np.array(bp['test'])
    tbp = np.array(bp['epoch'])
    # Cumulative MACs per epoch (forward + backward passes) over 2000
    # epochs of 50000 samples each.
    ss = np.cumsum((nv*nh + nh*nc + nv*nh + nh*nc)*np.ones(2000)*50000)
    #plot(acc_hist[:,1],np.cumsum(nvis*nh + nhid*nc + 2*nout*nc + (nerr1+nerr2)*(nc+nh)).astype('float'),'r.-', linewidth=3)
    plot(1-cgpubp/100,ss[tbp-1].astype('float'),'g.-', linewidth=3)
    ylabel('MACs')
    xlabel('MNIST Accuracy')
def plot_synop_mac():
    """Plot spiking SynOps vs. GPU MACs against accuracy and save the figure.

    Compares the cumulative synaptic operations of spiking run 135
    (784-200-200-10 peRBP) with the cumulative MACs of the GPU BP run
    (bp_200_200) on a semilog-y plot; writes 'Results/synop_mac.png'.
    """
    et.globaldata.directory = '/homes/eneftci/work/code/C/auryn_rbp/build/release/experiments/Results/135__27-01-2017/'
    context = et.load('context.pkl')
    convergence_perbp = np.array(et.load('acc_hist.pkl'))
    nh = context['nh']
    nc = context['nc']
    nv = context['nv']
    spk_cnt = et.load('spkcnt.pkl')
    acc_hist = np.array(et.load('acc_hist.pkl'))
    # Per-population spike counts; indices presumably follow the recording
    # order (error2, error1, visible, output, hidden) -- TODO confirm.
    nerr2 = extract_spk_cnt(spk_cnt, 0)
    nerr1 = extract_spk_cnt(spk_cnt, 1)
    nvis = extract_spk_cnt(spk_cnt, 2)
    nout = extract_spk_cnt(spk_cnt, 3)
    nhid = extract_spk_cnt(spk_cnt, 4)
    import pickle
    # Fix: the original opened the same pickle twice (leaking both file
    # handles) to read two keys; load it once and close deterministically.
    with open('/home/eneftci/Projects/code/python/dtp/Results/bp_200_200.pkl', 'r') as fh:
        bp = pickle.load(fh)
    cgpubp = np.array(bp['test'])
    tbp = np.array(bp['epoch'])
    # Cumulative MACs per epoch (forward + backward passes) over 2000
    # epochs of 50000 samples each.
    ss = np.cumsum((nv*nh + nh*nc + nv*nh + nh*nc)*np.ones(2000)*50000)
    figure(figsize=(6,4))
    semilogy(acc_hist[:,1],np.cumsum(nvis*nh + nhid*nc + 2*nout*nc + (nerr1+nerr2)*(nc+nh)).astype('float'),'r.-', linewidth=3, label='peRBP (Spiking)')
    semilogy(1-cgpubp/100,ss[tbp-1].astype('float'),'k.-', linewidth=3, label='BP (GPU)', alpha=.6)
    ylabel('MACs / SynOps')
    xlabel('Accuracy')
    legend(loc=2)
    xticks([.75,.85,.90,.95,1.0])
    tight_layout()
    savefig('Results/synop_mac.png', format='png', dpi=1200)
def plot_convergence_200_200():
    """Plot error-vs-epochs convergence for the 784-200-200-10 network.

    Overlays GPU RBP, GPU BP, spiking eRBP (run 173) and spiking peRBP
    (run 135) curves and saves
    'Results/convergence_erbp_auryn_200_200.png'.
    """
    #stim_show(M['vh'].todense()[:,:100].T)
    #et.savefig('features_100.png')
    pylab.ion()
    import pickle
    # Fix: each GPU pickle was previously opened twice (leaking the file
    # handles) to read two keys; load each file once instead.
    with open('/home/eneftci/Projects/code/python/dtp/Results/rbp_200_200.pkl', 'r') as fh:
        rbp = pickle.load(fh)
    cgpu = np.array(rbp['test'])
    trbp = np.array(rbp['epoch'])
    with open('/home/eneftci/Projects/code/python/dtp/Results/bp_200_200.pkl', 'r') as fh:
        bp = pickle.load(fh)
    cgpubp = np.array(bp['test'])
    tbp = np.array(bp['epoch'])
    et.globaldata.directory = '/homes/eneftci/work/code/C/auryn_rbp/build/release/experiments/Results/135__27-01-2017/'
    context = et.load('context.pkl')
    convergence_perbp = np.array(et.load('acc_hist.pkl'))
    et.globaldata.directory = '/homes/eneftci/work/code/C/auryn_rbp/build/release/experiments/Results/173__31-03-2017/'
    context = et.load('context.pkl')
    convergence_erbp = np.array(et.load('acc_hist.pkl'))
    figure(figsize=(6,4))
    title('784-200-200-10')
    # Epochs per acc_hist entry (50000 training samples per epoch).
    v = context['n_samples_train']/50000
    semilogx(trbp,cgpu[:], 'b.-', linewidth=3, alpha=.6, label='RBP (GPU)')
    semilogx(tbp,cgpubp[:], 'k.-', linewidth=3, alpha=.6, label='BP (GPU)')
    semilogx(v+convergence_erbp[:-1,0]*v,100-100*convergence_erbp[:-1,1], 'g.-',linewidth=3, alpha=.6, label='eRBP (Spiking)')
    semilogx(v+convergence_perbp[:-1,0]*v,100-100*convergence_perbp[:-1,1], 'r.-',linewidth=3, alpha=.6, label='peRBP (Spiking)')
    # Final error %, averaged over the last 5 test points of each run.
    print("200-200 perbp: {0:1.2f} erbp: {1:1.2f}".format(100*np.mean(1-convergence_perbp[-5:,1]),100*np.mean(1-convergence_erbp[-5:,1])))
    xlim([0,2000])
    xlabel('Epochs')
    ylabel('Error %')
    xticks([1,10,100,1000,2000],[1,10,100,1000,2000],rotation=45)
    ylim([0,15])
    #legend(frameon=False)
    tight_layout()
    savefig('Results/convergence_erbp_auryn_200_200.png', format='png', dpi=1200)
def plot_convergence_100():
    """Plot error-vs-epochs convergence for the 784-100-10 network.

    Overlays GPU RBP, GPU BP, and the 4-run averages of spiking eRBP and
    peRBP; saves 'Results/convergence_erbp_auryn_100.png'.
    """
    #stim_show(M['vh'].todense()[:,:100].T)
    #et.savefig('features_100.png')
    pylab.ion()
    import pickle
    # Fix: each GPU pickle was previously opened twice (leaking the file
    # handles) to read two keys; load each file once instead.
    with open('/home/eneftci/Projects/code/python/dtp/Results/rbp_100.pkl', 'r') as fh:
        rbp = pickle.load(fh)
    cgpu = np.array(rbp['test'])
    trbp = np.array(rbp['epoch'])
    with open('/home/eneftci/Projects/code/python/dtp/Results/bp_100.pkl', 'r') as fh:
        bp = pickle.load(fh)
    cgpubp = np.array(bp['test'])
    tbp = np.array(bp['epoch'])
    # eRBP: average over four repeated runs.
    d = ['013__28-01-2017/', '014__28-01-2017/', '015__28-01-2017/', '016__28-01-2017/']
    convergence_erbp, context = load_avg_convergence(d, prefix = '/home/eneftci/Projects/code/C/auryn_rbp/build/release/experiments/Results_scripts/')
    assert context['sigma'] > 0
    assert context['nh'] ==100
    # Fix: a leftover `convergence_erbp = np.array(et.load('acc_hist.pkl'))`
    # here reloaded the last single run, clobbering the 4-run average; the
    # parallel peRBP branch below has no such line, so it was removed.
    # peRBP: average over four repeated runs.
    d = ['002__22-01-2017/', '003__22-01-2017/', '004__22-01-2017/', '001__22-01-2017/']
    convergence_perbp, context = load_avg_convergence(d, prefix = '/home/eneftci/Projects/code/C/auryn_rbp/build/release/experiments/Results_scripts/')
    assert context['sigma'] == 0
    assert context['nh'] ==100
    # Final error %, averaged over the last 5 test points of each run.
    print("100 perbp: {0:1.2f} erbp: {1:1.2f}".format(100*np.mean(1-convergence_perbp[-5:,1]),100*np.mean(1-convergence_erbp[-5:,1])))
    figure(figsize=(6,4))
    title('784-100-10')
    # Epochs per acc_hist entry (50000 training samples per epoch).
    v = context['n_samples_train']/50000
    semilogx(trbp,cgpu[:], 'b.-', linewidth=3, alpha=.6, label='RBP (GPU)')
    semilogx(tbp,cgpubp[:], 'k.-', linewidth=3, alpha=.6, label='BP (GPU)')
    semilogx(v+convergence_erbp[:-1,0]*v,100-100*convergence_erbp[:-1,1], 'g.-',linewidth=3, alpha=.6, label='eRBP (Spiking)')
    semilogx(v+convergence_perbp[:-1,0]*v,100-100*convergence_perbp[:-1,1], 'r.-',linewidth=3, alpha=.6, label='peRBP (Spiking)')
    xlim([0,2000])
    xlabel('Epochs')
    ylabel('Error %')
    ylim([0,15])
    xticks([1,10,100,1000,2000],[1,10,100,1000,2000],rotation=45)
    legend(frameon=False, loc=0,prop={'size':16})
    draw()
    tight_layout()
    savefig('Results/convergence_erbp_auryn_100.png', format='png', dpi=1200)
# NOTE(review): fragment of a larger training script; `hot_init`,
# `directory`, `context`, `os`, `et` and the helper functions are defined
# elsewhere in the file. Indentation reconstructed from a collapsed source.
n_samples_test = context['n_samples_test']
n_epochs = context['n_epochs']
test_every = context['test_every'] #never test
if not hot_init and directory is None:
    print 'Cold initialization..'
    # Rebuild the input spike-train directories and initial weights from
    # scratch for this run.
    os.system('rm -rf inputs/{directory}/train/'.format(**context))
    os.system('rm -rf inputs/{directory}/test/'.format(**context))
    os.system('mkdir -p inputs/{directory}/train/' .format(**context))
    os.system('mkdir -p inputs/{directory}/test/' .format(**context))
    create_cnn_init(base_filename = 'inputs/{directory}/train/fwmat'.format(**context), **context)
elif directory is not None:
    # Warm start: reload the parameter set of a previous run.
    print 'Loading previous run...'
    et.globaldata.directory = directory
    M = et.load('M.pkl')
# Post-process parameters regardless of initialization path.
# NOTE(review): this rebinds M unconditionally — presumably
# process_allparameters_rbp derives M from `context`; confirm intent.
M = process_allparameters_rbp(context)
#save_parameters(M, context)
if test_every>0:
    # Generate the test data spike trains once up front.
    labels_test, SL_test = create_data_rbp(
        n_samples = n_samples_test,
        output_directory = '{directory}/test'.format(**context),
        data_url = context['test_data_url'],
        labels_url = context['test_labels_url'],
        randomize = False,
        with_labels = False,
        duration_data = context['sample_duration_test'],
        duration_pause = context['sample_pause_test'],
        generate_sl = False,
        **context)
# NOTE(review): fragment of a getopt-style option loop — the enclosing
# `for o, a in opts:` (and the `if` whose body is `hot_init = True`) lie
# above this view. Indentation reconstructed from a collapsed source.
        hot_init = True
    if o == '-n': #change number of epochs
        context['n_epochs'] = int(a)
    if o == '-t':
        # Test interval (in epochs).
        context['test_every'] = int(a)
    if o == '-d':
        # Results directory to load.
        directory = a
    if o == '-s':
        save = True
    if o == '-g':
        generate = True
    if o == '-c': #custom parameters passed through input arguments
        # SECURITY NOTE(review): eval() on a command-line string — only
        # safe for trusted local use.
        context.update(eval(a))
# Load the saved context and override the test-set configuration:
# 10000 samples, .5 s stimulus each, no inter-sample pause.
et.globaldata.directory = directory
context = et.load('context.pkl')
context['n_samples_test'] = 10000
context['sample_duration_test'] = .5
context['sample_pause_test'] = .0
context['simtime_test'] = context['n_samples_test'] * ( context['sample_duration_test'] + context['sample_pause_test'])
# Backward compatibility with contexts saved before these keys existed.
if not context.has_key('prob_syn'):
    context['prob_syn'] = 1.0
if not context.has_key('sigma'):
    context['sigma'] = 50e-3
n_samples_train = context['n_samples_train']
n_samples_test = context['n_samples_test']
n_epochs = context['n_epochs']
test_every = context['test_every'] #never test
M = et.load('M.pkl')
import pickle
import numpy as np
import matplotlib.pylab as plt
import experimentTools as ext
from matplotlib import gridspec
from pyNSATlib import nsat_reader
# from experimentLib import *

if __name__ == '__main__':
    # idir = "005__05-04-2017/"
    idir = "Results/016__22-09-2017/"
    cfg = ext.load(idir + 'cfg_train.pkl')
    # Cumulative per-neuron event counts over the run.
    nsat_stats = np.cumsum(pickle.load( file('Results/016__22-09-2017/stats_nsat.pkl', 'r')), axis=0)
    # Column layout appears to be [0:100] hidden, [100:110] output,
    # [110:] error units — TODO confirm against the recorder setup.
    stats_h = nsat_stats[:, :100].sum(axis=1)
    stats_o = nsat_stats[:, 100:110].sum(axis=1)
    stats_e = nsat_stats[:, 110:].sum(axis=1)
    # Flattened weight slices: input->hidden (w0), hidden->output (w1),
    # and a second-core block (w2) — index meanings inferred, verify.
    weights = nsat_reader.read_synaptic_weights(cfg[0], idir + '/weights.pkl', idir + '/ptr.pkl')
    w0 = weights[:794, 794:894, 0].flatten()
    w1 = weights[793:893, 894:(894 + 10), 0].flatten()
    w2 = weights[904:, 794:894, 1].flatten()
    W = [w0, w1, w2]
    epo, err = np.array(pickle.load(file(idir + 'pip.pkl', 'r'))).T
    print(epo)
    # w = cfg.core_cfgs[0].W[:794, 794:894, 1]
    # np.append(w, cfg.core_cfgs[0].W[:])
    # NOTE(review): statement continues past this view of the file.
    epo_tf, acc_tf, macs_tf = pickle.load(
# Licence : GPLv2 #----------------------------------------------------------------------------- from experimentLib import * import experimentTools as et from pylab import * import matplotlib, pickle font = {'family': 'normal', 'weight': 'bold', 'size': 14} matplotlib.rc('font', **font) matplotlib.rc('savefig', dpi=600) dnsat = '/home/eneftci/Projects/code/python/HiAER-NSAT/examples/erbp/Results/021__15-01-2017/' et.globaldata.directory = '/homes/eneftci/work/code/C/auryn_rbp/build/release/experiments/Results/127__13-01-2017/' convergence = np.array(et.load('acc_hist.pkl')) context = et.load('context.pkl') figure(figsize=(6, 4)) v = context['n_samples_train'] / 50000 semilogx(v + (convergence[:, 0]) * v, 100 - 100 * convergence[:, 1], 'o-', linewidth=3, alpha=.6, label='peRBP (Spiking 64-bit)') tt, nsat = np.array(pickle.load(file(dnsat + 'pip.pkl', 'r'))).T semilogx(1 + tt, 100 - nsat, 'x-', linewidth=3, alpha=.6,
# Create a fresh results directory and record the run context in it.
et.mksavedir()
et.globaldata.context = context

# Fresh-run defaults; overwritten below when resuming.
start_epoch = 0
acc_hist = []
snr_hist = []
weight_stats = {}
output_weights = []
spkcnt = [None for i in range(context['n_epochs'])]

# Resume from a previous results directory when --resume was given.
# Fix: the original used `is not ''` / `is not '/'` — identity tests
# against string literals, which depend on interning and raise a
# SyntaxWarning on modern CPython; use equality instead.
if args.resume != '':
    if args.resume[-1] != '/':
        args.resume += '/'
    print('Loading previous run from {}'.format(args.resume))
    et.globaldata.directory = args.resume
    M = et.load('M.pkl')
    old_context = et.load('context.pkl')
    acc_hist = et.load('acc_hist.pkl')
    spkcnt = et.load('spkcnt.pkl')
    snr_hist = et.load('snr_hist.pkl')
    bestM = et.load('bestM.pkl')
    # Continue from the epoch after the last recorded accuracy entry.
    start_epoch = acc_hist[-1][0] + 1
    context.update(old_context)
    # take n_cores and test interval from current args
    context['ncores'] = args.n_cores
    context['test_every'] = args.testinterval
    print('Restored context: {}'.format(context))
    # Write the restored parameters back for the simulator.
    elib.write_allparameters_rbp(M, context)

max_samples_train = context['max_samples_train']
if __name__ == '__main__':
    try:
        # NOTE(review): the matching except/finally for this `try` lies
        # beyond this view; the fragment is reproduced unchanged, with
        # indentation reconstructed from a collapsed source.
        last_perf = (0.0, 0.0)
        # Run flags for this evaluation script.
        init = True # initialize weights?
        new_test_data = True # generate new test data ras files?
        test = True # test before first training epoch?
        save = True # save results to result folder?
        # folder = '066__21-07-2018'
        # directory = 'Results/{}/'.format(folder)
        directory = None
        if directory is not None:
            # Warm start: reload parameters and context of a prior run.
            print 'Loading previous run...'
            et.globaldata.directory = directory
            M = et.load('M.pkl')
            old_context = et.load('context.pkl')
            context.update(old_context)
            elib.write_allparameters_rbp(M, context)
        # Override settings for a short debug/evaluation run: test every
        # 2 epochs, no training epochs, tiny train set, 1000 test samples.
        context['test_every'] = 2
        context['n_epochs'] = 0
        context['n_samples_train'] = 10
        context['eta'] = 1e-7
        context['n_samples_test'] = 1000
        max_samples_train = context['max_samples_train']
        max_samples_test = context['max_samples_test']
        n_samples_train = context['n_samples_train']
        n_samples_test = context['n_samples_test']
        n_epochs = context['n_epochs']
        test_every = context['test_every']