Example #1
# Scale pixel intensities to [0, 1] and one-hot encode the digit labels.
Data_v = Data_v / 255.
Labels = np.array(train_y[index_train]).astype(int)
Data_l = np.zeros((train_num, 10))
for i in range(train_num):
    Data_l[i, Labels[i]] = 1.
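# Equivalent vectorized one-hot encoding (sketch only, assuming Labels holds
# the integer digits 0-9):
#     Data_l = np.eye(10)[Labels]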

# DBN layer sizes: 784 input pixels, three hidden layers, 10 label units.
nodes = [784, 500, 500, 2000, 10]
bsize = 10
iteration = 50
# eta: learning rate; epoc: number of training epochs.
dbnet = alg.init_label_dbn(Data_v, Data_l, nodes, eta=1e-3, batch_size=bsize, epoc=iteration)
dbnet = alg.greedy_train(dbnet)
# Drop the training data before saving so the stored network stays small.
dbnet['train_x'] = []
dbnet['train_y'] = []
predict, recon = alg.greedy_recon(dbnet, Data_v[0])
dbn_file = '%s_greedy_b%d_epoc%d' % (neuron, bsize, iteration)
alg.save_dict(dbnet, dbn_file)
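# save_dict/load_dict are helpers from the project's alg module; the
# '%s.pkl' check in the later examples suggests they pickle the dict to
# '<name>.pkl'. A rough sketch of the assumed behaviour, not the actual code:
#     import pickle
#     with open('%s.pkl' % dbn_file, 'wb') as f:
#         pickle.dump(dbnet, f)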

# Evaluate the greedily trained DBN on the MNIST test set.
test_x, test_y = mu.get_test_data()
index_digit = np.where(test_y >= 0)[0]
train_num = len(index_digit) - 1
index_train = index_digit[0:train_num]
test_v = np.array(test_x[index_train]).astype(float)
test_v = test_v / 255.
test_l = np.array(test_y[index_train]).astype(int)
dbnet = alg.test_label_data(dbnet, test_v, test_l)
predict, result = alg.dbn_greedy_test(dbnet)
# Report how many test samples fall into each result code.
print(np.where(result == False)[0].shape,
      np.where(result == 1)[0].shape,
      np.where(result == -1)[0].shape)
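# Overall accuracy can be derived from result (sketch, assuming a value of 1
# marks a correctly classified sample):
#     accuracy = float(np.sum(result == 1)) / result.shape[0]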

'''
# fine training (this commented-out block is truncated in the source snippet)
dbnet['train_x'] = Data_v
'''
Example #2
# Reuse previously scaled weights if a cached pickle exists; otherwise compute
# them with sr.w_adjust and cache the result for later runs.
if os.path.isfile('%s.pkl' % w_listf):
    scaled_w = alg.load_dict(w_listf)
    w = scaled_w['w']
    k = scaled_w['k']
    x0 = scaled_w['x0']
    y0 = scaled_w['y0']
    print('found w_list file')
else:
    w, k, x0, y0 = sr.w_adjust(dbnet, cell_params_lif)
    scaled_w = {}
    scaled_w['w'] = w
    scaled_w['k'] = k
    scaled_w['x0'] = x0
    scaled_w['y0'] = y0
    alg.save_dict(scaled_w, w_listf)

num_test = 10     # number of test images presented per batch
random.seed(0)
dur_test = 1000   # duration of each test presentation
silence = 200     # silent interval between presentations
test_x = dbnet['test_x']
# One row per test sample, presumably (prediction, true label).
result_list = np.zeros((test_x.shape[0], 2))

# Run the test set through the spiking network in batches of num_test images.
for offset in range(0, test_x.shape[0], num_test):
#for offset in range(0, 1000, num_test):
    print(offset)
    test = test_x[offset:(offset + num_test), :]
    # Convert the pixel intensities of this batch into input spike trains.
    spike_source_data = sr.gen_spike_source(test)
    spikes = sr.run_test(w, cell_params_lif, spike_source_data)
    spike_count = list()
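    # The loop body continues beyond this snippet. As a hypothetical sketch
    # (not the project's truncated code), a typical readout for a spiking
    # classifier counts the spikes of each label neuron and predicts the
    # digit with the most spikes:
    #     pred = int(np.argmax(spike_count))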
Example #3
nodes = [784, 500, 500, 2000, 10]
bsize = 10
iteration = 50
dbnet = alg.init_label_dbn(Data_v,
                           Data_l,
                           nodes,
                           eta=1e-3,
                           batch_size=bsize,
                           epoc=iteration)
dbnet = alg.greedy_train(dbnet)
dbnet['train_x'] = []
dbnet['train_y'] = []
predict, recon = alg.greedy_recon(dbnet, Data_v[0])
dbn_file = '%s_greedy_b%d_epoc%d' % (neuron, bsize, iteration)
alg.save_dict(dbnet, dbn_file)

test_x, test_y = mu.get_test_data()
index_digit = np.where(test_y >= 0)[0]
train_num = len(index_digit) - 1
index_train = index_digit[0:train_num]
test_v = np.array(test_x[index_train]).astype(float)
test_v = test_v / 255.
test_l = np.array(test_y[index_train]).astype(int)
dbnet = alg.test_label_data(dbnet, test_v, test_l)
predict, result = alg.dbn_greedy_test(dbnet)
# Report how many test samples fall into each result code.
print(np.where(result == False)[0].shape,
      np.where(result == 1)[0].shape,
      np.where(result == -1)[0].shape)
'''
# fine training (this commented-out block is truncated in the source snippet)
dbnet['train_x'] = Data_v
'''
Example #4
if os.path.isfile('%s.pkl' % w_listf):
    scaled_w = alg.load_dict(w_listf)
    w = scaled_w['w']
    k = scaled_w['k']
    x0 = scaled_w['x0']
    y0 = scaled_w['y0']
    print('found w_list file')
else:
    w, k, x0, y0 = sr.w_adjust(dbnet, cell_params_lif)
    scaled_w = {}
    scaled_w['w'] = w
    scaled_w['k'] = k
    scaled_w['x0'] = x0
    scaled_w['y0'] = y0
    alg.save_dict(scaled_w, w_listf)

num_test = 10
random.seed(0)
dur_test = 1000
silence = 200
test_x = dbnet['test_x']
result_list = np.zeros((test_x.shape[0], 2))

for offset in range(0, test_x.shape[0], num_test):
    #for offset in range(0, 1000, num_test):
    print(offset)
    test = test_x[offset:(offset + num_test), :]
    spike_source_data = sr.gen_spike_source(test)
    spikes = sr.run_test(w, cell_params_lif, spike_source_data)
    spike_count = list()