Example #1
import scipy.io

import data_loader  # project-local module; copy_to_cpu moves a GPU array to host

def save_mat(net, path):
    # Save every parameterized layer's weights and biases to a MATLAB .mat file.
    d = dict()
    for l in net.layers:
        if hasattr(l, 'weight'):
            d[l.name + '_weight'] = data_loader.copy_to_cpu(l.weight)
            d[l.name + '_bias'] = data_loader.copy_to_cpu(l.bias)
    scipy.io.savemat(path, d)
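For reference, a minimal counterpart sketch (not part of the original code) that reads the parameters back; the key names mirror save_mat above:

import scipy.io

def load_mat(path):
    # Hypothetical helper: rebuild {layer_name: (weight, bias)} from a .mat
    # file written by save_mat. loadmat also returns metadata keys such as
    # '__header__', which the suffix filter below skips.
    d = scipy.io.loadmat(path)
    params = {}
    for key in d:
        if key.endswith('_weight'):
            name = key[:-len('_weight')]
            params[name] = (d[key], d[name + '_bias'])
    return params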
Example #2
import numpy as np

import data_loader  # project-local module

def get_labels(net, noisy_batches):
    # Build a one-hot label matrix of shape (num_classes, N); the batch size
    # is assumed to be a fixed 128 throughout.
    w = data_loader.copy_to_cpu(net.layers[-2].weight)
    N = len(noisy_batches) * 128
    y = np.zeros([w.shape[0], N], dtype=np.float32)
    c = 0
    for b in noisy_batches:
        labels = data_loader.copy_to_cpu(b.labels)
        for i in range(len(labels)):
            y[int(labels[i]), i + c] = 1
        c += 128
    return y
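The inner loop is a plain one-hot encoding. Assuming labels is a 1-D integer array, the same assignment can be done in one vectorized step; a self-contained sketch with a hypothetical helper name:

import numpy as np

def one_hot_columns(labels, num_classes, N, c):
    # Hypothetical equivalent of get_labels' inner loop: sets
    # y[labels[i], c + i] = 1 for every i in one fancy-indexing step.
    y = np.zeros([num_classes, N], dtype=np.float32)
    y[labels.astype(np.int64), np.arange(c, c + len(labels))] = 1
    return y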
Example #3
import numpy as np

import data_loader  # project-local module

def get_net_output(net, noisy_batches):
    # Forward-propagate every batch and collect the activations of the layer
    # three from the end (net.outputs[-3]) into one (features, N) matrix.
    w = data_loader.copy_to_cpu(net.layers[-2].weight)
    N = len(noisy_batches) * 128
    x = np.zeros([w.shape[1], N], dtype=np.float32)
    c = 0
    for b in noisy_batches:
        data, label = net.prepare_for_train(b.data, b.labels)  # label unused
        net.fprop(data, net.output, False)
        net_out = data_loader.copy_to_cpu(net.outputs[-3])
        x[:, c:c + 128] = net_out
        c += 128
    return x
Example #4
import scipy.io

import data_loader  # project-local module

def save_net(net, path):
    # Save the four weight/bias pairs of this fixed architecture (layers
    # 1, 5, 9, 12), plus the optional confusion and denoising matrices.
    d = dict()
    d['w1'] = data_loader.copy_to_cpu(net.layers[1].weight)
    d['w2'] = data_loader.copy_to_cpu(net.layers[5].weight)
    d['w3'] = data_loader.copy_to_cpu(net.layers[9].weight)
    d['w4'] = data_loader.copy_to_cpu(net.layers[12].weight)
    d['b1'] = data_loader.copy_to_cpu(net.layers[1].bias)
    d['b2'] = data_loader.copy_to_cpu(net.layers[5].bias)
    d['b3'] = data_loader.copy_to_cpu(net.layers[9].bias)
    d['b4'] = data_loader.copy_to_cpu(net.layers[12].bias)
    if len(net.layers) == 16:
        d['confw'] = data_loader.copy_to_cpu(net.layers[14].weight)
    if hasattr(net, 'W_denoise'):
        d['W_denoise'] = data_loader.copy_to_cpu(net.W_denoise)
    scipy.io.savemat(path, d)
Example #5
import cPickle  # Python 2; use the pickle module on Python 3

import data_loader  # project-local module

def save(net, path):
    # Pickle each parameterized layer's weights and biases together with its
    # index in net.layers, plus the accumulated training statistics.
    d = dict()
    d['layers'] = list()
    for i in range(len(net.layers)):
        l = net.layers[i]
        if hasattr(l, 'weight'):
            ld = dict()
            ld['weight'] = data_loader.copy_to_cpu(l.weight)
            ld['bias'] = data_loader.copy_to_cpu(l.bias)
            ld['ind'] = i
            d['layers'].append(ld)

    d['stat'] = net.stat
    with open(path, 'wb') as f:
        cPickle.dump(d, f, protocol=-1)
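A minimal loading counterpart (hypothetical, not in the original): unpickle the dictionary and copy each stored layer's parameters back to the GPU by its saved index, reusing copy_to_gpu as in Example #9 below:

import cPickle  # Python 2; use the pickle module on Python 3

import data_loader

def load(net, path):
    # Hypothetical inverse of save(): restore weights and biases into the
    # layers recorded under their 'ind' index.
    with open(path, 'rb') as f:
        d = cPickle.load(f)
    for ld in d['layers']:
        l = net.layers[ld['ind']]
        l.weight = data_loader.copy_to_gpu(ld['weight'])
        l.bias = data_loader.copy_to_gpu(ld['bias'])
    net.stat = d['stat']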
Example #6
import scipy.io

import data_loader  # project-local module

def save_net(net, path):
    # Same as the longer save_net in Example #4, minus the optional
    # confusion and denoising matrices.
    d = dict()
    d['w1'] = data_loader.copy_to_cpu(net.layers[1].weight)
    d['w2'] = data_loader.copy_to_cpu(net.layers[5].weight)
    d['w3'] = data_loader.copy_to_cpu(net.layers[9].weight)
    d['w4'] = data_loader.copy_to_cpu(net.layers[12].weight)
    d['b1'] = data_loader.copy_to_cpu(net.layers[1].bias)
    d['b2'] = data_loader.copy_to_cpu(net.layers[5].bias)
    d['b3'] = data_loader.copy_to_cpu(net.layers[9].bias)
    d['b4'] = data_loader.copy_to_cpu(net.layers[12].bias)
    scipy.io.savemat(path, d)
Example #7
import numpy as np

import data_loader  # project-local module

def get_scores(net, batches):
    # Evaluate each batch in test mode (TEST is a mode flag defined by the
    # training framework) and return the negated per-example cost, so that
    # a higher score means a lower loss.
    batch_size = batches[0].data.shape[1]
    N = len(batches) * batch_size
    score = np.zeros(N)
    i = 0
    for batch in batches:
        net.train_batch(batch.data, batch.labels, TEST)
        score[i:i + batch_size] = -data_loader.copy_to_cpu(
            net.layers[-1].cost).reshape(batch_size)
        i += batch_size
    return score
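Since higher scores mean lower cost, a typical use (assuming net and batches as above) is ranking examples by how badly the net fits them:

scores = get_scores(net, batches)
hardest_first = np.argsort(scores)  # highest-cost examples come first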
Example #8
import time

import matplotlib.pyplot as plt

import confusion_matrix  # project-local modules
import data_loader

def show_stat(net, test_batches):
    plt.clf()
    f = plt.gcf()

    # Panel 1: confusion matrix on the test set.
    c = confusion_matrix.get_confusion(net, test_batches)
    f.add_subplot(2, 2, 1)
    plt.imshow(c, interpolation='nearest')
    plt.colorbar()

    # Panel 2: weights of the second-to-last layer.
    f.add_subplot(2, 2, 2)
    m = data_loader.copy_to_cpu(net.layers[-2].weight)
    plt.imshow(m, interpolation='nearest')
    plt.colorbar()

    # Panel 3: test-error curve so far.
    f.add_subplot(2, 2, 3)
    plt.plot(net.stat['test-error'])

    plt.draw()
    time.sleep(0.05)
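Note that plt.draw() only refreshes an already-open window; when calling show_stat from a script, matplotlib's interactive mode is usually needed. A sketch (net, test_batches, and num_epochs are placeholders):

import matplotlib.pyplot as plt

plt.ion()      # interactive mode, so draw() updates without blocking
plt.figure()
for epoch in range(num_epochs):
    # ... train for one epoch ...
    show_stat(net, test_batches)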
Example #9
import data_loader  # project-local module

def update_confusion_matrix(net, noisy_batches, y, wc):
    # Re-fit the confusion layer's weights on the current net outputs (see
    # get_net_output in Example #3); `train` is an external helper whose
    # exact signature is project-specific.
    w = data_loader.copy_to_cpu(net.layers[-2].weight)
    x = get_net_output(net, noisy_batches)
    w = train(w, x, y, 1, wc, 50)
    w = train(w, x, y, 0.1, wc, 10)
    net.layers[-2].weight = data_loader.copy_to_gpu(w)
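Taken together, a hedged sketch of how these helpers might sit in a training loop (wc, num_epochs, and the per-epoch training call are placeholders):

y = get_labels(net, noisy_batches)  # fixed one-hot targets
for epoch in range(num_epochs):
    # ... train net for one epoch on noisy_batches ...
    update_confusion_matrix(net, noisy_batches, y, wc)
    show_stat(net, test_batches)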