def test(self, input_matrix, teacher_matrix):
    """Function to test the network

    @param input_matrix -- matrix consisting of input data to the network.
    @param teacher_matrix -- matrix consisting of labels of input data.
    """
    number_of_pictures = input_matrix.shape[-1]
    mse = 0
    squared_errors = cp.dev_matrix_cmf(self.neuron_layer[-1].deltas.h,
                                       self.neuron_layer[-1].deltas.w)
    for batch in xrange(number_of_pictures / self.batch_size):
        index_begin = self.batch_size * batch
        index_end = index_begin + self.batch_size

        # Push the current input and teacher batch to GPU memory
        self.neuron_layer[0].activations = cp.push(
            input_matrix[:, index_begin:index_end].astype('float32').copy('F'))
        teachbatch = cp.push(
            teacher_matrix[:, index_begin:index_end].astype('float32').copy('F'))

        # Forward-Pass
        for i in xrange(self.number_of_layers):
            self.weight_layer[i].forward()

        # Error at the output layer: deltas = teacher - activations (as in train())
        cp.apply_binary_functor(self.neuron_layer[-1].deltas, teachbatch,
                                cp.binary_functor.COPY)
        cp.apply_binary_functor(self.neuron_layer[-1].deltas,
                                self.neuron_layer[-1].activations,
                                cp.binary_functor.SUBTRACT)
        cp.apply_binary_functor(squared_errors, self.neuron_layer[-1].deltas,
                                cp.binary_functor.COPY)
        cp.apply_scalar_functor(squared_errors, cp.scalar_functor.SQUARE)
        mse += cp.sum(squared_errors)

        teachbatch.dealloc()
    print "MSE: ", (mse / number_of_pictures)
    squared_errors.dealloc()
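# A minimal host-side reference for the MSE bookkeeping done on the GPU above,
# in plain NumPy. This is only a sketch and not part of the original code:
# `predict` is a hypothetical stand-in for the forward pass, mapping an input
# batch to output activations.
import numpy as np

def mse_host(input_matrix, teacher_matrix, predict, batch_size):
    """Accumulate squared output errors batch-wise, as test() does on the GPU."""
    number_of_pictures = input_matrix.shape[-1]
    sse = 0.0
    for batch in xrange(number_of_pictures / batch_size):
        lo = batch * batch_size
        hi = lo + batch_size
        out = predict(input_matrix[:, lo:hi])  # forward pass
        sse += np.sum((teacher_matrix[:, lo:hi] - out) ** 2)
    return sse / number_of_pictures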
def loadLastLayer(self, dim1, dim2):
    """Load the fine-tuned top-layer weights and bias from workdir if both
    files exist; return 1 on success, 0 otherwise."""
    fn = os.path.join(self.cfg.workdir,
                      "weights-%d-finetune.npy" % (self.NumberOfLayers - 1))
    fn_bias = os.path.join(self.cfg.workdir,
                           "bias-%d-finetune.npy" % (self.NumberOfLayers - 1))
    if os.path.exists(fn) and os.path.exists(fn_bias):
        top_weights = np.load(fn)
        assert (dim1, dim2) == top_weights.shape
        self.Weights.append(cp.push(top_weights))
        top_bias = np.load(fn_bias)
        assert dim2 == top_bias.shape[0]
        self.Bias.append(cp.push(top_bias))
        return 1
    else:
        return 0
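# Hypothetical counterpart that produces the files loadLastLayer() expects.
# This is only a sketch (saveLastLayer is not part of the original code); it
# assumes self.Weights / self.Bias hold device matrices that cp.pull() copies
# back to NumPy arrays.
def saveLastLayer(self):
    """Write the current top-layer weights and bias to workdir."""
    fn = os.path.join(self.cfg.workdir,
                      "weights-%d-finetune.npy" % (self.NumberOfLayers - 1))
    fn_bias = os.path.join(self.cfg.workdir,
                           "bias-%d-finetune.npy" % (self.NumberOfLayers - 1))
    np.save(fn, cp.pull(self.Weights[-1]))
    np.save(fn_bias, cp.pull(self.Bias[-1]))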
def gray_test(ni):
    """Move a tiled single-channel test image on the GPU (cp.image_move)
    and display the result."""
    src = cp.push(to_cmuc(np.tile(ni, (1, 4))))
    dst = cp.dev_matrix_cmf(src.h, src.w)
    cp.fill(dst, 0)
    cp.image_move(dst, src, 128, 128, 1, -10, -4)
    res = cp.pull(dst)
    #set_trace()
    plt.matshow(res[0:128**2, 0].reshape(128, 128))
    plt.colorbar()
    plt.show()
def color_test(ni):
    """Move a tiled four-channel test image on the GPU (cp.image_move)
    and display the first three channels of the result."""
    ts = 128
    src = cp.push(to_cmuc(np.tile(ni, (1, 4))))
    dst = cp.dev_matrix_cmf(ts**2 * 3, src.w)
    cp.fill(dst, 0)
    cp.image_move(dst, src, 128, ts, 4, -10, -4)
    res = cp.pull(dst)
    plt.matshow(res[0:ts**2, 0].reshape(ts, ts), cmap=plt.cm.bone_r)
    plt.matshow(res[ts**2:2 * ts**2, 0].reshape(ts, ts), cmap=plt.cm.bone_r)
    plt.matshow(res[2 * ts**2:3 * ts**2, 0].reshape(ts, ts), cmap=plt.cm.bone_r)
    plt.show()
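# A possible driver for gray_test/color_test above (a sketch, not part of the
# original code). It assumes gray_test() expects one flattened 128x128
# grayscale image per column, i.e. shape (128*128, 1), and color_test() an
# interleaved RGBA column of shape (128*128*4, 1); np.tile inside those
# functions then replicates the column into a small batch.
def run_image_move_tests():
    img = Image.open("tests/data/lena.bmp").resize((128, 128))
    gray = np.asarray(img.convert("L"), dtype=np.uint8).reshape(-1, 1)
    rgba = np.asarray(img.convert("RGBA"), dtype=np.uint8).reshape(-1, 1)
    gray_test(gray)
    color_test(rgba)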
def test_pixel_classes():
    """Build an image pyramid from lena.bmp on the GPU, smooth its base level
    and visualize the pixel classes computed by cp.get_pixel_classes."""
    w, h = 512, 512
    input_channels, pyramid_channels = 4, 3
    pic = Image.open("tests/data/lena.bmp").resize((w, h)).convert("RGBA")
    pic = np.asarray(pic).reshape(h, w * 4)
    pic_d = cp.push(pic)
    pyr = cp.dev_image_pyramid_f(pic_d.h / 2, pic_d.w / input_channels / 2,
                                 4, pyramid_channels)
    pyr.build(pic_d, 4)
    plt.matshow(pic[0:h:2, 0:4 * w:8])
    #plt.matshow(cp.pull(pyr.get(1,0)))
    #plt.title("Channel0")
    #plt.matshow(cp.pull(pyr.get(1,1)))
    #plt.title("Channel1")
    #plt.matshow(cp.pull(pyr.get(1,2)))
    #plt.title("Channel2")
    #plt.matshow(cp.pull(pyr.get_all_channels(1)))
    #plt.title("allchannels level 1")
    #plt.show()

    # create source image from higher level of pyramid
    pic1 = pyr.get_all_channels(0)
    for i in xrange(10):
        smooth(pic1)
    plt.matshow(cp.pull(pic1)[:h / 2, :w])
    ca = cp.dev_cuda_array_f(pic1.h, pic1.w, 1)
    ca.assign(pic1)

    # create destination matrix
    pic0 = pyr.get(0)
    dst = cp.dev_matrix_rmuc(pic0.h, pic0.w * 4)  # uchar4

    # generate pixel classes and visualize
    cp.get_pixel_classes(dst, ca, 1)
    tmp = cp.pull(dst)
    tmp = Image.frombuffer("CMYK", (pic0.w, pic0.h),
                           cp.pull(dst).flatten(),
                           "raw", "CMYK", 0, 1).resize((2 * 512, 2 * 512),
                                                       Image.NEAREST)
    tmp.show()
    print cp.pull(dst)
    plt.show()
def build_pyramid_GPU(pic, input_channels, pyramid_channels):
    """Push an interleaved multi-channel image to the GPU and build an
    image pyramid from it."""
    pic_d = cp.push(pic)
    pyr = cp.dev_image_pyramid_f(pic_d.h / 2, pic_d.w / input_channels / 2,
                                 4, pyramid_channels)
    pyr.build(pic_d, 4)
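# A possible call of build_pyramid_GPU, mirroring the image preparation used
# in test_pixel_classes above (a sketch, not part of the original code): the
# RGBA image is flattened into an (h, w*4) interleaved uint8 matrix before
# being pushed to the device.
def build_pyramid_example():
    w, h = 512, 512
    pic = Image.open("tests/data/lena.bmp").resize((w, h)).convert("RGBA")
    pic = np.asarray(pic).reshape(h, w * 4)
    build_pyramid_GPU(pic, input_channels=4, pyramid_channels=3)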
class MLP:
    """ A Multi-Layer Perceptron """

    def __init__(self, neurons, batch_size):
        """Constructor

        @param neurons -- array of sizes of layers.
        @param batch_size -- size of batch being used for training.
        """
        self.number_of_layers = len(neurons) - 1
        self.batch_size = batch_size
        self.neuron_layer = []
        self.weight_layer = []
        for i in xrange(self.number_of_layers + 1):
            dim1 = neurons[i]
            self.neuron_layer.append(neuron_layer(dim1, self.batch_size))
        for i in xrange(self.number_of_layers):
            self.weight_layer.append(weight_layer(self.neuron_layer[i],
                                                  self.neuron_layer[i + 1]))

    def train(self, input_matrix, teacher_matrix, number_of_epochs):
        """Function to train the network

        @param input_matrix -- matrix consisting of input data to the network.
        @param teacher_matrix -- matrix consisting of labels of input data.
        @param number_of_epochs -- number of rounds the network is to be trained.
        """
        number_of_pictures = input_matrix.shape[-1]
        squared_errors = cp.dev_matrix_cmf(self.neuron_layer[-1].deltas.h,
                                           self.neuron_layer[-1].deltas.w)
        for r in xrange(number_of_epochs):
            print "Epoch ", r + 1, "/", number_of_epochs
            mse = 0
            for batch in xrange(number_of_pictures / self.batch_size):
                index_begin = self.batch_size * batch
                index_end = self.batch_size + index_begin

                # Push input and teacher to GPU memory
                self.neuron_layer[0].activations = cp.push(
                    input_matrix[:, index_begin:index_end].astype('float32').copy('F'))
                teachbatch = cp.push(
                    teacher_matrix[:, index_begin:index_end].astype('float32').copy('F'))

                # Forward-Pass
                for i in xrange(self.number_of_layers):
                    self.weight_layer[i].forward()

                # calculate error at output layer
                cp.apply_binary_functor(self.neuron_layer[-1].deltas, teachbatch,
                                        cp.binary_functor.COPY)
                cp.apply_binary_functor(self.neuron_layer[-1].deltas,
                                        self.neuron_layer[-1].activations,
                                        cp.binary_functor.SUBTRACT)
                cp.apply_binary_functor(squared_errors, self.neuron_layer[-1].deltas,
                                        cp.binary_functor.COPY)
                cp.apply_scalar_functor(squared_errors, cp.scalar_functor.SQUARE)
                mse += cp.sum(squared_errors)

                # Backward-Pass
                for i in xrange(self.number_of_layers):
                    self.weight_layer[self.number_of_layers - i - 1].backward()

                # Don't wait for garbage collector
                teachbatch.dealloc()
                self.neuron_layer[0].activations.dealloc()

            print "MSE: ", (mse / number_of_pictures)
        squared_errors.dealloc()
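# A minimal usage sketch for the MLP class (not part of the original code).
# It assumes the neuron_layer and weight_layer classes used in __init__ are
# defined elsewhere in this module, and that test() shown at the top of this
# file is bound as a method of MLP. Random data stands in for a real dataset;
# columns are samples, rows are input / output dimensions.
def mlp_example():
    n_samples, batch_size = 256, 64
    inputs = np.random.rand(784, n_samples)      # e.g. flattened 28x28 images
    teachers = np.zeros((10, n_samples))
    teachers[np.random.randint(0, 10, n_samples), np.arange(n_samples)] = 1  # one-hot labels
    mlp = MLP([784, 128, 10], batch_size)
    mlp.train(inputs, teachers, number_of_epochs=5)
    mlp.test(inputs, teachers)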