Example #1
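 # Presumably one half of a finite-difference gradient check: reseeding makes
 # any randomness in the layer reproducible between calls, and backpropagating
 # an all-ones gradient yields d(sum(out))/d(input) as a flat NumPy array.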
 def grad(x):
     ca.random.seed(random_seed)
     x = ca.array(np.reshape(x, input_shape))
     out = layer.fprop(ca.array(x), 'train')
     out_grad = ca.ones_like(out, dtype=np.float32)
     input_grad = layer.bprop(out_grad)
     return np.ravel(np.array(input_grad))
Example #2
def test_indexing():
    a_np = np.ones((3, 3, 3)) * np.arange(3)
    a_ca = ca.array(a_np)

    print(np.allclose(a_np[0], np.array(a_ca[0])))
    print(np.allclose(a_np[1], np.array(a_ca[1])))
    print(np.allclose(a_np[0, :, :], np.array(a_ca[0, :, :])))
    print(np.allclose(a_np[2, :, :], np.array(a_ca[2, :, :])))
    print(np.allclose(a_np[1, 1, :], np.array(a_ca[1, 1, :])))
    print(np.allclose(a_np[1, 1, 1], np.array(a_ca[1, 1, 1])))
    print(np.allclose(a_np[1:3, :, :], np.array(a_ca[1:3, :, :])))

    a_np = np.ones((3, 3, 3)) * np.arange(3)
    a_ca = ca.array(a_np)
    b_np = np.random.random(size=(3, 3))
    b_ca = ca.array(b_np)

    a_np[1] = b_np
    a_ca[1] = b_ca
    print(np.allclose(a_np, np.array(a_ca)))

    b_np = np.random.random(size=(3))
    b_ca = ca.array(b_np)
    a_np[1, 2] = b_np
    a_ca[1, 2] = b_ca
    print(np.allclose(a_np, np.array(a_ca)))
Example #3
 def setup(self, x_shape):
     batch_size = x_shape[0]
     self.x_src = ex.Source(x_shape)
     z = ex.random.normal(size=(batch_size, self.n_hidden))
     x_tilde = self.generator(z)
     x = ex.Concatenate(axis=0)(self.x_src, x_tilde)
     if self.real_vs_gen_weight != 0.5:
         # Scale gradients to balance real vs. generated contributions to
         # GAN discriminator
         dis_batch_size = batch_size * 2
         weights = np.zeros((dis_batch_size, 1))
         weights[:batch_size] = self.real_vs_gen_weight
         weights[batch_size:] = (1 - self.real_vs_gen_weight)
         dis_weights = ca.array(weights)
         shape = np.array(x_shape)**0
         shape[0] = dis_batch_size
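         # x_shape**0 is all ones, so shape is (dis_batch_size, 1, ..., 1)
         # and the inverse weights broadcast over the remaining axes.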
         dis_weights_inv = ca.array(1.0 / np.reshape(weights, shape))
         x = ScaleGradient(dis_weights_inv)(x)
     # Discriminate
     d = self.discriminator(x)
     if self.real_vs_gen_weight != 0.5:
         d = ScaleGradient(dis_weights)(d)
     sign = np.ones((batch_size * 2, 1), dtype=ca.float_)
     sign[batch_size:] = -1.0
     offset = np.zeros_like(sign)
     offset[batch_size:] = 1.0
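     # Real rows get sign=1, offset=0 (log d); generated rows get sign=-1,
     # offset=1 (log(1 - d)): the standard GAN objective, with eps guarding
     # against log(0).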
     self.gan_loss = ex.log(d * sign + offset + self.eps)
     self.loss = ex.sum(self.gan_loss)
     self._graph = ex.graph.ExprGraph(self.loss)
     self._graph.setup()
     self.loss.grad_array = ca.array(-1.0)
Example #4
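 # Overwrites the parameter buffer in place (zero it, then add x) and returns
 # the scalar sum of the layer output; presumably the function half of a
 # numerical gradient check.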
 def fun(x, p_idx):
     ca.random.seed(seed)
     param_array = layer.params[p_idx].array
     param_array *= 0
     param_array += ca.array(x)
     y = np.array(layer.fprop(ca.array(x0))).astype(np.float_)
     return np.sum(y)
Example #5
def test_sum():
    a_np = np.random.normal(size=(5, 5))
    a_ca = ca.array(a_np)

    s_np = np.sum(a_np)
    s_ca = ca.sum(a_ca)
    print(np.allclose(s_np, np.array(s_ca)))

    a_np = np.random.normal(size=(5, 5))
    a_ca = ca.array(a_np)

    s_np = np.sum(a_np, 0)
    s_ca = ca.sum(a_ca, 0)
    print(np.allclose(s_np, np.array(s_ca)))

    s_np = np.sum(a_np, 1)
    s_ca = ca.sum(a_ca, 1)
    print(np.allclose(s_np, np.array(s_ca)))

    a_np = np.random.normal(size=(5, 5, 10))
    a_ca = ca.array(a_np)

    s_np = np.sum(a_np, 0)
    s_ca = ca.sum(a_ca, 0)
    print(np.allclose(s_np, np.array(s_ca)))

    s_np = np.sum(a_np, 2)
    s_ca = ca.sum(a_ca, 2)
    print(np.allclose(s_np, np.array(s_ca)))
Example #7
 def fun(x, p_idx):
     ca.random.seed(seed)
     param_array = layer._params[p_idx].array
     param_array *= 0
     param_array += ca.array(x)
     y = np.array(layer.fprop(ca.array(x0))).astype(np.float_)
     return np.sum(y)
Example #8
 def setup(self, x_shape):
     batch_size = x_shape[0]
     self.x_src = expr.Source(x_shape)
     z = expr.random.normal(size=(batch_size, self.n_hidden))
     x_tilde = self.generator(z)
     x = expr.Concatenate(axis=0)(self.x_src, x_tilde)
     if self.real_vs_gen_weight != 0.5:
         # Scale gradients to balance real vs. generated contributions to
         # GAN discriminator
         dis_batch_size = batch_size*2
         weights = np.zeros((dis_batch_size, 1))
         weights[:batch_size] = self.real_vs_gen_weight
         weights[batch_size:] = (1-self.real_vs_gen_weight)
         dis_weights = ca.array(weights)
         shape = np.array(x_shape)**0
         shape[0] = dis_batch_size
         dis_weights_inv = ca.array(1.0 / np.reshape(weights, shape))
         x = ScaleGradient(dis_weights_inv)(x)
     # Discriminate
     d = self.discriminator(x)
     if self.real_vs_gen_weight != 0.5:
         d = ScaleGradient(dis_weights)(d)
     sign = np.ones((batch_size*2, 1), dtype=ca.float_)
     sign[batch_size:] = -1.0
     offset = np.zeros_like(sign)
     offset[batch_size:] = 1.0
     self.gan_loss = expr.log(d*sign + offset + self.eps)
     self._graph = expr.ExprGraph(-expr.sum(self.gan_loss))
     self._graph.out_grad = ca.array(1.0)
     self._graph.setup()
Example #9
def test_binary():
    a_np = np.random.normal(size=(5, 5))
    b_np = np.random.normal(size=(5, 5))
    a_ca = ca.array(a_np)
    b_ca = ca.array(b_np)

    c_np = np.add(a_np, b_np)
    c_ca = ca.add(a_ca, b_ca)
    print(np.allclose(c_np, np.array(c_ca)))

    np.add(a_np, b_np, a_np)
    ca.add(a_ca, b_ca, a_ca)
    print(np.allclose(a_np, np.array(a_ca)))

    np.multiply(a_np, b_np, a_np)
    ca.multiply(a_ca, b_ca, a_ca)
    print(np.allclose(a_np, np.array(a_ca)))

    a_np = np.random.normal(size=(5, 5))
    b_np = np.random.normal(size=(5, 5)) > 0
    a_ca = ca.array(a_np)
    b_ca = ca.array(b_np)
    c_np = np.multiply(a_np, b_np)
    c_ca = ca.multiply(a_ca, b_ca)
    print(np.allclose(c_np, np.array(c_ca)))
Example #13
	def start_graph(self, session_id, specs_dict):
		
		self.create_backend_graphs(session_id, specs_dict)
		_iter = iter(self.master_graphs[self.session_id].observation_server)
		total_iterations = self.master_graphs[self.session_id].iterations

		i = 0
		while i < total_iterations:
			self.master_graphs[self.session_id].reset_except_origin()
			observation = next(_iter)
			y_vec = observation['digit-label']
			y_vec = ca.array(y_vec)
			x = observation['image']
			
			for graph in specs_dict['canvas_graphs']:
				if graph['name'].split('_')[-1] != 'origin':
					g_name = graph['name']
					self.create_graph(session_id, {'name': g_name, 'type': graph['type'], 'size': graph['size']})

					for node in graph['nodes']:
						if node['class'] == "TargetNode":
							node['opts']['y'] = y_vec
						elif node['class'] == "DataNode":
							if 'image_shape' in node['opts']:
								x.shape = (1, 1, node['opts']['image_shape'][0], node['opts']['image_shape'][1])
								node['opts']['image_shape'] = x.shape
							else:
								x.shape = (1, x.size)
							x = ca.array(x) / 255.0
							node['opts']['key'] = x
						self.add_node(g_name, node)
						node['opts'].pop('graph')

					for node in graph['nodes']:
						if node['class'] == "TargetNode":
							node['opts'].pop('y')
						elif node['class'] == "DataNode":
							if 'image_shape' in node['opts']:
								node['opts']['image_shape'] = [node['opts']['image_shape'][2], node['opts']['image_shape'][3]]
							node['opts'].pop('key')
					
					for relation in graph['relations']:
						for child in relation['children']:
							rel_mold = {'parent': relation['parent'], 'children': [child]}
							self.add_child(g_name, rel_mold)

			for external_relation in specs_dict['canvas_relations']:
				parent = external_relation['parent'][1]
				graph_name = external_relation['parent'][0]
				self.add_child(graph_name, {'parent': parent, 'children': [external_relation['child']]})
			
			self.master_graphs[self.session_id].forward()
			self.master_graphs[self.session_id].backward()
			self.master_graphs[self.session_id].update_weights()
			self.master_graphs[self.session_id].print_error(i)
			self.master_graphs[self.session_id].reset_grads()

			i += 1
Example #14
 def batches(self, phase='train'):
     if phase == 'train':
         for batch_start, batch_stop in self._batch_slices():
             x_batch = ca.array(self.x[batch_start:batch_stop])
             y_batch = ca.array(self.y[batch_start:batch_stop])
             yield x_batch, y_batch
     elif phase == 'test':
         for x in super(SupervisedInput, self).batches():
             yield x
Example #15
 def concatenate_(shape_a, shape_b, axis):
     a = np.random.random(size=shape_a)
     b = np.random.random(size=shape_b)
     c_np = np.concatenate((a, b), axis=axis)
     c_ca = ca.extra.concatenate(ca.array(a), ca.array(b), axis=axis)
     print(np.allclose(c_np, np.array(c_ca)))
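     # split with a_size along the same axis should invert concatenate.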
     a_, b_ = ca.extra.split(c_ca, a_size=a.shape[axis], axis=axis)
     print(np.allclose(a, np.array(a_)))
     print(np.allclose(b, np.array(b_)))
Example #16
 def func(x, *args):
     ca.random.seed(random_seed)
     p_idx = args[0]
     param_vals = layer._params[p_idx].array
     param_vals *= 0
     param_vals += ca.array(np.reshape(x, param_vals.shape))
     out = layer.fprop(ca.array(x0), 'train')
     y = ca.sum(out)
     return np.array(y)
Example #18
 def fun_grad(x, p_idx):
     param_array = layer.params[p_idx].array
     param_array *= 0
     param_array += ca.array(x)
     out = layer.fprop(ca.array(x0))
     y_grad = ca.ones_like(out, dtype=ca.float_)
     layer.bprop(y_grad)
     param_grad = np.array(layer.params[p_idx].grad())
     return param_grad.astype(np.float_)
Example #19
 def func(x, *args):
     ca.random.seed(random_seed)
     p_idx = args[0]
     param_vals = layer.params()[p_idx].values
     param_vals *= 0
     param_vals += ca.array(np.reshape(x, param_vals.shape))
     out = layer.fprop(ca.array(x0), 'train')
     y = ca.sum(out)
     return np.array(y)
Example #20
 def fun_grad(x, p_idx):
     param_array = layer._params[p_idx].array
     param_array *= 0
     param_array += ca.array(x)
     out = layer.fprop(ca.array(x0))
     y_grad = ca.ones_like(out, dtype=ca.float_)
     layer.bprop(y_grad)
     param_grad = np.array(layer._params[p_idx].grad())
     return param_grad.astype(np.float_)
Example #21
    def __init__(self, layers, init_img, subject_img, style_img,
                 subject_weights, style_weights, smoothness=0.0):
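        # Appears to be a neural artistic style setup: x holds the image being
        # optimized, while the target content features and style Gram matrices
        # are precomputed from subject_img and style_img below.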

        # Map weights (in convolution indices) to layer indices
        self.subject_weights = np.zeros(len(layers))
        self.style_weights = np.zeros(len(layers))
        layers_len = 0
        conv_idx = 0
        for l, layer in enumerate(layers):
            if isinstance(layer, dp.Activation):
                self.subject_weights[l] = subject_weights[conv_idx]
                self.style_weights[l] = style_weights[conv_idx]
                if subject_weights[conv_idx] > 0 or \
                   style_weights[conv_idx] > 0:
                    layers_len = l+1
                conv_idx += 1

        # Discard unused layers
        layers = layers[:layers_len]

        # Wrap convolution layers for better performance
        self.layers = [Convolution(l) if isinstance(l, dp.Convolution) else l
                       for l in layers]

        # Setup network
        x_shape = init_img.shape
        self.x = Parameter(init_img)
        self.x.setup(x_shape)
        for layer in self.layers:
            layer.setup(x_shape)
            x_shape = layer.y_shape(x_shape)

        # Precompute subject features and style Gram matrices
        self.subject_feats = [None]*len(self.layers)
        self.style_grams = [None]*len(self.layers)
        next_subject = ca.array(subject_img)
        next_style = ca.array(style_img)
        for l, layer in enumerate(self.layers):
            next_subject = layer.fprop(next_subject)
            next_style = layer.fprop(next_style)
            if self.subject_weights[l] > 0:
                self.subject_feats[l] = next_subject
            if self.style_weights[l] > 0:
                gram = gram_matrix(next_style)
                # Scale gram matrix to compensate for different image sizes
                n_pixels_subject = np.prod(next_subject.shape[2:])
                n_pixels_style = np.prod(next_style.shape[2:])
                scale = (n_pixels_subject / float(n_pixels_style))
                self.style_grams[l] = gram * scale

        self.tv_weight = smoothness
        kernel = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]], dtype=dp.float_)
        kernel /= np.sum(np.abs(kernel))
        self.tv_kernel = ca.array(kernel[np.newaxis, np.newaxis, ...])
        self.tv_conv = ca.nnet.ConvBC01((1, 1), (1, 1))
Example #23
def getGiantFeaturesMatGPU(paths_to_features, feature_type='fc7'):
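    # Concatenates per-file feature matrices into one large GPU array while
    # recording each file's row count.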
    shape_record = []
    for idx, path_curr in enumerate(paths_to_features):
        mat_curr = np.load(path_curr)[feature_type]
        shape_record.append(mat_curr.shape[0])
        mat_curr = mat_curr.reshape(mat_curr.shape[0], mat_curr.shape[1])
        if idx == 0:
            train = ca.array(mat_curr)
        else:
            train = ca.extra.concatenate(train, ca.array(mat_curr), axis=0)
    return train, shape_record
Example #24
def test_softmaxcrossentropy():
    confs = itertools.product(batch_sizes, n_ins)
    for batch_size, n_in in confs:
        print("SoftmaxCrossEntropy: batch_size=%i, n_in=%i" % (batch_size, n_in))
        x_shape = (batch_size, n_in)
        x = np.random.normal(size=x_shape)
        y = np.random.randint(low=0, high=n_in, size=batch_size)
        loss = dp.SoftmaxCrossEntropy()
        loss._setup(x_shape)
        assert loss.loss(ca.array(x), ca.array(y)).shape == x_shape[:1]
        check_grad(loss, x, y)
Example #25
def test_binarycrossentropy():
    confs = itertools.product(batch_sizes, n_ins)
    for batch_size, n_in in confs:
        print("BinaryCrossEntropy: batch_size=%i, n_in=%i" % (batch_size, n_in))
        x_shape = (batch_size, n_in)
        x = np.random.uniform(size=x_shape)
        y = np.random.uniform(size=x_shape)
        loss = dp.BinaryCrossEntropy()
        loss._setup(x_shape)
        assert loss.loss(ca.array(x), ca.array(y)).shape == x_shape[:1]
        check_grad(loss, x, y)
Example #26
def test_meansquarederror():
    confs = itertools.product(batch_sizes, n_ins)
    for batch_size, n_in in confs:
        print('MeanSquaredError: batch_size=%i, n_in=%i' % (batch_size, n_in))
        x_shape = (batch_size, n_in)
        x = np.random.normal(size=x_shape)
        y = np.random.normal(size=x_shape)
        loss = dp.MeanSquaredError()
        loss._setup(x_shape)
        assert loss.loss(ca.array(x), ca.array(y)).shape == x_shape[:1]
        check_grad(loss, x, y)
Example #28
 def grad(x, *args):
     ca.random.seed(random_seed)
     p_idx = args[0]
     param_vals = layer._params[p_idx].array
     param_vals *= 0
     param_vals += ca.array(np.reshape(x, param_vals.shape))
     out = layer.fprop(ca.array(x0), 'train')
     out_grad = ca.ones_like(out, dtype=np.float32)
     layer.bprop(out_grad)
     param_grad = layer._params[p_idx].grad()
     return np.ravel(np.array(param_grad))
Example #29
def test_meansquarederror():
    confs = itertools.product(batch_sizes, n_ins)
    for batch_size, n_in in confs:
        print('MeanSquaredError: batch_size=%i, n_in=%i' % (batch_size, n_in))
        x_shape = (batch_size, n_in)
        x = np.random.normal(size=x_shape)
        y = np.random.normal(size=x_shape)
        loss = dp.MeanSquaredError()
        loss.setup(x_shape)
        assert loss.loss(ca.array(x), ca.array(y)).shape == x_shape[:1]
        check_grad(loss, x, y)
Example #30
 def grad(x, *args):
     ca.random.seed(random_seed)
     p_idx = args[0]
     param_vals = layer.params()[p_idx].values
     param_vals *= 0
     param_vals += ca.array(np.reshape(x, param_vals.shape))
     out = layer.fprop(ca.array(x0), 'train')
     out_grad = ca.ones_like(out, dtype=np.float32)
     layer.bprop(out_grad)
     param_grad = layer.params()[p_idx].grad
     return np.ravel(np.array(param_grad))
Example #31
 def setup(self, x_shape):
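     # Looks like a VAE/GAN hybrid: the reconstruction error can be measured
     # in discriminator feature space (recon_depth), and the decoder doubles
     # as the GAN generator through a negated-gradient copy (decoder_neggrad).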
     batch_size = x_shape[0]
     self.x_src = expr.Source(x_shape)
     loss = 0
     # Encode
     enc = self.encoder(self.x_src)
     z, self.encoder_loss = self.latent_encoder.encode(enc, batch_size)
     loss += self.encoder_loss
     # Decode
     x_tilde = self.decoder(z)
     if self.recon_depth > 0:
         # Reconstruction error in discriminator
         x = expr.Concatenate(axis=0)(x_tilde, self.x_src)
         d = self.discriminator_recon(x)
         d = expr.Reshape((batch_size*2, -1))(d)
         d_x_tilde, d_x = expr.Slices([batch_size])(d)
         loss += self.recon_error(d_x_tilde, d_x)
     else:
         loss += self.recon_error(x_tilde, self.x_src)
     # Kill gradient from GAN loss to AE encoder
     z = ScaleGradient(0.0)(z)
     # Decode for GAN loss
     gen_size = batch_size
     if self.sample_z:
         gen_size += batch_size
         z_samples = self.latent_encoder.samples(batch_size)
         z = expr.Concatenate(axis=0)(z, z_samples)
     x = self.decoder_neggrad(z)
     x = expr.Concatenate(axis=0)(self.x_src, x)
     # Scale gradients to balance real vs. generated contributions to GAN
     # discriminator
     dis_batch_size = batch_size + gen_size
     real_weight = self.real_vs_gen_weight
     gen_weight = (1-self.real_vs_gen_weight) * float(batch_size)/gen_size
     weights = np.zeros((dis_batch_size, 1))
     weights[:batch_size] = real_weight
     weights[batch_size:] = gen_weight
     dis_weights = ca.array(weights)
     shape = np.array(x_shape)**0
     shape[0] = dis_batch_size
     dis_weights_inv = ca.array(1.0 / np.reshape(weights, shape))
     x = ScaleGradient(dis_weights_inv)(x)
     # Discriminate
     d = self.discriminator(x)
     d = ScaleGradient(dis_weights)(d)
     sign = np.ones((gen_size + batch_size, 1), dtype=ca.float_)
     sign[batch_size:] = -1.0
     offset = np.zeros_like(sign)
     offset[batch_size:] = 1.0
     self.gan_loss = expr.log(d*sign + offset + self.eps)
     self._graph = expr.ExprGraph(expr.sum(loss) + expr.sum(-self.gan_loss))
     self._graph.out_grad = ca.array(1.0)
     self._graph.setup()
Example #34
    def __init__(self, layers, subject_img, style_img, subject_weights,
                 style_weights):

        # Map weights (in convolution indices) to layer indices
        self.subject_weights = np.zeros(len(layers))
        self.style_weights = np.zeros(len(layers))
        layers_len = 0
        conv_idx = 0
        for l, layer in enumerate(layers):
            if isinstance(layer, dp.Activation):
                self.subject_weights[l] = subject_weights[conv_idx]
                self.style_weights[l] = style_weights[conv_idx]
                if subject_weights[conv_idx] > 0 or \
                   style_weights[conv_idx] > 0:
                    layers_len = l + 1
                conv_idx += 1

        # Discard unused layers
        layers = layers[:layers_len]

        # Wrap convolution layers for better performance
        self.layers = [
            Convolution(l) if isinstance(l, dp.Convolution) else l
            for l in layers
        ]

        # Setup network
        x_shape = subject_img.shape
        self.x = Parameter(subject_img)
        self.x._setup(x_shape)
        for layer in self.layers:
            layer._setup(x_shape)
            x_shape = layer.y_shape(x_shape)

        # Precompute subject features and style Gram matrices
        self.subject_feats = [None] * len(self.layers)
        self.style_grams = [None] * len(self.layers)
        next_subject = ca.array(subject_img)
        next_style = ca.array(style_img)
        for l, layer in enumerate(self.layers):
            next_subject = layer.fprop(next_subject)
            next_style = layer.fprop(next_style)
            if self.subject_weights[l] > 0:
                self.subject_feats[l] = next_subject
            if self.style_weights[l] > 0:
                gram = gram_matrix(next_style)
                # Scale gram matrix to compensate for different image sizes
                n_pixels_subject = np.prod(next_subject.shape[2:])
                n_pixels_style = np.prod(next_style.shape[2:])
                scale = (n_pixels_subject / float(n_pixels_style))
                self.style_grams[l] = gram * scale
Example #35
def getNNIndicesForBigFeatureMats(test_org, mats):

    # Dot-product similarities between the query features and each stored
    # matrix, concatenated and argsorted into descending order.
    test = ca.array(test_org)
    distances = []

    for idx_mat, mat_curr in enumerate(mats):
        print(idx_mat)
        distances.append(nearest_neighbor.getSimpleDot(
            test, ca.array(mats[idx_mat]), gpuFlag=True))

    distances = np.hstack(tuple(distances))
    indices = np.argsort(distances, axis=1)[:, ::-1].astype(np.uint32)

    return indices
Example #36
def test_batch_dot():
    batch_size = 10
    a_np = np.random.normal(size=(batch_size, 5, 5))
    b_np = np.random.normal(size=(batch_size, 5, 5))
    c_np = np.empty_like(a_np)
    for i in range(batch_size):
        c_np[i] = np.dot(a_np[i], b_np[i])

    a_ca = ca.array(a_np)
    b_ca = ca.array(b_np)
    c_ca = ca.array(c_np)
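    # ca.batch.Dot appears to set up a batched matrix multiply; perform()
    # writes the per-batch products into c_ca.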
    bdot = ca.batch.Dot(a_ca, b_ca, c_ca)
    bdot.perform()
    print(np.allclose(c_np, np.array(c_ca)))
Example #39
def test_reduce():
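    # Reductions (sum, mean, argmin) over assorted shapes and axes should
    # match NumPy.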
    a_np = np.random.normal(size=(1024, ))
    a_ca = ca.array(a_np)
    c_np = np.sum(a_np)
    c_ca = ca.sum(a_ca)
    print(np.allclose(c_np, np.array(c_ca)))
    c_np = np.mean(a_np)
    c_ca = ca.mean(a_ca)
    print(np.allclose(c_np, np.array(c_ca)))

    a_np = np.random.normal(size=(5, 5))
    a_ca = ca.array(a_np)
    c_np = np.sum(a_np)
    c_ca = ca.sum(a_ca)
    print(np.allclose(c_np, np.array(c_ca)))

    c_np = np.sum(a_np, axis=0)
    c_ca = ca.sum(a_ca, axis=0)
    print(np.allclose(c_np, np.array(c_ca)))

    c_np = np.sum(a_np, axis=1)
    c_ca = ca.sum(a_ca, axis=1)
    print(np.allclose(c_np, np.array(c_ca)))

    a_np = np.random.normal(size=(5, 7, 11))
    a_ca = ca.array(a_np)
    c_np = np.sum(a_np, axis=0)
    c_ca = ca.sum(a_ca, axis=0)
    print(np.allclose(c_np, np.array(c_ca)))

    c_np = np.sum(a_np, axis=2)
    c_ca = ca.sum(a_ca, axis=2)
    print(np.allclose(c_np, np.array(c_ca)))

    c_np = np.sum(a_np, axis=(0, 1))
    c_ca = ca.sum(a_ca, axis=(0, 1))
    print(np.allclose(c_np, np.array(c_ca)))

    c_np = np.sum(a_np, axis=(1, 2))
    c_ca = ca.sum(a_ca, axis=(1, 2))
    print(np.allclose(c_np, np.array(c_ca)))

    c_np = np.argmin(a_np, axis=0)
    c_ca = ca.argmin(a_ca, axis=0)
    print(np.allclose(c_np, np.array(c_ca)))

    c_np = np.argmin(a_np, axis=2)
    c_ca = ca.argmin(a_ca, axis=2)
    print(np.allclose(c_np, np.array(c_ca)))
Example #41
	def start_graph(self, session_id, graphs):

		self.create_backend_graphs(session_id, graphs)

		_iter = iter(self.master_graphs[self.session_id].observation_server)

		i = 0

		total_iterations = self.master_graphs[self.session_id].iterations

		while i < total_iterations:
			
			self.master_graphs[self.session_id].reset_except_origin()

			observation = next(_iter)

			y_vec = observation['digit-label']
			x = observation['image']
			x.shape = (256, 784)
			x = ca.array(x) / 255.0
			y_vec = ca.array(y_vec)

			for graph in graphs:
				if graph['name'].split('_')[-1] != 'origin':
					g_name = graph['name']
					mold = {'name': g_name, 'type': graph['type'], 'session_id': self.session_id, 'size': graph['size']}
					self.create_graph(mold)

					for node in graph['nodes']:
						if node['class'] == "TargetNode":
							node['opts']['y'] = y_vec

						elif node['class'] == "DataNode":
							node['opts']['key'] = x
						self.add_node_to_graph(g_name, node)

					relations = graph['relations']
					for relation in relations:
						for child in relation['children']:
							mold = {'parent': relation['parent'], 'children': [child]}
							self.add_child_to_node(g_name, mold)
			
			self.master_graphs[self.session_id].forward()
			self.master_graphs[self.session_id].backward()
			self.master_graphs[self.session_id].update_weights()
			self.master_graphs[self.session_id].print_error(i)

			i += 1
Example #42
 def array(self, shape):
     if isinstance(shape, int):
         shape = (shape, )
     if self.arr.shape != shape:
         raise ValueError('Shape mismatch: expected %s but got %s' %
                          (str(self.arr.shape), str(shape)))
     return ca.array(self.arr)
Example #43
def _require_expr(x):
    if isinstance(x, Expr):
        return x
    elif isinstance(x, ca.ndarray):
        return Constant(ca.array(x))
    else:
        return Constant(x)
Example #45
 def __init__(self, shape, val=0):
     self.name = "latch"
     self.val = val
     self.shape = shape
     self.array = np.ones(shape)
     self.array *= val
     self.array = ca.array(self.array)
Example #46
 def setup(self, x_shape):
     n_channels = x_shape[1]
     if self.kernel.ndim == 2:
         self.kernel = np.repeat(self.kernel[np.newaxis, np.newaxis, ...], n_channels, axis=1)
     elif self.kernel.ndim == 3:
         self.kernel = self.kernel[np.newaxis, :]
     self.ca_kernel = ca.array(self.kernel)
Example #49
 def batches(self, phase='train', domain='target'):
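     # Yields device-resident minibatches; at test time only the requested
     # domain's stream is produced.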
     if phase == 'train':
         for batch_start, batch_stop in self._batch_slices2(
                 domain='target'):
             x1_batch = ca.array(self.x[batch_start:batch_stop])
             x2_batch = ca.array(self.x2[batch_start:batch_stop])
             yield {'x1': x1_batch, 'x2': x2_batch}
     elif phase == 'test':
         if domain == 'target':
             for batch_start, batch_stop in self._batch_slices2(domain):
                 x1_batch = ca.array(self.x[batch_start:batch_stop])
                 yield {'x1': x1_batch}
         elif domain == 'source':
             for batch_start, batch_stop in self._batch_slices2(domain):
                 x2_batch = ca.array(self.x2[batch_start:batch_stop])
                 yield {'x2': x2_batch}
Example #50
 def setup(self, x_shape, y_shape=None):
     self._x_src = Source(x_shape)
     self._y_src = Source(y_shape)
     y_pred = self.expression(self._x_src)
     loss = self.loss(y_pred, self._y_src)
     self._graph = ExprGraph(loss)
     self._graph.setup()
     self._graph.out_grad = ca.array(1)
Example #52
 def __init__(self, value):
     if isinstance(value, np.ndarray):
         value = ca.array(value)
     self.array = value
     if isinstance(value, (float, int)):
         self.shape = (1, )
     else:
         self.shape = self.array.shape
Example #54
def test_error(model, x):
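    # Next-step prediction error: y is x shifted one step ahead, so the
    # network is scored on predicting each following element.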
    y = x[1:]
    x = x[:-1]
    net = test_network(model)
    y_pred = np.zeros_like(x)
    for i in range(x.shape[0]):
        y_pred[i] = one_hot_decode(np.array(net.fprop(x=ca.array(x[i]))))
    return np.mean(y_pred != y)
Example #55
 def fun_grad(x):
     ca.random.seed(seed)
     src.array = ca.array(x)
     graph.fprop()
     sink.grad_array = ca.ones(sink.shape, dtype=ca.float_)
     graph.bprop()
     x_grad = np.array(src.grad_array)
     return x_grad
Example #57
 def _setup(self, x_shape):
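     # Repeats a 2-D kernel across the input channels into a single 4-D
     # filter of shape (1, n_channels, H, W); a 3-D kernel just gains a
     # leading axis. The result is then uploaded to the GPU.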
     n_channels = x_shape[1]
     if self.kernel.ndim == 2:
         self.kernel = np.repeat(self.kernel[np.newaxis, np.newaxis, ...],
                                 n_channels, axis=1)
     elif self.kernel.ndim == 3:
         self.kernel = self.kernel[np.newaxis, :]
     self.ca_kernel = ca.array(self.kernel)
Example #58
 def setup(self, x_shape, y_shape=None):
     self.x_src = ex.Source(x_shape)
     y_expr = self._fprop_expr(self.x_src)
     if y_shape is not None:
         self.y_src = ex.Source(y_shape)
         y_expr = self.loss(y_expr, self.y_src)
         y_expr.grad_array = ca.array(1.0)
     self.graph = ex.graph.ExprGraph(y_expr)
     self.graph.setup()
Example #59
def test_transpose():
    shapes = [(4, 4), (5, 4), (8, 8), (32, 32), (55, 44), (64, 55), (55, 64),
              (32, 64), (64, 128), (128, 64), (128, 1)]
    for shape in shapes:
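        # ascontiguousarray on the transposed view should materialize the
        # same memory layout as NumPy produces.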
        a_np = np.reshape(np.arange(np.prod(shape)), shape) + 1
        a_ca = ca.array(a_np)
        a_np = np.ascontiguousarray(a_np.T)
        a_ca = ca.ascontiguousarray(a_ca.T)
        print(np.allclose(a_np, np.array(a_ca)))