Example #1
    def test_init(self):
        import numpy as np
        import math
        import sys

        assert np.intp() == np.intp(0)
        assert np.intp("123") == np.intp(123)
        raises(TypeError, np.intp, None)
        assert np.float64() == np.float64(0)
        assert math.isnan(np.float64(None))
        assert np.bool_() == np.bool_(False)
        assert np.bool_("abc") == np.bool_(True)
        assert np.bool_(None) == np.bool_(False)
        assert np.complex_() == np.complex_(0)
        # raises(TypeError, np.complex_, '1+2j')
        assert math.isnan(np.complex_(None))
        for c in ["i", "I", "l", "L", "q", "Q"]:
            assert np.dtype(c).type().dtype.char == c
        for c in ["l", "q"]:
            assert np.dtype(c).type(sys.maxint) == sys.maxint
        for c in ["L", "Q"]:
            assert np.dtype(c).type(sys.maxint + 42) == sys.maxint + 42
        assert np.float32(np.array([True, False])).dtype == np.float32
        assert type(np.float32(np.array([True]))) is np.ndarray
        assert type(np.float32(1.0)) is np.float32
        a = np.array([True, False])
        assert np.bool_(a) is a
Example #2
    def _init_params(self):
        # Left weight matrix
        self.W_hh = theano.shared(
            self.init_fn(self.n_hids, self.n_hids, self.sparsity, self.scale, rng=self.rng), name="W_%s" % self.name
        )
        self.params = [self.W_hh]
        # Right weight matrix
        self.U_hh = theano.shared(
            self.init_fn(self.n_hids, self.n_hids, self.sparsity, self.scale, rng=self.rng), name="U_%s" % self.name
        )
        self.params += [self.U_hh]
        # Bias
        self.b_hh = theano.shared(self.bias_fn(self.n_hids, self.bias_scale, self.rng), name="b_%s" % self.name)
        self.params += [self.b_hh]
        # gaters
        # if self.conv_mode == "conv":
        self.GW_hh = theano.shared(numpy.float32(0.01 * self.rng.randn(self.n_hids, 3)), name="GW_%s" % self.name)
        self.params += [self.GW_hh]
        self.GU_hh = theano.shared(numpy.float32(0.01 * self.rng.randn(self.n_hids, 3)), name="GU_%s" % self.name)
        self.params += [self.GU_hh]
        self.Gb_hh = theano.shared(self.bias_fn(3, self.bias_scale, self.rng), name="Gb_%s" % self.name)
        self.params += [self.Gb_hh]

        self.params_grad_scale = [self.grad_scale for x in self.params]
        self.restricted_params = [x for x in self.params]
        if self.weight_noise:
            self.nW_hh = theano.shared(self.W_hh.get_value() * 0, name="noise_" + self.W_hh.name)
            self.nU_hh = theano.shared(self.U_hh.get_value() * 0, name="noise_" + self.U_hh.name)
            self.nb_hh = theano.shared(self.b_hh.get_value() * 0, name="noise_" + self.b_hh.name)
            self.noise_params = [self.nW_hh, self.nU_hh, self.nb_hh]
            self.noise_params_shape_fn = [constant_shape(x.get_value().shape) for x in self.noise_params]
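Note: constant_shape() is not defined in this snippet. A minimal sketch, assuming it simply fixes the reported shape of each noise parameter (as in GroundHog-style utilities; the exact helper may differ):

def constant_shape(shape):
    # Assumed helper: return a callable that always reports the same, constant shape.
    return lambda *args, **kwargs: shape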
Example #3
    def test_cublasSgemmBatched(self):
        l, m, k, n = 11, 7, 5, 3
        A = np.random.rand(l, m, k).astype(np.float32)
        B = np.random.rand(l, k, n).astype(np.float32)

        C_res = np.einsum('nij,njk->nik', A, B)

        a_gpu = gpuarray.to_gpu(A)
        b_gpu = gpuarray.to_gpu(B)
        c_gpu = gpuarray.empty((l, m, n), np.float32)

        alpha = np.float32(1.0)
        beta = np.float32(0.0)

        a_arr = bptrs(a_gpu)
        b_arr = bptrs(b_gpu)
        c_arr = bptrs(c_gpu)

        cublas.cublasSgemmBatched(self.cublas_handle, 'n','n',
                                  n, m, k, alpha,
                                  b_arr.gpudata, n,
                                  a_arr.gpudata, k,
                                  beta, c_arr.gpudata, n, l)

        assert np.allclose(C_res, c_gpu.get())
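Note: bptrs() is assumed here and not shown. cublasSgemmBatched expects an array of device pointers, one per matrix in the batch; a minimal sketch in the spirit of scikit-cuda's test utilities (assumes a CUDA context already exists):

import ctypes
import pycuda.gpuarray as gpuarray

def bptrs(a):
    # Assumed helper: build a GPU array holding the device pointer of each a[i] in the batch.
    return gpuarray.arange(a.ptr, a.ptr + a.shape[0] * a.strides[0], a.strides[0],
                           dtype=ctypes.c_void_p)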
Example #4
def warpTriangle(img1, img2, t1, t2) :

    # Find bounding rectangle for each triangle
    r1 = cv2.boundingRect(np.float32([t1]))
    r2 = cv2.boundingRect(np.float32([t2]))

    # Offset points by left top corner of the respective rectangles
    t1Rect = [] 
    t2Rect = []
    t2RectInt = []

    for i in xrange(0, 3):
        t1Rect.append(((t1[i][0] - r1[0]),(t1[i][1] - r1[1])))
        t2Rect.append(((t2[i][0] - r2[0]),(t2[i][1] - r2[1])))
        t2RectInt.append(((t2[i][0] - r2[0]),(t2[i][1] - r2[1])))


    # Get mask by filling triangle
    mask = np.zeros((r2[3], r2[2], 3), dtype = np.float32)
    cv2.fillConvexPoly(mask, np.int32(t2RectInt), (1.0, 1.0, 1.0), 16, 0);

    # Apply warpImage to small rectangular patches
    img1Rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
    
    size = (r2[2], r2[3])

    img2Rect = applyAffineTransform(img1Rect, t1Rect, t2Rect, size)
    
    img2Rect = img2Rect * mask

    # Copy triangular region of the rectangular patch to the output image
    img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] = img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] * ( (1.0, 1.0, 1.0) - mask )
     
    img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] = img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] + img2Rect
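Note: applyAffineTransform() is assumed here and not shown. A minimal sketch, assuming it warps the source patch so that srcTri maps onto dstTri (the usual OpenCV face-morphing helper):

def applyAffineTransform(src, srcTri, dstTri, size):
    # Estimate the affine transform between the two triangles ...
    warpMat = cv2.getAffineTransform(np.float32(srcTri), np.float32(dstTri))
    # ... and warp the source patch to the destination patch size (width, height).
    return cv2.warpAffine(src, warpMat, (size[0], size[1]), None,
                          flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)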
Example #5
 def ilspec_render(self, blocksize, gridsize):
     frender = self.mod.get_function('ilRender')
     frender(cuda.Out(tempout), cuda.In(dnus), cuda.InOut(tau),
             np.float32(ny0), np.float32(dopp_width0),
             np.int32(nlamb), np.int8(opacity),
             block=(blocksize, 1, 1), grid=(gridsize, 1, 1))
     ilspec_render.datout += tempout
Example #6
def build(path,pathdir,files,labels,all_count,ratio,size):

    if not label_fixed:
        train_labels=random.sample(labels,int(ratio*len(labels)))
    else:
        train_labels=labels[:int(ratio*len(labels))]
        print train_labels[:20]
    for i in train_labels:
        labels.remove(i)
    test_labels=labels
    assert len(train_labels)+len(test_labels)==all_count

    train_dates={label:[] for label in train_labels}
    test_dates={label:[] for label in test_labels}
    for file in files:
        label=file[-11:-7]
        if label in train_labels:
            train_dates[label].append(0.001*(255-np.float32(imresize(imread(file,1),size))))
        else:
            test_dates[label].append(0.001*(255-np.float32(imresize(imread(file,1),size))))

    train_rank_dates={}
    for i in range(len(train_dates)):
        train_rank_dates[i]=train_dates[train_dates.keys()[i]]
    if cnn_only:
        return train_rank_dates
    x_train,y_train=get_sequence_images(train_rank_dates,train_labels,path_length,total_labels_per_seq,size,total_roads)

    # x_train,y_train=get_sequence_images(train_dates,train_labels,path_length,total_labels_per_seq,size,total_roads)
    x_test,y_test=get_sequence_images_cover(train_dates,test_dates,train_labels,test_labels,path_length,total_labels_per_seq,size,total_roads)
    return x_train,y_train,x_test,y_test,train_labels,test_labels
Example #7
  def _testReduceSum(self,
                     expected_result,
                     dtype,
                     test_inputs,
                     rtol=1e-3,
                     atol=1e-4):
    """Tests reduce sum on a list of input arrays.

    For each array in test_inputs, check that performing reduce sum on the array
    produces a value that is close to the expected result.

    Args:
      expected_result: the expected result.
      dtype: the data type of the reduce sum operation.
      test_inputs: a list of input arrays for the reduce sum operation.
      rtol: the relative error.
      atol: the absolute error.
    """

    for test_input in test_inputs:
      with self.test_session() as sess:
        with self.test_scope():
          a = array_ops.placeholder(dtype)
          index = array_ops.placeholder(dtypes.int32)
          out = math_ops.reduce_sum(a, index)
        result = sess.run(out, {
            a: np.array(test_input, dtype=dtype),
            index: [0]
        })
        # Compare the results using float32 type.
        self.assertAllClose(
            np.float32(result),
            np.float32(expected_result),
            rtol=rtol,
            atol=atol)
Example #8
    def track(self, frame):
        '''Returns a list of detected TrackedTarget objects'''
        self.frame_points, frame_descrs = self.detect_features(frame)
        if len(self.frame_points) < MIN_MATCH_COUNT:
            return []
        matches = self.matcher.knnMatch(frame_descrs, k = 2)
        matches = [m[0] for m in matches if len(m) == 2 and m[0].distance < m[1].distance * 0.75]
        if len(matches) < MIN_MATCH_COUNT:
            return []
        matches_by_id = [[] for _ in xrange(len(self.targets))]
        for m in matches:
            matches_by_id[m.imgIdx].append(m)
        tracked = []
        for imgIdx, matches in enumerate(matches_by_id):
            if len(matches) < MIN_MATCH_COUNT:
                continue
            target = self.targets[imgIdx]
            p0 = [target.keypoints[m.trainIdx].pt for m in matches]
            p1 = [self.frame_points[m.queryIdx].pt for m in matches]
            p0, p1 = np.float32((p0, p1))
            H, status = cv2.findHomography(p0, p1, cv2.RANSAC, 3.0)
            status = status.ravel() != 0
            if status.sum() < MIN_MATCH_COUNT:
                continue
            p0, p1 = p0[status], p1[status]

            x0, y0, x1, y1 = target.rect
            quad = np.float32([[x0, y0], [x1, y0], [x1, y1], [x0, y1]])
            quad = cv2.perspectiveTransform(quad.reshape(1, -1, 2), H).reshape(-1, 2)

            track = TrackedTarget(target=target, p0=p0, p1=p1, H=H, quad=quad)
            tracked.append(track)
        tracked.sort(key = lambda t: len(t.p0), reverse=True)
        return tracked
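Note: MIN_MATCH_COUNT is a module-level constant assumed by this snippet; in the OpenCV plane_tracker.py sample that this tracker follows, it is a small integer, for example:

MIN_MATCH_COUNT = 10  # assumed value; minimum matches required to accept a target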
Example #9
def test_gpuspecifyshape():
    x = cuda.shared_constructor(numpy.ones(3, dtype='float32'), 'x')
    m = theano.tensor.specify_shape(x + numpy.float32(1), (3,))
    f = theano.function([], updates=[(x, m * numpy.float32(2))],
                        mode=mode_with_gpu)
    l = f.maker.fgraph.toposort()
    assert not numpy.any([isinstance(x.op, cuda.HostFromGpu) for x in l])
Example #10
 def apply(self , src , mask_length , tgt):
     """
         viterbi algorithm
     """
     result , updates = theano.scan(
         fn = self.train_step,
         sequences = src,
         outputs_info = [self.A_start, None] ,
         non_sequences = self.A ,
         n_steps = mask_length
     )
     # the score of best path
     best_path_score = result[0][-1].max()
     idx = T.argmax(result[0][-1])
     #backtracking
     res2 , _ = theano.scan(
         fn = lambda dps , idx , idx2 : [dps[idx] , idx],
         sequences = result[1][::-1],
         outputs_info = [idx , idx],
         n_steps = mask_length
     )
     # the path of best score
     best_path = res2[1]
     #if len(best_path) < seq_len:
     #    best_path.extend((seq_len - len(best_path)) * [2])
     # the score of tgt path
     tgt_score = self.decode(src , mask_length , tgt)
     # max_margin
     max_margin = T.sum(T.neq(tgt[:mask_length] , best_path))
     cost = best_path_score + max_margin - tgt_score
     return T.switch(T.lt(cost , T.alloc(numpy.float32(0.)))
                     , T.alloc(numpy.float32(0.))
                     , cost
                     ),best_path
Example #11
def testIFD():

    # test I
    assert packet.pack('I',3) == struct.pack('<ci', b'I', 3)
    assert packet.pack('I',3) != struct.pack('<ci', b'I', 4)

    assert packet.unpack_stream(
        io.BytesIO(struct.pack('<ci', b'I', 3))) == ('I', 3)
    assert packet.unpack_stream(
        io.BytesIO(struct.pack('<ci', b'I', 4))) != ('I', 3)

    # test F
    assert packet.pack('F',3.3) == struct.pack('<cf', b'F', 3.3)
    assert packet.pack('F',3.3) != struct.pack('<cf', b'F', 4.3)

    assert packet.unpack_stream(
        io.BytesIO(struct.pack('<cf', b'F', numpy.float32(3.3)))) == ('F', numpy.float32(3.3))
    assert packet.unpack_stream(
        io.BytesIO(struct.pack('<cf', b'F', 4.3))) != ('F', 3.3)

    # test D
    assert packet.pack('D',3.3) == struct.pack('<cd', b'D', 3.3)
    assert packet.pack('D',3.3) != struct.pack('<cd', b'D', 4.3)

    assert packet.unpack_stream(
        io.BytesIO(struct.pack('<cd', b'D', 3.3))) == ('D', 3.3)
    assert packet.unpack_stream(
        io.BytesIO(struct.pack('<cd', b'D', 4.3))) != ('D', 3.3)
Example #12
def rmsprop(lr, tparams, grads, inp, cost):
    zipped_grads = [theano.shared(p.get_value() * numpy.float32(0.),
                                  name='%s_grad' % k)
                    for k, p in tparams.iteritems()]
    running_grads = [theano.shared(p.get_value() * numpy.float32(0.),
                                   name='%s_rgrad' % k)
                     for k, p in tparams.iteritems()]
    running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.),
                                    name='%s_rgrad2' % k)
                      for k, p in tparams.iteritems()]

    zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
    rgup = [(rg, 0.95 * rg + 0.05 * g) for rg, g in zip(running_grads, grads)]
    rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
             for rg2, g in zip(running_grads2, grads)]

    f_grad_shared = theano.function(inp, cost, updates=zgup+rgup+rg2up,
                                    profile=profile)

    updir = [theano.shared(p.get_value() * numpy.float32(0.),
                           name='%s_updir' % k)
             for k, p in tparams.iteritems()]
    updir_new = [(ud, 0.9 * ud - 1e-4 * zg / tensor.sqrt(rg2 - rg ** 2 + 1e-4))
                 for ud, zg, rg, rg2 in zip(updir, zipped_grads, running_grads,
                                            running_grads2)]
    param_up = [(p, p + udn[1])
                for p, udn in zip(itemlist(tparams), updir_new)]
    f_update = theano.function([lr], [], updates=updir_new+param_up,
                               on_unused_input='ignore', profile=profile)

    return f_grad_shared, f_update
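The two compiled functions are intended to be called once per minibatch; a hypothetical training loop (minibatches, x, y and lrate are illustrative names, not from the source). The same calling pattern applies to the adadelta variants below.

lrate = 0.0001
for x, y in minibatches:
    cost = f_grad_shared(x, y)  # forward/backward pass; refreshes the running gradient statistics
    f_update(lrate)             # applies the RMSProp update to the shared parameters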
Example #13
def adadelta(lr, tparams, grads, inp, cost):
    zipped_grads = [theano.shared(p.get_value() * numpy.float32(0.),
                                  name='%s_grad' % k)
                    for k, p in tparams.iteritems()]
    running_up2 = [theano.shared(p.get_value() * numpy.float32(0.),
                                 name='%s_rup2' % k)
                   for k, p in tparams.iteritems()]
    running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.),
                                    name='%s_rgrad2' % k)
                      for k, p in tparams.iteritems()]

    zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
    rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
             for rg2, g in zip(running_grads2, grads)]

    f_grad_shared = theano.function(inp, cost, updates=zgup+rg2up,
                                    profile=profile)

    updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * zg
             for zg, ru2, rg2 in zip(zipped_grads, running_up2,
                                     running_grads2)]
    ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
             for ru2, ud in zip(running_up2, updir)]
    param_up = [(p, p + ud) for p, ud in zip(itemlist(tparams), updir)]

    f_update = theano.function([lr], [], updates=ru2up+param_up,
                               on_unused_input='ignore', profile=profile)

    return f_grad_shared, f_update
Example #14
 def child_indices(node):
     indices[node] = numpy.float32(counter[0])
     counter[0] += 1
     if isinstance(node, LeafNode):
         return [numpy.float32(0), numpy.float32(0)]
     else:
         return [indices[node.left], indices[node.right]]
Example #15
def coolBlack():
    IMAGE_WEIGHT = 0.5

    image = cv2.imread("G:/Filters/wasim.jpg",0)
    black = cv2.imread("G:/Filters/black5.jpg",0)
    black = cv2.resize(black, image.shape[::-1])

    res1 = cv2.addWeighted(image, IMAGE_WEIGHT, black, 1 - IMAGE_WEIGHT, 1)


    #NORMALIZE IMAGES
    image = np.float32(image)
    black = np.float32(black)

    image /= 255
    black /= 200

    res = image*black

    cv2.imshow("RES", res)
    cv2.waitKey(0)

    fname = "G:/Filtes/temp.jpg"
    cv2.imwrite(fname, res)
    res = cv2.imread(fname, 0)

    cv2.imshow("BLACK", res)
    cv2.waitKey(0)
Example #16
    def simulate_target(self,thetaX,thetaY,thetaZ, aX, aY, aZ, cX, cY, cZ, camera_height, camera_width, fov):
        img_width = self.target_width
        img_height = self.target_height

        #point maps
        corners = np.float32([[-img_width/2,img_height/2],[img_width/2 ,img_height/2],[-img_width/2,-img_height/2],[img_width/2, -img_height/2]])
        newCorners = np.float32([[0,0],[0,0],[0,0],[0,0]])


        #calculate projection for four corners of image
        for i in range(0,len(corners)):

            #shift to world
            x = corners[i][0] + cX - img_width/2.0
            y = corners[i][1] + cY - img_height/2.0


            #calculate perspective and position
            x , y = self.project_3D_to_2D(thetaX,thetaY,thetaZ, aY, aX, aZ, y, x, cZ,camera_height,camera_width,fov)

            #shift to camera
            x , y = shift_to_image((x,y),camera_width,camera_height)
            newCorners[i] = x,y


        #project image
        M = cv2.getPerspectiveTransform(corners,newCorners)
        sim = cv2.warpPerspective(self.target,M,(self.camera_width,self.camera_height),borderValue=self.backgroundColor)

        return sim
Example #17
def ammoniawater(allligand,index,bond_dist):

	D = 3.0

	allligandcoods = allligand.positions
	ncoods = np.zeros((1,3), dtype = float)
	ncoods[0,:] = allligandcoods[index,:]
	ncoods = np.float32(ncoods)

	tempdist = MDAnalysis.lib.distances.distance_array(allligandcoods, ncoods)
	A = np.where((tempdist < bond_dist) & (tempdist > 0.1))
	mates = np.ravel_multi_index(A, tempdist.shape)
	nummates = np.size(mates)
	hcoods = np.zeros((3,3), dtype = float)

	i = 0
	for j in mates:
		if allligand[j].type == 'H':
			hcoods[i,:] = allligandcoods[j,:]
			i = i + 1

	hcoods = np.float32(hcoods)
	tempvector = hcoods - ncoods
	vector1 = unitvector(tempvector[0,:])
	vector2 = unitvector(tempvector[1,:])
	vector3 = unitvector(tempvector[2,:])
	watercood = np.zeros((3,3), dtype = float)
	watercood[0,:] = ncoods + (D*vector1)
	watercood[1,:] = ncoods + (D*vector2)
	watercood[2,:] = ncoods + (D*vector3)


	return watercood
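Note: unitvector() is an assumed helper shared by these water-placement routines; a minimal sketch, assuming it just normalises a vector:

def unitvector(v):
    # Assumed helper: return v scaled to unit length.
    return v / np.linalg.norm(v)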
Example #18
  def trainLoopWithValidation(self, trainModel, validateModel, maxEpochs):
    lastValidationError = np.inf
    count = 0
    epoch = 0

    validationErrors = []

    while epoch < maxEpochs and count < 8:
      print "epoch " + str(epoch)

      momentum = np.float32(min(np.float32(0.5) + epoch * np.float32(0.01),
                     np.float32(0.99)))

      for batchNr in xrange(self.nrMiniBatches):
        trainModel(batchNr, momentum)

      meanValidation = np.mean(validateModel(), axis=0)
      validationErrors += [meanValidation]

      if meanValidation > lastValidationError:
          count +=1
      else:
          count = 0
      lastValidationError = meanValidation

      epoch +=1

    try:
      plt.plot(validationErrors)
      plt.show()
    except Exception:
      print "validation error plot not made"

    print "number of epochs"
    print epoch
Example #19
def secaminewater(allligand,index,bond_dist):

	D = 3.0

	allligandcoods = allligand.positions
	ncoods = np.zeros((1,3), dtype = float)
	ncoods[0,:] = allligandcoods[index,:]
	ncoods = np.float32(ncoods)

	tempdist = MDAnalysis.lib.distances.distance_array(allligandcoods, ncoods)
	A = np.where((tempdist < bond_dist) & (tempdist > 0.1))
	mates = np.ravel_multi_index(A, tempdist.shape)
	nummates = np.size(mates)
	hcoods = np.zeros((1,3), dtype = float)
	q = 0
	for j in mates:
		if allligand[j].type == 'H':
			hcoods[0,:] = allligandcoods[j,:]
			break

	watercood = np.zeros((1,3), dtype = float)

	hcoods = np.float32(hcoods)
	vector = unitvector(hcoods - ncoods)
	watercood[0,:] = ncoods + (D * vector)

	return watercood
Example #20
def carbonylorcarboxyl(allligand,index,bond_dist):

	allligandcoods = allligand.positions
	ocoods = np.zeros((1,3), dtype = float)
	ocoods[0,:] = allligandcoods[index,:]
	ocoods = np.float32(ocoods)

	tempdist = MDAnalysis.lib.distances.distance_array(ocoods,allligandcoods)
	A = np.argsort(tempdist)
	temp = int(A[0,1])

	Omatecood = np.zeros((1,3), dtype = float)
	Omatecood[0,:] = allligandcoods[temp,:]
	Omatecood = np.float32(Omatecood)

	tempdist2 = MDAnalysis.lib.distances.distance_array(Omatecood, allligandcoods)
	B = np.argsort(tempdist2)
	B = np.delete(B,0,axis = 1)
	for i in xrange(0,B.size):
		if B[0,i] == index:
			C = np.delete(B,i,axis = 1)
			break

	base1 = int(C[0,0])
	base2 = int(C[0,1])
	type1 = allligand[base1].type
	type2 = allligand[base2].type

	if type1 == 'O' or type2 == 'O':
		atype = 'carboxyl'
	else:
		atype = 'carbonyl'

	return atype
Example #21
def main():

    normalize.normalizeData('twitter_dataset.csv')
    cutoff = 0.5
    k = int(raw_input("Input Number of cluster:"))
    start_time = time.time()
    reader = getDataFromFile()
    points = []
    point_counter = 0
    for row in reader:
        points.append(getinfo(row[1:]))
        point_counter = point_counter + 1
    clusters = kmeans(points, k, cutoff)
    #total_points ,final_centroids = clusters
    cluster_per = []
    end_time = time.time() - start_time

    for i,c in enumerate(clusters):
        count = 0
        for p in c.points:
            print " Cluster: ",i + 1,"\t Point :", p
            count = count + 1
        print "no of instance for cluster" , i + 1 ,":" ,count
        cluster_per.append(np.float32(((np.float32(count) / np.float32(point_counter)) * 100)))

    print "Time:" , end_time, "Seconds"
    print "Number of Clusters:" , k
    for idx,cp in enumerate(cluster_per):
        print "cluster", idx + 1 , ":" , cp , "%"
Example #22
    def adjust_lr(self, epoch, size, val_error_list = None):
        
        # lr is calculated every time as a function of epoch and size
        
        if self.config['lr_policy'] == 'step':
            
            if epoch >=20 and epoch < 40:

                self.step_idx = 1
        
            elif epoch >=40 and epoch < 60:
                
                self.step_idx = 2

            elif epoch >=60 and epoch < 70:
                
                self.step_idx = 3
                
            else:
                pass
            
            tuned_base_lr = self.base_lr * 1.0/pow(10.0,self.step_idx) 
                
        if self.config['lr_policy'] == 'auto':
            if epoch>5 and (val_error_list[-3] - val_error_list[-1] <
                                self.config['lr_adapt_threshold']):
                tuned_base_lr = self.base_lr / 10.0
                    
        if self.config['train_mode'] == 'cdd':
            self.shared_lr.set_value(np.float32(tuned_base_lr))
        elif self.config['train_mode'] == 'avg':
            self.shared_lr.set_value(np.float32(tuned_base_lr*size))
        
        if self.verbose: 
            print 'Learning rate now: %.10f' % np.float32(self.shared_lr.get_value())
Example #23
    def __init__(self,name,size,dim=1,dt=0.1,wrap=True,intensity=1.,width=0.1,
                 blink=False,blinkPeriod=0.1,
                 direction=np.float32((1,)*10),start = np.float32((0,)*10),
                 speed=0.01,speed_=(1,)*10):
        super().__init__(name=name,size=size,dim=dim,
                                           dt=dt,wrap=wrap,intensity=intensity,
                                           width=width,direction=direction,
                                           start=start,
                                           blink=blink,
                                           blinkPeriod = blinkPeriod,
                                           speed=speed,speed_=speed_)


        
        self.centerTraj = []
        self._blinkCounter = 0  # TODO reset
        self._hidden = False  # true during the hidden phase of blinking
        dx = 1.0/size
        for d in range(dim):
            origin = start[d]*size
            self.centerTraj.append(StraightTraj(name+"_c"+str(d),size=0,dim=0,dt=dt,speed=speed_[d],
                        wrap=wrap,wrapSize=size,init=origin,dx=dx))

        for trajI,i in zip(self.centerTraj,range(len(self.centerTraj))):
                self.addChildren(**{'center'+str(i):trajI})
Example #24
    def __init__(self, bounds, objectNum):
        self.meas=[]
        self.pred=[]
        self.objectNum = objectNum
        # self.frame = np.zeros((400,400,3), np.uint8) # drawing canvas
        self.mp = np.array((2,1), np.float32) # measurement
        # self.tp = np.zeros((2,1), np.float32) # tracked / prediction
        self.tp = np.array([[np.float32(bounds.center_x)],[np.float32(bounds.center_y)]])
        self.currentPrediction = (bounds.center_x,bounds.center_y)

        # cv2.namedWindow("kalman")
        # cv2.setMouseCallback("kalman",onmouse);
        self.kalman = cv2.KalmanFilter(4,2)
        self.kalman.measurementMatrix = np.array([[1,0,0,0],[0,1,0,0]],np.float32)
        self.kalman.transitionMatrix = np.array([[1,0,1,0],[0,1,0,1],[0,0,1,0],[0,0,0,1]],np.float32)
        self.kalman.processNoiseCov = np.array([[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]],np.float32) * .003
        # self.kalman.processNoiseCov = np.array([[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]],np.float32) * 0.03

        # APPLY KALMAN
        track_x, track_y = bounds.center_x,bounds.center_y
        self.meas.append( (track_x, track_y) )
        for i in range(100):
            self.mp = np.array([[np.float32(track_x)],[np.float32(track_y)]])
            self.kalman.correct(self.mp)
            self.predict()
Example #25
File: batchtps.py Project: rll/lfd
 def test_bending_cost(
     self,
     other,
     bend_coef=DEFAULT_LAMBDA[1],
     outlierprior=1e-1,
     outlierfrac=1e-2,
     outliercutoff=1e-2,
     T=5e-3,
     norm_iters=DEFAULT_NORM_ITERS,
 ):
     self.get_target_points(other, outlierprior, outlierfrac, outliercutoff, T, norm_iters)
     self.update_transform(bend_coef)
     bending_costs = self.bending_cost(bend_coef)
     for i in range(self.N):
         c_gpu = bending_costs[i]
         k_nn = self.kernels[i].get()
         w_nd = self.w_nd[i].get()
         c_cpu = np.float32(0)
         for d in range(DATA_DIM):
             r = np.dot(k_nn, w_nd[:, d]).astype(np.float32)
             r = np.float32(np.dot(w_nd[:, d], r))
             c_cpu += r
         c_cpu *= np.float32(bend_coef)
         if np.abs(c_cpu - c_gpu) > 1e-4:
             ## high err tolerance is b/c of difference in cpu and gpu precision?
             print "cpu and gpu bend costs differ!!!"
             ipy.embed()
             sys.exit(1)
Example #26
def adadelta(lr, tparams, grads, inp, cost, extra_ups=[], extra_outs=[],
             exclude_params=set([])):
    '''Adadelta'''
    zipped_grads = [theano.shared(p.get_value() * np.float32(0.), name='%s_grad'%k)
                    for k, p in tparams.iteritems()]
    running_up2 = [theano.shared(p.get_value() * np.float32(0.), name='%s_rup2'%k)
                   for k, p in tparams.iteritems()]
    running_grads2 = [theano.shared(p.get_value() * np.float32(0.), name='%s_rgrad2'%k)
                      for k, p in tparams.iteritems()]

    zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
    rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
        for rg2, g in zip(running_grads2, grads)]

    f_grad_shared = theano.function(
        inp, [cost]+extra_outs, updates=zgup+rg2up+extra_ups, profile=profile)

    updir = [-T.sqrt(ru2 + 1e-6) / T.sqrt(rg2 + 1e-6) * zg
             for zg, ru2, rg2 in zip(zipped_grads, running_up2, running_grads2)]
    ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
        for ru2, ud in zip(running_up2, updir)]
    param_up = [(p, p + ud) for p, ud in zip(tools.itemlist(tparams), updir)
        if p.name not in exclude_params]

    if not isinstance(lr, list): lr = [lr]
    f_update = theano.function(lr, [], updates=ru2up+param_up,
        on_unused_input='ignore', profile=profile)

    return f_grad_shared, f_update
Example #27
def affine_skew(tilt, phi, img, mask=None):
    '''
    affine_skew(tilt, phi, img, mask=None) -> skew_img, skew_mask, Ai

    Ai - is an affine transform matrix from skew_img to img
    '''
    h, w = img.shape[:2]
    if mask is None:
        mask = np.zeros((h, w), np.uint8)
        mask[:] = 255
    A = np.float32([[1, 0, 0], [0, 1, 0]])
    if phi != 0.0:
        phi = np.deg2rad(phi)
        s, c = np.sin(phi), np.cos(phi)
        A = np.float32([[c,-s], [ s, c]])
        corners = [[0, 0], [w, 0], [w, h], [0, h]]
        tcorners = np.int32( np.dot(corners, A.T) )
        x, y, w, h = cv2.boundingRect(tcorners.reshape(1,-1,2))
        A = np.hstack([A, [[-x], [-y]]])
        img = cv2.warpAffine(img, A, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
    if tilt != 1.0:
        s = 0.8*np.sqrt(tilt*tilt-1)
        img = cv2.GaussianBlur(img, (0, 0), sigmaX=s, sigmaY=0.01)
        img = cv2.resize(img, (0, 0), fx=1.0/tilt, fy=1.0, interpolation=cv2.INTER_NEAREST)
        A[0] /= tilt
    if phi != 0.0 or tilt != 1.0:
        h, w = img.shape[:2]
        mask = cv2.warpAffine(mask, A, (w, h), flags=cv2.INTER_NEAREST)
    Ai = cv2.invertAffineTransform(A)
    return img, mask, Ai
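A hypothetical usage, following the tilt/rotation sampling used in OpenCV's ASIFT sample (the parameter grid below is an assumption, not part of this snippet; img stands for the input image):

params = [(1.0, 0.0)]
for t in 2 ** (0.5 * np.arange(1, 6)):
    for phi in np.arange(0, 180, 72.0 / t):
        params.append((t, phi))
for t, phi in params:
    timg, tmask, Ai = affine_skew(t, phi, img)  # one skewed view per (tilt, rotation) pair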
Example #28
    def update(self, track_x, track_y):

        # APPLY KALMAN
        self.meas.append( (track_x, track_y) )
        self.mp = np.array([[np.float32(track_x)],[np.float32(track_y)]])
        self.kalman.correct(self.mp)
        self.predict()
Example #29
def test_maskandscale():
    t = np.linspace(20, 30, 15)
    t[3] = 100
    tm = np.ma.masked_greater(t, 99)
    fname = pjoin(TEST_DATA_PATH, 'example_2.nc')
    with netcdf_file(fname, maskandscale=True) as f:
        Temp = f.variables['Temperature']
        assert_equal(Temp.missing_value, 9999)
        assert_equal(Temp.add_offset, 20)
        assert_equal(Temp.scale_factor, np.float32(0.01))
        found = Temp[:].compressed()
        del Temp  # Remove ref to mmap, so file can be closed.
        expected = np.round(tm.compressed(), 2)
        assert_allclose(found, expected)

    with in_tempdir():
        newfname = 'ms.nc'
        f = netcdf_file(newfname, 'w', maskandscale=True)
        f.createDimension('Temperature', len(tm))
        temp = f.createVariable('Temperature', 'i', ('Temperature',))
        temp.missing_value = 9999
        temp.scale_factor = 0.01
        temp.add_offset = 20
        temp[:] = tm
        f.close()

        with netcdf_file(newfname, maskandscale=True) as f:
            Temp = f.variables['Temperature']
            assert_equal(Temp.missing_value, 9999)
            assert_equal(Temp.add_offset, 20)
            assert_equal(Temp.scale_factor, np.float32(0.01))
            expected = np.round(tm.compressed(), 2)
            found = Temp[:].compressed()
            del Temp
            assert_allclose(found, expected)
Example #30
def test_decode_cf_with_conflicting_fill_missing_value():
    expected = Variable(['t'], [np.nan, np.nan, 2], {'units': 'foobar'})
    var = Variable(['t'], np.arange(3),
                   {'units': 'foobar',
                    'missing_value': 0,
                    '_FillValue': 1})
    with warnings.catch_warnings(record=True) as w:
        actual = conventions.decode_cf_variable('t', var)
        assert_identical(actual, expected)
        assert 'has multiple fill' in str(w[0].message)

    expected = Variable(['t'], np.arange(10), {'units': 'foobar'})

    var = Variable(['t'], np.arange(10),
                   {'units': 'foobar',
                    'missing_value': np.nan,
                    '_FillValue': np.nan})
    actual = conventions.decode_cf_variable('t', var)
    assert_identical(actual, expected)

    var = Variable(['t'], np.arange(10),
                   {'units': 'foobar',
                    'missing_value': np.float32(np.nan),
                    '_FillValue': np.float32(np.nan)})
    actual = conventions.decode_cf_variable('t', var)
    assert_identical(actual, expected)
Example #31
#%% Temperature and density data to process

for idx in range(0,len(lst)):

    rhoaux.append(np.loadtxt('/home/coronelth/validacion_VdW/'+lst[idx] +'/rhoaux',np.float32).reshape([300,3]).mean(1,np.float32)) # 300 x 1
    Taux.  append(np.loadtxt('/home/coronelth/validacion_VdW/'+lst[idx] +'/Taux',np.float32).reshape([300,3]).mean(1,np.float32)) # 300 x 1


rhoaux = np.array(rhoaux)

H = (rhoaux.shape[1])*1.0

Y_r = np.arange(H).astype(np.float32) / H

rho_r = rhoaux/np.float32(rhoc)
T_r = Taux/np.float32(Tc)


#%% Validation of Rho_r vs Y_r

esp = np.int32(15)
ms = '6'

plt.figure(1)
    
plt.plot( Y_r[0:rho_r.shape[1]:esp], rho_r[ 0 ][0:rho_r.shape[1]:esp], label ='$T_r = 0.6$', linestyle = 'None', color = 'r', marker =  'D', mfc = 'r', ms = ms ) 
plt.plot( Y_r[0:rho_r.shape[1]:esp], rho_r[ 4 ][0:rho_r.shape[1]:esp], label ='$T_r = 0.7$', linestyle = 'None', color = 'm', marker =  '>', mfc = 'm', ms = ms)     
plt.plot( Y_r[0:rho_r.shape[1]:esp], rho_r[ 8 ][0:rho_r.shape[1]:esp], label ='$T_r = 0.8$', linestyle = 'None', color = 'b', marker = 's', mfc = 'b', ms = ms)     
plt.plot( Y_r[0:rho_r.shape[1]:esp], rho_r[12 ][0:rho_r.shape[1]:esp], label ='$T_r = 0.9$', linestyle = 'None', color = 'g', marker = 'v', mfc = 'g', ms = ms)
Example #32
def variables(nc, data):
    import numpy as np

    #time
    v = nc.createVariable('time', np.float64, ('time', ))
    #variable attributes
    v.units = 'seconds since 1970-01-01 00:00:00'
    v.standard_name = 'time'
    v.long_name = 'Time (seconds since 1970-01-01 00:00:00)'
    v.axis = 'T'
    v.valid_min = np.float64(min(data.ET))
    v.valid_max = np.float64(max(data.ET))
    v.calendar = 'standard'
    #write data
    v[:] = np.float64(data.ET)

    #lat
    v = nc.createVariable('latitude', np.float32, ('latitude', ))
    #variable attributes
    v.units = 'degrees_north'
    v.standard_name = 'latitude'
    v.long_name = 'Latitude'
    #write data
    v[:] = np.float32(data.lat)

    #lon
    v = nc.createVariable('longitude', np.float32, ('longitude', ))
    #variable attributes
    v.units = 'degrees_east'
    v.standard_name = 'longitude'
    v.long_name = 'Longitude'
    #write data
    v[:] = np.float32(data.lon)

    #doy
    v = nc.createVariable('day_of_year', np.float32, ('time', ))
    #variable attributes
    v.units = '1'
    v.long_name = 'Day of Year'
    v.valid_min = np.float32(min(data.DoY))
    v.valid_max = np.float32(max(data.DoY))
    #write data
    v[:] = np.float32(data.DoY)

    #year
    v = nc.createVariable('year', np.int32, ('time', ))
    #variable attributes
    v.units = '1'
    v.long_name = 'Year'
    v.valid_min = np.int32(min(data.DT[:, 0]))
    v.valid_max = np.int32(max(data.DT[:, 0]))
    #write data
    v[:] = np.int32(data.DT[:, 0])

    #month
    v = nc.createVariable('month', np.int32, ('time', ))
    #variable attributes
    v.units = '1'
    v.long_name = 'Month'
    v.valid_min = np.int32(min(data.DT[:, 1]))
    v.valid_max = np.int32(max(data.DT[:, 1]))
    #write data
    v[:] = np.int32(data.DT[:, 1])

    #day
    v = nc.createVariable('day', np.int32, ('time', ))
    #variable attributes
    v.units = '1'
    v.long_name = 'Day'
    v.valid_min = np.int32(min(data.DT[:, 2]))
    v.valid_max = np.int32(max(data.DT[:, 2]))
    #write data
    v[:] = np.int32(data.DT[:, 2])

    #hour
    v = nc.createVariable('hour', np.int32, ('time', ))
    #variable attributes
    v.units = '1'
    v.long_name = 'Hour'
    v.valid_min = np.int32(min(data.DT[:, 3]))
    v.valid_max = np.int32(max(data.DT[:, 3]))
    #write data
    v[:] = np.int32(data.DT[:, 3])

    #minute
    v = nc.createVariable('minute', np.int32, ('time', ))
    #variable attributes
    v.units = '1'
    v.long_name = 'Minute'
    v.valid_min = np.int32(min(data.DT[:, 4]))
    v.valid_max = np.int32(max(data.DT[:, 4]))
    #write data
    v[:] = np.int32(data.DT[:, 4])

    #second
    v = nc.createVariable('second', np.float32, ('time', ))
    #variable attributes
    v.units = '1'
    v.long_name = 'Second'
    v.valid_min = np.float32(min(data.DT[:, 5]))
    v.valid_max = np.float32(max(data.DT[:, 5]))
    #write data
    v[:] = np.float32(data.DT[:, 5])

    del np
Example #33
def process(
    neighbour_cube: cli.inputcube,
    cube: cli.inputcube,
    lapse_rate: cli.inputcube = None,
    *,
    apply_lapse_rate_correction=False,
    land_constraint=False,
    similar_altitude=False,
    extract_percentiles: cli.comma_separated_list = None,
    ignore_ecc_bounds=False,
    new_title: str = None,
    suppress_warnings=False,
    realization_collapse=False,
):
    """Module to run spot data extraction.

    Extract diagnostic data from gridded fields for spot data sites. It is
    possible to apply a temperature lapse rate adjustment to temperature data
    that helps to account for differences between the spot site's real altitude
    and that of the grid point from which the temperature data is extracted.

    Args:
        neighbour_cube (iris.cube.Cube):
            Cube of spot-data neighbours and the spot site information.
        cube (iris.cube.Cube):
            Cube containing the diagnostic data to be extracted.
        lapse_rate (iris.cube.Cube):
            Optional cube containing temperature lapse rates. If this cube is
            provided and a screen temperature cube is being processed, the
            lapse rates will be used to adjust the temperature to better
            represent each spot's site-altitude.
        apply_lapse_rate_correction (bool):
            Use to apply a lapse-rate correction to screen temperature data so
            that the data are a better match to the altitude of the spot site
            for which they have been extracted.
        land_constraint (bool):
            Use to select the nearest-with-land-constraint neighbour-selection
            method from the neighbour_cube. This means that the grid points
            should be land points except for sites where none were found within
            the search radius when the neighbour cube was created. May be used
            with similar_altitude.
        similar_altitude (bool):
            Use to select the nearest-with-height-constraint
            neighbour-selection method from the neighbour_cube. These are grid
            points that were found to be the closest in altitude to the spot
            site within the search radius defined when the neighbour cube was
            created. May be used with land_constraint.
        extract_percentiles (list or int):
            If set to a percentile value or a list of percentile values,
            data corresponding to those percentiles will be returned. For
            example "25, 50, 75" will result in the 25th, 50th and 75th
            percentiles being returned from a cube of probabilities,
            percentiles or realizations. Deterministic input data will raise
            a warning message.
            Note that for percentiles inputs, the desired percentile(s) must
            exist in the input cube.
        ignore_ecc_bounds (bool):
            Demotes exceptions where calculated percentiles are outside the ECC
            bounds range to warnings.
        new_title (str):
            New title for the spot-extracted data.  If None, this attribute is
            removed from the output cube since it has no prescribed standard
            and may therefore contain grid information that is no longer
            correct after spot-extraction.
        suppress_warnings (bool):
            Suppress warning output. This option should only be used if it
            is known that warnings will be generated but they are not required.
        realization_collapse (bool):
            Triggers equal-weighting blending of the realization coord if required.
            Use this if a threshold coord is also present on the input cube.

    Returns:
        iris.cube.Cube:
           Cube of spot data.

    Raises:
        ValueError:
            If the percentile diagnostic cube does not contain the requested
            percentile value.
        ValueError:
            If the lapse rate cube was provided but the diagnostic being
            processed is not air temperature.
        ValueError:
            If the lapse rate cube provided does not have the name
            "air_temperature_lapse_rate"
        ValueError:
            If the lapse rate cube does not contain a single valued height
            coordinate.

    Warns:
        warning:
           If diagnostic cube is not a known probabilistic type.
        warning:
            If a lapse rate cube was provided, but the height of the
            temperature does not match that of the data used.
        warning:
            If a lapse rate cube was not provided, but the option to apply
            the lapse rate correction was enabled.

    """

    import warnings

    import iris
    import numpy as np
    from iris.exceptions import CoordinateNotFoundError

    from improver.ensemble_copula_coupling.ensemble_copula_coupling import (
        ConvertProbabilitiesToPercentiles,
    )
    from improver.metadata.probabilistic import find_percentile_coordinate
    from improver.percentile import PercentileConverter
    from improver.spotdata.apply_lapse_rate import SpotLapseRateAdjust
    from improver.spotdata.neighbour_finding import NeighbourSelection
    from improver.spotdata.spot_extraction import SpotExtraction
    from improver.utilities.cube_extraction import extract_subcube
    from improver.utilities.cube_manipulation import collapse_realizations

    if realization_collapse:
        cube = collapse_realizations(cube)
    neighbour_selection_method = NeighbourSelection(
        land_constraint=land_constraint, minimum_dz=similar_altitude
    ).neighbour_finding_method_name()
    result = SpotExtraction(neighbour_selection_method=neighbour_selection_method)(
        neighbour_cube, cube, new_title=new_title
    )

    # If a probability or percentile diagnostic cube is provided, extract
    # the given percentile if available. This is done after the spot-extraction
    # to minimise processing time; usually there are far fewer spot sites than
    # grid points.
    if extract_percentiles:
        extract_percentiles = [np.float32(x) for x in extract_percentiles]
        try:
            perc_coordinate = find_percentile_coordinate(result)
        except CoordinateNotFoundError:
            if "probability_of_" in result.name():
                result = ConvertProbabilitiesToPercentiles(
                    ecc_bounds_warning=ignore_ecc_bounds
                )(result, percentiles=extract_percentiles)
                result = iris.util.squeeze(result)
            elif result.coords("realization", dim_coords=True):
                fast_percentile_method = not np.ma.isMaskedArray(result.data)
                result = PercentileConverter(
                    "realization",
                    percentiles=extract_percentiles,
                    fast_percentile_method=fast_percentile_method,
                )(result)
            else:
                msg = (
                    "Diagnostic cube is not a known probabilistic type. "
                    "The {} percentile could not be extracted. Extracting "
                    "data from the cube including any leading "
                    "dimensions.".format(extract_percentiles)
                )
                if not suppress_warnings:
                    warnings.warn(msg)
        else:
            constraint = ["{}={}".format(perc_coordinate.name(), extract_percentiles)]
            perc_result = extract_subcube(result, constraint)
            if perc_result is not None:
                result = perc_result
            else:
                msg = (
                    "The percentile diagnostic cube does not contain the "
                    "requested percentile value. Requested {}, available "
                    "{}".format(extract_percentiles, perc_coordinate.points)
                )
                raise ValueError(msg)
    # Check whether a lapse rate cube has been provided and we are dealing with
    # temperature data and the lapse-rate option is enabled.
    if apply_lapse_rate_correction and lapse_rate:
        if not result.name() == "air_temperature":
            msg = (
                "A lapse rate cube was provided, but the diagnostic being "
                "processed is not air temperature and cannot be adjusted."
            )
            raise ValueError(msg)

        if not lapse_rate.name() == "air_temperature_lapse_rate":
            msg = (
                "A cube has been provided as a lapse rate cube but does "
                "not have the expected name air_temperature_lapse_rate: "
                "{}".format(lapse_rate.name())
            )
            raise ValueError(msg)

        try:
            lapse_rate_height_coord = lapse_rate.coord("height")
        except (ValueError, CoordinateNotFoundError):
            msg = (
                "Lapse rate cube does not contain a single valued height "
                "coordinate. This is required to ensure it is applied to "
                "equivalent temperature data."
            )
            raise ValueError(msg)

        # Check the height of the temperature data matches that used to
        # calculate the lapse rates. If so, adjust temperatures using the lapse
        # rate values.
        if cube.coord("height") == lapse_rate_height_coord:
            plugin = SpotLapseRateAdjust(
                neighbour_selection_method=neighbour_selection_method
            )
            result = plugin(result, neighbour_cube, lapse_rate)
        elif not suppress_warnings:
            warnings.warn(
                "A lapse rate cube was provided, but the height of the "
                "temperature data does not match that of the data used "
                "to calculate the lapse rates. As such the temperatures "
                "were not adjusted with the lapse rates."
            )

    elif apply_lapse_rate_correction and not lapse_rate:
        if not suppress_warnings:
            warnings.warn(
                "A lapse rate cube was not provided, but the option to "
                "apply the lapse rate correction was enabled. No lapse rate "
                "correction could be applied."
            )

    # Remove the internal model_grid_hash attribute if present.
    result.attributes.pop("model_grid_hash", None)
    return result
Example #34
    def paint_text(self, text, i, color=False):
        """ Draw a text image with PIL, given the canvas size; return the text image.
        :param h: canvas height
        :param w: canvas width
        :return: img
        """
        if color == True:
            # Random canvas background colour
            bg_b = np.random.randint(0, 255)  # background colour
            bg_g = np.random.randint(0, 255)
            bg_r = np.random.randint(0, 255)
            # Random foreground (text) colour
            fg_b = np.random.randint(0, 255)  # foreground colour
            fg_g = np.random.randint(0, 255)
            fg_r = np.random.randint(0, 255)
            # Compute the colour difference between foreground and background
            bg_color = sRGBColor(bg_b, bg_g, bg_r)
            bg_color = convert_color(bg_color, CMYKColor)  # convert to CMYK
            bg_color = convert_color(bg_color, LabColor)
            fg_color = sRGBColor(fg_b, fg_g, fg_r)
            fg_color = convert_color(fg_color, CMYKColor)  # convert to CMYK
            fg_color = convert_color(fg_color, LabColor)
            delta_e = delta_e_cie2000(bg_color, fg_color)
            while delta_e < 150 or delta_e > 250:  # resample until delta_e lies in the 150-250 range
                # New canvas background colour
                bg_b = np.random.randint(0, 255)
                bg_g = np.random.randint(0, 255)
                bg_r = np.random.randint(0, 255)
                # New text foreground colour
                fg_b = np.random.randint(0, 255)
                fg_g = np.random.randint(0, 255)
                fg_r = np.random.randint(0, 255)
                # Recompute the colour difference between foreground and background
                bg_color = sRGBColor(bg_b, bg_g, bg_r)
                bg_color = convert_color(bg_color, LabColor)
                fg_color = sRGBColor(fg_b, fg_g, fg_r)
                fg_color = convert_color(fg_color, LabColor)
                delta_e = delta_e_cie2000(bg_color, fg_color)
        else:
            # Greyscale canvas background
            bg_b = np.random.randint(200, 255)  # background colour
            bg_g = bg_b
            bg_r = bg_b
            # Foreground (text) colour
            fg_b = np.random.randint(0, 128)  # foreground colour
            fg_g = fg_b
            fg_r = fg_b
        # Randomly pick a font
        np.random.shuffle(self.font_name)
        cur_fonts = self.fonts.get(self.font_name[0])
        keys = list(cur_fonts.keys())
        np.random.shuffle(keys)
        font = cur_fonts.get(keys[0])
        text_size = font.getsize(text)

        # Create the canvas based on the rendered text size
        img_w = text_size[0]
        img_h = text_size[1]

        # Margins around the text region
        h_space = np.random.randint(6, 20)
        w_space = 6
        h = img_h + h_space
        w = img_w + w_space
        canvas = Image.new('RGB', (w, h), (bg_b, bg_g, bg_r))
        draw = ImageDraw.Draw(canvas)

        # Random translation offset
        start_x = np.random.randint(2, w_space-2)
        start_y = np.random.randint(2, h_space-2)

        # Draw the current text line
        draw.text((start_x, start_y), text, font=font, fill=(fg_b, fg_g, fg_r))
        img_array = np.array(canvas)
        # Perspective distortion
        src = np.float32([[start_x, start_y],
                          [start_x + w, start_y],
                          [start_x + w, start_y + h],
                          [start_x, start_y + h]])

        dst = np.float32([[start_x + np.random.randint(0, 10), start_y + np.random.randint(0, 5)],
                          [start_x + w - np.random.randint(0, 10), start_y + np.random.randint(0, 5)],
                          [start_x + w - np.random.randint(0, 10), start_y + h - np.random.randint(0, 5)],
                          [start_x + np.random.randint(0, 10), start_y + h - np.random.randint(0, 5)]])
        M = cv2.getPerspectiveTransform(src, dst)
        img_array = cv2.warpPerspective(img_array.copy(), M, (w, h),
                                  borderMode=cv2.BORDER_CONSTANT,
                                  borderValue=(bg_b, bg_g, bg_r))
        # Image.fromarray(img_array).show()
        # Random rotation
        angle = np.random.randint(-8, 8)
        rotated = rotate_bound(img_array, angle=angle, bg_color=(bg_b, bg_g, bg_r))
        canvas = Image.fromarray(rotated)
        if color:
            img_array = np.array(canvas.convert('CMYK'))[:,:,0:3]  # rgb to cmyk
            img_array = cv2.resize(img_array.copy(), (128, 32), interpolation=cv2.INTER_CUBIC)  # resize
            ndimg = Image.fromarray(img_array).convert('CMYK')
        else:
            img_array = np.array(canvas.convert('L'))  # rgb to grayscale
            img_array = cv2.resize(img_array.copy(), (128, 32), interpolation=cv2.INTER_CUBIC)  # resize
            ndimg = Image.fromarray(img_array)
        # Save the image
        save_path = os.path.join(self.save_path, '{}.jpeg'.format(i))  # the index i serves as the file name
        ndimg.save(save_path)
Example #35
def _read_float32(f):
    '''Read a 32-bit float'''
    return np.float32(struct.unpack('>f', f.read(4))[0])
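A quick hypothetical check against an in-memory big-endian buffer:

import io
import struct
import numpy as np

buf = io.BytesIO(struct.pack('>f', 1.5))
assert _read_float32(buf) == np.float32(1.5)  # 1.5 is exactly representable in float32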
Example #36
def f_1(x):  # function used in the integration
    return np.float32(6-6*(x**5))
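Since the comment says the function is used in an integration, a hypothetical numerical check: the exact integral of 6 - 6*x**5 over [0, 1] is 6 - 1 = 5.

import numpy as np

x = np.linspace(0.0, 1.0, 10001, dtype=np.float32)
approx = np.trapz(f_1(x), x)  # ~ 5.0, matching the analytic value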
Example #37
        input_image_batch = []
        output_image_batch = []

        # Collect a batch of images
        for j in range(args.batch_size):
            index = i*args.batch_size + j
            id = id_list[index]
            input_image = utils.load_image(train_input_names[id])
            output_image = utils.load_image(train_output_names[id])

            with tf.device('/cpu:0'):
                input_image, output_image = data_augmentation(input_image, output_image)


                # Prep the data. Make sure the labels are in one-hot format
                input_image = np.float32(input_image) / 255.0
                output_image = np.float32(helpers.one_hot_it(label=output_image, label_values=[0,255]))

                input_image_batch.append(np.expand_dims(input_image, axis=0))
                output_image_batch.append(np.expand_dims(output_image, axis=0))

        if args.batch_size == 1:
            input_image_batch = input_image_batch[0]
            output_image_batch = output_image_batch[0]
        else:
            input_image_batch = np.squeeze(np.stack(input_image_batch, axis=1))
            output_image_batch = np.squeeze(np.stack(output_image_batch, axis=1))

        # Do the training
        _,current=sess.run([opt,loss],feed_dict={net_input:input_image_batch,net_output:output_image_batch})
        current_losses.append(current)
Example #38
parentFitArray=parentFitArray+1000000

injSize=int(sys.argv[1])

for k in range(numOuterIters):
    for i in range(numIters):
        geneMutationChance=baseGeneMutation+0.001*i
        injMutationChance=baseInjMutation+0.001*i
        paramMutationChance=baseParamMutation+0.001*i
        myIP=comm.scatter(iparray,root=0)
        myFitness,MNS,MXS,numViable=getFitness(numStochasticReplicates,myIP,injSize)

#    print("FITNESS_%s="%i,rank,myFitness,myIP[429:432],injSize)

        recvbuf=None
        sendbuf=np.float32(myFitness)
        if rank==0:
            recvbuf=np.empty([size], dtype=np.float32)
        comm.Gather(sendbuf, recvbuf, root=0)

#        recvbuf2=None
#        sendbuf2=numViable
#        if rank==0:
#            recvbuf2=np.empty([size], dtype=np.int16)
#        comm.Gather(sendbuf2, recvbuf2, root=0)
#        if(rank==0):
#            fnamev=str('NumViable_IS%s_Gen%s_%s.csv'%(injSize,k,i))
#            np.savetxt(fnamev,iparray,delimiter=',')

        if(rank==0):
            iname=str('InternalParameterization_IS%s_Gen%s_%s.csv'%(injSize,k,i))
Example #39
 def __init__(self, master, filter_num=0):
     """ Initialize filters """
     self.current_filter = filter_num  # current OpenCV filter_num
     self.master = master  # link to the main GUI window
     self.frame = None  # current frame
     self.previous = None  # previous frame (gray or color)
     self.background_subtractor = None
     self.opt_flow = {  # container for Optical Flow algorithm
         # Parameters for Shi Tomasi corner detection
         'feature_params': dict(maxCorners=100,
                                qualityLevel=0.3,
                                minDistance=7,
                                blockSize=7),
         # Parameters for Lucas Kanade optical flow
         'lk_params': dict(winSize=(15, 15),
                           maxLevel=2,
                           criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)),
         # Create some random colors
         'color': np.random.randint(0, 255, (100, 3)),
         # Container for corner points of the previous frame
         'points': None,
         # Container for image mask
         'mask': None,
     }
     self.affine_start = {  # starting rotation, shift and transformation
         'rotation': 0,
         'shift': [0, 0],
         'pointers3': np.float32([[0, 0], [400, 0], [0, 400]]),
         'pointers4': np.float32([[0, 0], [400, 0], [0, 400], [400, 400]]),
     }
     self.affine = None  # container for random affine values
     self.detector = None  # blob detector container
     # List of filters in the following format: [name, function, description]
     # Filter functions take frame, convert it and return converted image
     self.container = [
         ['Unchanged', self.filter_unchanged, 'Unchanged original image'],
         ['Canny', self.filter_canny, 'Canny edge detection'],
         [
             'Threshold', self.filter_threshold,
             'Adaptive Gaussian threshold'
         ],
         ['Harris', self.filter_harris, 'Harris corner detection'],
         [
             'SIFT', self.filter_sift,
             'SIFT (Scale-Invariant Feature Transform) algorithm, patented'
         ],
         [
             'SURF', self.filter_surf,
             'SURF (Speeded-Up Robust Features) algorithm, patented'
         ],
         [
             'ORB', self.filter_orb,
             'ORB (Oriented FAST and Rotated BRIEF) algorithm, free'
         ],
         [
             'BRIEF', self.filter_brief,
             'BRIEF descriptors with the help of CenSurE (STAR) detector'
         ],
         [
             'Contours', self.filter_contours,
             'Draw contours with mean colors inside them'
         ],
         [
             'SEEDS', self.filter_seeds,
             'SEEDS (Superpixels Extracted via Energy-Driven Sampling) algorithm'
         ],
         [
             'Blur', self.filter_blur,
             'Blur (Gaussian, median, bilateral or classic)'
         ],
         ['Motion', self.filter_motion, 'Motion detection'],
         [
             'Background', self.filter_background,
             'Background subtractor (KNN, MOG2, MOG or GMG)'
         ],
         ['Skin', self.filter_skin, 'Skin tones detection'],
         ['Optical Flow', self.filter_optflow, 'Lucas Kanade optical flow'],
         [
             'Affine1', self.filter_affine1,
             'Affine random rotations and shifts'
         ],
         ['Affine2', self.filter_affine2, 'Affine random transformations'],
         [
             'Perspective', self.filter_perspective,
             'Perspective random transformations'
         ],
         ['Equalize', self.filter_equalize, 'Histogram Equalization'],
         [
             'CLAHE', self.filter_clahe,
             'CLAHE (Contrast Limited Adaptive Histogram Equalization) algorithm'
         ],
         [
             'LAB', self.filter_lab,
             'Increase the contrast using LAB color space and CLAHE'
         ],
         ['Pyramid', self.filter_pyramid, 'Image pyramid'],
         ['Laplacian', self.filter_laplacian, 'Laplacian gradient filter'],
         [
             'Sobel X', self.filter_sobel_x,
             'Sobel / Scharr vertical gradient filter'
         ],
         [
             'Sobel Y', self.filter_sobel_y,
             'Sobel / Scharr horizontal gradient filter'
         ],
         ['Blobs', self.filter_blob, 'Blob detection'],
         ['First 3 bits', self.filter_3bits, 'Leave the first three bits'],
         ['Max RGB', self.filter_max_rgb, 'Max RGB filter'],
         [
             'Chaotic RGB', self.filter_chaotic_rgb,
             'Chaotic color change of the RGB image'
         ],
         [
             'Swap RGB', self.filter_swap_rgb,
             'Chaotic swap of the RGB channels'
         ],
     ]
     self.set_filter(self.current_filter)