Example #1
File: toydata.py Project: 10sun/tilitools
	def get_2state_gaussian_seq(lens,dims=2,means1=[2,2,2,2],means2=[5,5,5,5],vars1=[1,1,1,1],vars2=[1,1,1,1],anom_prob=1.0):
		
		seqs = co.matrix(0.0, (dims, lens))
		lbls = co.matrix(0, (1,lens))
		marker = 0

		# generate first state sequence
		for d in range(dims):
			seqs[d,:] = co.normal(1,lens)*vars1[d] + means1[d]

		prob = np.random.uniform()
		if prob<anom_prob:		
			# add second state blocks
			while (True):
				max_block_len = 0.6*lens
				min_block_len = 0.1*lens
				block_len = int(max_block_len*np.single(co.uniform(1))+3)
				block_start = int(lens*np.single(co.uniform(1)))

				if (block_len - (block_start+block_len-lens)-3>min_block_len):
					break

			block_len = min(block_len,block_len - (block_start+block_len-lens)-3)
			lbls[block_start:block_start+block_len-1] = 1
			marker = 1
			for d in range(dims):
				#print block_len
				seqs[d,block_start:block_start+block_len-1] = co.normal(1,block_len-1)*vars2[d] + means2[d]

		return (seqs, lbls, marker)
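A minimal usage sketch (assuming the example's own imports, import cvxopt as co and import numpy as np, and that the function is in scope): generate one 100-step sequence and inspect the anomaly labels.

seqs, lbls, marker = get_2state_gaussian_seq(100, dims=2, anom_prob=1.0)
print(seqs.size)          # (2, 100) cvxopt matrix
print(sum(lbls), marker)  # anomalous positions are labeled 1; marker=1 if a block was injected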
Example #2
File: voc.py Project: zgsxwsdxg/ademxapp
 def calc_updates(valid_labels, pred_label, label):
     num_classes = len(valid_labels)
     
     pred_flags = [set(np.where((pred_label == _).ravel())[0]) for _ in valid_labels]
     class_flags = [set(np.where((label == _).ravel())[0]) for _ in valid_labels]
     
     conf = [len(class_flags[j].intersection(pred_flags[k])) for j in range(num_classes) for k in range(num_classes)]
     pixel = [len(class_flags[j]) for j in range(num_classes)]
     return np.single(conf).reshape((num_classes, num_classes)), np.single(pixel)
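A hedged usage sketch (np assumed to be numpy): build the confusion matrix and per-class pixel counts for a toy two-class segmentation.

import numpy as np

label = np.array([[0, 0], [1, 1]])  # ground truth
pred = np.array([[0, 1], [1, 1]])   # prediction
conf, pixel = calc_updates([0, 1], pred, label)
print(conf)   # [[1. 1.] [0. 2.]] -- rows: true class, cols: predicted class
print(pixel)  # [2. 2.] -- pixels per true class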
Example #3
def quad_poly_patch_color_basis(patch):
    h, w, c = patch.shape[0], patch.shape[1], patch.shape[2]
    assert c == 3
    basis = np.zeros((h, w, 10), dtype=np.single)
    for i in range(h):
        for j in range(w):
            l, a, b = np.single(patch[i, j, 0]), np.single(patch[i, j, 1]), np.single(patch[i, j, 2])
            basis[i, j, :] = [l * l, a * a, b * b, l * a, l * b, a * b, l, a, b, 1]
    return basis    
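The 10-dimensional basis stacks the squared, cross, linear, and constant terms of the three channels. A quick shape check on a hypothetical random patch:

import numpy as np

patch = np.random.rand(4, 4, 3)            # toy 4x4 LAB patch
basis = quad_poly_patch_color_basis(patch)
print(basis.shape)                         # (4, 4, 10)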
Example #4
def roc_curve(predicted, actual, cls):
    si = np.argsort(-predicted)
    tp = np.cumsum(np.single(actual[si]==cls))
    fp = np.cumsum(np.single(actual[si]!=cls))
    tp = tp/np.sum(actual==cls)
    fp = fp/np.sum(actual!=cls)
    tp = np.hstack((0.0, tp, 1.0))
    fp = np.hstack((0.0, fp, 1.0))
    return tp, fp
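A small usage sketch on toy data (np assumed to be numpy); tp and fp are the cumulative true/false positive rates, padded with 0.0 and 1.0 at the ends.

import numpy as np

predicted = np.array([0.9, 0.8, 0.3, 0.1])  # scores, higher = more confident
actual = np.array([1, 0, 1, 0])
tp, fp = roc_curve(predicted, actual, cls=1)
print(tp)  # [0.  0.5 0.5 1.  1.  1. ]
print(fp)  # [0.  0.  0.5 0.5 1.  1. ]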
Example #5
def get_lightness_equalization_curve_control_points(L):
    h, w = L.shape[0], L.shape[1]
    bins = 100
    hist, bin_edges = np.histogram(L, bins, range=(0, 100), density=False)
    hist = np.single(hist) / np.single(h * w)
    cumsum_hist = np.cumsum(hist)
    spline, x = get_cum_hist_BSpline_curve(cumsum_hist, 0.0, 100.0, bins)
#     mpplot.plot(x,cumsum_hist)
#     mpplot.plot(x, spline(x),'.-')
#     mpplot.show()
    return spline.get_coeffs()
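get_cum_hist_BSpline_curve is not shown in this listing; a hypothetical reconstruction consistent with how it is called here (fit a smoothing cubic spline to the cumulative histogram over [x_min, x_max] and return the spline plus its sample grid), using scipy.interpolate.UnivariateSpline:

import numpy as np
from scipy.interpolate import UnivariateSpline

def get_cum_hist_BSpline_curve(cum_hist, x_min, x_max, bins):
    x = np.linspace(x_min, x_max, bins)
    spline = UnivariateSpline(x, cum_hist, k=3)  # cubic smoothing B-spline
    return spline, x

UnivariateSpline exposes get_coeffs(), matching the return spline.get_coeffs() above.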
Example #6
def point_wise_add_scalar(A, scalar1=1, scalar2=1, OUT_BUFFER=None, gpu_ind=0):
	assert isinstance(gpu_ind,int)
	check_buffer(A)
	
	if OUT_BUFFER is not None:
		check_buffer(OUT_BUFFER)
		OUT_BUFFER[1] = copy.deepcopy(A[1])
	else:
		OUT_BUFFER = copy.deepcopy(A)
	
	_ntm_module.point_wise_add_scalar(A[0], np.single(scalar1), np.single(scalar2), OUT_BUFFER[0], gpu_ind)
Example #7
def reverse_network_recur(deriv_above, layer_ind, LAYERS, WEIGHTS, OUTPUT, OUTPUT_PREV, PARTIALS, WEIGHT_DERIVS, keep_dims, scalar, abort_layer): # multiply all partials together
	L = LAYERS[layer_ind]
	N_ARGS = len(L['in_shape'])
	deriv_above_created = deriv_above is None
	
	if len(LAYERS[layer_ind]['out_shape']) > 1 and deriv_above is None: # skip image dim
		n_imgs = LAYERS[layer_ind]['out_shape'][0]
		deriv_above_shape = LAYERS[layer_ind]['out_shape'] + LAYERS[layer_ind]['out_shape'][1:]
		deriv_above = init_buffer(np.single(np.tile(np.eye(np.prod(LAYERS[layer_ind]['out_shape'][1:]))[np.newaxis], (n_imgs, 1, 1))).reshape(deriv_above_shape))
	elif deriv_above is None:
		deriv_above_shape = LAYERS[layer_ind]['out_shape'] + LAYERS[layer_ind]['out_shape']
		deriv_above = init_buffer(np.single(np.eye(np.prod(LAYERS[layer_ind]['out_shape'])).reshape(deriv_above_shape)))
	
	for arg in range(N_ARGS):
		src = L['in_source'][arg]
		if L['in_source'][arg] != -1: # don't compute gradients for user-supplied entries (Ex. images)
			
			# compute derivs
			args = build_forward_args(L, layer_ind, OUTPUT, OUTPUT_PREV, WEIGHTS)
			deriv_above_new = L['deriv_F'][arg](args, OUTPUT[layer_ind], deriv_above, additional_args=L['additional_deriv_args'][arg])
			
			# input is a layer:
			if isinstance(src, int) and src != -1:
				# memory partials, stop here, add these partials to the correct weight derivs:
				if L['in_prev'][arg]:
					P = PARTIALS[src]
					N_ARGS2 = len(P['in_source'])
					for arg2 in range(N_ARGS2):
						p_layer_ind = P['in_source'][arg2]
						p_arg = P['in_arg'][arg2]
						p_partial = P['partial'][arg2]
						
						# multiply partials batched over the images, then sum the results:
						deriv_temp = mult_partials(deriv_above_new, p_partial, LAYERS[src]['out_shape'], keep_dims=keep_dims)
						
						WEIGHT_DERIVS[p_layer_ind][p_arg] = add_points_inc((WEIGHT_DERIVS[p_layer_ind][p_arg], deriv_temp), scalar=scalar)
						
						free_buffer(deriv_temp)
						
				# another layer (At this time step, go back to earlier layers)
				# [do not go back if this is the abort_layer where we stop backpropping]
				elif src != abort_layer:
					reverse_network_recur(deriv_above_new, src, LAYERS, WEIGHTS, OUTPUT, OUTPUT_PREV, PARTIALS, WEIGHT_DERIVS, keep_dims, scalar, abort_layer)
			
			# input is not a layer, end here
			else:
				WEIGHT_DERIVS[layer_ind][arg] = add_points_inc((WEIGHT_DERIVS[layer_ind][arg], deriv_above_new), scalar=scalar)
				
			free_buffer(deriv_above_new)
	
	if deriv_above_created:
		free_buffer(deriv_above)
	
	return WEIGHT_DERIVS
Example #8
def get_central_pixel_grad_vector(batch_img, nb_hs):
    nb_sz = 2 * nb_hs + 1
    ch, h, w, num_imgs = \
    batch_img.shape[0], batch_img.shape[1], batch_img.shape[2], batch_img.shape[3]
    ct_x, ct_y = w // 2, h // 2
        
    # 2D gradient vector for each RGB (or LAB channel)
    right, left = batch_img[:, ct_y, ct_x + 1, :], batch_img[:, ct_y, ct_x - 1, :]
    top, bottom = batch_img[:, ct_y + 1, ct_x, :], batch_img[:, ct_y - 1, ct_x, :]
    x_grad, y_grad = (right - left) / np.single(2.0), (top - bottom) / np.single(2.0)
    
    return np.concatenate((x_grad, y_grad), axis=0) 
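A usage sketch with a hypothetical random batch (np assumed to be numpy): the result stacks the x- and y-gradients of each channel at the central pixel.

import numpy as np

batch = np.random.rand(3, 9, 9, 5)   # (channels, height, width, num_imgs)
grad = get_central_pixel_grad_vector(batch, nb_hs=4)
print(grad.shape)                    # (6, 5): [x_grad; y_grad] per channel, per image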
Example #9
 def test_against_known_values(self):
     R = fractions.Fraction
     assert_equal(R(1075, 512),
                  R(*np.half(2.1).as_integer_ratio()))
     assert_equal(R(-1075, 512),
                  R(*np.half(-2.1).as_integer_ratio()))
     assert_equal(R(4404019, 2097152),
                  R(*np.single(2.1).as_integer_ratio()))
     assert_equal(R(-4404019, 2097152),
                  R(*np.single(-2.1).as_integer_ratio()))
     assert_equal(R(4728779608739021, 2251799813685248),
                  R(*np.double(2.1).as_integer_ratio()))
     assert_equal(R(-4728779608739021, 2251799813685248),
                  R(*np.double(-2.1).as_integer_ratio()))
Example #10
File: voc.py Project: zgsxwsdxg/ademxapp
def _get_transformer_image():
    scale, mean_, std_ = _get_scalemeanstd()
    transformers = []
    if scale > 0:
        transformers.append(ts.ColorScale(np.single(scale)))
    transformers.append(ts.ColorNormalize(mean_, std_))
    return transformers
Example #11
  def steer(self):

    N = self.fixedCL.shape

    # Obtain 2D view
    SteeringGeneric.update_slices(self)

    fixed2D, moving2D = self.get_slices()

    fixedV, movingV = self.get_slice_axes()

    F = SteeringGeneric.get_frame_matrix(self, fixedV, movingV)

    # Perform 2D registration
    scale, T2 = self.register_scale2d(fixed2D, moving2D)

    print "S = ", scale

    S = np.identity(3, np.single) * scale

    T = F[:,0] * T2[0]  + F[:,1] * T2[1]
    C = np.array([N[0]/2, N[1]/2, N[2]/2], np.single)
    T += C - np.dot(S,C)

    T = np.single(T)

    # Subtract identity from matrix to match convention for PolyAffineCL
    for dim in range(3):
      S[dim, dim] -= 1.0

    # Return 3D transformation that will be folded into an existing polyaffine
    return S, T
Example #12
File: so_hmm.py Project: 10sun/tilitools
	def get_scores(self, sol, idx, y=[]):
		y = np.array(y)
		if (y.size==0):
			y=np.array(self.y[idx])

		(foo, T) = y.shape
		N = self.states
		F = self.dims
		scores = matrix(0.0, (1, T))

		# this is the score of the complete example
		anom_score = sol.trans()*self.get_joint_feature_map(idx)

		# transition matrix
		A = self.get_transition_matrix(sol)
		# emission matrix without loss
		em = self.calc_emission_matrix(sol, idx, augment_loss=False, augment_prior=False)
		
		# store scores for each position of the sequence		
		scores[0] = self.start_p[int(y[0,0])] + em[int(y[0,0]),0]
		for t in range(1,T):
			scores[t] = A[int(y[0,t-1]),int(y[0,t])] + em[int(y[0,t]),t]

		# transform for better interpretability
		if max(abs(scores))>10.0**(-15):
			scores = exp(-abs(4.0*scores/max(abs(scores))))
		else:
			scores = matrix(0.0, (1,T))

		return (float(np.single(anom_score)), scores)
Example #13
def get_example_list(num, dims, signal, label, start, min_lens=600, max_lens=800):
	(foo, LEN) = label.shape
	min_genes = int(float(num)*0.15)

	X = []
	Y = []
	phi = []
	marker = []

	cnt_genes = 0
	cnt = 0
	while (cnt<num):
		lens = int(np.single(co.uniform(1, a=min_lens, b=max_lens)))
		if (start+lens)>LEN:
			print('Warning! End of genome. Could not add example.')
			break
		(exm, lbl, phi_i, isGene, end_pos) = get_example(signal, label, dims, start, start+lens)
		
		# accept example, if it has the correct length
		if (end_pos-start<=max_lens or (isGene==True and end_pos-start<800)):		
			X.append(exm)
			Y.append(lbl)
			phi.append(phi_i)
			if isGene:
				marker.append(0)
				cnt_genes += 1
				min_genes -= 1
			else:
				marker.append(1)
			cnt += 1
		start = end_pos

	print('Number of examples {0}. {1} of them are genic.'.format(len(Y), cnt_genes))
	return (X, Y, phi, marker, start) 
Example #14
 def test_floating(self):
     # Ticket #640, floats from string
     fsingle = np.single('1.234')
     fdouble = np.double('1.234')
     flongdouble = np.longdouble('1.234')
     assert_almost_equal(fsingle, 1.234)
     assert_almost_equal(fdouble, 1.234)
     assert_almost_equal(flongdouble, 1.234)
Example #15
 def test_floats_from_string(self, level=rlevel):
     """Ticket #640, floats from string"""
     fsingle = np.single('1.234')
     fdouble = np.double('1.234')
     flongdouble = np.longdouble('1.234')
     assert_almost_equal(fsingle, 1.234)
     assert_almost_equal(fdouble, 1.234)
     assert_almost_equal(flongdouble, 1.234)
Example #16
def add_points_inc(args, OUT_BUFFER=None, scalar=1, scalar0=1, gpu_ind=GPU_IND):
	t = time.time()
	
	A, B = args
	
	if OUT_BUFFER is not None:
		check_buffer(OUT_BUFFER)
		OUT_BUFFER[1] = A[1]
	else:
		OUT_BUFFER = A
	
	_ntm_module3.add_points(A[0], B[0], np.single(scalar), np.single(scalar0), OUT_BUFFER[0], gpu_ind)

	if OUT_BUFFER[1] is None:
		OUT_BUFFER[1] = B[1]
	t_add[0] = time.time() - t
	return OUT_BUFFER
Example #17
def get_lightness_detail_weighted_equalization_curve_control_points(L, sigma):
    bins = 100
    grad_mag = scipy.ndimage.filters.gaussian_gradient_magnitude(L, sigma)
    hist, bin_edges = np.histogram(L, bins, range=(0, 100), weights=grad_mag)
    hist = np.single(hist) / np.sum(grad_mag)
    cumsum_hist = np.cumsum(hist)
    spline, x = get_cum_hist_BSpline_curve(cumsum_hist, 0.0, 100.0, bins)
    return spline.get_coeffs()        
Example #18
    def test_floating_overflow(self):
        """ Strings containing an unrepresentable float overflow """
        fhalf = np.half('1e10000')
        assert_equal(fhalf, np.inf)
        fsingle = np.single('1e10000')
        assert_equal(fsingle, np.inf)
        fdouble = np.double('1e10000')
        assert_equal(fdouble, np.inf)
        flongdouble = assert_warns(RuntimeWarning, np.longdouble, '1e10000')
        assert_equal(flongdouble, np.inf)

        fhalf = np.half('-1e10000')
        assert_equal(fhalf, -np.inf)
        fsingle = np.single('-1e10000')
        assert_equal(fsingle, -np.inf)
        fdouble = np.double('-1e10000')
        assert_equal(fdouble, -np.inf)
        flongdouble = assert_warns(RuntimeWarning, np.longdouble, '-1e10000')
        assert_equal(flongdouble, -np.inf)
Example #19
def reverse_network_btt_recur(deriv_above, layer_ind, LAYERS, WEIGHTS, OUTPUT, WEIGHT_DERIVS, frame, keep_dims, scalar, abort_layer): # multiply all partials together
	L = LAYERS[layer_ind]
	N_ARGS = len(L['in_shape'])
	deriv_above_created = deriv_above is None
	
	if len(LAYERS[layer_ind]['out_shape']) > 1 and deriv_above is None: # skip image dim
		n_imgs = LAYERS[layer_ind]['out_shape'][0]
		deriv_above_shape = LAYERS[layer_ind]['out_shape'] + LAYERS[layer_ind]['out_shape'][1:]
		deriv_above = init_buffer(np.single(np.tile(np.eye(np.prod(LAYERS[layer_ind]['out_shape'][1:]))[np.newaxis], (n_imgs, 1, 1))).reshape(deriv_above_shape))
	elif deriv_above is None:
		deriv_above_shape = LAYERS[layer_ind]['out_shape'] + LAYERS[layer_ind]['out_shape']
		deriv_above = init_buffer(np.single(np.eye(np.prod(LAYERS[layer_ind]['out_shape'])).reshape(deriv_above_shape)))
	
	for arg in range(N_ARGS):
		src = L['in_source'][arg]
		if L['in_source'][arg] != -1: # don't compute gradients for user-supplied entries (Ex. images)
			
			# compute derivs
			args = build_forward_args(L, layer_ind, OUTPUT[frame], OUTPUT[frame-1], WEIGHTS)
			deriv_above_new = L['deriv_F'][arg](args, OUTPUT[frame][layer_ind], deriv_above, additional_args=L['additional_deriv_args'][arg])
			
			# input is a layer:
			if isinstance(src, int) and src != -1:
				# memory partials--keep going back through the network...take step back in time...
				if L['in_prev'][arg]:
					if frame > 1:
						reverse_network_btt_recur(deriv_above_new, src, LAYERS, WEIGHTS, OUTPUT, WEIGHT_DERIVS, frame-1, keep_dims, scalar, abort_layer)
					
				# another layer (At this time step, go back to earlier layers)
				# [do not go back if this is the abort_layer where we stop backpropping]
				elif src != abort_layer:
					reverse_network_btt_recur(deriv_above_new, src, LAYERS, WEIGHTS, OUTPUT, WEIGHT_DERIVS, frame, keep_dims, scalar, abort_layer)
			
			# input is not a layer, end here
			else:
				WEIGHT_DERIVS[layer_ind][arg] = add_points_inc((WEIGHT_DERIVS[layer_ind][arg], deriv_above_new), scalar=scalar)
				
			free_buffer(deriv_above_new)
	
	if deriv_above_created:
		free_buffer(deriv_above)
	
	return WEIGHT_DERIVS
Example #20
def leibnitz(k, n):
    sum = 0.0
    rev_sum = 0.0
    single = numpy.single(0.0)
    
    # 0 to k
    for i in range(0,k):
        sum = sum + mround(pow(-1,i) / (2.0*i+1),n)
    r = list(range(0, k))
    
    r.reverse()
    
    # reversed order
    for i in r:
        rev_sum = rev_sum + mround(pow(-1,i) / (2.0*i+1),n)
    # single precision 
    for i in range(0,k):
        single = single + numpy.single(pow(-1,i) / (2.0*i+1))
        
    return (sum, rev_sum, single)    
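mround is not defined in this listing; it is assumed here to round a value to n decimal places. With that assumption the call below is runnable: the function sums k terms of the Leibniz series pi/4 = 1 - 1/3 + 1/5 - ..., so multiplying each returned sum by 4 approximates pi.

def mround(x, n):
    # assumed helper: round x to n decimal places
    return round(x, n)

s, rev_s, single_s = leibnitz(1000, 8)
print(4 * s, 4 * rev_s, 4 * float(single_s))  # each ~3.1406 for k=1000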
Example #21
def point_wise_div_sqrt(args, OUT_BUFFER=None, clip=10, gpu_ind=GPU_IND):
	A, B = args
	
	if OUT_BUFFER is not None:
		check_buffer(OUT_BUFFER)
		OUT_BUFFER[1] = A[1]
	else:
		OUT_BUFFER = A
	
	_ntm_module3.point_wise_div_sqrt(A[0], B[0], OUT_BUFFER[0], np.single(clip), gpu_ind)
	
	OUT_BUFFER[1] = B[1]
	return OUT_BUFFER
Example #22
    def argmax(self, sol, idx, add_loss=False, opt_type='linear'):
        nd = self.feats
        d = 0  # start of dimension in sol
        val = -10.**10.
        cls = -1  # best class

        for c in range(self.num_classes):
            foo = sol[d:d+nd].T.dot(self.X[:, idx])
            # the argmax of the above function
            # is equal to the argmax of the quadratic function
            # foo = + 2*foo - normPsi
            # since ||\Psi(x_i,z)|| = ||\phi(x_i)|| = y \forall z
            d += nd
            if np.single(foo) > np.single(val):
                val = foo
                cls = c
        if opt_type == 'quadratic':
            normPsi = self.X[:, idx].T.dot(self.X[:, idx])
            val = 2.*val - normPsi

        psi_idx = self.get_joint_feature_map(idx, cls)
        return val, cls, psi_idx
Example #23
def spontaneus_amplitudes(alpha, beta1, beta2, epsilon):
    """
    Spontaneous amplitude of fully expanded canonical model

    Args:
        alpha (float): :math:`\\alpha` parameter of the canonical model
        beta1 (float): :math:`\\beta_1` parameter of the canonical model
        beta2 (float): :math:`\\beta_2` parameter of the canonical model
        epsilon (float): :math:`\\varepsilon` parameter of the canonical model

    Returns:
        :class:`numpy.ndarray`: Spontaneous amplitudes for the oscillator

    """

    if beta2 == 0 and epsilon != 0:
        epsilon = 0

    eps = np.spacing(np.single(1))

    # Find r* numerically
    r = np.roots([float(epsilon*(beta2-beta1)),
                  0.0,
                  float(beta1-epsilon*alpha),
                  0.0,
                  float(alpha),
                  0.0])

    # only unique real values
    r = np.real(np.unique(r[np.abs(np.imag(r)) < eps]))

    r = r[r >= 0]  # no negative amplitude
    if beta2 > 0:
        r = r[r < 1.0/np.sqrt(epsilon)]  # r* below the asymptote only

    def slope(r):
        return alpha + 3*beta1*r**2 + \
            (5*epsilon*beta2*r**4-3*epsilon**2*beta2*r**6) / \
            ((1-epsilon*r**2)**2)

    # Take only stable r*
    ind1 = slope(r) < 0

    ind2a = slope(r) == 0
    ind2b = slope(r-eps) < 0
    ind2c = slope(r+eps) < 0
    ind2 = np.logical_and(ind2a, np.logical_and(ind2b, ind2c))

    r = r[np.logical_or(ind1, ind2)]

    return sorted(r, reverse=True)
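A usage sketch with toy parameters: for alpha=1, beta1=-1, beta2=epsilon=0 the amplitude equation reduces to alpha*r + beta1*r**3 = 0, whose only stable nonzero root is r = 1.

amps = spontaneus_amplitudes(alpha=1.0, beta1=-1.0, beta2=0.0, epsilon=0.0)
print(amps)  # [1.0]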
Example #24
def get_local_context_color(img_ab, px, py, local_context_color_paras, res=None):
    h, w, ch = img_ab.shape[0], img_ab.shape[1], img_ab.shape[2]
    assert ch == 2
    hf_sz, hist_bin_num = \
    local_context_color_paras['half_size'], local_context_color_paras['hist_bin_num']
    xmin = px - hf_sz if (px - hf_sz) >= 0 else 0
    xmax = px + hf_sz + 1 if (px + hf_sz + 1) < w else w
    ymin = py - hf_sz if (py - hf_sz) >= 0 else 0
    ymax = py + hf_sz + 1 if (py + hf_sz + 1) < h else h
    region = img_ab[ymin:ymax, xmin:xmax, :]
    h2, w2 = ymax - ymin, xmax - xmin
    region = region.reshape((h2 * w2, 2))
    
    hist_a, bin_edges_a = np.histogram(region[:, 0], hist_bin_num, range=(-128, 128))
    hist_b, bin_edges_b = np.histogram(region[:, 1], hist_bin_num, range=(-128, 128))
    hist_a = np.single(hist_a) / np.single(h2 * w2)
    hist_b = np.single(hist_b) / np.single(h2 * w2)
#     print 'hist,hist_sum',hist,np.sum(hist)
    if res is None:
        return np.concatenate((hist_a, hist_b))
    else:
        res[:hist_bin_num] = hist_a
        res[hist_bin_num:(2 * hist_bin_num)] = hist_b
Example #25
def conv_block(filters, imgs, stride=1, max_el=3360688123): 
	t_start = time.time()
	filters = np.single(filters); imgs = np.single(imgs)
	assert filters.shape[1] == filters.shape[2]
	assert imgs.shape[1] == imgs.shape[2]
	assert imgs.shape[0] == filters.shape[0]
	x_loc = 0
	y_loc = 0
	n_filters = filters.shape[3]
	n_imgs = imgs.shape[3]
	img_sz = imgs.shape[1]
	filter_sz = filters.shape[1]
	in_channels = filters.shape[0]
	output_sz = len(range(0, img_sz - filter_sz + 1, stride))
	filter_temp = filters.reshape((in_channels*filter_sz**2,n_filters)).T.reshape((n_filters,in_channels*filter_sz*filter_sz,1,1,1))	
	
	patches = np.zeros((1, in_channels*filter_sz*filter_sz,output_sz, output_sz, n_imgs),dtype='single')
	for x in range(output_sz):
		y_loc = 0
		for y in range(output_sz):
			patches[0,:,x,y] = imgs[:,x_loc:x_loc+filter_sz, y_loc:y_loc+filter_sz].reshape((1,in_channels*filter_sz*filter_sz,n_imgs))
			y_loc += stride
		x_loc += stride
	
	total_el = (n_filters*in_channels*filter_sz*filter_sz*output_sz*output_sz*n_imgs)
	n_groups = int(np.ceil(np.single(total_el) / max_el))
	imgs_per_group = int(np.floor(128.0/n_groups))
	#print n_groups
	#print imgs_per_group
	
	output = np.zeros((n_filters,output_sz,output_sz,n_imgs),dtype='single')
	for group in range(n_groups):
		p_t = patches[:,:,:,:,group*imgs_per_group:(group+1)*imgs_per_group]
		output[:,:,:,group*imgs_per_group:(group+1)*imgs_per_group] = ne.evaluate('filter_temp*p_t').sum(1)
	print(time.time() - t_start)
	return output
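A hedged usage sketch, assuming the module's imports (import numpy as np, import numexpr as ne, import time): shapes follow the asserts above, filters as (in_channels, filter_sz, filter_sz, n_filters) and imgs as (in_channels, img_sz, img_sz, n_imgs).

filters = np.random.rand(3, 5, 5, 8)
imgs = np.random.rand(3, 32, 32, 4)
out = conv_block(filters, imgs)
print(out.shape)  # (8, 28, 28, 4): one 28x28 map per filter per image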
Example #26
    def argmax(self, sol, idx, add_loss=False):
        nd = self.feats
        mc = self.num_classes
        d = 0  # start of dimension in sol
        val = -10**10
        cls = -1 # best choice so far
        psi_idx = matrix(0.0, (nd*mc,1))

        for c in range(self.num_classes):

            psi = matrix(0.0, (nd*mc,1))
            psi[nd*c:nd*(c+1)] = self.X[:,idx]

            foo = 2.0 * sol.trans()*psi - psi.trans()*psi
            # the argmax of the above function
            # is equal to the argmax of the quadratic function
            # foo = + 2*foo - normPsi
            # since ||\Psi(x_i,z)|| = ||\phi(x_i)|| = y \forall z   
            if (np.single(foo)>np.single(val)):
                val = -sol.trans()*sol + foo
                cls = c
                psi_idx = matrix(sol, (nd*mc,1))
                psi_idx[nd*c:nd*(c+1)] = self.X[:,idx]
        return (val, cls, psi_idx)
Example #27
 def temp_parse(temp):
     ntemp = []
     errCount = 0
     for val in temp:
         if val.strip().upper() == "NAN":
             cval = "0"
             errCount += 1
         else:
             cval = val
         try:
             cval = single(cval)
         except ValueError:
             cval = -1
             errCount += 1
         ntemp += [cval]
     return (ntemp, errCount)
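A usage sketch, assuming single comes from numpy (e.g. from numpy import single): "NAN" entries become 0, unparseable entries become -1, and errCount counts both.

ntemp, errCount = temp_parse(["21.5", "NaN", "oops"])
print(ntemp, errCount)  # [21.5, 0.0, -1] 2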
Example #28
def ImportData(output, cols, filterList=[], synList=[]):
    """ Filter list is for adding lambda based filters, synList is for synthetic columns """

    print "Importing Next Step..."
    # ncols=lacpa_adddb(None,cols,header,'',False) # formatted and scaled
    # cols=ncols
    print "Filtering List"
    keepPoints = numpy.array([True] * len(cols.values()[0]))  # only
    for (cFiltCol, cFiltFunc) in filterList:
        keepPoints &= map(cFiltFunc, cols[cFiltCol])
    print "Keeping - " + str((sum(keepPoints), len(keepPoints)))
    keepPoints = [cValue for (cValue, isKept) in enumerate(keepPoints) if isKept]
    for cKey in cols.keys():
        cols[cKey] = numpy.array(cols[cKey])[keepPoints]
    print " Adding columns"
    for (cName, cData) in cols.items():
        output.AddColumn(initCol(cName, cData, None))
    print " Adding Triplets"
    allTrips = getTriplets(cols.keys())
    print allTrips
    for (cName, cCols) in allTrips:
        output.AddColumn(initCol3(cName, cols[cCols[0]], cols[cCols[1]], cols[cCols[2]], None))
    print " Adding Tensors"
    cols["NULL"] = 0 * single(cols.values()[0])
    allTens = getTensors(cols.keys())
    print allTens
    for (cName, cCols) in allTens:
        output.AddColumn(
            initColTens(
                cName,
                cols[cCols[0]],
                cols[cCols[1]],
                cols[cCols[2]],
                cols[cCols[3]],
                cols[cCols[4]],
                cols[cCols[5]],
                cols[cCols[6]],
                cols[cCols[7]],
                cols[cCols[8]],
                None,
            )
        )

    print " Adding synthetic columns"
    for (cName, cFunc) in synList:
        output.AddColumn(initCol(cName, cFunc(cols), None))
    return output
Example #29
    def __init__(self, weight_learning_rate=0.2, bias_learning_rate=0.2, weight_momentum=0.5, bias_momentum=0.5,
                 weight_decay=0.0, bias_decay=0.0):

        self.weight_learning_rate = theano.shared(np.single(weight_learning_rate), 'lW')
        self.bias_learning_rate   = theano.shared(np.single(bias_learning_rate), 'lb')

        self.weight_momentum = theano.shared(np.single(weight_momentum), 'mW')
        self.bias_momentum   = theano.shared(np.single(bias_momentum), 'mb')

        self.weight_decay = theano.shared(np.single(weight_decay), 'rW')
        self.bias_decay   = theano.shared(np.single(bias_decay), 'rb')
Example #30
def point_wise_mult_bcast2(A, B, scalar=1, OUT_BUFFER=None, gpu_ind=0):
	assert isinstance(gpu_ind,int)
	check_buffer(A)
	check_buffer(B)
	
	assert len(A[1]) > 2
	assert len(B[1]) == 2
	assert A[1][0] == B[1][0]
	assert A[1][1] == B[1][1]
	
	if OUT_BUFFER is not None:
		check_buffer(OUT_BUFFER)
		OUT_BUFFER[1] = copy.deepcopy(A[1])
	else:
		OUT_BUFFER = copy.deepcopy(A)
	
	_ntm_module.point_wise_mult_bcast2(A[0], A[1], B[0], np.single(scalar), OUT_BUFFER[0], gpu_ind)
Example #31
def get_scales(min_scale=0.2, max_scale=0.9,num_layers=6):
    """ Following the ssd arxiv paper, regarding the calculation of scales & ratios

    Parameters
    ----------
    min_scale : float
    max_scale : float
    num_layers : int
        number of layers that will have a detection head
    anchor_ratios: list
    first_layer_ratios: list

    Returns
    -------
    sizes : list
        list of scale sizes per feature layer
    ratios : list
        list of anchor_ratios per feature layer
    """

    # this code follows the original implementation of Wei Liu;
    # for more, see ssd/score_ssd_pascal.py:310 in the original Caffe implementation
    min_ratio = int(min_scale * 100)
    max_ratio = int(max_scale * 100)
    step = int(np.floor((max_ratio - min_ratio) / (num_layers - 2)))
    min_sizes = []
    max_sizes = []
	
    for ratio in range(min_ratio, max_ratio + 1, step):
        min_sizes.append(ratio / 100.)
        max_sizes.append((ratio + step) / 100.)
    min_sizes = [int(100*min_scale / 2.0) / 100.0] + min_sizes
    max_sizes = [min_scale] + max_sizes

    # convert it back to this implementation's notation:
    scales = []
    for layer_idx in range(num_layers):
        scales.append([min_sizes[layer_idx], np.single(np.sqrt(min_sizes[layer_idx] * max_sizes[layer_idx]))])
    return scales
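A quick usage sketch with the default arguments: each entry pairs a layer's min size with the geometric mean of its min and max sizes.

scales = get_scales(min_scale=0.2, max_scale=0.9, num_layers=6)
for s in scales:
    print(s)  # [min_size, sqrt(min_size * max_size)] for one detection layer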
Example #32
def clustMasks(opts):

    # Local Variables: gtWidth, E, zz, i, k, j, xx, yy, S, t, w, theta, dist, nDists, nOrients, K, opts
    # Function calls: all, cosd, sum, rem, nargout, single, meshgrid, zeros, linspace, error, sind, clustMasks, gradientMag, size
    gtWidth = opts.gtWidth
    nDists = opts.nDists
    nOrients = opts.nOrients
    if plt.rem(gtWidth, 2.) != 0.:
        matcompat.error('gtWidth should be even')
    
    
    #% define gtWidth x gtWidth grid
    [xx, yy] = matcompat.meshgrid(np.arange(-gtWidth/2.+1., (gtWidth/2.)+1))
    #% define the distances and orientations
    k = gtWidth/2.
    dist = np.linspace(k, (-k), nDists)
    theta = np.arange(0., (180.-.01)+(180./nOrients), 180./nOrients)
    #% render seg masks for each cluster
    K = np.dot(nDists, nOrients)
    S = np.zeros((gtWidth, gtWidth, K))
    for i in np.arange(1., (nOrients)+1):
        t = theta[int(i)-1]
        w = np.array(np.vstack((np.hstack((cosd(t))), np.hstack((sind(t))))))
        zz = np.dot(w[0], xx)+np.dot(w[1], yy)
        for j in np.arange(1., (nDists)+1):
            k = np.dot(i-1., nDists)+j
            S[:,:,int(k)-1] = zz > dist[int(j)-1]
            
        
    #% check for bugs
    #% demonstrate how to convert segs S to edges E
    if nargout > 1.:
        E = np.zeros(matcompat.size(S))
        for k in np.arange(1., (K)+1):
            E[:,:,int(k)-1] = gradientMag(np.single(S[:,:,int(k)-1])) > .01
            
    
    
    return [S, E]
Example #33
    def saveMeasurements(self):
        t0 = time.time()
        timestamp = datetime.fromtimestamp(t0)
        timestamp = timestamp.strftime("%Y%m%d%H%M")
        pathname = self.filepath + '/reprocess'
        Path(pathname).mkdir(parents=True, exist_ok=True)
        simimagename = pathname + '/' + self.filetitle + timestamp + f'_reprocessed' + '.tif'
        wfimagename = pathname + '/' + self.filetitle + timestamp + f'_widefield' + '.tif'
        txtname = pathname + '/' + self.filetitle + timestamp + f'_reprocessed' + '.txt'
        tif.imwrite(simimagename, np.single(self.imageSIM))
        tif.imwrite(wfimagename, np.uint16(self.imageWF))
        print(type(self.imageSIM))

        savedictionary = {
            #"exposure time (s)":self.exposuretime,
            #"laser power (mW)": self.laserpower,
            # "z stepsize (um)":  self.
            # System setup:
            "magnification": self.h.magnification,
            "NA": self.h.NA,
            "refractive index": self.h.n,
            "wavelength": self.h.wavelength,
            "pixelsize": self.h.pixelsize,
            # Calibration parameters:
            "alpha": self.h.alpha,
            "beta": self.h.beta,
            "Wiener filter": self.h.w,
            "eta": self.h.eta,
            "cleanup": self.h.cleanup,
            "axial": self.h.axial,
            "modulation": self.h.usemodulation,
            "kx": self.h.kx,
            "ky": self.h.ky,
            "phase": self.h.p,
            "amplitude": self.h.ampl
        }
        f = open(txtname, 'w+')
        f.write(json.dumps(savedictionary, cls=NumpyEncoder, indent=2))
        self.isCalibrationSaved = True
Example #34
    def save_COR(self):
        i = self.COR_slider.value()/10
        contrast = self.contrastSlider.value()
        self.COR_pos.setText(str((i + self.full_size) / 2))
        #self.CORSpinBox.setValue((i + self.full_size) / 2)

        self.rotated = ndimage.rotate(self.im_180_flipped, self.rotate.value(), axes=[1, 0], reshape=False, output=None, order=3, mode='nearest', cval=0.0, prefilter=True)
        im_180_flipped_shifted = ndimage.shift(numpy.single(numpy.array(self.im_180_flipped)), [0, i], order=3, mode='nearest', prefilter=True)
        divided = numpy.divide(im_180_flipped_shifted, self.im_000_normalized, out=numpy.zeros_like(im_180_flipped_shifted), where=self.im_000_normalized != 0)

        myarray = divided * contrast - (contrast - 128)
        yourQImage = qimage2ndarray.array2qimage(myarray)
        self.divided.setPixmap(QPixmap(yourQImage))

        print('Writing shifted:', self.filename_out)
        img = Image.fromarray(divided)
        img.save(self.filename_out)

        self.COR = (i + self.full_size) / 2
        self.rotate = self.rotate.value()

        print('find COR save')
        self.close()
Example #35
def collapse(W, opts, usemex):

    # Local Variables: orthog, Wori, o, usemex, W, V, nDists, opts
    # Function calls: nargin, collapseMex, collapse_orient, collapse, single
    #% V = collapse( W, opts, usemex )
    if nargin<3.:
        usemex = 1.
    
    
    #% TODO get rid of these restrictions
    orthog = np.array(np.hstack((np.arange(90., (-67.5)+(-22.5), -22.5))))-90.
    if usemex:
        V = collapseMex(W, np.single(orthog), (opts.gtWidth), (opts.nDists), (opts.nOrients), (opts.shrink), (opts.nThreads))
    else:
        nDists = opts.nDists
        V = 0.*W[:,:,0:opts.nOrients]
        for o in np.arange(1., (opts.nOrients)+1):
            Wori = W[:,:,int(np.dot(o-1., nDists)+1.)-1:np.dot(o, nDists)]
            V[:,:,int(o)-1] = collapse_orient(Wori, orthog[int(o)-1], opts)
            
        
    
    return [V]
Example #36
class TestNumpyJSONEncoder(unittest.TestCase):
    @parameterized.expand(
        [(numpy.bool_(1), True), (numpy.bool8(1), True), (numpy.byte(1), 1),
         (numpy.int8(1), 1), (numpy.ubyte(1), 1), (numpy.uint8(1), 1),
         (numpy.short(1), 1), (numpy.int16(1), 1), (numpy.ushort(1), 1),
         (numpy.uint16(1), 1), (numpy.intc(1), 1), (numpy.int32(1), 1),
         (numpy.uintc(1), 1), (numpy.uint32(1), 1), (numpy.int_(1), 1),
         (numpy.int32(1), 1), (numpy.uint(1), 1), (numpy.uint32(1), 1),
         (numpy.longlong(1), 1), (numpy.int64(1), 1), (numpy.ulonglong(1), 1),
         (numpy.uint64(1), 1), (numpy.half(1.0), 1.0),
         (numpy.float16(1.0), 1.0), (numpy.single(1.0), 1.0),
         (numpy.float32(1.0), 1.0), (numpy.double(1.0), 1.0),
         (numpy.float64(1.0), 1.0), (numpy.longdouble(1.0), 1.0)] + ([
             (numpy.float128(1.0), 1.0)  # unavailable on windows
         ] if hasattr(numpy, 'float128') else []))
    def test_numpy_primary_type_encode(self, np_val, py_val):
        self.assertEqual(json.dumps(py_val),
                         json.dumps(np_val, cls=NumpyEncoder))

    @parameterized.expand([
        (numpy.array([1, 2, 3], dtype=int), [1, 2, 3]),
        (numpy.array([[1], [2], [3]], dtype=numpy.double), [[1.0], [2.0],
                                                            [3.0]]),
        (numpy.zeros((2, 2), dtype=numpy.bool_), [[False, False],
                                                  [False, False]]),
        (numpy.array([('Rex', 9, 81.0), ('Fido', 3, 27.0)],
                     dtype=[('name', 'U10'), ('age', 'i4'),
                            ('weight', 'f4')]), [['Rex', 9, 81.0],
                                                 ['Fido', 3, 27.0]]),
        (numpy.rec.array([(1, 2., 'Hello'), (2, 3., "World")],
                         dtype=[('foo', 'i4'), ('bar', 'f4'),
                                ('baz', 'U10')]), [[1, 2.0, "Hello"],
                                                   [2, 3.0, "World"]])
    ])
    def test_numpy_array_encode(self, np_val, py_val):
        self.assertEqual(json.dumps(py_val),
                         json.dumps(np_val, cls=NumpyEncoder))
Example #37
def nms(Es, alpha, tol, nThreads):

    # Local Variables: E, nThreads, outlier, Oe, Ye, O, Xe, Xt, tol, X, Y, alpha, Ot, nOrients, Yt, Es
    # Function calls: convTri, nms, edgeOrient, atan, sum, cos, edgesNmsMex, nargin, single, abs, pi, sin, size
    #% E = nms( Es, alpha, tol, nThreads )
    if nargin < 2.:
        alpha = 1.

    if nargin < 3.:
        tol = 80.

    if nargin < 4.:
        nThreads = 4.

    nOrients = matcompat.size(Es, 3.)
    tol = np.dot(tol, np.pi) / 180.
    #% Flatten and smooth slightly (it helps)
    E = np.sum(Es, 3.)
    E = convTri(E, 1.)
    #% Compute orientation map (index of the strongest orientation channel)
    Ot = np.argmax(Es, axis=2)+1.
    Ot = np.dot(matdiv(np.single((Ot - 1.)), nOrients), np.pi)
    Oe = edgeOrient(E, 4.)
    if tol > 0.:
        outlier = np.abs((Oe - Ot)) > tol
        Oe[int(outlier) - 1] = Ot[int(outlier) - 1]

    Xt = np.cos(Ot)
    Yt = np.sin(Ot)
    Xe = np.cos(Oe)
    Ye = np.sin(Oe)
    X = Xt + np.dot(alpha, Xe - Xt)
    Y = Yt + np.dot(alpha, Ye - Yt)
    O = np.arctan((Y / X))
    #% Use those orientations to do nms
    E = edgesNmsMex(E, O, 1., 5., 1.01, nThreads)
    return [E]
Example #38
def test_np_sanitization():
    class CustomParamsLogger(CustomLogger):
        def __init__(self):
            super().__init__()
            self.logged_params = None

        @rank_zero_only
        def log_hyperparams(self, params):
            params = _convert_params(params)
            params = _sanitize_params(params)
            self.logged_params = params

    logger = CustomParamsLogger()
    np_params = {
        "np.bool_": np.bool_(1),
        "np.byte": np.byte(2),
        "np.intc": np.intc(3),
        "np.int_": np.int_(4),
        "np.longlong": np.longlong(5),
        "np.single": np.single(6.0),
        "np.double": np.double(8.9),
        "np.csingle": np.csingle(7 + 2j),
        "np.cdouble": np.cdouble(9 + 4j),
    }
    sanitized_params = {
        "np.bool_": True,
        "np.byte": 2,
        "np.intc": 3,
        "np.int_": 4,
        "np.longlong": 5,
        "np.single": 6.0,
        "np.double": 8.9,
        "np.csingle": "(7+2j)",
        "np.cdouble": "(9+4j)",
    }
    logger.log_hyperparams(Namespace(**np_params))
    assert logger.logged_params == sanitized_params
Example #39
def read_data(rf_file=None):
    """Convert rf binary data into :class:`~numpy.ndarray` of power and time.

    .. code-block:: python

        from embers.rf_tools.rf_data import read_data
        power, times = read_data(rf_file='~/embers-data/rf.txt')

    :param rf_file: path to rf binary data file :class:`str`

    :returns:
        - power - power in dBm :class:`~numpy.ndarray`
        - times - times in UNIX :class:`~numpy.ndarray`

    """

    with open(rf_file, "rb") as f:
        next(f)
        lines = f.readlines()

        times = []
        data_lines = []

        for line in lines:
            time, data = line.split("$Sp".encode())
            times.append(time.decode())

            # List converts bytes to list of bytes
            # The last two characters are excluded - Newline char
            data_lines.append(list(data[:-2]))

        # The (-1/2) converts an unsigned byte to a real value
        power = np.single(np.asarray(data_lines) * (-1 / 2))
        times = np.double(np.asarray(times))

        return (power, times)
Example #40
    def _calc_spectrum(self, div):
        '''
        This function is used by compute() to determine the approximate
        frequency content of one division of audio data. 
        '''
        spectrum = np.zeros((self.nfft//2,))
        window = np.hanning(self.subdiv_len)
        slip = self.subdiv_len - self.noverlap
        if slip <= 0:
            raise ValueError('overlap exceeds subdiv_len, slip = %s' % str(slip))

        lo = 0
        hi = self.subdiv_len
        nsubdivs = 0
        while hi < self.div_len:
            nsubdivs += 1
            subdiv = div[lo:hi]
            tr = rfft(subdiv * window, int(self.nfft))
            spectrum += np.abs(tr[:self.nfft//2])
            lo += slip
            hi += slip

        spectrum = np.single(np.log(spectrum / self.nsubdivs))
        return spectrum
Example #41
def get_img_lightness_hist(L, range_min=0, range_max=100, bins=50):
    h, w = L.shape[0], L.shape[1]
    L1 = scipy.ndimage.filters.gaussian_filter(L, sigma=10, order=0)
    L2 = scipy.ndimage.filters.gaussian_filter(L, sigma=20, order=0)

    hist, bin_edges = np.histogram(L.flatten(),
                                   bins,
                                   range=(range_min, range_max),
                                   density=False)
    hist = np.single(hist) / np.single(h * w)
    hist1, bin_edges1 = np.histogram(L1.flatten(),
                                     bins,
                                     range=(range_min, range_max),
                                     density=False)
    hist1 = np.single(hist1) / np.single(h * w)
    hist2, bin_edges2 = np.histogram(L2.flatten(),
                                     bins,
                                     range=(range_min, range_max),
                                     density=False)
    hist2 = np.single(hist2) / np.single(h * w)
    return np.concatenate((hist, hist1, hist2))
Example #42
 def _get_canvas(self):
     buf = self._surface.BufferAsNumpy()
     buf = buf.transpose((0, 2, 1, 3, 4))
     buf = buf.reshape((self._canvas_width, self._canvas_width, 4))
     canvas = np.single(_fix15_to_rgba(buf)) / 255.0
     return canvas
Example #43
 def bin_pred_map(self,c_img, last_layer='prob',prediction_map=0):
     """get binary probability map prediction"""
     pred=self.prediction(c_img)
     prob_map=np.single(pred[last_layer][0,prediction_map,:,:])
     return prob_map
Example #44
# Compute MOC
#   a. integrate wflux in zonal direction (zonal sum of wflux already in m^3/s)
time8 = timer.time()
rmlak = np.zeros((nx, ny, 2), dtype=int)
tmprmask = np.transpose(rmask.values, axes=[1, 0])
tmptlat = np.transpose(tlat.values, axes=[1, 0])
rmlak[:, :, 0] = np.where(tmprmask > 0, 1, 0)
rmlak[:, :, 1] = np.where((tmprmask >= 6) & (tmprmask <= 12), 1,
                          0)  # include Baltic for 0p1
tmpw = np.transpose(np.where(~np.isnan(weflux.values.copy()),
                             weflux.values.copy(), mval),
                    axes=[3, 2, 1, 0])
tmpmoc_e = moc_offline_0p1deg.wzonalsum(tmptlat, lat_aux_grid, rmlak, tmpw,
                                        mval, [nyaux, nx, ny, nz, nt, ntr])
tmpmoc_e = np.single(np.append(tmpmoc_e, np.zeros((nyaux, 1, ntr, nt)),
                               axis=1))  # add ocean bottom
#print('tmpmoc shape',np.shape(tmpmoc))
time9 = timer.time()
print('Timing:  wzonalsum call =  ', time9 - time8, 's')

#   b. integrate in meridional direction
if not sigmacoord:
    MOCnew = xr.DataArray(np.zeros((nt,ntr,ncomp,mocnz,nyaux),dtype=np.single),dims=['time','transport_reg','moc_comp','moc_z','lat_aux_grid'], \
       coords={'time':time,'transport_regions':transport_regions,'moc_components':moc_components,'moc_z':mocz,'lat_aux_grid':lat_aux_grid}, \
       name='MOC')
else:
    MOCnew = xr.DataArray(np.zeros((nt,ntr,ncomp,mocnz,nyaux),dtype=np.single),dims=['time','transport_reg','moc_comp','moc_s','lat_aux_grid'], \
       coords={'time':time,'transport_regions':transport_regions,'moc_components':moc_components,'moc_s':sigma_moc,'lat_aux_grid':lat_aux_grid}, \
       name='MOC')
MOCnew.values[:, :, 0, :, :] = np.transpose(tmpmoc_e, axes=[3, 2, 1, 0])
#print('mocnewshape',np.shape(MOCnew))
Example #45
    def train_dc(self, zero_shot=False, max_iter=50, hotstart=matrix([])):
        """ Solve the optimization problem with a
            sequential convex programming/DC-programming
            approach:
            Iteratively, find the most likely configuration of
            the latent variables and then, optimize for the
            model parameter using fixed latent states.
        """
        N = self.sobj.get_num_samples()
        DIMS = self.sobj.get_num_dims()

        # intermediate solutions
        # latent variables
        latent = [0.0]*N

        #setseed(0)
        sol = self.sobj.get_hotstart_sol()
        #sol[0:4] *= 0.01
        if hotstart.size==(DIMS,1):
            print('New hotstart position defined.')
            sol = hotstart

        psi = matrix(0.0, (DIMS,N)) # (dim x exm)
        old_psi = matrix(0.0, (DIMS,N)) # (dim x exm)
        threshold = 0

        obj = -1
        iter = 0
        allobjs = []

        restarts = 0

        # terminate if objective function value doesn't change much
        while iter < max_iter and (iter < 2 or sum(sum(abs(np.array(psi-old_psi)))) >= 0.001):
            print('Starting iteration {0}.'.format(iter))
            print(sum(sum(abs(np.array(psi-old_psi)))))
            iter += 1
            old_psi = matrix(psi)
            old_sol = sol

            # 1. linearize
            # for the current solution compute the
            # most likely latent variable configuration
            for i in range(N):
                (foo, latent[i], psi[:,i]) = self.sobj.argmax(sol, i, add_prior=True)
                #print psi[:,i]
                #psi[:4,i] /= 600.0
                #psi[:,i] /= 600.0
                #psi[:4,i] = psi[:4,i]/np.linalg.norm(psi[:4,i],ord=2)
                #psi[4:,i] = psi[4:,i]/np.linalg.norm(psi[4:,i],ord=2)
                psi[:,i] /= np.linalg.norm(psi[:, i], ord=self.norm_ord)
                #psi[:,i] /= np.max(np.abs(psi[:,i]))
                #psi[:,i] /= 600.0
                #if i>10:
                #	(foo, latent[i], psi[:,i]) = self.sobj.argmax(sol,i)
                #else:
                #	psi[:,i] = self.sobj.get_joint_feature_map(i)
                #	latent[i] = self.sobj.y[i]
            print(psi)

            # 2. solve the intermediate convex optimization problem
            kernel = Kernel.get_kernel(psi, psi)
            svm = OCSVM(kernel, self.C)
            svm.train_dual()
            threshold = svm.get_threshold()
            #inds = svm.get_support_dual()
            #alphas = svm.get_support_dual_values()
            #sol = phi[:,inds]*alphas

            self.svs_inds = svm.get_support_dual()
            #alphas = svm.get_support_dual_values()
            sol = psi*svm.get_alphas()
            print(matrix([sol.trans(), old_sol.trans()]).trans())
            if len(self.svs_inds) == N and self.C > (1.0 / float(N)):
                print('###################################')
                print('Degenerate solution.')
                print('###################################')

                restarts += 1
                if (restarts>10):
                    print('###################################')
                    print('Too many restarts...')
                    print('###################################')
                    # calculate objective
                    self.threshold = threshold
                    slacks = [max([0.0, np.single(threshold - sol.trans()*psi[:,i])]) for i in range(N)]
                    obj = 0.5*np.single(sol.trans()*sol) - np.single(threshold) + self.C*sum(slacks)
                    print("Iter {0}: Values (Threshold-Slacks-Objective) = {1}-{2}-{3}".format(int(iter),np.single(threshold),np.single(sum(slacks)),np.single(obj)))
                    allobjs.append(float(np.single(obj)))
                    break

                # intermediate solutions
                # latent variables
                latent = [0.0]*N

                #setseed(0)
                sol = self.sobj.get_hotstart_sol()
                #sol[0:4] *= 0.01
                if hotstart.size==(DIMS,1):
                    print('New hotstart position defined.')
                    sol = hotstart

                psi = matrix(0.0, (DIMS,N)) # (dim x exm)
                old_psi = matrix(0.0, (DIMS,N)) # (dim x exm)
                threshold = 0

                obj = -1
                iter = 0
                allobjs = []

            # calculate objective
            self.threshold = threshold
            slacks = [max([0.0, np.single(threshold - sol.trans()*psi[:,i])]) for i in range(N)]
            obj = 0.5*np.single(sol.trans()*sol) - np.single(threshold) + self.C*sum(slacks)
            print("Iter {0}: Values (Threshold-Slacks-Objective) = {1}-{2}-{3}".format(int(iter),np.single(threshold),np.single(sum(slacks)),np.single(obj)))
            allobjs.append(float(np.single(obj)))

            # zero shot learning: single iteration, hence random
            # structure coefficient
            if zero_shot:
                print('LatentOcSvm: Zero shot learning.')
                break


        print('+++++++++')
        print(threshold)
        print(slacks)
        print(obj)
        print('+++++++++')
        self.slacks = slacks

        print(allobjs)
        print(sum(sum(abs(np.array(psi-old_psi)))))
        print('+++++++++ SAD END')
        self.sol = sol
        self.latent = latent
        return sol, latent, threshold
Example #46
        data = unpickle(data_batch_file)
        logprobs = data['data']  # shape=(num_views*num_imgs,num_classes)
        labels = data['labels']  # shape=(1,num_views*num_imgs)
        probs = np.exp(logprobs)
        num_classes = probs.shape[1]
        num_imgs = probs.shape[0] // num_views
        assert labels.shape[1] == num_imgs * num_views
        probs = probs.reshape((num_views, num_imgs, num_classes))

        labels = labels[:, :num_imgs]
        mean_probs = np.mean(probs, axis=0)
        # top 1 error
        sort_idx = np.argsort(mean_probs, axis=1)
        correct_top1 += list(sort_idx[:, num_classes - 1] == labels[0, :])

        top_1_error = 1.0 - np.sum(
            sort_idx[:, num_classes - 1] == labels[0, :]) / np.single(num_imgs)
        # top 5 error
        correct = np.zeros((num_imgs))
        for i in range(5):
            correct += (sort_idx[:, num_classes - 1 - i] == labels[0, :])
        correct_top5 += list(correct)
        top_5_error = 1.0 - np.sum(correct) / np.single(num_imgs)

        print('batch_num:%d num_imgs:%d num_views:%d num_classes:%d top-1 error:%f top-5 error:%f' %
              (batch_num, num_imgs, num_views, num_classes, top_1_error, top_5_error))

    all_top1_error = 1.0 - np.sum(correct_top1) / np.single(len(correct_top1))
    all_top5_error = 1.0 - np.sum(correct_top5) / np.single(len(correct_top5))
    print('In summary, top 1 error:%f top 5 error:%f' % (all_top1_error,
                                                         all_top5_error))
Example #47
    def train(self, heur_constr=4.4):
        N = self.sobj.get_num_samples()
        DIMS = self.sobj.get_num_dims()

        w = matrix(self.sobj.get_hotstart_sol())

        slacks = [-10**10] * N
        sol = matrix([[w.trans()], [matrix(slacks, (1, N))]]).trans()

        # quadratic regularizer
        P = spdiag(matrix([[matrix(0.0, (1, N))], [matrix(1.0, (1, DIMS))]]))
        q = self.C * matrix([matrix(1.0, (N, 1)), matrix(0.0, (DIMS, 1))])

        # inequality constraints inits Gx <= h
        G1 = spdiag(matrix([[matrix(-1.0, (1, N))], [matrix(0.0, (1, DIMS))]]))
        G1 = G1[0:N, :]
        h1 = matrix(0.0, (1, N))

        dpsi = matrix(0.0, (DIMS, 0))
        delta = matrix(0.0, (1, 0))
        trigger = matrix(0.0, (N, 0))

        iter = 0
        new_constr = N
        while new_constr > 0:
            new_constr = 0
            for i in range(N):
                val, ypred, psi_i = self.sobj.argmax(np.array(w),
                                                     i,
                                                     add_loss=True)
                psi_true = self.sobj.get_joint_feature_map(i)

                psi_i = matrix(psi_i)
                psi_true = matrix(psi_true)

                v_true = w.trans() * psi_true
                v_pred = w.trans() * psi_i
                loss = self.sobj.calc_loss(i, ypred)

                if slacks[i] < np.single(loss - v_true + v_pred):
                    dpsi = matrix([[dpsi], [-(psi_true - psi_i)]])
                    delta = matrix([[delta], [-loss]])
                    tval = matrix(0.0, (N, 1))
                    tval[i] = -1.0
                    trigger = sparse([[trigger], [tval]])
                    new_constr += 1

            # G1/h1: -\xi_i <= 0
            # G2/h2: -dpsi -xi_i <= -delta_i
            G2 = sparse([[trigger.trans()], [dpsi.trans()]])
            h2 = delta

            # skip fulfilled constraints for this run (heuristic)
            if iter > 2:
                diffs = np.array(delta - (G2 * sol).trans())
                inds = np.where(diffs < heur_constr)[1]
                G2 = G2[inds.tolist(), :]
                h2 = delta[:, inds.tolist()]
                print('Iter{0}: Solving with {1} of {2} constraints.'.format(
                    iter, inds.shape[0], diffs.shape[1]))

            # Solve the intermediate QP using cvxopt
            G = sparse([G1, G2])
            h = matrix([[h1], [h2]])
            res = qp(P, q, G, h.trans())

            obj_primal = res['primal objective']
            sol = res['x']
            slacks = sol[0:N]
            w = sol[N:N + DIMS]
            print('Iter{0}: objective {1} #new constraints {2}'.format(
                iter, obj_primal, new_constr))
            iter += 1

        # store obtained solution
        self.w = np.array(w)
        self.slacks = np.array(slacks)
        return self.w, self.slacks
Example #48
File: scalars.py Project: nanbo99/numpy
np.intc()
np.intp()
np.int0()
np.int_()
np.longlong()

np.ubyte()
np.ushort()
np.uintc()
np.uintp()
np.uint0()
np.uint()
np.ulonglong()

np.half()
np.single()
np.double()
np.float_()
np.longdouble()
np.longfloat()

np.csingle()
np.singlecomplex()
np.cdouble()
np.complex_()
np.cfloat()
np.clongdouble()
np.clongfloat()
np.longcomplex()

np.bool_().item()
Example #49
    def __init__(self, im, params):

        self.parameters = params
        self.pos = self.parameters.init_pos
        self.target_sz = self.parameters.target_size

        if self.parameters.features == 'CNN':
            caffe.set_device(0)
            caffe.set_mode_gpu()

            caffe_root = os.environ['CAFFE_ROOT'] +'/'
            import sys
            sys.path.insert(0, caffe_root + 'python')

            self.net = caffe.Net(caffe_root + 'models/3785162f95cd2d5fee77/VGG_ILSVRC_19_layers_deploy.prototxt',
                                 caffe_root + 'models/3785162f95cd2d5fee77/VGG_ILSVRC_19_layers.caffemodel',
                                 caffe.TEST)

            # input preprocessing: 'data' is the name of the input blob == net.inputs[0]
            self.transformer = caffe.io.Transformer({'data': self.net.blobs['data'].data.shape})
            self.transformer.set_transpose('data', (2, 0, 1))
            self.transformer.set_mean('data',
                                      np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1))
            self.transformer.set_raw_scale('data', 255)  # model operates on images in [0,255] range instead of [0,1]
            self.transformer.set_channel_swap('data', (0, 1, 2))  # model channels in BGR order instead of RGB

        # if self.target_sz[0] / 2.0 > self.target_sz[1]:
        #     self.target_sz[1] = self.target_sz[0]
        #     self.pos[1] = self.pos[1] - (self.target_sz[0] - self.target_sz[1]) / 2
        # elif self.target_sz[0] < self.target_sz[1]/2.0:
        #     self.target_sz[0] = self.target_sz[1]
        #     self.pos[0] = self.pos[0] - (self.target_sz[1] - self.target_sz[0]) / 2

        # Initial target size
        self.init_target_sz = self.parameters.target_size
        # target sz at scale = 1
        self.base_target_sz = self.parameters.target_size

        # Window size, taking padding into account
        self.window_sz = np.floor(np.array((max(self.base_target_sz),
                                            max(self.base_target_sz))) * (1 + self.parameters.padding))

        sz = self.window_sz
        sz = np.floor(sz / self.parameters.cell_size)
        self.l1_patch_num = np.floor(self.window_sz / self.parameters.cell_size)

        # Desired translation filter output (2d gaussian shaped), bandwidth
        # Proportional to target size
        output_sigma = np.sqrt(np.prod(self.base_target_sz)) * self.parameters.output_sigma_factor / self.parameters.cell_size
        self.yf = np.fft.fft2(desiredResponse.gaussian_response_2d(output_sigma, self.l1_patch_num))

        # Desired output of scale filter (1d gaussian shaped)
        scale_sigma = self.parameters.number_of_scales / np.sqrt(self.parameters.number_of_scales) * self.parameters.scale_sigma_factor
        self.ysf = np.fft.fft(desiredResponse.gaussian_response_1d(scale_sigma, self.parameters.number_of_scales))

        # Cosine window with the size of the translation filter (2D)
        self.cos_window = np.dot(np.hanning(self.yf.shape[0]).reshape(self.yf.shape[0], 1),
                                 np.hanning(self.yf.shape[1]).reshape(1, self.yf.shape[1]))

        # Cosine window with the size of the scale filter (1D)
        if np.mod(self.parameters.number_of_scales, 2) == 0:
            self.scale_window = np.single(np.hanning(self.parameters.number_of_scales + 1))
            self.scale_window = self.scale_window[1:]
        else:
            self.scale_window = np.single(np.hanning(self.parameters.number_of_scales))

        # Scale Factors [...0.98 1 1.02 1.0404 ...] NOTE: it is not an incremental value (see the scaleFactors values)
        ss = np.arange(1, self.parameters.number_of_scales + 1)
        self.scale_factors = self.parameters.scale_step**(np.ceil(self.parameters.number_of_scales / 2.0) - ss)

        # If the target size is over the threshold then downsample
        if np.prod(self.init_target_sz) > self.parameters.scale_model_max_area:
            self.scale_model_factor = np.sqrt(self.parameters.scale_model_max_area/np.prod(self.init_target_sz))
        else:
            self.scale_model_factor = 1

        self.scale_model_sz = np.floor(self.init_target_sz*self.scale_model_factor)

        self.currentScaleFactor = 1

        self.min_scale_factor = self.parameters.scale_step**np.ceil(np.log(np.max(5.0 / sz)) / np.log(self.parameters.scale_step))
        self.max_scale_factor = self.parameters.scale_step**np.floor(np.log(np.min(im.shape[0:-1] / self.base_target_sz)) / np.log(self.parameters.scale_step))

        self.confidence = np.array(())
        self.high_freq_energy = np.array(())
        self.psr = np.array(())

        # Flag that indicates if the track lost the target or not
        self.lost = False

        self.model_alphaf = None
        self.model_xf = None
        self.sf_den = None
        self.sf_num = None
Example #50
    def __init__(self, im, params):

        self.parameters = params
        self.pos = self.parameters.init_pos
        self.target_sz = self.parameters.target_size

        self.graph = None

        # Initial target size
        self.init_target_sz = self.parameters.target_size
        # target sz at scale = 1
        self.base_target_sz = self.parameters.target_size

        # Window size, taking padding into account
        self.window_sz = np.floor(np.array((max(self.base_target_sz),
                                            max(self.base_target_sz))) * (1 + self.parameters.padding))

        sz = self.window_sz
        sz = np.floor(sz / self.parameters.cell_size)
        self.l1_patch_num = np.floor(self.window_sz / self.parameters.cell_size)

        # Desired translation filter output (2d gaussian shaped), bandwidth
        # Proportional to target size
        output_sigma = np.sqrt(np.prod(self.base_target_sz)) * self.parameters.output_sigma_factor / self.parameters.cell_size
        self.yf = np.fft.fft2(desiredResponse.gaussian_response_2d(output_sigma, self.l1_patch_num))

        # Desired output of the scale filter (1D Gaussian-shaped)
        scale_sigma = self.parameters.number_of_scales / np.sqrt(self.parameters.number_of_scales) * self.parameters.scale_sigma_factor
        self.ysf = np.fft.fft(desiredResponse.gaussian_response_1d(scale_sigma, self.parameters.number_of_scales))

        # Cosine window with the size of the translation filter (2D)
        self.cos_window = np.dot(np.hanning(self.yf.shape[0]).reshape(self.yf.shape[0], 1),
                                 np.hanning(self.yf.shape[1]).reshape(1, self.yf.shape[1]))

        # Cosine window with the size of the scale filter (1D)
        if np.mod(self.parameters.number_of_scales, 2) == 0:
            self.scale_window = np.single(np.hanning(self.parameters.number_of_scales + 1))
            self.scale_window = self.scale_window[1:]
        else:
            self.scale_window = np.single(np.hanning(self.parameters.number_of_scales))

        # Scale factors [... 0.98 1 1.02 1.0404 ...] NOTE: the progression is geometric, not incremental (see the scale_factors values)
        ss = np.arange(1, self.parameters.number_of_scales + 1)
        self.scale_factors = self.parameters.scale_step**(np.ceil(self.parameters.number_of_scales / 2.0) - ss)

        # If the target size is over the threshold then downsample
        if np.prod(self.init_target_sz) > self.parameters.scale_model_max_area:
            self.scale_model_factor = np.sqrt(self.parameters.scale_model_max_area/np.prod(self.init_target_sz))
        else:
            self.scale_model_factor = 1

        self.scale_model_sz = np.floor(self.init_target_sz*self.scale_model_factor)

        self.currentScaleFactor = 1

        self.min_scale_factor = self.parameters.scale_step**np.ceil(np.log(np.max(5.0 / sz)) / np.log(self.parameters.scale_step))
        self.max_scale_factor = self.parameters.scale_step**np.floor(np.log(np.min(im.shape[0:-1] / self.base_target_sz)) / np.log(self.parameters.scale_step))

        self.confidence = np.array(())
        self.high_freq_energy = np.array(())
        self.psr = np.array(())

        # Flag that indicates whether the tracker has lost the target
        self.lost = False

        self.model_alphaf = None
        self.model_xf = None
        self.sf_den = None
        self.sf_num = None
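As a quick check of the scale pyramid above, the factors form a geometric sequence centered on 1. A minimal sketch with assumed parameter values (scale_step and number_of_scales are placeholders for the corresponding parameters):

import numpy as np

scale_step = 1.02        # assumed value of parameters.scale_step
number_of_scales = 5     # assumed value of parameters.number_of_scales
ss = np.arange(1, number_of_scales + 1)
factors = scale_step ** (np.ceil(number_of_scales / 2.0) - ss)
print(factors)  # [1.0404  1.02  1.  0.98039216  0.96116878]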
Example #51
0
File: optimizers.py Project: coxlab/tsnet
def regularize(obj, l2reg):

    newtadd(obj.G, np.single(l2reg), obj.W)

    return obj
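newtadd is a project-specific helper not shown here; assuming it performs an in-place scaled add, the update is the standard L2 penalty gradient. A plain-NumPy sketch under that assumption:

import numpy as np

def regularize_numpy(G, W, l2reg):
    # Assumes newtadd(G, a, W) computes G += a * W in place.
    G += np.single(l2reg) * W
    return G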
Example #52
0
def Detector_ThirdgenCurved(cfg):
    '''
    Returns a modular 3rd-generation detector (multi-row) focused at the source.
    The total cell order is row->col; in Python the arrays have shape [col, row].
    Coordinate arrays are 2D, [pixel, xyz].

    Mingye Wu, GE Research

    '''

    # shortcuts
    sid = cfg.scanner.sid
    sdd = cfg.scanner.sdd
    nRow = cfg.scanner.detectorRowsPerMod
    nCol = cfg.scanner.detectorColsPerMod
    nMod = math.ceil(cfg.scanner.detectorColCount / nCol)
    rowSize = cfg.scanner.detectorRowSize
    colSize = cfg.scanner.detectorColSize

    # cell coords
    cols = (np.arange(0, nCol) - (nCol - 1) / 2) * colSize
    rows = (np.arange(0, nRow) - (nRow - 1) / 2) * rowSize
    cols = nm.repmat(cols, nRow, 1).T.reshape(nCol * nRow, 1)
    rows = nm.repmat(rows, 1, nCol).T
    cellCoords = np.c_[cols, rows]

    # sample U coords
    nu = cfg.physics.colSampleCount
    du = colSize * cfg.scanner.detectorColFillFraction / nu
    us = (np.arange(0, nu) - (nu - 1) / 2) * du

    # sample V coords
    nv = cfg.physics.rowSampleCount
    dv = rowSize * cfg.scanner.detectorRowFillFraction / nv
    vs = (np.arange(0, nv) - (nv - 1) / 2) * dv

    # sample coords
    nSamples = nu * nv
    us = nm.repmat(us, nv, 1).T.reshape(nSamples, 1)
    vs = nm.repmat(vs, 1, nu).T
    sampleCoords = np.c_[us, vs]
    weights = np.ones((nu, nv)) / nSamples

    # module offset
    modWidth = nCol * colSize
    dAlpha = 2 * math.atan(modWidth / 2 / sdd)
    uOffset = math.atan(cfg.scanner.detectorColOffset * colSize / sdd)
    vOffset = cfg.scanner.detectorRowOffset * rowSize
    alphas = (np.arange(0, nMod) - (nMod - 1.0) / 2) * dAlpha + uOffset
    alphas = make_col(alphas)

    # module coords, uvecs, vvecs
    sinA = np.sin(alphas)
    cosA = np.cos(alphas)
    modCoords = np.c_[sdd * sinA, sid - sdd * cosA,
                      nm.repmat(vOffset, nMod, 1)]
    uvecs = np.c_[cosA, sinA, np.zeros((nMod, 1))]
    vvecs = np.c_[(np.zeros((nMod, 1)), np.zeros((nMod, 1)), np.ones(
        (nMod, 1)))]
    startIndices = np.arange(0, nMod) * nRow * nCol

    # detector definition
    if not cfg.det:
        cfg.det = CFG()

    cfg.det.nCells = nRow * nCol
    cfg.det.cellCoords = np.single(cellCoords)

    cfg.det.nSamples = nSamples
    cfg.det.sampleCoords = np.single(sampleCoords)
    cfg.det.weights = np.single(weights)
    cfg.det.activeArea = colSize * cfg.scanner.detectorColFillFraction * rowSize * cfg.scanner.detectorRowFillFraction

    cfg.det.nMod = nMod
    cfg.det.modCoords = np.single(modCoords)
    cfg.det.uvecs = np.single(uvecs)
    cfg.det.vvecs = np.single(vvecs)

    cfg.det.totalNumCells = cfg.scanner.detectorColCount * cfg.scanner.detectorRowCount
    cfg.det.startIndices = np.int32(startIndices)
    cfg.det.nModDefs = 1
    cfg.det.modTypes = np.zeros((nMod, 1), dtype=np.int32)

    cfg.det.width = (nCol + 1) * colSize
    cfg.det.height = (nRow + 1) * rowSize

    return cfg
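The repmat-based cell grid above can be reproduced with plain broadcasting; a minimal sketch (parameter values assumed) that yields the same column-major ordering, where the row index varies fastest:

import numpy as np

nRow, nCol, rowSize, colSize = 16, 16, 1.0, 1.0   # assumed sizes
cols = (np.arange(nCol) - (nCol - 1) / 2) * colSize
rows = (np.arange(nRow) - (nRow - 1) / 2) * rowSize
cc, rr = np.meshgrid(cols, rows, indexing='ij')   # cc[i, j] = cols[i]
cellCoords = np.c_[cc.ravel(), rr.ravel()]        # shape (nCol * nRow, 2)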
Example #53
0
def logistic(r):
    r = np.abs(r)
    r = max(np.sqrt(np.single(1)), r)
    w = np.tanh(r) / r
    return w
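Note the builtin max above only handles scalars; a vectorized sketch of the same weight for array-valued residuals (np.sqrt(np.single(1)) simplifies to 1):

import numpy as np

def logistic_w(r):
    # Clamp |r| at 1 elementwise, then apply the tanh(r)/r weight.
    r = np.maximum(np.single(1), np.abs(r))
    return np.tanh(r) / r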
Example #54
0
    def flickers(self):
        return np.sin(2 * np.pi * np.single(self.stimulus.flicker_hz) *
                      self.t[:, np.newaxis])
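A standalone sketch of the same regressor with assumed sampling and frequencies (the time axis and flicker_hz values here are hypothetical):

import numpy as np

t = np.arange(0, 2.0, 1.0 / 60.0)      # 2 s sampled at 60 Hz (assumed)
flicker_hz = np.array([12.0, 15.0])    # hypothetical stimulus frequencies
flickers = np.sin(2 * np.pi * np.single(flicker_hz) * t[:, np.newaxis])
# shape (len(t), len(flicker_hz)): one sinusoid per flicker frequency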
Example #55
0
    def saveMeasurements(self):
        if list_equal(self.imageRaw_store, self.imageRAW):
            self.show_text("Raw images are not saved: the same measurement.")
        else:
            timestamp = time.strftime("%y%m%d_%H%M%S", time.localtime())
            sample = self.app.settings['sample']
            if sample == '':
                sample_name = '_'.join([timestamp, self.name])
            else:
                sample_name = '_'.join([timestamp, self.name, sample])
            # create file path for both h5 and other types of files
            pathname = os.path.join(self.app.settings['save_dir'], sample_name)
            Path(pathname).mkdir(parents=True, exist_ok=True)
            self.pathname = pathname
            self.sample_name = sample_name

            # create the h5 base file if it does not exist
            if self.ui.saveH5.isChecked():
                # create h5 file for raw
                fname_raw = os.path.join(pathname, sample_name + '_Raw.h5')
                self.h5file_raw = h5_io.h5_base_file(app=self.app,
                                                     measurement=self,
                                                     fname=fname_raw)
                # save measure component settings
                h5_io.h5_create_measurement_group(measurement=self,
                                                  h5group=self.h5file_raw)
                for ch_idx in range(2):
                    gname = f'data/c{ch_idx}/raw'
                    if np.sum(self.imageRAW[ch_idx]
                              ) == 0:  # empty channel: store a placeholder
                        self.h5file_raw.create_dataset(gname,
                                                       data=h5py.Empty("f"))
                        self.show_text("[H5] Raw images are empty.")
                    else:
                        self.h5file_raw.create_dataset(
                            gname, data=self.imageRAW[ch_idx])
                        self.show_text("[H5] Raw images are saved.")

                self.h5file_raw.close()

            if self.ui.saveTif.isChecked():
                for ch_idx in range(2):
                    fname_raw = os.path.join(
                        pathname, sample_name + f'_Raw_Ch{ch_idx}.tif')
                    if np.sum(self.imageRAW[ch_idx]
                              ) != 0:  # only save non-empty channels
                        tif.imwrite(fname_raw,
                                    np.single(self.imageRAW[ch_idx]))
                        self.show_text("[Tif] Raw images are saved.")
                    else:
                        self.show_text("[Tif] Raw images are empty.")

            self.imageRaw_store = self.imageRAW  # store the imageRAW for comparison

        if self.isCalibrated:
            if self.ui.saveH5.isChecked():
                fname_pro = os.path.join(
                    self.pathname, self.sample_name +
                    f'_C{self.current_channel_display()}_Processed.h5')
                self.h5file_pro = h5_io.h5_base_file(app=self.app,
                                                     measurement=self,
                                                     fname=fname_pro)
                h5_io.h5_create_measurement_group(measurement=self,
                                                  h5group=self.h5file_pro)

                name = 'data/sim'
                if np.sum(self.imageSIM) == 0:
                    dset = self.h5file_pro.create_dataset(name,
                                                          data=h5py.Empty("f"))
                    self.show_text("[H5] SIM images are empty.")
                else:
                    dset = self.h5file_pro.create_dataset(name,
                                                          data=self.imageSIM)
                    self.show_text("[H5] SIM images are saved.")
                dset.attrs['kx'] = self.kx_full
                dset.attrs['ky'] = self.ky_full

                if self.numSets != 0:
                    for idx in range(self.numSets):
                        roi_group_name = f'data/roi/{idx:03}'
                        raw_set = self.h5file_pro.create_dataset(
                            roi_group_name + '/raw',
                            data=self.imageRaw_ROI[idx])
                        raw_set.attrs['cx'] = self.oSegment.selected_cx[idx]
                        raw_set.attrs['cy'] = self.oSegment.selected_cy[idx]
                        sim_set = self.h5file_pro.create_dataset(
                            roi_group_name + '/sim',
                            data=self.imageSIM_ROI[idx])
                        sim_set.attrs['kx'] = self.kx_roi[idx]
                        sim_set.attrs['ky'] = self.ky_roi[idx]
                    self.show_text("[H5] ROI images are saved.")

                self.h5file_pro.close()

            if self.ui.saveTif.isChecked():
                fname_sim = os.path.join(
                    self.pathname, self.sample_name +
                    f'_C{self.current_channel_display()}_SIM' + '.tif')
                fname_ini = os.path.join(
                    self.pathname, self.sample_name +
                    f'_C{self.current_channel_display()}_Settings' + '.ini')
                if np.sum(self.imageSIM) != 0:
                    tif.imwrite(fname_sim, np.single(self.imageSIM))
                    self.app.settings_save_ini(fname_ini, save_ro=False)
                    self.show_text("[Tif] SIM images are saved.")
                else:
                    self.show_text("[Tif] SIM images are empty.")

                if self.numSets != 0:
                    for idx in range(self.numSets):
                        fname_roi = os.path.join(
                            self.pathname, self.sample_name +
                            f'_Roi_C{self.current_channel_display()}_{idx:003}_SIM'
                            + '.tif')
                        tif.imwrite(fname_roi,
                                    np.single(self.imageSIM_ROI[idx]))
                    self.show_text("[Tif] ROI images are saved.")
Example #56
0
    def saveMeasurements(self):
        timestamp = time.strftime("%y%m%d_%H%M%S", time.localtime())
        ch = self.current_channel_process()  # selected channel
        if self.isCalibrated:
            if self.ui.saveH5.isChecked():
                if list_equal(self.imageSIM_store, self.imageSIM):
                    self.show_text("[NOT SAVED]\tSIM images are identical.")
                else:
                    # create file name for the processed file
                    fname_pro = os.path.join(
                        self.filepath, self.filetitle +
                        f'_{timestamp}_C{self.current_channel_process()}_Processed.h5'
                    )
                    # create H5 file
                    self.h5file_pro = h5_io.h5_base_file(app=self.app,
                                                         measurement=self,
                                                         fname=fname_pro)
                    # create measurement group and save measurement settings
                    h5_io.h5_create_measurement_group(measurement=self,
                                                      h5group=self.h5file_pro)

                    # names of the processed image groups
                    sim_name = 'data/sim'
                    avg_name = 'data/avg'
                    std_name = 'data/std'
                    if np.sum(self.imageSIM) == 0:
                        dset = self.h5file_pro.create_dataset(
                            sim_name, data=h5py.Empty("f"))
                        dset_1 = self.h5file_pro.create_dataset(
                            avg_name, data=h5py.Empty("f"))
                        dset_2 = self.h5file_pro.create_dataset(
                            std_name, data=h5py.Empty("f"))
                        # self.show_text("[UNSAVED] SIM images are empty.")
                    else:
                        dset = self.h5file_pro.create_dataset(
                            sim_name, data=self.imageSIM)
                        dset_1 = self.h5file_pro.create_dataset(
                            avg_name, data=self.imageAVG)
                        dset_2 = self.h5file_pro.create_dataset(
                            std_name, data=self.imageSTD)
                        self.show_text("[SAVED]\tSIM images to <H5>.")
                    dset.attrs['kx'] = self.kx_full
                    dset.attrs['ky'] = self.ky_full

                    if self.numSets != 0:
                        for idx in range(self.numSets):
                            roi_group_name = f'data/roi/{idx:03}'
                            raw_set = self.h5file_pro.create_dataset(
                                roi_group_name + '/raw',
                                data=self.imageRAW_ROI[idx])
                            raw_set.attrs['cx'] = self.oSegment.selected_cx[
                                idx]
                            raw_set.attrs['cy'] = self.oSegment.selected_cy[
                                idx]
                            sim_set = self.h5file_pro.create_dataset(
                                roi_group_name + '/sim',
                                data=self.imageSIM_ROI[idx])
                            sim_set.attrs['kx'] = self.kx_roi[idx]
                            sim_set.attrs['ky'] = self.ky_roi[idx]
                            avg_set = self.h5file_pro.create_dataset(
                                roi_group_name + '/avg',
                                data=self.imageAVG_ROI[idx])
                            std_set = self.h5file_pro.create_dataset(
                                roi_group_name + '/std',
                                data=self.imageSTD_ROI[idx])
                        self.show_text("[SAVED]\tROI images to <H5>.")

                    self.h5file_pro.close()

        if self.ui.saveTif.isChecked():
            fname_sim = os.path.join(
                self.filepath, self.filetitle +
                f'_{timestamp}_C{self.current_channel_process()}_SIM' + '.tif')
            fname_ini = os.path.join(
                self.filepath, self.filetitle +
                f'_{timestamp}_C{self.current_channel_process()}_Settings' +
                '.ini')
            fname_avg = os.path.join(
                self.filepath, self.filetitle +
                f'_{timestamp}_C{self.current_channel_process()}_AVG' + '.tif')
            fname_std = os.path.join(
                self.filepath, self.filetitle +
                f'_{timestamp}_C{self.current_channel_process()}_STD' + '.tif')

            self.app.settings_save_ini(fname_ini, save_ro=False)
            if np.sum(self.imageSIM) != 0:
                tif.imwrite(fname_sim, np.single(self.imageSIM))
                tif.imwrite(fname_avg, np.single(self.imageAVG))
                tif.imwrite(fname_std, np.single(self.imageSTD))
                self.show_text("[SAVED]\tSIM images to <TIFF>.")
            # else:
            # self.show_text("[UNSAVED] SIM images are empty.")

            if self.numSets != 0:
                for idx in range(self.numSets):
                    fname_roi_sim = os.path.join(
                        self.filepath, self.filetitle +
                        f'_{timestamp}_Roi_C{self.current_channel_process()}_{idx:003}_SIM'
                        + '.tif')
                    fname_roi_avg = os.path.join(
                        self.filepath, self.filetitle +
                        f'_{timestamp}_Roi_C{self.current_channel_process()}_{idx:003}_AVG'
                        + '.tif')
                    fname_roi_std = os.path.join(
                        self.filepath, self.filetitle +
                        f'_{timestamp}_Roi_C{self.current_channel_process()}_{idx:003}_STD'
                        + '.tif')
                    tif.imwrite(fname_roi_sim,
                                np.single(self.imageSIM_ROI[idx]))
                    tif.imwrite(fname_roi_avg,
                                np.single(self.imageAVG_ROI[idx]))
                    tif.imwrite(fname_roi_std,
                                np.single(self.imageSTD_ROI[idx]))
                self.show_text("[SAVED]\tROI images to <TIFF>.")
Example #57
0
def init_nufft_params(sino, geom):
    # Initialize parameters associated with the forward model.
    # inputs : sino - A dict containing parameters associated with the sinogram
    #              Ns : Number of entries in the padded sinogram along the "detector" rows
    #              Ns_orig : Number of detector elements per slice
    #              center : Center of rotation in pixels, measured from the left end of the detector
    #              angles : An array containing the acquisition angles, in radians
    #        : geom - TBD
    #

    KBLUT_LENGTH = 256
    SCALING_FACTOR = 1.7  # empirical scaling fudge factor
    k_r = 3  # kernel half-width; full kernel size is 2*k_r + 1
    beta = 4 * math.pi
    Ns = sino['Ns']
    Ns_orig = sino['Ns_orig']
    ang = sino['angles']

    q_grid = np.arange(1, sino['Ns'] + 1) - np.floor((sino['Ns'] + 1) / 2) - 1
    sino['tt'], sino['qq'] = np.meshgrid(ang * 180 / math.pi, q_grid)

    # Preload the Bessel kernel (real components!)
    kblut, KB, KB1D, KB2D = KBlut(k_r, beta, KBLUT_LENGTH)
    KBnorm = np.array(
        np.single(
            np.sum(
                np.sum(
                    KB2D(
                        np.reshape(np.arange(-k_r, k_r + 1), (2 * k_r + 1, 1)),
                        (np.arange(-k_r, k_r + 1)))))))
    #print KBnorm
    kblut = kblut / KBnorm * SCALING_FACTOR  #scaling fudge factor

    #Normalization (density compensation factor)
    #    Dq=KBdensity1(sino['qq'],sino['tt'],KB1,k_r,Ns)';

    # polar to cartesian, centered
    [xi, yi] = pol2cart(sino['qq'], sino['tt'] * math.pi / 180)
    xi = xi + np.floor((Ns + 1) / 2)
    yi = yi + np.floor((Ns + 1) / 2)

    params = {}
    params['k_r'] = k_r
    params['deapod_filt'] = afnp.array(deapodization(Ns, KB, Ns_orig),
                                       dtype=afnp.float32)
    params['sino_mask'] = afnp.array(padmat(
        np.ones((Ns_orig, sino['qq'].shape[1])),
        np.array((Ns, sino['qq'].shape[1])), 0),
                                     dtype=afnp.float32)
    params['grid'] = [Ns, Ns]  #np.array([Ns,Ns],dtype=np.int32)
    params['scale'] = ((KBLUT_LENGTH - 1) / k_r)
    params['center'] = afnp.array(sino['center'])
    params['Ns'] = Ns

    # push parameters to gpu and initialize a few in-line functions
    params['gxi'] = afnp.array(np.single(xi))
    params['gyi'] = afnp.array(np.single(yi))
    params['gxy'] = params['gxi'] + 1j * params['gyi']
    params['gkblut'] = afnp.array(np.single(kblut))
    params['det_grid'] = np.array(
        np.reshape(np.arange(0, sino['Ns']), (sino['Ns'], 1)))

    ##### Generate Ram-Lak / Shepp-Logan-like filter kernel #####

    temp_mask = np.ones(Ns)
    kernel = np.ones(Ns)
    if 'filter' in sino:
        temp_r = np.linspace(-1, 1, Ns)
        kernel = (Ns) * np.fabs(temp_r) * np.sinc(temp_r / 2)
        temp_pos = (1 - sino['filter']) / 2
        temp_mask[0:np.int16(temp_pos * Ns)] = 0
        temp_mask[np.int16((1 - temp_pos) * Ns):] = 0
    params['giDq'] = afnp.array(kernel * temp_mask, dtype=afnp.complex64)

    temp = afnp.array((-1)**params['det_grid'], dtype=afnp.float32)
    temp2 = np.array((-1)**params['det_grid'], dtype=afnp.float32)
    temp2 = afnp.array(temp2.reshape(1, sino['Ns']))
    temp3 = afnp.array(
        afnp.exp(-1j * 2 * params['center'] * (afnp.pi / params['Ns']) *
                 params['det_grid']).astype(afnp.complex64))
    temp4 = afnp.array(
        afnp.exp(1j * 2 * params['center'] * afnp.pi / params['Ns'] *
                 params['det_grid']).astype(afnp.complex64))
    params['fft2Dshift'] = afnp.array(temp * temp2, dtype=afnp.complex64)
    params['fftshift1D'] = lambda x: temp * x
    params['fftshift1D_center'] = lambda x: temp3 * x
    params['fftshift1Dinv_center'] = lambda x: temp4 * x

    ################# Back projector params #######################
    xi = xi.astype(np.float32)
    yi = yi.astype(np.float32)

    #    [s_per_b,b_dim_x,b_dim_y,s_in_bin,b_offset,b_loc,b_points_x,b_points_y] = gnufft.polarbin(xi,yi,params['grid'],4096*4,k_r)
    #    params['gs_per_b']=afnp.array(s_per_b,dtype=afnp.int64) #int64
    #    params['gs_in_bin']=afnp.array(s_in_bin,dtype=afnp.int64)
    #    params['gb_dim_x']= afnp.array(b_dim_x,dtype=afnp.int64)
    #    params['gb_dim_y']= afnp.array(b_dim_y,dtype=afnp.int64)
    #    params['gb_offset']=afnp.array(b_offset,dtype=afnp.int64)
    #    params['gb_loc']=afnp.array(b_loc,dtype=afnp.int64)
    #    params['gb_points_x']=afnp.array(b_points_x,dtype=afnp.float32)
    #    params['gb_points_y']=afnp.array(b_points_y,dtype=afnp.float32)

    return params
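pol2cart is not defined in this snippet; judging from the call site, it converts the radial grid and angles (in radians) to Cartesian sample positions. A sketch under that assumption, with the (rho, theta) argument order inferred from the call above:

import numpy as np

def pol2cart(rho, theta):
    # Polar-to-Cartesian conversion; argument order inferred from
    # pol2cart(sino['qq'], sino['tt'] * pi / 180) in init_nufft_params.
    return rho * np.cos(theta), rho * np.sin(theta)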
Example #58
0
reveal_type(np.short())  # E: {short}
reveal_type(np.intc())  # E: {intc}
reveal_type(np.intp())  # E: {intp}
reveal_type(np.int0())  # E: {intp}
reveal_type(np.int_())  # E: {int_}
reveal_type(np.longlong())  # E: {longlong}

reveal_type(np.ubyte())  # E: {ubyte}
reveal_type(np.ushort())  # E: {ushort}
reveal_type(np.uintc())  # E: {uintc}
reveal_type(np.uintp())  # E: {uintp}
reveal_type(np.uint0())  # E: {uintp}
reveal_type(np.uint())  # E: {uint}
reveal_type(np.ulonglong())  # E: {ulonglong}

reveal_type(np.half())  # E: {half}
reveal_type(np.single())  # E: {single}
reveal_type(np.double())  # E: {double}
reveal_type(np.float_())  # E: {double}
reveal_type(np.longdouble())  # E: {longdouble}
reveal_type(np.longfloat())  # E: {longdouble}

reveal_type(np.csingle())  # E: {csingle}
reveal_type(np.singlecomplex())  # E: {csingle}
reveal_type(np.cdouble())  # E: {cdouble}
reveal_type(np.complex_())  # E: {cdouble}
reveal_type(np.cfloat())  # E: {cdouble}
reveal_type(np.clongdouble())  # E: {clongdouble}
reveal_type(np.clongfloat())  # E: {clongdouble}
reveal_type(np.longcomplex())  # E: {clongdouble}
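Note that several aliases exercised above (np.int0, np.uint0, np.float_, np.longfloat, np.singlecomplex, np.complex_, np.cfloat, np.clongfloat, np.longcomplex) were removed in NumPy 2.0; a reference sketch of the surviving spellings (verify against your NumPy version):

import numpy as np

np.intp          # replaces np.int0 (np.uintp replaces np.uint0)
np.double        # replaces np.float_
np.longdouble    # replaces np.longfloat
np.csingle       # replaces np.singlecomplex
np.cdouble       # replaces np.complex_ and np.cfloat
np.clongdouble   # replaces np.clongfloat and np.longcomplex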
Example #59
0
def draw(update=False):
    windowSurface.blit(blank, (0, 0))

    # draw lines
    for i in range(gv.n_rows):
        pygame.draw.line(windowSurface, BLACK, (0, i * psz + pszh),
                         (window_sz[0], i * psz + pszh), LINE_WIDTH)
        pygame.draw.line(windowSurface, BLACK, (i * psz + pszh, 0),
                         (i * psz + pszh, window_sz[1]), LINE_WIDTH)

    for i in range(gv.n_rows):
        for j in range(gv.n_cols):
            coord = np.asarray((i * psz, j * psz))
            if board[i, j] == 1:
                windowSurface.blit(blackp, coord)
            elif board[i, j] == -1:
                windowSurface.blit(whitep, coord)

            if P_map[i, j] != 0 and show_txt:
                visit_total = visit_count_map.sum()
                rc = int(
                    np.min((255, 3 * 255. * visit_count_map[i, j] /
                            np.single(visit_total))))
                bgc = [rc, 0, 0]
                fc = [255, 255, 255]

                txt = '%1.2f + %1.2f' % (Q_map[i, j], P_map[i, j])
                tsz = np.asarray(basicFont.size(txt), dtype='single')
                tcoord = coord + pszh - np.asarray(
                    [tsz[0] / 2., n_txt_rows * tsz[1] / 2])
                pygame.draw.rect(windowSurface, bgc, [tcoord, tsz])

                text = basicFont.render(txt, True, fc)
                windowSurface.blit(text, tcoord)
                tsz1 = copy.deepcopy(tsz)

                txt = '%1.2f  %i' % (Q_map[i, j] + P_map[i, j],
                                     visit_count_map[i, j])
                tsz = np.asarray(basicFont.size(txt), dtype='single')
                tcoord = coord + pszh - np.asarray(
                    [tsz[0] / 2., n_txt_rows * tsz[1] / 2])
                tcoord[1] += tsz1[1]
                pygame.draw.rect(windowSurface, bgc, [tcoord, tsz])

                text = basicFont.render(txt, True, fc)
                windowSurface.blit(text, tcoord)
                tsz2 = copy.deepcopy(tsz)
            else:
                tsz1 = tsz2 = [0, 0]

            if P_map_next[i, j] and show_txt:
                visit_total = visit_count_map_next.sum()
                rc = int(
                    np.min((255, 3 * 255. * visit_count_map_next[i, j] /
                            np.single(visit_total))))
                bgc = [0, rc, 0]
                fc = [255, 255, 255]

                txt = '%1.2f + %1.2f' % (Q_map_next[i, j], P_map_next[i, j])
                tsz = np.asarray(basicFont.size(txt), dtype='single')
                tcoord = coord + pszh - np.asarray(
                    [tsz[0] / 2., n_txt_rows * tsz[1] / 2])
                tcoord[1] += tsz1[1] + tsz2[1]
                pygame.draw.rect(windowSurface, bgc, [tcoord, tsz])

                text = basicFont.render(txt, True, fc)
                windowSurface.blit(text, tcoord)
                tsz3 = copy.deepcopy(tsz)

                txt = '%1.2f  %i' % (Q_map_next[i, j] + P_map_next[i, j],
                                     visit_count_map_next[i, j])
                tsz = np.asarray(basicFont.size(txt), dtype='single')
                tcoord = coord + pszh - np.asarray(
                    [tsz[0] / 2., n_txt_rows * tsz[1] / 2])
                tcoord[1] += tsz1[1] + tsz2[1] + tsz3[1]
                pygame.draw.rect(windowSurface, bgc, [tcoord, tsz])

                text = basicFont.render(txt, True, fc)
                windowSurface.blit(text, tcoord)

    if update:
        pygame.display.update()
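The red/green intensity above encodes the relative visit count, saturating at 255; a minimal sketch of that mapping (function name hypothetical):

import numpy as np

def visit_color(count, total, gain=3.0):
    # Map a visit fraction to a 0-255 channel intensity, clipped at 255.
    return int(min(255.0, gain * 255.0 * count / max(np.single(total), 1.0)))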
Example #60
0
def correctMatrix(M):
	# Shift M along its second eigenvector so that eigenvalue D[1] is
	# replaced by -spacing(D[1].real), i.e. pushed to just below zero.
	D, V = linalg.eig(M)
	V1 = V[:, 1]
	M2 = M + V1 * V1.T * (-1 * np.spacing(np.single(D[1].real)) - D[1])
	return M2.real
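A usage sketch, assuming np.matrix inputs (the V1 * V1.T outer product relies on matrix, not ndarray, semantics) and the usual numpy/linalg imports:

import numpy as np
from numpy import linalg

M = np.matrix([[2.0, 0.5], [0.5, 1.0]])  # hypothetical symmetric matrix
M2 = correctMatrix(M)
print(linalg.eigvals(M2))  # eigenvalue D[1] is now just below zero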