Example #1
    def create_bernouilli(self, m):
        """
        Create a random direction to estimate the stochastic gradient.
        We use a Bernouilli distribution : bernouilli = (+1,+1,-1,+1,-1,.....)
        """
        bernouilli = copy.deepcopy(m)
        for (name, value) in m.items():
            bernouilli[name]['value'] = 1 if random.randint(0, 1) else -1

        g = utils.norm2(self.previous_gradient)
        d = utils.norm2(bernouilli)

        if g > 0.00001:
            bernouilli = utils.linear_combinaison(0.55, bernouilli,
                                                  0.25 * d / g,
                                                  self.previous_gradient)

        for (name, value) in m.items():
            if bernouilli[name]['value'] == 0.0:
                bernouilli[name]['value'] = 0.2
            if abs(bernouilli[name]['value']) < 0.2:
                bernouilli[name]['value'] = 0.2 * utils.sign_of(
                    bernouilli[name]['value'])

        return bernouilli
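For context, the core of the routine above is the independent +/-1 draw per parameter; the rest rescales the direction using the previous gradient. A minimal standalone sketch of that draw (the parameter names here are hypothetical):

import random

def bernoulli_direction(params):
    # Draw an independent +1/-1 component for every parameter name.
    return {name: 1 if random.randint(0, 1) else -1 for name in params}

print(bernoulli_direction({'mobility': 0, 'king_safety': 0}))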
Example #2
    def __call__(self, L, P, psf_only=False):
        # compute J = L * P (convolution) and the image derivatives
        utils.convolve2d(L, P, output=self._J)
        utils.dx(self._J, output=self._dxJ)
        utils.dy(self._J, output=self._dyJ)
        utils.dx(L, output=self._dxL)
        utils.dy(L, output=self._dyL)
        utils.dx_b(self._dxJ, output=self._dxxJ)
        utils.dy_b(self._dyJ, output=self._dyyJ)
        utils.dx_b(self._dyJ, output=self._dxyJ)
        # energy for data compatibility

        R = self._J - self.I0
        dxR = self._dxJ - self._dxI0
        dyR = self._dyJ - self._dyI0
        dxxR = self._dxxJ - self._dxxI0
        dyyR = self._dyyJ - self._dyyI0
        dxyR = self._dxyJ - self._dxyI0

        E = self.w0 * utils.norm2(R)
        E += self.w1 * utils.norm2(dxR)
        E += self.w1 * utils.norm2(dyR)
        #~ E += self.w2 * utils.norm2(dxxR)
        #~ E += self.w2 * utils.norm2(dyyR)
        #~ E += self.w2 * utils.norm2(dxyR)

        if not psf_only:
            # energy for global prior
            E += self.lambda1 * utils.global_prior(self._dxL, self.a, self.b)
            E += self.lambda1 * utils.global_prior(self._dyL, self.a, self.b)
            # energy for local prior
            E += self.lambda2 * utils.local_prior(self._dxL, self._dxI0, self.M)
            E += self.lambda2 * utils.local_prior(self._dyL, self._dyI0, self.M)

        return E/self.I0.size
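The data term above is a weighted sum of squared residual norms, divided by the pixel count. A minimal NumPy sketch of that term, assuming utils.norm2 returns a sum of squares (the weights and arrays below are illustrative):

import numpy as np

def data_energy(J, I0, dxR, dyR, w0=1.0, w1=0.5):
    # E = w0*||J - I0||^2 + w1*||dxR||^2 + w1*||dyR||^2, per pixel.
    E = w0 * np.sum((J - I0) ** 2)
    E += w1 * np.sum(dxR ** 2)
    E += w1 * np.sum(dyR ** 2)
    return E / I0.size

I0 = np.zeros((4, 4))
J = np.ones((4, 4))
print(data_energy(J, I0, J - I0, J - I0))  # 1.0 + 0.5 + 0.5 = 2.0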
Example #3
def point_line_segment_distances2(points, linesA, linesB):
    n_p = points.shape[0]
    n_l = linesA.shape[0]

    dir_line = utils.normalize(linesB - linesA).unsqueeze(0).expand(
        n_p, -1, -1)
    vecA = points.unsqueeze(1).expand(
        -1, n_l, -1) - linesA.unsqueeze(0).expand(n_p, -1, -1)
    vecB = points.unsqueeze(1).expand(
        -1, n_l, -1) - linesB.unsqueeze(0).expand(n_p, -1, -1)

    # Distances to first endpoint
    dists2 = utils.norm2(vecA)

    # Distances to second endpoint
    dists2 = torch.min(dists2, utils.norm2(vecB))

    # Points within segment
    in_line = (utils.dot(dir_line, vecA) > 0) & (utils.dot(dir_line, vecB) < 0)

    # Distances to line
    line_dists2 = utils.norm2(
        utils.project_to_tangent(vecA[in_line], dir_line[in_line]))
    dists2[in_line] = line_dists2

    return dists2
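The batched logic above reduces to the classic point-to-segment test: if the point projects inside the segment, use the perpendicular distance; otherwise use the nearer endpoint. A self-contained single-pair sketch in PyTorch:

import torch

def point_segment_dist2(p, a, b):
    # Clamp the projection parameter t to [0, 1] so the closest point
    # stays on the segment, then return the squared distance to it.
    ab = b - a
    t = torch.clamp(torch.dot(p - a, ab) / torch.dot(ab, ab), 0.0, 1.0)
    closest = a + t * ab
    return torch.sum((p - closest) ** 2)

p = torch.tensor([0.0, 1.0])
a = torch.tensor([-1.0, 0.0])
b = torch.tensor([1.0, 0.0])
print(point_segment_dist2(p, a, b))  # tensor(1.)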
Example #4
    def __init__(self, origin=[0, 0, 0], u=[1, 0, 0], v=[0, 1, 0], w=[0, 0, 1], direction=1):
        self.origin = toArray3(origin)
        self.u = toArray3(u)
        self.v = toArray3(v)
        self.w = toArray3(w)
        self.nw = Utils.norm2(numpy.cross(self.u, self.v))
        self.nu = Utils.norm2(numpy.cross(self.v, self.w))
        self.nv = Utils.norm2(numpy.cross(self.w, self.u))
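Here norm2 of a cross product measures the area of the parallelogram spanned by two axes (e.g. nw for the u-v face). A quick NumPy check of that identity, assuming Utils.norm2 is the Euclidean norm:

import numpy

u = numpy.array([1.0, 0.0, 0.0])
v = numpy.array([0.0, 2.0, 0.0])
# |u x v| = |u||v|sin(angle) = area of the u-v parallelogram.
print(numpy.linalg.norm(numpy.cross(u, v)))  # 2.0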
Example #5
def grad_l2(beta, rng=RandomUniform(0, 1)):
    """Sub-gradient of the function

        f(x) = |x|_2,

    where |x|_2 is the L2-norm.
    """
    norm_beta = norm2(beta)
    if norm_beta > TOLERANCE:
        return beta * (1.0 / norm_beta)
    else:
        D = beta.shape[0]
        u = (rng(D, 1) * 2.0) - 1.0  # [-1, 1]^D
        norm_u = norm2(u)
        a = rng()  # [0, 1]

        return u * (a / norm_u)
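The branch structure implements the subdifferential of the L2 norm: away from zero the gradient is the unit vector beta/||beta||; at zero any vector of norm at most one is a valid subgradient, so a random one is returned. A self-contained NumPy sketch (TOLERANCE and the RNG are replaced with plain NumPy equivalents):

import numpy as np

TOLERANCE = 1e-8

def grad_l2_sketch(beta, rng=None):
    if rng is None:
        rng = np.random.default_rng(0)
    norm_beta = np.linalg.norm(beta)
    if norm_beta > TOLERANCE:
        # Differentiable case: the gradient is the unit vector.
        return beta / norm_beta
    # At zero, return a random vector of norm <= 1 (a valid subgradient).
    u = rng.uniform(-1.0, 1.0, size=beta.shape)
    return u * (rng.uniform() / np.linalg.norm(u))

print(grad_l2_sketch(np.array([[3.0], [4.0]])))  # [[0.6], [0.8]]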
Example #6
    def grad(self, x):
        """Sub-gradient of the function

            f(x) = |x|_2,

        where |x|_2 is the L2-norm.
        """
        norm_beta = norm2(x)
        if norm_beta > TOLERANCE:
            return x * (1.0 / norm_beta)
        else:
            D = x.shape[0]
            u = (self.rng(D, 1) * 2.0) - 1.0  # [-1, 1]^D
            norm_u = norm2(u)
            a = self.rng()  # [0, 1]

            return (self.l * (a / norm_u)) * u
Example #7
    def grad(self, x):
        """Sub-gradient of the function

            f(x) = |x|_2,

        where |x|_2 is the L2-norm.
        """
        norm_beta = norm2(x)
        if norm_beta > TOLERANCE:
            return x * (1.0 / norm_beta)
        else:
            D = x.shape[0]
            u = (self.rng(D, 1) * 2.0) - 1.0  # [-1, 1]^D
            norm_u = norm2(u)
            a = self.rng()  # [0, 1]

            return (self.l * (a / norm_u)) * u
Example #8
def grad_l2(beta, rng=RandomUniform(0, 1)):
    """Sub-gradient of the function

        f(x) = |x|_2,

    where |x|_2 is the L2-norm.
    """
    norm_beta = norm2(beta)
    if norm_beta > TOLERANCE:
        return beta * (1.0 / norm_beta)
    else:
        D = beta.shape[0]
        u = (rng(D, 1) * 2.0) - 1.0  # [-1, 1]^D
        norm_u = norm2(u)
        a = rng()  # [0, 1]

        return u * (a / norm_u)
Example #9
    def create_bernouilli(self, m):
        '''
            Create a random direction to estimate the stochastic gradient.
        '''
        bernouilli = {}
        for (name, value) in m.items():
            bernouilli[name] = 1 if random.randint(0, 1) else -1

        g = utils.norm2(self.previous_gradient)
        d = utils.norm2(bernouilli)
        if g > 0.00001:
            bernouilli = utils.linear_combinaison(0.55, bernouilli,
                                                  0.25 * d / g,
                                                  self.previous_gradient)

        for (name, value) in m.items():
            if bernouilli[name] == 0.0:
                bernouilli[name] = 0.2
            if abs(bernouilli[name]) < 0.2:
                bernouilli[name] = 0.2 * utils.sign_of(bernouilli[name])
        return bernouilli
Example #10
    def __call__(self, L, P, psf_only=False):
        # compute J = L * P (convolution) and the image derivatives
        utils.convolve2d(L, P, output=self._J)
        utils.dx(self._J, output=self._dxJ)
        utils.dy(self._J, output=self._dyJ)
        utils.dx(L, output=self._dxL)
        utils.dy(L, output=self._dyL)
        utils.dx_b(self._dxJ, output=self._dxxJ)
        utils.dy_b(self._dyJ, output=self._dyyJ)
        utils.dx_b(self._dyJ, output=self._dxyJ)
        # energy for data compatibility

        R = self._J - self.I0
        dxR = self._dxJ - self._dxI0
        dyR = self._dyJ - self._dyI0
        dxxR = self._dxxJ - self._dxxI0
        dyyR = self._dyyJ - self._dyyI0
        dxyR = self._dxyJ - self._dxyI0

        E = self.w0 * utils.norm2(R)
        E += self.w1 * utils.norm2(dxR)
        E += self.w1 * utils.norm2(dyR)
        #~ E += self.w2 * utils.norm2(dxxR)
        #~ E += self.w2 * utils.norm2(dyyR)
        #~ E += self.w2 * utils.norm2(dxyR)

        if not psf_only:
            # energy for global prior
            E += self.lambda1 * utils.global_prior(self._dxL, self.a, self.b)
            E += self.lambda1 * utils.global_prior(self._dyL, self.a, self.b)
            # energy for local prior
            E += self.lambda2 * utils.local_prior(self._dxL, self._dxI0,
                                                  self.M)
            E += self.lambda2 * utils.local_prior(self._dyL, self._dyI0,
                                                  self.M)

        return E / self.I0.size
Example #11
def random(shape,
           density=1.0,
           rng=utils.RandomUniform(0, 1).rand,
           sort=False,
           normalise=False):
    """Generates a random p-by-1 vector.

    shape : A tuple. The shape of the underlying data. E.g., beta may represent
            an underlying 2-by-3-by-4 image, and will in that case be 24-by-1.

    density : A scalar in (0, 1]. The density of the returned regression vector
            (fraction of non-zero elements). Zero-elements will be randomly
            distributed in the vector. Default is 1.0.

    rng : The random number generator. Must be a function that takes *shape as
            input. Default is utils.RandomUniform in the interval [0, 1).

    sort : A boolean. Whether or not to sort the vector. The vector is sorted
            along the dimensions in order from the first. Default is False.

    normalise : A boolean. Whether or not to normalise the vector. Default is
            False.
    """
    if not isinstance(shape, (list, tuple)):
        shape = (shape, )

    density = max(0.0, min(density, 1.0))

    p = np.prod(shape)
    ps = int(density * p + 0.5)

    beta = rng(p)
    beta[ps:] = 0.0
    beta = np.random.permutation(beta)

    if sort:
        beta = np.reshape(beta, shape)
        for i in range(len(shape)):
            beta = np.sort(beta, axis=i)

    beta = np.reshape(beta, (p, 1))

    if normalise:
        beta *= 1.0 / utils.norm2(beta)

    return beta
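The recipe is: draw p values, zero out a (1 - density) fraction, shuffle so the zeros land randomly, then optionally sort and scale to unit norm. A compact sketch of the same steps (names below are illustrative):

import numpy as np

def sparse_unit_vector(p, density=0.5, seed=0):
    rng = np.random.default_rng(seed)
    beta = rng.uniform(0.0, 1.0, p)
    beta[int(density * p + 0.5):] = 0.0   # zero out the tail...
    rng.shuffle(beta)                     # ...then spread zeros randomly
    beta = beta.reshape((p, 1))
    return beta / np.linalg.norm(beta)    # normalise to unit L2 norm

print(sparse_unit_vector(6))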
Example #12
    def run(self):
        '''
            Return a point which is (hopefully) a minimizer of the goal
            function f, starting from point theta0.

            Returns:
                the point (as a dict) which is (hopefully) a minimizer of 'f'.
        '''
        k = 0
        theta = self.theta0
        while True:
            if self.constraints is not None:
                theta = self.constraints(theta)
            print("theta = " + utils.pretty(theta))
            c_k = self.c / ((k + 1)**self.gamma)
            a_k = self.a / ((k + 1 + self.A)**self.alpha)
            gradient = self.approximate_gradient(theta, c_k)
            # For steepest descent we update via a constant small step in the gradient direction
            mu = -0.01 / max(1.0, utils.norm2(gradient))
            theta = utils.linear_combinaison(1.0, theta, mu, gradient)

            ## For RPROP, we update with information about the sign of the gradients
            theta = utils.linear_combinaison(1.0, theta, -0.01,
                                             self.rprop(theta, gradient))
            # We then move to the point which gives the best average goal value
            (avg_goal, avg_theta) = self.average_best_evals(30)
            theta = utils.linear_combinaison(0.8, theta, 0.2, avg_theta)

            k = k + 1
            if k >= self.max_iter:
                break

            if (k % 100 == 0) or (k <= 1000):
                (avg_goal, avg_theta) = self.average_evaluations(30)
                print("iter = " + str(k))
                print("mean goal (all) = " + str(avg_goal))
                print("mean theta (all) = " + utils.pretty(avg_theta))

                (avg_goal, avg_theta) = self.average_best_evals(30)
                print('mean goal (best) = ' + str(avg_goal))
                print('mean theta (best) = ' + utils.pretty(avg_theta))
            print(
                '-----------------------------------------------------------')
        return theta
Example #13
def random(shape, density=1.0, rng=utils.RandomUniform(0, 1).rand,
           sort=False, normalise=False):
    """Generates a random p-by-1 vector.

    shape : A tuple. The shape of the underlying data. E.g., beta may represent
            an underlying 2-by-3-by-4 image, and will in that case be 24-by-1.

    density : A scalar in (0, 1]. The density of the returned regression vector
            (fraction of non-zero elements). Zero-elements will be randomly
            distributed in the vector. Default is 1.0.

    rng : The random number generator. Must be a function that takes *shape as
            input. Default is utils.RandomUniform in the interval [0, 1).

    sort : A boolean. Whether or not to sort the vector. The vector is sorted
            along the dimensions in order from the first. Default is False.

    normalise : A boolean. Whether or not to normalise the vector. Default is
            False.
    """
    if not isinstance(shape, (list, tuple)):
        shape = (shape,)

    density = max(0.0, min(density, 1.0))

    p = np.prod(shape)
    ps = int(density * p + 0.5)

    beta = rng(p)
    beta[ps:] = 0.0
    beta = np.random.permutation(beta)

    if sort:
        beta = np.reshape(beta, shape)
        for i in range(len(shape)):
            beta = np.sort(beta, axis=i)

    beta = np.reshape(beta, (p, 1))

    if normalise:
        beta /= utils.norm2(beta)

    return beta
Example #14
def threat_level(vip_pos, guard_pos, hostile_pos):

    tv = utils.sub(hostile_pos, vip_pos)
    gv = utils.sub(guard_pos, vip_pos)

    if utils.norm2(tv) == 0:
        return 0

    dst_threat = 70 * math.exp(-math.sqrt(utils.dst2(vip_pos, hostile_pos)))

    coverage = 0
    if utils.norm2(gv) != 0 and utils.norm2(gv) < utils.norm2(tv):
        coverage = 10 * max(utils.dot(tv, gv), 0) / \
          math.sqrt(utils.norm2(tv) * utils.norm2(gv))

    return dst_threat - coverage
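threat_level combines a distance-decayed threat with a coverage discount when the guard stands between the VIP and the hostile. A self-contained sketch with plain tuples, assuming norm2/dst2 return squared lengths and dot is the ordinary dot product:

import math

def threat_level_sketch(vip, guard, hostile):
    tv = (hostile[0] - vip[0], hostile[1] - vip[1])   # VIP -> hostile
    gv = (guard[0] - vip[0], guard[1] - vip[1])       # VIP -> guard
    n_tv = tv[0] ** 2 + tv[1] ** 2
    n_gv = gv[0] ** 2 + gv[1] ** 2
    if n_tv == 0:
        return 0
    # Threat decays exponentially with the VIP-hostile distance.
    dst_threat = 70 * math.exp(-math.sqrt(n_tv))
    coverage = 0
    if 0 < n_gv < n_tv:
        # Cosine of the angle between the two vectors scales the cover.
        dot = tv[0] * gv[0] + tv[1] * gv[1]
        coverage = 10 * max(dot, 0) / math.sqrt(n_tv * n_gv)
    return dst_threat - coverage

print(threat_level_sketch((0, 0), (1, 0), (3, 0)))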
Example #15
def kernel(params_i, params_j, const):
    n = norm2(params_i, params_j)
    return np.exp(-const * n)
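If norm2(params_i, params_j) is the squared Euclidean distance, this is the Gaussian (RBF) kernel exp(-const * ||i - j||^2). A quick check with plain NumPy under that assumption:

import numpy as np

def rbf_kernel(params_i, params_j, const):
    # exp(-const * squared Euclidean distance)
    return np.exp(-const * np.sum((params_i - params_j) ** 2))

print(rbf_kernel(np.array([0.0, 0.0]), np.array([1.0, 1.0]), 0.5))  # exp(-1)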
Example #16
def main(lambda2):
	X = get_input("input_pos_neg.txt")
	m = 0
	m_pos = 0
	m_neg = 0
	m_distinct_X = len(X)
	for (pos,neg,s) in X:
		m_pos += pos
		m_neg += neg
		m += pos + neg

	print 'm,m_pos,m_neg=',m,m_pos,m_neg
	theta_max_pos = 1.0*m_neg/m
	theta_max_neg = 1.0*m_pos/m

	tmp  = Xtheta_mapper(X)
	# Z = X' * theta_max
	Z = Xtheta_reducer(tmp,theta_max_pos,theta_max_neg)
	lambda_max = max([abs(w) for w in Z.itervalues()]) / m
	print 'lambda_max = ',lambda_max,'lambda2 = ',lambda2
	'''
	sum_b= [e[0]for e in X]	
	for i in range(m-1):
		sum_b[i+1] += sum_b[i]
	'''
	if lambda2>=lambda_max:
		print "lambda2 >= lambda_max"
		return 
	print "get len_of_X_column"
	tmp = projection_scale_mapper(X) #PX
	len_of_X_column = projection_scale_reducer(tmp)
	
	j0 = 0
	sign_j0 = 0
	mi = m
	for (k,v) in Z.items():
		#print abs(v),m*lambda_max
		if abs(abs(v)-m*lambda_max)<1e-10: # theta*Xj == m*lambda_max
			if len_of_X_column[k]<mi: # find j0 with the sparsest column
				mi = len_of_X_column[k]
				j0 = k
				if v > 0:
					sign_j0 = 1
				elif v < 0:
					sign_j0 = -1
				else:
					sign_j0 = 0
	print 'j0 = ',j0
	X_raw_star = tmp[j0] # j0th column of X, list of instance_id
	L_X_star = mi
	#print 'length of X_raw_star', len(X_raw_star)
	P_X_star = [(instance_id,sign_j0*(1.0-1.0*L_X_star/m),pos+neg) for (pos,neg,instance_id) in X_raw_star] #PX*, list of (instance_id, a, b)
	sign_P_X_star_b = -1.0*sign_j0*L_X_star/m


	hash_P_X_star = [0] * m_distinct_X
	hash_P_X_star_a = [0] * m_distinct_X
	
	sum_a_star_p_n = 0
	sum_a_star_square_p_n = 0
	for (idx,a,p_n) in P_X_star:
		sum_a_star_p_n += a*p_n		
		sum_a_star_square_p_n+=a*a*p_n
		hash_P_X_star_a[idx] = a

	for (pos,neg,index) in X_raw_star:
		hash_P_X_star[index]=1
	print 'sum_a_star_p_n ', sum_a_star_p_n, ' sum_a_star_square_p_n ',sum_a_star_square_p_n
	
	print 'sign_P_X_star_b ', sign_P_X_star_b 

	P_X_star_norm2 = norm2(P_X_star,sign_P_X_star_b,m)
	print 'P_X_star_norm2 ',P_X_star_norm2
	print 'length of P_X_star = ',len(P_X_star)
	#X_raw_star = [feature_key,feature_key,...]
	
	#len_of_X_column = {column_id:len}
	r = g(theta_max_pos,theta_max_neg,m_pos,m_neg,lambda2/lambda_max) - g(theta_max_pos,theta_max_neg,m_pos,m_neg,1)
	r += (1-lambda2/lambda_max)*(m_pos*math.log(theta_max_pos/(1.0 - theta_max_pos))*theta_max_pos  + m_neg*math.log(theta_max_neg/(1.0 - theta_max_neg))*theta_max_neg)/m	
	r = math.sqrt(r*m/2.0)
	print 'r ',r
	#print j0,sign_j0
	print "final step"
	tmp = final_mapper(X,len_of_X_column,hash_P_X_star,hash_P_X_star_a)
	rejection_features = final_reducer(tmp,m,P_X_star,sign_P_X_star_b,lambda2,lambda_max,sign_j0,r,Z,P_X_star_norm2,L_X_star,sum_a_star_p_n,sum_a_star_square_p_n)
	print 'rejection length = ',len(rejection_features)
	Y = []
	length_feature_before = 0
	length_feature_after = 0
	for (pos,neg,s) in X:
		nt = []
		for t in s:
			if not t in rejection_features:
				nt.append(t)
		Y.append([pos,neg,nt])
		length_feature_after+=len(nt)
		length_feature_before +=len(s)
	print 'average length of feature before',length_feature_before*1.0/len(X),'after',length_feature_after*1.0/len(X),
	save_X(Y,'input_after_filter.txt')
Example #17
    def getDepth(self):
        return Utils.norm2(self.u)
Example #18
    def run(self):
        """
        Return a point which is (hopefully) a minimizer of the goal
        function f, starting from point theta0.

        Returns:
            The point (as a dict) which is (hopefully) a minimizer of "f".
        """

        k = 0
        theta = self.theta0

        while True:
            k = k + 1

            self.iter = k

            if self.constraints is not None:
                theta = self.constraints(theta)

            #print("theta  = " + utils.pretty(theta))

            c_k = self.c / (k**self.gamma)
            a_k = self.a / ((k + self.A)**self.alpha)

            gradient = self.approximate_gradient(theta, c_k)

            #print(str(k) + " gradient = " + utils.pretty(gradient))
            # if k % 1000 == 0:
            # print(k + utils.pretty(theta) + "norm2(g) = " + str(utils.norm2(gradient)))
            # print(k + " theta = " + utils.pretty(theta))

            ## For SPSA we update with a small step (theta = theta - a_k * gradient)
            ## theta = utils.linear_combinaison(1.0, theta, -a_k, gradient)

            ## For steepest descent we update via a constant small step in the gradient direction
            mu = -0.01 / max(1.0, utils.norm2(gradient))
            theta = utils.linear_combinaison(1.0, theta, mu, gradient)

            ## For RPROP, we update with information about the sign of the gradients
            theta = utils.linear_combinaison(1.0, theta, -0.01,
                                             self.rprop(theta, gradient))

            ## We then move to the point which gives the best average of goal
            (avg_goal, avg_theta) = self.average_best_evals(30)
            theta = utils.linear_combinaison(0.98, theta, 0.02, avg_theta)

            if (k % 10 == 0):
                (avg_goal, avg_theta) = self.average_evaluations(30)
                print("iter = " + str(k))
                print("mean goal (all)   = " + str(avg_goal))
                print("mean theta (all)  = " + utils.pretty(avg_theta))

                (avg_goal, avg_theta) = self.average_best_evals(30)
                print("mean goal (best)  = " + str(avg_goal))
                print("mean theta (best) = " + utils.pretty(avg_theta))
                print(
                    "-----------------------------------------------------------------"
                )

            if k >= self.max_iter:
                break

        return theta
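The c_k and a_k lines implement the standard SPSA gain schedules: the perturbation size c_k = c / k^gamma and the step size a_k = a / (k + A)^alpha both shrink as k grows. A sketch with illustrative constants (the values below are common SPSA defaults, not necessarily this class's settings):

a, A, alpha = 1.0, 100.0, 0.602
c, gamma = 0.1, 0.101

for k in (1, 10, 100, 1000):
    c_k = c / (k ** gamma)        # perturbation magnitude at iteration k
    a_k = a / ((k + A) ** alpha)  # update step size at iteration k
    print(k, round(c_k, 4), round(a_k, 4))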
Example #19
    def getWidth(self):
        return Utils.norm2(self.u)
Example #20
    def getHeight(self):
        return Utils.norm2(self.v)
Example #21
def gkernel(params_i, params_j, const):
    n = norm2(params_i, params_j)
    return -2.0 * const * (params_i - params_j) * np.exp(-const * n)
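gkernel is the gradient of the RBF kernel with respect to params_i: d/dx exp(-c||x - y||^2) = -2c (x - y) exp(-c||x - y||^2). A finite-difference check of that formula (self-contained, under the same squared-distance assumption as above):

import numpy as np

def k(x, y, c):
    return np.exp(-c * np.sum((x - y) ** 2))

x, y, c, h = np.array([1.0]), np.array([0.2]), 0.7, 1e-6
analytic = -2.0 * c * (x - y) * k(x, y, c)
numeric = (k(x + h, y, c) - k(x - h, y, c)) / (2 * h)
print(analytic, numeric)  # the two should agree to ~1e-9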
Example #22
def final_reducer(tmp,m,P_X_star,sign_P_X_star_b,lambda2,lambda_max,sign_j0,r,Z,P_X_star_norm2,L_X_star,sum_a_star_p_n,sum_a_star_square_p_n):
	dic = {}
	print "enter final_reducer"
	print 'number of distinct features = ',len(tmp.keys())
	for (feature_id,v) in tmp.items():  # k = column id 
		#print feature_id
		done = True
		##########################################
		# check PXj = 0
		for (idx,p_n,l,package) in v:
			if l!= m:
				done = False
				break
		if done == True:
			dic[feature_id] = 1
			continue
		#print "PXj=0 done "
		##########################################
		cnt_1_1 = 0
		cnt_1_0 = 0
		cnt_0_1 = 0
		cnt_0_0 = 0
		dot_PX_P_X_star = 0
		PX = []
		sum_a_star_p_n_1_1 = 0
		for (idx,p_n,l,package) in v:
			a = (1.0-1.0*l/m)
			PX.append((idx,a,p_n))
			
			if package[0]==1:
				cnt_1_1+=p_n
				dot_PX_P_X_star+=a*package[1]*p_n #a_star = package[1]
				sum_a_star_p_n_1_1+=package[1]*p_n
			else:
				cnt_1_0+=p_n
				dot_PX_P_X_star+=a*sign_P_X_star_b*p_n
		sign_PX_b = -1.0*l/m
		cnt_0_1 = L_X_star - cnt_1_1
		cnt_0_0 = m - cnt_1_1 - cnt_0_1 - cnt_1_0
		dot_PX_P_X_star += cnt_0_0*sign_PX_b*sign_P_X_star_b
		dot_PX_P_X_star += sign_PX_b*(sum_a_star_p_n-sum_a_star_p_n_1_1)
		#print cnt_0_0,cnt_0_1,cnt_1_0,cnt_1_1
		#PX = [(idx,b*(1.0-1.0*l/m),b) for (idx,b,l,package) in v]
		#print "length of PX = ",len(PX)
		
		#PX = [(instance_id, a,label),..]
		
				
		#print 'done P_X_star_norm2'
		d = m*(lambda_max-lambda2)*1.0/r/P_X_star_norm2
	
		PX_norm2 = norm2(PX,sign_PX_b,m)
		#print sign_PX_b,dot_PX_P_X_star,PX_norm2
		#print len(PX),len(P_X_star),m
		'''
		dot_PX_P_X_star2 = dot(PX,sign_PX_b,P_X_star,sign_P_X_star_b,m)
		if not abs(dot_PX_P_X_star - dot_PX_P_X_star2)<1e-10:
			print "dot failed"
		else:
			print "succeed ",feature_id
		'''
		#print "PX* done"
		z  = dot_PX_P_X_star/PX_norm2/P_X_star_norm2

		a2 = pow(P_X_star_norm2,4)*(1.0-d*d)
		a1 = 2*dot_PX_P_X_star*pow(P_X_star_norm2,2)*(1.0-d*d)
		a0 = pow(dot_PX_P_X_star,2) - d*d*pow(PX_norm2,2)*pow(P_X_star_norm2,2)
		delta = a1*a1 - 4.0*a2*a0
		if abs(delta) < 1e-10:  #border case
			delta = 0
		u2 = (-a1 + math.sqrt(delta))/(2.0 * a2)
		
		#print "a0a1a.. done"


		if z >= d:
			T = r*PX_norm2 - Z[feature_id]
		else:
			P_new_norm2 = 0
			sum_a_star_square_p_n_1_1=0
			sum_a_star_p_n_1_1=0
			for (idx,p_n,l,package) in v:
				a = (1.0-1.0*l/m)				
				if package[0]==1:
					P_new_norm2 += pow(a + u2*package[1],2) * p_n	 #a_star = package[1]														
					sum_a_star_square_p_n_1_1 += package[1]*package[1]*p_n
					sum_a_star_p_n_1_1 = package[1]*p_n
				else:
					P_new_norm2 += pow(a + u2*sign_P_X_star_b,2)*p_n
			P_new_norm2 += pow(sign_PX_b+u2*sign_P_X_star_b,2)*cnt_0_0
			P_new_norm2 += cnt_0_1*sign_PX_b*sign_PX_b+u2*u2*(sum_a_star_square_p_n - sum_a_star_square_p_n_1_1)+2*sign_PX_b*u2*(sum_a_star_p_n-sum_a_star_p_n_1_1)
			P_new_norm2 = math.sqrt(P_new_norm2)
			'''
			P_new = get_P_new(PX,sign_PX_b,P_X_star,sign_P_X_star_b,u2)			
			sign_P_new_b = sign_PX_b+u2*sign_P_X_star_b
			P_new_norm2_2 = math.sqrt(dot(P_new,sign_P_new_b,P_new,sign_P_new_b,m))
			if abs(P_new_norm2 - P_new_norm2_2) < 1e-10:
				print "P_new_norm2 succeed",feature_id
			else:
				print P_new_norm2,P_new_norm2_2,'failed1'
			'''
			T = r*P_new_norm2-u2*m*(lambda_max-lambda2) - Z[feature_id]
		#print "T done"
		"""
			There are several differences between T(1) and T(-1)
			1. z done
			2. a1 (u2) done
			3. <theta_max, X'> Z[feature_id] done
			4. PX' (a,sign_PX_b)
		"""
		z2 = -z
		if z2>=d:
			T2 = r*PX_norm2 + Z[feature_id]
		else:			
			u2 = (a1 + math.sqrt(delta))/(2.0 * a2)
			
			P_new_norm2 = 0
			sum_a_star_square_p_n_1_1=0
			sum_a_star_p_n_1_1=0
			for (idx,p_n,l,package) in v:
				a = -(1.0-1.0*l/m)				
				if package[0]==1:
					P_new_norm2 += pow(a + u2*package[1],2) * p_n	 #a_star = package[1]														
					sum_a_star_square_p_n_1_1 += package[1]*package[1]*p_n
					sum_a_star_p_n_1_1 = package[1]*p_n
				else:
					P_new_norm2 += pow(a + u2*sign_P_X_star_b,2)*p_n
			P_new_norm2 += pow(-sign_PX_b+u2*sign_P_X_star_b,2)*cnt_0_0
			P_new_norm2 += cnt_0_1*sign_PX_b*sign_PX_b+u2*u2*(sum_a_star_square_p_n - sum_a_star_square_p_n_1_1) - 2*sign_PX_b*u2*(sum_a_star_p_n-sum_a_star_p_n_1_1)
			P_new_norm2 = math.sqrt(P_new_norm2)
			'''
			P_new = get_P_new([(idx,-a,b) for (idx,a,b) in PX],-sign_PX_b,P_X_star,sign_P_X_star_b,u2)			
			sign_P_new_b = -sign_PX_b+u2*sign_P_X_star_b
			P_new_norm2_2 = math.sqrt(dot(P_new,sign_P_new_b,P_new,sign_P_new_b,m))
			if abs(P_new_norm2 - P_new_norm2_2) < 1e-10:
				print "P_new_norm2 succeed",feature_id
			else:
				print P_new_norm2,P_new_norm2_2,'failed2'
			'''
			T2 = r*P_new_norm2-u2*m*(lambda_max-lambda2) + Z[feature_id]
		#print "T2 done"
		TT = max(T,T2)
		#print TT,m*lambda2
		if TT<m*lambda2:
			dic[feature_id] = 1


	return dic
	
Example #23
    def run(self):
        """
        Return a point which is (hopefully) a minimizer of the goal
        function f, starting from point theta0.

        Returns:
            The point (as a dict) which is (hopefully) a minimizer of "f".
        """
        is_spsa = True
        is_steep_descent = False
        is_rprop = False

        k = 0
        theta = self.theta0

        while True:
            k = k + 1

            self.iter = k
            print(f'starting iter {k} ...')

            if self.constraints is not None:
                theta = self.constraints(theta)

            print('current param:')
            for name, value in utils.true_param(theta).items():
                print(f'  {name}: {value["value"]}')

            c_k = self.c / (k**self.gamma)
            a_k = self.a / ((k + self.A)**self.alpha)

            # print(f'  ck: {c_k:0.5f}')
            # print(f'  ak: {a_k:0.5f}')

            # Run the engine match here to get the gradient
            print('Run engine match ...')
            gradient = self.approximate_gradient(theta, c_k, k)

            # For SPSA we update with a small step (theta = theta - a_k * gradient)
            if is_spsa:
                theta = utils.linear_combinaison(1.0, theta, -a_k, gradient)
                logging.info(f'{__file__} > theta from spsa: {theta}')
                # print(f'new param after application of gradient:')
                # for n, v in theta.items():
                #     print(f'  {n}: {int(v["value"] * v["factor"])}')

            # For steepest descent we update via a constant small step in the gradient direction
            elif is_steep_descent:
                mu = -0.01 / max(1.0, utils.norm2(gradient))
                theta = utils.linear_combinaison(1.0, theta, mu, gradient)

            # For RPROP, we update with information about the sign of the gradients
            elif is_rprop:
                theta = utils.linear_combinaison(1.0, theta, -0.01,
                                                 self.rprop(theta, gradient))

            # Apply parameter limits
            theta = utils.apply_limits(theta)
            logging.info(f'{__file__} > theta with limits: {theta}')
            # print(f'new param after application of limits:')
            # for n, v in theta.items():
            #     print(f'  {n}: {int(v["value"] * v["factor"])}')

            # We then move to the point which gives the best average of goal
            (avg_goal, avg_theta) = self.average_best_evals(30)
            logging.info(
                f'{__file__} > avg_theta from average_best_evals: {avg_theta}')

            theta = utils.linear_combinaison(0.98, theta, 0.02, avg_theta)
            logging.info(f'{__file__} > theta with avg_theta: {theta}')
            # print(f'new param after application of best average param:')
            # for n, v in theta.items():
            #     print(f'  {n}: {int(v["value"] * v["factor"])}')

            # Apply parameter limits
            theta = utils.apply_limits(theta)  # This is the best param.
            logging.info(f'{__file__} > best param: {theta}')
            # print(f'new param after application of limits:')
            # for n, v in theta.items():
            #     print(f'  {n}: {int(v["value"] * v["factor"])}')

            # Log best param values
            for kv, vv in theta.items():
                logging.info(
                    f'<best> iter: {k}, param: {kv}, value: {int(vv["value"]*vv["factor"])}'
                )
            print('best param:')
            for n, v in theta.items():
                print(f'  {n}: {int(v["value"] * v["factor"])}')

            mean_all_goal, _ = self.average_evaluations(30)
            print(f'mean all goal: {mean_all_goal}')

            mean_best_goal, _ = self.average_best_evals(30)
            print(f'mean best goal: {mean_best_goal}')

            # Save data in csv for plotting.
            plot_data = {}
            plot_data.update({'iter': k})
            plot_data.update({'meanbestgoal': mean_best_goal})
            plot_data.update({'meanallgoal': mean_all_goal})
            plot_theta = utils.true_param(theta)
            for name, value in plot_theta.items():
                plot_data.update({name: value["value"]})

            with open(self.plot_data_file, 'a') as f:
                cnt = 0
                for name, value in plot_data.items():
                    cnt += 1
                    if cnt == len(plot_data):
                        f.write(f'{value}\n')
                    else:
                        f.write(f'{value},')

            print(f'done iter {k} / {self.max_iter}')
            logging.info(f'{__file__} > done iter {k} / {self.max_iter}')
            print('=========================================')

            # Stopping rule 1: Average goal and iteration meet the
            # stop_all_mean_goal and stop_min_iter criteria.
            if k >= self.stop_min_iter and mean_all_goal <= self.stop_all_mean_goal:
                print('Stop optimization due to good average all goal!')
                break

            # Stopping rule 2: Average best goal and iteration meet the
            # stop_best_mean_goal and stop_min_iter criteria.
            if k >= self.stop_min_iter and mean_best_goal <= self.stop_best_mean_goal:
                print('Stop optimization due to good average best goal!')
                break

            # Stopping rule 3: Max iteration is reached.
            if k >= self.max_iter:
                print('Stop optimization due to max iteration!')
                break

        return utils.true_param(theta)