def nonlinmin(f, x1, tol=1e-6, maxit=16):
    ab = x1.size  # The dimension of the problem

    H = Hessian(f, x1, ab)
    G = Gradient(f, x1, ab)
    HG = np.linalg.solve(H, G)  # Newton direction: H^(-1) * G
    x2 = x1 - HG.T  # Newton step
    x = x1
    xs = x  # Save the steps taken
    count = 1  # Initialize the counter
    # Flatten both iterates: the matrix algebra above can leave nested arrays
    x2 = x2.flatten()
    x = x.flatten()
    d = distance(x2, x, ab)  # How far have we traveled?
    while (d > tol) and (count < maxit):
        x2 = x
        H = Hessian(f, x, ab)
        G = Gradient(f, x, ab)
        HG = np.linalg.solve(H, G)  # Newton direction: H^(-1) * G
        x = x2 - HG.T  # Newton step
        x2 = x2.flatten()
        x = x.flatten()
        d = distance(x2, x, ab)  # Distance traveled
        xs = np.vstack((xs, x))  # Save the steps
        count += 1  # Increment the counter
    xs = xs.T  # Transpose the steps: one column per iterate
    return x, count, xs
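# Note: nonlinmin assumes Gradient, Hessian, and distance helpers defined
# elsewhere in its source project. A minimal finite-difference sketch of what
# they might look like (hypothetical stand-ins, not the project's actual code):

import numpy as np

def Gradient(f, x, n, h=1e-6):
    # Central-difference approximation of the gradient of f at x
    g = np.empty(n)
    for i in range(n):
        e = np.zeros(n)
        e[i] = h
        g[i] = (f(x + e) - f(x - e)) / (2 * h)
    return g

def Hessian(f, x, n, h=1e-4):
    # Central-difference approximation of the Hessian of f at x
    H = np.empty((n, n))
    for i in range(n):
        for j in range(n):
            ei = np.zeros(n); ei[i] = h
            ej = np.zeros(n); ej[j] = h
            H[i, j] = (f(x + ei + ej) - f(x + ei - ej)
                       - f(x - ei + ej) + f(x - ei - ej)) / (4 * h**2)
    return H

def distance(a, b, n):
    # Euclidean distance between successive iterates (n is unused here)
    return np.linalg.norm(np.asarray(a) - np.asarray(b))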
Example #2
def gonzalez(data, k, distance):
	# Initialize every point to the first cluster
	phi = {v: 0 for v in data.keys()}

	# Create the list of cluster centers
	c = [0 for v in range(k)]
	# Arbitrarily choose the first cluster center
	initial = random.randrange(0, len(data))
	c[0] = data[list(data.keys())[initial]]
	# Loop through the number of clusters we want
	for i in range(1, k):
		# Find the point farthest from its currently assigned cluster center
		Max = 0
		c[i] = 0
		for key, val in data.items():
			dist = distance(val, c[phi[key]])
			if dist > Max:
				Max = dist
				c[i] = val
		# print(c[i])
		# Reassign each point to the closest cluster center
		for key, val in data.items():
			if distance(val, c[phi[key]]) > distance(val, c[i]):
				phi[key] = i
	return (c, phi)  # List of centers, dictionary of phis ==> labels mapped to center index
Example #3
    def test_distance(self):
        self.assertEqual(
            distance(two_rooms_many_hallway_nodes_json, 'r1', 'r2'),
            465.20720723947005, "Invalid output")

        self.assertEqual(
            distance(three_rooms_many_hall_ways_nodes_json, 'A', 'C'),
            362.02462714231825, "Invalid output")
Example #4
def reduced_shear2(wavelength_obs, z_ini, mu_max, z_alpha, z_beta, l_mag,
                   l_phi):
    sigma_galaxy = np.pi * r_virial**2
    shear = 0
    D_alpha = distance(z_ini, z_alpha)
    D_beta = distance(z_ini, z_beta)
    #redshift integral from 0 to Chi(z_alpha)
    delta_z = (z_alpha - z_ini) / z_step
    for i in range(z_step):
        zi = (z_ini + i * delta_z)
        zmid = 1 / 2 * (zi + z_ini + (i + 1) * delta_z)
        D_mid = distance(z_ini, zmid)
        window_alpha = window_distance(D_mid, D_alpha)
        window_beta = window_distance(D_mid, D_beta)
        halo_data = halo_info(zmid, M_halo_min, M_halo_max,
                              n_halo_integral_step)
        E_z = np.sqrt(Omega_r * (1 + zmid)**4 + Omega_m * (1 + zmid)**3 +
                      Omega_k * (1 + zmid)**2 + Omega_L)
        factor = (2 * window_alpha * window_beta * sigma_galaxy *
                  numberdensity_galaxy * tau_g(zmid, wavelength_obs) *
                  (1 + zmid)**2 * d_h / E_z *
                  (9 * Omega_m**2 * d_h**(-4)) / (4 / (1 + zmid)**2))
        #mu parameter integral from 0 to some max
        delta_mu = (mu_max) / mu_step
        for j in range(mu_step):
            mu_j = j * delta_mu
            mu_mid = 1 / 2 * (mu_j + (j + 1) * delta_mu)
            #nu parameter integral from 0 to pi FOR THE SPECIAL CASE l_phi = 0 rad!!
            delta_nu = np.pi / nu_step
            for k in range(nu_step):
                nu_k = k * delta_nu
                nu_mid = 1 / 2 * (nu_k + (k + 1) * delta_nu)
                psi = np.arccos((-np.cosh(mu_mid) * np.cos(nu_mid) + 1) /
                                ((np.cosh(mu_mid) - np.cos(nu_mid))))
                my_tri = kTriangle(
                    l_mag / D_mid,
                    l_mag / 2 * (np.cosh(mu_mid) - np.cos(nu_mid)) / D_mid,
                    l_phi - (np.pi - psi))
                shear_ijk = (factor *
                             np.cos(2 * l_phi - 2 * (np.pi - psi)) *
                             total_halo_dust_bispectrum(zmid, my_tri,
                                                        halo_data)[0, 0] /
                             (2 * np.pi)**2 * l_mag**2 / 4 *
                             np.abs(np.cosh(2 * mu_mid) - np.cos(2 * nu_mid)) *
                             delta_z * delta_mu * delta_nu)
                #shear_ijk = factor * np.cos(2*l_phi - 2*(np.arccos((np.cosh(mu_mid) * np.cos(nu_mid) + 1)/(2*(np.cosh(mu_mid) - np.cos(nu_mid))))))  * total_halo_dust_bispectrum(zmid,kTriangle(l_mag/D_mid,l_mag * (np.cosh(mu_mid) - np.cos(nu_mid))/D_mid,l_phi - (np.arccos((np.cosh(mu_mid) * np.cos(nu_mid) + 1)/(2*(np.cosh(mu_mid) - np.cos(nu_mid)))))),halo_data)[0,0] * 1/(2*np.pi)**2 * 1/2 * l_mag**2 * (np.cosh(2 * mu_mid) - np.cos(2 * nu_mid)) * delta_z * delta_mu * delta_nu
                shear += shear_ijk
                #print(i,j,k,shear_ijk)
                #sys.stdout.flush()
    return (shear)
Example #5
def hcluster(rows, distance=distance.pearson, threshold=sys.maxsize, maxclusters=sys.maxsize):
    distances = {}
    currentclusterid = -1

    # clusters are initially just the rows
    clusters = [cluster(rows[i], id=i, smallest_id=i, size=1) for i in range(len(rows))]

    while len(clusters) > 1:
        lowestpair = (0, 1)
        closest = distance(clusters[0].vec, clusters[1].vec)

        # loop through every pair looking for the smallest distance
        for i in range(len(clusters)):
            for j in range(i + 1, len(clusters)):
                # distances is the cache of distance calculations
                if (clusters[i].id, clusters[j].id) not in distances:
                    distances[(clusters[i].id, clusters[j].id)] = distance(clusters[i].vec, clusters[j].vec)
                dist = distances[(clusters[i].id, clusters[j].id)]
                if dist < closest:
                    closest = dist
                    lowestpair = (i, j)

        # stop clustering once the closest distance exceeds the threshold
        # and the number of clusters is within the maximum cluster count
        if closest > threshold and len(clusters) <= maxclusters:
            break

        # calculate the average of the two clusters
        #mergevec = _mergeVector2Ave(clusters, lowestpair)
        mergevec = _mergeVectorAllAve(clusters, lowestpair)

        # create the new cluster
        newcluster = cluster(
            mergevec,
            left=clusters[lowestpair[0]],
            right=clusters[lowestpair[1]],
            distance=closest,
            id=currentclusterid,
            smallest_id=min(clusters[lowestpair[0]].smallest_id, clusters[lowestpair[1]].smallest_id),
            size=sum([clusters[lowestpair[0]].size, clusters[lowestpair[1]].size]))

        # cluster ids that weren't in the original set are negative
        currentclusterid -= 1
        del clusters[lowestpair[1]]
        del clusters[lowestpair[0]]
        clusters.append(newcluster)

    return clusters
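# Note: hcluster relies on a cluster record type and a _mergeVectorAllAve helper
# from its source project. A minimal sketch consistent with how the fields are
# used above (hypothetical; the real merge may weight the rows differently):

class cluster:
    def __init__(self, vec, left=None, right=None, distance=0.0,
                 id=None, smallest_id=None, size=1):
        self.vec = vec
        self.left = left
        self.right = right
        self.distance = distance
        self.id = id
        self.smallest_id = smallest_id
        self.size = size

def _mergeVectorAllAve(clusters, pair):
    # Size-weighted average of the two clusters' vectors
    a, b = clusters[pair[0]], clusters[pair[1]]
    total = a.size + b.size
    return [(a.vec[i] * a.size + b.vec[i] * b.size) / total
            for i in range(len(a.vec))]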
Example #6
def get_trajectory(start, goal):

    distance_1sec = 50  #mm

    start_x, start_y = start.getxy()
    goal_x, goal_y = goal.getxy()
    # Find maximum reachable target point
    if distance(start.getxy(), goal.getxy()) > distance_1sec:
        rise = goal_y - start_y
        run = goal_x - start_x

        a = 50.0 / np.sqrt(run**2 + rise**2)

        goal_x = a * run + start_x
        goal_y = a * rise + start_y

    if start_x > goal_x:
        x_vals = [goal_x, start_x]
        y_vals = [goal_y, start_y]
        x_eval = np.linspace(goal_x, start_x, num=5)
        trajectory_y = np.interp(x_eval, [goal_x, start_x], [goal_y, start_y])
        x_eval = x_eval[::-1]
        trajectory_y = trajectory_y[::-1]
    else:
        x_vals = [start_x, goal_x]
        y_vals = [start_y, goal_y]
        x_eval = np.linspace(start_x, goal_x, num=5)
        trajectory_y = np.interp(x_eval, [start_x, goal_x], [start_y, goal_y])

    return zip(x_eval, trajectory_y)
Example #7
    def _cluster_data(self, datapoints, centroids, distance, data_matrix):
        """Run until the centroids have not been moved"""

        num_samples = self.num_samples
        num_clusters = self.num_clusters
        vec_len = self.vec_len
        average = numpy.average

        moved_flag = 1

        while moved_flag:
            moved_flag = 0
            cluster_ids = dict(
            )  # key: centroid number, value: list of indices

            #Assign clusters
            for i in range(num_samples):
                cluster_ids.setdefault(
                    min([(distance(centroids[j], data_matrix[i]), j)
                         for j in range(num_clusters)])[1], []).append(i)

            #Move centroids
            for i in range(num_clusters):
                if i in cluster_ids:
                    cluster_data_means = data_matrix.take(
                        tuple(cluster_ids[i]), 0).mean(0)

                    if not (centroids[i] == cluster_data_means).all():
                        centroids[i] = cluster_data_means
                        moved_flag = 1

        #Assign cluster ids to datapoints
        for cluster_id in cluster_ids:
            for idx in cluster_ids[cluster_id]:
                datapoints[idx].cluster_id = cluster_id
Example #8
def lloyds(data, k, distance):
	keys = list(data.keys())
	phi = {v: 0 for v in keys}
	c = [[] for i in range(k)]
	rando = random.sample(range(len(keys)), k)
	hasNotChanged = lambda x, y: collections.Counter(x) == collections.Counter(y)
	for i in range(k):
		c[i] = data[keys[rando[i]]]

	for loop in range(50):

		for key, val in data.items():
			mind = float('inf')
			mini = 0
			for i in range(k):
				dist = distance(val, c[i])
				if dist < mind:
					mind = dist
					mini = i
			phi[key] = mini

		for i in range(k):
			avg = [0] * len(c[i])
			count = 0
			for key, val in data.items():
				if phi[key] == i:
					for dim in range(len(avg)):
						avg[dim] += val[dim]
					count += 1
			for dim in range(len(avg)):
				avg[dim] /= count
			c[i] = avg
	return (c, phi)
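# Hypothetical usage of lloyds with a toy 2-D dataset and a Euclidean metric
# (the source module is assumed to import random and collections itself):

import math

def euclidean(a, b):
    return math.sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))

points = {'a': [0.0, 0.0], 'b': [0.2, 0.1], 'c': [5.0, 5.1], 'd': [4.9, 5.2]}
centers, phi = lloyds(points, 2, euclidean)  # k=2 centers, phi maps key -> center index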
Example #9
def centerCost(data, centers, phi, distance):
	Max = 0
	for key, point in data.items():
		dis = distance(centers[phi[key]], point)
		if dis > Max:
			Max = dis
	return Max
Example #11
def getFitnessFunction(langs, word, weightFunc):
    words = defaultdict(int)
    for l in langs:
        weightSum = sum([v for k, v in l[word].items()])
        for wd, wt in l[word].items():
            words[wd] += weightFunc(l) * wt / weightSum
    return lambda w: -1 * sum(
        [wt * distance(w, wd) for wd, wt in words.items()])
Example #12
	def kmeans(data, k=4, distance=distance.euclidean, prototype=prototype.rand):
		"""
		A static factory method providing a kmeans clustering of the source data.
		"""

		result_clustering = clustering(data, clusters=[], centroids=prototype(data, k))

		attributes = range(len(data[0]))

		clusters_last = None
		for t in range(1, 101):
			result_clustering.clusters = [[] for i in range(k)]

			#assign each object to the closest centroid
			for object_num in range(len(data)):
				obj = data[object_num]
				assigned_cluster_num = random.choice(range(k))

				for cluster_num in range(k):
					d = distance(result_clustering.centroids[cluster_num], obj)
					if d < distance(result_clustering.centroids[assigned_cluster_num], obj):
						assigned_cluster_num = cluster_num

				result_clustering.clusters[assigned_cluster_num].append(object_num)
			
			#terminate the loop if the centroids list hasn't changed
			if result_clustering.clusters == clusters_last: break
			clusters_last = result_clustering.clusters

			#recompute the centroid of each cluster
			for i in range(k):
				attr_averages = [0.0] * len(attributes)

				if len(result_clustering.clusters[i]) > 0:
					for object_num in result_clustering.clusters[i]:
						for attr_num in attributes:
							attr_averages[attr_num] += data[object_num][attr_num]
					
					for attr_num in attributes:
						attr_averages[attr_num] /= len(result_clustering.clusters[i])
					
					result_clustering.centroids[i] = attr_averages
		
		print "kmeans finished after %d iterations" % t
		
		return result_clustering
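# This appears to be a factory method on a clustering class, with companion
# distance and prototype modules supplying the metric and the initial
# centroids. A hypothetical call, assuming those names exist as in the
# signature above:
#
#     data = [[1.0, 2.0], [1.1, 1.9], [8.0, 8.2], [7.9, 8.1]]
#     result = clustering.kmeans(data, k=2)
#     print(result.centroids)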
Example #13
def distance_check_front():
    while distance(ULTRASONIC_FRONT_TRIG, ULTRASONIC_FRONT_ECHO)>20:
        pass
    clock(C1A, C1B)
    clock(C2A, C2B)
    time.sleep(0.1)
    stop(C1A, C1B)
    stop(C2A, C2B)
Example #14
def distance_check_back():
    while distance(ULTRASONIC_BACK_TRIG, ULTRASONIC_BACK_ECHO)>20:
        pass
    anticlock(C1A, C1B)
    anticlock(C2A, C2B)
    time.sleep(0.1)
    stop(C1A, C1B)
    stop(C2A, C2B) 
Example #15
def computeDistanceMatrix(descs, sets_ground_truth, distance=cosineDistance):
    # Compute distances for the given set using the predefined distance function
    matches_scores = []
    mismatches_scores = []
    if len(sets_ground_truth[0]) > 0:
        for matches in sets_ground_truth[0]:
            matches_scores.append(
                distance(descs[matches[0]], descs[matches[1]]))
        # print "Size matches_scores ", len(matches_scores), matches_scores[-1].shape
    if len(sets_ground_truth[1]) > 0:
        for matches in sets_ground_truth[1]:
            mismatches_scores.append(
                distance(descs[matches[0]], descs[matches[1]]))

    print(len(matches_scores), len(mismatches_scores))

    return (matches_scores, mismatches_scores)
Example #16
def reduced_shear1(z_ini, l_tripleprime_max, z_alpha, z_beta, l_mag, l_phi):
    sigma_galaxy = np.pi * r_virial**2
    shear = 0
    D_alpha = distance(z_ini, z_alpha)
    D_beta = distance(z_ini, z_beta)
    #redshift integral from 0 to Chi(z_alpha)
    delta_z = (z_alpha - z_ini) / z_step
    for i in range(z_step):
        zi = (z_ini + i * delta_z)
        zmid = 1 / 2 * (zi + z_ini + (i + 1) * delta_z)
        D_mid = distance(z_ini, zmid)
        window_alpha = window_distance(D_mid, D_alpha)
        window_beta = window_distance(D_mid, D_beta)
        halo_data = halo_info(zmid, M_halo_min, M_halo_max,
                              n_halo_integral_step)
        E_z = np.sqrt(Omega_r * (1 + zmid)**4 + Omega_m * (1 + zmid)**3 +
                      Omega_k * (1 + zmid)**2 + Omega_L)
        factor = (2 * window_alpha * window_beta * sigma_galaxy *
                  numberdensity_galaxy * tau_g(zmid) *
                  (1 + zmid)**2 * d_h / E_z *
                  (9 * Omega_m**2 * d_h**(-4)) / (4 / (1 + zmid)**2))
        #l_tripleprime magnitude integral from 0 to some max
        delta_l_tripleprime = (l_tripleprime_max) / l_tripleprime_step
        for j in range(l_tripleprime_step):
            l_tripleprime_j = j * delta_l_tripleprime
            l_tripleprime_mid = 1 / 2 * (l_tripleprime_j +
                                         (j + 1) * delta_l_tripleprime)
            #angular integral for l_tripleprime from 0 to pi FOR THE SPECIAL CASE l_phi = 0 rad!!
            delta_phi = np.pi / phi_step
            for k in range(phi_step):
                phi_k = k * delta_phi
                phi_mid = 1 / 2 * (phi_k + (k + 1) * delta_phi)
                my_tri = kTriangle(l_mag / D_mid, l_tripleprime_mid / D_mid,
                                   l_phi - phi_mid)
                shear_ijk = (factor *
                             np.cos(2 * l_phi - 2 * phi_mid) *
                             total_halo_dust_bispectrum(zmid, my_tri,
                                                        halo_data)[0, 0] /
                             (2 * np.pi)**2 *
                             l_tripleprime_mid * delta_l_tripleprime *
                             delta_z * delta_phi)
                shear += shear_ijk
                print(i, j, k, zmid, l_tripleprime_mid, phi_mid, shear_ijk)
                sys.stdout.flush()
    return (shear)
Example #17
    @classmethod
    def convert_to_distmatrix(cls, data, distance, lower=True):
        matrix = numpy.zeros((len(data), len(data)))
        for i, j in combinations(range(len(data)), 2):
            matrix[i][j] = distance(data[i], data[j])
            if lower:
                matrix[j][i] = matrix[i][j]
        # Add a nan diagonal, useful for further computations.
        numpy.fill_diagonal(matrix, numpy.nan)
        return matrix
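# The nan diagonal set above lets row-wise reductions skip self-distances; a
# hypothetical follow-up computation on the returned matrix:
#
#     nearest = numpy.nanargmin(matrix, axis=1)  # each point's nearest neighbour
#     spread = numpy.nanmax(matrix)              # largest pairwise distance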
Example #18
def getFitnessFunction(langs, word, weightFunc, avg=True):
	if not avg:
		return gff2(langs, word, weightFunc)
	words = defaultdict(int)
	for l in langs:
		weightSum = sum([v for k,v in l[word].items()])
		for wd,wt in l[word].items():
			words[wd] += weightFunc(l) * wt/weightSum
	return lambda w: -1 * sum([wt * distance(w, wd) for wd, wt in words.items()])
Example #19
def meanCost(data, centers, phi, distance):
	cost = 0.0
	count = 0
	for key, point in data.items():
		dis = distance(centers[phi[key]], point)
		cost += dis**2
		count += 1

	return math.sqrt(cost / count)
Example #21
    def _gen_distance_matrix(self, data_matrix, distance, num_samples):
        """Generate a distance matrix so that we can use indices to specify distance rather than data vectors"""

        distance_matrix = numpy.zeros((num_samples, num_samples), dtype=float)

        for i, j in comb(range(num_samples), 2):
            distance_matrix[i][j] = distance(data_matrix[i], data_matrix[j])

        return distance_matrix + distance_matrix.T
Example #23
def menu():
    print("""
    Python converter program
        1. Convert Measurements
        2. Convert Weight
        3. Convert Temperatures
        4. Convert Distances
        5. Compute Training Zones
    """)
    choice = int(input("Enter choice: "))
    if choice == 1:
        measure()
    if choice == 2:
        weight()
    if choice == 3:
        temp()
    if choice == 4:
        distance()
    if choice == 5:
        Zones()
Example #24
def Clz_iz_j(z_alpha, z_beta, l_mag):
    Clij = 0
    D_alpha = distance(0, z_alpha)
    D_beta = distance(0, z_beta)
    for i in range(n):
        delta_z = z_alpha / n
        zi = i * delta_z
        zmid = 1 / 2 * (zi + (i + 1) * delta_z)
        D_mid = distance(0, zmid)
        window_alpha = window_distance(D_mid, D_alpha)
        window_beta = window_distance(D_mid, D_beta)
        E_z = np.sqrt(Omega_r * (1 + zmid)**4 + Omega_m * (1 + zmid)**3 +
                      Omega_k * (1 + zmid)**2 + Omega_L)
        Clij += (l_mag**4 * (window_alpha * window_beta) / D_mid**2 *
                 (9 * Omega_m**2 * d_h**(-4)) / (4 / (1 + zmid)**2) *
                 PSetNL.P_interp(zmid, l_mag / D_mid)[0, 0] *
                 d_h / E_z * delta_z)
    return (Clij)
Example #25
def findCircles(black_image, frame):
    circles = black_image
    cimg = frame.copy()
    im2, cnts, hierarchy = cv2.findContours(circles, cv2.RETR_TREE,
                                            cv2.CHAIN_APPROX_SIMPLE)
    # cv2.drawContours(frame, contours, -1, (0,255,0), 5)
    (cnts, boundingBoxes) = sort_contours(cnts)
    for (i, c) in enumerate(cnts):
        draw_contour(frame, c, i)
    for cnt in cnts:
        (x, y), radius = cv2.minEnclosingCircle(cnt)
        center = (int(x), int(y))
        radius = int(radius)
        cv2.circle(frame, center, 50, (0, 255, 0), 3)
    cv2.putText(frame, str(len(cnts)), (10, 450), cv2.FONT_HERSHEY_TRIPLEX, 3,
                (255, 255, 255), 4)
    cv2.imshow("Circles", frame)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    distance(frame, cnts)
Example #26
def main():
    try:
        with Input(keynames='curses') as input_generator:
            for key in input_generator:
                if key == 'KEY_UP' and distance(ULTRASONIC_FRONT_TRIG, ULTRASONIC_FRONT_ECHO)>3:
                    anticlock(C1A, C1B)
                    anticlock(C2A, C2B)
                    t_front = threading.Thread(target=distance_check_front)
                    t_front.start()
                    t_front1 = threading.Thread(target=proximity_check, args=(INFRA_FRONT_LEFT,))
                    t_front2 = threading.Thread(target=proximity_check, args=(INFRA_FRONT_RIGHT,))
                    t_front1.start()
                    t_front2.start()
  
                if key == 'KEY_DOWN' and distance(ULTRASONIC_BACK_TRIG, ULTRASONIC_BACK_ECHO)>3:
                    clock(C1A, C1B)
                    clock(C2A, C2B)
                    t_back = threading.Thread(target=distance_check_back)
                    t_back.start()
            
                if key == 'KEY_RIGHT':
                    stop(C1A, C1B)
                    stop(C2A, C2B)
                    clock(C1A, C1B)
                    time.sleep(0.1)
                    stop(C1A, C1B)

                if key == 'KEY_LEFT':
                    stop(C1A, C1B)
                    stop(C2A, C2B)
                    clock(C2A, C2B)
                    time.sleep(0.1)
                    stop(C2A, C2B)

                if key == 's':
                    stop(C1A, C1B)
                    stop(C2A, C2B)
    except KeyboardInterrupt:
        gpio.cleanup()
        sys.exit(0)
Example #27
def reduced_shear(z_ini, l_tripleprime_max, z_alpha, z_beta, l_mag, l_phi):
    sigma_galaxy = np.pi * r_virial**2
    shear = 0
    D_alpha = distance(z_ini, z_alpha)
    D_beta = distance(z_ini, z_beta)
    #redshift integral from 0 to Chi(z_alpha)
    delta_z = (z_alpha - z_ini) / z_step
    for i in range(z_step):
        zi = (z_ini + i * delta_z)
        zmid = 1 / 2 * (zi + z_ini + (i + 1) * delta_z)
        #zmid = np.linspace(0.5*delta_z,(n-1/2)*delta_z,n)
        D_mid = distance(z_ini, zmid)
        #l_tripleprime magnitude integral from 0 to some max
        delta_l_tripleprime = (l_tripleprime_max) / l_tripleprime_step
        #for j in range (l_tripleprime_step):
        #l_tripleprime_j = j*delta_l_tripleprime
        #l_tripleprime_mid = 1/2*(l_tripleprime_j + (j+1)*delta_l_tripleprime)
        l_tripleprime_mid = np.linspace(0.5 * delta_l_tripleprime,
                                        (n - 1 / 2) * delta_l_tripleprime, n)
        #angular integral for l_tripleprime from 0 to pi but this is FOR THE SPECIAL CASE l_phi = 0 rad
        delta_phi = np.pi / phi_step
        #for k in range (phi_step):
        #phi_k = k*delta_phi
        #phi_mid = 1/2*(phi_k + (k+1)*delta_phi)
        phi_mid = np.linspace(0.5 * delta_phi, (n - 1 / 2) * delta_phi, n)
        E_z = np.sqrt(Omega_r * (1 + zmid)**4 + Omega_m * (1 + zmid)**3 +
                      Omega_k * (1 + zmid)**2 + Omega_L)
        shear += np.sum(
            2 * window_distance(D_mid, D_alpha) *
            window_distance(D_mid, D_beta) * sigma_galaxy *
            numberdensity_galaxy * tau_g(zmid) *
            (1 + zmid)**2 * d_h / E_z *
            np.cos(2 * l_phi - 2 * phi_mid) *
            (9 * Omega_m**2 * d_h**(-4)) / (4 / (1 + zmid)**2) *
            B_matterspec(
                zmid,
                kTriangle(l_mag / D_mid, l_tripleprime_mid / D_mid,
                          l_phi - phi_mid))[0, 0] / (2 * np.pi)**2 *
            l_tripleprime_mid) * delta_z * delta_phi * delta_l_tripleprime
    return (shear)
Example #29
def get_trajectory(start, goal):
    max_angular_velocity = 10.0 / 3.0 * np.pi
    distance_1sec = 50  #mm

    start_x, start_y, start_theta = start.get_x_y_theta()
    goal_x, goal_y = goal.getxy()

    eucl_dist = distance(start.getxy(), goal.getxy())

    if eucl_dist > distance_1sec:
        rise = goal_y - start_y
        run = goal_x - start_x

        a = 50.0 / np.sqrt(run**2 + rise**2)

        goal_x = a * run + start_x
        goal_y = a * rise + start_y

    trajectory_obj, omega_l, omega_r, final_angle, d = get_trajectory_input(
        (start_x, start_y, start_theta), (goal_x, goal_y))

    # new_x, new_y, new_theta = robot_model(omega_l, omega_r, start_x, start_y, start_theta)
    if type(trajectory_obj) is lines.Line2D:
        x_eval = np.linspace(goal_x, start_x, num=20)
        trajectory_y = np.interp(x_eval, [goal_x, start_x], [goal_y, start_y])
        x_eval = x_eval[::-1]
        trajectory_y = trajectory_y[::-1]
    elif type(trajectory_obj) is patches.Arc:
        # print type(trajectory_obj)
        angles = np.linspace(trajectory_obj.theta1,
                             trajectory_obj.theta2,
                             num=20)
        radius = trajectory_obj.height / 2.0

        # print radius
        center = trajectory_obj.center
        x_eval = radius * np.cos(angles * np.pi / 180) + center[0]
        trajectory_y = radius * np.sin(angles * np.pi / 180) + center[1]

        arc_length = abs(trajectory_obj.theta1 -
                         trajectory_obj.theta2) * radius * np.pi / 180.0

        if distance_1sec * 5 < arc_length:
            # print arc_length, eucl_dist, d
            return [], [], []

    return zip(x_eval, trajectory_y), trajectory_obj, final_angle
Example #30
def calculates_nearest_centroid(data, centroids, distance):
    """
    Assign an element to its nearest centroid, returning the index into the centroid list

    >>> calculates_nearest_centroid([41.0],[[39.0],[45.0]],abs_distance)
    0
    >>> calculates_nearest_centroid([64.0],[[39.0],[45.0]],abs_distance)
    1
    """
    minimum_distance = float("infinity")
    index = (-1)
    for i, c in enumerate(centroids):
        dist = distance(data, c)
        if dist < minimum_distance:
            minimum_distance = dist
            index = i
    return index
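# The doctests above assume an abs_distance helper; a minimal sketch consistent
# with the examples (hypothetical), plus a doctest runner:

def abs_distance(a, b):
    # Absolute difference of 1-D points, e.g. abs_distance([41.0], [39.0]) == 2.0
    return abs(a[0] - b[0])

if __name__ == "__main__":
    import doctest
    doctest.testmod()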
Example #31
    def _cluster(self, data, means, distance):
        """
        Assign each data point to the cluster with the nearest mean.
        data: List of data points
        means: List of means of the clusters
        distance: Function to calculate distance between two points
        """
        cluster = [None for d in data]
        for d in range(len(data)):
            min_dist = None
            for m in range(len(means)):
                dist = distance(data[d], means[m])
                # check min_dist first: comparing against None raises in Python 3
                if min_dist is None or dist < min_dist:
                    min_dist = dist
                    cluster[d] = m
        return cluster
Example #32
    def _assign_clusters(self, nodes, data_matrix, distance, hdim, vdim):
        """Assign clusters to each sample in sdata"""

        clusters = dict()

        matches = [
            min([(distance(nodes[i][j], data_matrix[k]), i, j)
                 for i in range(vdim) for j in range(hdim)])
            for k in range(self.num_samples)
        ]

        for i in range(len(matches)):
            key = (matches[i][1], matches[i][2])
            clusters.setdefault(key, []).append(self.datapoints[i])

        for clust in enumerate(clusters):
            for sample in clusters[clust[1]]:
                sample.cluster_id = clust[0]
Example #33
def find_valid_zips(all_zips, zip_code, radius):
    '''
        Finds all zip codes centered at zip_code within radius miles

        Inputs:
        (dict) all_zips of { zip_code: <Point object> } mappings
        (str) zip_code center of search
        (float) radius in miles of search area

        Returns:
        (dict) of {zip_code: distance-to-center } mappings

    '''
    center = all_zips[zip_code]

    zip_dists = {}
    for zc, point in all_zips.items():  # renamed to avoid shadowing the zip_code parameter
        d = distance(center, point)
        if d <= radius:
            # save the point and distance so no need to recalc later
            zip_dists[zc] = d
    return zip_dists
Example #34
    #list_dict[5]=peaks5
    #list_dict[6]=peaks6
    #list_dict[7]=peaks7
    #list_dict[8]=peaks8

    input_file.close()


    output_data = []

    for i in range(1, len(list_dict)+1):
        row_data = {}
        print('CHANNEL ' + str(i) + ' -------------------------------------------')
        time_channel = time_list(time_column, list_dict[i])
        speed_channel = speed_list(time_channel, i)
        time_n, speed_n, dist, av_speed = distance(time_channel, speed_channel)
        fly_time, short_bout, long_bout, flight, fly_to_300, fly_to_900, fly_to_3600, fly_to_14400, fly_more_14400, event_300, event_900, event_3600, event_14400, event_more_14400 = flying_bouts(time_n, speed_n, i, tot_duration)
        print('Average speed channel ' + str(i) + ' -> ' + '%.2f' % av_speed)
        row_data['average_speed'] = av_speed		# row_data['column'] = value
        print('Total flight time channel ' + str(i) + ' -> ' + '%.2f' % fly_time)
        row_data['total_flight_time'] = fly_time  
        print('Distance channel ' + str(i) + ' -> ' + '%.2f' % dist)
        row_data['distance'] = dist 
        print('Shortest flying bout channel ' + str(i) + ' -> ' + '%.2f' % short_bout)
        row_data['shortest_flying_bout'] = short_bout 
        print('Longest flying bout channel ' + str(i) + ' -> ' + '%.2f' %long_bout)
        row_data['longest_flying_bout'] = long_bout 
        print('This individual spent ' + '%.3f' %flight + ' of its time flying with this composition: ')
        row_data['portion_flying'] = flight
        row_data['total_duration'] = tot_duration
        print('	 60s-300s = ' + '%.3f' %fly_to_300 + ' with ',event_300, 'events')
Example #35
    def sortkey(cand):
        return sum([wt * distance(cand, wd) for wd, wt in words.items()])
Example #36
time.sleep(0.1)                # Wait

GPIO.setwarnings(False) # turn off warnings about DMA channel in use
GPIO.setmode(GPIO.BCM)

# make some space
print('')
if DEBUG:
    print('Initializing Pigpio...')

# initialize pigpio library and socket connection for daemon (pigpiod)
PIGPIO_HANDLE = pigpio.pi()              # use defaults
PIGPIO_VERSION = PIGPIO_HANDLE.get_pigpio_version()

# initialize the distance class
valve_detector = distance()


#############################
# Main while loop
#############################
functions = [server_start]
p = ProcessHandler(functions)
p.start()

try:
    while True:
        p.watchDog()
        
        if not p.event_q.empty():
            print(p.event_q.get())
Example #37
	def ff(w):
		cost = 0
		for l in langs:
			if len(l[word]) > 0:
				cost += weightFunc(l) * min(map(lambda x: distance(w, x), l[word]))
		return -1 * cost
Example #38
    def _train_data(self, data_matrix, distance, nodes, chessdists, hdim, vdim,
                    lr, num_epochs, radius):
        """Use the sample set to train the SOM"""

        t_const = num_epochs / math.log(radius)

        node_adjust = numpy.zeros((vdim, hdim, self.vec_len))

        e = math.e

        for t in range(1, num_epochs):

            new_radius = radius * e**(-t / t_const)
            new_learn = lr * e**(-t / t_const)
            r = 2 * t * new_radius * new_radius

            for k in range(self.num_samples):

                bmu = [float("inf")]  # best-matching unit: [dist, i, j]

                #Rather than a one line min([ listcomp ]) here, the added complexity reduces function calls by quite a bit
                for i in range(vdim):
                    for j in range(hdim):
                        dist = distance(nodes[i][j], data_matrix[k])

                        if dist < bmu[0]:
                            bmu = [dist, i, j]

                try:
                    y = bmu[1]
                except IndexError:
                    #FIXME: We need a command line way to lower learn rate, or GUI option
                    raise ValueError("Distance from nodes to data matrix too large? Try lowering learn rate.")

                x = bmu[2]
                rad_int = int(new_radius)

                #Again, performance vs code succinctness
                if y > rad_int:
                    min_i = y - rad_int - 1
                else:
                    min_i = 0

                if x > rad_int:
                    min_j = x - rad_int - 1
                else:
                    min_j = 0

                max_i = y + rad_int + 1
                max_j = x + rad_int + 1

                if max_i > vdim:
                    max_i = vdim
                if max_j > hdim:
                    max_j = hdim

                for i in range(min_i, max_i):
                    for j in range(min_j, max_j):
                        dist = chessdists[(y, x, i, j)]

                        inf = e**(-dist**2 / r)

                        #W(t+1) = W(t) + O(t)L(t)(V(t) - W(t))
                        node_adjust[i][j] += inf * new_learn * (
                            data_matrix[k] - nodes[i][j])

            nodes += node_adjust
            node_adjust.fill(0)

        return nodes
Example #39
    for _ in range(100):
        print(_, 'iteration')
        wing = Wing(airfoil)
        for i in range(points_num):
            pi[i], index = uct(wing, (points_num - i) * 5, pro, val, x, sess, t=2)
            air_input[i] = wing.airfoil
            val_i[i, index] = 1
            wing.draw(index)

        print(wing.airfoil.astype(int))

        wing_shape = write_dict(wing.airfoil)
        wing_shape = np.row_stack((wing_shape, wing_shape[0]))

        func = target_pressure_fn()
        dis = distance(func)
        print(dis)
        D = np.zeros(points_num).reshape([-1, 1])
        D += dis

        name = 'iteration_%d_distance=%f' % (_, dis)
        fig_path = 'pic/iteration_%d.png' % _
        fig, ax = plt.subplots()
        ax.plot(wing_shape[:, 0], wing_shape[:, 1])
        plt.xlim(0, 1)
        plt.ylim(-0.5, 0.5)
        ax.set(title=name)
        fig.savefig(fig_path)
        plt.close()
        print('save wing shape')
Example #41
dist_mean_list = []
dist_std_list = []

plt.figure()
#for fi in range(len(distance_function_list)):


def plot(data, std, color, text='Untitled'):
    max_data = np.max(data + std)
    data = data / max_data
    std = std / max_data
    plt.plot(range(len(data)), data, color=color, label=text)
    plt.plot(range(len(data)), data + std, color + 's')
    plt.plot(range(len(data)), data - std, color + 's')


distance = distance()
distance.neighbor_indices_flat_init(Y0list[0], [6, 6])
distance.calculate_max((160, 120))
""" distance 5 """

#distance_function = distance_function_list[4]
#distance.neighbor_indices_flat_init(Y0list[0],[6,6])
#dist_list = [[]]*max_dept
#dist_mean = np.zeros(max_dept)
#dist_std = np.zeros(max_dept)
#for dept in range(max_dept):
#    print('Computing dept: '+str(dept))
#    dist = []
#    for i in range(n_samp):
#        if i+dept<len(Y0list):
#            y1 = Y0list[i]
Example #42
    def test_distance(self):
        boulangerie = marqueurs.Commerce('Boulangerie', 'Nice', (8, 4),
                                         'ouvert', 'velo')
        jean = marqueurs.Individu('Jean', 'Nice', (1, 1), 82, 'velo')
        self.assertFalse(distance(jean, boulangerie))
Example #43
i = 1
for i in range(1, len(fp), 4):
    time = fp[i]
    pos = fp[i + 1]
    lat_long = pos.split(",")
    #print(time)
    long = lat_long[0]
    lat = lat_long[1]
    li = [time]
    #print(lat+"\n")
    with open("coordinates.csv", 'r') as images:
        csv_reader = csv.reader(images)

        for image in csv_reader:
            #print(image)
            #image = image.read().split(",")
            img_name = image[0]
            if image[1]:
                img_lat = image[1]
            if image[2]:
                img_long = image[2]
            #print(img_lat)
            dist = distance(float(lat), float(img_lat), float(long),
                            float(img_long))
            #print(img_name)
            if (dist <= 35):
                li.append(img_name)
        with open("result.csv", 'a') as res:
            writer = csv.writer(res)
            writer.writerow(li)
Example #45
def getDistanceToNeighbors(neighbors, test, length):
    dist_tuple_array = []
    for neighbor in neighbors:
        dist_tuple_array.append((neighbor, distance(neighbor, test, length)))
    return dist_tuple_array
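# Hypothetical follow-up for a k-nearest-neighbours lookup: sort the
# (neighbor, distance) pairs returned above and keep the k closest:
#
#     pairs = getDistanceToNeighbors(neighbors, test, length)
#     pairs.sort(key=lambda p: p[1])
#     k_nearest = [n for n, _ in pairs[:k]]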