def search(img_path):
    """Print the 10 catalogue images whose color feature is nearest to img_path's."""
    query = CaculateColorVector.get_color_feature(img_path)
    catalogue = File_Operation.read_list_from_file('/Users/ligang/Documents/cfs.txt')
    # score every stored (name, feature) entry by its distance to the query
    scored = [(entry[0], Distance.distance(query, entry[1])) for entry in catalogue]
    top_distances = heapq.nsmallest(10, scored, key=lambda pair: pair[1])
    print(top_distances)
# Example #2
# 0
	def getNodeNChildNodeDistance(self, neighbors, neighborsList, location, n):
		"""Visit node n and fold its distance from `location` into neighborsList.

		Returns neighborsList unchanged when n was already visited.  While the
		list holds fewer than `neighbors` entries the node is simply added;
		otherwise getNearestNeighbors decides whether it displaces an entry.
		"""
		if n.visited:
			return neighborsList
		# great-circle distance in miles (unit-sphere arc length * Earth radius 3960)
		miles = Distance.distance_on_unit_sphere(
			float(location[0]), float(location[1]),
			float(n.location[0]), float(n.location[1])) * 3960
		n.visited = True
		if len(neighborsList) < neighbors:
			neighborsList[n.locationid] = miles
			return neighborsList
		return self.getNearestNeighbors(neighborsList, miles, n)
def search(img_path):
    """Copy the 20 best uniform-LBP matches for img_path into the result folder."""
    query = ulbp.ulbp(img_path)
    catalogue = File_Operation.read_list_from_file('/Users/ligang/Documents/ulbp.txt')
    # score every stored (name, feature) entry by its distance to the query
    scored = [(entry[0], Distance.distance(query, entry[1])) for entry in catalogue]
    top_distances = heapq.nsmallest(20, scored, key=lambda pair: pair[1])
    dstDir = '/Users/ligang/Documents/Emilie/lbp_search_result'
    img_set_dir = '/Users/ligang/Documents/Emilie/dress'
    for name, _score in top_distances:
        shutil.copy2(os.path.join(img_set_dir, name), os.path.join(dstDir, name))
    print(top_distances)
def search(img_path):
    """Rank catalogue images by combined color + uniform-LBP distance and copy the top 20."""
    color_feature = CaculateColorVector.get_color_feature(img_path)
    lbp_feature = Uniform_LBP.ulbp(img_path)

    color_entries = File_Operation.read_list_from_file('/Users/ligang/Documents/cfs.txt')
    lbp_entries = File_Operation.read_list_from_file('/Users/ligang/Documents/ulbp.txt')

    scored = []
    for color_entry, lbp_entry in zip(color_entries, lbp_entries):
        # both files must list the same image at the same position
        assert color_entry[0] == lbp_entry[0]
        combined = Distance.distance(color_feature, color_entry[1]) + Distance.distance(lbp_feature, lbp_entry[1])
        scored.append((color_entry[0], combined))

    top_distances = heapq.nsmallest(20, scored, key=lambda pair: pair[1])
    dstDir = '/Users/ligang/Documents/Emilie/colorlbp_search_result'
    img_set_dir = '/Users/ligang/Documents/Emilie/dress'
    for name, _score in top_distances:
        shutil.copy2(os.path.join(img_set_dir, name), os.path.join(dstDir, name))
    print(top_distances)
# Example #5
# 0
def moveDist(address, addressPharm):
    """Send the drone the distance it needs to fly (also used for return trips).

    Geocodes the delivery address and the pharmacy address, computes the
    distance in miles between them, then moves the drone forward for the
    time that distance takes at the drone's speed.
    """
    # NOTE(review): Distance.longtitude vs Distance.longitude below — the two
    # spellings are inconsistent; confirm which one the Distance module
    # actually defines before changing either call.
    delivery_lat = Distance.latitude(address)      # address -> latitude
    delivery_long = Distance.longtitude(address)   # address -> longitude
    pharmacy_lat = Distance.latitude(addressPharm)
    pharmacy_long = Distance.longitude(addressPharm)
    # conversion() yields the distance in miles between the two coordinates
    miles_to_fly = Distance.conversion(delivery_lat, delivery_long, pharmacy_lat, pharmacy_long)
    drone.moveForward()
    # findtime() yields how long the drone must keep moving at speedofdrone
    time.sleep(Distance.findtime(miles_to_fly, speedofdrone))
# Example #6
# 0
def separate_path(all_orders, path, condition, params):
    """
    Split a visiting sequence into vehicle routes that each satisfy the time limit.

    :type all_orders: list  # all_orders[0] is the depot; the rest are indexed by `path`
    :type path: list        # indices into all_orders, depot excluded
    :type condition: dict   # condition["time"] is the per-vehicle time budget
    :type params: dict      # needs 'velocity' and 'stay_time'
    """
    assert len(all_orders) - 1 == len(path)
    separate_number_list = []

    start = all_orders[0]  # depot (route starting point)
    v = params['velocity']  # travel speed
    stay_time = params['stay_time']  # service/stay time at each node

    path_time = 0  # accumulated time for the current vehicle
    path_len = len(path)  # number of nodes in the path, excluding the depot
    i = 0  # index of the current node within `path`
    pre = start
    total_distance = 0
    while i < path_len:
        curr_node = all_orders[path[i]]
        distance = Distance.get_distance(pre['lat'], pre['lng'], curr_node['lat'], curr_node['lng'])
        time = distance / v
        if path_time + time > condition["time"]:
            # Current vehicle is full: reset the loop state and start a new
            # vehicle from the depot.  `i` is deliberately NOT advanced, so
            # this node is retried as the first stop of the next vehicle.
            if pre == start:
                # Node is unreachable from the depot within the time budget,
                # so splitting cannot make progress — fail loudly.
                raise RuntimeError("经纬度{}离原点{}太远".format(curr_node, pre))
            path_time = 0
            pre = start
            separate_number_list.append(i)
        else:
            pre = curr_node
            path_time += time + stay_time
            total_distance += distance
            i += 1

    separate_number_list.append(i)
    return {"separate_number_list": separate_number_list, "distance": total_distance}
# Example #7
# 0
def getRandom():
	"""Return a random integer in the inclusive range [0, 15]."""
	return randint(0, 15)
	
# NOTE(review): top-level polling loop.  Several names referenced below
# (serverflag, sensor, initialvalue, file2, f1contents, files, i, j, output,
# Distance) are not defined in this chunk — presumably defined earlier in the
# file; confirm before running in isolation.
startValue = 0.0  # baseline sensor reading, captured on the first 'true' poll
data = 'false'  # last flag value fetched from the server
orderflag = 0

while True :
	# poll the server for the order flag
	data = serverflag.parseHtml()
	print data
	if data == 'true' :

		#distance = getRandom();
		
		# current reading from the distance sensor
		distance = sensor.getDistance()
		print distance

		# remember the very first reading as the baseline, otherwise reuse it
		if initialvalue.isIntialNotSet():
		#if startValue == 0:
			startValue =  distance
			
			#startValue = 15.0
			initialvalue.setInitialValue(startValue)

		else:
			startValue = initialvalue.getInitialValue()

		# percentage drop of the current reading relative to the baseline
		percnt = ((startValue-distance)/startValue)*100
		print percnt
		file2_contents = file2.read().split('\n')
		f2contents = file2_contents[1].split(',') # index 0 contains feature headers

		# keep only the fields that parse as floats; non-numeric columns are
		# silently skipped (NOTE(review): the bare excepts also hide real errors)
		data1 = []
		data2 = []
		for k in range(0, len(f1contents)):
			try:
				f1contents[k] = float(f1contents[k])
				data1.append(f1contents[k])
			except:
				pass
		for k in range(0, len(f2contents)):
			try:
				f2contents[k] = float(f2contents[k])
				data2.append(f2contents[k])
			except:
				pass

		# write one CSV row per distance metric comparing the two feature rows
		indices = Distance.prune(data1, data2)
		#print("Euclidean: " + str(Distance.euclidean_distance(data1, data2, indices)))
		output.write(files[i] + "," + files[j] + "," + "euclidean" + "," + str(Distance.euclidean_distance(data1, data2, indices)) + "\n")
		#print("City: " + str(Distance.city_distance(data1, data2)))
		output.write(files[i] + "," + files[j] + "," + "city" + "," + str(Distance.city_distance(data1, data2)) + "\n")
		#print("Chebychev: " + str(Distance.chebychev_distance(data1, data2)))
		output.write(files[i] + "," + files[j] + "," + "chebychev" + "," + str(Distance.chebychev_distance(data1, data2)) + "\n")
		#print("Cosine: " + str(Distance.cosine_difference(data1, data2)))
		output.write(files[i] + "," + files[j] + "," + "cosine" + "," + str(Distance.cosine_difference(data1, data2)) + "\n")
		#print("Correlation: " + str(Distance.correlation_distance(data1, data2)))
		output.write(files[i] + "," + files[j] + "," + "correlation" + "," + str(Distance.correlation_distance(data1, data2)) + "\n")

def _hydro_class(residue):
	"""Return 'phobic' when the amino-acid label is in HPHOBIC, else 'philic'."""
	return 'phobic' if residue in HPHOBIC else 'philic'


def _contact_graph(curr_conf, labels):
	"""Build the residue contact graph for one conformation.

	Nodes are residue indices carrying 'aminoAcid' and 'hydro' attributes;
	residues at least RES_DISTANCE apart in sequence are joined by an edge
	when their euclidean distance is within BIN_CRITERIA.
	"""
	graph = nx.Graph()
	for j in range(len(curr_conf) - RES_DISTANCE):
		for k in range(j + RES_DISTANCE, len(curr_conf)):
			# add both endpoints with their labels
			graph.add_node(j)
			graph.node[j]['aminoAcid'] = labels[j]
			graph.node[j]['hydro'] = _hydro_class(labels[j])
			graph.add_node(k)
			graph.node[k]['aminoAcid'] = labels[k]
			graph.node[k]['hydro'] = _hydro_class(labels[k])
			# connect residues that are close in space
			d = Distance.euclideanDistance(curr_conf[j], curr_conf[k])
			if d <= BIN_CRITERIA:
				graph.add_edge(j, k, distance=d)
	return graph


def main(argv):
	"""Write one graph-attribute row per conformation to a timestamped CSV.

	argv: [native pdb, decoy pdb, model limit, output prefix, lrmsd criteria].
	Conformations within the lRMSD criteria are tagged near_native=1, the
	rest 0.  Exits with status 2 on bad arguments.
	"""
	if len(argv) != 5:
		print('USAGE: <native pdb file> <pdb file> <model limit> <output file prefix> <lrmsd criteria>')
		sys.exit(2)
	try:
		native_in = str(argv[0])
		file_in = str(argv[1])
		nr_models = int(argv[2])
		output_prefix = str(argv[3])
		lrmsd_criteria = int(argv[4])
	except (ValueError, TypeError):  # was a bare except: — catch only conversion errors
		print('USAGE: <native pdb file> <pdb file> <model limit> <output file prefix> <lrmsd criteria>')
		sys.exit(2)
	# Create lists of conformations.
	labels, nativeconformation, conformations = Parser.PDB(native_in, file_in, nr_models)
	# Sort into positive and negative sets using lRMSD.
	withinlRMSD, morethanlRMSD = Distance.sortBylRMSDs(nativeconformation, conformations, lrmsd_criteria)
	# Output graph attributes for each data set.
	# Note: removed newline='' from open() for linux
	dt = time.strftime("_%Y%m%d-%H%M%S")
	with open('Output/' + output_prefix + dt + '.csv', 'w') as csvfile:
		writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
		writer.writerow(['num_edges', 'density', 'avg_degree', 'percent_endpoints', 'energy',
			'second_eigen', 'unique_eigen', 'spectral_rad', 'inverse_product',
			'link_impurity', 'neighborhood_impurity', 'avg_closeness', 'avg_clustering',
			'small_worldness', 'eccentricity', 'diameter', 'radius', '%central_nodes',
			'%Hydrophobic_center', 'near_native'])
		# The positive set is tagged near_native=1, the negative (decoy) set 0.
		# The two loops were previously duplicated verbatim; the shared graph
		# construction now lives in _contact_graph.
		for conf_set, near_native in ((withinlRMSD, 1), (morethanlRMSD, 0)):
			for curr_conf in conf_set:
				attributes = graphAttributes(_contact_graph(curr_conf, labels))
				attributes.append(near_native)
				writer.writerow(attributes)
		print("ATTRIBUTES HAVE BEEN OUTPUTTED")
def main(argv):
	"""Count decoy conformations within/beyond 2 and 4 lRMSD of the native.

	argv: [native.pdb, decoys.pdb, output.csv].  Appends one summary row to
	output.csv, writing the column header first only when the file is empty.
	Exits with status 2 on bad arguments.
	"""
	if len(argv) != 3:
		print("Usage: <native.pdb> <decoys.pdb> <output.csv>")
		sys.exit(2)
	# After the length check the indexing below cannot fail, so the old
	# bare try/except around these assignments was dead code and is removed.
	native_in = argv[0]
	file_in = argv[1]
	output_file = argv[2]
	# Count residues; fall back to the longest common sequence when the
	# native and decoy chains differ in length.
	native_atoms = Parser.countResidues(native_in)
	decoy_atoms = Parser.countResidues(file_in)
	if len(native_atoms) != len(decoy_atoms):
		print("Unequal, find longest common sequence")
		native_result, decoy_result = Parser.lcs(native_atoms, decoy_atoms)
		print("New length: " + str(len(native_result)))
	else:
		native_result = []
		decoy_result = []
	# Read and store the native conformation.
	nativelabels, nativeconformation = Parser.readConformations(str(native_in), 1, native_result)
	# Classify every decoy against the native using lRMSD criteria {2, 4}.
	criteria = [2.0000000, 4.0000000]
	models = 0
	atoms = []
	output_data = []
	currConf = []
	within2 = 0
	morethan2 = 0
	within4 = 0
	morethan4 = 0
	with open(file_in, 'r') as f:
		for line in f:
			splt = line.split()
			if not splt:
				continue  # skip blank lines instead of crashing on splt[0]
			if splt[0] == 'MODEL':
				atoms = []
				currConf = []
			elif splt[0] == 'ATOM':
				# keep only alpha-carbons that survive the LCS filter
				if splt[2] == 'CA':
					if len(decoy_result) == 0 or (str(splt[3]), int(splt[5])) in decoy_result:
						atoms.append((float(splt[6]), float(splt[7]), float(splt[8])))
			elif splt[0] == 'TER':
				if len(atoms) > 0:
					currConf.append(atoms)
					models += 1
					distance = Distance.lrmsd(nativeconformation[0], currConf[0])
					if distance <= criteria[0]:
						within2 += 1
						within4 += 1
					else:
						morethan2 += 1
						if distance <= criteria[1]:
							within4 += 1
						else:
							morethan4 += 1
	# Summary row: protein name, CA count, total confs, within/morethan counts.
	output_data.append(native_in[5:-4])
	output_data.append(len(nativeconformation[0]))
	output_data.append(within2 + morethan2)
	output_data.append(within2)
	output_data.append(morethan2)
	output_data.append(within4)
	output_data.append(morethan4)
	with open(output_file, 'a+') as csvfile:
		writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
		# BUG FIX: in 'a+' mode the stream starts positioned at end-of-file,
		# so the old `csvfile.readline() == ""` check was always true and the
		# header was rewritten on every run.  Write it only when the file is
		# actually empty.
		csvfile.seek(0, 2)  # 2 == os.SEEK_END
		if csvfile.tell() == 0:
			writer.writerow(["Protein", "Num CA", "Num Confs", "Within 2", "Morethan 2", "Within 4", "Morethan 4"])
		writer.writerow(output_data)
	print("Completed")