def reset(self, episode):
    """Reset the environment to the start of one training episode.

    Rebuilds the forward/backward distance helpers for this episode's
    candidate and query trajectories, evaluates three initial Frechet
    similarities (whole trajectory, 1-point prefix, remaining suffix)
    and records the best of them as the current sub-trajectory.

    Returns:
        (observation, length): a (1, 3) array holding
        [whole, prefix, suffix] similarities, and the candidate length.
    """
    # prefix_state --> [split_point, index]
    # suffix_state --> [index + 1, len - 1]
    self.split_point = 0
    # Separate forward (prefix) and reversed (suffix) matchers so each
    # can keep its own internal state.
    self.DIS = Distance(len(self.cand_train_data[episode]), len(self.query_train_data[episode]))
    self.DIS_R = Distance(len(self.cand_train_data[episode]), len(self.query_train_data[episode]))
    self.length = len(self.cand_train_data[episode])
    # Similarity of the single first candidate point vs. the whole query.
    self.presim = self.DIS.FRECHET(self.cand_train_data[episode][self.split_point:1], self.query_train_data[episode])
    # Suffix similarity is computed on reversed sequences.
    self.sufsim = self.DIS_R.FRECHET(self.cand_train_data[episode][1:][::-1], self.query_train_data[episode][::-1])
    # Whole-trajectory similarity (computed on the reversed pair).
    whole = self.DIS_R.FRECHET(self.cand_train_data[episode][::-1], self.query_train_data[episode][::-1])
    observation = np.array([whole, self.presim, self.sufsim]).reshape(1, -1)
    # Best (smallest) similarity so far; later branches win ties for the
    # associated [start, end] sub-trajectory.
    self.subsim = min(whole, self.presim, self.sufsim)
    if self.subsim == whole:
        self.subtraj = [0, self.length - 1]
    if self.subsim == self.presim:
        self.subtraj = [0, 0]
    if self.subsim == self.sufsim:
        self.subtraj = [1, self.length - 1]
    return observation, self.length
def test_concatenation_error(self):
    """Concatenating distances of different metrics raises TypeError."""
    first = Distance("euclidean", 1, 2)
    second = Distance("cosine", 3, 4)
    for left, right in ((first, second), (second, first)):
        with self.assertRaises(TypeError):
            left.concatenate(right)
def test_args_valueerror(self):
    """Invalid constructor arguments must raise ValueError."""
    # No numeric arguments at all is invalid.
    with self.assertRaises(ValueError):
        Distance("cosine")
    # Each input contains at least one non-numeric element.
    bad_inputs = (
        [1, "2", 3],
        [True, [], 3.],
        [9.3, 1., set()],
        {"e": "1"},
    )
    for bad in bad_inputs:
        with self.subTest(text=bad), self.assertRaises(ValueError):
            Distance("manhattan", bad)
def __calculateDistance__(src: str, dst: str, distanceStr: str, distanceUpto: int) -> tuple:
    """Parse *distanceStr* and build a Distance between *src* and *dst*.

    Returns:
        A ``(distance, Distance)`` tuple:
          * if *distanceUpto* is -1, the sentinel is passed straight through;
          * if *distanceStr* is numeric, the remaining distance
            (parsed value minus *distanceUpto*) is used;
          * otherwise -1 marks an unparsable distance string.

    Note: the original return annotation claimed ``int`` although a
    2-tuple is always returned on every path; the annotation is
    corrected here (behavior unchanged).
    """
    if distanceUpto == -1:
        # Sentinel: no distance budget was supplied.
        return distanceUpto, Distance(src, dst, distanceUpto)
    distance = -1
    if distanceStr.isnumeric():
        distance = int(distanceStr, base=10)
        return distance, Distance(src, dst, distance - distanceUpto)
    # Unparsable distance string: propagate the -1 marker.
    return distance, Distance(src, dst, distance)
def test_concatenation(self):
    """Concatenating two same-metric distances merges their numbers."""
    for metric in ("euclidean", "manhattan"):
        left = Distance(metric, 1, 2)
        right = Distance(metric, 3, 4)
        combined = left.concatenate(right)
        self.assertEqual([1, 2, 3, 4], combined.nums)
def enter():
    """Scene entry hook: (re)create every world object and reset the
    class-level state left over from a previous run."""
    game_framework.reset_time()
    global map, player, house, background, avalanche, coin, snows, map_on_coins, game_over, santa, game_clear, distance, stones
    map = Map()
    player = Player()
    house = House()
    background = Background()
    avalanche = Avalanche()
    coin = Coin()
    game_over = Game_over()
    santa = Santa()
    game_clear = Game_clear()
    distance = Distance()
    map_on_coins = [Map_on_Coin() for i in range(200)]
    snows = [Snow() for i in range(20)]
    stones = [Stone() for i in range(10)]
    # Reset class attributes (state shared across instances) to their
    # spawn values so a restarted scene does not inherit old state.
    Player.x = 300.0
    Player.y = 300.0
    Player.unreal_x = 300.0
    Player.unreal_y = 0
    Player.jump_before_y = 0
    Map.map_move_y_minor = 0
    Avalanche.game_over = 0
    Game_clear.game_clear = 0
    Santa.game_clear = 0
def __init__(self, node, outer_d, shortlist, key, find_value, rpc):
    """Bookkeeping state for one iterative DHT find operation.

    Args:
        node: The local node performing the lookup.
        outer_d: Deferred to fire with the final result.
        shortlist: Initial candidate contacts for the search.
        key: The target key being searched for.
        find_value: Presumably a flag selecting findValue vs. findNode
            semantics — confirm against the caller.
        rpc: The rpc method to invoke on contacts.
    """
    self.node = node
    self.outer_d = outer_d
    self.shortlist = shortlist
    self.key = key
    self.find_value = find_value
    self.rpc = rpc
    # all distance operations in this class only care about the distance
    # to self.key, so this makes it easier to calculate those
    self.distance = Distance(key)
    # List of active queries; len() indicates number of active probes
    #
    # n.b: using lists for these variables, because Python doesn't
    # allow binding a new value to a name in an enclosing
    # (non-global) scope
    self.active_probes = []
    # List of contact IDs that have already been queried
    self.already_contacted = []
    # Probes that were active during the previous iteration
    # A list of found and known-to-be-active remote nodes
    self.active_contacts = []
    # This should only contain one entry; the next scheduled iteration call
    self.pending_iteration_calls = []
    self.prev_closest_node = [None]
    self.find_value_result = {}
    self.slow_node_count = [0]
def __init__(self, center=None, tag=""):
    """Create a cluster group.

    Args:
        center: Initial cluster center; a fresh empty list is used when
            omitted. ``None`` replaces the original ``[]`` default, which
            was one mutable list shared by every instance created without
            an explicit center (mutable-default-argument bug).
        tag: Human-readable label for this group.
    """
    self.samples = []  # Sample points classified into this group.
    self.center = center if center is not None else []  # Current center.
    self.old_center = []  # Previous center (for convergence checks).
    self.distance = Distance()
    self.distance_method = Method.Eculidean
    self.tag = tag
def test_distance_with_valid(self):
    """The 'euc' alias resolves to the euclidean distance function."""
    dist = Distance('euc')
    self.assertEqual('euclidean', dist.func.__name__)
def test_distance_with_args(self):
    """Calling the distance object on two points yields a number."""
    dist = Distance('euc')
    result = dist((4, 5), (1, 1))
    self.assertTrue(isinstance(result, (float, int)))
def __init__(self, node, shortlist, key, rpc, exclude=None):
    """Set up bookkeeping for one iterative DHT search.

    Args:
        node: The local node performing the lookup.
        shortlist: The k closest known contacts to *key* from the
            routing table.
        key: The search key (stored as str).
        rpc: The rpc method name (findValue or findNode).
        exclude: Optional iterable of contacts/ids to skip.
    """
    self.exclude = set(exclude or [])
    self.node = node
    self.finished_deferred = defer.Deferred()
    # all distance operations in this class only care about the distance
    # to self.key, so this makes it easier to calculate those
    self.distance = Distance(key)
    # The closest known and active node yet found
    self.closest_node = None if not shortlist else shortlist[0]
    self.prev_closest_node = None
    # Shortlist of contact objects (the k closest known contacts to the key from the routing table)
    self.shortlist = shortlist
    # The search key
    self.key = str(key)
    # The rpc method name (findValue or findNode)
    self.rpc = rpc
    # List of active queries; len() indicates number of active probes
    self.active_probes = []
    # List of contact (address, port) tuples that have already been queried, includes contacts that didn't reply
    self.already_contacted = []
    # A list of found and known-to-be-active remote nodes (Contact objects)
    self.active_contacts = []
    # Ensure only one searchIteration call is running at a time
    self._search_iteration_semaphore = defer.DeferredSemaphore(1)
    self._iteration_count = 0
    self.find_value_result = {}
    self.pending_iteration_calls = []
def biomass_worker(driver, biomass, out_name, distance='hav'):
    """Worker function for parallel execution.

    Computes the biomass emissions (AGB and BGB) by using ``biomass_emissions`` function.
    Further, the pixel resolution in square meter is computed and delegated to ``biomass_emissions``.
    Result is stored on disk as raster image by using the metadata profile of the first argument.

    Args:
        driver (str or Path): Path to Proximate Deforestation Driver tile.
        biomass (str or Path): Path to Above-ground Woody Biomass Density stratum.
        out_name (str or Path): Path plus name of out file.
        distance (str, optional): Default is Haversine equation.
    """
    # NOTE(review): `.read(1)`, `.profile` and `.transform` are raster
    # dataset attributes — `open` here is presumably rasterio.open
    # (imported elsewhere in the module), not the builtin; confirm
    # against the module imports.
    with open(driver, 'r') as h1, open(biomass, 'r') as h2:
        driver_data = h1.read(1)
        biomass_data = h2.read(1)
        profile = h1.profile
        transform = h1.transform
    # Pixel edge lengths in meters: distance from the raster origin to
    # one pixel over in x, and to one pixel down in y.
    haversine = Distance(distance)
    x = haversine((transform.xoff, transform.yoff), (transform.xoff + transform.a, transform.yoff))
    y = haversine((transform.xoff, transform.yoff), (transform.xoff, transform.yoff + transform.e))
    area = round(x * y)
    emissions = biomass_emissions(driver_data, biomass_data, area=area)
    # write updates the dtype corresponding to the array dtype
    write(emissions, out_name, **profile)
def distance(location1, location2):
    """
    Great Circle distance between two locations, using the Haversine formula.

    >>> import geocoder
    >>> d = geocoder.distance('Ottawa', 'Toronto')
    >>> d.km
    351.902264779
    >>> d.miles
    218.672067333
    ...

    Different ways to use the Distance calculator, you can input
    the locations by using a tuple (lat, lng) or a dictionary with
    lat/lng keys.

    >>> import geocoder
    >>> ottawa = (45.4215296, -75.69719309999999)
    >>> toronto = {'lat':43.653226, 'lng':-79.3831843}
    >>> d = geocoder.distance(ottawa, toronto)
    >>> d.meters
    351902
    ...

    Wiki Docs
    ---------
    http://en.wikipedia.org/wiki/Haversine_formula
    """
    return Distance(location1, location2)
def findCloseNodes(self, key, count=None, sender_node_id=None):
    """
    Finds a number of known nodes closest to the node/value with the
    specified key.

    @param key: the n-bit key (i.e. the node or value ID) to search for
    @type key: str
    @param count: the amount of contacts to return, default of k (8)
    @type count: int
    @param sender_node_id: Used during RPC, this is be the sender's Node ID
                           Whatever ID is passed in the paramater will get
                           excluded from the list of returned contacts.
    @type sender_node_id: str

    @return: A list of node contacts (C{kademlia.contact.Contact instances})
             closest to the specified key.
             This method will return C{k} (or C{count}, if specified)
             contacts if at all possible; it will only return fewer if the
             node is returning all of the contacts that it knows of.
    @rtype: list
    """
    excluded_ids = [self._parentNodeID]
    if sender_node_id:
        excluded_ids.append(sender_node_id)
    # Never exclude the key being searched for itself.
    if key in excluded_ids:
        excluded_ids.remove(key)
    limit = count or constants.k
    distance_to_key = Distance(key)
    candidates = [c for c in self.get_contacts() if c.id not in excluded_ids]
    candidates.sort(key=lambda c: distance_to_key(c.id))
    return candidates[:min(limit, len(candidates))]
def test_a_distance(self):
    """get_km returns the expected distance for every test coordinate."""
    calculator = Distance(self.earth, self.office)
    for i, expected in enumerate(self.result):
        self.assertEqual(calculator.get_km(self.testLat[i], self.testLon[i]), expected)
    print('DISTANCE TESTS PASSED')
def iterativeAnnounceHaveBlob(self, blob_hash, value):
    """Announce to the DHT that this node has *blob_hash*.

    Finds the closest known contacts to the hash, stores the value
    locally when this node is itself among the k closest, then asks each
    contact for a store token (via findValue) and issues a store RPC.

    Returns (via deferred) the hex-encoded ids of the contacts that
    stored the value successfully.
    """
    known_nodes = {}
    contacts = yield self.iterativeFindNode(blob_hash)
    # store locally if we're the closest node and there are less than k contacts to try storing to
    if self.externalIP is not None and contacts and len(
            contacts) < constants.k:
        is_closer = Distance(blob_hash).is_closer(self.node_id, contacts[-1].id)
        if is_closer:
            contacts.pop()
            yield self.store(blob_hash, value, originalPublisherID=self.node_id, self_store=True)
    elif self.externalIP is not None:
        pass
    else:
        raise Exception("Cannot determine external IP: %s" % self.externalIP)
    contacted = []

    @defer.inlineCallbacks
    def announce_to_contact(contact):
        # Fetch a store token from the contact, then ask it to store;
        # failures are logged and skipped so one bad peer cannot abort
        # the whole announcement.
        known_nodes[contact.id] = contact
        try:
            responseMsg, originAddress = yield contact.findValue(blob_hash, rawResponse=True)
            if responseMsg.nodeID != contact.id:
                raise Exception("node id mismatch")
            value['token'] = responseMsg.response['token']
            res = yield contact.store(blob_hash, value)
            if res != "OK":
                raise ValueError(res)
            contacted.append(contact)
            log.debug("Stored %s to %s (%s)", blob_hash.encode('hex'),
                      contact.id.encode('hex'), originAddress[0])
        except protocol.TimeoutError:
            log.debug("Timeout while storing blob_hash %s at %s",
                      blob_hash.encode('hex')[:16], contact.id.encode('hex'))
        except ValueError as err:
            log.error("Unexpected response: %s" % err.message)
        except Exception as err:
            log.error("Unexpected error while storing blob_hash %s at %s: %s",
                      binascii.hexlify(blob_hash), contact, err)

    # Announce to every contact in parallel and wait for all to finish.
    dl = []
    for c in contacts:
        dl.append(announce_to_contact(c))
    yield defer.DeferredList(dl)
    log.debug("Stored %s to %i of %i attempted peers",
              blob_hash.encode('hex')[:16], len(contacted), len(contacts))
    contacted_node_ids = [c.id.encode('hex') for c in contacted]
    defer.returnValue(contacted_node_ids)
def check_distance(self, near_customers):
    # Every matched customer must lie within the configured match
    # radius of the office coordinates.
    for customer in near_customers:
        self.assertLessEqual(
            Distance().calculate_from_coordinates_in_degrees(
                self.nearby_customers.office_lat,
                self.nearby_customers.office_long,
                float(customer['latitude']),
                float(customer['longitude'])),
            self.nearby_customers.match_radius_km)
def step(self, episode, action, index):
    """Advance the splitting environment by one step at position *index*.

    action == 0 (non-split): keep the current split point and extend the
    prefix/suffix DTW similarities to include *index*.
    action == 1 (split): move the split point to *index* and rebuild the
    forward distance helper for the shortened candidate.

    Returns:
        (observation, reward): observation is a (1, 3) array of
        [best-so-far, prefix, suffix] similarities; reward is the
        decrease of the best similarity achieved by this step.
    """
    if action == 0:  # non-split
        # state transfer
        self.presim = self.DIS.DTW(
            self.cand_train_data[episode][self.split_point:(index + 1)],
            self.query_train_data[episode])
        self.sufsim = self.DIS_R.DTW(
            self.cand_train_data[episode][(index + 1):][::-1],
            self.query_train_data[episode][::-1])
        # At the last point there is no suffix left; mirror the prefix.
        if (index + 1) == self.length:
            self.sufsim = self.presim
        observation = np.array([self.subsim, self.presim,
                                self.sufsim]).reshape(1, -1)
        last_subsim = self.subsim
        # Keep the best (smallest) similarity and its [start, end] span.
        if self.presim < self.subsim:
            self.subsim = self.presim
            self.subtraj = [self.split_point, index]
        if self.sufsim < self.subsim:
            self.subsim = self.sufsim
            self.subtraj = [index + 1, self.length - 1]
        # Reward is the improvement (decrease) of the best similarity.
        self.RW = last_subsim - self.subsim
        return observation, self.RW
    if action == 1:  # split
        self.split_point = index
        # Rebuild the forward matcher sized for the shortened candidate.
        self.DIS = Distance(
            len(self.cand_train_data[episode][self.split_point:]),
            len(self.query_train_data[episode]))
        # state transfer
        self.presim = self.DIS.DTW(
            self.cand_train_data[episode][self.split_point:(index + 1)],
            self.query_train_data[episode])
        self.sufsim = self.DIS_R.DTW(
            self.cand_train_data[episode][(index + 1):][::-1],
            self.query_train_data[episode][::-1])
        if (index + 1) == self.length:
            self.sufsim = self.presim
        observation = np.array([self.subsim, self.presim,
                                self.sufsim]).reshape(1, -1)
        last_subsim = self.subsim
        if self.presim < self.subsim:
            self.subsim = self.presim
            self.subtraj = [self.split_point, index]
        if self.sufsim < self.subsim:
            self.subsim = self.sufsim
            self.subtraj = [index + 1, self.length - 1]
        self.RW = last_subsim - self.subsim
        return observation, self.RW
def reset(self, episode, label='E'):
    """Reset the DTW splitting environment for one episode.

    Like the Frechet variant, but uses DTW and reuses the DP matrix of
    the whole-trajectory pass to read off the suffix similarity instead
    of running a second full DTW.

    Args:
        episode: Index into the training data.
        label: 'E' for evaluation; 'T' additionally sets up an exact
            reward-distance tracker for training.

    Returns:
        (observation, length, -1): a (1, 3) array of
        [whole, prefix, suffix] similarities, the candidate length, and
        a constant sentinel.
    """
    # prefix_state --> [split_point, index]
    # suffix_state --> [index + 1, len - 1]
    self.split_point = 0
    self.DIS = Distance(len(self.cand_train_data[episode]),
                        len(self.query_train_data[episode]))
    self.DIS_R = Distance(len(self.cand_train_data[episode]),
                          len(self.query_train_data[episode]))
    self.length = len(self.cand_train_data[episode])
    self.skip = []
    self.presim = self.DIS.DTW(
        self.cand_train_data[episode][self.split_point:1],
        self.query_train_data[episode])
    whole = self.DIS_R.DTW(self.cand_train_data[episode][::-1],
                           self.query_train_data[episode][::-1])
    # Row (length - 2) of the reversed DP matrix holds the DTW value of
    # the [1:] suffix — avoids recomputing
    # DTW(cand[1:][::-1], query[::-1]) from scratch.
    self.sufsim = self.DIS_R.D[self.length - 2, -1]
    observation = np.array([whole, self.presim, self.sufsim]).reshape(1, -1)
    # Best (smallest) similarity so far; later branches win ties.
    self.subsim = min(whole, self.presim, self.sufsim)
    if self.subsim == whole:
        self.subtraj = [0, self.length - 1]
    if self.subsim == self.presim:
        self.subtraj = [0, 0]
    if self.subsim == self.sufsim:
        self.subtraj = [1, self.length - 1]
    if label == 'T':
        # Training mode: keep a dedicated distance helper so exact
        # rewards can be tracked independently of the search state.
        self.REWARD_DIS = Distance(len(self.cand_train_data[episode]),
                                   len(self.query_train_data[episode]))
        self.presim_real = self.REWARD_DIS.DTW(
            self.cand_train_data[episode][self.split_point:1],
            self.query_train_data[episode])
        self.subsim_real = self.subsim
    return observation, self.length, -1
def __init__(self, center=None, tag=""):
    """Create an FCM cluster group.

    Args:
        center: Initial cluster center; a fresh empty list is used when
            omitted. ``None`` replaces the original ``[]`` default, which
            was one mutable list shared by every instance created without
            an explicit center (mutable-default-argument bug).
        tag: Human-readable label for this group.
    """
    # Sample points classified into this group (no positional
    # correspondence with all_memberships).
    self.samples = []
    # Membership degree of every sample toward this group's center.
    # FCM estimates over the whole sample set (center updates, SSE),
    # unlike K-Means where each individual group updates its own center.
    self.all_memberships = []
    self.center = center if center is not None else []  # Current center.
    self.old_center = []  # Previous center.
    self.distance = Distance()
    self.distance_method = Method.Eculidean
    self.tag = tag
    self.sse = 0.0  # This group's SSE (provided externally, recorded here).
def _shouldSplit(self, bucketIndex, toAdd):
    """Decide whether the bucket at *bucketIndex* should be split to
    accommodate the new contact id *toAdd*."""
    # https://stackoverflow.com/questions/32129978/highly-unbalanced-kademlia-routing-table/32187456#32187456
    # Always split the bucket covering our own node id.
    if self._buckets[bucketIndex].keyInRange(self._parentNodeID):
        return True
    contacts = self.get_contacts()
    distance = Distance(self._parentNodeID)
    contacts.sort(key=lambda c: distance(c.id))
    # k-th closest contact (or the farthest one if we know fewer than k).
    kth_contact = contacts[-1] if len(
        contacts) < constants.k else contacts[constants.k - 1]
    # Split only when the new contact would rank among the k closest.
    return distance(toAdd) < distance(kth_contact.id)
def __init__(self): self.samples = [] # 训练样本集: [features array of each sample] self.distance_method = Method.Eculidean # 要用什么距离算法 self.distance = Distance() # 距离算法 self.groups = [] # 每一个分类好的群聚 [Group] self.convergence = 0.001 # 收敛误差 self.max_iteration = 100 # 最大迭代次数 self.m = 2 # 计算归属度的参数 (m 是模糊器, 用来决定每一个群聚的模糊程度,m 太小则算法效果会接近硬聚类,太大则效果差) self.iteration_callback = None self.completion_callback = None
def get(self):
    """Return the customers within the match radius of the office,
    sorted ascending by numeric user_id."""
    matches = []
    for customer in self.customers:
        km = Distance().calculate_from_coordinates_in_degrees(
            self.office_lat, self.office_long,
            float(customer['latitude']), float(customer['longitude']))
        if km <= self.match_radius_km:
            matches.append(customer)
    matches.sort(key=lambda cust: int(cust['user_id']))
    self.nearby_customers = matches
    return self.nearby_customers
def main(self):
    """Run the pipeline: parse the input file, filter and sort guests
    within range, save the result and print it."""
    print("Start \n")
    calculator = Distance(self.earth, self.office)
    parser = File(self.url)
    # Renamed from `list`: never shadow the builtin.
    records = parser.parse_file()
    guest_list = self.make_glist(calculator, records)
    self.save_file(guest_list)
    # Iterate directly instead of indexing via range(len(...)).
    for guest in guest_list:
        print(guest)
    print("\nFinish \nPlease check output.txt file too.")
def __init__(self):
    """K-Means clusterer configuration and state."""
    self.samples = []  # Training samples.
    self.distance_method = Method.Eculidean  # Which distance algorithm to use.
    self.distance = Distance()  # Distance algorithm implementation.
    self.center_choice = Choice.Shuffing  # How initial centers are picked.
    self.center_maker = Maker()  # Builds the initial centers.
    self.groups = []  # Every clustered group.
    self.convergence = 0.001  # Convergence tolerance.
    self.max_iteration = 100  # Maximum number of iterations.
    self.iteration_callback = None
    self.completion_callback = None
def test_b_guestlist(self):
    """make_glist keeps only guests within 100KM, sorted by user_id."""
    distance = Distance(self.earth, self.office)
    hsine = Hsine(self.earth, self.office, "url")
    guest_list = hsine.make_glist(distance, self.list)
    # user_id's 2 and 77 should appear in the sorted list, ascending
    self.assertEqual(guest_list[0]["user_id"], 2)
    self.assertEqual(guest_list[1]["user_id"], 77)
    # exactly 2 results satisfy the "within 100KM" criterion
    self.assertEqual(len(guest_list), 2)
    print('GLIST FILTERING and SORTING TESTS PASSED')
def get_for_big_customer_data(self, customer_file):
    """Stream a (possibly huge) JSON-lines customer file and return the
    customers within the match radius, sorted ascending by user_id."""
    self.nearby_customers = []
    with open(customer_file) as fp:
        for raw_line in fp:
            customer = json.loads(raw_line)
            km = Distance().calculate_from_coordinates_in_degrees(
                self.office_lat, self.office_long,
                float(customer['latitude']), float(customer['longitude']))
            if km <= self.match_radius_km:
                self.nearby_customers.append(customer)
    self.nearby_customers.sort(key=lambda cust: int(cust['user_id']))
    return self.nearby_customers
def getContacts(self, count=-1, excludeContact=None, sort_distance_to=None):
    """
    Returns a list containing up to the first count number of contacts

    @param count: The amount of contacts to return (if 0 or less, return all contacts)
    @type count: int
    @param excludeContact: A node id to exclude; if this contact is in the
                           list of returned values, it will be discarded
                           before returning.  If a C{str} is passed as this
                           argument, it must be the contact's ID.
    @type excludeContact: str
    @param sort_distance_to: Sort distance to the id, defaulting to the
                             parent node id. If False don't sort the contacts
    @raise IndexError: If the number of requested contacts is too large
    @return: Return up to the first count number of contacts in a list
             If no contacts are present an empty is returned
    @rtype: list
    """
    contacts = [
        contact for contact in self._contacts if contact.id != excludeContact
    ]
    # Return all contacts in bucket
    if count <= 0:
        count = len(contacts)
    # Get current contact number
    currentLen = len(contacts)
    # If count greater than k - return only k contacts
    if count > constants.k:
        count = constants.k
    if not currentLen:
        return contacts
    # Identity check on purpose: None means "sort relative to the parent
    # node id"; only an explicit False disables sorting.  (Replaces the
    # original `if ... is False: pass / else:` shape.)
    if sort_distance_to is not False:
        sort_distance_to = sort_distance_to or self._node_id
        # Build the Distance helper once instead of once per key call —
        # the original constructed Distance(sort_distance_to) inside the
        # sort key, i.e. N times for an N-element sort.
        distance = Distance(sort_distance_to)
        contacts.sort(key=lambda c: distance(c.id))
    return contacts[:min(currentLen, count)]
def __add_groups(self, added_groups, min_distance):
    """Merge *added_groups* into one new dendrogram node placed at
    height *min_distance*, rewiring distances of all remaining groups."""
    new_group = Group()
    new_group.name = self.__next_char()
    # For every surviving group, its distance to the merged node is the
    # minimum of its distances to any of the merged members
    # (single-linkage style); old per-member distances are dropped.
    for group in self.__groups:
        if group not in added_groups:
            min_distance_temp = group.get_distance(added_groups[0])
            for curr_group in added_groups:
                if group.get_distance(curr_group) < min_distance_temp:
                    min_distance_temp = group.get_distance(curr_group)
            group.delete_distances(added_groups)
            group.distances.append(Distance(min_distance_temp, new_group))
            new_group.distances.append(Distance(min_distance_temp, group))
    # Assign an x layout position to members that don't have one yet.
    for added_group in added_groups:
        if added_group.x == 0.0:
            added_group.x = self.__offset_x
            self.__offset_x += 1
    new_group.sub_groups = added_groups
    sub_groups_points = []
    # Collect member positions and remove the members from the active
    # group list (they now live under the merged node).
    for added_group in added_groups:
        sub_groups_points.append(Point(added_group.x, added_group.y))
        try:
            self.__groups.remove(added_group)
        except ValueError:
            print('group has been deleted before')
    # The merged node sits at the mean x of its children, at height
    # min_distance.
    x = 0.0
    for point in sub_groups_points:
        x += point.x
    new_group.x = x / len(sub_groups_points)
    new_group.y = min_distance
    self.__groups.append(new_group)
def output(self, index, episode, label='E'):
    """Return ``[similarity, subtrajectory]`` for the current episode.

    label 'T' (training): return the exact similarity tracked during
    stepping.  label 'E' (evaluation): recompute the real Frechet
    similarity of the chosen candidate span from scratch.
    """
    if label == 'T':
        return [self.subsim_real, self.subtraj]
    if label == 'E':
        # Rebuild a distance helper sized for the selected span and
        # compute its exact similarity to the query.
        self.DIS = Distance(
            len(self.cand_train_data[episode]
                [self.subtraj[0]:self.subtraj[1] + 1]),
            len(self.query_train_data[episode]))
        self.subsim_real = self.DIS.FRECHET(
            self.cand_train_data[episode][self.subtraj[0]:self.subtraj[1] + 1],
            self.query_train_data[episode])
        return [self.subsim_real, self.subtraj]