# Assumed import used by every snippet below that refers to PQ; under Python 2
# the module is spelled Queue (`from Queue import PriorityQueue as PQ`).
from queue import PriorityQueue as PQ


def show_nearest(word_2_vec, w_vec, exclude_w, sim_metric):
    # word_2_vec: a dictionary of word-context vectors. A vector can be sparse (dictionary) or dense (numpy array).
    # w_vec: the context vector of a particular query word `w`. It can be a sparse vector (dictionary) or a dense vector (numpy array).
    # exclude_w: the words you want to exclude from the responses. It is a Python set.
    # sim_metric: the similarity metric you want to use. It is a Python function
    #             which takes two word vectors as arguments.
    # return: an iterable (e.g. a list) of up to 10 tuples of the form (word, score), where the nth tuple
    #         holds the nth most similar word to the input word and the similarity score of that word and the input word.
    #         If fewer than 10 words are available, the function returns a shorter iterable.
    #
    # example:
    # [(cat, 0.827517295965), (university, -0.190753135501)]
    # print type(word_2_vec)
    # if (type(word_2_vec) != dict or type(w_vec) != dict or type(exclude_w) != set):
    #     return "wrong arguments"
    result = []
    pq = PQ()
    for word in word_2_vec.keys():
        if word not in exclude_w:
            # Negate the similarity so the min-heap pops the most similar word first.
            pq.put((-sim_metric(word_2_vec.get(word), w_vec), word))
            # print (word)
    # print pq
    for i in range(10):
        if pq.empty():
            break
        tmp = pq.get()
        result.append((tmp[1], -tmp[0]))
    return result

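# A minimal usage sketch for show_nearest, assuming dense numpy vectors and a
# cosine similarity; the vocabulary, vectors and helper below are illustrative only.
import numpy as np


def cosine(u, v):
    return float(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v)))


word_2_vec = {
    "cat": np.array([1.0, 0.2]),
    "dog": np.array([0.9, 0.3]),
    "university": np.array([-0.2, 1.0]),
}
# Nearest neighbours of "cat", excluding the query word itself.
print(show_nearest(word_2_vec, word_2_vec["cat"], {"cat"}, cosine))
# e.g. [('dog', 0.99...), ('university', 0.0)]
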
def peopleIndexes(self, favoriteCompanies):
    """
    :type favoriteCompanies: List[List[str]]
    :rtype: List[int]
    """
    pq = PQ()
    tmp = []
    res = []
    maxLength = -1
    for i in range(len(favoriteCompanies)):
        companies = set(favoriteCompanies[i])
        pq.put((-len(companies), i, companies))
        maxLength = max(maxLength, len(companies))
    # Pop people in decreasing order of list size: a list can only be a subset
    # of a list that is at least as large, i.e. one processed earlier.
    while pq.qsize() != 0:
        cur = pq.get()
        if cur[0] + maxLength == 0:
            # A largest list is never a proper subset of another, but it must still
            # be recorded in tmp so that smaller lists are checked against it.
            res.append(cur[1])
            tmp.append(cur[2])
        else:
            flag = True
            for s in tmp:
                if cur[2].issubset(s):
                    flag = False
                    break
            if flag:
                res.append(cur[1])
            tmp.append(cur[2])
    return sorted(res)

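# A minimal usage sketch, assuming peopleIndexes is a method of a LeetCode-style
# Solution class as its signature suggests; the input mirrors the published example.
print(Solution().peopleIndexes([["leetcode", "google", "facebook"],
                                ["google", "microsoft"],
                                ["google", "facebook"],
                                ["google"],
                                ["amazon"]]))
# expected: [0, 1, 4]
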
def numPoints(self, points, r):
    """
    :type points: List[List[int]]
    :type r: int
    :rtype: int
    """
    # Assumes `from math import sqrt` at module level.
    def euclidean(p1, p2):
        return sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)

    length = len(points)
    res = length
    record = {}
    pq = PQ()
    blockLength = -1
    blockIndex = -1
    #dist = [[0] * length for _ in range(length)]
    for i in range(length):
        for j in range(i + 1, length):
            dist = euclidean(points[i], points[j])
            if dist > 2 * r:
                #print((i, j))
                if i not in record:
                    record[i] = set()
                record[i].add(j)
    '''
    delete the node that has the largest number of blocked points, recursively
    '''
    return res

def maxDistance(self, position, m):
    """
    :type position: List[int]
    :type m: int
    :rtype: int
    """
    # Assumes `from math import floor` at module level. This greedy version
    # repeatedly splits the largest remaining gap at its midpoint, treating the
    # span between the outermost positions as continuous.
    position.sort()
    if m == 2:
        return position[-1] - position[0]
    m -= 2
    pq = PQ()
    pq.put((-position[-1] + position[0], position[0], position[-1]))
    res = float("inf")
    while m != 0:
        cur = pq.get()
        print(cur)
        if cur[0] % 2 == 0:
            pq.put((cur[0] / 2, cur[1], cur[1] - cur[0] / 2))
            pq.put((cur[0] / 2, cur[2] + cur[0] / 2, cur[2]))
            res = min(res, -cur[0] / 2)
        else:
            pq.put((floor(cur[0] / 2), cur[1], cur[1] - floor(cur[0] / 2)))
            pq.put((floor(cur[0] / 2) + 1, cur[2] + floor(cur[0] / 2) + 1, cur[2]))
            res = min(res, -floor(cur[0] / 2))
        m -= 1
    return int(res)

def Solve(self, problem):
    figures = problem.getFigures()
    A = cv2.imread(figures["A"].fullpath, cv2.CV_LOAD_IMAGE_GRAYSCALE).astype(np.int32)
    B = cv2.imread(figures["B"].fullpath, cv2.CV_LOAD_IMAGE_GRAYSCALE).astype(np.int32)
    C = cv2.imread(figures["C"].fullpath, cv2.CV_LOAD_IMAGE_GRAYSCALE).astype(np.int32)
    best_guess = float("inf")
    answer = ""
    #corr = signal.correlate2d(A, B)
    #corr = signal.fftconvolve(A, B)
    sumpq = PQ()
    diffpq = PQ()
    ssimpq = PQ()
    mulpq = PQ()
    trapzpq = PQ()
    ssdpq = PQ()
    # Generate values for ALL of the guesses, then with [1,2,3,4], use the ratio 1:2 to 1:4
    # to estimate how confident you are in the guess, and use the confidence as a weight.
    for guess in map(str, range(1, 7)):
        trial = cv2.imread(figures[guess].fullpath, cv2.CV_LOAD_IMAGE_GRAYSCALE).astype(np.int32)
        sumpq.put((features.sumFeature(A, B, C, trial).value(), guess))
        diffpq.put((features.diffFeature(A, B, C, trial).value(), guess))
        ssimpq.put((features.ssimFeature(A, B, C, trial).value(), guess))
        #sumpq.put((features.corrFeature(A, B, C, trial, corr)
        mulpq.put((features.mulFeature(A, B, C, trial).value(), guess))
        trapzpq.put((features.trapzFeature(A, B, C, trial).value(), guess))
        ssdpq.put((features.ssdFeature(A, B, C, trial).value(), guess))
        #print trial, gsf
        #val = gsf#.value()
        #if val < best_guess:
        #    best_guess = val
        #    answer = guess
    weightMap = {0: 1.0, 1: 0.8, 2: 0.6, 3: 0.4, 4: 0.2, 5: 0.1}
    totalPQ = []
    FEATURES = 6
    for i in range(FEATURES):
        totalPQ.append(PQ())
    for i in range(6):
        totalPQ[0].put((-weightMap[i], sumpq.get()[1]))
        totalPQ[1].put((-weightMap[i], diffpq.get()[1]))
        totalPQ[2].put((-weightMap[i], ssimpq.get()[1]))
        totalPQ[3].put((-weightMap[i], mulpq.get()[1]))
        totalPQ[4].put((-weightMap[i], trapzpq.get()[1]))
        totalPQ[5].put((-weightMap[i], ssdpq.get()[1]))
    finalVal = {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0, '6': 0}
    for i in range(6):
        for j in range(FEATURES):
            v, guess = totalPQ[j].get()
            finalVal[guess] += -v
    val = 0
    answer = ""
    for guess in map(str, range(1, 7)):
        if val < finalVal[guess]:
            answer = guess
            val = finalVal[guess]
    return answer

def dijkstra(myg, start):
    # V is assumed to be the vertex set of the graph, defined at module level;
    # -1 marks a vertex whose distance is still unknown.
    distance_to = {i: -1 for i in V}
    distance_to[start] = 0
    frontier = PQ()
    frontier.put(start)
    while not frontier.empty():
        u = frontier.get(False)
        # get_neighbors(u) yields (neighbor, weight) pairs; a vertex is re-enqueued
        # whenever its tentative distance improves.
        for v in myg.get_neighbors(u):
            if distance_to[v[0]] == -1 or v[1] + distance_to[u] < distance_to[v[0]]:
                distance_to[v[0]] = v[1] + distance_to[u]
                frontier.put(v[0])
    return distance_to

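# A minimal usage sketch for dijkstra, assuming a tiny adjacency-list graph whose
# get_neighbors(u) returns (neighbor, weight) pairs and a module-level vertex set V,
# as the function above expects. The graph class and data here are illustrative only.
class TinyGraph(object):
    def __init__(self, adj):
        self.adj = adj

    def get_neighbors(self, u):
        return self.adj.get(u, [])


V = {0, 1, 2, 3}
g = TinyGraph({0: [(1, 4), (2, 1)], 2: [(1, 2), (3, 5)], 1: [(3, 1)]})
print(dijkstra(g, 0))  # expected: {0: 0, 1: 3, 2: 1, 3: 4}
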
def Solve(self, problem, threshold=THRESHOLD):
    figures = problem.getFigures()
    A = cv2.imread(figures["A"].fullpath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
    B = cv2.imread(figures["B"].fullpath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
    C = cv2.imread(figures["C"].fullpath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
    guesses = {}
    for guess in ["1", "2", "3", "4", "5", "6"]:
        guesses[guess] = cv2.imread(figures[guess].fullpath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
    pq_A = PQ()
    for i, A_g in self.d.iteritems():
        pq_A.put((-ssim(A, A_g), i))
    pq_Guess = PQ()
    for i in range(5):
        val, ind = pq_A.get()
        if -val > threshold:
            B_g = cv2.imread('templates%stemplate%d%sB.png' % (os.sep, ind, os.sep), cv2.CV_LOAD_IMAGE_GRAYSCALE)
            C_g = cv2.imread('templates%stemplate%d%sC.png' % (os.sep, ind, os.sep), cv2.CV_LOAD_IMAGE_GRAYSCALE)
            ans_g = cv2.imread('templates%stemplate%d%sans.png' % (os.sep, ind, os.sep), cv2.CV_LOAD_IMAGE_GRAYSCALE)
            B_val = ssim(B_g, B)
            if B_val < threshold:
                continue
            C_val = ssim(C_g, C)
            if C_val < threshold:
                continue
            max_val = -2
            best_guess = 0
            for guess in ["1", "2", "3", "4", "5", "6"]:
                guessim = ssim(guesses[guess], ans_g)
                if guessim > max_val:
                    max_val = guessim
                    best_guess = guess
            if max_val < threshold:
                continue
            sum_val = -val + max_val + B_val + C_val
            pq_Guess.put((-sum_val, best_guess))
    if not pq_Guess.empty():
        return pq_Guess.get()[1]

def getStrongest(self, arr, k):
    """
    :type arr: List[int]
    :type k: int
    :rtype: List[int]
    """
    res = []
    pq = PQ()
    length = len(arr)
    arr.sort()
    median = arr[int((length - 1) / 2)]
    for i in range(length):
        pq.put((-abs(arr[i] - median), -arr[i], i))
    for i in range(k):
        tmp = pq.get()
        res.append(arr[tmp[2]])
    return res

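# A minimal usage sketch, assuming getStrongest is a method of a LeetCode-style
# Solution class as its signature suggests.
print(Solution().getStrongest([1, 2, 3, 4, 5], 2))  # expected: [5, 1]
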
def fix_graph(self, roots):
    graph = self.buildGraph
    fixed_graph = nx.Graph()
    pq = PQ()
    for node in roots:
        self.add_node_to_queue_z(node, graph, pq)
        fixed_graph.add_node(node)
    # Now the priority queue has been initialized
    while not pq.empty():  # While there are still edges
        print fixed_graph.number_of_nodes()
        (node, edge) = pq.get()
        print 'in the loop'
        if not (edge[1] in fixed_graph):  # if sink of edge hasn't been added yet
            fixed_graph.add_node(edge[1])
            fixed_graph.add_edge(node, edge[1], points=edge[2])
            self.add_node_to_queue_z(edge[1], graph, pq)
    return fixed_graph

def arrangeWords(self, text):
    """
    :type text: str
    :rtype: str
    """
    words = text.split(" ")
    res = ""
    pq = PQ()
    for i in range(len(words)):
        word = words[i]
        # Order by length, breaking ties by original position so the sort stays stable.
        pq.put((len(word), i, word.lower()))
    tmp = pq.get()[2]
    res = tmp[0].upper() + tmp[1:]
    while pq.qsize() != 0:
        #print(pq.get())
        res += " "
        res += pq.get()[2]
    return res

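# A minimal usage sketch, assuming arrangeWords is a method of a LeetCode-style
# Solution class as its signature suggests.
print(Solution().arrangeWords("Leetcode is cool"))  # expected: "Is cool leetcode"
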
def mergeKLists(self, lists):
    """
    :type lists: List[ListNode]
    :rtype: ListNode
    """
    pq = PQ()
    n = len(lists)
    res = ListNode('header')
    prev = res
    # A unique, increasing tie-breaker keeps equal values from ever comparing
    # ListNode objects directly, which would raise a TypeError in Python 3.
    tie = 0
    for i in range(0, n):
        if lists[i] is None:
            continue
        pq.put((lists[i].val, tie, lists[i]))
        tie += 1
    while not pq.empty():
        (v, _, node) = pq.get()
        prev.next = ListNode(v)
        prev = prev.next
        node = node.next
        if node is not None:
            pq.put((node.val, tie, node))
            tie += 1
    return res.next

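# A minimal usage sketch, assuming the usual LeetCode ListNode class and a
# Solution class wrapping mergeKLists; the helper functions are illustrative only.
class ListNode(object):
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next


def build(values):
    head = ListNode(0)
    cur = head
    for v in values:
        cur.next = ListNode(v)
        cur = cur.next
    return head.next


def to_list(node):
    out = []
    while node is not None:
        out.append(node.val)
        node = node.next
    return out


merged = Solution().mergeKLists([build([1, 4, 5]), build([1, 3, 4]), build([2, 6])])
print(to_list(merged))  # expected: [1, 1, 2, 3, 4, 4, 5, 6]
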
def graph_to_ordering(self, graph, roots, mid_x, mid_y):
    pq = PQ()
    curves = []
    print 'roots in gto'
    print roots
    for node in roots:
        print 'min dist'
        print self.minDistFromCenter(node, mid_x, mid_y)
        # PriorityQueue.put has no separate priority argument, so store
        # (priority, node) tuples ordered by distance from the center.
        pq.put((self.minDistFromCenter(node, mid_x, mid_y), node))
    while not pq.empty():  # While there are still edges
        print 'in da loop again'
        node = pq.get()[1]
        curves.append(node)
        edges = graph.edges(node, data=True)
        for edge in edges:
            if not (edge[1] in curves):  # if sink of edge hasn't been seen yet, no point in adding it twice
                pq.put((self.minDistFromCenter(edge[1], mid_x, mid_y), edge[1]))
    return curves

def plot_method_pairs_and_matrix(case_studies, fileappend=''):
    case_cov = np.cov(case_studies.transpose())
    case_corr = np.corrcoef(case_studies.transpose())
    cmatrix = case_corr
    fig = plt.figure(figsize=(twocol, twocol), dpi=figdpi, tight_layout=True)
    ax = fig.add_subplot(111)
    ppl.pcolormesh(fig, ax, cmatrix[inds][:, inds],
                   #-np.diag([.99]*len(meths)),
                   yticklabels=np.array(mindex)[inds].tolist(),
                   xticklabels=np.array(mindex)[inds].tolist(),
                   cmap=ppl.mpl.cm.RdBu,
                   vmax=0.4, vmin=-0.4)
    ax.tick_params(axis='both', which='major', labelsize=8)
    plt.setp(ax.get_xticklabels(), rotation='vertical')
    cm = dark2
    [l.set_color(cm[m_codes[mindex.index(l.get_text())]]) for i, l in enumerate(ax.get_yticklabels())]
    [l.set_color(cm[m_codes[mindex.index(l.get_text())]]) for i, l in enumerate(ax.get_xticklabels())]
    fig.show()
    fig.savefig(figure_path + ('method_matrix%s.pdf' % fileappend))

    # Show the highly correlated methods
    pq = PQ()
    pq_cross = PQ()
    for i in range(len(meths)):
        for j in range(i + 1, len(meths)):
            m1text = '(%s) %s' % (meths[i, 1], meths[i, 0])
            m2text = '(%s) %s' % (meths[j, 1], meths[j, 0])
            pq.put((-cmatrix[i, j], (m1text, m2text)))
            if meths[i, 1] != meths[j, 1]:
                pq_cross.put((-cmatrix[i, j], (m1text, m2text)))

    # Output the method correlations
    # Sets how many highly correlated methods should be displayed
    print_cap = 20
    moutfile = open(results_path + ('method_corrs%s.csv' % fileappend), 'w')
    print 'All methods:'
    for i in range(pq.qsize()):
        v, (m1, m2) = pq.get()
        if i < print_cap:
            print '%.2f & %s & %s\\\\' % (-v, m1, m2)
        moutfile.write('%.9f | %s | %s\n' % (-v, m1, m2))
    moutfile.close()
    moutfile = open(results_path + ('method_corrs_cross%s.csv' % fileappend), 'w')
    print 'Just cross methods:'
    for i in range(pq_cross.qsize()):
        v, (m1, m2) = pq_cross.get()
        if i < print_cap:
            print '%.2f & %s & %s\\\\' % (-v, m1, m2)
        moutfile.write('%.9f | %s | %s\n' % (-v, m1, m2))
    moutfile.close()

T = input()
#T = 1
for test in xrange(1, T + 1):
    N, Q = map(int, raw_input().split())
    #Q = 1
    # E S
    horses = [map(int, raw_input().split()) for x in xrange(N)]
    #print horses
    #adj = [map(int, raw_input().split()) for x in xrange(N)]
    edges = defaultdict(dict)
    for i in xrange(N):
        for j, v in enumerate(raw_input().split()):
            if int(v) != -1:
                edges[i][j] = int(v)
    routes = [map(int, raw_input().split()) for x in xrange(Q)]
    pq = PQ()
    results = []
    for u, v in routes:
        u -= 1
        v -= 1
        #print u, v
        #ests = defaultdict(lambda: float("inf"))
        seen = set()
        pq.put((0., u, horses[u][0], u))
        while not pq.empty():
            cost, curr, left, horse = pq.get(False)
            if (curr, horse) in seen:
                continue
            seen |= {(curr, horse)}
            speed = horses[horse][1]
            newLeft, newSpeed = horses[curr]