def test_invert(self):
    mode = "L"
    size = (4, 4)
    invert = transforms.Compose([Invert(), transforms.ToTensor()])
    convert = transforms.ToTensor()
    a = Image.fromarray(np.eye(size[0], size[1], dtype=np.uint8), mode=mode)

    # Invert L
    img = Image.fromarray(np.arange(0, 255, 16, dtype=np.uint8).reshape(size), mode=mode)
    inv = Image.fromarray(np.arange(255, 0, -16, dtype=np.uint8).reshape(size), mode=mode)
    assert torch.equal(invert(img), convert(inv))

    # Invert LA
    img.putalpha(a)
    inv.putalpha(a)
    assert torch.equal(invert(img), convert(inv))

    # Invert RGB
    r = Image.fromarray(np.arange(0, 255, 16, dtype=np.uint8).reshape(size), mode=mode)
    g = Image.fromarray(np.arange(255, 0, -16, dtype=np.uint8).reshape(size), mode=mode)
    b = Image.fromarray(np.arange(127, 0, -8, dtype=np.uint8).reshape(size), mode=mode)
    img = Image.merge('RGB', (r, g, b))
    r = Image.fromarray(np.arange(255, 0, -16, dtype=np.uint8).reshape(size), mode=mode)
    g = Image.fromarray(np.arange(0, 255, 16, dtype=np.uint8).reshape(size), mode=mode)
    b = Image.fromarray(np.arange(128, 255, 8, dtype=np.uint8).reshape(size), mode=mode)
    inv = Image.merge('RGB', (r, g, b))
    assert torch.equal(invert(img), convert(inv))

    # Invert RGBA
    img.putalpha(a)
    inv.putalpha(a)
    assert torch.equal(invert(img), convert(inv))

    # Checking if Invert can be printed as string
    Invert().__repr__()
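# The torchvision-style snippets in this collection assume a custom Invert transform that flips
# pixel intensities and leaves any alpha channel untouched. Its definition is not shown here;
# the class below is only a minimal sketch of such a transform (assuming Pillow's
# ImageOps.invert for the colour channels), not the actual implementation being tested above.
from PIL import ImageOps


class Invert(object):
    """Invert the colours of a PIL image, preserving the alpha channel if present."""

    def __call__(self, img):
        if img.mode in ("LA", "RGBA"):
            # Invert only the colour channels and re-attach the original alpha band.
            alpha = img.getchannel("A")
            inverted = ImageOps.invert(img.convert(img.mode[:-1]))
            inverted.putalpha(alpha)
            return inverted
        return ImageOps.invert(img)

    def __repr__(self):
        return self.__class__.__name__ + "()"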
def __init__(self, auto=False):
    """
    Initializes the application and sets the default values
    for stopword and stemming
    """
    self.stopword_toggle = False
    self.stemming_toggle = False
    self.posting_list = {}
    self.term_dictionary = {}
    self.search_times = []
    self.invert = Invert()
    self.load_files()
    if not auto:
        self.search_user_input()
def add_text_to_image(ten, text, which="orig", dis=None):
    from PIL import ImageFont
    from PIL import ImageDraw

    img = transforms.ToPILImage(mode='RGB')(ten)
    img = Invert()(img)
    draw = ImageDraw.Draw(img)
    # font = ImageFont.truetype(<font-file>, <font-size>)
    sfont = ImageFont.truetype("Vera.ttf", 9)
    font = ImageFont.truetype("Vera.ttf", 11)
    draw.text((0, 0), text, (0, 0, 0), font=sfont)
    if which is not None:
        draw.text((225, 225), which, (0, 0, 0), font=font)
    if dis is not None:
        draw.text((0, 225), "edit: " + dis, (0, 0, 0), font=font)
    # convert() returns a new image, so the result must be reassigned
    img = img.convert('RGB')
    return transforms.ToTensor()(img).float().view(1, 3, 256, 256)
def generate_data_loader(root, batch_size, data_size):
    invert = transforms.Compose([
        Invert(),
        transforms.ToTensor()
    ])
    # kwargs (e.g. num_workers, pin_memory) is expected to be defined in the enclosing scope
    return torch.utils.data.DataLoader(
        ImageFolderWithFile(root, transform=invert),
        batch_size=batch_size,
        shuffle=False,
        drop_last=True,
        sampler=torch.utils.data.SubsetRandomSampler(list(range(0, data_size))),
        **kwargs)
def transform_image(image_bytes):
    my_transforms = transforms.Compose([transforms.Resize((28, 28)),
                                        transforms.Grayscale(1),
                                        Invert(),
                                        transforms.ToTensor(),
                                        transforms.Normalize((0.5,), (0.5,))])
    image = Image.open(io.BytesIO(image_bytes))
    return my_transforms(image).unsqueeze(0)
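# Illustrative use of transform_image above (an assumption, not part of the original code):
# read raw image bytes from disk and push them through the 28x28 grayscale/invert/normalize
# pipeline. "digit.png" is a placeholder file name.
with open("digit.png", "rb") as fh:
    batch = transform_image(fh.read())
print(batch.shape)  # expected: torch.Size([1, 1, 28, 28])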
def resize(a):
    resize = T.Compose([
        T.ToPILImage(),
        T.Grayscale(num_output_channels=1),
        Invert(),
        T.ToTensor(),
        T.Normalize((0.5, ), (0.5, ))
    ])
    return resize(np.uint8(a))
def ColumnModelCal(self):

    # Calculate angle between wind direction and canyon orientation (theta_S) [deg]
    theta_S = (360 + abs(self.theta_can - self.ForcWindDir)) % 90
    # Road roughness
    z0g = 0.05
    # Roof roughness
    z0r = 0.15
    # Gas constant for dry air [J kg^-1 K^-1]
    r = 287.04
    rcp = r / self.Cp

    # Define explicit and implicit parts of source and sink terms
    srex_vx = numpy.zeros(self.nz)      # Explicit part of x component of horizontal wind speed [m s^-2]
    srim_vx = numpy.zeros(self.nz)      # Implicit part of x component of horizontal wind speed [s^-1]
    srex_vy = numpy.zeros(self.nz)      # Explicit part of y component of horizontal wind speed [m s^-2]
    srim_vy = numpy.zeros(self.nz)      # Implicit part of y component of horizontal wind speed [s^-1]
    srex_tke = numpy.zeros(self.nz)     # Explicit part of turbulent kinetic energy [m^2 s^-3]
    srim_tke = numpy.zeros(self.nz)     # Implicit part of turbulent kinetic energy [s^-1]
    srex_th = numpy.zeros(self.nz)      # Explicit part of potential temperature [K s^-1]
    srim_th = numpy.zeros(self.nz)      # Implicit part of potential temperature [s^-1]
    srex_qn = numpy.zeros(self.nz)      # Explicit part of specific humidity [K s^-1] ?????
    srim_qn = numpy.zeros(self.nz)      # Implicit part of specific humidity [s^-1] ?????
    srex_th_veg = numpy.zeros(self.nz)  # Explicit part of potential temperature caused by vegetation
    srex_qn_veg = numpy.zeros(self.nz)  # Explicit part of specific humidity caused by vegetation
    Tveg = numpy.zeros(self.nz)

    # Apply boundary conditions at the top of the domain using vertical diffusion model (VDM) outputs
    self.th[self.nz-1] = self.ForcTemp
    self.qn[self.nz-1] = self.ForcHum
    self.vx[0] = 0.001
    self.vy[0] = 0.001
    self.tke[0] = 0.00001

    # Calculate bulk Richardson number (Ri_b):
    # Ri_b = (g*H/((Uroof - Ustreet)^2+(Vroof - Vstreet)^2))*(Troof - Tstreet)/Tavg (equation 6, Aliabadi et al., 2018)
    delU = ((self.vx[0]-self.vx[self.nz_u+1])**2 + (self.vy[0]-self.vy[self.nz_u+1])**2)
    # The denominator of the fraction must not be zero, so a minimum value is enforced
    delU = max(delU, 0.1)
    # For calculation of Rib, option 1: surface temperature difference
    # delT = self.RoofTemp-self.RoadTemp
    # For calculation of Rib, option 2: air temperature difference
    delT = self.th[self.nz_u+1] - self.th[1]
    Ri_b = ((self.g*self.hmean)/delU)*(delT/numpy.mean(self.th[0:self.nz_u]))

    # Calculate turbulent diffusion coefficient (Km) [m^2 s^-1]
    TurbDiff = CdTurb(self.nz, self.Ck, self.tke, self.dlk, self.it, Ri_b, self.var_sens)
    Km = TurbDiff.TurbCoeff()

    # Road surface temperature [K]
    ptg = self.RoadTemp
    # Wall surface temperature [K]
    ptw = self.WallTemp
    # Roof surface temperature [K]
    ptr = self.RoofTemp

    # Call "BuildingCol" to calculate sink and source terms in the momentum, temperature and
    # turbulent kinetic energy (TKE) equations which are caused by buildings
    BuildingCoef = BuildingCol(self.nz, self.dz, self.dt, self.vol, (1-self.VegCoverage), self.lambdap,
                               self.lambdaf, self.hmean, self.Ck, self.Cp, self.th0, self.vx, self.vy,
                               self.th, self.Cdrag, ptg, ptr, ptw, self.rho, self.nz_u, self.pb, self.ss,
                               self.g, z0g, z0r, self.SensHt_HVAC, self.HVAC_street_frac, self.HVAC_atm_frac)

    # Calculate shear production [m^2 s^-3] in TKE equation. (Term II of equation 5.2, Krayenhoff 2014, PhD thesis)
    Shear_Source = Shear(self.nz, self.dz, self.vx, self.vy, Km)
    sh = Shear_Source.ShearProd()

    # Calculate buoyant production [m^2 s^-3] in TKE equation. (Term IX of equation 5.2, Krayenhoff 2014, PhD thesis)
    Buoyancy_Source = Buoyancy(self.nz, self.dz, self.th, Km, self.th0, self.prandtl, self.g)
    bu = Buoyancy_Source.BuoProd()

    # Calculate dissipation (td) [s^-1] in TKE equation. (Term VI of equation 5.2, Krayenhoff 2014, PhD thesis)
    # Parameterization of dissipation is based on Nazarian's code:
    # https://github.com/nenazarian/MLUCM/blob/master/Column_Model/column_lkPro.f90
    td = numpy.zeros(self.nz)
    for i in range(0, self.nz):
        if self.dls[i] != 0:
            td[i] = -self.Ceps*(math.sqrt(self.tke[i]))/self.dls[i]
        else:
            td[i] = 0
        sh[i] = sh[i]*self.sf[i]
        bu[i] = bu[i]*self.sf[i]

    # Return sink and source terms caused by buildings
    srex_vx_h, srex_vy_h, srex_tke_h, srex_th_h, srim_vx_v, srim_vy_v, srex_tke_v, srim_th_v, \
        srex_th_v, sff, swf, ustarCol = BuildingCoef.BuildingDrag()

    # Friction velocity (Aliabadi et al., 2018)
    ustar = 0.07 * self.ForcWind + 0.12

    # Calculate pressure gradient
    C_dpdx = 5.4
    dpdx = C_dpdx*self.rho[self.nz-1]*(ustar**2)*math.cos(math.radians(theta_S))/(self.dz*self.nz)
    dpdy = C_dpdx*self.rho[self.nz-1]*(ustar**2)*math.sin(math.radians(theta_S))/(self.dz*self.nz)

    # Latent heat of vaporization [J kg^-1]
    latent = 2.45e+06
    # Latent heat of vaporization [J mol^-1] (Campbell and Norman, 1998)
    latent2 = 44100
    # The average surface and boundary-layer conductance for humidity for the whole leaf
    gvs = 0.330
    # Set leaf dimension of trees
    leaf_width = 0.05
    leaf_dim = 0.72 * leaf_width
    # Air pressure [Pa]
    pr = 101300
    # Total neighbourhood foliage clumping [non dimensional]
    omega = 1
    # Molar heat capacity [J mol^-1 K^-1] (Campbell and Norman, 1998)
    cp_mol = 29.3
    # Drag coefficient for vegetation foliage
    cdv = 0.2
    omega_drag = 0.34

    # Calculate source and sink terms caused by trees and then calculate total source and sink terms
    for i in range(0, self.nz):
        # Source/sink terms of specific humidity
        wind = numpy.sqrt(self.vx[i] ** 2 + self.vy[i] ** 2)
        # Boundary-layer conductance for vapour (p. 101, Campbell and Norman, 1998)
        gva = 1.4 * 0.147 * numpy.sqrt(wind / leaf_dim)
        # Overall vapour conductance for leaves [mol m^-2 s^-1] (equation 14.2, Campbell and Norman, 1998):
        gv = gvs * gva / (gvs + gva)
        # Conductance for heat [mol m^-2 s^-1]
        gHa = 1.4 * 0.135 * numpy.sqrt(wind / leaf_dim)
        # Since a leaf has two sides in parallel, gHa should be multiplied by 2
        gHa = gHa * 2
        # Convert potential air temperature to real temperature [K]
        # potential temperature = real temperature * (P0/P)^(R/cp)
        tair = self.th[i] / (pr / 1.e+5) ** (-rcp)
        # Convert absolute humidity to vapour pressure [Pa]
        eair = self.qn[i] * pr / 0.622
        # Saturation vapour pressure [Pa] (equation 7.5.2d, Stull 1988)
        es = 611.2 * numpy.exp(17.67 * (tair - 273.16) / (tair - 29.66))
        D = es - eair
        desdT = 0.622 * latent * es / r / (tair) ** 2
        s = desdT / pr

        # Calculate terms in transport equations caused by trees. "wt" is the term in the temperature
        # equation and "wt_drag" is the term in the TKE and momentum equations. It is assumed there is
        # no vegetation above average building height.
        if self.dz * i > max(self.h_LAD):
            wt = 0        # [m^2 m^-3]
            wt_drag = 0   # [m^2 m^-3]
        else:
            wt = self.f_LAD(self.dz * i) * omega * (1 - self.lambdap) / self.vol[i]             # [m^2 m^-3]
            wt_drag = self.f_LAD(self.dz * i) * omega_drag * (1. - self.lambdap) / self.vol[i]  # [m^2 m^-3]

        # Stefan-Boltzmann constant [W m^-2 K^-4]
        sigma = 5.67e-8
        gam = 6.66e-4
        # Emissivity of leaf surfaces
        emveg = 0.95
        # Total fraction scattered by leaves: reflected & transmitted
        albv_u = 0.5
        fact = 1
        # Total radiation absorbed by leaves [W m^-2]
        Rabs = (1-albv_u)*self.S_t + self.L_t*emveg
        gr = 4 * emveg * sigma * tair ** 3 / cp_mol
        gr = gr * 2. * omega * fact
        sides = 2. * omega * fact
        gHr = gHa + gr
        gamst = gam * gHr / gv
        # Calculate temperature of vegetation [K]
        tveg_tmp = tair + gamst/(s+gamst)*((Rabs - sides*emveg*sigma*(tair**4))/gHr/cp_mol - D/pr/gamst)
        Tveg[i] = tveg_tmp

        # Calculate terms in temperature and humidity equations caused by trees
        if self.dz * i > max(self.h_LAD):
            srex_th_veg[i] = 0
            srex_qn_veg[i] = 0
        else:
            srex_th_veg[i] = cp_mol*gHa*tveg_tmp*wt/self.Cp/self.rho[i]
            srex_qn_veg[i] = (latent2*gv*(s*(tveg_tmp-tair)+es/pr))*wt/self.rho[i]/latent

        # Calculate total explicit terms
        # Explicit term in x momentum equation [m s^-2] = fluxes from horizontal surfaces + pressure gradient
        srex_vx[i] = srex_vx_h[i] + dpdx
        # Explicit term in y momentum equation [m s^-2] = fluxes from horizontal surfaces + pressure gradient
        srex_vy[i] = srex_vy_h[i] + dpdy
        # Explicit term in TKE equation [m^2 s^-3] = terms from urban horizontal surfaces +
        # terms from walls [m^2 s^-3] + shear production [m^2 s^-3] + buoyant production [m^2 s^-3] +
        # term caused by vegetation [m^2 s^-3]
        srex_tke[i] = srex_tke_h[i] + srex_tke_v[i] + sh[i] + bu[i] + cdv*wind**3.*wt_drag
        # Explicit term in temperature equation [K s^-1] = term from urban horizontal surfaces [K s^-1] +
        # term from walls [K s^-1] + term caused by vegetation [K s^-1]
        srex_th[i] = srex_th_h[i] + srex_th_v[i] + srex_th_veg[i]  # + 4*rho_abs*kbs*(1-self.lambdap)*self.L_abs/self.rho/self.Cp/self.vol[i]
        # Explicit term in humidity equation [K s^-1] = term caused by latent heat from vegetation [K s^-1]
        srex_qn[i] = srex_qn[i] + srex_qn_veg[i]

        # Calculate total implicit terms
        # Implicit term in x momentum equation [s^-1] = term from walls [s^-1] - term caused by vegetation [s^-1]
        srim_vx[i] = srim_vx_v[i] - cdv*wind*wt_drag
        # Implicit term in y momentum equation [s^-1] = term from walls [s^-1] - term caused by vegetation [s^-1]
        srim_vy[i] = srim_vy_v[i] - cdv*wind*wt_drag
        # Implicit term in TKE equation [s^-1] = dissipation [s^-1] - term caused by vegetation [s^-1]
        srim_tke[i] = td[i] - 6.5*cdv*wind*wt_drag
        # Implicit term in temperature equation [s^-1] = term from wall [s^-1] - term caused by vegetation [s^-1]
        srim_th[i] = srim_th_v[i] - cp_mol*gHa*wt/self.Cp/self.rho[i]
        # Implicit term in humidity equation [s^-1] = term caused by latent heat from vegetation [s^-1]
        srim_qn[i] = srim_qn[i] - latent2*gv*(pr/0.622)/pr*wt/self.rho[i]/latent

    # Solve transport equations
    # Set type of boundary conditions (B.C.s):
    # Neumann boundary condition (flux): iz = 1
    # Dirichlet boundary condition (constant value): iz = 2
    # Sol.Solver(B.C. at the bottom of domain)
    Sol = Diff(self.nz, self.dt, self.sf, self.vol, self.dz, self.rho)

    # Solve x component of momentum equation
    A_vx = Sol.Solver21(2, 1, self.vx, srim_vx, srex_vx, Km)[0]
    RHS_vx = Sol.Solver21(2, 1, self.vx, srim_vx, srex_vx, Km)[1]
    Inv_vx = Invert(self.nz, A_vx, RHS_vx)
    self.vx = Inv_vx.Output()

    # Solve y component of momentum equation
    A_vy = Sol.Solver21(2, 1, self.vy, srim_vy, srex_vy, Km)[0]
    RHS_vy = Sol.Solver21(2, 1, self.vy, srim_vy, srex_vy, Km)[1]
    Inv_vy = Invert(self.nz, A_vy, RHS_vy)
    self.vy = Inv_vy.Output()

    # Solve TKE equation
    A_tke = Sol.Solver21(2, 1, self.tke, srim_tke, srex_tke, Km)[0]
    RHS_tke = Sol.Solver21(2, 1, self.tke, srim_tke, srex_tke, Km)[1]
    Inv_tke = Invert(self.nz, A_tke, RHS_tke)
    self.tke = Inv_tke.Output()

    # Solve temperature equation
    A_th = Sol.Solver12(1, 2, self.th, srim_th, srex_th, Km/self.prandtl)[0]
    RHS_th = Sol.Solver12(1, 2, self.th, srim_th, srex_th, Km/self.prandtl)[1]
    Inv_th = Invert(self.nz, A_th, RHS_th)
    self.th = Inv_th.Output()

    # Solve specific humidity equation
    A_qn = Sol.Solver12(1, 2, self.qn, srim_qn, srex_qn, Km/self.schmidt)[0]
    RHS_qn = Sol.Solver12(1, 2, self.qn, srim_qn, srex_qn, Km/self.schmidt)[1]
    Inv_qn = Invert(self.nz, A_qn, RHS_qn)
    self.qn = Inv_qn.Output()

    # Set a minimum value for turbulent kinetic energy which avoids trapping of heat at street level
    for i in range(0, self.nz):
        if self.tke[i] < 1e-3:
            self.tke[i] = 1e-3

    return self.vx, self.vy, self.tke, self.th, self.qn, ustarCol, Km, tveg_tmp, Ri_b, Tveg
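# ColumnModelCal above uses a different Invert: a small linear-system solver that takes the
# coefficient matrix A and right-hand side RHS assembled by Diff.Solver21/Solver12 and returns
# the updated vertical profile via Output(). That class is not shown in this collection; the
# sketch below only illustrates the interface, under the assumption that A is a dense nz x nz
# array, using numpy.linalg.solve. It is not the actual column-model solver.
import numpy


class Invert(object):
    def __init__(self, nz, A, RHS):
        self.nz = nz
        self.A = numpy.asarray(A, dtype=float)
        self.RHS = numpy.asarray(RHS, dtype=float)

    def Output(self):
        # Solve A * x = RHS for the new profile of length nz
        return numpy.linalg.solve(self.A, self.RHS)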
import re
import sys
import time
import json
import pprint
import stemmer
import invert
from stemmer import PorterStemmer
from invert import Invert

invert = Invert()
keep_loop = ''
while keep_loop != 'ZZEND'.lower():
    start_time = time.clock()
    stop_question = raw_input("would you like stop words? (yes/no)")
    stem_question = raw_input("would you like stemming? (yes/no)")

    def parse_cacm(dictionary):
        f = open("cacm/cacm.all", "r")
        regexI = r"^[.]+[I]\s"
        regexT = r"^[.]+[T]\s"
        regexW = r"^[.]+[W]\s"
        regexB = r"^[.]+[B]\s"
        regexA = r"^[.]+[A]\s"
        regexN = r"[.]+[N]\s"
        regex = r"[.]+[A-Z]\s"
        for line in f:
            if re.match(regexI, line):
                x = line.split()
                doc = x[0]
class Test:

    def __init__(self, auto=False):
        """
        Initializes the application and sets the default values
        for stopword and stemming
        """
        self.stopword_toggle = False
        self.stemming_toggle = False
        self.posting_list = {}
        self.term_dictionary = {}
        self.search_times = []
        self.invert = Invert()
        self.load_files()
        if not auto:
            self.search_user_input()
        # self.k_value = 10

    def search_user_input(self):
        """
        gets user input and acts accordingly to the provided term
        strips the leading spaces
        Note: all term searches are not case sensitive
        special key terms
            stopword: toggles the use of stop words
            stemming: toggles the use of stemming
            HELP: help menu
            ZZEND: exits the program
        :return:
        """
        data = input('Enter a search term or stopword to toggle stopwords\n')
        while data is not None:
            query = data.lower().rstrip()
            if 'HELP' in data:
                print('Enter stopword to toggle the use of stopwords')
                print('Enter stemming to toggle the use of stemming')
                print('Enter ZZEND to exit')
            elif 'stopword' in query:
                use_stop_words = input('use stop words? (y/n)\n')
                while use_stop_words is not None:
                    if use_stop_words.lower() == 'y':
                        self.stopword_toggle = True
                        self.load_files()
                        print('Stopwords are being used in the search')
                        print(len(self.posting_list))
                        break
                    elif use_stop_words.lower() == 'n':
                        self.stopword_toggle = False
                        self.load_files()
                        print('Stopwords are not being used in the search')
                        break
                    use_stop_words = input('please enter y or n\n')
            elif 'stemming' in query:
                use_stemming = input('stem the words? (y/n)\n')
                while use_stemming is not None:
                    if use_stemming.lower() == 'y':
                        print(len(self.posting_list))
                        self.stemming_toggle = True
                        self.load_files()
                        print('the words are now being stemmed in search')
                        break
                    elif use_stemming.lower() == 'n':
                        self.stemming_toggle = False
                        self.load_files()
                        print('the words are not being stemmed in search')
                        break
                    use_stemming = input('please enter y or n\n')
            elif data == 'ZZEND':
                average_search_time = round(sum(self.search_times) / len(self.search_times), 3)
                print('Average search time:', average_search_time, ' seconds')
                print('exiting program')
                exit()
            else:
                found_documents = self.search_term(query)
                if len(found_documents) > 0:
                    print(json.dumps(found_documents, indent=4, sort_keys=True))
                else:
                    print('No results found for ' + query)
            data = input('Enter a search term or HELP for more options\n')

    def load_files(self):
        """
        calls the create_posting_list function in invert using the stopword and stemming toggles
        imports the files created by invert into dictionaries
        """
        self.invert.create_posting_list(self.stopword_toggle, self.stemming_toggle)
        self.invert.format_ranking_list()
        f = open('posting-list.json', 'r')
        self.posting_list = json.load(f)
        f.close()
        f = open('dictionary.json', 'r')
        self.term_dictionary = json.load(f)
        f.close()

    def search_term(self, word):
        """
        searches for the term provided by the user
        uses the stemming toggle to stem the provided term
        formats the search result to provide:
            doc_id: id of the document
            title: document title
            term_frequency: number of times the term appeared in documents
            positions: array of the index of each term in the document
            summary: summary of the document with 10 characters around the first instance of the term
        :param word: term to search
        :return: prints out the time taken, the results in pretty-print json,
                 and the number of documents the term appeared in
        """
        k_value = 10
        processed_query = self.process_query(word)
        document_ranking = {}
        query_vector = numpy.sqrt(numpy.sum(numpy.square(list(processed_query.values()))))
        for doc_id, term_weights in self.invert.vector_space_dictionary.items():
            doc_vector = numpy.sqrt(numpy.sum(numpy.square(list(term_weights.values()))))
            if query_vector == 0 or doc_vector == 0:
                document_ranking[doc_id] = 0.0
                continue
            dot_product = 0
            for word, weight in processed_query.items():
                if word in term_weights.keys():
                    dot_product += (weight * term_weights[word])
            cosine_similarity = dot_product / (query_vector * doc_vector)
            document_ranking[doc_id] = cosine_similarity
        document_ranking = dict(sorted(document_ranking.items(), key=operator.itemgetter(1), reverse=True))
        found_documents = []
        for doc_id, similarity in document_ranking.items():
            if len(found_documents) == k_value:
                break
            if similarity <= 0 or numpy.isnan(similarity):
                continue
            document = {
                'ranking': len(found_documents) + 1,
                'doc_id': doc_id,
                'title': self.invert.documents[doc_id]['title'],
                'author': self.invert.documents[doc_id]['author'],
            }
            found_documents.append(document)
        return found_documents

    def process_query(self, query):
        all_doc_count = len(self.invert.documents.keys())
        query_array = [x.lower() for x in query.split(' ')]
        query_weights = {}
        stopwords = []
        if self.stopword_toggle:
            stopwords = fetch_stopwords()
        while query_array:
            word = query_array.pop(0)
            frequency = 1
            for a in [',', '.', '{', '}', '(', ')', ';', ':', '"', '\'']:
                if a in word:
                    if word.index(a) == 0 or word.index(a) == len(word) - 1:
                        word = word.replace(a, '')
            while word in query_array:
                query_array.pop(query_array.index(word))
                frequency += 1
            if self.stemming_toggle:
                p = PorterStemmer()
                word = p.stem(word, 0, len(word) - 1)
            if word in stopwords:
                continue
            term_weight = 0
            if word in self.invert.termsDictionary.keys():
                document_frequency = self.invert.termsDictionary[word]
                idf = math.log(all_doc_count / document_frequency)
                term_frequency = 1 + math.log(frequency)
                term_weight = idf * term_frequency
            query_weights[word] = term_weight
        return query_weights
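# The ranking in search_term/process_query above is standard tf-idf with cosine similarity:
# term weight = log(N / df) * (1 + log(tf)), and documents are scored by
# dot(query, doc) / (|query| * |doc|). The numbers below are a made-up toy check of that
# arithmetic, not data from the CACM collection.
import math

N, df, tf = 100, 10, 3
weight = math.log(N / df) * (1 + math.log(tf))  # idf * tf weight for one query term

query_vec = {"sort": weight}
doc_vec = {"sort": 1.2, "merge": 0.8}
dot = sum(w * doc_vec.get(t, 0.0) for t, w in query_vec.items())
norm_q = math.sqrt(sum(w * w for w in query_vec.values()))
norm_d = math.sqrt(sum(w * w for w in doc_vec.values()))
cosine = dot / (norm_q * norm_d)
print(round(cosine, 3))  # approx. 0.832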
img_name = dirs
actual = dirs[0:dirs.index('-')]
pred = ''
dict1 = {}
print(img_name)
for filename in sorted(os.listdir(os.path.join(input_dir, dirs))):
    img = Image.open(os.path.join(input_dir, dirs, filename))
    # plt.imshow(img, cmap=cm.gray)
    # plt.show()
    # print(img.size)
    # img = normalize(img)
    img = torch.stack(
        [transforms.Compose([transforms.Resize((32, 32)),
                             Invert(),
                             transforms.ToTensor()])(img)])
    # print(img)
    # print(img.shape)
    # plt.imshow(transforms.ToPILImage()(img[0]), cmap=cm.gray)
    # plt.show()
    # plt.imshow(transforms.ToPILImage()(img[0]))
    # plt.show()
    prediction = model(img)
    _, prediction = torch.max(prediction.data, 1)
    # print(str(prediction.item()))
    pred = pred + str(prediction.item())
    # transforms.RandomHorizontalFlip(),  # random horizontal flip
    transforms.ToTensor(),
])

transform2 = transforms.Compose([
    transforms.Resize((IMG_SIZE2, IMG_SIZE2)),
    transforms.Grayscale(num_output_channels=1),  # convert to grayscale
    # Invert(),
    # transforms.RandomHorizontalFlip(),  # random horizontal flip
    transforms.ToTensor(),
])

transform3 = transforms.Compose([
    transforms.Resize((IMG_SIZE1, IMG_SIZE1)),
    transforms.Grayscale(num_output_channels=1),  # convert to grayscale
    Invert(),
    # transforms.RandomHorizontalFlip(),  # random horizontal flip
    transforms.ToTensor(),
])


# Return the distance to each class
def get_distance(vector, Mean_Vectors):
    result = []
    for key in Mean_Vectors.keys():
        dis = torch.nn.functional.pairwise_distance(vector, Mean_Vectors[key].unsqueeze(0)).item()
        result.append(dis)
    return result
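# Illustrative use of get_distance above (an assumption, not part of the original code):
# classify a feature vector by the nearest class mean, i.e. the key whose mean vector has the
# smallest pairwise distance. Mean_Vectors maps class labels to 1-D feature tensors of the
# same dimensionality as the query vector.
import torch

Mean_Vectors = {0: torch.tensor([0.0, 0.0]), 1: torch.tensor([1.0, 1.0])}
vector = torch.tensor([[0.9, 1.1]])              # one query vector, shape (1, feature_dim)
distances = get_distance(vector, Mean_Vectors)   # one distance per class, in key order
predicted = list(Mean_Vectors.keys())[distances.index(min(distances))]
print(predicted)  # expected: 1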