def findCountriesLang():
    """Find names of countries and their official languages for all countries in “North America”"""
    # pandas implementation: keep only official languages, then left-join them onto the country table.
    officialLang_df = countryLang_df[countryLang_df['IsOfficial'] == 'T']
    merged_df = country_df.merge(officialLang_df, how='left',
                                 left_on='Code', right_on='CountryCode')
    merged_df = (merged_df[merged_df['Continent'] == 'North America']
                 .loc[:, ['Name', 'Language']]
                 .sort_values(by=['Name']))
    # Countries with no official language come out of the left join as NaN.
    merged_df['Language'] = merged_df['Language'].fillna('None')
    languageLists = merged_df.groupby(['Name'])['Language'].apply(list)
    formatted_df = pd.DataFrame({
        'Name': np.unique(merged_df['Name']),
        'languages': languageLists
    })
    vals = formatted_df.values
    # For loop for sake of printing:
    for country in vals:
        print(country[0] + ', ' + listToString(country[1]))
    return None
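# listToString is defined elsewhere in this repo; the functions here assume it
# joins a list of values into a single comma-separated string. A minimal
# fallback sketch of that assumed behavior (only takes effect if the real
# helper is missing; its actual separator may differ):
try:
    listToString
except NameError:
    def listToString(items):
        # Hypothetical stand-in for the repo's real helper.
        return ', '.join(str(item) for item in items)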
def findCountriesLang():
    """Find names of countries and their official languages for all countries in “North America”"""
    url = ('https://hw1-dsci551-31309.firebaseio.com/world/country_nested.json'
           '?orderBy="Continent"&equalTo="North America"&print=pretty')
    response = requests.get(url)
    with open("performance.txt", "a") as perf_file:
        perf_file.write("firebase-B-nested: Requests Made - 1, Size of Download (Bytes) - ")
        perf_file.write(str(len(response.content)))
        perf_file.write("\n\n")
    countryNested_data = response.json()
    data = {}
    for code in countryNested_data.keys():
        lang_d = countryNested_data[code]['languages']
        official = []
        for lang in lang_d.keys():
            if lang_d[lang]['IsOfficial'] == "T":
                official.append(lang)
        data[countryNested_data[code]['Name']] = listToString(official)
    for k, v in data.items():
        if len(v) == 0:
            data[k] = 'None'
    data = dict(sorted(data.items()))
    for k, v in data.items():
        print(k + ', ' + v)
    return None
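# Note: server-side filtering with orderBy/equalTo in the Firebase Realtime
# Database REST API requires an index on the queried child key, otherwise the
# request fails with an "Index not defined" error. A hedged sketch of the
# rules entry this query would need (the exact path depends on how the data
# was imported):
#
# {
#   "rules": {
#     "world": {
#       "country_nested": { ".indexOn": ["Continent"] }
#     }
#   }
# }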
def findCountriesLang():
    """Find names of countries and their official languages for all countries in “North America”"""
    data = {}
    for country in country_data:
        if country['Continent'] == 'North America':
            country_code = country['Code']
            name = country['Name']
            lang_true = []
            for countryLang in countryLang_data:
                if countryLang['CountryCode'] == country_code and countryLang['IsOfficial'] == 'T':
                    lang_true.append(countryLang['Language'])
            data[name] = lang_true
    # Change empty to 'None':
    for k, v in data.items():
        if len(v) == 0:
            data[k] = ['None']
    data = dict(sorted(data.items()))
    for k, v in data.items():
        print(k + ', ' + listToString(v))
    return None
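# Hypothetical loader showing one way the plain-Python inputs (country_data,
# countryLang_data) could be fetched as lists of row dicts; the /world/country
# and /world/countrylanguage node names and the row layout are assumptions,
# not taken from this repo.
def _load_world_rows(base_url='https://hw1-dsci551-31309.firebaseio.com/world'):
    import requests

    def as_rows(obj):
        # A Firebase export may come back as a keyed dict or as a list with
        # null gaps; normalize either shape to a plain list of row dicts.
        return list(obj.values()) if isinstance(obj, dict) else [r for r in obj if r]

    country_rows = as_rows(requests.get(base_url + '/country.json').json())
    language_rows = as_rows(requests.get(base_url + '/countrylanguage.json').json())
    return country_rows, language_rows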
def findCountriesCapitals():
    """Find names of countries and their capital cities for all countries in “North America”"""
    # country.Capital holds the ID of the capital city, so join on city.ID.
    merged_df = country_df.merge(city_df, how='left', left_on='Capital',
                                 right_on='ID').rename(columns={'Name_x': 'CountryName',
                                                                'Name_y': 'CapitalName'})
    merged_df = merged_df.loc[:, ['CountryName', 'CapitalName', 'Continent']]
    vals = (merged_df[merged_df['Continent'] == 'North America'][['CountryName', 'CapitalName']]
            .sort_values(by=['CountryName']).values)
    # For loop for sake of printing:
    for country in vals:
        print(listToString(country))
    return None
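# The pandas versions above assume country_df, city_df, and countryLang_df are
# already loaded at module level. A hedged sketch of that setup (the CSV file
# names are assumptions, not taken from this repo):
# import pandas as pd
# import numpy as np
# country_df = pd.read_csv('country.csv')
# city_df = pd.read_csv('city.csv')
# countryLang_df = pd.read_csv('countrylanguage.csv')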
zPresp = resptf.root.zPresp.read()
mask = resptf.root.mask.read()

# Print matrix shapes
print("zRresp shape (num time points, num voxels): ", zRresp.shape)
print("zPresp shape (num time points, num voxels): ", zPresp.shape)
print("mask shape (Z, Y, X): ", mask.shape)

R_texts = []
P_texts = []
R_voxels_to_be_removed = []
P_voxels_to_be_removed = []

# Join each training-stimulus word list into one string; remember the indices
# of empty time points so they can be dropped later.
for index in range(len(Rstim)):
    sList = Rstim[index]
    if len(sList) > 0:
        R_texts.append(listToString(sList))
    else:
        R_voxels_to_be_removed.append(index)

# Not sure if we need to add held-out set stimuli
for index in range(len(Pstim)):
    sList = Pstim[index]
    if len(sList) > 0:
        P_texts.append(listToString(sList))
    else:
        P_voxels_to_be_removed.append(index)

print("Number of R sentences: ")
print(len(R_texts))
print("Number of P sentences: ")
print(len(P_texts))
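# The block above assumes resptf is an already-open PyTables file holding the
# z-scored response matrices, and that zRresp was read from it earlier in the
# same way. A hedged sketch of that setup (the file name is an assumption):
# import tables
# resptf = tables.open_file('responses.hf5', mode='r')
# zRresp = resptf.root.zRresp.read()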