def get_restaurants_from_coordinate(coords):
    """Fetch restaurants near a coordinate and return them ranked.

    Args:
        coords: mapping with 'lat' and 'lng' keys.

    Returns:
        Ranked list of restaurant entries, or [] when the lookup
        returns nothing.
    """
    rests = get_restaurants.get_restaurants(
        coords['lat'], coords['lng'], pmin=1, pmax=5)
    # Fix: compare to None by identity (PEP 8), not `== None`.
    if rests is None:
        return []
    # Enrich entries with word and sentiment features before ranking.
    rests = unique_word_builder.build_words_entry(rests)
    rests = sentiment_builder.build_sent_entry(rests)
    return ranking.rank(rests)
def _ranking_response(txn, iostatus, api_name, request_msg, response_msg):
    """Handle a ranking service response: score, rank, adjust TTLs, and
    store the resulting records on the transaction's shared data.

    Returns False on IO error, True otherwise.
    """
    if iostatus != Txn.IO_OK:
        logger.error("Ranking_E2", "Ranking response IO error: {}".format(iostatus))
        return False
    #print "response: {}".format(response_msg)
    sharedData = txn.data()
    # If the total records count <= 1, just store and return
    # (nothing to rank with a single candidate).
    nrecords = len(response_msg.result)
    if nrecords <= 1:
        for record in response_msg.result:
            sharedData.rankingRecord.add(ip = record.ip, ttl = record.ttl)
        return True
    # Do the score and rank
    global CFG_LOW_LATENCY_BAR
    global CFG_LATENCY_FACTOR
    scoringResults = ranking.score(response_msg.result)
    rankingResults, filtered = ranking.rank(scoringResults, CFG_LOW_LATENCY_BAR, CFG_LATENCY_FACTOR)
    logger.info("Ranking", "question: {} ,total: {} ,filtered {} ,Results: {}".format(
        request_msg.question, len(scoringResults), filtered, rankingResults))
    global CFG_MIN_TTL
    for record in rankingResults:
        # Recalculate ttl when there is no scoring information and ttl > CFG_MIN_TTL
        ttl = record['ttl']
        latency = record['avgLatency']
        httpInfoCnt = record['httpInfoCnt']
        # Cap the TTL proportionally to how much HTTP info we have.
        new_ttl = CFG_MIN_TTL * httpInfoCnt
        if new_ttl > 0:
            ttl = min([new_ttl, ttl])
        # No latency data at all: fall back to the minimum TTL; otherwise
        # clamp to the configured maximum.
        if latency == 0 and ttl > CFG_MIN_TTL:
            ttl = CFG_MIN_TTL
        elif ttl > CFG_MAX_TTL:
            ttl = CFG_MAX_TTL
        sharedData.rankingRecord.add(ip = record['ip'], ttl = ttl)
    return True
def get_designs(self, partition):
    """Collect a design from every language that can express the given
    partition, then return the designs ranked."""
    candidates = [
        language.design(partition)
        for language in _languages
        if language.can_express(partition)
    ]
    return rank(partition, candidates)
def get_designs(self, partition):
    """Return ranked designs for *partition* from all capable languages."""
    designs = []
    for lang in _languages:
        # Skip languages that cannot express this partition.
        if not lang.can_express(partition):
            continue
        designs.append(lang.design(partition))
    return rank(partition, designs)
if resample:
    # Run the MCMC sampler (4 chains x 4000 iterations) and cache the
    # extracted draws to disk so later runs can skip sampling.
    fit = sm.sampling(data=stan_data, iter=4000, chains=4, n_jobs = 1)
    la = fit.extract(permuted = True)
    with open("la_complex.pkl", 'wb') as f:
        pickle.dump(la, f)
else:
    # Reuse previously cached draws.
    # NOTE(review): pickle.load is unsafe on untrusted files — confirm this
    # cache is only ever produced locally by the branch above.
    with open("la_complex.pkl", 'rb') as f:
        la = pickle.load(f)

#Extract mcmc samples for the parameters of interest
beta = la['beta']
z_rank = la['z_rank']

##Post process and visualization
mean_rank = rk.rank(beta.mean(0), axis = -1) #Get the posterior mean rank
all_ranks = rk.rank(beta, axis = -1) #Rank all the beta samples
#
#
#
###Main plots
#
#
##Plot the raw data
cond_order = ['C1', 'C2', 'C3']
#
#
# fig, ax = plt.subplots(1, len(conds), sharex = True, sharey = True, figsize = (6, 2.5))
#
#
# for i, c in enumerate(cond_order):
#     print(i)
#     plot_raw_data_complex(conds, c, stan_data, ax = ax[i])
#     ax[i].set_xlim([0,1])
# parse the tags to get the name of the input file and the respective
# climbing and shooting weights
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", default="./report.csv")
parser.add_argument("-s", "--shooter", type=float, help="shooter weighting factor", default=1)
parser.add_argument("-c", "--climber", type=float, help="climber weighting factor", default=1)
args = parser.parse_args()

# Fix: parse the CSV once and reuse it (the original parsed the file twice).
data = csvtojson.csvtojson(args.file)

# Compute the ranking; column 0 of each row holds the team identifier,
# rows are ordered worst-to-best.
r = numpy.array(ranking.rank(data, args.climber, args.shooter))

# Dump the parsed data for later inspection; `with` guarantees the file
# is closed even on error (the original used bare open/close).
with open("report.json", "w") as f:
    f.write(json.dumps(data))

# Write the ranking best-first, one team per line.
with open("rankings", "w") as f:
    f.write(''.join(str(x) + '\n' for x in r[:, 0][::-1]))
def run(self):
    """Score metaphase images, copy each segmented image into a per-score
    folder, sync chromosome counts with the per-image spreadsheet, and
    publish red/green/total counters to the Tk UI.

    NOTE(review): this source arrived with all indentation collapsed; the
    nesting below is a reconstruction — confirm against the original file.
    """
    root.quit()
    global originalPath,scoredImages,scoredImagesList, pres_time, rankPlot ,f3,counter_red,counter_green,counter_total,m
    global mc_and_dc_list, number_of_segments, good_metaphases_list, good_metaphases_list_with_coordinates, file_with_good_metaphases, actual_contours_path, dict_of_coordinates
    # Segment the downloaded images and locate usable metaphases.
    mc_and_dc_list, number_of_segments,good_metaphases_list, good_metaphases_list_with_coordinates,segpath, dict_of_coordinates = find_good_metaphases(downloaded, progress,file_name_list,f2,root,25)
    red=0
    green=0
    total=0
    # Rank the segmented images; originalPath pairs each image path with
    # its rank information.
    originalPath,scoredImages, pres_time, scoredImagesList, rankPlot = rank(segpath)
    time.sleep(0.3)
    segment_path=''
    for i in range(0, len(originalPath)):
        #originalPath[i][0]
        rnk = originalPath[i][1][0]
        # Split the path to recover the directory and the image file name.
        q = originalPath[i][0].split('/')
        imgname = q[len(q) - 1]
        segmented_img = ''
        for j in range(0, len(q)-1):
            segmented_img = segmented_img + '/' + str(q[j])
        segment_path=segmented_img+'/segments/scoredData.xlsx'
        segmented_img = segmented_img + '/segments/' + imgname
        img1 = cv2.imread(segmented_img)
        #print filename[-2]
        # Copy the segmented image into a folder named after its score.
        path_to_store = selected_folder + '/segments/score' + str(rnk)+ '/' + str(imgname)
        status = cv2.imwrite(path_to_store, img1)
        # Drop the leading '/' so the workbook path is relative.
        segment_path=segment_path[1:]
        wb = openpyxl.load_workbook(segment_path)
        ws = wb.active
        col=ws.max_column
        c1=ws.cell(2, 7)
        if c1.value is None:
            # Sheet has no count columns yet: append headers and write the
            # detected counts for each matching segment row.
            c1=ws.cell(1, col)
            c1.value="Dicentric"
            c1=ws.cell(1, col+1)
            c1.value="Monocentric"
            c1=ws.cell(1, col+2)
            c1.value="Total Chromsome"
            for k in range(0,ws.max_row-1):
                for i in range(2, ws.max_row+1 ):
                    c1=ws.cell(i,1)
                    # Match the spreadsheet row by segment identifier.
                    if(mc_and_dc_list[k][3]==c1.value):
                        for j in range(col,col+4):
                            c1=ws.cell(i, j)
                            c1.value=mc_and_dc_list[k][j-col]
                        # else:
                        #     mc_and_dc_list[k][j-col]=c1.value
                        break
        wb.save(segment_path)
        # Read the (possibly user-edited) counts back into mc_and_dc_list so
        # the totals below reflect the spreadsheet contents.
        row=ws.max_row
        col=ws.max_column
        for i in range(2, row+1 ):
            for j in range(col-3,col+1):
                c1=ws.cell(i, j)
                # print c1.value
                # print mc_and_dc_list[i-2][j-col+3]
                mc_and_dc_list[i-2][j-col+3]=c1.value
    # Accumulate totals across all segments.
    for i in mc_and_dc_list:
        red+=i[0]
        green+=i[1]
        total+=i[2]
    # Publish the totals as Tk variables so the UI can display them.
    counter_red=IntVar()
    counter_green=IntVar()
    counter_total=IntVar()
    counter_red.set(red)
    counter_green.set(green)
    counter_total.set(total)
sns.set_style("whitegrid") ############################################ #------------------------------------------# #--------Load the data and process---------# #------------------------------------------# #------------------------------------------# ############################################ data = pd.read_csv("raw_data/VET_data/VET_data.csv".format(1)) data = pd.pivot_table(data, values = "Rating", index = ["ID", "Question", "Q_n", "Condition", "Base"], columns = "Height").reset_index() # data = data.rename(columns = {'H': 'Y1', 'M': 'Y2', 'L': 'Y3'}) #Rename the columns data[['Y1', 'Y2', 'Y3']] = rk.rank(data[['H', 'M', 'L']]) q_data = data[data['Q_n'] == 1] q_data = q_data[q_data['Base'] != 'JLR'] q_data['ID'] = q_data['ID'] + 1 myThurst = thurstonian(design_formula = '~Base', data = q_data, subject_name = "ID") resample = False if resample: #NOTE: IF using multiprocessing you will need to run in an external cmd window. Will not work in Spyder #Prepare for sampling using multiple cores myThurst.pre_sample()
import csvtojson, ranking, argparse

# parse the tags to get the name of the input file and the respective
# climbing and shooting weights
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", default="./AMCscouting2013.csv")
parser.add_argument("-s", "--shooter", type=float, help="shooter weighting factor", default=1)
parser.add_argument("-c", "--climber", type=float, help="climber weighting factor", default=1)
args = parser.parse_args()

# print the resulting ranking
# Fix: use the function call form of print — for a single argument it is
# valid and identical under both Python 2 and Python 3.
print(ranking.rank(csvtojson.csvtojson(args.file), args.climber, args.shooter))
    else:
        # Running cumulative sum of exp(x[i]) — makes `out` strictly
        # increasing regardless of x.
        # NOTE(review): the enclosing function begins before this chunk;
        # indentation here is reconstructed — confirm against the full file.
        out[i] = out[i - 1] + np.exp(x[i])
    return out

#a = np.array([0, 0.6, 7, 24])
#
#stan_data = dict(N = 100, K = 5)
##sm = pystan.StanModel(file="simple_transform.stan")
##
##with open("model.pkl", 'wb') as f:
##    pickle.dump(sm, f)
#sm = pickle.load(open('model.pkl', 'rb'))
#
#fit = sm.sampling(data=stan_data, iter=1000, chains=1, control = dict(adapt_delta = 0.999))
#
#la = fit.extract()
#print(np.argsort(la['z_plus']))

# Simulate rank observations: N subjects, K items with latent means `mu`.
N = 10
mu = np.array([0, -2, 1])
K = mu.size
res = sts.norm.rvs(0, 1, size=(N, K - 1))
z = np.zeros((N, K))
# First item is the fixed reference (latent value 0); the others get their
# mean plus standard-normal noise.
z[:, 1:] = mu[1:] + res
# Observed data are the within-row ranks of the latent values.
y_obs = rk.rank(z, axis=-1)
import numpy
import csvtojson, ranking, argparse
import json

# parse the tags to get the name of the input file and the respective
# climbing and shooting weights
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", default="./report.csv")
parser.add_argument("-s", "--shooter", type=float, help="shooter weighting factor", default=1)
parser.add_argument("-c", "--climber", type=float, help="climber weighting factor", default=1)
args = parser.parse_args()

# Fix: parse the CSV once and reuse it (the original parsed the file twice).
data = csvtojson.csvtojson(args.file)

# Compute the ranking; column 0 of each row holds the team identifier,
# rows are ordered worst-to-best.
r = numpy.array(ranking.rank(data, args.climber, args.shooter))

# Dump the parsed data for later inspection; `with` guarantees the file is
# closed even on error (the original used bare open/close).
with open("report.json", "w") as f:
    f.write(json.dumps(data))

# Write the ranking best-first, one team per line.
with open("rankings", "w") as f:
    f.write(''.join(str(x) + '\n' for x in r[:,0][::-1]))
# OUR MAIN FILE
from critics import critics
from euclid import euclid_sim
from pearson import pearson_sim
from ranking import rank
from math import sqrt

# Print the ranking for the same user under each similarity metric,
# Pearson first, then Euclidean.
for similarity in (pearson_sim, euclid_sim):
    print(rank(critics, "Siddharth", similarity))
#!E:/Installed Setup/Python/python
import cgi, os
import cgitb; cgitb.enable()  # show tracebacks in the browser for debugging
import output
import ranking

# Fix: removed the duplicate `import os` (already imported above).
if __name__=="__main__":
    # Build the ranking from the explicit/implicit feedback files, then
    # render the top 5 results.
    ranking.rank("explicit.txt","implicit.txt")
    output.show(5)