from numpy import ndarray
# Sequential is assumed to come from Keras (`from keras.models import Sequential`);
# x_train / y_train and get_prediction_value_from_sigmoid_result are module-level.


def evaluate_model_manually(model: Sequential) -> None:
    """Recompute accuracy by hand and print it."""
    total_count = 0
    correct_count = 0
    predicted_y: ndarray = model.predict(x_train)
    converted_predicted_y: ndarray = get_prediction_value_from_sigmoid_result(
        predicted_y)
    for correct_value, predicted_value in zip(
            ndarray.tolist(y_train), ndarray.tolist(converted_predicted_y)):
        if correct_value == predicted_value:
            correct_count += 1
        total_count += 1
    print(f"accuracy calculated manually: {correct_count / total_count}")
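# Hypothetical usage sketch (not part of the original source): cross-check the
# manual count against Keras' built-in accuracy metric on the same data.
# Assumes x_train / y_train are the module-level arrays the function reads,
# with one-hot labels of width 2.
from keras.layers import Dense
from keras.models import Sequential

model = Sequential([Dense(2, activation="sigmoid", input_shape=(x_train.shape[1],))])
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
model.fit(x_train, y_train, epochs=5, verbose=0)
evaluate_model_manually(model)                         # manual count
print(model.evaluate(x_train, y_train, verbose=0)[1])  # Keras' accuracy, for comparison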
def __populate_table__(self, tablename: str, df: DataFrame):
    """Reflect an existing table and insert every DataFrame row into it."""
    self.__meta.reflect(bind=self.__engine)
    table: Table = self.__meta.tables[tablename]
    print(type(table))
    with self.__engine.connect() as conn:
        for row in df.values:
            print(row)
            # legacy SQLAlchemy spelling; on 1.4+/2.0 this is written
            # table.insert().values(...)
            ins = table.insert(values=ndarray.tolist(row))
            conn.execute(ins)
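# Standalone sketch of the same reflect-and-insert flow (assumed setup, not from
# the original source): an in-memory SQLite table stands in for self.__engine /
# self.__meta, and a dict is passed to values() for clarity.
import pandas as pd
from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine

engine = create_engine("sqlite:///:memory:")
meta = MetaData()
Table("people", meta, Column("id", Integer), Column("name", String))
meta.create_all(engine)

meta.reflect(bind=engine)
people = meta.tables["people"]
df = pd.DataFrame({"id": [1, 2], "name": ["ada", "grace"]})
with engine.connect() as conn:
    for row in df.to_dict("records"):
        conn.execute(people.insert().values(**row))
    conn.commit()  # required on SQLAlchemy 2.x connections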
from typing import List

from numpy import asarray, ndarray


def get_prediction_value_from_sigmoid_result(
        sigmoid_result: ndarray) -> ndarray:
    """Collapse per-class sigmoid activations into one-hot predictions by
    marking the larger of the two outputs in each row."""
    sigmoid_result_list: List[List[float]] = ndarray.tolist(sigmoid_result)
    prediction_value_list: List[List[int]] = []
    for row in sigmoid_result_list:
        if row[0] < row[1]:
            prediction_value_list.append([0, 1])
        else:
            prediction_value_list.append([1, 0])
    return asarray(prediction_value_list)
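# Quick check of the conversion (illustrative only): each row collapses to a
# one-hot vector marking the larger activation.
from numpy import asarray

scores = asarray([[0.2, 0.9], [0.7, 0.1]])
print(get_prediction_value_from_sigmoid_result(scores))
# -> [[0 1]
#     [1 0]]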
def get_score(self, theme_list, sen_list, sen_tfidf_list, sen_title_list):
    self.calc_cos(theme_list, sen_list)
    self.calc_loc(sen_list)
    self.calc_len(sen_tfidf_list)
    self.calc_simi(sen_title_list)
    cos_array = 0.75 * np.array(self.cos_score)
    loc_array = 0.2 * np.array(self.loc_score)
    len_array = 0.1 * np.array(self.len_score)
    simi_array = 0.15 * np.array(self.simi_score)
    # the location term is computed but currently excluded from the blend
    score_list = cos_array + len_array + simi_array  # + loc_array
    return ndarray.tolist(score_list)
def calc_word_average(self, words):
    # nd is presumably numpy.ndarray (`from numpy import ndarray as nd`)
    words = [word for word in words if word != '']
    if len(words) == 0:
        return [np.float32(0.0)] * self.config.embedding_size
    # mean of the (word, tag) embeddings, falling back to a zero vector
    average = sum([
        self.embeddings.get(
            word[0], word[1],
            default=np.asarray([np.float32(0.0)] * self.config.embedding_size))
        for word in words
    ]) / len(words)
    return nd.tolist(average)
def find_proceding_embeddings(self, mention, word_num):
    # "proceding" follows the original naming; these are the embeddings of the
    # word_num words *preceding* the mention.
    line = copy.copy(self.sentences[mention[0]])
    assert line != []
    word_index = mention[1]
    # pad the front so the slice never underflows
    for i in range(word_num):
        line = ['None'] + line
    proceding = line[word_index:word_index + word_num]
    proced_embed = []
    for proced in proceding:
        if proced == "None":
            proced_embed.append([0.0] * self.config.embedding_size)
        else:
            proced_embed.append(
                nd.tolist(
                    self.embeddings.get(
                        proced[0], proced[1],
                        default=np.asarray([0.0] * self.config.embedding_size))))
    del line
    print('type of proced_embed is: ', type(proced_embed))
    # return flatten(proced_embed)
    return functools.reduce(lambda x, y: x + y, proced_embed)
def find_following_embeddings(self, mention, word_num):
    line = copy.copy(self.sentences[mention[0]])
    assert line != []
    word_index = mention[1]
    for i in range(word_num):
        line.append('None')
    following = line[word_index + 1:word_index + word_num + 1]
    follow_embed = []
    for follow in following:
        if follow == "None":
            follow_embed.append([0.0] * self.config.embedding_size)
        else:
            follow_embed.append(
                nd.tolist(
                    self.embeddings.get(
                        follow[0], follow[1],
                        default=np.asarray([0.0] * self.config.embedding_size))))
    del line
    # return flatten(follow_embed)
    return functools.reduce(lambda x, y: x + y, follow_embed)
def problem_1_a(show_plot: bool):
    """
    Result of problem 1a: the interpolated ytm and ttm.
    show_plot controls whether the curves are plotted.
    """
    x = []
    y = []
    for days in range(10):
        x.append([])
        y.append([])
        for bonds in My_Bonds:
            x[days].append(bonds.get_ttm()[days])
            y[days].append(bonds.get_ytm(days))
    ttm_inter = []
    ytm_inter = []
    x_new = [(k + 1) / 2 for k in range(10)]  # half-year grid: 0.5, 1.0, ..., 5.0
    for i in range(10):
        ttm_inter.append(x_new)
        ytm_inter.append(ndarray.tolist(interp(x_new, x[i], y[i])))
    if show_plot:
        for i in range(10):
            plt.plot(ttm_inter[i], ytm_inter[i])
        plt.xlabel("time to maturity")
        plt.ylabel("yield to maturity")
        plt.show()
    return [ttm_inter, ytm_inter]
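# Illustrative aside (not from the original source): numpy.interp evaluates a
# piecewise-linear curve through (xp, fp) at new x values, which is exactly how
# the half-year yield grid above is filled in.
from numpy import interp

xp = [1.0, 2.0, 4.0]      # known times to maturity
fp = [0.02, 0.025, 0.03]  # known yields
print(interp([0.5, 1.5, 3.0], xp, fp))  # -> [0.02, 0.0225, 0.0275]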
def image_as_list(self):
    if not self._image_object:
        return []
    return ndarray.tolist(self._image_object.image)
import math

from numpy import linspace, ndarray


def getBinning(binningName):
    # Shared (name, cut) templates: every numeric binning below is expanded
    # the same way, so each branch just picks its edges and templates.
    PT_NAME = 'pt_{low:.0f}_{high:.0f}'
    PT_CUT = 'probes.scRawPt > {low:.0f} && probes.scRawPt < {high:.0f}'

    def edgeBins(edges, nameFmt, cutFmt):
        """(name, cut) pairs for consecutive bin edges."""
        bins = []
        for iBin in range(len(edges) - 1):
            repl = {'low': edges[iBin], 'high': edges[iBin + 1]}
            bins.append((nameFmt.format(**repl), cutFmt.format(**repl)))
        return bins

    def pairBins(pairs, nameFmt, eqCut, rangeCut):
        """(name, cut) pairs for explicit (low, high) pairs; eqCut is used when low == high."""
        bins = []
        for low, high in pairs:
            repl = {'low': low, 'high': high}
            cut = (eqCut if low == high else rangeCut).format(**repl)
            bins.append((nameFmt.format(**repl), cut))
        return bins

    if binningName == 'inclusive':
        binningTitle = 'p_{T}^{probe} (GeV)'
        binning = [175., 6500.]
        fitBins = edgeBins(binning, PT_NAME, PT_CUT)
        binning[-1] = 500.  # cap the displayed axis
    elif binningName == 'ptlow':
        binningTitle = 'p_{T}^{probe} (GeV)'
        binning = ndarray.tolist(linspace(20, 80, 8))
        fitBins = edgeBins(binning, PT_NAME, PT_CUT)
        binning[-1] = 500.
    elif binningName == 'ptmedium':
        binningTitle = 'p_{T}^{probe} (GeV)'
        binning = ndarray.tolist(linspace(80, 200, 8))
        fitBins = edgeBins(binning, PT_NAME, PT_CUT)
        binning[-1] = 500.
    elif binningName == 'pthigh':
        binningTitle = 'p_{T}^{probe} (GeV)'
        # binning = [200., 225., 250., 300., 500., 1000., 6500.]
        binning = [200., 225., 250., 350., 6500.]
        fitBins = edgeBins(binning, PT_NAME, PT_CUT)
        binning[-1] = 500.
    elif binningName == 'pthigh2':
        binningTitle = 'p_{T}^{probe} (GeV)'
        # binning = [200., 225., 250., 300., 500., 1000., 6500.]
        binning = [200., 300., 1000., 5000., 10000.]
        fitBins = edgeBins(binning, PT_NAME, PT_CUT)
        binning[-1] = 500.
    elif binningName in ['pt', 'ptnlo', 'ptnloalt']:
        binningTitle = 'p_{T}^{probe} (GeV)'
        binning = [175., 200., 250., 6500.]
        fitBins = edgeBins(binning, PT_NAME, PT_CUT)
        binning[-1] = 500.
    elif binningName == 'ptalt':
        binningTitle = 'p_{T}^{probe} (GeV)'
        binning = [175., 200., 250., 300., 350., 400., 6500.]
        fitBins = edgeBins(binning, PT_NAME, PT_CUT)
        # hack to eliminate one large-weight event
        fitBins[-1] = (fitBins[-1][0],
                       fitBins[-1][1] + ' && (weight == 1 || weight < 0.0001)')
        binning[-1] = 500.
    elif binningName == 'ptalt2':
        binningTitle = 'p_{T}^{probe} (GeV)'
        binning = [175., 200., 250., 6500.]
        fitBins = edgeBins(binning, PT_NAME, PT_CUT)
        # hack to eliminate one large-weight event
        fitBins[-1] = (fitBins[-1][0],
                       fitBins[-1][1] + ' && (weight == 1 || weight < 0.0001)')
        binning[-1] = 500.
    elif binningName == 'highpt':
        binningTitle = 'p_{T}^{probe} (GeV)'
        binning = [175., 200., 225., 250., 275., 300., 600., 6500.]
        # binning = [175., 200., 225., 250., 275., 300., 350., 400., 600., 6500.]
        fitBins = edgeBins(binning, PT_NAME, PT_CUT)
        binning[-1] = 500.
    elif binningName == 'lowpt':
        binningTitle = 'p_{T}^{probe} (GeV)'
        binning = [24., 28., 32., 35., 38., 40., 42., 44., 46., 48., 50., 54.,
                   58., 62., 66., 70., 75., 80., 85., 90., 100., 120., 140., 160.]
        fitBins = edgeBins(binning, PT_NAME, PT_CUT)
    elif binningName in ['pogpt', 'pogptalt']:
        binningTitle = 'p_{T}^{probe} (GeV)'
        binning = [20., 35., 50., 90., 150., 6500.]
        fitBins = edgeBins(binning, PT_NAME, PT_CUT)
        binning[-1] = 200.
    elif binningName == 'pteta':
        binningTitle = 'p_{T}^{probe} (GeV)'
        ptBinning = [175., 200., 250., 300., 350., 400., 6500.]
        etaBinning = [-1.5, -0.8, 0., 0.8, 1.5]
        etaBins = edgeBins(etaBinning, 'eta_{low:.0f}_{high:.0f}',
                           'probes.scEta > {low:.2f} && probes.scEta < {high:.2f}')
        ptBins = edgeBins(ptBinning, PT_NAME, PT_CUT)
        fitBins = [(etaName + '_' + ptName, etaCut + ' && ' + ptCut)
                   for etaName, etaCut in etaBins
                   for ptName, ptCut in ptBins]
        binning = list(range(len(fitBins) + 1))
    elif binningName in ['ht', 'htalt']:
        binningTitle = 'H_{T} (GeV)'
        binning = [0., 100., 200., 400., 600., 800., 1200., 13000.]
        # ht = 'Sum$(partons.pt_ * (TMath::Abs(partons.pdgid) < 6 || TMath::Abs(partons.pdgid) == 21))'
        ht = 'Sum$(jets.pt_)'
        fitBins = edgeBins(binning, 'ht_{low:.0f}_{high:.0f}',
                           ht + ' > {low:.0f} && ' + ht + ' < {high:.0f}')
        binning[-1] = 1500.
    elif binningName in ['ptht', 'pthtalt']:
        binningTitle = 'p_{T}^{probe} (GeV)'
        ptBinning = [20., 35., 50., 90., 150., 6500.]  # [175., 200., 250., 6500.]
        htBinning = [0., 200., 400., 600., 800., 13000.]
        ht = 'Sum$(jets.pt_)'
        htBins = edgeBins(htBinning, 'ht_{low:.0f}_{high:.0f}',
                          ht + ' > {low:.0f} && ' + ht + ' < {high:.0f}')
        ptBins = edgeBins(ptBinning, PT_NAME, PT_CUT)
        fitBins = [(htName + '_' + ptName, htCut + ' && ' + ptCut)
                   for htName, htCut in htBins
                   for ptName, ptCut in ptBins]
        binning = list(range(len(fitBins) + 1))
    elif binningName == 'test':
        binningTitle = 'p_{T}^{probe} (GeV)'
        binning = [28., 32.]
        fitBins = edgeBins(binning, PT_NAME, PT_CUT)
    elif binningName == 'mutest':
        binningTitle = 'p_{T}^{probe} (GeV)'
        binning = [24., 28.]
        fitBins = edgeBins(binning, PT_NAME,
                           'probes.pt_ > {low:.0f} && probes.pt_ < {high:.0f}')
    elif binningName == 'eta':
        binningTitle = '|#eta^{probe}| (GeV)'
        binning = [0., 0.2, 0.4, 0.6, 1., 1.5]
        fitBins = edgeBins(binning, 'eta_{low:.1f}_{high:.1f}',
                           'probes.scRawPt > 40. && TMath::Abs(probes.eta_) > {low:.1f}'
                           ' && TMath::Abs(probes.eta_) < {high:.1f}')
    # added aug 1st
    elif binningName == 'eta_high':
        binningTitle = '#eta^{probe} (GeV)'
        binning = ndarray.tolist(linspace(-1.5, 1.5, 8))
        fitBins = edgeBins(binning, 'eta_{low:.1f}_{high:.1f}',
                           'probes.scRawPt > 40. && probes.eta_ > {low:.1f}'
                           ' && probes.eta_ < {high:.1f}')
    # added aug 2nd
    elif binningName == 'eta_high_2':
        binningTitle = '#eta^{probe} (GeV)'
        binning = [-1.5, -1.25, -1., 0., 1., 1.25, 1.5]
        fitBins = edgeBins(binning, 'eta_{low:.1f}_{high:.1f}',
                           'probes.scRawPt > 40. && probes.eta_ > {low:.1f}'
                           ' && probes.eta_ < {high:.1f}')
    # added aug 2nd
    elif binningName == 'eta_high_3':
        binningTitle = '|#eta^{probe}| (GeV)'
        binning = [-1.5, -1., 0., 1., 1.5]
        fitBins = edgeBins(binning, 'eta_{low:.1f}_{high:.1f}',
                           'probes.scRawPt > 40. && probes.eta_ > {low:.1f}'
                           ' && probes.eta_ < {high:.1f}')
    elif binningName == 'njet':
        binningTitle = 'N^{jet}'
        binning = [0., 1., 2., 3., 4., 10.]
        fitBins = pairBins([(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 10)],
                           'njet_{low}_{high}',
                           'probes.scRawPt > 40. && TMath::Max(0, jets.size - 2) == {low}',
                           'probes.scRawPt > 40. && TMath::Max(0, jets.size - 2) >= {low}'
                           ' && TMath::Max(0, jets.size - 2) <= {high}')
    # added august 2nd
    elif binningName == 'njet_2':
        binningTitle = 'N^{jet}'
        binning = [0., 2., 4., 6., 8.]
        fitBins = pairBins([(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 10)],
                           'njet_{low}_{high}',
                           'probes.scRawPt > 40. && TMath::Max(0, jets.size - 2) == {low}',
                           'probes.scRawPt > 40. && TMath::Max(0, jets.size - 2) >= {low}'
                           ' && TMath::Max(0, jets.size - 2) <= {high}')
    # added august 2nd
    elif binningName == 'njet_3':
        binningTitle = 'N^{jet}'
        binning = [0., 5., 10., 15., 20.]
        fitBins = pairBins([(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 10)],
                           'njet_{low}_{high}',
                           'probes.scRawPt > 40. && TMath::Max(0, jets.size - 2) == {low}',
                           'probes.scRawPt > 40. && TMath::Max(0, jets.size - 2) >= {low}'
                           ' && TMath::Max(0, jets.size - 2) <= {high}')
    # added July 26
    elif binningName == 'phi_high':
        binningTitle = 'phi'
        binning = ndarray.tolist(linspace(-math.pi, math.pi, 30))
        # cut = ' TMath::Abs(tags.phi_) > {low:f} && TMath::Abs(tags.phi_) < {high:f}'
        fitBins = edgeBins(binning, 'phi_{low:f}_{high:f}',
                           'tags.phi_ > {low:f} && tags.phi_ < {high:f}')
    # added August 2nd
    elif binningName == 'phi_high_2':
        binningTitle = 'phi'
        binning = [-math.pi, -2., -1., 0., 1., 2., math.pi]
        fitBins = edgeBins(binning, 'phi_{low:f}_{high:f}',
                           'tags.phi_ > {low:f} && tags.phi_ < {high:f}')
    # added august 2nd
    elif binningName == 'npv':
        binningTitle = 'N^{PV}'
        binning = [0., 10., 20., 30., 40., 50., 60.]
        fitBins = pairBins([(0, 9), (10, 19), (20, 29)], 'npv_{low}_{high}',
                           'npv == {low}', 'npv >= {low} && npv <= {high}')
    # added august
    elif binningName == 'npv_3':
        binningTitle = 'N^{PV}'
        binning = [0., 10., 20., 30., 40., 50., 60.]
        fitBins = edgeBins(binning, 'npv_{low}_{high}',
                           'npv >= {low} && npv <= {high}')
    # added july 25
    elif binningName == 'npv_low':
        binningTitle = 'N^{PV}'
        binning = [0., 10., 20., 30., 40., 50., 60.]
        fitBins = pairBins([(0, 9), (10, 19), (20, 29)], 'npv_{low}_{high}',
                           'npv == {low}', 'npv >= {low} && npv <= {high}')
    # added july 25
    elif binningName == 'npv_high':
        binningTitle = 'N^{PV}'
        binning = [0., 10., 20., 30., 40., 50., 60.]
        fitBins = pairBins([(0, 9), (10, 19), (20, 29)], 'npv_{low}_{high}',
                           'npv == {low}', 'npv >= {low} && npv <= {high}')
    elif binningName == 'npv_2':
        binningTitle = 'N^{PV}'
        # binning = [0., 10., 20., 30.]
        binning = [0., 5., 10., 15., 20., 25., 30.]
        fitBins = pairBins([(0, 9), (10, 19), (20, 29)], 'npv_{low}_{high}',
                           'probes.scRawPt > 175. && npv == {low}',
                           'probes.scRawPt > 175. && npv >= {low} && npv <= {high}')
    elif binningName == 'npvh':
        binningTitle = 'N^{PV}'
        binning = [0., 10., 20., 30.]
        fitBins = pairBins([(0, 9), (10, 19), (20, 29)], 'npvh_{low}_{high}',
                           'probes.scRawPt > 175. && npv == {low}'
                           ' && runNumber >= 280919 && runNumber <= 284044',
                           'probes.scRawPt > 175. && npv >= {low} && npv <= {high}'
                           ' && runNumber >= 280919 && runNumber <= 284044')
    elif binningName == 'npvbg':
        binningTitle = 'N^{PV}'
        binning = [0., 10., 20., 30.]
        fitBins = pairBins([(0, 9), (10, 19), (20, 29)], 'npvh_{low}_{high}',
                           'probes.scRawPt > 175. && npv == {low}'
                           ' && runNumber >= 272007 && runNumber <= 280385',
                           'probes.scRawPt > 175. && npv >= {low} && npv <= {high}'
                           ' && runNumber >= 272007 && runNumber <= 280385')
    elif binningName == 'run':
        binningTitle = 'run'
        binning = []
        fitBins = []
        # fitBins.append(('Run2016B', 'runNumber >= 272007 && runNumber <= 275376 && probes.scRawPt > 175.'))
        # fitBins.append(('Run2016C', 'runNumber >= 275657 && runNumber <= 276283 && probes.scRawPt > 175.'))
        fitBins.append(('Run2016BCD', 'runNumber >= 272007 && runNumber <= 276811 && probes.matchHLT[][2] && probes.scRawPt > 175.'))
        # fitBins.append(('Run2016D', 'runNumber >= 276315 && runNumber <= 276811 && probes.pt > 175.'))
        # fitBins.append(('Run2016E', 'runNumber >= 276831 && runNumber <= 277420 && probes.matchHLT[][2] && probes.scRawPt > 175.'))
        # fitBins.append(('Run2016F', 'runNumber >= 277772 && runNumber <= 278808 && probes.matchHLT[][2] && probes.scRawPt > 175.'))
        fitBins.append(('Run2016EF', 'runNumber >= 276831 && runNumber <= 278808 && probes.matchHLT[][2] && probes.scRawPt > 175.'))
        fitBins.append(('Run2016G', 'runNumber >= 278820 && runNumber <= 280385 && probes.matchHLT[][2] && probes.scRawPt > 175.'))
        fitBins.append(('Run2016H1', 'runNumber >= 280919 && runNumber <= 282500 && probes.matchHLT[][2] && probes.scRawPt > 175.'))
        fitBins.append(('Run2016H2', 'runNumber >= 282500 && runNumber <= 284044 && probes.matchHLT[][2] && probes.scRawPt > 175.'))
    elif binningName == 'chiso':
        binningTitle = 'chiso'
        binning = [0., 0.01, 0.5, 1., 1.37]
        fitBins = edgeBins(binning, 'chiso_{low:.2f}_{high:.2f}',
                           'probes.scRawPt > 175. && probes.chIso >= {low:.2f}'
                           ' && probes.chIso < {high:.2f}')

    return binningTitle, binning, fitBins
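# Usage sketch (illustrative, not from the original source): every binning name
# yields an axis title, the bin edges, and a list of (name, cut) pairs.
title, edges, bins = getBinning('lowpt')
print(title)      # p_{T}^{probe} (GeV)
print(edges[:3])  # [24.0, 28.0, 32.0]
print(bins[0])    # ('pt_24_28', 'probes.scRawPt > 24 && probes.scRawPt < 28')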
def rlocfind2(num, den, desired_zeta):
    '''
    Find the locations on the root locus closest to a desired damping ratio.
    Computed numerically, a bit hacky.

    Parameters:
    :param num: [array-like] coefficients of numerator of open loop transfer function
    :param den: [array-like] coefficients of denominator of open loop transfer function
    :param desired_zeta: [float] desired damping coefficient value

    Returns:
    :return: polelocs: [array-like] complex valued pole locations that meet requested zeta values
    :return: ks: [array-like] gain at selected pole locations on root locus
    :return: wnvals: [array-like] natural frequency at selected pole locations
    :return: zvals: [array-like] actual damping value at selected pole locations
    '''
    rlist, klist = rlocus(tf(num, den))
    anglelist = angle(rlist)
    tem = shape(anglelist)
    tem = tem[1]
    # damping ratio of each locus point: zeta = |cos(pole angle)|
    zlist = ones(shape(rlist))
    for k in range(tem):
        for j in range(len(klist)):
            zlist[j, k] = abs(cos(anglelist[j, k]))
    locclosest = ones(tem)
    eps = ones(tem)
    for k in range(tem):
        difflist = ones(len(klist))
        for j in range(len(klist)):
            difflist[j] = abs(desired_zeta - zlist[j, k])
        # walk along each branch until the deviation stops decreasing
        for j in range(len(klist) - 1):  # len - 1 keeps difflist[j + 1] in range
            if difflist[j + 1] <= difflist[j]:
                locclosest[k] = j + 1
                eps[k] = difflist[j + 1]
            else:
                break
    locclosest = ndarray.tolist(locclosest)
    for k in range(len(locclosest)):
        locclosest[k] = int(locclosest[k])
    locs = ones((tem, 3))
    for k in range(tem):
        locs[k, :] = [
            real(rlist[locclosest[k], k]),
            imag(rlist[locclosest[k], k]),
            klist[locclosest[k]]
        ]
    polelocs = locs[:, 0] + locs[:, 1] * 1j
    ks = locs[:, 2]
    # keep only the branches whose best deviation is acceptably small
    validvals = zeros((tem, 1))
    for k in range(len(eps)):
        if eps[k] < 0.1:
            validvals[k] = 1
    inc = 0
    finallocs = ndarray.tolist(zeros(int(sum(validvals))))
    finalks = zeros(int(sum(validvals)))
    for k in range(len(eps)):
        if validvals[k] == 1.:
            finallocs[inc] = polelocs[k]
            finalks[inc] = ks[k]
            inc = inc + 1
    ks = finalks
    polelocs = finallocs
    wnvals = sqrt(real(polelocs)**2 + imag(polelocs)**2)
    zvals = angle(polelocs)
    for k in range(len(zvals)):
        zvals[k] = abs(cos(zvals[k]))
    return polelocs, ks, wnvals, zvals
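# Hypothetical usage sketch (assumes the module-level imports used above,
# e.g. `from control import rlocus, tf` and the numpy names): pick the gains
# on G(s) = 1 / (s (s + 1) (s + 5)) that give a damping ratio near 0.7.
polelocs, ks, wnvals, zvals = rlocfind2([1], [1, 6, 5, 0], 0.7)
for loc, k in zip(polelocs, ks):
    print('pole %s at gain %.3f' % (loc, k))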
def Drawfilledcontour(zerosarray, vertices, centroid_x, centroid_y):
    """Creates np.array with dimensions defined by shape.
    Fills the polygon defined by vertices with ones, all other values zero.
    (`alll`, `check` and `checkk` are helper predicates defined elsewhere in
    this module.)"""
    vertices = squeeze(vertices, axis=1)
    centroid = asarray((centroid_x, centroid_y))
    base_array = zerosarray  # zeros(shape, dtype=float)  # Initialize your array of zeros
    fill = ones(base_array.shape) * True  # Initialize boolean array defining shape fill
    # Rasterize the edges of each triangle (previous vertex, vertex, centroid)
    for k in range(len(vertices)):
        verticess = concatenate([[vertices[k - 1]], [vertices[k]], [centroid]])
        m1 = (verticess[1][0] - verticess[0][0]) / (verticess[1][1] - verticess[0][1])
        m2 = (verticess[2][0] - verticess[1][0]) / (verticess[2][1] - verticess[1][1])
        m3 = (verticess[0][0] - verticess[2][0]) / (verticess[0][1] - verticess[2][1])
        # print(m1, m2, m3)
        # y = m(x-x0) + y0
        maxx1 = max(verticess[1][1], verticess[0][1])
        maxx2 = max(verticess[2][1], verticess[1][1])
        maxx3 = max(verticess[0][1], verticess[2][1])
        minx1 = min(verticess[1][1], verticess[0][1])
        minx2 = min(verticess[2][1], verticess[1][1])
        minx3 = min(verticess[0][1], verticess[2][1])
        for i in range(minx1, maxx1):
            y1 = float(m1 * (i - verticess[0][1]) + verticess[0][0])
            base_array[i, round(y1)] = 1
        for i in range(minx2, maxx2):
            y2 = float(m2 * (i - verticess[1][1]) + verticess[1][0])
            base_array[i, round(y2)] = 1
        for i in range(minx3, maxx3):
            y3 = float(m3 * (i - verticess[2][1]) + verticess[2][0])
            base_array[i, round(y3)] = 1
    # Create check array for each edge segment, combine into fill array
    for k in range(len(vertices)):
        base_array[vertices[k][1]][vertices[k][0]] = 1
    for k in range(len(vertices)):
        fill = alll([fill, check(vertices[k - 1], vertices[k], base_array)], axis=0)
    base_array[fill] = 1
    for k in range(len(vertices)):
        verticess = concatenate([[vertices[k - 1]], [vertices[k]], [centroid]])
        for j in range(len(verticess)):
            fill = alll([fill, checkk(verticess[j - 1], verticess[j], base_array)], axis=0)
        base_array[fill] = 1
    for k in range(len(vertices)):
        verticess = concatenate([[vertices[k - 1]], [vertices[k]], [centroid]])
        verticess = ndarray.tolist(verticess)
        # Reorder the triangle's points by quadrant before the final fill pass
        t = []
        for i in range(len(verticess)):
            for j in verticess:
                if j[0] < 30 and j[1] < 40:
                    t.append(j)
                    verticess.pop(verticess.index(j))
                    break
            for j in verticess:
                if j[0] < 30 and j[1] > 40:
                    t.append(j)
                    verticess.pop(verticess.index(j))
                    break
            for j in verticess:
                if j[0] > 30 and j[1] > 40:
                    t.append(j)
                    verticess.pop(verticess.index(j))
                    break
            for j in verticess:
                t.append(j)
                verticess.pop(verticess.index(j))
                break
        verticess = array(t)
        for j in range(len(verticess)):
            fill = alll([fill, checkk(verticess[j - 1], verticess[j], base_array)], axis=0)
        base_array[fill] = 1
    # Set all values inside polygon to one
    return base_array
# Fragment: spectral time evolution of a Gaussian wave packet in a box.
# dst/idst are discrete sine transforms (e.g. from scipy.fftpack or Newman's
# dcst module); the constants (N, L, a, x0, sigma, kappa, m, hbar, tmax,
# tstep) are defined earlier in the source file.
C = pi * pi * hbar / (2 * m * L * L)

# Create arrays
repsi = zeros(N, float)
impsi = zeros(N, float)
x = linspace(0, L, N)

# Set initial conditions: a Gaussian envelope with momentum kappa
for n in range(1, N):
    xn = n * a
    x[n] = xn
    gauss = exp(-(xn - x0)**2 / (2 * (sigma**2)))
    repsi[n] = gauss * cos(kappa * xn)
    impsi[n] = gauss * sin(kappa * xn)
lines = plt.plot(x, ndarray.tolist(repsi))

# determine Fourier coefficients
alpha = dst(repsi)
eta = dst(impsi)
b = empty(N, float)
i = 0
for t in arange(0.0, tmax, tstep):
    i += 1
    # evolve each sine mode by its phase C * k^2 * t
    for k in range(1, N):
        angle = C * k * k * t
        b[k] = alpha[k] * cos(angle) + eta[k] * sin(angle)
    repsi = idst(b)
    if (i % 20 == 0):
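# Illustrative aside (not from the original source): the coefficient recovery
# above relies on the DST round trip. With scipy's type-I DST the inverse
# needs an explicit 2 * (N + 1) normalization:
import numpy as np
from scipy.fftpack import dst, idst

sig = np.random.rand(64)
back = idst(dst(sig, type=1), type=1) / (2 * (len(sig) + 1))
print(np.allclose(sig, back))  # True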
from numpy import ndarray
from scipy import stats


def z_score_list(data):
    """Standardize data to zero mean and unit variance, returned as a plain list."""
    standardized = ndarray.tolist(stats.zscore(data.copy()))
    return standardized
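# Quick check (illustrative only): z-scores of a symmetric sample.
import numpy as np

print(z_score_list(np.array([1.0, 2.0, 3.0])))
# -> [-1.2247..., 0.0, 1.2247...]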
def convolution(disc, disc_centre, disc_radius, disc_normal, disc_up,
                cell_centre_list, cell_volume_list):
    from mpi4py import MPI
    import libconvolution as cv
    import numpy as np
    from numpy import asarray, ndarray

    cell_centre_list_np = asarray(cell_centre_list)
    cell_volume_list_np = asarray(cell_volume_list)
    kernel_3d = False

    weighted_sum = np.zeros(len(disc))
    weighted_sum = cv.convolution_2dkernel_weights(
        disc, disc_centre, disc_radius, disc_normal, disc_up,
        cell_centre_list_np, cell_volume_list_np)

    # Need to reduce weighted sum over all processes
    totals = np.zeros_like(weighted_sum)
    MPI.COMM_WORLD.Allreduce(weighted_sum, totals, op=MPI.SUM)
    weighted_sum = totals

    thrust_check_total = 0
    cell_force = np.zeros(len(cell_centre_list_np) * 3)
    thrust_check = cv.convolution_2dkernel_force(
        disc, disc_centre, disc_radius, disc_normal, disc_up,
        cell_centre_list_np, cell_volume_list_np, weighted_sum, cell_force)
    thrust_check_array = np.array([thrust_check])
    thrust_check_total_array = np.array([0.0])
    MPI.COMM_WORLD.Allreduce(thrust_check_array, thrust_check_total_array,
                             op=MPI.SUM)
    thrust_check_total = thrust_check_total_array[0]
    thrust_check = thrust_check_total
    # if MPI.COMM_WORLD.Get_rank() == 0:
    #     print('Convolved total thrust: ', thrust_check)
    # thrust_check = 0.0

    total_thrust = 0.0
    for idx, w in enumerate(weighted_sum):
        segment = disc[idx]
        # if w > 0.0:
        #     thrust_check += segment[0]
        total_thrust += segment[0]
    # if MPI.COMM_WORLD.Get_rank() == 0:
    #     print('Specified total thrust: ', total_thrust)

    # Broken: cell_force_scaled will have a different struct to cell_force
    if thrust_check > 0.0:
        thrust_factor = total_thrust / thrust_check
        # if MPI.COMM_WORLD.Get_rank() == 0:
        #     print('Scaling thrust: ', thrust_factor)
        cell_force_scaled = []
        for cell in range(len(cell_force) // 3):  # integer division for Python 3
            cell_force_scaled.append(
                (cell_force[cell * 3 + 0] * thrust_factor,
                 cell_force[cell * 3 + 1] * thrust_factor,
                 cell_force[cell * 3 + 2] * thrust_factor))
        return cell_force_scaled
    else:
        # regroup the flat [x0, y0, z0, x1, ...] array into (x, y, z) triples
        cell_force = ndarray.tolist(cell_force)
        cell_array = iter(cell_force)
        return zip(cell_array, cell_array, cell_array)
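# Illustrative aside (not from the original source): the zip-of-one-iterator
# trick used above regroups a flat coordinate list into fixed-size tuples.
flat = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
it = iter(flat)
print(list(zip(it, it, it)))  # -> [(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)]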
def calc_correct(predict, actual):
    # The snippet begins mid-function in the source; the counter setup and the
    # first two branches are reconstructed to complete the fp/fn cases below.
    tp = tn = fp = fn = 0
    for i in range(0, len(predict)):
        if predict[i] > 0.5 and actual[i] > 0.5:
            tp += 1
        elif predict[i] < 0.5 and actual[i] < 0.5:
            tn += 1
        elif predict[i] > 0.5 and actual[i] < 0.5:
            fp += 1
        else:
            fn += 1
    # note: these ratios are kept exactly as defined in the original snippet
    truepositive = float(tp) / float(tp + tn)
    falsenegative = float(fn) / float(fn + fp)
    correctness = float(tp + fn) / float(tp + tn + fp + fn)
    return [correctness, truepositive, falsenegative]


if __name__ == '__main__':
    x = np.loadtxt('Data/trdata_all.txt')
    y = np.loadtxt('Data/redata7.txt')
    x_train, x_test, y_train, y_test = cv.train_test_split(
        x, y, test_size=0.2, random_state=42)
    print('Data Ready!')
    #clf = svm.SVC(kernel='rbf', C=1024, class_weight='auto')
    #clf = lm.LogisticRegression()
    clf = tree.DecisionTreeClassifier()
    #clf = nb.GaussianNB()
    clf = clf.fit(x_train, y_train)
    pre = clf.predict(x_test)
    for i in range(0, 50):
        print(pre[i], ' ', y_test[i])
    [CC, TP, FN] = calc_correct(ndarray.tolist(pre), ndarray.tolist(y_test))
    print(CC)
    print(TP)
    print(FN)
def h(self, a, m):
    if a == 0 and m == 0:
        result = [np.float32(0.0)] * self.config.I
        return result
    if a == '#':
        a = m
    embed_a = nd.tolist(
        self.embeddings.get(
            a[2], a[3], default=np.asarray([0.0] * self.config.embedding_size)))
    embed_m = nd.tolist(
        self.embeddings.get(
            m[2], m[3], default=np.asarray([0.0] * self.config.embedding_size)))
    first_aw_embed = nd.tolist(self.find_first_word_embedding(a))
    first_mw_embed = nd.tolist(self.find_first_word_embedding(m))
    last_aw_embed = nd.tolist(self.find_last_word_embedding(a))
    last_mw_embed = nd.tolist(self.find_last_word_embedding(m))
    proced2_a_embed = self.find_proceding_embeddings(a, 2)
    follow2_a_embed = self.find_following_embeddings(a, 2)
    proced2_m_embed = self.find_proceding_embeddings(m, 2)
    follow2_m_embed = self.find_following_embeddings(m, 2)
    avg5f_a = self.calc_word_average(self.find_following(a, 5))
    avg5p_a = self.calc_word_average(self.find_proceding(a, 5))
    avg5f_m = self.calc_word_average(self.find_following(m, 5))
    avg5p_m = self.calc_word_average(self.find_proceding(m, 5))
    avgsent_a = self.average_sent(a)
    avgsent_m = self.average_sent(m)
    avg_all = [self.all_word_average]
    type_a = [self.t_dict[a[3]]]  # self.type_dict[a[3]]
    type_m = [self.t_dict[m[3]]]  # self.type_dict[m[3]]
    mention_pos_a = self.mention_pos(a)
    mention_pos_m = self.mention_pos(m)
    mention_len_a = [len(a[2])]
    mention_len_m = [len(m[2])]
    distance = self.distance_mentions(a, m)
    distance_m = self.distance_intervening_mentions(a, m)
    # concatenate all per-mention and pairwise features into one flat vector
    result = (embed_a + first_aw_embed + last_aw_embed + proced2_a_embed +
              follow2_a_embed + avg5f_a + avg5p_a + avgsent_a + type_a +
              mention_pos_a + mention_len_a + embed_m + first_mw_embed +
              last_mw_embed + proced2_m_embed + follow2_m_embed + avg5f_m +
              avg5p_m + avgsent_m + type_m + mention_pos_m + mention_len_m +
              avg_all + distance + distance_m)
    if len(result) != self.config.I:
        print(len(proced2_a_embed))
        print(len(follow2_a_embed))
        print(len(proced2_m_embed))
        print(len(follow2_m_embed))
        print(len(result))
        sys.exit(0)
    return result
category_index = label_map_util.create_category_index_from_labelmap(
    PATH_TO_LABELS, use_display_name=True)
image_path = 'kites_detections_output.jpg'
image = Image.open(image_path)
# the array based representation of the image will be used later in order to
# prepare the result image with boxes and labels on it.
image_np = load_image_into_numpy_array(image)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
# Actual detection.
output_dict = run_inference_for_single_image(image_np, detection_graph)
print(output_dict)

from numpy import ndarray

a = ndarray.tolist(output_dict['detection_scores'])
b = ndarray.tolist(output_dict['detection_classes'])
# Visualization of the results of a detection: keep (score, class) pairs
# above the 0.5 confidence threshold.
a = list(
    filter(
        lambda x: x[0] > 0.5,
        zip(output_dict['detection_scores'],
            output_dict['detection_classes'])))
indexes = [
    k for k, v in enumerate(output_dict['detection_scores']) if (v > 0.5)
]
result = []
for i in a:
    result.append({category_index[i[1]]['name']: float(i[0])})
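# Illustrative aside (not from the original source): the score/class pairing
# above, run on made-up detections, keeps only the confident hits.
scores = [0.9, 0.6, 0.3]
classes = [1, 38, 1]
kept = list(filter(lambda x: x[0] > 0.5, zip(scores, classes)))
print(kept)  # -> [(0.9, 1), (0.6, 38)]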
xintb1t2 = xinter(b1t2polyfit[0], b1t2polyfit[1])
xintb1t3 = xinter(b1t3polyfit[0], b1t3polyfit[1])
xintb2t1 = xinter(b2t1polyfit[0], b2t1polyfit[1])
xintb2t2 = xinter(b2t2polyfit[0], b2t2polyfit[1])
xintb2t3 = xinter(b2t3polyfit[0], b2t3polyfit[1])
b1 = array([xintb1t2, xintb1t1, xintb1t3])
b2 = array([xintb2t2, xintb2t1, xintb2t3])
avgxintb1 = mean(b1)
avgxintb2 = mean(b2)
# combine the three trials for each bulb
ib1 = dataB1T1['col2'] + dataB1T2['col2'] + dataB1T3['col2']
ib2 = iB2T1 + iB2T2 + iB2T3
vb1 = ndarray.tolist(dataB1T1['col1']) + ndarray.tolist(
    dataB1T2['col1']) + ndarray.tolist(dataB1T3['col1'])
vb2 = ndarray.tolist(dataB2T1['col1']) + ndarray.tolist(
    dataB2T2['col1']) + ndarray.tolist(dataB2T3['col1'])
ib1err = b1t1erri + b1t2erri + b1t3erri
ib2err = b2t1erri + b2t2erri + b2t3erri
vb1err = b1t1errv + b1t2errv + b1t3errv
vb2err = b2t1errv + b2t2errv + b2t3errv
ib1lower = []
ib1upper = []
vb1lower = []
vb1upper = []
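# Illustrative aside (not from the original source): the reason for tolist()
# here is that + concatenates lists but adds numpy arrays elementwise.
from numpy import array, ndarray

u = array([1.0, 2.0])
v = array([3.0, 4.0])
print(u + v)                                  # -> [4. 6.]  (elementwise sum)
print(ndarray.tolist(u) + ndarray.tolist(v))  # -> [1.0, 2.0, 3.0, 4.0]  (concatenation)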
def Visualizer():
    # Call the predictions
    classifier_rectangle()
    classifier_longitudinal()
    classifier_linear()
    classifier_circle()
    classifier_polygon()
    classifier_organic()
    classifier_atrium()
    classifier_column_grid()
    classifier_staircase()

    # Convert the 2D Numpy array of predictions to a Python list to parse it
    from numpy import ndarray
    label_list = ndarray.tolist(np.array(label_list_Numpy_2D))
    print("Printing a python list of estimated patterns...")
    print(label_list)

    # create the visualization
    def add_legend_to_image(image_path: str,
                            feature_vector: Tuple[int, int, int, int, int,
                                                  int, int, int, int]):
        from PIL import Image
        from PIL import ImageFont
        from PIL import ImageDraw

        # resize the test image and paste on new canvas for visualization
        img = Image.open(image_path)
        img = img.resize((1600, 1024), Image.ANTIALIAS)
        new_image = Image.new("RGB", (1500, 1400), color="white")
        new_image.paste(img, (0, 0, 1600, 1024))
        draw = ImageDraw.Draw(new_image)
        font = ImageFont.truetype(
            "your favourite typeface as .ttf or equivalent", 19,
            encoding="unic")

        # List to process the predictions from the classifiers
        labels_to_add = []
        if feature_vector[0]:
            labels_to_add.append(("Rectangle", "black"))
        if feature_vector[1]:
            labels_to_add.append(("Composite", "pink"))
        if feature_vector[2]:
            labels_to_add.append(("Longitudinal", "blue"))
        if feature_vector[3]:
            labels_to_add.append(("Circle", "violet"))
        if feature_vector[4]:
            labels_to_add.append(("Polygonal", "gray"))
        if feature_vector[5]:
            labels_to_add.append(("Organic", "purple"))
        if feature_vector[6]:
            labels_to_add.append(("Atrium", "magenta"))
        if feature_vector[7]:
            labels_to_add.append(("Columns", "Orange"))
        if feature_vector[8]:
            labels_to_add.append(("Staircase", "green"))

        rectangle_x1 = 15
        text_x1 = 20
        if labels_to_add:
            draw.text(xy=(10, 610), text="Architectural Patterns",
                      fill="black", font=font)
            for label in labels_to_add:
                draw.rectangle(xy=(rectangle_x1, 640, rectangle_x1 + 110, 670),
                               fill=label[1])
                draw.text(xy=(text_x1, 645), text=label[0], font=font,
                          fill="white")
                rectangle_x1 += 130
                text_x1 += 130
        else:
            draw.text(xy=(10, 610), text="Did not find any pattern",
                      fill="black")
        new_image.show()
        new_image.save('predicted_1.png')

    # load the image again for display
    add_legend_to_image(image_path=test_image_path, feature_vector=label_list)
y_test = np.loadtxt('testset_txt_img_cat.list')
#min_max_scaler = pp.MinMaxScaler()
#x_train = min_max_scaler.fit_transform(x_train)
#x_test = min_max_scaler.fit_transform(x_test)
print('Data Ready!')
#clf = svm.SVC(kernel='rbf', C=2048, class_weight='auto')
clf = lm.LogisticRegression(C=0.01, random_state=42)
#clf = nb.GaussianNB()
clf = clf.fit(x_train, y_train)
pre = clf.predict(x_test)
for i in range(0, 50):
    print(pre[i], ' ', y_test[i])
CC = calc_correct2(ndarray.tolist(pre), ndarray.tolist(y_test))
print(CC)
#groundtruth(ndarray.tolist(y_test), ndarray.tolist(y_train))
# rank training examples whose category matches the prediction first
result = [[] for i in range(0, len(pre))]
for i in range(0, len(pre)):
    flag = [0 for j in range(0, len(y_train))]
    for j in range(0, len(y_train)):
        if (y_train[j] == pre[i]):
            result[i].append(j + 1)
            flag[j] = 1
    for j in range(0, len(y_train)):
        if (flag[j] == 0):
            result[i].append(j + 1)
            flag[j] = 1
result = np.array(result)
import os
import random

import numpy as np
from numpy import ndarray

if __name__ == '__main__':
    conceptNum = 10
    label = []
    L = os.listdir('Concept')
    for filename in L:
        label.append(np.loadtxt('Concept/' + filename))
    trainingSet = np.loadtxt('new_trainid.txt')
    featureList = np.loadtxt('Feature/traindata_image.txt')
    tagList = np.loadtxt('Feature/traindata_text.txt')
    print('Load Basic Data Finished!')
    # tolist() so that + concatenates the image and text features per example
    featureList = ndarray.tolist(featureList)
    tagList = ndarray.tolist(tagList)
    allList = []
    for i in range(0, len(featureList)):
        allList.append(featureList[i] + tagList[i])
    allList = np.array(allList)
    np.savetxt('Data/trdata.txt', allList, '%d')
    print('Basic Features Finished')
    for i in range(0, conceptNum):
        reList = []
        filename = 'Data/redata' + str(i) + '.txt'
        for j in range(0, len(featureList)):
            if (label[i][j] == 1):
                reList.append([1])
            else:
from operator import itemgetter

from numpy import ndarray


def render(vec):
    """Print digit probabilities in descending order."""
    pairs = list(zip(list(range(10)), ndarray.tolist(vec)))
    pairs.sort(key=itemgetter(1), reverse=True)
    for num, prob in pairs:
        print("%d : %.2f" % (num, prob))
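# Hypothetical usage sketch (vec is assumed to be a 10-way softmax output):
import numpy as np

probs = np.array([0.01, 0.02, 0.05, 0.02, 0.6, 0.1, 0.05, 0.05, 0.05, 0.05])
render(probs)
# 4 : 0.60
# 5 : 0.10
# ...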