def _onAnalyseClicked(self):
    """Handle the Analyse button: run the analyser on the selected file and show the results.

    Reads the path from the UI, analyses it, prints the raw result for
    debugging, and opens a console display window with the output.
    """
    filePath = self.ui.filePath.text()
    analyser = Analyser(filePath)
    # Renamed from `list`, which shadowed the builtin of the same name.
    results = analyser.analyse()
    print(results)
    display = ConsolDisplay(results, self)
    display.show()
    display.update()
    display.output()
def __enter__(self):
    """Enter the analyser context: register psycopg2 adapters and open DB connections.

    Returns self so the object can be used in a ``with`` statement.
    """
    Analyser.__enter__(self)
    # Have psycopg2 return text columns as unicode strings.
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
    # open database connections + output file
    self.gisconn = psycopg2.connect(self.config.db_string)
    # hstore columns will be returned as dicts with unicode keys/values.
    psycopg2.extras.register_hstore(self.gisconn, unicode=True)
    # DictCursor lets rows be accessed by column name as well as index.
    self.giscurs = self.gisconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    self.apiconn = OsmOsis.OsmOsis(self.config.db_string, self.config.db_schema)
    return self
def __init__(self, config, logger = None):
    """Initialise per-class registries and the fix-callback type table."""
    Analyser.__init__(self, config, logger)
    self.classs = {}
    self.classs_change = {}
    self.explain_sql = False
    # Map each fix callback to the OSM element kind it produces.
    table = {}
    for elem_kind, callbacks in (
        ("node", (self.node, self.node_full, self.node_new, self.node_position)),
        ("way", (self.way, self.way_full)),
        ("relation", (self.relation, self.relation_full)),
    ):
        for callback in callbacks:
            table[callback] = elem_kind
    self.FixTypeTable = table
def main():
    """Load test data, score every user, rebalance by group difficulty and report.

    Reads ``test_data.json``, computes per-user statistics, caps and corrects
    accepted counts, rebalances the final score by the relative difficulty of
    each user's problem group, then prints per-user data and a 10-band score
    histogram before handing the sorted scores to ``verify``.
    """
    N_USERS = 271   # number of users in test_data.json (was a repeated magic number)
    N_GROUPS = 5    # number of problem groups produced by CodeProcesser.group()
    AC_CAP = 200    # ceiling on accepted count applied before scoring

    # `with` guarantees the file handle is closed (the original leaked it).
    with open('test_data.json', encoding='utf-8') as f:
        data = json.load(f)

    dataProcesser = DataProcesser(data)
    avg = dataProcesser.Avg()
    acNum = dataProcesser.AcNum()
    for i in range(N_USERS):
        acNum[i] = min(acNum[i], AC_CAP)
    uploadNum = dataProcesser.UploadTimes()
    alltime = dataProcesser.AllTime()
    avgDebugTime = dataProcesser.AvgDebugTime()
    userIds = dataProcesser.userIds
    codeProcesser = CodeProcesser(dataProcesser.userIds, dataProcesser.cases)
    codeProcesser.group()
    # Subtract invalid submissions, clamping the count at zero.
    for i in range(N_USERS):
        acNum[i] = max(acNum[i] - dataProcesser.invalidNum[i], 0)
    analyser = Analyser(avg, acNum, uploadNum, alltime, avgDebugTime, userIds,
                        dataProcesser.cases)
    ultimateScore = analyser.ultimateScore()
    ug = codeProcesser.uGroup
    avgPerGroup = codeProcesser.AvgPerGroup()
    hardest = min(avgPerGroup)
    # Rebalance scores by the relative difficulty of each user's group.
    for i in range(N_USERS):
        for j in range(N_GROUPS):
            if userIds[i] in ug[j]:
                ultimateScore[i] = ultimateScore[i] / (avgPerGroup[j] / hardest)
    # Print per-user intermediate data.
    for i in range(len(userIds)):
        print("id: " + str(userIds[i]) + " 完成数:" + str(acNum[i]) + " 平均分:" +
              str(avg[i]) + " 平均提交次数:" + str(uploadNum[i]) + " 历时:" +
              str(alltime[i]) + " 平均debug时间:" + str(avgDebugTime[i][0]) + "秒" +
              " 总分:" + str(ultimateScore[i]))
    print(max(ultimateScore))
    # Histogram buckets (renamed from `res`, which was reused for two purposes).
    buckets = [0] * 10
    # Bucket scores into ten 10-point bands.
    for i in range(N_USERS):
        if ultimateScore[i] % 10 != 0:
            buckets[math.floor((ultimateScore[i] - ultimateScore[i] % 10) // 10)] += 1
        elif ultimateScore[i] == 0:
            buckets[0] += 1
        else:
            # Exact multiples of 10 fall into the band below (e.g. 100 -> buckets[9]).
            buckets[math.floor(ultimateScore[i]) // 10 - 1] += 1
    print("分数区间:", end="")
    print(buckets)
    ultimateScore.sort()
    print("最终得分:", end="")
    print(ultimateScore)
    verify(ultimateScore)
def __enter__(self):
    """Enter the analyser context: register psycopg2 adapters and open DB connections.

    Returns self so the object can be used in a ``with`` statement.
    """
    Analyser.__enter__(self)
    # Have psycopg2 return text columns as unicode strings.
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
    # open database connections + output file
    self.gisconn = psycopg2.connect(self.config.db_string)
    # hstore columns will be returned as dicts with unicode keys/values.
    psycopg2.extras.register_hstore(self.gisconn, unicode=True)
    # DictCursor lets rows be accessed by column name as well as index.
    self.giscurs = self.gisconn.cursor(
        cursor_factory=psycopg2.extras.DictCursor)
    # dump_sub_elements=False: this analyser does not need sub-element dumps.
    self.apiconn = OsmOsis.OsmOsis(self.config.db_string,
                                   self.config.db_schema,
                                   dump_sub_elements=False)
    return self
def __init__(self, config, logger = None):
    """Set up registries, the fix-callback type table and the type mapping."""
    Analyser.__init__(self, config, logger)
    self.classs = {}
    self.classs_change = {}
    # Map each fix callback to the OSM element kind it produces.
    table = {}
    for elem_kind, callbacks in (
        ("node", (self.node, self.node_full, self.node_new, self.node_position)),
        ("way", (self.way, self.way_full)),
        ("relation", (self.relation, self.relation_full)),
    ):
        for callback in callbacks:
            table[callback] = elem_kind
    self.FixTypeTable = table
    self.typeMapping = {'N': self.node_full, 'W': self.way_full, 'R': self.relation_full}
    # Explain SQL queries only when the config asks for verbose output.
    self.explain_sql = bool(getattr(config, "verbose", False))
def __init__(self):
    """Build the main window: file chooser, process button and subject frame."""
    self.root = Tk()
    self.analyse = An()
    self.root.title('HTJZ')
    #self.root.iconbitmap("d:/projects/images.ico")
    self.root.minsize(400, 800)
    self.Frame1 = LabelFrame(self.root, bg='#2980B9')
    self.Frame1.grid(row=0, column=3)
    self.button1 = Button(self.Frame1, text="Choose a file",
                          command=self.openfile)
    self.button1.grid(row=0, column=0)
    # NOTE(review): "CLick" typo is user-visible; left unchanged pending sign-off.
    self.Process_button = Button(self.root,
                                 text="CLick Here To Process Data",
                                 width=50, height=5, bg='red',
                                 command=self.process)
    self.Process_button.grid(row=2, column=3)
    self.frame2 = LabelFrame(self.root, bg='yellow')
    self.frame2.grid(row=4, column=3)
    # Entry widgets created later by process(), one per subject.
    # (The original also set an unused local `m = 5` here; removed.)
    self.subject_box = []
def readTextInput(self):
    """Read every deck file in ``Input(text)``, analyse it and generate pictures.

    For each input file: build a Deck, analyse it, render a picture (chunking
    decks of 70+ cards), and render the sideboard separately when present.

    Returns:
        list[str]: deck names (file names without their extension).
    """
    input_dir = "Input(text)"
    files = os.listdir(input_dir)
    names = []
    for entry in files:
        # os.path.join is portable; the original hard-coded a Windows backslash.
        mtg = Mtg(os.path.join(input_dir, entry))
        text = mtg.readInputFile()
        # splitext replaces the original `entry[:-4]`, which assumed a
        # 3-character extension such as ".txt".
        name = os.path.splitext(entry)[0]
        names.append(name)
        deck = Deck(name, [], [], 0)
        print("Now creating: " + deck.name + " please wait.\n")
        ana = Analyser(deck, text)
        ana.analyseDeck()
        if deck.cardAmount < 70:
            pg = PictureGenerator(deck)
            pg.createPicture()
        else:
            # Large decks are split into chunks before rendering.
            self.chunkify(deck)
        if len(deck.sideboard) != 0:
            sidedeck = Deck(deck.name + " sideboard", deck.sideboard, [], 0)
            pg = PictureGenerator(sidedeck)
            pg.createPicture()
    return names
def analyse(self):
    """Accumulate code metrics over all files; average the cyclomatic complexity.

    Sums every metric across ``self.files`` into the corresponding instance
    attribute, then divides the McCabe total by the file count to get a mean.
    """
    print(self.files)
    for path in self.files:
        analyser = Analyser(path)
        # Augmented assignment replaces the verbose `x = x + f()` form.
        self.McCabeCyclomaticComplexity += analyser.cyclomaticComplexity()
        self.NumberOfOperators += analyser.numberOfOperators()
        self.NumberOfOperands += analyser.numberOfOperands()
        self.TotalNumberOfOperators += analyser.totalNumberOfOperators()
        self.TotalNumberOfOperands += analyser.totalNumberOfOperands()
        self.LinesOfCode += analyser.linesOfCode()
        self.LinesOfComments += analyser.linesOfComments()
        self.HalsteadProgramLength += analyser.halsteadProgramLength()
    # Guard the mean: an empty file list previously raised ZeroDivisionError.
    if self.files:
        self.McCabeCyclomaticComplexity = self.McCabeCyclomaticComplexity / len(self.files)
def init():
    """Run the full pipeline: preprocess, cluster, score and write the outputs."""
    store = Storage(FILE_PATH)
    # Preprocessing: group, organise and fill in missing data.
    prep = Preprocesser(store)
    prep.grouping()
    prep.organize()
    prep.complement()
    # Cluster the cases with k-means and persist/visualise the clusters.
    analyser = Analyser(store)
    analyser.kmeans()
    Output.write_case_cluster(store.get_case_cluster())
    Output.visualize_case_cluster(store.get_km_data(), store.get_label())
    # Score the users and persist/visualise the per-user results.
    analyser.calc_total_weight()
    analyser.calc_user_result()
    Output.write_user_result(store.get_user_result())
    Output.visualize_user_result(store.get_user_result())
def __init__(self, config, logger=None):
    """Set up registries, the fix-callback type table and the type mapping."""
    Analyser.__init__(self, config, logger)
    self.classs = {}
    self.classs_change = {}
    # Explain SQL queries only when the config asks for verbose output.
    self.explain_sql = bool(getattr(config, "verbose", False))
    # Each fix callback maps to the OSM primitive it produces.
    self.FixTypeTable = dict(
        [(cb, "node") for cb in (self.node, self.node_full,
                                 self.node_new, self.node_position)]
        + [(cb, "way") for cb in (self.way, self.way_full)]
        + [(cb, "relation") for cb in (self.relation, self.relation_full)]
    )
    # One-letter OSM type codes to their "full" fetch callbacks.
    self.typeMapping = dict(
        zip("NWR", (self.node_full, self.way_full, self.relation_full)))
import sys, os
from CsvParser import FileReader
from Analyser import Analyser

# Batch-analyse every file found in the sibling "files" directory.
# NOTE(review): this runs at import time; a __main__ guard may be wanted.
for fileName in os.listdir("../files"):
    xmlFile = FileReader('../files/', fileName)
    # Presumably Analyser does its work in the constructor — verify against
    # the Analyser module; the instance is deliberately discarded.
    Analyser(xmlFile, fileName)
import pandas as pd
from Analyser import Analyser

if __name__ == '__main__':
    # Load the dataset and run the full distance-analysis sequence on it.
    dataframe = pd.read_csv('data.csv')
    analyser = Analyser(dataframe)
    #analyser.form_ordinal_list()
    # Call through the instance; the original used the unidiomatic
    # `Analyser.calculate_distance_between_objects(analyser)` form.
    analyser.calculate_distance_between_objects()
    analyser.print_triples()
    analyser.make_and_print_distances_matrix()
    analyser.calculate_sum_distances()
    analyser.calculate_distance_between_nominal_features()
from matplotlib import pyplot as plt
import tifffile as tf
import numpy as np
from Analyser import Analyser

# Open the multi-frame OME-TIFF stack of the vesicle recording.
tif = tf.TiffFile('./181008_CecB-PCPG vesicles- after flushing_1_MMStack_Pos22.ome.tif/')
vesiclecount = []
A = Analyser(None)
Num_frames = tif.imagej_metadata['frames']
# NOTE(review): the metadata frame count above is immediately overwritten by
# the hard-coded 800 — confirm whether that override is intentional.
Num_frames = 800
drug_start_frame = 337
# Walk the frames recorded after the drug was introduced.
for num in range(drug_start_frame,Num_frames):
    # Progress indicator every 200 frames.
    if not num%200:
        print(num)
    frame = tif.asarray(key = num)
    # Per-frame intensity threshold: mean plus one standard deviation.
    # NOTE(review): the loop body appears truncated here — `threshold` and
    # `vesiclecount` are not used in the visible code; verify downstream.
    threshold = np.average(frame) + np.std(frame)
from Analyser import Analyser
from Logger import Logger

# Module-level singletons: one shared logger and one shared analyser
# for every importer of this module.
log = Logger()
analysis = Analyser()
def __init__(self, config, logger=None):
    """Forward construction to the Analyser base class.

    :param config: analyser configuration object, passed through unchanged
    :param logger: optional logger; a fresh OsmoseLog.logger() is created
        when omitted

    The original default `logger=OsmoseLog.logger()` was evaluated once at
    import time, so every instance silently shared a single logger object;
    the None sentinel creates the default per call instead.
    """
    if logger is None:
        logger = OsmoseLog.logger()
    Analyser.__init__(self, config, logger)
def __init__(self, config, logger = None):
    """Forward construction to the Analyser base class.

    :param config: analyser configuration object, passed through unchanged
    :param logger: optional logger (presumably defaulted by the base
        class when None — verify against Analyser.__init__)
    """
    Analyser.__init__(self, config, logger)
dfFullData.info() #%% # Base on displot 01_INITIAL_DistPlot.png, boxplot 02_OUTLIER_BoxPlot.png, data information above and the original datset: # 1.some feature in dataset does not have normal distibution thus has to be skewed, # 2.The true label, 'diagnosis' has binary values and the ratio of Yes to No is disproportionate thus stratification has to be done. # 3.The range of numercal values in some features is wide thus has to be scaled down. # 4.The true label, 'diagnosis' has to be converted to numbers via label encoding since it has only 2 values # 5.There are some features with outliers, thus outliers has to be removed # 6.Features 'ID' and 'Unnamed' has to be dropped as they are not useful # 7. There are no empty cells # In addition, the follwing steps also has to be checked: # 7.Check for duplicates # 8.Check for high correlation between features analyser = Analyser() # Display Histogram.To check general data distibution on numrical data after unskew. File output is at \output\01_INITIAL_DistPlot.png analyser.histogramOrBoxPlotAnalysis(dfFullData, strCols=True, hist=True, boxSize=size, fileName='01_INITIAL') # %% # Display Boxplot.To check on outliers on numrical data which has been scaled. File output is at \output\02_OUTLIER_BoxPlot.png analyser.histogramOrBoxPlotAnalysis(dfFullData, strCols=True, hist=False, boxSize=size, fileName='02_OUTLIER') # %% # 3. Data Pre Process
def __init__(self, config, logger=None):
    """Forward construction to the Analyser base class.

    :param config: analyser configuration object, passed through unchanged
    :param logger: optional logger; a fresh OsmoseLog.logger() is created
        when omitted

    The original default `logger=OsmoseLog.logger()` was evaluated once at
    import time, so every instance silently shared a single logger object;
    the None sentinel creates the default per call instead.
    """
    if logger is None:
        logger = OsmoseLog.logger()
    Analyser.__init__(self, config, logger)
def start_analyse(self, filename):
    """Analyse the given file and print the result to stdout."""
    analyser = Analyser(filename)
    analyser.printResult()
from Parser import PDFConverter, Tokenizer from Analyser import Analyser, Calculator pdfFilePath = raw_input("Please enter the location of the JD in pdf format: ") jd = PDFConverter(pdfFilePath).str pdfFilePath = raw_input("Please enter the location of the CV in pdf format: ") cv = PDFConverter(pdfFilePath).str jd = Tokenizer(jd).tokens cv = Tokenizer(cv).tokens jd = Analyser(jd, cv) jd = jd.countIntScore() jd = Calculator(jd) print "The score is: " print jd.countScore()
def __exit__(self, exc_type, exc_value, traceback):
    """Release database resources, then let the base class finish teardown."""
    # close database connections + output file
    self.giscurs.close()
    self.gisconn.close()
    self.apiconn.close()
    # Base-class teardown runs last, after our own resources are released.
    Analyser.__exit__(self, exc_type, exc_value, traceback)
from Analyser import Analyser

# Python 2 script: report the top sale categories and the best-selling candy.
productsPath = raw_input("Please input the path of products.tab: ")
salesPath = raw_input("Please input the path of sales.tab: ")
analyzer = Analyser(productsPath, salesPath)
# int(raw_input(...)) replaces input(), which eval()s arbitrary user input
# in Python 2 — a code-execution hole on untrusted input.
topNumber = int(raw_input("Please input the top number: "))
analyzer.findTopCategories(topNumber)
analyzer.findBestSaleCandy()
class gui:
    """Tkinter front end: pick a PDF, map subject codes to names, run the analysis."""

    def __init__(self):
        # Build the main window: file chooser, process button and subject frame.
        self.root = Tk()
        self.analyse = An()
        self.root.title('HTJZ')
        #self.root.iconbitmap("d:/projects/images.ico")
        self.root.minsize(400, 800)
        self.Frame1 = LabelFrame(self.root, bg='#2980B9')
        self.Frame1.grid(row=0, column=3)
        self.button1 = Button(self.Frame1, text="Choose a file",
                              command=self.openfile)
        self.button1.grid(row=0, column=0)
        self.Process_button = Button(self.root,
                                     text="CLick Here To Process Data",
                                     width=50, height=5, bg='red',
                                     command=self.process)
        self.Process_button.grid(row=2, column=3)
        self.frame2 = LabelFrame(self.root, bg='yellow')
        self.frame2.grid(row=4, column=3)
        # NOTE(review): `m` is never read in __init__ — looks like dead code.
        m = 5
        # Entry widgets created later by process(), one per subject code.
        self.subject_box = list()

    def openfile(self):
        # Ask the user for a PDF and show the chosen path as a label.
        self.filename = filedialog.askopenfilename(
            initialdir="/", title="Select a Pdf",
            filetypes=(("pdf files", "*.pdf"), ("all files", "*.*")))
        self.my_label = Label(self.Frame1, text=self.filename)
        self.my_label.grid(row=1, column=0, padx=20, pady=20)

    def process(self):
        # Parse the selected PDF and build one label + entry row per subject code.
        m = 5
        self.subject_codes = self.analyse.PDF_parser(self.filename)
        # NOTE(review): iterates self.analyse.subject_codes rather than the
        # return value assigned above — presumably the same list; verify.
        for i in self.analyse.subject_codes:
            self.subject_label = Label(self.frame2, text=i)
            self.subject_label.grid(row=m, column=2)
            self.subject = Entry(self.frame2, width=50)
            self.subject.grid(row=m, column=3, padx=20, pady=20)
            self.subject_box.append(self.subject)
            m += 1
        self.sub_button = Button(self.frame2, text="Confirm",
                                 command=self.get_data)
        self.sub_button.grid(row=m, column=3)

    def get_data(self):
        # Collect the subject names typed by the user, keyed by subject code,
        # then run the analysis and open the resulting file.
        subs = list()
        self.m = 0
        self.Sub_codes_to_names = dict()
        for i in self.analyse.subject_codes:
            subs.append(str(self.subject_box[self.m].get()))
            # NOTE(review): inserting the text back and then deleting the
            # whole entry nets out to clearing the field — confirm intent.
            self.subject_box[self.m].insert(INSERT, subs[self.m])
            self.subject_box[self.m].delete(0, END)
            self.Sub_codes_to_names[i] = subs[self.m]
            self.m += 1
        #print(s1,s2,s3,s4,s5)
        File = self.analyse.Analysis(self.Sub_codes_to_names)
        # Pie chart of overall pass/fail counts from the analysis.
        temp.plot_pie(
            [self.analyse.Total_rank["Pass"], self.analyse.Total_rank["Fail"]],
            ["Pass", "Fail"])
        os.startfile(File)
def __enter__(self): Analyser.__enter__(self) # open database connections self._load_reader() self._load_parser() return self
def __init__(self, config, logger=None):
    """Forward construction to the Analyser base class.

    :param config: analyser configuration object, passed through unchanged
    :param logger: optional logger (presumably defaulted by the base
        class when None — verify against Analyser.__init__)
    """
    Analyser.__init__(self, config, logger)
def __exit__(self, exc_type, exc_value, traceback):
    """Drop the parser and reader, then let the base class finish teardown."""
    # close database connections
    self._log(u"Closing reader and parser")
    # `del` drops our references; presumably the objects' finalizers then
    # release the underlying resources — verify their __del__ behaviour.
    del self.parser
    del self._reader
    Analyser.__exit__(self, exc_type, exc_value, traceback)
def start(self):
    """Load the university library data file and prepare it for analysis.

    Removed from the original: a commented-out test call, a redundant
    trailing ``pass``, and the opaque local name ``a``.
    """
    analyser = Analyser('../university_lib.txt')
    analyser.prepareData()
""" Created on Tue Mar 12 09:35:36 2019 @author: mjsf3 """ from Analyser import Analyser from BackgroundFinder import BackgroundFinder import numpy as np import tifffile as tf import sys if __name__ == '__main__': if len(sys.argv) > 1: videopath = sys.argv[1] A = Analyser(videopath) A.bgf = BackgroundFinder() A.frames, A.videolength = A.load_frames() A.bgf.get_background(A.frames) A.bgf.get_data_gradient() A.bgf.find_correct_gaussian_scale() A.sett0frame(A.bgf.peak_begin_frame) A.videopos = videopath[videopath.find('Pos') + 3] A.get_traps(A.bgf.peak_begin_frame) A.get_clips_alt() A.sett0frame(A.bgf.peak_max_arg) A.classify_clips()
def __init__(self, dataFileName, outputDir):
    """Forward construction to the Analyser base class.

    :param dataFileName: path of the data file to analyse, passed through
    :param outputDir: directory for the analyser's output, passed through
    """
    Analyser.__init__(self, dataFileName, outputDir)