def load_dict_from_json(file_name):
    """Read *file_name* as UTF-8 JSON and return the parsed object.

    When the file does not exist, log through GlobalFunctions.print and
    return an empty dict instead of raising.
    """
    if not os.path.exists(file_name):
        GlobalFunctions.print(
            f'load_dict_from_json: {file_name} not found. empty dictionary returned'
        )
        return {}
    with codecs.open(file_name, 'r', 'utf-8') as handle:
        return json.loads(handle.read())
def getSource(testTF, queries, dicc, df):
    """Rank source documents for one query by cosine similarity.

    Args:
        testTF: term -> raw term frequency for the test document; weights are
            overwritten in place with rounded tf-idf values.
        queries: the query terms to score against.
        dicc: posting list, term -> {doc: weight}.
        df: term -> document frequency.

    Returns:
        The 10 highest-scoring (doc, cosine_similarity) pairs, descending.
    """
    # gather the posting-list entries for each query term
    queryResult = {}
    for x in queries:
        queryResult[x] = dicc.get(x)
    print(queryResult)
    totalDoc = GlobalFunctions.docCount()
    # convert raw term frequencies to tf-idf; df defaults to 1 for unseen terms
    for word in testTF:
        try:
            try:
                testTF[word] = round(GlobalFunctions.tfidf(testTF[word], df[word], totalDoc), 3)  #, smooth=True), 3)
            except KeyError:
                # term absent from the document-frequency table
                testTF[word] = round(GlobalFunctions.tfidf(testTF[word], 1, totalDoc), 3)  #, smooth=True))
        except:
            # diagnostic dump on any other tf-idf failure
            print(dicc[word])
            print(df[word])
            print(totalDoc)
    # keep only the query terms in the test-document vector
    sr = testTF.copy()
    testTF = {}
    for x in queries:
        if x in sr:
            testTF[x] = sr.get(x)
    # accumulate dot products and squared norms per candidate document
    normalizedTest = 0
    dotProduct = {}
    normalizedDoc = {}
    for q in testTF:
        try:
            for doc in queryResult[q]:
                if doc in dotProduct:
                    dotProduct[doc] += testTF.get(q) * queryResult.get(q).get(doc)
                else:
                    dotProduct[doc] = testTF.get(q) * queryResult.get(q).get(doc)
                try:
                    normalizedDoc[doc] += math.pow(queryResult.get(q).get(doc), 2)
                except:
                    # first contribution for this document
                    normalizedDoc[doc] = math.pow(queryResult.get(q).get(doc), 2)
        except TypeError:
            # queryResult[q] is None (term not in posting list)
            print(q)
        finally:
            # the query-vector norm always includes this term
            normalizedTest += math.pow(testTF.get(q), 2)
    for doc in normalizedDoc:
        normalizedDoc[doc] = math.sqrt(normalizedDoc[doc])
    normalizedTest = math.sqrt(normalizedTest)
    # cosine similarity = dot / (|doc| * |query|)
    cosineSim = {}
    for doc in dotProduct:
        cosineSim[doc] = dotProduct[doc] / (normalizedDoc[doc] * normalizedTest)
    sortResult = sorted(cosineSim.items(), key=lambda kv: kv[1])
    sortResult.reverse()
    return sortResult[:10]
def updateOrderComplete(self, connection):
    """Mark the order for this Route/Stop as complete in dat_master.

    Opens its own DB connection (the *connection* argument is overwritten;
    kept only for interface compatibility).

    Returns:
        True when at least one row was updated, False on zero rows or error.
    """
    config = python_config.read_db_config()
    host = config.get('host')
    user = config.get('user')
    database = config.get('database')
    password = config.get('password')
    # Initialise so the finally block never hits unbound names when
    # connect() itself fails (previously that raised NameError and
    # masked the real error).
    connection = None
    cursor = None
    try:
        connection = mysql.connector.connect(host=host, user=user,
                                             database=database,
                                             password=password)
        cursor = connection.cursor()
        updateOrderCompleteSQL = ("UPDATE dat_master SET "
                                  "o_comp = %s, "
                                  "updated_at = %s "
                                  "WHERE route_no = %s "
                                  "AND stop_no = %s")
        # millisecond precision: strip the last 3 microsecond digits
        currentTimeStamp = datetime.now().strftime(
            '%Y-%m-%d %H:%M:%S.%f')[:-3]
        updateOrderValues = (1, currentTimeStamp, self.Route, self.Stop)
        cursor.execute(updateOrderCompleteSQL, updateOrderValues)
        connection.commit()
        rowcount = cursor.rowcount
        print("Rows updated: " + str(rowcount))
        return rowcount > 0
    except Exception as e:
        print(e)
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        exceptionMsg = exc_value
        exceptionDetails = ''.join('!! ' + line for line in lines)
        GlobalFunctions.logExceptionStackTrace(exceptionMsg, exceptionDetails)
        hostLog.dbLog("Eby_OrderComplete", "Upd Err", self.AsciiRequestMessage)
        return False
    finally:
        if cursor is not None:
            cursor.close()
        if connection is not None:
            connection.close()
def createCSV(self, dataset):
    """Run the paper's statistical test for *dataset* and persist the results.

    Loads the silhouette-score CSV for this (random-state count, dataset,
    cluster count) combination, applies the ordering-based statistical test,
    then writes two CSV files: the test results and the sorted scores with a
    trailing per-column mean row.

    Args:
        dataset (DataSet object): data-set whose external labels / prediction
            labels fitment is checked.
    """
    n_states = len(self.randomStateList)
    ds_index = dataset.get_index()
    n_clusters = dataset.get_n_clusters()
    scores_path = get_csv_file_path(n_states, ds_index, n_clusters)
    scores_df = GlobalFunctions.get_df_by_path(scores_path)
    stat_test_df, sorted_df = sort_df_by_stat_test(scores_df)
    # --- statistical-test results ------------------------------------
    stat_name = f"{n_states}RandomStatesWith{n_clusters}ClustersStatisiticalTestResults.csv"
    stat_path = get_csv_file_path(n_states, ds_index, n_clusters, stat_name)
    print(stat_test_df)
    stat_test_df.to_csv(stat_path)
    # --- sorted scores, with a mean row appended ---------------------
    sorted_name = f"{n_states}RandomStatesWith{n_clusters}ClustersSorted.csv"
    sorted_path = get_csv_file_path(n_states, ds_index, n_clusters, sorted_name)
    sorted_df.loc['mean'] = sorted_df.mean()
    sorted_df.to_csv(sorted_path)
def findSourceDoc(self):
    """Prompt for a .txt file, run source detection on it, and display
    precision / recall / F1 against the annotation file in a popup."""
    root.filename = tkinter.filedialog.askopenfilename(
        initialdir=os.path.dirname(os.path.realpath(__file__)),
        title="Select file to search",
        filetypes=(("txt files", "*.txt"), ("all files", "*.*")))
    if root.filename == '':
        # dialog was cancelled -- nothing to evaluate
        print("cancelled")
    else:
        # retrieval pipeline, unfiltered (filter=False, filamt=0)
        srcCandidate, dupl = FindSourceDoc.run(root.filename, root, filter=False, filamt=0)
        try:
            # ensure ground-truth annotation exists for this file
            ParseXML.run(root.filename)
        except:
            print("already in annotation")
        dicc = g.openResult('output/annotation.csv')
        # annotation entry: [0] = sources, [1] = duplicates
        annoSource = dicc.get(ntpath.basename(root.filename[:-4]))[0]
        annoDup = dicc.get(ntpath.basename(root.filename[:-4]))[1]
        p, r, f = g.allmeasure(srcCandidate, dupl, annoSource, annoDup)
        print(p)
        print(r)
        print(f)
        # results window: label column + value column
        result = tkinter.Toplevel(root)
        result.minsize(200, 200)
        result.title("Evaluation " + ntpath.basename(root.filename))
        tkinter.Label(result, text="Precision ").grid(row=0, column=0, padx=3)
        tkinter.Label(result, text="Recall ").grid(row=1, column=0, padx=3)
        tkinter.Label(result, text="F1 Score ").grid(row=2, column=0, padx=3)
        tkinter.Label(result, text=round(p, 3)).grid(row=0, column=1, padx=3)
        tkinter.Label(result, text=round(r, 3)).grid(row=1, column=1, padx=3)
        tkinter.Label(result, text=round(f, 3)).grid(row=2, column=1, padx=3)
def main():
    """Scripted simulation driver (Python 2).

    Seeds the global timer/machine-state/handshake dictionaries, then loops
    forever injecting canned machineState + handshake snapshots at fixed tick
    counts (ticks 10 and 17 deliberately carry mismatched handshakes) while
    the AI steps its flow chart, logs state, and exchanges UART messages.
    """
    time.sleep(2)
    HATstarttime = GlobalFunctions.millis()
    GlobalVariables.timers = {"HAT" : HATstarttime}
    # NOTE(review): initial FL301 is the int 30; every later snapshot uses
    # the string "30"/"200"/etc. -- confirm consumers accept both.
    GlobalVariables.machineState = {"FCV301" : "0", "V201a" : "0", "V201b" :"0", "V301" : "0",
                                    "V302" : "0", "heya" : "1", "FL301" : 30}
    GlobalVariables.handshakes = {"FCV301" : "", "V201a" : "", "V201b" :"", "V301" : "",
                                  "V302" : "", "heya" : ""}
    print GlobalVariables.timers
    print GlobalVariables.machineState
    print GlobalVariables.handshakes
    ArbiAI = AI.AIclass()
    times = 0
    while True:
        time.sleep(1)
        times = times + 1
        # scripted state changes keyed on the tick counter
        if times == 2:
            GlobalVariables.machineState = {"FCV301" : "0", "V201a" : "0", "V201b" :"0", "V301" : "0",
                                            "V302" : "0", "heya" : "1", "FL301" : "30"}
            GlobalVariables.handshakes = {"FCV301" : "1", "V201a" : "1", "V201b" :"1", "V301" : "1",
                                          "V302" : "1", "heya" : "0"}
        if times == 6:
            GlobalVariables.machineState = {"FCV301" : "1", "V201a" : "1", "V201b" :"1", "V301" : "1",
                                            "V302" : "1", "heya" : "1", "FL301" : "30"}
            GlobalVariables.handshakes = {"FCV301" : "1", "V201a" : "1", "V201b" :"1", "V301" : "1",
                                          "V302" : "1", "heya" : "1"}
        if times == 9:
            GlobalVariables.machineState = {"FCV301" : "1", "V201a" : "0", "V201b" :"1", "V301" : "0",
                                            "V302" : "0", "heya" : "1", "FL301" : "200"}
            GlobalVariables.handshakes = {"FCV301" : "1", "V201a" : "0", "V201b" :"1", "V301" : "1",
                                          "V302" : "0", "heya" : "1"}
        # make handshake wrong
        if times == 10:
            GlobalVariables.machineState = {"FCV301" : "1", "V201a" : "0", "V201b" :"0", "V301" : "0",
                                            "V302" : "0", "heya" : "1", "FL301" : "200"}
            GlobalVariables.handshakes = {"FCV301" : "1", "V201a" : "1", "V201b" :"1", "V301" : "1",
                                          "V302" : "1", "heya" : "1"}
        if times == 12:
            GlobalVariables.machineState = {"FCV301" : "1", "V201a" : "1", "V201b" :"0", "V301" : "0",
                                            "V302" : "0", "heya" : "1", "FL301" : "30"}
            GlobalVariables.handshakes = {"FCV301" : "1", "V201a" : "1", "V201b" :"1", "V301" : "1",
                                          "V302" : "1", "heya" : "1"}
        if times == 15:
            GlobalVariables.machineState = {"FCV301" : "1", "V201a" : "1", "V201b" :"0", "V301" : "0",
                                            "V302" : "0", "heya" : "1", "FL301" : "10000"}
            GlobalVariables.handshakes = {"FCV301" : "1", "V201a" : "1", "V201b" :"1", "V301" : "0",
                                          "V302" : "1", "heya" : ""}
        # make handshake wrong
        if times == 17:
            GlobalVariables.machineState = {"FCV301" : "1", "V201a" : "0", "V201b" :"0", "V301" : "0",
                                            "V302" : "0", "heya" : "1", "FL301" : "10000"}
            GlobalVariables.handshakes = {"FCV301" : "1", "V201a" : "1", "V201b" :"1", "V301" : "1",
                                          "V302" : "1", "heya" : "1"}
        # one flow-chart step per tick, then status/decision/log/UART exchange
        outcome = ArbiAI.ArbiCSVFlowReader1.doNextStepInFlowChart()
        print outcome + str(times)
        ArbiAI.status()
        ArbiAI.decide()
        ArbiAI.ArbiDataLogger.logData(str(GlobalVariables.machineState))
        ArbiAI.ArbiUART.serialWrite("1_hi0_ACM0\r\n")
        ArbiAI.ArbiUART.serialWrite("1_hi1_ACM1\r\n")
        ArbiAI.ArbiUART.serialReceive()
        print GlobalVariables.UARTvar
        time.sleep(4)
def removeUnneededAssignments(self, connection):
    """Delete incomplete (c_comp = 0) dat_master rows for this assignment.

    Opens its own DB connection (the *connection* argument is overwritten;
    kept only for interface compatibility).

    Returns:
        None on success (matching the original contract), False on error.
    """
    config = python_config.read_db_config()
    host = config.get('host')
    user = config.get('user')
    database = config.get('database')
    password = config.get('password')
    # Initialise so finally never hits unbound names if connect() fails
    # (previously that raised NameError and masked the real error).
    connection = None
    cursor = None
    try:
        connection = mysql.connector.connect(host=host, user=user,
                                             database=database,
                                             password=password)
        cursor = connection.cursor()
        deleteAssignmentSQL = ("DELETE FROM dat_master "
                               "WHERE assignment_id = %s "
                               "AND c_comp = 0")
        deleteAssignmentValues = (self.AssignmentID, )
        cursor.execute(deleteAssignmentSQL, deleteAssignmentValues)
        connection.commit()
    except Exception as e:
        print(e)
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        exceptionMsg = exc_value
        exceptionDetails = ''.join('!! ' + line for line in lines)
        GlobalFunctions.logExceptionStackTrace(exceptionMsg, exceptionDetails)
        hostLog.dbLog("Eby_AssignmentComplete", "Upd Err", self.AsciiRequestMessage)
        return False
    finally:
        if cursor is not None:
            cursor.close()
        if connection is not None:
            connection.close()
def get_csv_file_path(num_random_states, dataset_index, n_clusters, file_name=None):
    """Resolve the statistical-test CSV path, deriving a default file name.

    Args:
        num_random_states: number of random states used in the experiment.
        dataset_index: index of the data-set the file belongs to.
        n_clusters: number of clusters used.
        file_name: explicit file name; when None a default is generated.

    Returns:
        The full path inside the statistical-test folder.
    """
    if file_name is None:  # identity test, not `== None` (PEP 8)
        file_name = get_csv_file_name(num_random_states, n_clusters)
    return GlobalFunctions.get_plot_file_path(
        file_name, dataset_index,
        GlobalParameters.STATISTICAL_TEST_FOLDER_NAME)
def getMasterRecordByAssignmentId(self):
    """Fetch the original assignment date row for self.AssignmentID.

    Returns:
        The (date,) row tuple, or None when no row matched or an error
        occurred (errors are logged before returning).
    """
    config = python_config.read_db_config()
    host = config.get('host')
    user = config.get('user')
    database = config.get('database')
    password = config.get('password')
    # Initialise so finally never hits unbound names if connect() fails.
    connection = None
    cursor = None
    try:
        connection = mysql.connector.connect(
            host=host,
            user=user,
            database=database,
            password=password
        )
        cursor = connection.cursor(buffered=True)
        # Parameterized query: the previous string concatenation of
        # AssignmentID was vulnerable to SQL injection.
        sql = "SELECT date FROM assignment.dat_master WHERE assignment_id = %s"
        cursor.execute(sql, (str(self.AssignmentID),))
        result = cursor.fetchone()
        return result
    except Exception as e:
        print(e)
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        exceptionMsg = exc_value
        exceptionDetails = ''.join('!! ' + line for line in lines)
        GlobalFunctions.logExceptionStackTrace(exceptionMsg, exceptionDetails)
        hostLog.dbLog("Eby_NewContainer", "Upd Err", self.AsciiRequestMessage)
    finally:
        if cursor is not None:
            cursor.close()
        if connection is not None:
            connection.close()
def getDatMasterByContainerId(self, containerId):
    """Fetch the dat_master row for *containerId*.

    Returns:
        The first matching row tuple, or None when no row matched or an
        error occurred (errors are logged before returning).
    """
    config = python_config.read_db_config()
    host = config.get('host')
    user = config.get('user')
    database = config.get('database')
    password = config.get('password')
    # Initialise so finally never hits unbound names if connect() fails.
    connection = None
    cursor = None
    try:
        connection = mysql.connector.connect(host=host, user=user,
                                             database=database,
                                             password=password)
        cursor = connection.cursor()
        getByContainerIdSQL = "SELECT * FROM dat_master WHERE container_id = %s"
        selectData = (containerId, )
        cursor.execute(getByContainerIdSQL, selectData)
        result = cursor.fetchone()
        return result
    except Exception as e:
        print(e)
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        exceptionMsg = exc_value
        exceptionDetails = ''.join('!! ' + line for line in lines)
        GlobalFunctions.logExceptionStackTrace(exceptionMsg, exceptionDetails)
        hostLog.dbLog("Eby_ContainerComplete", "Upd Err", self.AsciiRequestMessage)
    finally:
        if cursor is not None:
            cursor.close()
        if connection is not None:
            connection.close()
def __init__(self): YamlParser = ruamel.yaml.YAML() ConfigPath = GlobalFunctions.Find("Config.yaml", "../") Config = open(ConfigPath, "r") self.Config = YamlParser.load(Config) self.Config = self.Config["Config"]["Start Menu"] self.Resolution = (self.Config["Aspect Ratio"][0] * self.Config["Scale"], self.Config["Aspect Ratio"][1] * self.Config["Scale"]) if self.Config["FullScreen"]: self.FullScreen = FULLSCREEN
def GetBestRouteInOrbits(self, source, destination, orbits):
    """Return the fastest Route for this vehicle across *orbits*.

    Travel speed on each orbit is capped at the slower of the vehicle's
    speed and the orbit's speed limit.
    """
    candidates = []
    for orbit in orbits:
        route_name = GlobalFunctions.GetRouteNameByProblem(orbit, self)
        effective_speed = min(self.Speed, orbit.Speed)
        travel_time = orbit.Distance / effective_speed
        candidates.append(
            Route(route_name, source, destination, orbit.Name, self, travel_time))
    # first route with the minimal travel time
    return min(candidates, key=lambda candidate: candidate.Time)
def getDatFileRecordByContainerId(containerId):
    """Look up a dat_master row by container id.

    Returns:
        The row tuple; the sentinel string "ContainerNotFound" when no row
        matches; or None when a database error occurred (after logging it).
    """
    # Initialise so finally never hits unbound names if connect() fails.
    cursor = None
    assignmentConnection = None
    try:
        config = python_config.read_db_config()
        host = config.get('host')
        user = config.get('user')
        database = config.get('database')
        password = config.get('password')
        assignmentConnection = mysql.connector.connect(host=host, user=user,
                                                       database=database,
                                                       password=password)
        cursor = assignmentConnection.cursor()
        sql = "select * from dat_master where container_id = %s"
        queryValues = (containerId, )
        cursor.execute(sql, queryValues)
        result = cursor.fetchone()
        if result is None:
            # Previously returned here WITHOUT closing cursor/connection,
            # leaking both; finally now handles cleanup on every path.
            return "ContainerNotFound"
        return result
    except Exception as e:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        exceptionMsg = exc_value.msg
        exceptionDetails = ''.join('!! ' + line for line in lines)
        GlobalFunctions.logExceptionStackTrace(exceptionMsg, exceptionDetails)
    finally:
        if cursor is not None:
            cursor.close()
        if assignmentConnection is not None:
            assignmentConnection.close()
def __init__(self, index: int, csv_seperator: str = ','):
    """Initialise a data-set wrapper.

    Args:
        index: the data-set index, used to locate its CSV file on disk.
        csv_seperator: the CSV separator character.
    """
    self.index = index
    self.csv_seperator = csv_seperator
    # resolve the on-disk CSV location for this data-set index
    self.csv_file_path = GlobalFunctions.get_dataset_CSV_file_path(self.index)
    # populated later, once the CSV is actually loaded
    self.n_classes = 0
    self.df = pd.DataFrame()
    self.ground_truth = None
def __init__(self):
    """Wire up the ARBI subsystems: GUI (on its own thread), data logger,
    UART link, and the CSV task/flow readers. Order matters: the task
    dictionary must be published before the flow reader is created."""
    self.ArbiGUI = GUI.GUIclass("ARBI GUI")
    # run the GUI event loop on a separate thread so the caller keeps control
    self.guiThread = threading.Thread(target = self.ArbiGUI.createGUI)
    self.guiThread.start()
    self.ArbiDataLogger = DataLogger.DataLoggerclass("textfile.txt")
    self.ArbiDataLogger.clearFile()
    self.ArbiUART = UART.UARTclass()
    self.ArbiCSVTaskReader = CSVTaskReader.CSVTaskReaderclass("CSVTask.csv", "Step")
    GlobalVariables.taskCSVDict = self.ArbiCSVTaskReader.returnTaskDictionary()
    self.ArbiCSVFlowReader1 = CSVFlowReader.CSVFlowReaderclass("CSVFlow.csv", "Flow")
    # Intialise start-time reference in milliseconds
    GlobalVariables.timers["startTime"] = GlobalFunctions.millis()
def hasTimePassed(self):
    """Return "Yes" if the configured wait has elapsed, else "No".

    The step's ConditionValues field is "<timerName>_<seconds>": the first
    part names an entry in GlobalVariables.timers, the second the wait in
    seconds. (Python 2 print statements.)
    """
    self.timerReference = self.csvDict[self.flowChartStep]["ConditionValues"].split("_")[0]
    self.timewait = self.csvDict[self.flowChartStep]["ConditionValues"].split("_")[1]
    self.referenceTime = GlobalVariables.timers[self.timerReference]
    # both timestamps are in milliseconds; convert to seconds before differencing
    self.timedif = GlobalFunctions.millis()/1000 - self.referenceTime/1000
    print "timer"
    print self.timedif
    print self.timewait
    # truncate both sides to whole seconds for the comparison
    if int(self.timedif) > int(self.timewait):
        return "Yes"
    else:
        return "No"
def GetBestRoute(self):
    """Pick the fastest weather-capable route between Source and Destination.

    Rebuilds self.Routes from orbits touching both endpoints (in either
    direction) and every vehicle rated for the current weather, then
    returns the route with the smallest travel time.
    """
    self.Routes.clear()
    # orbits connecting the two endpoints, in either orientation
    matching_orbits = [
        orbit for orbit in self.Orbits
        if (orbit.Source == self.Source and orbit.Destination == self.Destination)
        or (orbit.Source == self.Destination and orbit.Destination == self.Source)
    ]
    for orbit in matching_orbits:
        for vehicle in GlobalVariables.vehicleswithWeather:
            if self.Weather not in vehicle.Weathers:
                continue  # vehicle not rated for the current weather
            route_name = GlobalFunctions.GetRouteNameByProblem(orbit, vehicle)
            pace = min(vehicle.Speed, orbit.Speed)
            self.Routes.append(
                Route(route_name, self.Source, self.Destination,
                      orbit.Name, vehicle, orbit.Distance / pace))
    self.Routes.sort(key=lambda route: route.Time)
    return self.Routes[0]
def run(filename):
    """Build the ground-truth annotation entry for *filename*.

    Parses the companion .xml, collects the annotated source documents and
    their known duplicates, and rewrites output/annotation.csv with an
    entry keyed on the file's base name.
    """
    xmldoc = minidom.parse(filename[:-4]+'.xml')
    # the two corpora use different XML tags/attributes for the source id
    if "simulatedFiles" in filename:
        plg = xmldoc.getElementsByTagName('plagiarized')
        srcs = []
        for x in plg:
            print(x.attributes['sourceid'].value)
            y = x.attributes['sourceid'].value
            if str(y)+'.txt' not in srcs:
                srcs.append(str(y)+'.txt')
    else:
        plg = xmldoc.getElementsByTagName('features')
        srcs = []
        for x in plg:
            print(x.attributes['source_reference'].value)
            y = x.attributes['source_reference'].value
            if str(y) + '.txt' not in srcs:
                srcs.append(str(y) + '.txt')
    # pick the duplicate table matching the corpus
    if 'testdoc2' in filename:
        dupesDict = GlobalFunctions.openFiles('duplicate/FirstDuplicates.csv')
    else:
        dupesDict = GlobalFunctions.openFiles('duplicate/NumericDuplicate.csv')
    # documents that list one of our sources as a duplicate
    dupes = []
    for dupe in dupesDict:
        for x in srcs:
            if x in dupesDict.get(dupe) and dupe not in srcs:
                dupes.append(dupe)
    # duplicates listed directly under each source
    for x in srcs:
        print(dupesDict.get(x))
        try:
            if len(dupesDict.get(x)) > 1:
                for dupe in dupesDict.get(x):
                    if dupe not in dupes:
                        dupes.append(dupe)
            else:
                try:
                    if dupesDict.get(x)[0] not in dupes:
                        dupes = dupes + dupesDict.get(x)
                except:
                    continue
        except:
            # source has no entry in the duplicate table
            print("not in dictionary")
    print(dupes)
    tupleS = [srcs, dupes]
    print(tupleS)
    # merge into the existing annotation file, or start a new mapping
    try:
        dicc = GlobalFunctions.openResult('output/annotation.csv')
        dicc[ntpath.basename(filename[:-4])] = tupleS
    except:
        dicc = {ntpath.basename(filename[:-4]):tupleS}
    # rewrite the whole annotation CSV
    with open('output/annotation.csv', 'w', encoding='utf-8', newline='')as csvfile:
        fieldname = ['TestDocs', 'Source','Duplicates']
        writer = csv.DictWriter(csvfile, fieldnames=fieldname)
        writer.writeheader()
        for x in dicc:
            try:
                writer.writerow({'TestDocs':x, 'Source':dicc.get(x)[0],'Duplicates':dicc.get(x)[1]})
            except:
                # entry has no duplicates component
                writer.writerow({'TestDocs': x, 'Source': dicc.get(x)[0]})
        csvfile.close()
def get_plot_file_path(self, random_state, dataset_index):
    """Return the path for this clustering's plot file.

    NOTE(review): get_plot_file_name's return value is ignored here but
    self.file_name is read right after -- it appears to set self.file_name
    as a side effect; confirm against its definition.
    """
    self.get_plot_file_name(random_state)
    return GlobalFunctions.get_plot_file_path(
        self.file_name, dataset_index,
        GlobalParameters.CLUSTERING_PLOT_FOLDER_NAME)
def get_external_fittment_file_path(dataset_index, file_name):
    """Build the path of an external-labels fitment result file for the
    given data-set index."""
    return GlobalFunctions.get_plot_file_path(
        file_name,
        dataset_index,
        GlobalParameters.EXTERNAL_LABELS_FITTMENT_RESULTS_FOLDER)
def __init__(self): YamlParser = ruamel.yaml.YAML() ConfigPath = GlobalFunctions.Find("Config.yaml", "../") Config = open(ConfigPath, "r") self.Config = YamlParser.load(Config) self.Config = self.Config["Config"]["Main Menu"]
def simul(filteramt, file):
    """Run source detection over a whole test corpus and plot the metrics.

    Args:
        filteramt: number of top candidates to keep (0 = no filtering).
        file: "Simulated" to use the simulated corpus, anything else for
            the artificial corpus.

    Side effects: shows a matplotlib figure with per-document precision,
    recall and F1 bars plus corpus means in the titles.
    """
    if file == "Simulated":
        file = glob.glob("testdoc/simulatedFiles/*.txt")
    else:
        file = glob.glob("testdoc/artificialFile/*.txt")
    precision = []
    recall = []
    F1 = []
    docname = []
    count = 0
    for fi in file:
        count += 1
        docname.append(count)
        if fi == '':
            print('cancelled')
        else:
            # filteramt == 0 disables candidate filtering entirely
            if filteramt == 0:
                srcCandidate, dupl = FindSourceDoc.run(fi, root, show=False, filter=False, filamt=0)
            else:
                srcCandidate, dupl = FindSourceDoc.run(fi, root, show=False, filamt=filteramt)
            try:
                # ensure ground-truth annotation exists for this file
                ParseXML.run(fi)
            except:
                print("already in annotation")
            dicc = g.openResult('output/annotation.csv')
            annoSource = dicc.get(ntpath.basename(fi[:-4]))[0]
            annoDup = dicc.get(ntpath.basename(fi[:-4]))[1]
            p, r, f = g.allmeasure(srcCandidate, dupl, annoSource, annoDup)
            precision.append(p)
            recall.append(r)
            F1.append(f)
    plt.subplot(2, 2, 1)
    plt.bar(docname, precision)
    plt.title('Precision = ' + str(round(sum(precision) / len(precision), 3)))
    plt.ylim(top=1.05)
    plt.subplot(2, 2, 2)
    plt.bar(docname, recall)
    plt.title('Recall = ' + str(round(sum(recall) / len(recall), 3)))
    plt.ylim(top=1.05)
    plt.subplot(2, 2, 3)
    # FIX: this subplot previously re-plotted `recall` under the F1 title.
    plt.bar(docname, F1)
    plt.title('F1 Score = ' + str(round(sum(F1) / len(F1), 3)))
    plt.ylim(top=1.05)
    plt.subplots_adjust(hspace=0.3)
    plt.show()
def updateContainerAsComplete(self, connection):
    """Mark this container complete in dat_master and run jurisdiction
    processing for pick code "001" containers (Web-72).

    Opens its own DB connection (the *connection* argument is overwritten;
    kept only for interface compatibility).

    Returns:
        True when at least one row was updated, False on zero rows or error.
    """
    config = python_config.read_db_config()
    host = config.get('host')
    user = config.get('user')
    database = config.get('database')
    password = config.get('password')
    # Initialise so finally never hits unbound names if connect() fails
    # (previously that raised NameError and masked the real error).
    connection = None
    cursor = None
    try:
        connection = mysql.connector.connect(host=host, user=user,
                                             database=database,
                                             password=password)
        cursor = connection.cursor()
        updateContainerSQL = ("UPDATE dat_master SET "
                              "c_comp = %s, "
                              "carton_qty = %s, "
                              "qc_flag = %s, "
                              "updated_at = %s "
                              "WHERE container_id = %s ")
        # millisecond precision: strip the last 3 microsecond digits
        currentTimeStamp = datetime.now().strftime(
            '%Y-%m-%d %H:%M:%S.%f')[:-3]
        updateContainerValues = (1, self.CigaretteQuantity, int(self.QCFlag),
                                 currentTimeStamp, self.ContainerID)
        cursor.execute(updateContainerSQL, updateContainerValues)
        connection.commit()
        rowcount = cursor.rowcount
        print("Rows updated: " + str(rowcount))
        # Web-72: pick code is the first 3 chars of dat_master column 6
        datMaster = self.getDatMasterByContainerId(self.ContainerID)
        pickCode = datMaster[6][:3]
        if pickCode == "001":
            Eby_Jurisdiction_Processor.process(self.ContainerID)
        return rowcount > 0
    except Exception as e:
        print(e)
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        exceptionMsg = exc_value
        exceptionDetails = ''.join('!! ' + line for line in lines)
        GlobalFunctions.logExceptionStackTrace(exceptionMsg, exceptionDetails)
        hostLog.dbLog("Eby_ContainerComplete", "Upd Err", self.AsciiRequestMessage)
        return False
    finally:
        if cursor is not None:
            cursor.close()
        if connection is not None:
            connection.close()
def get_csv_file_path(num_random_states, dataset_index, n_clusters, file_name=None):
    """Return the anomaly-detection output path for *file_name*.

    num_random_states and n_clusters are currently unused; file_name may be
    None, in which case None is forwarded unchanged to the path helper.
    """
    folder = "AnomalyDetecion"  # (sic) on-disk folder name kept as-is
    return GlobalFunctions.get_plot_file_path(file_name, dataset_index, folder)
#This is the main script which is the control loop that the program follows import StartScreen, pygame, ruamel.yaml, GlobalFunctions, MenuScreen, time, os, subprocess from pygame.locals import * from Colours.BasicColours import * pygame.init() _StartScreen_ = StartScreen.StartScreenInit() Resolution = _StartScreen_.Resolution Screen = pygame.display.set_mode(Resolution, _StartScreen_.FullScreen) YamlParser = ruamel.yaml.YAML() ConfigPath = GlobalFunctions.Find("Config.yaml", "/.") Config = open(ConfigPath, "r") Config = YamlParser.load(Config) _StartScreenText_ = StartScreen.Start() TextSize = _StartScreenText_.PixelFont.size("Press Any Key To Continue") ImageSize = _StartScreenText_.LogoImage.get_size() #Menu Class MainMenu = MenuScreen.MenuScreenInit() _MainMenu_ = MenuScreen.Menu() AllGames = _MainMenu_.ListGames() #BigTextHeight BigTextHeight = _MainMenu_.GetTextSize(AllGames)[1] #For Main Menu Selector = 0 SelectorPos = [0, 0]
def get_csv_file_path(num_random_states, dataset_index):
    """Return the statistical-test CSV path for the given random-state
    count and data-set index."""
    derived_name = get_csv_file_name(num_random_states)
    return GlobalFunctions.get_plot_file_path(
        derived_name,
        dataset_index,
        GlobalParameters.STATISTICAL_TEST_FOLDER_NAME)
def saveNewContainer(self):
    """Insert a new container record into dat_master.

    Skips the insert (optionally host-logging a duplicate) when the
    container already exists.

    Returns:
        True on successful insert, False on error, None on duplicate.
    """
    loggingConfig = python_config.read_logging_config()
    enabled = loggingConfig.get('enabled')
    auth = loggingConfig.get('auth')
    domain = loggingConfig.get('domain')
    existingRecord = self.doesNewContainerAlreadyExist()
    if existingRecord is not None:
        if enabled == "1":
            # existingRecord[2] is the ContainerID
            hostLog.log(auth, domain, "HOST to WXS", "Dupl", existingRecord[2])
        return
    config = python_config.read_db_config()
    host = config.get('host')
    user = config.get('user')
    database = config.get('database')
    password = config.get('password')
    # (rjw 2020-11-14 11:47) -- carry the original assignment date forward
    # in case this ADDCONTA arrives after midnight.
    date = None
    assignmentRecords = self.getMasterRecordByAssignmentId()
    if assignmentRecords is not None and len(assignmentRecords) > 0:
        date = assignmentRecords[0]
    # Initialise so finally never hits unbound names if connect() fails
    # (previously that raised NameError and masked the real error).
    connection = None
    cursor = None
    try:
        connection = mysql.connector.connect(
            host=host,
            user=user,
            database=database,
            password=password
        )
        cursor = connection.cursor()
        addNewContainerSQL = ("INSERT INTO dat_master "
            "(record_id, container_id, assignment_id, route_no, stop_no, pick_code, pick_type, jurisdiction, carton_qty, c_comp, a_comp, o_comp, r_comp, assign_name, status, date, created_at, updated_at) "
            "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
        currentTimeStamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
        newContainer = (
            self.MessageID, self.ContainerID, self.AssignmentID,
            self.RouteNumber, self.StopNumber, self.PickArea,
            self.PickType, self.Jurisdiction, self.NumberCartons,
            0, 0, 0, 0, 'SOCKET', 'Pending',
            date, currentTimeStamp, currentTimeStamp
        )
        cursor.execute(addNewContainerSQL, newContainer)
        connection.commit()
        rowcount = cursor.rowcount
        print("Rows inserted: " + str(rowcount))
        return True
    except Exception as e:
        print(e)
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        exceptionMsg = exc_value
        exceptionDetails = ''.join('!! ' + line for line in lines)
        GlobalFunctions.logExceptionStackTrace(exceptionMsg, exceptionDetails)
        hostLog.dbLog("Eby_NewContainer", "Upd Err", self.AsciiRequestMessage)
        return False
    finally:
        if cursor is not None:
            cursor.close()
        if connection is not None:
            connection.close()
def run(filename, root, show = True, filter = True, filamt = 5):
    """Find source-candidate documents (and their duplicates) for *filename*.

    Scores each extracted query against the posting list, merges and
    de-duplicates the ranked results, optionally truncates to *filamt*
    candidates, optionally renders tkinter result windows (*show*), and
    appends the outcome to an output CSV.

    Returns:
        (source_candidates, duplicates) as two lists of document names.
    """
    dicc = GlobalFunctions.openFiles('PLs/CompiledPLs/postingList.csv')
    df = GlobalFunctions.openFiles('PLs/dictionary/documentFreq.csv')
    testTF, queries = TestDocProcessing.queryExtract(filename)
    querySources = []
    counter = 0
    if show:
        win = tkinter.Toplevel(root)
        win.minsize(200, 200)
        win.title("Search Query for "+ntpath.basename(filename))
    # score every query; one GUI column per query when showing
    c=0
    for que in queries:
        r=0
        counter +=1
        querySources.append(getSource(testTF,que,dicc,df))
        if show:
            tkinter.Label(win, text = "Search Query "+str(counter)).grid(row=r, column = c, padx = 3)
            r+=1
        try:
            for q in que:
                if show:
                    tkinter.Label(win, text=q).grid(row=r, column = c, padx= 3)
                    r +=1
        except:
            print("unable to call new window")
        c += 1
    # per-query result window: document name + percentage columns
    if show:
        qwin = tkinter.Toplevel(win)
        qwin.minsize(200, 200)
        qwin.title("Query Result for " + ntpath.basename(filename))
        c=0
        counter = 0
        for src in querySources:
            r=0
            counter+=1
            tkinter.Label(qwin, text = "Query Result "+str(counter)).grid(row=r, column = c, padx = 3)
            tkinter.Label(qwin, text="Percentage " + str(counter)).grid(row=r, column=c+1, padx=3)
            r+=1
            try:
                for s in src:
                    tkinter.Label(qwin, text = s[0]).grid(row=r, column = c, padx = 3)
                    tkinter.Label(qwin, text = round(s[1]*100,2)).grid(row=r,column = c+1, padx=3)
                    r+=1
            except:
                print("no val")
            c+=2
    # flatten all per-query results and sort by score, descending
    result =[]
    for n in querySources:
        for x in n:
            result.append(x)
    result.sort(key=operator.itemgetter(1))
    result.reverse()
    # keep only the first (highest-scored) occurrence of each document
    filters = []
    copyResult = result.copy()
    result = []
    for x in copyResult:
        if x[0] not in filters:
            filters.append(x[0])
            result.append(x)
    print(result)
    # first duplicate pass: table chosen by corpus
    if 'testdoc2' in filename:
        dupes = GlobalFunctions.openFiles('duplicate/FirstDuplicates.csv')
    else:
        dupes = GlobalFunctions.openFiles('duplicate/NumericDuplicate.csv')
    duplicate = []
    # reduce results to bare document names
    copyRes = result.copy()
    result = []
    for res in copyRes:
        result.append(res[0])
    # move known duplicates out of the result list
    for res in result.copy():
        try:
            for x in dupes.get(res):
                if x not in duplicate and res not in duplicate:
                    duplicate.append(x)
                    result.remove(x)
        except:
            pass
    # second duplicate pass with the other corpus' table
    if 'testdoc2' in filename:
        dupes = GlobalFunctions.openFiles('duplicate/NumericDuplicate.csv')
    else:
        dupes = GlobalFunctions.openFiles('duplicate/FirstDuplicates.csv')
    for res in result.copy():
        try:
            for x in dupes.get(res):
                if x not in duplicate and res not in duplicate:
                    duplicate.append(x)
                    result.remove(x)
        except:
            pass
    print(result)
    print(duplicate)
    if filter:
        # truncate to the requested candidate count and recompute duplicates
        result = result[:filamt]
        if 'testdoc2' in filename:
            dupes = GlobalFunctions.openFiles('duplicate/FirstDuplicates.csv')
        else:
            dupes = GlobalFunctions.openFiles('duplicate/NumericDuplicate.csv')
        duplicate = []
        for res in result:
            try:
                for x in dupes.get(res):
                    duplicate.append(x)
            except:
                pass
    # final window: source candidates plus their duplicates
    if show:
        fres = tkinter.Toplevel(qwin)
        fres.minsize(200, 200)
        fres.title("Source Candidate for " + ntpath.basename(filename))
        c = 0
        r = 0
        dr = 0
        counter += 1
        tkinter.Label(fres, text="Query Result").grid(row=r, column=c, padx=3)
        r += 1
        try:
            for s in result:
                tkinter.Label(fres, text=s).grid(row=r, column=c, padx=3)
                r += 1
            for d in duplicate:
                dr+=1
                tkinter.Label(fres, text="Duplicate").grid(row=0, column=c+1, padx=3)
                tkinter.Label(fres,text = d).grid(row=dr, column=c+1, padx=3)
        except:
            print("no val")
    if not os.path.exists('output/'):
        os.makedirs('output/')
    # snapshot results before persisting
    copyResult = []
    for res in result.copy():
        copyResult.append(res)
    copyDupe = []
    for dupes in duplicate.copy():
        copyDupe.append(dupes)
    tupleS = [copyResult, copyDupe]
    print(tupleS)
    print(result)
    print(duplicate)
    # pick the output CSV matching the filter configuration
    if filter and filamt == 5:
        saveResult = 'output/resultsFilter5.csv'
    elif filter and filamt ==10:
        saveResult = 'output/resultsFilter10.csv'
    else:
        saveResult = 'output/resultsNoFil.csv'
    # merge with any existing results file, or start a fresh mapping
    try:
        dicc = GlobalFunctions.openResult(saveResult)
        dicc[ntpath.basename(filename[:-4])] = tupleS
    except:
        dicc = {ntpath.basename(filename[:-4]):tupleS}
    if filter:
        with open('output/resultsFilter'+str(filamt)+'.csv', 'w', encoding='utf-8', newline='')as csvfile:
            fieldname = ['TestDocs', 'Source_Candidate','Duplicates']
            writer = csv.DictWriter(csvfile, fieldnames=fieldname)
            writer.writeheader()
            for x in dicc:
                try:
                    writer.writerow({'TestDocs':x, 'Source_Candidate':dicc.get(x)[0],'Duplicates':dicc.get(x)[1]})
                except:
                    # entry has no duplicates component
                    writer.writerow({'TestDocs': x, 'Source_Candidate': dicc.get(x)[0]})
            csvfile.close()
    else:
        with open('output/resultsNoFil.csv', 'w', encoding='utf-8', newline='')as csvfile:
            fieldname = ['TestDocs', 'Source_Candidate','Duplicates']
            writer = csv.DictWriter(csvfile, fieldnames=fieldname)
            writer.writeheader()
            for x in dicc:
                try:
                    writer.writerow({'TestDocs':x, 'Source_Candidate':dicc.get(x)[0],'Duplicates':dicc.get(x)[1]})
                except:
                    # entry has no duplicates component
                    writer.writerow({'TestDocs': x, 'Source_Candidate': dicc.get(x)[0]})
            csvfile.close()
    return copyResult,copyDupe