def CNN_Identifier():
    """Classify every PDF in Test_pdf with the pre-trained CNN.

    Loads the saved model architecture/weights and the pickled word
    frequency list from Address(1), converts each test PDF to cleaned
    text, and predicts one circuit-type label per file.

    Returns:
        list[str]: predicted type label for each PDF in Test_pdf.
    """
    word_depth = 500  # number of words fed to the network per document
    pdf_to_text('Test_pdf', 'Test_text')

    # Restore the trained model (architecture JSON + HDF5 weights).
    with open(os.path.join(Address(1), r"model_architecture.json"), 'r') as f:
        model = model_from_json(f.read())
    model.load_weights(os.path.join(Address(1), r"model_weights.h5"))

    # Word-frequency list used to map words to integer indices.
    with open(os.path.join(Address(1), r"word_list.txt"), "rb") as freq_file:
        freq = pickle.load(freq_file)

    final_output = []
    type_list = ['ADC', 'CDC', 'Opamp', 'LDO', 'PLL', 'SRAM',
                 'Temperature_Sensor', 'DCDC', 'BDRT', 'counters',
                 'Digital_Potentiometers', 'DSP', 'IO']
    # NOTE(review): iterates Test_pdf rather than Test_text (original
    # author reported issues with the text folder) — confirm before
    # switching to the converted text.
    for pdf_name in os.listdir(os.path.join(Address(1), r"Test_pdf")):
        raw_test_data = convert(os.path.join(Address(1), r"Test_pdf", pdf_name))
        clean_test_data = [clean(raw_test_data, word_depth)]
        test_data = numpy.expand_dims(
            array_creater(clean_test_data, freq, word_depth), axis=3)
        results = model.predict(test_data)
        final_output.append(type_list[numpy.argmax(results)])
    return final_output
def setUp(self) -> None:
    """Create fresh fixture objects before each test runs."""
    # Two independent users and addresses, plus the DB/logic helpers.
    self.ur, self.ur2 = User(), User()
    self.ad, self.ad2 = Address(), Address()
    self.dbu = DBTools()
    self.logic = Logic()
def feed(self, value, erase_last=True):
    """Narrow the candidate address set to those still holding *value*.

    The first call for a given type performs a full memory search;
    subsequent calls re-read each previously found address and keep only
    the ones that still match. The new {type: [Address, ...]} mapping is
    stored as ``self.last_iteration`` and returned.
    """
    self.last_value = value
    narrowed = copy.copy(self.last_iteration)
    if self.type == 'unknown':
        candidate_types = Locator.get_typemap(value)
    else:
        candidate_types = [self.type]
    for ctype in candidate_types:
        if ctype not in narrowed:
            # Initial pass for this type: scan the whole search window.
            try:
                hits = self.mw.mem_search(value, ctype,
                                          start_offset=self.start,
                                          end_offset=self.end)
                narrowed[ctype] = [Address(hit, self.mw.process, ctype)
                                   for hit in hits]
            except struct.error:
                narrowed[ctype] = []
        else:
            # Refinement pass: keep only addresses that still read back
            # as the fed value; unreadable addresses are dropped.
            kept = []
            for addr in narrowed[ctype]:
                try:
                    if int(self.mw.process.read(addr, ctype)) == int(value):
                        kept.append(Address(addr, self.mw.process, ctype))
                except Exception:
                    pass
            narrowed[ctype] = kept
    if erase_last:
        del self.last_iteration
    self.last_iteration = narrowed
    return narrowed
def Init(persons=None):
    """Populate *persons* with sample Person records and print the
    minimum relation level between Turing@Bletchley and Clarke@London.

    Args:
        persons: optional list extended in place with the five sample
            records. A fresh list is created when omitted — the original
            ``persons=[]`` default was a shared mutable default that
            accumulated entries across calls.
    """
    if persons is None:
        persons = []
    p1 = Person(Name('Alan', 'Turing'), Address('Bletchley Park', 'Bletchley Park'))
    p2 = Person(Name('Alan', 'Turing'), Address('Cambridge', 'Cambridge'))
    p3 = Person(Name('Joan', 'Clarke'), Address('Bletchley Park', 'Bletchley Park'))
    p4 = Person(Name('Joan', 'Clarke'), Address('London', 'London'))
    p5 = Person(Name('Grace', 'Hopper'), Address('New York', 'New York'))
    persons.extend((p1, p2, p3, p4, p5))
    print('MinRelationLevel:' + str(FindMinRelationLevel(p1, p4, persons)))
def readAddresses(self, fileName, sheetName):
    """Read address rows from an Excel sheet into Address objects.

    Expects columns A-F = address1, address2, city, state code, state,
    zip; data starts on row 2 (row 1 is the header).

    Returns:
        list[Address]: one object per data row.
    """
    workBook = pyxl.load_workbook(fileName)
    # Subscription replaces the deprecated get_sheet_by_name().
    sheet = workBook[sheetName]
    arrAddresses = []
    for row in range(2, sheet.max_row + 1):
        print('*** Address ', row - 1, ': ***')
        address1 = sheet['A' + str(row)].value
        print(' - Address 1: ', address1)
        address2 = sheet['B' + str(row)].value
        print(' - Address 2: ', address2)
        city = sheet['C' + str(row)].value
        print(' - City: ', city)
        stateCode = sheet['D' + str(row)].value
        print(' - State code: ', stateCode)
        state = sheet['E' + str(row)].value
        print(' - State: ', state)
        zip_code = sheet['F' + str(row)].value  # renamed: avoid shadowing builtin zip
        print(' - Zip: ', zip_code)
        arrAddresses.append(Address(address1, address2, city, stateCode,
                                    state, zip_code))
    return arrAddresses
def page_classification(training_folder, testing_folder):
    """Classify every page of the PDFs in *testing_folder*.

    Training data comes from the 'perfect' and 'bad' subfolders of
    *training_folder*. Each test PDF is split into single pages, each
    page is converted to text and classified individually.

    Returns a flat list with one classification result per page.
    """
    base_dir = Address(1).split("\n")[0]
    # Scratch directories for the cropped pages and their text versions;
    # remove leftovers from a previous run.
    pages_pdf_dir = p_join(base_dir, 'tmp_pdf')
    pages_txt_dir = p_join(base_dir, 'tmp_txt')
    for tmp_dir in (pages_pdf_dir, pages_txt_dir):
        if isdir(tmp_dir):
            rmtree(tmp_dir)

    # Labelled training sources.
    SOURCES = [[p_join(training_folder, label), label]
               for label in ('perfect', 'bad')]

    # Split each test PDF into one-page PDFs.
    pdf_cropper_for_extraction.pdf_cropper_multiple(testing_folder, pages_pdf_dir)

    page_classification_result = []
    for pdf_folder in listdir(pages_pdf_dir):
        page_pdf = p_join(pages_pdf_dir, pdf_folder)
        page_text = p_join(pages_txt_dir, pdf_folder)
        pdf_to_text(page_pdf, page_text)
        page_classification_result += supervised_classifier(SOURCES, page_text)

    rmtree(pages_pdf_dir)
    rmtree(pages_txt_dir)
    return page_classification_result
def page_classification(subject):
    """Classify each test PDF's pages as perfect/good/bad for *subject*.

    Training data comes from the perfect/good/bad subfolders of
    small_training_set_text/<subject>. Test PDFs from Test_pdf are
    cropped into single pages, converted to text and classified per
    page; results are printed rather than returned.
    """
    base_dir = Address(1).split("\n")[0]

    # Labelled training sources: pages tagged perfect, good and bad.
    pages_path = os.path.join(base_dir, 'small_training_set_text', subject)
    SOURCES = [[os.path.join(pages_path, label), label]
               for label in ('perfect', 'good', 'bad')]

    # Clean up leftovers from a previous run in the test folders.
    # (pageDir was redundantly reassigned twice in the original.)
    pageDir = os.path.join(base_dir, 'Test_pages')
    textDir = os.path.join(base_dir, 'Test_pages_text')
    for folder in os.listdir(pageDir):
        shutil.rmtree(os.path.join(pageDir, folder))
    for folder in os.listdir(textDir):
        shutil.rmtree(os.path.join(textDir, folder))

    # Crop each test PDF into single-page PDFs.
    pdfDir = os.path.join(base_dir, 'Test_pdf')
    pdf_cropper_for_extraction.pdf_cropper_multiple(pdfDir, pageDir)

    # Convert each page folder to text and classify it.
    for pdf_folder in os.listdir(pageDir):
        # NOTE(review): these are CWD-relative paths, unlike the absolute
        # pageDir/textDir above — confirm pdf_to_text expects this.
        page_pdf = os.path.join('Test_pages', pdf_folder)
        page_text = os.path.join('Test_pages_text', pdf_folder)
        pdf_to_text(page_pdf, page_text)
        page_classifier_result = supervised_classifier_ngram(SOURCES, page_text)
        print(page_classifier_result)
def ModifyAddresses():
    """Flask view: show the address form (GET) or insert a new address (POST)."""
    conn = dbconn()
    if request.method == 'POST':
        form = request.form
        street = form['street']
        city = form['city']
        state = form['state']
        postal_code = form['postal_code']
        country = form['country']
        plan_id = form['plan_id']   # hidden ISP plan field
        occupant = form['Occupant']
        # Persist the new address, then go back to the listing page.
        Address(conn).add_address(street, city, state, postal_code,
                                  country, plan_id, occupant)
        return redirect("/addresses/list")
    return render_template('address.html')
def __init__(self, customer_id, name, street, number, city, zip, country, age, license_id):
    """Build a customer record; the street/number/city/zip/country
    parts are wrapped into a single Address value object."""
    self.customer_id = customer_id
    self.name = name
    self.age = age
    self.license_id = license_id
    self.address = Address(street, number, city, zip, country)
def _create(self, nodeName, nodeId=None):
    """Create this node in the meta store and register the meta listener.

    Args:
        nodeName: name the node is created under.
        nodeId: optional explicit id; stringified before use.

    Raises:
        ValueError: if the owner has not been created, the node already
            exists, or the meta store fails to create the node.
    """
    # trace("_create")
    # Convenience cache of the owner's meta store.
    self.meta = self.owner.meta
    self.nodeName = nodeName
    # Identity comparisons replace the original ==/!= None checks.
    if nodeId is not None:
        nodeId = str(nodeId)
    ownerId = self.meta.getOwnerId(self.owner.ownerName)
    if ownerId is None:
        raise ValueError("Owner has not been created")
    # CREATE NODE if not already created.
    actualNodeId = self.meta.getNodeId(ownerId=None, nodeName=nodeName)
    if actualNodeId is not None:
        raise ValueError("Node already exists")
    actualNodeId = self.meta.create(nodeName, nodeId)
    if actualNodeId is None:
        raise ValueError("node not created")
    self.nodeId = actualNodeId
    self.data = self._makeData(self.owner.databaseId)
    self.addr = Address(self.owner.ownerId, self.nodeId,
                        databaseId=self.owner.databaseId)
    self.setState(Node.CREATED)
    # Register for all meta traffic. TODO: could be more restrictive in
    # registration (e.g. dst=(o)) so filtering happens earlier.
    anyAddr = Address.EMPTY
    self.meta.registerListener(None, anyAddr, anyAddr, self.handleMeta)
def readAddresses(self):
    """Read address rows from the SERTA sheet of RealAddresses.xlsx.

    Columns A-F = address1, address2, city, state code, state, zip;
    data starts at row 2 (row 1 is the header).

    Returns:
        list[Address]: one object per data row.
    """
    # Raw string: the original plain literal only worked because \A, \S
    # and \R are not escape sequences (a DeprecationWarning in modern
    # Python and fragile if the path changes).
    filename = r'D:\AutomationTest\Scripts\SertaTA\RealAddresses.xlsx'
    workBook = pyxl.load_workbook(filename)
    # Subscription replaces the deprecated get_sheet_by_name().
    sheet = workBook['SERTA']
    arrAddresses = []
    for row in range(2, sheet.max_row + 1):
        print('*** Address ', row - 1, ': ***')
        address1 = sheet['A' + str(row)].value
        print(' - Address 1: ', address1)
        address2 = sheet['B' + str(row)].value
        print(' - Address 2: ', address2)
        city = sheet['C' + str(row)].value
        print(' - City: ', city)
        stateCode = sheet['D' + str(row)].value
        print(' - State code: ', stateCode)
        state = sheet['E' + str(row)].value
        print(' - State: ', state)
        zip_code = sheet['F' + str(row)].value  # renamed: avoid shadowing builtin zip
        print(' - Zip: ', zip_code)
        arrAddresses.append(Address(address1, address2, city, stateCode,
                                    state, zip_code))
    return arrAddresses
def __init__(self, street, number, zip_code, city, n_rooms, sale_price):
    """Property listing: a composed Address plus room count and price."""
    # Composition instead of inheriting from Address.
    self.obj_addr = Address(street, number, zip_code, city)
    self.n_rooms = n_rooms
    self.sale_price = sale_price
def __init__(self, first=None, last=None, gender=None, phone=None, city=None, country=None, zip_code=None):
    """Create a phone-book entry; every argument is optional.

    :param first: first name of the person
    :type first: str
    :param last: last name of the person
    :type last: str
    :param gender: sex of the person, valid values are M / F
    :type gender: str
    :param phone: phone number in the xxxx-xxxxxxx format
    :type phone: str
    :param city: city the person belongs to
    :type city: str
    :param country: country the person belongs to
    :type country: str
    :param zip_code: zip code of the city
    :type zip_code: str
    """
    # Name/gender live in a Person, location in an Address.
    self.person = Person(first, last, gender)
    self.address = Address(city, country, zip_code)
    self.phone = phone
def __init__(self, csv_list):
    """Build a graph of addresses (nodes) and pairwise distances (edges)
    from parsed CSV rows.

    Each row is: name, street, city, state, zip, then distance columns
    to the previously loaded addresses (starting at column offset 5).
    """
    self.nodes = []
    self.edges = []
    row_index = 0
    for row in csv_list:
        # row[0] (the name) was read but never used in the original.
        street, city, state, zip_code = row[1], row[2], row[3], row[4]
        self.nodes.append(Address(street, city, state, zip_code))
        col_index = 0
        # Stop pairing once a column refers to an address that has not
        # been loaded yet (IndexError from self.nodes[col_index]).
        for column in row[5:]:
            try:
                edge = Distance(column, self.nodes[row_index],
                                self.nodes[col_index])
                self.edges.append(edge)
                col_index += 1
            except IndexError:
                # Redundant `continue` after `pass` removed; the except
                # clause was already the last statement in the loop body.
                pass
        row_index += 1
def __init__(self):
    """Load addresses and drivers from their CSV-style text files.

    File formats (one record per line):
      addresses: id,name,x,y -> Address(int id, name, int x, int y)
      drivers:   name,a,b    -> Driver(name, int a, int b)
    """
    self.__address = {}
    self.__driver = {}
    with open("addresses", "r") as file:
        for line in file.read().split("\n"):
            # Skip blank lines: the original `len(data) != 1` guard did
            # not protect against a trailing newline, which produced an
            # empty record and crashed on int('').
            if not line.strip():
                continue
            parts = line.split(",")
            key = int(parts[0].strip())
            self.__address[key] = Address(key, parts[1].strip(),
                                          int(parts[2].strip()),
                                          int(parts[3].strip()))
    with open("drivers", "r") as file:
        for line in file.read().split("\n"):
            if not line.strip():
                continue
            parts = line.split(",")
            self.__driver[parts[0].strip()] = Driver(parts[0].strip(),
                                                     int(parts[1].strip()),
                                                     int(parts[2].strip()))
def CreateTransaction():
    """Build a transaction from the GUI entry fields and broadcast it.

    Reads up to three receiver address/amount pairs from the Tk entries,
    signs the payload with the current user's private key, and sends the
    Transaction over client2 via Speak().
    """
    receiverAddr = str(recieverentry.get())
    ammount = 0
    # Pick the sender: the logged-in user if any, otherwise fall back
    # to the receiver entry text.
    # NOTE(review): in the else branch `sender` is never assigned, so
    # rsa.sign() below would raise NameError — confirm this path is
    # unreachable in practice.
    if CURRENTUSER != None:
        senderAddr = CURRENTUSER.pubKey
        sender = CURRENTUSER
    else:
        senderAddr = str(recieverentry.get())
    # Naslovi ("addresses"): list of Address(recipient, amount) outputs.
    Naslovi = []
    # Collect each non-empty receiver entry and accumulate the total.
    if not str(recieverentry.get()) == "":
        Naslovi.append(
            Address(str(recieverentry.get()),
                    float(recieverentryAmount.get())))
        ammount = ammount + float(recieverentryAmount.get())
    if not str(recieverentry2.get()) == "":
        Naslovi.append(
            Address(str(recieverentry2.get()),
                    float(recieverentryAmount2.get())))
        ammount = ammount + float(recieverentryAmount2.get())
    if not str(recieverentry3.get()) == "":
        Naslovi.append(
            Address(str(recieverentry3.get()),
                    float(recieverentryAmount3.get())))
        ammount = ammount + float(recieverentryAmount3.get())
    # Sign sender+outputs+amount+time and broadcast the transaction.
    # NOTE(review): `{time}` interpolates the time module object (its
    # repr), not a timestamp — presumably time.time() was intended; and
    # senderAddr.n assumes senderAddr is an RSA key, which is false in
    # the fallback branch above. Confirm before changing.
    Speak(
        "API_ADDRESS_NEW",
        Transaction(
            str(senderAddr.n), Naslovi, ammount,
            hexlify(
                rsa.sign(
                    (f"{str(senderAddr)}{str(Naslovi)}{str(ammount)}{time}"
                     ).encode("utf-8"), sender.priKey,
                    "SHA-256")).decode("utf-8")), client2)
    return
def get_address_from_string(str):
    """Parse an "ip:port" string into an Address.

    Returns None for strings of 10 characters or fewer.
    (The parameter name shadows the builtin ``str``; kept unchanged for
    caller compatibility.)
    """
    # Guard clause instead of the original if/else nesting.
    if len(str) <= 10:
        return None
    parts = str.split(":")
    return Address(parts[0], parts[1])
def update_all_database_addresses():
    """Geocode every stored address and write lat/long back to Firebase."""
    ref = get_address_firebase()
    for key, record in ref.get().items():
        addr = Address(record['streetAddress'], 0, 0)
        addr.get_google_geo_location()  # fills addr.latitude / addr.longitude
        ref.child(key).update({'lat': addr.latitude, 'long': addr.longitude})
def create_address(self):
    """Prompt the user for each address component and build an Address."""
    fields = [input(f"Enter {part}... ")
              for part in ("street", "city", "state", "postal", "country")]
    return Address(*fields)
def setUp(self):
    """Create a fresh Address fixture and collect its set* method names."""
    testlog.debug('Instantiate null address, address.setter list')
    self._address = Address(user_text)
    # inspect.ismethod matches nothing useful on a *class* in Python 3
    # (plain functions are not bound methods there), so the original
    # list came out empty; isroutine finds functions and methods alike.
    self._address_setters = [
        name for name in dict(
            inspect.getmembers(Address, predicate=inspect.isroutine))
        if name.startswith('set')
    ]
def readData(self, fileName, sheetName):
    """Read customer rows from an Excel sheet into CrmCustomer objects.

    Columns A-M: first/last name, address parts (C-H), address type,
    email, email type, phone, phone type; data starts at row 2.

    Returns:
        list[CrmCustomer]: one object per data row.
    """
    workBook = pyxl.load_workbook(fileName)
    # Subscription replaces the deprecated get_sheet_by_name().
    sheet = workBook[sheetName]
    arrCustomers = []
    for row in range(2, sheet.max_row + 1):
        rowNo = str(row)
        print('*** Customer ', row - 1, ': ***')
        firstname = sheet['A' + rowNo].value
        print(' - Firstname: ', firstname)
        lastname = sheet['B' + rowNo].value
        print(' - Lastname: ', lastname)
        print(' - Address: ')
        address1 = sheet['C' + rowNo].value
        print(' + Address 1: ', address1)
        address2 = sheet['D' + rowNo].value
        print(' + Address 2: ', address2)
        city = sheet['E' + rowNo].value
        print(' + City: ', city)
        stateCode = sheet['F' + rowNo].value
        print(' + State code: ', stateCode)
        state = sheet['G' + rowNo].value
        print(' + State: ', state)
        zip_code = sheet['H' + rowNo].value  # renamed: avoid shadowing builtin zip
        print(' + Zip: ', zip_code)
        addressObj = Address(address1, address2, city, stateCode, state,
                             zip_code)
        addressType = sheet['I' + rowNo].value
        print('- Address type: ', addressType)
        email = sheet['J' + rowNo].value
        print('- Email: ', email)
        emailType = sheet['K' + rowNo].value
        print('- Email type:', emailType)
        phone = sheet['L' + rowNo].value
        print('- Phone: ', phone)
        phoneType = sheet['M' + rowNo].value
        print('- Phone type:', phoneType)
        arrCustomers.append(CrmCustomer(firstname, lastname, addressObj,
                                        addressType, email, emailType,
                                        phone, phoneType))
    return arrCustomers
def get_all_database_address():
    """Fetch every address record from Firebase as Address objects.

    Each returned Address carries its Firebase key in ``firebasekey``.
    """
    ref = get_address_firebase()
    addresses = []
    for key, record in ref.get().items():
        addr = Address(record['streetAddress'], record['lat'], record['long'])
        addr.firebasekey = key
        addresses.append(addr)
    return addresses
def GetState(addr):
    """Ask the network for the balance of *addr* and return it."""
    Speak("API_ADDRESS_STATE", Address(addr, 0), client2)
    time.sleep(0.03)  # brief pause so the reply has time to arrive
    reply = Recieve(client2, "API_ADDRESS_STATE")
    return reply[0].amount
def __init__(self, endType, ownerId, nodeId, pointName, pointId, meta, data, databaseId):
    """Endpoint descriptor: identity (addr), metadata and payload refs."""
    self.pointName = pointName
    self.endType = endType
    self.meta = meta
    self.data = data
    # Fully-qualified address of this endpoint within its database.
    self.addr = Address(ownerId, nodeId, pointId, databaseId=databaseId)
    # Populated later when the endpoint is bound.
    self.receive_fn = None
    self.bindType = None
def GetState():
    """Look up the balance of the address typed into the sender entry
    and append it to the ledger widget."""
    addr = str(senderentry.get())
    Speak("API_ADDRESS_STATE", Address(addr, 0), client2)
    time.sleep(1)  # wait for the network reply
    state = Recieve(client2, "API_ADDRESS_STATE")[0].amount
    ledger.insert(END, f"State: {state}\n\n")
    ledger.see(END)
def feed(self, value, erase_last=True):
    """Filter the remembered addresses down to those still equal to *value*.

    The first pass for a type runs a full memory search over the
    configured window; later passes re-read each stored address and keep
    only the ones that still match. The refreshed {type: [Address, ...]}
    mapping is stored as ``self.last_iteration`` and returned.
    """
    self.last_value = value
    result = copy.copy(self.last_iteration)
    candidate_types = (("uint", "int", "long", "ulong", "float",
                        "double", "short", "ushort")
                       if self.type == "unknown" else (self.type,))
    for ctype in candidate_types:
        if ctype in result:
            # Refinement pass: drop addresses whose value went stale or
            # that can no longer be read.
            survivors = []
            for addr in result[ctype]:
                try:
                    if int(self.mw.process.read(addr, ctype)) == int(value):
                        survivors.append(Address(addr, self.mw.process, ctype))
                except Exception:
                    pass
            result[ctype] = survivors
        else:
            # Initial scan of the configured memory window.
            try:
                result[ctype] = [
                    Address(hit, self.mw.process, ctype)
                    for hit in self.mw.mem_search(
                        value, ctype,
                        start_offset=self.start, end_offset=self.end)
                ]
            except struct.error:
                result[ctype] = []
    if erase_last:
        del self.last_iteration
    self.last_iteration = result
    return result
def __init__(self, *args, **kwargs):
    """Construct a Memory cell from one of several address forms.

    Positional forms:
      0              -> special USER_INPUT pseudo-memory
      1              -> special STD_OUTPUT pseudo-memory
      str            -> parsed into (address string, length) by self.parse
      Address/Memory -> used directly as the address
      int            -> wrapped in Address(); optional 2nd arg = length

    NOTE(review): args[0] is read before args_len is checked, so calling
    with no positional args raises IndexError rather than the ValueError
    below; an unrecognised first-arg type leaves `address` unbound
    (NameError); and False == 0 matches the first branch. Confirm
    whether these paths can occur in practice.
    """
    args_len = len(args)
    self._is_special_mem = 0
    if args[0] == 0:  ## 1.44, USER_INPUT
        # Pseudo-memory backed by user input; unlimited length.
        self._address = None
        self._address_value = 0
        self._length = UNLIMITED_LENGTH
        self._value = '0'
        self._is_special_mem = 1
    elif args[0] == 1:  ## 1.5 ## STD_OUTPUT
        # Pseudo-memory backed by standard output; unlimited length.
        self._address = None
        self._address_value = 0
        self._length = UNLIMITED_LENGTH
        self._value = '1'
        self._is_special_mem = 2
    elif args_len:
        length = UNKNOWN_LENGTH
        if isinstance(args[0], str):
            # Textual spec: parse out the address and its length.
            address_string, length = self.parse(args[0])
            address = Address(address_string)
        elif isinstance(args[0], Address) or isinstance(args[0], Memory):
            address = args[0]
        elif isinstance(args[0], int):
            address = Address(args[0])
            # NOTE(review): structure ambiguous in the flattened source —
            # the explicit-length check may have applied to all forms,
            # not just int. Confirm against callers.
            if args_len == 2:
                length = args[1]
        self._address = address  ## 1.5
        self._address_value = self._address.get_value()
        # print "==>", address
        self._length = length
        self._value = self.get_runtime_value()
    else:
        raise ValueError('[Memory] invalid args ' + str(args))
def testVerifyAddress(self):
    """Checkout step 1 should accept an existing customer's address."""
    customerEmail = '*****@*****.**'
    # Load the existing customer record first.
    CrmCustomerTest.searchCustomerByEmail(self, customerEmail)
    CrmCustomerTest.loadCustomer(self)
    billing = Address('8210 Byron ave', '24', 'Miami beach', 'FL',
                      'Florida', '33141')
    self.checkoutStep1('Marco', 'Polo', customerEmail, billing,
                       '0909090909')
def __init__(self, pointType, endType, ownerId, nodeId, pointName, pointId, meta, databaseId):
    """Point descriptor: type/name plus a fully-qualified Address."""
    self.pointType = pointType
    self.pointName = pointName
    self.endType = endType
    self.meta = meta
    # databaseId must be passed through here (or a ref to the owner);
    # otherwise a lower layer compares against "our databaseId" and
    # treats the point as remote.
    self.addr = Address(ownerId, nodeId, pointId, databaseId=databaseId)
    # Set when the point is bound.
    self.bindType = None
def title_decision_confusion_matrix():
    """Run the supervised title classifier over the test set and return
    the predicted titles.

    Training sources are the per-category folders under cropped_text.
    """
    base_dir = Address(1).split("\n")[0]
    cropped_text_dir = os.path.join(base_dir, 'cropped_text')
    # Category labels double as folder names. Order is preserved from
    # the original hand-built SOURCES list so classifier label indices
    # stay stable; DAC and Delay_Line remain intentionally excluded.
    categories = ['ADC', 'BDRT', 'counters', 'DSP', 'IO', 'Opamp',
                  'Digital_Potentiometers', 'PLL', 'DCDC', 'CDC',
                  'Temperature_Sensor', 'SRAM', 'LDO']
    SOURCES = [[os.path.join(cropped_text_dir, label), label]
               for label in categories]
    # (Unused previous_*_dir locals and the disabled ngram/keyword
    # arbitration pipeline from the original were removed.)
    normal_classifier_title_result = supervised_classifier(SOURCES)
    titles = list(normal_classifier_title_result)
    print(titles)
    return titles