def test2_products_page(self):
    """Walk from the current page to the PixStor datasheet PDF and verify it.

    Steps: Products page -> PixStor page -> datasheet view, checking a
    keyword in the URL at each hop, then validates the downloaded PDF by
    size, Content-Type header, PyPDF2 producer metadata, and final URL.
    """

    def _report(result, label):
        # Record the check and echo its running index + outcome.
        self.tstatus.mark(result, label)
        print("Result " + str(len(self.tstatus.resultList)) + " = " + str(result))

    self.log.info("test2_goto_pixstor_page_started")
    self.basepixitmediaurl = self.googlesearchpage.returnCurrentURL()
    self.googlesearchpage.gotoProductsPage()
    _report(self.googlesearchpage.verifyWordExistInURL('products'),
            "products word Verified")
    self.googlesearchpage.gotoPixStor()
    _report(self.googlesearchpage.verifyWordExistInURL('PixStor-Search'),
            "PixStor-Search word Verified")
    self.googlesearchpage.viewDatasheet()
    _report(self.googlesearchpage.verifyWordExistInURL('Datasheet'),
            "Datasheet word Verified")
    time.sleep(3)
    req = urlget(self.driver.current_url)
    # validate PDF file
    # validate1 - simple by check size & header (contain pdf type)
    _report(self.googlesearchpage.verifyActualGreaterEqualExpected(
        int(req.headers['Content-Length']), 1000),
        "File bigger than 1000 bytes")
    _report(self.googlesearchpage.verifyTextEqual(
        req.headers['Content-Type'], 'application/pdf'),
        "File has Content-Type application/pdf")
    # validate2 - download & check file using PyPDF2
    # BUGFIX: close (and therefore flush) the file BEFORE PyPDF2 reads it.
    # The original parsed the file while it was still open for writing, so
    # the bytes on disk could be incomplete.
    with open(self.tmpfilename, "wb") as tmpfile:
        tmpfile.write(req.content)
    pdfproducer = PDFread(self.tmpfilename).getDocumentInfo().producer
    pdfchklist = ['Adobe', 'PDF', 'Acrobat']  # search these words - to confirm PDF
    listintrsect = [
        x.upper() for x in pdfchklist if x.upper() in pdfproducer.upper()
    ]
    _report(self.googlesearchpage.verifyActualGreaterEqualExpected(
        len(listintrsect), 1),
        "File is PDF format")
    result = self.googlesearchpage.verifyPageURL(
        "https://www.pixitmedia.com/wp-content/uploads/2018/03/PixStor-Search-Datasheet.pdf"
    )
    print("ResultLast = " + str(result))
    self.tstatus.markFinal("View pdf doc verified", result, "test2_products_page")
    self.googlesearchpage.gotosite(self.basepixitmediaurl)
    # Finally - remove tmpfile
    # BUGFIX: WindowsError is undefined on non-Windows interpreters; OSError
    # is the portable equivalent (WindowsError aliases OSError on Windows).
    try:
        removefile(self.tmpfilename)
    except OSError as exxe:
        print(' Error = ' + str(exxe) + ' / file = ' + str(self.tmpfilename))
def clear_dir(p_path, ext=None):
    """Delete files under ``p_path`` recursively.

    With ``ext`` given, only files whose name ends with that extension
    (case-insensitive) are removed and directories are left in place.
    With ``ext=None`` every file is removed and every subdirectory is
    deleted wholesale via ``rmtree``.
    """
    wipe_all = ext is None
    suffix = None if wipe_all else ext.lower()
    for folder, subdirs, filenames in walk(p_path):
        for name in filenames:
            if wipe_all or name.lower().endswith(suffix):
                removefile(path_join(folder, name))
        if wipe_all:
            # os.walk tolerates the removed subtrees on later iterations.
            for sub in subdirs:
                rmtree(path_join(folder, sub))
def remove(message):
    """Handle the chat 'remove' command: delete a custom command by name.

    Parses ``name=<command>`` out of the '&'-separated argument string in
    ``message.content``, removes the name from the server's command.list
    file and deletes its .command file. Returns the localized status
    message for the user.
    """
    server_dir = '../servers/' + message.server.id
    # Load the server's language strings (one message per line).
    sconfig = ConfigParser()
    sconfig.read(server_dir + '/serverconfig.ini')
    serverlang = sconfig.get('Config', 'lang')
    # BUGFIX: files are now closed via 'with' (originals leaked handles).
    with open("../config/lang/" + serverlang + ".lang", "r",
              encoding='utf-8') as langfile:
        lang = langfile.read().split("\n")
    splits = {}
    arg = message.content[1:].split("&")
    arg[0] = arg[0].split(" ")[1]
    # separate the command's arguments
    while len(arg) > 0:
        if arg[0].startswith("name"):
            # BUGFIX: slice instead of index so a bare "name" argument
            # yields the error message instead of an IndexError.
            if arg[0][4:5] == "=":
                splits['name'] = arg[0][5:]
                arg = arg[1:]
            else:
                return lang[12]
        else:
            arg = arg[1:]
    # check the command's arguments
    if "name" not in splits:
        return lang[44]
    # open command list file ('names' avoids shadowing the builtin 'list')
    with open(server_dir + '/command.list', "r",
              encoding='utf-8') as commandlist:
        names = commandlist.read().split("\n")
    if splits['name'] not in names:
        return lang[45]
    newlist = ""
    # Rebuild the list without the removed name; the final (empty) element
    # left by split("\n") is deliberately skipped, as in the original.
    while len(names) > 1:
        if names[0] == splits['name']:
            names = names[1:]
        else:
            newlist = newlist + names[0] + "\n"
            names = names[1:]
    # remove in the commandlist file
    with open(server_dir + '/command.list', "w",
              encoding='utf-8') as commandlist:
        commandlist.write(newlist)
    # remove command file
    command_path = server_dir + '/commands/' + splits['name'] + '.command'
    if path.exists(command_path):
        removefile(command_path)
    return lang[46]
def tearDown(self):
    """Quit the WebDriver, check for verification errors, drop the temp file."""
    print('\n--- > tearDown\n')
    self.driver.quit()
    self.assertEqual([], self.verificationErrors)
    try:
        removefile(self.tmpfilename)
        # print(' Successfully remove tmp file ' + str(self.tmpfilename))
    except OSError as exx:
        # BUGFIX: WindowsError is a NameError on non-Windows Python;
        # OSError covers it portably (WindowsError aliases OSError).
        print(' Error = ' + str(exx) + ' / file = ' + str(self.tmpfilename))
def clean_object_files():
    """Delete compiler object files (.obj on Windows, .o on Linux/macOS)
    from the directory named by the module-level ``cwd``.
    """
    # Platform -> object-file extension; unknown platforms do nothing.
    ext_by_platform = {"win32": ".obj", "linux": ".o", "darwin": ".o"}
    wanted = ext_by_platform.get(sys.platform)
    if wanted is None:
        return
    for file in os.listdir(cwd):
        _, ext = os.path.splitext(file)
        if ext == wanted:
            # BUGFIX: os.removefile does not exist (AttributeError); and the
            # path is joined with cwd so removal matches the listing even
            # when cwd differs from the process working directory.
            os.remove(os.path.join(cwd, file))
def check_filesystem():
    """Ensure the 'projetos' and 'log' directories exist next to this
    module, and delete a leftover setup zip (SETUP_ZIP) if present.
    """
    base = dirname(abspath(__file__))
    for sub in ("projetos", "log"):
        target = path_join(base, sub)
        if not exists(target):
            mkdir(target)
    leftover_zip = path_join(base, SETUP_ZIP)
    if exists(leftover_zip):
        removefile(leftover_zip)
def extract_cookie(domainname=""):
    """Read Chrome cookies from a copied cookie database.

    With ``domainname == ""`` every cookie is printed and an empty dict is
    returned. Otherwise a Selenium-style cookie dict for the given domain
    is returned (empty dict when none found).

    NOTE(review): when several cookies match the domain, the loop below
    overwrites ``cookie`` each iteration, so only the LAST one is kept —
    confirm this is intended.
    """
    ## Location of Cookie file (specific location + file)
    # Windows-style relative path into the Chrome profile directory.
    filepath = 'chromedata\\Default\\'
    file = 'Cookies'
    cookiefile = filepath + file
    cookietemp = filepath + 'mytmp123'
    print('\nCOOKIE_EXE_FILE = ' + str(cookiefile))
    # Work on a copy of the DB; presumably because Chrome locks the live
    # file while running — TODO confirm.
    copy2(src=cookiefile, dst=cookietemp)
    sleep(1)
    cookies = browser_cookie3.chrome(domain_name=domainname,
                                     cookie_file=cookietemp)
    if domainname == "":
        # Listing mode: dump every cookie to stdout.
        if len(cookies) >= 1:
            print('[No]) [Domainname] / [Name] / [Value] / [Expires]')
            i = 1
            for c in cookies:
                timeexpire = strftime('%Y-%m-%d %H:%M:%S',
                                      localtime(c.expires))
                print(
                    str(i) + ') ' + str(c.domain) + ' / ' + str(c.name) +
                    ' / ' + str(c.value) + ' / ' + str(timeexpire))
                i = i + 1
        else:
            print('COOKIE [ all_domain ] = NONE')
        cookie = {}
    else:
        # Lookup mode: build a Selenium add_cookie-compatible dict.
        if len(cookies) >= 1:
            cookie = {}
            for c in cookies:
                # cookie = {'domain': c.domain, 'name': c.name, 'value': c.value, 'secure': c.secure and True or False}
                cookie = {
                    'domain': c.domain,
                    'name': c.name,
                    'value': c.value,
                    'expiry': c.expires,
                    'path': c.path,
                    'httpOnly': False,
                    'HostOnly': False,
                    'sameSite': 'None',
                    'secure': c.secure and True or False
                }
            print('COOKIE [ ' + domainname + ' / ' + str(
                strftime('%Y-%m-%d %H:%M:%S',
                         localtime(cookie['expiry']))) + ' ] = ' +
                  str(cookie))
        else:
            print('COOKIE [ ' + domainname + ' ] = NONE')
            cookie = {}
    sleep(1)
    # Clean up the temporary DB copy.
    removefile(cookietemp)
    return cookie
def attach_df(msg: MIMEMultipart, name: str, df_set):
    """Render ``df_set`` into a temporary Excel file and attach it to ``msg``.

    Args:
        df_set: collection of [sheet_name, dataframe] pairs, e.g.
            [[a, df], [b, df], [c, df]] where a, b, c are worksheet names.

    Returns:
        The same ``msg``, with the workbook attached.
    """
    name = _fix_excel_name(name)
    tmp_path = environ["TEMP"] + ("\\%s" % name)
    create_excel(tmp_path, df_set)
    attach_file(msg, name, tmp_path)
    # Best-effort cleanup: an undeletable temp file must not fail the mail.
    try:
        removefile(tmp_path)
    except Exception as e:
        print("生成的 Excel 文件未删除,原因:\n" + repr(e))
    return msg
def show_saved_figure(fig):
    """Save ``fig`` to a temporary JPEG, display that image, then delete it.

    Warns when any border pixel is non-white, since that suggests the
    rendered figure was clipped at an edge.
    """
    from os import remove as removefile

    tmp_name = '.temp.jpg'
    fig.savefig(tmp_name, dpi=600)
    plt.figure(figsize=(10, 10), facecolor=(0.5, 0.5, 0.5, 0.5))
    im = plt.imread(tmp_name)
    # Inspect the four border rows/columns of the rasterized image.
    edges = (im[0, :], im[-1, :], im[:, 0], im[:, -1])
    if any(np.any(edge < 255) for edge in edges):
        print('Warning: Figure is probably clipped!')
    plt.imshow(im, aspect='equal')
    plt.axis('off')
    plt.show()
    removefile(tmp_name)
def test_one_login(self):
    """Log in as user one, persist the session cookies to a pickle file,
    reload them into class state, and delete the temp cookie file.
    """
    print('\n----> ' + str(self._testMethodName) + '\n')
    # GET python version & Browser version
    driver = self.driver
    from sys import version as pythonversion
    print('Python Version = ' + pythonversion)
    try:
        # Older Selenium exposes 'version' in capabilities.
        print('Browser version ( ' + self.driver.name + ' ) = ' +
              self.driver.capabilities['version'])
    except KeyError:
        # BUGFIX: narrowed the bare 'except:' — only the missing-key case
        # is expected; newer Selenium uses 'browserVersion'.
        print('Browser version ( ' + self.driver.name + ' ) = ' +
              self.driver.capabilities['browserVersion'])
    print()
    user1 = self.__class__.userone
    pswd1 = self.__class__.pswdone
    if driver.name == 'chrome':
        driver.maximize_window()
    driver.get(self.base_url)
    driver.find_element_by_name('username').click()
    driver.find_element_by_name('username').send_keys(user1 + Keys.ENTER)
    driver.find_element_by_name('password').click()
    driver.find_element_by_name('password').send_keys(pswd1 + Keys.ENTER)
    ## write login username/pswd to cookie file
    filename = "cookies.pkl"
    with open(filename, "wb") as cookiesfile_w:
        pickle.dump(driver.get_cookies(), cookiesfile_w)
    print('GET COOKIES = ' + str(driver.get_cookies()))
    sleep(1)
    print('CURRENT URL = ' + driver.current_url)
    self.__class__.mengkome_url = driver.current_url
    ## read cookie data and put to cookies variable at the top of the class
    with open(filename, "rb") as cookiesfile_r:
        self.__class__.cookies = pickle.load(cookiesfile_r)
    sleep(2)
    ## remove cookie file
    try:
        removefile(filename)
        print(' Successfully remove tmp file ' + filename)
    except OSError as exx:
        # BUGFIX: WindowsError is undefined on non-Windows interpreters.
        print(' Error = ' + str(exx) + ' / file = ' + filename)
def dropref(p_proj):
    """Delete all reference artifacts for project ``p_proj``: the current
    and warnings JSON files, every REF<digits/T>.json snapshot, and the
    generated code/table directories.
    """
    rd = _get_refdir(p_proj)
    # BUGFIX: the dot is escaped — the old pattern "REF[0-9T]+.json"
    # matched any character before "json", e.g. "REF1Xjson".
    patt = r"REF[0-9T]+\.json"
    for currfile in ("current.json", "warnings.json"):
        currpath = path_join(rd, currfile)
        if exists(currpath):
            removefile(currpath)
    for fl in listdir(rd):
        if search(patt, fl):
            flfull = path_join(rd, fl)
            removefile(flfull)
    the_dirs = ["code", "code_dest", "parameterstables", "tables"]
    # renamed loop variable: 'dirname' shadowed the os.path.dirname import
    # used elsewhere in this file.
    for sub in the_dirs:
        the_dir = path_join(rd, sub)
        if exists(the_dir):
            rmtree(the_dir)
def clean(): """ This method is intended to clean the residue of the testing. It should be run at the very end of the test. CAUTION: it calls `minikube_adm.clear_containers` which removes all Docker Containers in your machine. """ # Check minikube status and delete if minikube is running print 'Checking minikube status...' try: status = minikube_adm.check_status() if status['minikube'] == "Running" or status['minikube'] == "Stopped": print 'Deleting minikube...' try: minikube_adm.teardown() except Exception as err: print str(err) else: print 'Machine not present.' except Exception as err: print str(err) # Remove all docker containers print print 'Removing docker containers...' try: minikube_adm.clear_containers() except Exception as err: print str(err) print print 'Removing', NDM_TEST_YAML_NAME, '...' if isfile(NDM_TEST_YAML_PATH + NDM_TEST_YAML_NAME): removefile(NDM_TEST_YAML_PATH + NDM_TEST_YAML_NAME) print 'Removed.' else: print 'Not present.'
def query_osrm_to_shp(dict_coord, coord_liste_s, coord_liste_t, dstpath, host):
    """Query an OSRM server for every source/target pair and write the
    resulting routes to a shapefile.

    Takes a {coordinates: name} dict plus the source and target coordinate
    lists ("lat,lng" strings), sends one /viaroute request per pair to the
    OSRM server at ``host`` and stores the results in ``dstpath`` (.shp).
    Returns the number of routes successfully written.
    """
    testit, error = 0, 0
    # Coordinate system used to write the shapefile
    spatialreference = osr.SpatialReference()
    spatialreference.SetWellKnownGeogCS('WGS84')
    # Coordinate system used for the straight-line distance
    geod = pyproj.Geod(ellps='WGS84')
    # Output driver / datasource
    driver = ogr.GetDriverByName("ESRI Shapefile")
    try:
        if os.path.exists(dstpath):
            removefile(dstpath)
        dstfile = driver.CreateDataSource(dstpath)
        dstlayer = dstfile.CreateLayer("layer", spatialreference)
    except Exception as err:
        print(err, "\nErreur lors de la création du fichier")
        sys.exit(0)
    # Field names + definitions, looped over to create the layer schema.
    fields = [['ID', {'type': ogr.OFTInteger, 'width': 10}],
              ['Total_time', {'type': ogr.OFTInteger, 'width': 14}],
              ['Total_dist', {'type': ogr.OFTInteger, 'width': 14}],
              ['Dist_eucl', {'type': ogr.OFTInteger, 'width': 14}],
              ['Src_name', {'type': ogr.OFTString, 'width': 80}],
              ['Tgt_name', {'type': ogr.OFTString, 'width': 80}]]
    for field_name, detail in fields:
        fielddef = ogr.FieldDefn(field_name, detail['type'])
        fielddef.SetWidth(detail['width'])
        dstlayer.CreateField(fielddef)
    print("pyq-OSRM : {0} routes to calculate".format(
        len(coord_liste_s) * len(coord_liste_t)))
    # BUGFIX: defined before the loop so the cleanup below cannot raise
    # NameError when every route calculation fails.
    feature = None
    for source, target in range2d(coord_liste_s, coord_liste_t):
        src_name, tgt_name = dict_coord[source], dict_coord[target]
        # Build and send the request, then fetch the response
        url_query = ('{0}/viaroute?loc={1}&loc={2}'
                     '&instructions=false&alt=false').format(
                         host, source, target)
        try:
            response = urllib.request.urlopen(url_query)
        except Exception as err:
            print("\npyq-OSRM :\nErreur lors du passage de l'URL\n", err)
            sys.exit(0)
        # BUGFIX: HTTPResponse has no readall(); read() returns the body.
        parsed_json = json.loads(response.read().decode('utf-8'))
        # Euclidean distance between origin and destination
        _, _, distance_eucl = geod.inv(source[source.find(',') + 1:],
                                       source[:source.find(',')],
                                       target[target.find(',') + 1:],
                                       target[:target.find(',')])
        # If OSRM found no route, 'route_summary' is absent: the KeyError
        # branch reads the error status instead.
        try:
            total_time_osrm = parsed_json['route_summary']['total_time']
            total_dist_osrm = parsed_json['route_summary']['total_distance']
            # Geometry arrives as an encoded polyline: a list of (lat, lng).
            # NOTE(review): the /10.0 rescale suggests the server encodes
            # with precision 6 while the decoder assumes 5 — confirm against
            # the OSRM instance in use.
            epa_dec = PolylineCodec().decode(parsed_json['route_geometry'])
            ma_ligne = ogr.Geometry(ogr.wkbLineString)
            line_add_pts = ma_ligne.AddPoint_2D
            for coord in epa_dec:
                line_add_pts(coord[1] / 10.0, coord[0] / 10.0)
            # Write geometry + attribute fields
            feature = ogr.Feature(dstlayer.GetLayerDefn())
            feature.SetGeometry(ma_ligne)
            for f_name, f_value in zip(
                    ['ID', 'Total_time', 'Total_dist', 'Dist_eucl',
                     'Src_name', 'Tgt_name'],
                    [testit, total_time_osrm, total_dist_osrm,
                     distance_eucl, src_name, tgt_name]):
                feature.SetField(f_name, f_value)
            dstlayer.CreateFeature(feature)
            testit += 1
        except KeyError:
            error += 1
            if parsed_json['status'] == 207:
                print("Err #{0} : OSRM status 207 - "
                      "No route found between {1} and {2}".format(
                          error, src_name, tgt_name))
            else:
                print("Err #{0} : No route found between {1} and {2}".format(
                    error, src_name, tgt_name))
    if error > 0:
        print("\t{0} route calculations failed".format(error))
    if feature is not None:
        feature.Destroy()
    dstfile.Destroy()
    return testit
def clean_dir(d):
    """Delete every file directly inside directory ``d`` (non-recursive).

    Raises OSError if an entry is a subdirectory, as the original did.
    """
    for entry in listdir(d):
        removefile(joinpath(d, entry))
def json(self):
    """Serialize this media record to a plain dict, including the nested
    filme and tipo_de_midia records via their own json() methods.
    """
    return {
        "id": self.id,
        "data_aquisicao": self.data_aquisicao,
        "filme_id": self.filme_id,
        "tipo_de_midia_id": self.tipo_de_midia_id,
        "filme": self.filme.json(),
        "tipo_de_midia": self.tipo_de_midia.json()
    }


if __name__ == "__main__":
    # Rebuild the database from scratch: drop the old file (if any),
    # recreate the schema, then seed sample films.
    if fileexists(arquivobd):
        removefile(arquivobd)
    db.create_all()
    titanic = Filme(titulo="Titanic",
                    data_lancamento="05-09-1997",
                    preco=10.99)
    homens_de_preto = Filme(titulo="MIB: Homens de Preto",
                            data_lancamento="01-02-2005",
                            preco=1.99)
    covid_19 = Filme(titulo="Fim dos Tempo",
                     data_lancamento="05-09-2020",
                     preco=100.99)
def confsave():
    """Serialize ``conf_e`` with ``ini.stringify`` and write it to the
    configuration file, replacing any previous contents.
    """
    # BUGFIX: the unconditional removefile crashed with FileNotFoundError
    # on the first save; mode 'w' truncates anyway, so the pre-delete is
    # kept only as a guarded best-effort for parity with the old behavior.
    try:
        removefile(__config_file_name__)
    except OSError:
        pass
    with open(__config_file_name__, "w") as f:
        f.write(ini.stringify(conf_e))
        # (redundant f.close() removed: 'with' already closes the file)
# Read the FCA '1.1 Shares' sheet and print the `limit` most recently
# admitted issuers (Issuer Name + Start Date), newest first.
dataexcel = pd.read_excel(tmpfilename,
                          sheet_name='1.1 Shares',
                          skiprows=range(7),
                          usecols=['Issuer Name', 'Start Date'])
issuer_list = dataexcel['Issuer Name'].tolist()
start_list = dataexcel['Start Date'].tolist()
# Drop NaN padding rows; reverse so the most recent entries come first.
# (isinstance replaces the non-idiomatic `type(x) == str` checks, and the
# public pd.Timestamp replaces the private pd._libs.tslibs path.)
issuer_name = [x for x in issuer_list if isinstance(x, str)][::-1]
start_date = [x.date() for x in start_list if isinstance(x, pd.Timestamp)][::-1]
# zip pairs the two lists safely even if their lengths ever diverge
# (the old index-based build would have raised IndexError).
dict_list = dict(zip(issuer_name, start_date))
# print(dict_list)
limit = 200
dict_list_sort = dict(
    sorted(dict_list.items(), key=lambda x: x[1], reverse=True)[:limit])
num = 1
for k, v in dict_list_sort.items():
    if isinstance(k, str):
        print(num, ')', v, ' // ' + k)
        num += 1
try:
    from os import remove as removefile
    removefile(tmpfilename)
    # print(' Successfully remove tmp file ' + str(self.tmpfilename))
except OSError as exx:
    # BUGFIX: WindowsError is undefined on non-Windows interpreters.
    print(' Error = ' + str(exx) + ' / file = ' + str(tmpfilename))