def main():
    """Build per-person exam schedules and mail them out.

    Reads the staff list, scrapes the EGE/GIA exam pages, writes one .xls
    schedule per person, mails each remaining person their schedule, and
    sends a plain "not participating" notice to people with no assignments.
    """
    file = 'сотрудники 1553.txt'
    list_persons, list_mails = list_of_persons_and_mails(file)
    url_ege = 'http://rcoi.mcko.ru/index.php?option=com_content&view=article&id=898&Itemid=197'
    url_gia = 'http://rcoi.mcko.ru/index.php?option=com_content&view=article&id=1033&Itemid=211'
    cod_text = read_html_cod(url_ege, url_gia)
    bad_man = write_lists_of_persons_records(list_persons, cod_text)
    bad_man = [i for i in bad_man if i is not None]
    # Positions of people without exam assignments.  BUG FIX: enumerate()
    # replaces list.index(), which always returns the FIRST occurrence and
    # so mapped duplicate names/addresses to the wrong position.
    index_bad_man = [n for n, p in enumerate(list_persons) if p in bad_man]
    # BUG FIX: collect the "bad" addresses BEFORE filtering list_mails.
    # The original filtered first, so the bad-man addresses were looked up
    # against already-shifted indices and came out wrong/empty.
    list_mails_bad_man = [m for n, m in enumerate(list_mails)
                          if n in index_bad_man]
    list_mails = [m for n, m in enumerate(list_mails)
                  if n not in index_bad_man]
    # BUG FIX: build attachments only for the remaining people; otherwise
    # zip() below pairs each address with the wrong .xls file as soon as
    # one "bad" person has been dropped from the mail list.
    good_persons = [p for n, p in enumerate(list_persons)
                    if n not in index_bad_man]
    list_of_files = [[p + '.xls'] for p in good_persons]
    list_mails = [[m] for m in list_mails]
    from_email = '*****@*****.**'
    subject = ''
    for send_to, files in zip(list_mails, list_of_files):
        text = 'Ваше расписание экзаменов.'
        send_mail(from_email, send_to, subject, text, files,
                  server='smtp.gmail.com')
    for to_email in list_mails_bad_man:
        text = 'Вы не участвуете в проведении экзаменов'
        send(text, subject, from_email, to_email, host='smtp.gmail.com')
def confirm_job():
    """Flask view: confirm a job by mailing the submitted address.

    On POST, reads the 'email' form field (a missing field raises and is
    handled by Flask, as before), mails it, and returns the address as the
    response body.  If the send fails, redirects to the admin page.
    Non-POST requests fall through and return None (unchanged behavior).
    """
    if request.method == 'POST':
        email = request.form['email']
        try:
            send_mail.send(email)
            return email
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt.  Send failures still fall
            # back to the admin page, as before.
            return redirect('/admin')
def nova_callback_rabbitmq(self, ch, method, properties, body):
    """
    Method used by method nova_amq() to filter messages by type of message.

    Creates/deletes the matching Zabbix host when a Nova instance is
    created/deleted, sends a notification mail, and refreshes the
    ceilometer host list.

    :param ch: refers to the head of the protocol
    :param method: refers to the method used in callback
    :param properties: refers to the properties of the message
    :param body: refers to the message transmitted
    """
    body = json.loads(body)
    b = body['oslo.message']
    payload = json.loads(b)
    try:
        tenant_name = payload['_context_project_name']
        type_of_message = payload['event_type']
        if type_of_message == 'compute.instance.create.end':
            instance_id = payload['payload']['instance_id']
            instance_name = payload['payload']['hostname']
            instance_type = payload['payload']['instance_type']
            public_ip = payload['payload']['fixed_ips'][0]['address']
            ram = payload['payload']['memory_mb']
            root_disk = payload['payload']['root_gb']
            vcpu = payload['payload']['vcpus']
            host = payload['payload']['host']
            self.zabbix_handler.create_host(instance_name, instance_id, tenant_name)
            self.logger.info("Instance creation detected : creating host %s (tenant %s) on zabbix server" %(instance_name,tenant_name))
            send_mail.send(instance_name, instance_id, instance_type, public_ip, tenant_name, ram, root_disk, vcpu, host, 'create')
            self.logger.info("Send notified mail")
            self.ceilometer_handler.host_list = self.ceilometer_handler.get_hosts_ID()
        elif type_of_message == 'compute.instance.delete.start':
            # BUG FIX: this branch had no body at all (a bare `elif:`
            # immediately followed by another `elif`), which is a
            # SyntaxError.  delete.start needs no action here; the host
            # is torn down on delete.end below.
            pass
        elif type_of_message == 'compute.instance.delete.end':
            host = payload['payload']['instance_id']
            instance_name = payload['payload']['hostname']
            instance_type = payload['payload']['instance_type']
            public_ip = 'b'  # placeholder: fixed_ips are gone by delete.end
            ram = payload['payload']['memory_mb']
            root_disk = payload['payload']['root_gb']
            vcpu = payload['payload']['vcpus']
            physical_host = payload['payload']['host']
            try:
                host_id = self.zabbix_handler.find_host_id(host)
                self.zabbix_handler.delete_host(host_id)
                self.logger.info("Instance removal detected : deleting host %s from zabbix server" %(instance_name))
                send_mail.send(instance_name, host, instance_type, public_ip, tenant_name,ram, root_disk, vcpu, physical_host, 'delete')
                self.logger.info("Send notified mail")
                self.ceilometer_handler.host_list = self.ceilometer_handler.get_hosts_ID()
            except Exception:
                # BUG FIX: narrowed from a bare `except:` so interpreter
                # shutdown signals are not swallowed.
                pass  # TODO
        else:
            pass  # TODO
    except Exception:
        # BUG FIX: narrowed from a bare `except:` (malformed payloads are
        # still ignored, as before).
        pass  # TODO
def err(input):
    """Report an error: console banner, notification mail, and err.txt line.

    Best-effort by design — error reporting must never raise.
    NOTE(review): the parameter shadows the builtin ``input``; kept to
    preserve the public signature.
    """
    try:
        rint2("==============================================","e","ERR",bcolors.FAIL)
        rint2(input,"e","ERR",bcolors.FAIL)
        send_mail.send("illuminum ERROR",''.join(input), files=[], send_to="*****@*****.**",send_from="*****@*****.**", server="localhost")
        input_log="["+time.strftime("%Y_%m_%d")+"] "+input+"\r\n"
        # Append to the log; the with-block closes the file, so the old
        # redundant explicit close() was dropped.
        with open("err.txt", "a") as log_file:
            log_file.write(input_log)
        rint2("==============================================","e","ERR",bcolors.FAIL)
    except Exception:
        # BUG FIX: was a bare `except:`, which also trapped
        # SystemExit/KeyboardInterrupt.  Failures stay silent, as before.
        pass
def err(input):
    """Report an error: console banner, notification mail, and err.txt line.

    Deliberately best-effort — the reporter must never raise itself.
    NOTE(review): the parameter shadows the builtin ``input``; kept to
    preserve the public signature.
    """
    try:
        rint2("==============================================", "e", "ERR", bcolors.FAIL)
        rint2(input, "e", "ERR", bcolors.FAIL)
        send_mail.send("illuminum ERROR", ''.join(input), files=[],
                       send_to="*****@*****.**",
                       send_from="*****@*****.**", server="localhost")
        input_log = "[" + time.strftime("%Y_%m_%d") + "] " + input + "\r\n"
        # Append to the log; `with` closes the file, so the redundant
        # explicit close() was removed.
        with open("err.txt", "a") as log_file:
            log_file.write(input_log)
        rint2("==============================================", "e", "ERR", bcolors.FAIL)
    except Exception:
        # BUG FIX: was a bare `except:`, which also trapped
        # SystemExit/KeyboardInterrupt.  Failures stay silent, as before.
        pass
def main():
    """Mail each staff member their exam schedule (.xls) or a notice.

    Scrapes the EGE/GIA pages, writes per-person schedule files, then
    e-mails schedules to assigned people and a "not participating" text
    to the rest.
    """
    file = 'сотрудники 1553.txt'
    list_persons, list_mails = list_of_persons_and_mails(file)
    url_ege = 'http://rcoi.mcko.ru/index.php?option=com_content&view=article&id=898&Itemid=197'
    url_gia = 'http://rcoi.mcko.ru/index.php?option=com_content&view=article&id=1033&Itemid=211'
    cod_text = read_html_cod(url_ege, url_gia)
    bad_man = write_lists_of_persons_records(list_persons, cod_text)
    bad_man = [i for i in bad_man if i is not None]
    # BUG FIX: enumerate() replaces list.index(); .index() returns only the
    # first occurrence, corrupting positions when names/addresses repeat.
    index_bad_man = [n for n, p in enumerate(list_persons) if p in bad_man]
    # BUG FIX: pick the "bad" addresses BEFORE filtering list_mails — the
    # original filtered first, so indices had already shifted and the
    # bad-man address list came out wrong/empty.
    list_mails_bad_man = [m for n, m in enumerate(list_mails)
                          if n in index_bad_man]
    list_mails = [m for n, m in enumerate(list_mails)
                  if n not in index_bad_man]
    # BUG FIX: attachments must come from the remaining people only, or the
    # zip() below pairs addresses with the wrong .xls files once anyone has
    # been dropped.
    good_persons = [p for n, p in enumerate(list_persons)
                    if n not in index_bad_man]
    list_of_files = [[p + '.xls'] for p in good_persons]
    list_mails = [[m] for m in list_mails]
    from_email = '*****@*****.**'
    subject = ''
    for send_to, files in zip(list_mails, list_of_files):
        text = 'Ваше расписание экзаменов.'
        send_mail(from_email, send_to, subject, text, files,
                  server='smtp.gmail.com')
    for to_email in list_mails_bad_man:
        text = 'Вы не участвуете в проведении экзаменов'
        send(text, subject, from_email, to_email, host='smtp.gmail.com')
def check_availability():
    """Poll the Nexus 4 Play-Store page forever; mail once it is purchasable.

    Queries through PROXY_URL every `delay` seconds (initially
    DEFAULT_DELAY).  When 'add to cart' appears, sends one mail and backs
    off to 24 hours so we do not spam.  (Python 2: urllib.urlopen with a
    `proxies` argument.)
    """
    delay = DEFAULT_DELAY
    while True:
        try:
            log("Querying the page")
            proxies = {'http': PROXY_URL}
            response = urllib.urlopen(NEXUS_4_GOOGLE_URL, proxies=proxies)
            page = response.read()
            if 'sold out' in page.lower():
                log("sold out")
            elif 'add to cart' in page.lower():
                log("available")
                send(NEXUS_4_GOOGLE_URL)
                log("Mail sent")
                delay = 86400  # back off to one day once the mail is out
            else:
                log("No clear response: ")
                log(page)
        except Exception:
            # BUG FIX: was a bare `except:` in an infinite loop, which also
            # ate KeyboardInterrupt and made the process unkillable with
            # Ctrl-C.  Network/parse errors are still logged and retried.
            log("Exception")
            traceback.print_exc()
        finally:
            time.sleep(delay)
def btn_ok_click(to, subj, content, root: Tk, load, sent):
    """Validate the compose form, send the mail, and report via message boxes.

    Guard-clause order matches the original checks: at least one address,
    addresses valid, body non-empty; only then is the mail sent.
    """
    recipients_raw = to.get().replace(to_placeholder, '')
    v_e = validate_email(email=recipients_raw)  # non-valid emails str
    if clean_str(recipients_raw) == '':
        messagebox.showerror('Ошибка', 'Неободим минимум один почтовый адрес')
        return
    if v_e != '':
        messagebox.showerror('Ошибка',
                             'Ошибка при валидации почтовых адресов:\n' + v_e)
        return
    if clean_str(content.get("1.0", 'end-1c').replace(text_placeholder, '')) == '':
        messagebox.showerror('Ошибка', 'Сообщение обязательно к заполнению')
        return
    delivered = send_mail.send(
        to=clean_str(to.get()).replace(' ', '').split(','),
        subj=clean_str(subj.get()),
        content=content.get("1.0", 'end-1c'))
    if delivered:
        messagebox.showinfo('Ok', 'Успешно отправлено')
        change_screen(root=root, load=load, sent=sent)
    else:
        messagebox.showerror('Ошибка', 'Ошибка при отправке')
def __callback_send_require(self):
    """Validate the four bounding-box inputs and e-mail a '[south]' request.

    Parses west/east longitude and north/south latitude from the entry
    widgets, mails the request, then either confirms and closes the window
    or shows a network-error dialog.  (Dead commented-out validation code
    was removed; re-add a north>south check once __check_input guarantees
    numeric returns.)
    """
    receive = '*****@*****.**'
    # True = parse as longitude, False = parse as latitude.
    self.w_long = self.__check_input(self.entry_leftlon.get(), True)
    self.e_long = self.__check_input(self.entry_rightlon.get(), True)
    self.n_lati = self.__check_input(self.entry_leftlat.get(), False)
    self.s_lati = self.__check_input(self.entry_rightlat.get(), False)
    subject = ('[south]' + str(self.w_long) + ' ' + str(self.e_long) + ' ' +
               str(self.n_lati) + ' ' + str(self.s_lati))
    ifsucss = send_mail.send(receive, subject)
    if ifsucss:
        tkMessageBox.showinfo('info', '发送成功')
        root.destroy()  # NOTE(review): relies on a module-level Tk `root`
    else:
        tkMessageBox.showerror('Wrong', '邮件发送失败,请检查网络')
def eveluate(): if "user" in session: user_name = session["user"] if user_name == "abhi0444": dict = session["dict"] if request.method == 'POST': cutoff = request.form['cutoff'] found = job.query.all() company_name = [] for item in found: if item.job_id == dict['job_id']: company_name.append(item.company_name) break for i in range(0, len(dict['cv_score'])): found_user = user_details.query.filter_by( user_name=dict['user_name'][i]).first( ) #send(e_mail_sender,name,company,action(int),p): if (float(dict['cv_score'][i]) == float(0)): send_mail.send(found_user.email, found_user.name, company_name[0], 1, 0) else: if float(dict['total_score'][i]) > float(cutoff): send_mail.send(found_user.email, found_user.name, company_name[0], 1, 1) else: send_mail.send(found_user.email, found_user.name, company_name[0], 0, 1) return render_template('eveluate.html', dict=dict, k=len(dict['name']), msg="Email sent to user", user=user_name) else: return render_template('eveluate.html', dict=dict, k=len(dict['name']), user=user_name) else: return redirect(url_for("login")) else: return redirect(url_for("login"))
def find_full(pos):
    # Expand one redirect URL inside the module-global CONTENT, in place.
    #
    # `pos` is the index where PATTERN (the redirect prefix) starts; the
    # full redirect URL runs up to the next double quote.  The real target
    # is the `q` query parameter, which replaces the redirect URL (first
    # occurrence only).  Returns len(href) so the caller can advance its
    # scan position past the substituted text.
    global CONTENT
    end = CONTENT.find("\"", pos)
    url = CONTENT[pos:end]
    urlp = urlparse(url)
    href = parse_qs(urlp.query)['q'][0]
    CONTENT = CONTENT.replace(url, href, 1)
    return len(href)


def format():
    # Scan CONTENT for every PATTERN occurrence and unwrap each redirect.
    # NOTE(review): shadows the builtin `format`; the `global PATTERN`
    # declaration is unnecessary (PATTERN is only read, never assigned).
    global PATTERN
    pos = 0
    while True:
        pos = CONTENT.find(PATTERN, pos)
        if pos == -1:
            break
        # print(pos)
        add_pos = find_full(pos)
        pos += add_pos


if __name__ == "__main__":
    # Pull the e-mail body from the configured document, clean up the
    # redirect links in place, then send it.
    content_email_config = load_json('content.json')
    CONTENT = export_file(content_email_config['file_id']).decode("utf-8")
    format()
    send(CONTENT, content_email_config['subject'])
    print('Done!')
# Load saved credentials (one 'KEY VALUE' pair per line) into DCARD_INFO.
logger.info(u'檢查登入文件:{}'.format(os.path.exists(login_file)))
if os.path.exists(login_file):
    logger.info(os.path.abspath(login_file))
    with open(login_file, 'r') as f:
        data = f.readlines()
    for d in data:
        d = d.split(' ')
        DCARD_INFO[d[0]] = d[1].replace('\n', '')
# Retry each Dcard board until yesterday's articles are fetched, then mail
# the digest and move on to the next board.
# NOTE(review): the key 'SMPT_PASSWORD' looks like a typo for
# 'SMTP_PASSWORD' — but it must match whatever key the login file writes;
# confirm before renaming.
while (True):
    status, mail_text = open_web(logger, yesterday,
                                 DCARD_INFO['NKFUST_ACCOUNT'],
                                 DCARD_INFO['NKFUST_PASSWORD'], u'高科狄卡')
    if status != False:
        send(logger, DCARD_INFO['SMTP_ACCOUNT'], DCARD_INFO['SMPT_PASSWORD'],
             u'高科狄卡', mail_text, today)
        break
while (True):
    status, mail_text = open_web(logger, yesterday,
                                 DCARD_INFO['NCKU_ACCOUNT'],
                                 DCARD_INFO['NCKU_PASSWORD'], u'成大狄卡')
    if status != False:
        send(logger, DCARD_INFO['SMTP_ACCOUNT'], DCARD_INFO['SMPT_PASSWORD'],
             u'成大狄卡', mail_text, today)
        break
# NOTE(review): this chunk ends mid-sequence — the NTU loop's send/break
# continues outside the visible chunk.
while (True):
    status, mail_text = open_web(logger, yesterday,
                                 DCARD_INFO['NTU_ACCOUNT'],
                                 DCARD_INFO['NTU_PASSWORD'], u'台大狄卡')
# CMC_glb_TMP_ISBL_925_latlon.24x.24_2010090800_P042.grib2 cfile = office + us + system + us + Variable[ cVar] + us + 'latlon.24x.24' + us + YYYYMMDDHH + us + Phhh + FH_s + ending print(cfile) print(cpath + cfile) print(" ") tries_left = 5 # Number of times to download the file while tries_left > 0: try: request = urllib2.urlopen(cpath + cfile, timeout=30) except: print('Error opening url file ' + cfile + '. Remaining tries=' + str(tries_left)) tries_left = tries_left - 1 # If we have already tried 3 times, then call for help if tries_left == 0: send_mail.send( str('GDPS download failed. ' + cpath + cfile)) continue else: with open(os.path.join(download_dir, cfile), 'wb') as f: try: f.write(request.read()) except: tries_left = tries_left - 1 print("Error downloading file") continue else: break
print "Before split : %d" % len(orders_list) orders.split_orders_by_percent(orders_list) print "After split: %d" % len(orders_list) print "After modification:", len(orders_list) print "Find errors" errors = orders.get_errors(orders_list) if len(errors) > 0: email_text = "" for error in errors: email_text += error.values()[0] + "\n" print "Send email alert" send_mail.send(email_config, ["*****@*****.**"], "Ошибки при примеме заказов из excel", email_text, [excel_path + current_excel_file] ) print "Insert orders into db" inserted_rows = orders.insert_orders(orders_list) print "Inserted %d of %d" % (inserted_rows, len(orders_list)) orders.set_single_pack_value(order_date) print "Move file" utils.move_file(excel_path + current_excel_file, processed_path) print "Well done!!!\n" print "Finish!!!"
# Fragment of a per-ticker scraping loop: hisse_fiyat (the share price
# scraped above this chunk) plus 8 summary-table values go into one CSV row.
stock.append(hisse_fiyat)
table_info = soup.find(
    'div', class_="D(ib) W(1/2) Bxz(bb) Pend(12px) Va(t) ie-7_D(i) smartphone_D(b) "
    "smartphone_W(100%) smartphone_Pend(0px) smartphone_BdY "
    "smartphone_Bdc($seperatorColor)")
# The summary table holds 8 label/value rows (prev close, open, ...).
for i in range(0, 8):
    satirlar = table_info.find_all("tr")[i].find_all("td")
    # print(table_info)
    satir_baslik = satirlar[0].get_text()  # row label
    satir_deger = satirlar[1].get_text()   # row value
    print(satir_baslik + ": " + satir_deger)
    stock.append(satir_deger)
csv_writer.writerow(stock)
print('********************************')
# If we don't pause ~5 s (or some delay) between requests, the site may
# decide we are a scraping bot and block us.  (translated from Turkish)
# time.sleep(1)
csv_file.close()
send_mail.send(today)
# send_mail.send('tek.pdf')
print('---END OF LIST---')
import requests
import bs4

import send_mail

# Carrier codes probed in order until one returns tracking data.
# NOTE(review): the 'itmes' typo in the name is kept — it is module-level
# and may be referenced elsewhere.
express_itmes = ['sfexpress', 'yunda', 'sto', 'yto', 'zto', 'ems', 'ttdex',
                 'htky', 'qfkd', 'chinapost']

# URL template; 'express_type' / 'express_number' are substituted per query.
request_url = 'http://m.46644.com/express/result.php?typetxt=%D6%D0%CD%A8&type=express_type&number=express_number'


def get_express_info_with_number(number):
    """Query every known carrier for tracking number `number`.

    Returns a list of progress texts from result pages that report neither
    an error ('错误') nor a missing record ('不存在').
    """
    express_info = []
    for item in express_itmes:
        url = request_url.replace('express_type', item).replace(
            'express_number', number)
        response = requests.get(url)
        response.encoding = 'gb18030'  # the site serves GB-encoded pages
        response = response.text
        soup = bs4.BeautifulSoup(response, 'html.parser',
                                 from_encoding='utf-8')
        for i in soup.findAll(name='div', attrs={'class': 'icontent'}):
            desc_text = i.get_text()
            if '错误' not in desc_text and '不存在' not in desc_text:
                # BUG FIX: reuse desc_text instead of calling get_text() a
                # second time; the trailing no-op `continue` (it was the
                # last statement of the loop body) was dropped.
                express_info.append(desc_text)
    return express_info


if __name__ == '__main__':
    info = get_express_info_with_number('YT9107697526961')  # was 'ifno' typo
    send_mail.send('*****@*****.**', '快递物流进度推送', info)
    print(info)
# Fragment of a per-ticker loop: fetch one quote page, extract the title,
# live price, and the 8 summary-table values into one CSV row.
html_page = requests.get(url, headers=headers)
soup = BeautifulSoup(html_page.content, 'lxml')
stock_title = soup.find_all(
    "div", id="quote-header-info")[0].find("h1").get_text()
current_price = soup.find(
    "div", class_="D(ib) Mend(20px)").find("span").get_text()
stock.append(stock_title)
stock.append(current_price)
# print(stock_title)
# print(current_price)
# Summary table: 8 label/value rows (prev close, open, bid, ask, ...).
table_info = soup.find_all(
    "div",
    class_=
    "D(ib) W(1/2) Bxz(bb) Pend(12px) Va(t) ie-7_D(i) smartphone_D(b) smartphone_W(100%) smartphone_Pend(0px) smartphone_BdY smartphone_Bdc($seperatorColor)"
)[0].find_all("tr", )
for i in range(0, 8):
    # heading = table_info[i].find_all("td")[0].get_text()
    value = table_info[i].find_all("td")[1].get_text()
    stock.append(value)
csv_writer.writerow(stock)
# print(heading + " : " + value)
# print("---------------")
time.sleep(3)  # throttle between requests so the site does not block us
csv_file.close()
send_mail.send(filename=today)
# --- tail of an enclosing upload/cleanup function (its `def` line is
# outside this chunk).
# NOTE(review): `raise False` is a bug — raising a non-exception object
# actually raises TypeError at runtime; should be e.g.
# `raise RuntimeError("delete failed")`.
raise False
mylogger.info("send successed")
return True


def do():
    # Back up the local mongo database named in database.properties:
    # dump it, tar the dump, and ship the archive.
    db_com = "grep mongo.name /etc/y7tech/ucdisk/database.properties"
    db_res = commands.getstatusoutput(db_com)  # Python 2 'commands' module
    if db_res[0] != 0:
        mylogger.info(db_com)
        mylogger.info("get dbname failed")
        # NOTE(review): same `raise False` bug — raises TypeError, which the
        # bare except in __main__ below happens to catch anyway.
        raise False
    if db_res[1].split('=')[1] == "@DATABASE_NAME@":
        # Placeholder value: the properties template was never configured.
        mylogger.info("This db is not data")
        return True
    else:
        db_name = db_res[1].split('=')[1]
        back_dir = mk()
        bak(db_name, back_dir)
        tar(back_dir, db_name, td)
        send(back_dir)


# do()
if __name__ == "__main__":
    try:
        do()
    # NOTE(review): bare except — ANY failure (including the TypeError from
    # `raise False`) triggers the alert mail.
    except:
        send_mail.send(mongo_num)
        mylogger.info("mongodb bakup failed")
def handler_listen(channel):
    """GPIO callback: when the motion pin is high, send e-mail and SMS
    alerts and hand both results to the response handler."""
    if not GPIO.input(pin):
        return
    print('Movement!')
    email_result = send_mail.send(cfg)
    sms_result = send_sms.send(cfg)
    handler_reponses(email_result, sms_result)
srf_files = [ x for x in all_files if not 'HGT_ISBL' in x and not 'TMP_ISBL' in x ] # Load surface variables print('Loading Surface variables') # ds = xr.open_mfdataset(srf_files,concat_dim='time',engine='pynio',lock=threading.Lock()) try: ds = xr.open_mfdataset(srf_files, concat_dim='forecast_hour', engine='pynio', preprocess=lambda x: preprocess(x)) ds = add_datetime_dataset(ds) except: send_mail.send( str('GDPS GRIB to NETCDF: Cannot open GDPS surface grib2 files')) # Load upper atmosphere variables print('Loading upper air Temperature') ds_UA_T = load_GEM_4d_var(PresLevs, UA_TMP, 'TMP_P0_L100_GLL0', 'TMP_', preprocess) ds_UA_T = add_datetime_dataarray(ds_UA_T) print('Loading upper air height at pressurelevels') ds_UA_HGT = load_GEM_4d_var(PresLevs, UA_HGT, 'HGT_P0_L100_GLL0', 'HGT_', preprocess) # Convert Geopotential height to geometric height (http://www.pdas.com/geopot.pdf) #ds_UA_HGT = ds_UA_HGT* 6371*1000 / (6371*1000/()-ds_UA_HGT) ds_UA_HGT = add_datetime_dataarray(ds_UA_HGT) # Merge together
\nSi quieres colaborar en el estudio de la gripe, ¡no lo dudes y participa! \n- Visita la web GripenNet.es y regístrate como voluntario \n- Ven a la presentación: para asistir solo tienes que inscribirte. \nAquí puedes consultar el programa para el día 25 Flyer Gripenet Desde ibercivis se ha organizado el acto, un sólido programa que para cubrir todos los aspectos. \nTe esperamos. #Todos con la ciencia #SomosCiencia \n\nMil gracias \n -- firma: \nEl equipo de Ibercivis \n@Ibercivis" """ body =u""" <p style="border:0px none;font-family:'helvetica neue',Helvetica,Arial,Verdana,sans-serif;font-size:11.818181991577148px;margin:0px;padding:0.5em 0px;vertical-align:baseline;color:rgb(68,68,68);line-height:16.363636016845703px"> - Visita la web <a href="http://www.gripenet.es/" style="border:0px none;font-family:inherit;font-size:11.818181991577148px;font-style:inherit;font-weight:inherit;margin:0px;padding:0px;vertical-align:baseline;color:rgb(10,188,229);text-decoration:none" target="_blank">GripenNet.es</a> y regístrate como voluntario</p> """ #fileSource= 'email_list' #fsrc = open(fileSource, "r") #for line in fsrc: # line =line[:-1] # print line # send_mail.send("*****@*****.**", line, subject, body) #fsrc.close() send_mail.send("*****@*****.**", "*****@*****.**", subject, body)
# One CSV row per ticker: page title, live price, then the 8 values from
# the quote summary table.
for url in urls:
    stock = []
    html_page = requests.get(url, headers=header)
    # print(html_page.content)
    soup = BeautifulSoup(html_page.content, 'lxml')  # lxml is a very fast and reliable parser
    # print(soup.title) or
    # print(soup.find('title').get_text())
    stock_title = soup.find_all('div', id='quote-header-info')[0].find('h1').get_text()  # Static value retrieval
    # class is a keyword, hence to extract the class value of a particular
    # html tag we use class_
    current_price = soup.find_all('div', id='quote-header-info')[0].find(
        'div', class_='My(6px) Pos(r) smartphone_Mt(6px)').find('span').get_text()
    # print(stock_title)
    # print(current_price)
    stock.append(stock_title)
    stock.append(current_price)
    table_info = soup.find_all(
        'div',
        class_='D(ib) W(1/2) Bxz(bb) Pend(12px) Va(t) ie-7_D(i) smartphone_D(b) smartphone_W(100%) smartphone_Pend(0px) smartphone_BdY smartphone_Bdc($seperatorColor)'
    )[0].find_all('tr')
    # print(table_info)
    # Summary table: 8 label/value rows.
    for i in range(0, 8):
        # heading=table_info[i].find_all('td')[0].get_text()
        value = table_info[i].find_all('td')[1].get_text()
        stock.append(value)
        # print(heading + ' : ' + value)
    csv_write.writerow(stock)
    time.sleep(5)  # throttle so the site does not flag us as a bot
csv_file.close()
send_mail.send(filename=today_file)
# --- tail of get_url() (its `def` line is outside this chunk): scrape the
# newest announcement and return [title, date, href].
res = requests.get(url)
text = res.content
# with open('main.html','wb') as f:
#     f.write(text)
#     f.close()
soup = BeautifulSoup(text, 'lxml')
info_list = soup.find_all('ul', class_='liBox')[0]
# Grab the individual entries; for now only the first one is considered.
# (translated from Chinese)  NOTE: returns inside the loop, i.e. after the
# first <li>.
for one in info_list.find_all('li'):
    title = one.text
    date = one.span.text
    href = url + one.a.attrs['href'][2:]
    return [title, date, href]


init_info = get_url()
# A trailing space in `address` means "mail the latest item immediately".
if address[-1] == ' ':
    details = get_detail(init_info[2])
    send(title=init_info[0], main_text=details)
else:
    address = address.strip()
print('目前最新:', init_info[0])
# Poll every 30 s; mail when the newest item changes.
# NOTE(review): `init_info` is never refreshed after a change is detected,
# so once an update appears this loop re-sends on every iteration.
while True:
    info = get_url()
    if info != init_info:
        details = get_detail(info[2])
        print('有新消息:{}'.format(info[0]))
        send(title=info[0], main_text=details, address=address)
    else:
        print('\r{} 无变化'.format(datetime.datetime.now()))
        time.sleep(30)
# --- continuation of a csv_writer.writerow([... CSV header ...]) call whose
# opening bracket is outside this chunk ---
    'Day range', '52 week range', 'Volume', 'Avg Volume'
])
# One CSV row per ticker: title, price, then the 8 summary-table values.
for url in urls:
    stock = []
    html_page = requests.get(url, headers=headers)
    soup = BeautifulSoup(html_page.content, 'lxml')
    # title=soup.find("title").get_text()
    header_info = soup.find_all("div", id="quote-header-info")[0]
    stock_title = header_info.find("h1").get_text()
    stock_amt = header_info.find(
        "div",
        class_="My(6px) Pos(r) smartphone_Mt(6px)").find("span").get_text()
    stock.append(stock_title)
    stock.append(stock_amt)
    table_info = soup.find_all(
        "div",
        class_=
        "D(ib) W(1/2) Bxz(bb) Pend(12px) Va(t) ie-7_D(i) smartphone_D(b) smartphone_W(100%) smartphone_Pend(0px) smartphone_BdY smartphone_Bdc($seperatorColor)"
    )[0].find_all('tr')
    # Summary table: 8 label/value rows.
    for i in range(0, 8):
        #heading=table_info[i].find_all("td")[0].get_text()
        value = table_info[i].find_all("td")[1].get_text()
        stock.append(value)
    csv_writer.writerow(stock)
    time.sleep(5)  # throttle so the site does not flag us as a bot
csv_file.close()
send_mail.send(filename="scrap.csv")
# Number of fetched feeds
NUM_FEED = 5
# Japan Today's feed url
URL_FEED = "https://japantoday.com/feed"

if __name__ == '__main__':
    # Download the RSS feed.
    with urllib.request.urlopen(URL_FEED) as response:
        html = response.read()
    # Parse the XML payload.
    root = ET.fromstring(html)
    # Save each article as a PDF attachment.
    attachment_list = []
    for i, link in enumerate(root.iter('link')):
        print("[" + str(i) + "]" + link.text)
        # Link 0 is the feed's root page; links 1..NUM_FEED are articles.
        if 1 <= i <= NUM_FEED:
            article = Article()
            article.setUrl(link.text)
            article.fetch()
            attachment_list.append(article.save())
    # Mail all saved PDFs as one message.
    msg = sm.create_message(attachment_list)
    sm.send(msg)
# Partition the GRIB2 file list: upper-air temperature/height files vs the
# rest (surface variables).
UA_TMP = [x for x in all_files if 'TMP_ISBL' in x]
UA_HGT = [x for x in all_files if 'HGT_ISBL' in x]
# Presure levels to extract air temperature from
PresLevs = ['1015', '1000', '0985', '0970', '0950', '0925', '0900']
srf_files = [x for x in all_files
             if not 'HGT_ISBL' in x and not 'TMP_ISBL' in x]
# Load surface variables
print('Loading Surface variables')
#ds = xr.open_mfdataset(srf_files,concat_dim='time',engine='pynio',lock=threading.Lock())
try:
    ds = xr.open_mfdataset(srf_files, concat_dim='forecast_hour',
                           engine='pynio',
                           preprocess=lambda x: preprocess(x))
    ds = add_datetime_dataset(ds)
# NOTE(review): bare except, and execution continues with `ds` unbound —
# a NameError will follow downstream right after the alert mail.
except:
    send_mail.send(str('HRDPS GRIB to NETCDF: Cannot open HRDPS surface grib2 files'))
# Load upper atmosphere variables
print('Loading upper air Temperature')
ds_UA_T = load_GEM_4d_var(PresLevs, UA_TMP, 'TMP_P0_L100_GST0', 'TMP_',
                          preprocess)
ds_UA_T = add_datetime_dataarray(ds_UA_T)
print('Loading upper air height at pressurelevels')
ds_UA_HGT = load_GEM_4d_var(PresLevs, UA_HGT, 'HGT_P0_L100_GST0', 'HGT_',
                            preprocess)
# Convert Geopotential height to geometric height (http://www.pdas.com/geopot.pdf)
#ds_UA_HGT = ds_UA_HGT* 6371*1000 / (6371*1000/()-ds_UA_HGT)
ds_UA_HGT = add_datetime_dataarray(ds_UA_HGT)
# Merge together
ds_UA = xr.merge([ds_UA_T, ds_UA_HGT])
def my_job():
    """Scheduled job: fetch the latest renting info and mail it out."""
    report = get_renting_info()
    send_mail.send('*****@*****.**', '豆瓣租房推送', report)
# --- interior of the attendance video loop; the enclosing capture loop and
# the `vid` setup are outside this chunk ---
# Record each newly recognized (non-"Unknown") face once.
if name_ not in found_names and name_ != "Unknown":
    found_names.append(name_)
cv2.putText(frame, name_, (left + 6, bottom - 6), font, 1.0, (0, 0, 255), 1)
cv2.imshow('Video', frame)
# cv2.imshow('Crop', crop)
if cv2.waitKey(1) & 0xFF == ord('q'):
    # On 'q': write who was seen vs. who is missing, mail the report, stop.
    fp = open('attendance.txt', 'w+')
    fp.write('In class:\n')
    for student in found_names:
        if student in students:
            fp.write(student + '\n')
            students.remove(student)
    # Whoever is left in `students` was never recognized.
    fp.write('\nNot here:\n')
    for student in students:
        fp.write(student + '\n')
    fp.close()
    send_mail.send()
    break  # exits the enclosing video loop (not visible in this chunk)
vid.release()
cv2.destroyAllWindows()
# --- tail of an enclosing send/cleanup function (its `def` line is outside
# this chunk): remove the shipped archive, failing loudly if rm fails.
rm_res = commands.getstatusoutput(rm_com)
if rm_res[0] != 0:
    mylogger.info(rm_com)
    mylogger.info("delete failed")
    # NOTE(review): `raise False` is a bug — raising a non-exception object
    # actually raises TypeError; should be e.g.
    # `raise RuntimeError("delete failed")`.
    raise False
mylogger.info("send successed")
return True


def do():
    # Back up the local mongo database named in database.properties:
    # dump it, tar the dump, and ship the archive.
    db_com = "grep mongo.name /etc/y7tech/ucdisk/database.properties"
    db_res = commands.getstatusoutput(db_com)  # Python 2 'commands' module
    if db_res[0] != 0:
        mylogger.info(db_com)
        mylogger.info("get dbname failed")
        # NOTE(review): same `raise False` bug — raises TypeError, which
        # the bare except in __main__ below happens to catch anyway.
        raise False
    if db_res[1].split('=')[1] == "@DATABASE_NAME@":
        # Placeholder value: the properties template was never configured.
        mylogger.info("This db is not data")
        return True
    else:
        db_name = db_res[1].split('=')[1]
        back_dir = mk()
        bak(db_name, back_dir)
        tar(back_dir, db_name, td)
        send(back_dir)


#do()
if __name__ == "__main__":
    try:
        do()
    # NOTE(review): bare except — ANY failure (including the TypeError from
    # `raise False`) triggers the alert mail.
    except:
        send_mail.send(mongo_num)
        mylogger.info("mongodb bakup failed")