def SendMail(self):
    """Read the form fields, send the mail via MailHandler, and show the result.

    Falls back to built-in defaults when the sender address/password fields
    are left empty.  On success the input fields are cleared; on an error
    (result text containing "ERROR") they are left untouched so the user can
    retry.
    """
    # Sender credentials, with redacted defaults when left blank.
    sender = self.senderMailE.get()
    if not sender:
        sender = '*****@*****.**'
    password = self.senderPass.get()
    if not password:
        password = '******'

    self.error = ""
    subject = self.MailSubject.get()
    content = self.MailContent.get()
    receiver = self.receiverMail.get()

    # Build and dispatch the mail, then surface the outcome in the UI label.
    mail = MailHandler(sender, password, receiver)
    mail.constructMail(subject, content)
    self.error = mail.SendMail()
    self.resultLABEl.config(text=self.error)

    # Guard clause: keep the inputs when sending failed.
    if "ERROR" in self.error:
        return
    self.ClearInputs()
def sendErrorMail(exitCode):
    """Alert the production team that the workflow was terminated.

    exitCode == 1 -> AutomaticTopTreeProducer raised (workflow terminated).
    exitCode == 2 -> bad request configuration (request set to Priority 0).
    Uses globals: log (logger), DataSet (dataset name), options (CLI options);
    no mail is sent when options.dryRun is set.
    """
    global log
    global DataSet
    global options
    cmd = 'pwd'
    pExe = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
    # FIX: strip the trailing newline that `pwd` emits — without it the
    # "STDOUT log:" path in the mail was broken across two lines.
    # (renamed from `dir` to avoid shadowing the builtin)
    workdir = pExe.stdout.read().strip()
    log.output("********** SCRIPT GOT TERMINATE SIGNAL -> ALERTING PRODUCTION TEAM **********")
    mail = MailHandler()
    type = "error"
    subject = "Problem within the TopTree ProductionWorkflow"
    msg = "Dear top quark production group,\n"
    msg += "\n"
    msg += "This is an automatically generated e-mail to inform you that the production workflow encountered problems during the processing of "+DataSet+"."
    msg += "\n\nReason:"
    if exitCode == 1:
        msg += "\n\n\t AutomaticTopTreeProducer exited with code 1 -> probabely a python exception. The workflow is now terminated. Please investigate and restart the workflow."
    if exitCode == 2:
        msg += "\n\n\t AutoMaticTopTreeProducer encountered a problem in the request configuration. This request is put inactive (Priority 0) until manual intervention."
    msg += "\n\nSTDOUT log: "+workdir+"/"+"stdout"
    msg += "\n\n\nCheers,\nStartProduction.py"
    if not options.dryRun:
        mail.sendMail(type, subject, msg)
def sendErrorMail(exitCode,DataSet):
    """Alert the production team that the workflow was terminated.

    exitCode == 1 -> AutomaticTopTreeProducer raised (workflow terminated).
    exitCode == 2 -> bad request configuration (request set to Priority 0).
    DataSet is the dataset name being processed.  Uses globals: log, options;
    no mail is sent when options.dryRun is set.
    """
    global log
    global options
    cmd = 'pwd'
    pExe = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
    # FIX: strip the trailing newline that `pwd` emits — without it the
    # "STDOUT log:" path in the mail was broken across two lines.
    # (renamed from `dir` to avoid shadowing the builtin)
    workdir = pExe.stdout.read().strip()
    log.output("********** SCRIPT GOT TERMINATE SIGNAL -> ALERTING PRODUCTION TEAM **********")
    mail = MailHandler()
    type = "error"
    subject = "Problem within the TopTree ProductionWorkflow"
    msg = "Dear top quark production group,\n"
    msg += "\n"
    msg += "This is an automatically generated e-mail to inform you that the production workflow encountered problems during the processing of "+DataSet+"."
    msg += "\n\nReason:"
    if exitCode == 1:
        msg += "\n\n\t AutomaticTopTreeProducer exited with code 1 -> probabely a python exception. The workflow is now terminated. Please investigate and restart the workflow."
    if exitCode == 2:
        msg += "\n\n\t AutoMaticTopTreeProducer encountered a problem in the request configuration. This request is put inactive (Priority 0) until manual intervention."
    msg += "\n\nSTDOUT log: "+workdir+"/"+"stdout"
    msg += "\n\n\nCheers,\nStartProduction.py"
    if not options.dryRun:
        mail.sendMail(type, subject, msg)
def announceDataSet():
    """Mail the announcement that a TopTree production has completed."""
    log.output("********** Sending announcement for this production **********")
    global options
    global logFileName
    global patPublishName
    global nEventsPAT
    global nEventsTT
    global nEventsDBS
    global doStartFromPAT
    global timestamp

    mail = MailHandler()
    kind = "announcement"
    subject = "New dataset announcement"

    # Assemble the message as pieces and join once at the end.
    pieces = [
        "Dear top quark group,\n",
        "\n",
        "This is an automatically generated e-mail to announce that a TopTree production is completed.",
        "\n\n* Input parameters:",
        "\n\n\t-> Input dataset: " + options.dataset + " (#Events: " + str(nEventsDBS) + ")",
        "\n\n\t-> Global Tag: " + options.GlobalTag,
        "\n\n\t-> CMSSW Version: " + options.cmssw_ver,
        log.getAnnouncementMSG(),
    ]
    if options.flavourHistoryFilterPath != -1:
        pieces.append("\n\nNote: The Flavour History Tool was enabled within the production, filer path: " + str(options.flavourHistoryFilterPath) + " !!\n")
    pieces.append("\n\nMore information on this production can be found on https://mtop.iihe.ac.be/TopDB.")
    pieces.append("\n\n\nCheers,\nThe TopTreeProduction team")

    mail.sendMail(kind, subject, "".join(pieces))
def announceDataSet():
    """Send the completion announcement e-mail for a TopTree production."""
    log.output("********** Sending announcement for this production **********")
    global options
    global logFileName
    global patPublishName
    global nEventsPAT
    global nEventsTT
    global nEventsDBS
    global doStartFromPAT
    global timestamp

    mail = MailHandler()
    mailType = "announcement"
    subject = "New dataset announcement"

    # Build the mail body incrementally.
    body = "Dear top quark group,\n"
    body += "\n"
    body += "This is an automatically generated e-mail to announce that a TopTree production is completed."
    body += "\n\n* Input parameters:"
    body += "\n\n\t-> Input dataset: " + options.dataset + " (#Events: " + str(nEventsDBS) + ")"
    body += "\n\n\t-> Global Tag: " + options.GlobalTag
    body += "\n\n\t-> CMSSW Version: " + options.cmssw_ver
    body += log.getAnnouncementMSG()
    if options.flavourHistoryFilterPath != -1:
        body += "\n\nNote: The Flavour History Tool was enabled within the production, filer path: " + str(options.flavourHistoryFilterPath) + " !!\n"
    body += "\n\nMore information on this production can be found on https://mtop.iihe.ac.be/TopDB."
    body += "\n\n\nCheers,\nThe TopTreeProduction team"

    mail.sendMail(mailType, subject, body)
def announceDataSet():
    """Announce a finished GEN-FASTSIM-HLT production to the configured address."""
    log.output("********** Sending announcement for this production **********")
    global options
    global logFileName
    global patPublishName
    global nEventsPAT
    global nEventsTT
    global nEventsDBS
    global doStartFromPAT
    global timestamp

    mail = MailHandler()
    # Route both announcement and error mail to the command-line address.
    log.output("--> sending to " + options.email)
    mail.toAnnounce = [str(options.email)]
    mail.toError = [str(options.email)]

    # Assemble the message and send it in one go.
    parts = [
        "Hi,\n",
        "\n",
        "This is an automatically generated e-mail to announce that a GEN-FASTSIM-HLT production is completed.",
        log.getAnnouncementMSG(),
        "\n\nMore information on this production can be found on https://mtop.iihe.ac.be/TopDB.",
        "\n\n\nCheers,\nThe GEN-FASTSIMProduction team",
    ]
    mail.sendMail("announcement", "New fast simulation production", "".join(parts))
def dieOnError(self, string):
    """Log a fatal error, mail the production team (when enabled), then exit(2).

    `string` is the human-readable failure reason included in the mail.
    Reads self.dataset, self.logFile and self.sendErrorMails.
    """
    self.output("********** SCRIPT GOT TERMINATE SIGNAL -> ALERTING PRODUCTION TEAM **********")
    mail = MailHandler()
    # One concatenated message instead of incremental +=.
    body = (
        "Dear top quark production group,\n"
        "\n"
        "This is an automatically generated e-mail to inform you that the production of " + self.dataset + " failed."
        "\n\nReason:"
        "\n\n\t" + string +
        "\n\nLogfile for this production: " + self.logFile +
        "\n\n\nCheers,\nAutoMaticTopTreeProducer.py (killed in combat)"
    )
    if self.sendErrorMails:
        mail.sendMail("error", "AutoMaticTopTreeProducer failed", body)
    # Always terminate, whether or not the mail was sent.
    sys.exit(2)
def announceDataSet():
    """Mail the announcement that a GEN-FASTSIM-HLT production completed."""
    log.output("********** Sending announcement for this production **********")
    global options
    global logFileName
    global patPublishName
    global nEventsPAT
    global nEventsTT
    global nEventsDBS
    global doStartFromPAT
    global timestamp

    mail = MailHandler()
    # Recipients come from the --email option rather than a hard-coded list.
    log.output("--> sending to " + options.email)
    mail.toAnnounce = [str(options.email)]
    mail.toError = [str(options.email)]

    mailType = "announcement"
    subject = "New fast simulation production"
    msg = "Hi,\n"
    msg += "\n"
    msg += "This is an automatically generated e-mail to announce that a GEN-FASTSIM-HLT production is completed."
    msg += log.getAnnouncementMSG()
    msg += "\n\nMore information on this production can be found on https://mtop.iihe.ac.be/TopDB."
    msg += "\n\n\nCheers,\nThe GEN-FASTSIMProduction team"
    mail.sendMail(mailType, subject, msg)
from MailHandler import MailHandler

# Interactive mail-sending script: prompt for sender credentials, subject,
# content and receiver, then build and send the mail via MailHandler.
# FIX: the password-prompt section was syntactically broken (mangled by
# credential redaction); reconstructed to mirror the address-prompt logic.

# The mail addresses and password
sender_address = input("enter sender email :")
# default sender
if (sender_address == ""):
    sender_address = '*****@*****.**'
sender_pass = input("enter sender password : ")
# default password
if (sender_pass == ""):
    sender_pass = '******'
mail_subject = input("enter subject :")
mail_content = input("enter mail content :")
receiver_address = input("enter receiver mail :")
if (receiver_address == ""):
    receiver_address = "*****@*****.**"
mail = MailHandler(sender_address, sender_pass, receiver_address)
mail.constructMail(mail_subject, mail_content)
print(mail.SendMail())
# NOTE(review): fragment of a quiz-grading script — the enclosing loop over
# answers (ans / res[j]) and the definitions of r, l and res begin before this
# excerpt, so the original indentation and loop structure cannot be
# reconstructed here; the code is left byte-identical.
# What the visible code does: scores an answer by case-insensitive substring
# match in either direction, collects scored records in l, then repeatedly
# prompts for a score cutoff, writes the selected students (email:score:name:
# phone:college) to email.txt, and finally mails them via MailHandler(email).
if ans.lower() in str(res[j]).lower() or str( res[j]).lower() in ans.lower(): print('done', res[j], ans) r.score += 1 else: print('Not done', res[j], ans) l.append(r) print(r) email = [] email_file = open('email.txt', 'w') while True: emails = [] score = int(input('Enter the Score limit : ')) for i in l: if i.score >= score: emails.append([i.email, i.score, i.name, i.phone, i.college]) email_file.write(i.email + ':' + str(i.score) + ':' + str(i.name) + ':' + str(i.phone) + ':' + str(i.college) + '\n') email.append([i.email, i.score]) state = input( f"The number of students selected are {len(emails)} do you wish to continue yes/no : " ) if state.lower() == 'yes': break email_file.close() print(email) mailhandler = MailHandler(email) mailhandler.SendMail()
"allow_headers": ["Authorization", "Content-Type"] } mailgun_api_key = os.environ['MAILGUN_API_KEY'] mailgun_domain = os.environ['MAILGUN_DOMAIN'] google_map_key = os.environ['BRYLLUP_GOOGLE_MAP_KEY'] image_url = 'https://photos.app.goo.gl/Qkvp72b5QoqAuWy86' logging.basicConfig(level=logging.DEBUG) app = Flask(__name__, template_folder='.') CORS(app, resources={"/*": cors_config}) gphotos = GooglePhotos(app.logger) gmap = GMap(app.logger, app, google_map_key) mailer = MailHandler(app.logger, mailgun_api_key, mailgun_domain) @app.route('/hello') def hello(): return {'hello': 'world'} @app.route('/map') def google_map(): height = request.args.get('height') width = request.args.get('width') #return render_template('MapTemplate.html', mymap=gmap.get_map()[0], sndmap=gmap.get_map()[1]) return render_template('MapTemplate.html', gmap=gmap.get_map(height, width))
# NOTE(review): collapsed/whitespace-mangled excerpt of a TopDB cleaning
# agent method (Python 2: xrange, print statement).  process() dispatches on
# self.removeType:
#   "toptree" / "patuple" / "dataset" -> query TopDB via self.sql for the
#       entry (and, for patuple/dataset, all downstream toptrees), then remove
#       the files from SRM storage, delete the TopDB rows, and invalidate the
#       DBS publication via RemoveHelper;
#   "cleanpnfs" -> scan /pnfs/iihe/cms/store/user/dhondt for TOPTREE
#       directories older than 720h with no matching TopDB entry, and mail an
#       "rm -rfv" report to the admins via MailHandler;
#   "cleancrablogs" -> prune CRAB stdout/stderr/log files older than 50 days
#       under $HOME/AutoMaticTopTreeProducer (data productions get their
#       stdout files shrunk instead of removed, marked with a .shrunk stamp).
# Returns 0 on success, 1 when the requested entry is not found.
# Left byte-identical: the statement ordering, string surgery and filesystem
# removal logic are too intricate to restyle safely from this mangled view.
# NOTE(review): `log.output` near the end (vs `self.log.output` elsewhere)
# looks like it relies on a module-level logger — verify against the module.
def process(self): # setup proper log location self.log = logHandler("logs/log-TopDB-CleaningAgent-"+str(self.ID)+".txt") self.log.output("****** Removing "+self.removeType+" with ID "+str(self.removeId)+" as requested by "+self.user+" ******") # pat to remove id = [] storagePath = [] dbsPublish = [] CffFilePath = [] # toptree to remove idTop = [] storagePathTop = [] mergedTopLocation = [] storagePathTopMail=[] ## REMOVE ONLY TOPTREE if self.removeType == "toptree": self.sql.createQuery("SELECT","toptrees","id,StoragePath,TopTreeLocation","id = '"+str(self.removeId)+"'") result = self.sql.execQuery().split('\n') if len(result) == 1: self.log.output(" ---> ERROR: TopTree was not found in TopDB") return 1 else: idTop.append(result[1].split("\t")[0]) storagePathTop.append(result[1].split("\t")[1]) mergedTopLocation.append(result[1].split("\t")[2]) ## REMOVE PAT + ALL DOWNSTREAM TOPTREES elif self.removeType == "patuple": self.sql.createQuery("SELECT","patuples","id,StoragePath,name,CffFilePath","id = '"+str(self.removeId)+"'") result = self.sql.execQuery().split('\n') if len(result) == 1: self.log.output(" ---> RRROR: PatTuple was not found in TopDB") return 1 else: id.append(result[1].split("\t")[0]) storagePath.append(result[1].split("\t")[1]) dbsPublish.append(result[1].split("\t")[2]) CffFilePath.append(result[1].split("\t")[3]) self.sql.createQuery("SELECT","toptrees","id,StoragePath,TopTreeLocation","patuple_id = '"+id[len(id)-1].split("\\n")[0]+"'") result2 = self.sql.execQuery().split('\n') if len(result2) > 1: for j in range(1,len(result2)-1): idTop.append(result2[j].split("\t")[0]) storagePathTop.append(result2[j].split("\t")[1]) mergedTopLocation.append(result2[j].split("\t")[2]) ## REMOVE DATASET + ALL DOWNSTREAM PAT + ALL DOWNSTREAM TOPTREES elif self.removeType == "dataset": self.sql.createQuery("SELECT","patuples","id,StoragePath,name,CffFilePath","dataset_id = '"+str(self.removeId)+"'") result = self.sql.execQuery().split('\n') if len(result) > 1: for 
i in range(1,len(result)-1): id.append(result[i].split("\t")[0]) storagePath.append(result[i].split("\t")[1]) dbsPublish.append(result[i].split("\t")[2]) CffFilePath.append(result[i].split("\t")[3]) self.sql.createQuery("SELECT","toptrees","id,StoragePath,TopTreeLocation","patuple_id = '"+id[len(id)-1].split("\\n")[0]+"'") result2 = self.sql.execQuery().split('\n') if len(result2) > 1: for j in range(1,len(result2)-1): idTop.append(result2[j].split("\t")[0]) storagePathTop.append(result2[j].split("\t")[1]) mergedTopLocation.append(result2[j].split("\t")[2]) ## CLEAN LEFTOVER FILES FROM FAILED PRODUCTION, CLEAN PATUPLES WITHOUT ANY TOPTREES elif self.removeType == "cleanpnfs": self.log.output("--> Cleaning up PNFS area for dhondt") self.log.output(" ---> Searching for PNFS directories from broken production") dirs = [] for dir in os.listdir("/pnfs/iihe/cms/store/user/dhondt/"): if not dir.rfind("Skimmed-TopTrees") == -1: continue; #if dir.rfind("7TeV_T2") == -1: continue # this is just to make testing go fast pExe = Popen("find /pnfs/iihe/cms/store/user/dhondt/"+dir+" -name TOPTREE", shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True) out = pExe.stdout.read() for file in out.split("\n"): split = file.split("/") dirName = "" for i in xrange(0,len(split)-1): dirName += split[i]+"/" dirName = dirName.rstrip("/") if dirs.count(dirName) == 0 and len(dirName) > 0: dirs.append(dirName+"/TOPTREE") self.log.output(" ----> "+str(len(dirs))+" directory(s) found in total, cross-referencing TopDB...") for i in xrange(0,len(dirs)): self.sql.createQuery("SELECT","toptrees","id","StoragePath REGEXP '"+dirs[i]+"'") result = self.sql.execQuery().split('\n') self.sql.createQuery("SELECT","patuples","id","StoragePath REGEXP '"+dirs[i]+"'") result2 = self.sql.execQuery().split('\n') self.sql.createQuery("SELECT","gensims","id","PNFSPath REGEXP '"+dirs[i]+"'") result3 = self.sql.execQuery().split('\n') self.sql.createQuery("SELECT","recos","id","PNFSPath REGEXP 
'"+dirs[i]+"'") result4 = self.sql.execQuery().split('\n') if len(result) < 2 and len(result2) < 2 and len(result3) < 2 and len(result4) < 2 and storagePathTopMail.count(dirs[i]) == 0: filestat = os.stat(dirs[i]) filedate = filestat.st_mtime now = int(time.time()) last_mod=int(filedate) time_diff=now-last_mod if time_diff/(60*60) > 720: # just want the dir to be old enough to not remove ongoing prod self.log.output(" ----> Directory "+dirs[i]+" is not in TopDB, it should be removed! (Age: "+str(time_diff/(60*60*24))+" days)") #idTop.append(-9999) storagePathTopMail.append(dirs[i]) self.log.output(" ----> "+str(len(storagePathTopMail))+" directory(s) need removal!") self.log.output(" ---> Searching for PATuples that don't have a TopTree assigned") self.sql.createQuery("SELECT","patuples","id,StoragePath,name,CffFilePath","") result2 = self.sql.execQuery().split('\n') self.sql.createQuery("SELECT","toptrees","patuple_id","") result3 = self.sql.execQuery().split('\n') for i in result2: if i == "" or not i.rfind("id") == -1: continue tmpid = i.split("\t")[0] found=bool(False) for j in result3: if j == "": continue if tmpid == j: found=bool(True) #if not found: #id.append(i.split("\t")[0]) #storagePath.append(i.split("\t")[1]) #dbsPublish.append(i.split("\t")[2]) #CffFilePath.append(i.split("\t")[3]) msg = "Dear admins," if len(storagePathTopMail) > 0: msg += "\n\n The automatic TopDB PNFS cleaning tool has found "+str(len(storagePathTopMail))+" directories on PNFS not corresponding to any entry in the TopDB database." msg += "\n\n Please have a look at the following list:" for s in storagePathTopMail: msg += "\n\n \t rm -rfv "+s else: msg += "\n\n The automatic TopDB PNFS cleaning tool has found NO directories on PNFS not corresponding to any entry in the TopDB database." 
msg += "\n\nCheers,\nHector the cleaning agent" mail = MailHandler() mail.sendMail("error","Report from TopDB PNFS cleaning",msg) ## CLEAN LEFTOVER FILES FROM FAILED PRODUCTION, CLEAN PATUPLES WITHOUT ANY TOPTREES elif self.removeType == "cleancrablogs": days=50 self.log.output(" ---> Listing Configuration directories") self.log.output(" ---> Checking every Configuration directory (older than "+str(days)+" days) for large amounts of *.stdout from CRAB") ldirs = [] cleanup_ldirsToRemove = [] basedir=(Popen("echo $HOME", shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True).stdout.read()).strip()+"/AutoMaticTopTreeProducer/" for dir in os.listdir(basedir): if dir.rfind("CMSSW_") == -1: continue; pExe = Popen("find "+basedir+dir.strip()+"/ -name crab_*.cfg", shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True) out = pExe.stdout.read() for file in out.split("\n"): split = file.split("/") dirName = "" for i in xrange(0,len(split)-1): dirName += split[i]+"/" dirName = dirName.rstrip("/") if ldirs.count(dirName) == 0 and len(dirName) > 0: ldirs.append(dirName.split("/AutoMaticTopTreeProducer/")[1]) # becase we don't want it to crash on changes /home /user # time to clean out some big chunks of stdout files if not dirName == "": if not dirName.find("find: ") == -1: dirName = dirName.split("find: ")[1] #print dirName filestat = os.stat(dirName) filedate = filestat.st_mtime now = int(time.time()) last_mod=int(filedate) time_diff=now-last_mod if time_diff/(60*60*24) > days: #self.log.output(" ---> Cleaning CRAB stdout files in "+dirName+" (Age: "+str(time_diff/(3600*24))+" days)") crabdir="" for dir in os.listdir(dirName): if not dir.rfind("TOPTREE_") == -1 and dir.rfind(".py") == -1 and os.path.isdir(dirName+"/"+dir): crabdir=dirName+"/"+dir if not crabdir == "": numfiles=int(0) keepstdout="" keepstderr="" keepxml="" if os.path.exists(crabdir+"/log/crab.log"): self.log.output(" ---> Cleaning crab.log in "+crabdir+"/log/ (Age: 
"+str(time_diff/(3600*24))+" days)") os.unlink(crabdir+"/log/crab.log") #sys.exit(1) for file in os.listdir(crabdir+"/res"): if not file.rfind(".stdout") == -1: if os.path.getsize(crabdir+"/res/"+file) > 0 and keepstdout == "": keepstdout=file numfiles=numfiles+1 #print keepstdout if not os.path.isdir(crabdir+"/res"): numfiles=0 #print numfiles #print str(numfiles)+" "+dirName if numfiles > 2 and dirName.rfind("Run201") == -1: print numfiles self.log.output(" ---> Cleaning CRAB stdout files in "+crabdir+" (Age: "+str(time_diff/(3600*24))+" days)") keepstderr=keepstdout.split(".stdout")[0]+".stderr" keepxml="crab_fjr_"+(keepstdout.split(".stdout")[0]).split("CMSSW_")[1]+".xml" for file in os.listdir(crabdir+"/res"): if not os.path.isdir(crabdir+"/res/"+file) and file.rfind("Submission") == -1 and file.rfind(".json") == -1 and not file == keepxml and not file == keepstdout and not file == keepstderr: self.log.output(" ---> Removing crab output "+file) os.unlink(crabdir+"/res/"+file) elif not file.rfind("Submission") == -1: self.log.output(" ---> Removing old Submission_X dir: "+file) shutil.rmtree(crabdir+"/res/"+file) elif not dirName.rfind("Run201") == -1: if os.path.exists(crabdir+"/res/.shrunk"): continue self.log.output(" ---> (DATA PRODUCTION) Removing unuseful lines from stdout files in "+crabdir+" (Age: "+str(time_diff/(3600*24))+" days)") for file in os.listdir(crabdir+"/res"): if not file.rfind("Submission") == -1: self.log.output(" ---> Removing old Submission_X dir: "+file) shutil.rmtree(crabdir+"/res/"+file) elif not os.path.isdir(crabdir+"/res/"+file) and file.rfind("Submission") == -1 and file.rfind(".json") == -1 and not file.rfind(".stdout") == -1: self.log.output(" ---> Shrinking crab output "+file) tmpfile = open(crabdir+"/res/"+file+"_tmp","w") for line in open(crabdir+"/res/"+file): if line.rfind("Begin processing") == -1 and line.rfind("Vertex") == -1 and line.rfind("%MSG") == -1: tmpfile.write(line) os.unlink(crabdir+"/res/"+file) 
os.rename(crabdir+"/res/"+file+"_tmp",crabdir+"/res/"+file) f = open(crabdir+"/res/.shrunk","w") # leave a stamp that this dir is fixed f.close() self.log.output(" ----> "+str(len(ldirs))+" Configuration directory(s) found in total, cross-referencing TopDB...") self.log.output("") ldirs = [] # disable this for now for i in xrange(0,len(ldirs)): self.sql.createQuery("SELECT","toptrees","id","CffFilePath REGEXP '"+ldirs[i]+"'") result = self.sql.execQuery().split('\n') self.sql.createQuery("SELECT","patuples","id","CffFilePath REGEXP '"+ldirs[i]+"'") result2 = self.sql.execQuery().split('\n') self.sql.createQuery("SELECT","gensims","id","CffPath REGEXP '"+ldirs[i]+"'") result3 = self.sql.execQuery().split('\n') self.sql.createQuery("SELECT","recos","id","CffPath REGEXP '"+ldirs[i]+"'") result4 = self.sql.execQuery().split('\n') if len(result) < 2 and len(result2) < 2 and len(result3) < 2 and len(result4) < 2 and cleanup_ldirsToRemove.count(ldirs[i]) == 0: filestat = os.stat(basedir+"/"+ldirs[i]) filedate = filestat.st_mtime now = int(time.time()) last_mod=int(filedate) time_diff=now-last_mod if time_diff/(60*60*24) > days: # just want the dir to be old enough to not remove ongoing prod self.log.output(" ----> Directory "+ldirs[i]+" is not in TopDB, it should be removed! 
(Age: "+str(time_diff/(60*60*24))+" days)") cleanup_ldirsToRemove.append(ldirs[i]) self.log.output(" ----> "+str(len(cleanup_ldirsToRemove))+" directory(s) need removal!") ## SUMMARY OF THE REMOVAL self.log.output(" --> Summary of the removal") for i in range(0,len(id)): self.log.output(" * Removing PATtuple with ID "+str(id[i])+" at "+storagePath[i]) for i in range(0,len(idTop)): self.log.output(" * Removing TopTree with ID "+str(idTop[i])+" at "+storagePathTop[i]) #if self.removeType == "cleanpnfs": # return 0; # START REMOVAL time.sleep(20) log.output(" --> Starting the removal procedure") rm = RemoveHelper(self.sql,self.log) for i in range(0,len(id)): rm.rmSRMdir(storagePath[i]) rm.rmFromTopDB("patuples",id[i]) rm.invalDBS(dbsPublish[i],CffFilePath[i]) for i in range(0,len(idTop)): rm.rmSRMdir(storagePathTop[i]) if idTop[i] > 0: rm.rmFromTopDB("toptrees",idTop[i]) if self.removeType == "dataset": rm.rmFromTopDB("datasets",self.removeId) self.log.output(" --> Ended removal procedure") return 0