def _send_update_email(self, idx, row, type_email):
     """
     When something needs to be sent, this will add to a summary email and send
     :param idx:
     :param row:
     :param type_email: key to self.wks_names / self.wks_text
     :return:
     """
     # Send the e-mail to the person and Sarah
     replace_col = type_email
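     # keep the original key: type_email may get a 'cat ' prefix below for the
     # template lookup, but the DataFrame column written at the end keeps this name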
     if row['DOG/CAT'].lower() == 'cat':
         type_email = 'cat %s' % type_email
     if type_email not in self.wks_text:
         return
     email_txt = self.wks_text[type_email].format(Adopter_First_Name=row['Adopter First Name'],
                                                  Pet_NAME=row['PET Name'])
     email_txt += self._add_footer()
     email_subject = 'ALIVE Rescue follow up!'
     email_to = row['Email Address'].replace(';', ',')
     email_to = email_to.split(',')
     sending = [config.master_email]
     sending.extend(email_to)
     send_email.send_email(to_user=sending, SUBJECT=email_subject, TEXT=email_txt, FROM='ALIVE Rescue')
     self.adopter.loc[idx, replace_col] = 'SENT'
     # Now send a summary email to Sarah
     self.summary_email.add_random_text('<li>%s: %s</li>' % (row['PET Name'], type_email))
Example #2
def send_file(machinename, username, password, dirname, filename):
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(machinename, username=username, password=password)
    sftp = ssh.open_sftp()
    f = sftp.open("/etc/squid/blacklist", 'r')
    send_email.send_email(f.read())  # read the whole blacklist and email it
    f.close()
    ssh.close()
Example #3
File: mail.py Project: pauljenny/gesassos2
def send_passwords(login_asso, login_president, mdp1, mdp2):
    print("Sending the passwords to the association's president")
    to = login_president + "@etu.utc.fr"
    msg = f"""From: SiMDE <*****@*****.**>
To: <{to}>
Subject: Creation du compte asso

Bonjour,

Le compte de ton asso a ete cree avec le login {login_asso}

Voici les mots de passe associes :
 - compte asso : {mdp1}
 - base de donnee MySQL : {mdp2}

Toutes les informations relatives a l'utilisation du compte sont disponibles sur notre wiki :
http://assos.utc.fr/simde

Cordialement,
L'equipe du SiMDE
"""
    send_email.send_email("*****@*****.**", to, msg)
Example #4
File: mail.py Project: pauljenny/gesassos2
def send_new_password_sql(login_asso, login_president, mdp):
    print("Sending the new SQL password to the association's president")
    to = login_president + "@etu.utc.fr"
    msg = f"""From: SiMDE <*****@*****.**>
To: <{to}>
Subject: Changement de mot de passe MySQL

Bonjour,

Le mot de passe de la base de donnee de l'asso {login_asso} a ete change.

Voici le nouveau mot de passe : {mdp}

Toutes les informations relatives a l'utilisation du compte sont disponibles sur notre wiki :
http://assos.utc.fr/simde

Cordialement,
L'equipe du SiMDE
"""
    send_email.send_email("*****@*****.**", to, msg)
Example #5
def get_iss_bachelor_info(page):
    global conn
    global cursor
    global iss_message
    global new_message_from_iss_bachelor_info
    global info_pagesize
    global email_to
    iss_bachelor_info = read_iss_bachelor_page(page)
    base_bachelor_info_url = "http://www.iss.whu.edu.cn/one/bachelor/generalContent.template"
    pattern = re.compile(r'title="(?P<title>.*)" href="[a-z_]*\.html\?nid=(?P<nid>[0-9]*)"')
    for m in re.finditer(pattern, iss_bachelor_info):
        title = m.group('title').decode('utf-8')
        url = base_bachelor_info_url + "?nid=" + m.group('nid')
        try:
            cursor.execute("INSERT INTO iss_bachelor_info VALUES(?, ?)", (title, url))
        except sqlite3.IntegrityError:
            return None
        else:
            iss_message[title] = url
            new_message_from_iss_bachelor_info += 1
            page_html = urllib2.urlopen(url).read()
            download_urls = re.findall('<a href="/.*"', page_html)
            if download_urls:  # re.findall returns a (possibly empty) list, never None
                for i in download_urls:
                    i_replace = i.replace("\"/", "\"http://www.iss.whu.edu.cn/")
                    #re.sub(i, download_url, page_html)
                    page_html = page_html.replace(i, i_replace)
            send_email(email_to, title, page_html, "html")
    conn.commit()
    if new_message_from_iss_bachelor_info == page * info_pagesize:
        get_iss_bachelor_info(page + 1)
Example #6
def send_report():
    """ send updated csv links and then archive it for a fresh new report at the next interval
    combine vip info with links in a seperate column
    send email
        
    """
    
    directory = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
    master_file = os.path.join(directory, "Masterlist database_CSV.csv")
    report_file = os.path.join(directory, "reports.csv")
    
    #if there is no reports file for the first time, then create it
    if not os.path.exists(report_file):
        open(report_file, "wb").write(",") #for two column report
    
    email_report_file = os.path.join(directory, "email_reports.csv")
    
    #archive or delete the old first
    if os.path.exists(email_report_file):
        os.rename(email_report_file, email_report_file.replace("email_reports.csv", "email_reports%s.csv" % (str(time()))))
        
    #merge
    merge_files(master_file, report_file, email_report_file)
    
    #send email
    send_email(email_report_file)
    
    #archive the reports file or delete it for new reports
    os.rename(report_file, report_file.replace("reports.csv", "reports_%s.csv" % (str(time()))))
Example #7
def get_teachersblog_info(page):
    global conn
    global cursor
    global chengang_message
    global new_message_from_chengang
    global new_homework_from_chengang
    global info_pagesize
    global email_to
    iss_teacherblog_info = read_teachersblog_info(page)
    base_teacherblog_info_url = "http://www.iss.whu.edu.cn/one/blog/blogcontent.template"
    pattern = re.compile(r'<h3><a href="javascript:showBlogContent\(\'(?P<bid>[0-9]*)\'\,\'(?P<author>.*)\'\);">(?P<title>.*)</a></h3>')
    for m in re.finditer(pattern, iss_teacherblog_info):
        title = m.group('title').decode('utf-8')
        url = base_teacherblog_info_url + "?" + "bid=" + m.group('bid') + "$^@^$author=" + m.group('author')
        try:
            cursor.execute("INSERT INTO iss_teacherblogs_info VALUES(?, ?, ?)", (title, url, m.group('author')))
        except sqlite3.IntegrityError:
            return None
        else:
            chengang_message[title] = url
            new_message_from_chengang += 1
            send_email(email_to, title, urllib2.urlopen(url).read(), "html")
            if title.find(u'作业') != -1:
                new_homework_from_chengang += 1
        conn.commit()
    if new_message_from_chengang == page * info_pagesize:
        get_teachersblog_info(page + 1)
Example #8
def check_for_changes(FieldName="enrollment info"):
    global class_info
    print("Ran")
    try: 
        new_class_info = scraper.scrape_course_info(scraper.gen_url("computer science", "61a")) 
        # set difference: course keys present in the new scrape but missing from the old one
        added = set(new_class_info.keys()) - set(class_info.keys())
        if added:
            print("\a")
            print("new classes have been added to the course!")
            for each in added:
                nice_print(new_class_info[each])   
                result_dict = new_class_info[each]
                if result_dict['enrollment info'] and int(result_dict['enrollment info']['avail seats']) > 0:
                    send_email.send_email(str(result_dict), "vaishaal@gmail.com", "FOUND ROOM IN COURSE: " + each)

        for key in new_class_info:
            if new_class_info[key][FieldName] != class_info[key][FieldName]:
                print "{0} has changed its {1}".format(key,FieldName) 
                print "Old class:" 
                nice_print(class_info[key])
                print "New class:" 
                nice_print(new_class_info[key])

    except KeyError:
        print("Thats not a valid key silly!") 
    class_info = new_class_info
    timer = threading.Timer(1.0,check_for_changes)
    timer.start()
Example #9
def send_email(video_url):
    for to in send_list:
        emailer.send_email(to=to,
                           subject="Video for {}".format(datetime.datetime.today().strftime("%m/%d/%Y")),
                           message=message.format("cb6111.myfoscam.org:5432", video_url),
                           html=True)
Example #10
def expire_hacker(hacker):
	if hacker.rsvpd or hacker.admitted_email_sent_date is None:
		# hacker has rsvp'd or was never accepted
		return
	print "Expiring " + hacker.email + " with admit date: " + str(hacker.admitted_email_sent_date)
	email = template("emails/admittance_expired.html", {"name":hacker.name.split(" ")[0]})
	send_email(recipients=[hacker.email], html=email, subject="You didn't RSVP to Hack@Brown in time...")
	deletedHacker.createDeletedHacker(hacker, "expired")
	hacker.key.delete()
Example #11
def tweet_away():
    all_tweets = get_all_tweets("tweets.csv")
    next_tweet_dict = get_next_tweet(all_tweets)
    if not next_tweet_dict:
        send_email(TO, FROM, PASSWORD, "Tweeper is Out of Tweets!", "Oh no! There are no more tweets available")
    else:
        update_status(next_tweet_dict["tweet"])
        updated_all_tweets = update_tweet_with_already_tweeted(all_tweets, next_tweet_dict)
        write_all_tweets_csv(updated_all_tweets, "tweets.csv")
Example #12
File: main.py Project: quxbaz/codeniche
def mail():
    username = '******'
    password = '******'
    from_address = request.form['field-email']
    to_address = '*****@*****.**'
    message = "Sender's email:\n{0}\n\nMessage:\n{1}".format(from_address, request.form['field-message'])
    subject = 'Codeniche contact form email'
    send_email(username, password, from_address, to_address, message, subject)
    return 'success'
Example #13
def main_menu():
    print("Welcome to our bank service. You are not logged in. \nPlease register or login")

    while True:
        command = input("$$$>").split(" ")

        if command[0] == 'register':
            username = input("Enter your username: ")
            password = getpass.getpass("Enter your password: ")
            while not password_is_strong(password):  # password_is_strong: assumed strength-check helper
                print("Password is not strong enough!")
                password = getpass.getpass("Enter your password: ")
            email = input("Enter your email: ")
            sql_manager.register(username, password, email)

            print("Registration successful")

        elif command[0] == 'login':
            username = input("Enter your username: ")
            password = getpass.getpass("Enter your password: ")
            logged_user = sql_manager.login(username, password)  # assumed API, mirroring login_with_hash below
            if logged_user:
                logged_menu(logged_user)
            else:
                print("Login failed")

        elif command[0] == "reset-password" and len(command) >1:
            reset_hash = hashlib.sha1(str(random.random()).encode()).hexdigest()
            sql_manager.update_reset_hash(command[1], reset_hash)
            send_email.send_email(sql_manager.get_email(command[1]), reset_hash)
            print(command[1])


        elif command[0] == "send-reset-password" and len(command) > 1:
            reset_hash = input("Enter reset hash: ")
            if reset_hash == sql_manager.get_reset_hash(command[1]):
                logged_user = sql_manager.login_with_hash(command[1], reset_hash)
                if logged_user:
                    logged_menu(logged_user)
                else:
                    print("Login failed")


        elif command[0] == 'help':
            print("login - for logging in!")
            print("register - for creating new account!")
            print("exit - for closing program!")
            print("reset-password <name> - to reset password")
            print("send-reset-password <name> - to enter reset password")

        elif command[0] == 'exit':
            break
        else:
            print("Not a valid command")
Example #14
File: runner.py Project: gfechio/python
def get_blacklist(options, issue, summary):
    import send_email
    import paramiko
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(van_server, username=config.conn["user"], password=config.conn["password"])
    sftp = ssh.open_sftp()
    f = sftp.open("/etc/squid/blacklist", 'r')
    send_email.send_email(f)
    jira_con.comment(issue, "Email sent to blacklist.url mail list")
    jira_con.transition_close(issue)
Example #15
def accept_hacker(hacker):
	#print "actually accepting hacker"
	hacker.deadline = (datetime.datetime.now() + datetime.timedelta(seconds=admission_expiration_seconds()))
	if hacker.deadline > datetime.datetime(2015, 2, 7):
		hacker.deadline = datetime.datetime(2015, 2, 7)
	email = template("emails/admitted.html", {"hacker": hacker, "deadline": hacker.deadline.strftime("%m/%d/%y"), "name":hacker.name.split(" ")[0]})
	send_email(recipients=[hacker.email], html=email, subject="You got off the Waitlist for Hack@Brown!")

	hacker.admitted_email_sent_date = datetime.datetime.now()
	hacker.put()
	memcache.add("admitted:{0}".format(hacker.secret), "1", memcache_expiry)
Example #16
	def send_to_email(self, email, template_args=None):
		# does the actual work of sending
		template_args = template_args or {}  # avoid a shared mutable default argument
		emails = [email]
		assert self.email_subject, "No email subject provided. Is email unchecked?"
		if self.email_from_template:
			html = template("emails/" + self.email_html + ".html", template_args)
		else:
			html = template_string(self.email_html, template_args)
			html = template("emails/generic.html", dict({"content": html}.items() + template_args.items()))
		subject = template_string(self.email_subject, template_args)
		send_email(html, subject, emails)
Example #17
def report():
	last = get_last()
	
	# Now we need to get the byte totals from iptables.
	new_totals = get_totals()
	
	reset_detected = False
	proxy_usage = 0
	not_network_usage = 0
	total_start = last[TOTAL_START_IDX]
	if last[PROXY_IDX] > new_totals[PROXY_IDX]:
		# Counters must have been reset.
		reset_detected = True
		proxy_usage = new_totals[PROXY_IDX]
		not_network_usage = new_totals[NOT_NETWORK_IDX]
		total_start = new_totals[TIMESTAMP_IDX]
	else:
		# Do the calc
		proxy_usage = new_totals[PROXY_IDX] - last[PROXY_IDX]
		not_network_usage = new_totals[NOT_NETWORK_IDX] - last[NOT_NETWORK_IDX]
	
	result = (new_totals[TIMESTAMP_IDX],proxy_usage, not_network_usage)
	result_str = "Timestamp: %s Proxied: %s Off Network: %s"

	# Write out the new last totals to the log and last.
	last_file = file(LAST_RESULT, 'w')
	tmp_list = []
	tmp_list.extend(new_totals)
	tmp_list.append(total_start)
	last_file.write("%s\t%d\t%d\t%s\n" % tuple(tmp_list))
	last_file.close()

	log = file(RESULT_LOG, 'a')
	log.write("%s\t%d\t%d\n" % new_totals)
	log.close()

	last = make_human_readable(last)
	new_totals = make_human_readable(new_totals)
	result = make_human_readable(result)
	
	
	print "Last Total - " + result_str % last
	print "New Total - " + result_str % new_totals
	print "New Usage - " + result_str % result
	
	if reset_detected:
		msg = " == RESET DETECTED! == \n"
	else:
		msg = ""
	
	# Send the email report
	msg += EMAIL_MSG % (last[TIMESTAMP_IDX],result[TIMESTAMP_IDX], result[PROXY_IDX], new_totals[PROXY_IDX], result[NOT_NETWORK_IDX], new_totals[NOT_NETWORK_IDX], total_start)
	send_email(EMAIL_FROM, EMAIL_TO, EMAIL_SUBJECT % (result[TIMESTAMP_IDX]), msg, EMAIL_ATTACHMENTS, EMAIL_SERVER)
Example #18
def retrieve_password():
    if request.method == 'POST':
        # user_id=request.cookies.get('user_id')
        user_data = request.form.to_dict()
        print 'retrieve_password , user_data: ', user_data
        user = User.query.filter(User.name == user_data['name'], User.email == user_data['email']).first()
        print user
        if user:
            send_email(str(user.email), user.password)
            return "<h1>你好,{0} 。已经将密码发到你的邮箱 <b> {1} </b> , 请查收验证。</h1>".format(user.name, user.email)
        else:
            flash("咦? 这个邮箱还没有注册耶~ .../n  (●'◡'●) come on ,baby  ❤ ~ ")
    return render_template('retrieve_password.html')
Example #19
def run():
    print 'date:%s step1 start error statistics' % TODAY
    error.save_error()
    print 'date:%s step2 start update statistics' % TODAY
    update.save_update()
    print 'date:%s step3 start db statistics' % TODAY
    db_data.save_db()
    print 'date:%s step4 start making tables' % TODAY
    mktables.mktables()
    print 'date:%s step5 start draw statistics picture' % TODAY
    draw.draw()
    print 'date:%s step6 start sending emails' % TODAY
    send_email.send_email()
    print 'date:%s finished' % TODAY
Example #20
    def run(self):
        global last_roomba_trigger

        # trigger light switch wemo and turn off after a few minutes
        send_email('#ON', gmail_addy, gmail_pw, '*****@*****.**')
        logging.info("turning on Wemo light switch and roomba remote switch")

        # trigger the roomba?
        roomba_on = False  # if this becomes true then after sleeping we turn it back off
        timenow = mktime(datetime.now().timetuple())
        # to launch the roomba it must have been recharging at least 6 hours (WAG)
        # and refrain from launching roomba bt 11 am and 1pm because there are
        # too many false alarms during those hours
        if timenow - last_roomba_trigger > 60*60*6 and localtime(time()).tm_hour not in roomba_off_hours:
            # launch!
            send_email('#ON_ROOMBA', gmail_addy, gmail_pw, '*****@*****.**')
            send_sms('Roomba launched at ' + str(strftime("%X").strip()), gmail_addy, gmail_pw, sms_recipients)
            last_roomba_trigger = timenow
            roomba_on = True

            logging.debug('last_roomba_trigger: ' + str(last_roomba_trigger))
            logging.info('roomba launched')
        else:
            logging.info('skipping roomba launch - ' + str((60*60*6 - (timenow - last_roomba_trigger))/60/60) + ' hours left to charge')

        # this is how long the switch stays on:
        sleep(60)

        # now turn them off
        for i in range(0,2):  # twice just to be extra cautious and make sure it gets turned off for realzies
            send_email('#OFF', gmail_addy, gmail_pw, '*****@*****.**')
            if roomba_on:
                send_email('#OFF_ROOMBA', gmail_addy, gmail_pw, '*****@*****.**')
            logging.info("turned off Wemo light switch and roomba")
            sleep(15)
Example #21
File: cvs_status.py Project: toneill/naa
def generate_report(results, repo):
	msg = "The following has been changed in the %s repository:\n" % (repo)
	for item in results:
		if DEBUG:
			print item
		item_msg = list_output % item
		for line in item[REV_COMMENTS]:
			if not line.startswith("==========="):
				item_msg += "\t%s\n" % (line)
		msg += item_msg
	
	if DEBUG:
		print "MESSAGE:",msg
	#Send the email
	send_email(email_from, email_to, email_subject % (cvs_repos[repo][0]), msg, email_attachments, email_server)
Example #22
def forgot_pass():
	form = ForgotPassForm()
	if 'id' in session:
		return render_template('notice.html', message='Please sign out before proceeding.', redirect='/')
	if request.method == 'GET':
		return render_template('forgotpassword.html', form=form)
	else:
		user = User.query.filter_by(email=request.form['email']).first()
		if user is None:
			return render_template('notice.html', message='No account found with that email address.', redirect='/')

		password = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(7))
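		# e.g. a 7-character code like 'A3F9K2Q', drawn from A-Z and 0-9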
		user.set_password(password)
		db.session.commit()
		send_email.send_email(user.email, password)
		
		return render_template('notice.html', message='Password reset. Please check your email.', redirect='/')
Example #23
	def send(self, to_whom_list, title, content):
		"""Main logic for sending mail:
		1) emails to send = previously failed emails + the ones being sent now;
		2) send them one at a time (a pause between sends may be needed, since sending
		   too many in quick succession can get the mail server to refuse service);
		3) collect the emails that failed this time around;
		4) wipe the old "failed" table and save the new failures into it.
		"""
		emails_to_send = self.db.get_all()	# emails that previously failed to send
		if len(emails_to_send) > 0:
			print('{0} previously failed emails to resend'.format(len(emails_to_send)))
		# the recipient list is stored in a single DB field, so it is serialized to a
		# string on insert and split back into a list on read
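		# e.g. (illustrative, assuming self.delimiter == ';'):
		#   'a@x.com;b@y.com'  <->  ['a@x.com', 'b@y.com']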
		for i in range(0, len(emails_to_send)):
			emails_to_send[i][0] = emails_to_send[i][0].split(self.delimiter)
		emails_to_send.append([to_whom_list, title, content])
		now_fail_emails = []	# emails that failed during this run

		for email in emails_to_send:
			try:
				result = send_email(email[0], email[1], email[2])
			except Exception as e:
				result = {str(e)}
				print('smtplib error happened')
			
			print('send_result....', result)
			if len(result) > 0:
				now_fail_emails.append(email)

		self.db.delete_all()
		print("fail??? ", len(now_fail_emails))
		for email in now_fail_emails:
			email[0] = self.delimiter.join(email[0])
			self.db.insert(email)
		return len(now_fail_emails) == 0
Example #24
def createEvent():
	# send an email with the frame to notify user
	send_email.send_email('*****@*****.**', '9008255338', '*****@*****.**', 'Intrusion Detected', 'An intrusion has been detected, an image of the intrusion has been attached.')
	
	# Authenticate
	response = requests.post(url + '/auth', data=json.dumps(auth_payload), headers=headers)
	json_res = response.json()
	token = json_res['token']

	# Create new event
	with open("intrusion.png", "rb") as image_file:
	    encoded_string = base64.b64encode(image_file.read())
	    event_payload = {"image": encoded_string}
	    response = requests.post(url + '/events?token=' + token, data=json.dumps(event_payload), headers=headers)
	    json_res = response.json()
	    os.remove("intrusion.png")	
Example #25
	def SendEmail(self):
		if not self.bEnable:
			return CATOMEmail.FUNCTIONS_DISABLED

		if os.path.isfile('email\\out_box\\outmail.conf'):
			self.configOutEmail.read('email\\out_box\\outmail.conf')
		else:
			return CATOMEmail.ERROR_IN_CONFIG_FILE
		if not self.configOutEmail.has_section('GENERAL'):
			return CATOMEmail.ERROR_IN_CONFIG_FILE
		intTotalNumber = self.configOutEmail.getint("GENERAL", "total_number")
		intSendNumber = self.configOutEmail.getint("GENERAL", "send_number")
		intAddNumber = self.configOutEmail.getint("GENERAL", "add_number")

		if intTotalNumber == 0:
			return CATOMEmail.NO_EMAIL_IN_LIST
		intNumber = intSendNumber
		intSendNumber = intSendNumber + 1
		intTotalNumber = intTotalNumber - 1
		strSection = "EMAIL" + str(intNumber)
		strEmailFrom = self.configOutEmail.get(strSection, "from")
		strEmailTo = self.configOutEmail.get(strSection,"to")
		strEmailCc = self.configOutEmail.get(strSection, "cc")
		strEmailSubject = self.configOutEmail.get(strSection, "subject")
		strEmailBody = self.configOutEmail.get(strSection, "body")
		strEmailAttach = self.configOutEmail.get(strSection, "attach")
		for int_email_sent_retry in range(5):
			try:
				send_email(strEmailFrom, strEmailTo, strEmailCc, strEmailSubject, strEmailBody, strEmailAttach).send()
			except:
				library.Library.WriteProcessLog('[ERROR][ATOMEmail]: Failed to send email - %s' % strEmailSubject)
				time.sleep(1)
			else:
				library.Library.WriteProcessLog('email send. subject:%s' % strEmailSubject)
				self.configOutEmail.remove_section(strSection)
				if intTotalNumber == 0:
					intAddNumber = -1
					intSendNumber = 0
				self.configOutEmail.set("GENERAL", "add_number", str(intAddNumber))
				self.configOutEmail.set("GENERAL", "send_number", str(intSendNumber))
				self.configOutEmail.set("GENERAL", "total_number", str(intTotalNumber))
				with open('email\\out_box\\outmail.conf','wb') as fWrite:
					self.configOutEmail.write(fWrite)
				break
		return 0
Example #26
File: server.py Project: philipbl/sensor
def triggered_alerts(id_, type_, bound, direction, data):
    if "@" not in id_:
        print("ERROR: id_ is not an email address.")

    to = id_
    subject = "Alert: {} is {} {}{}".format(
        type_, "above" if direction == "gt" else "below", bound, "°" if type_ == "temperature" else "%"
    )
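    # e.g. type_="temperature", direction="gt", bound=80 -> "Alert: temperature is above 80°"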

    message = "At {time}, the {type} was {direction} " "{bound}{unit}!".format(
        time=datetime.fromtimestamp(data["date"] / 1000).strftime("%I:%M %p"),
        type=type_,
        direction="above" if direction == "gt" else "below",
        bound=bound,
        unit="°" if type_ == "temperature" else "%",
    )

    send_email(to=to, subject=subject, message=message)
Example #27
    def send_email(self):

        email_from = '*****@*****.**'
        input_box = QtGui.QInputDialog()
        input_box.setOkButtonText("Send!")
        input_box.setLabelText("Send yourself an email!")
        ok = input_box.exec_()
        to_email = str(input_box.textValue())
        # print(ok, str(to_email))
        if ok:
            try:
                response = send_email(to_email,email_from,self.poem) 
                response2 = send_email(email_from,email_from,"{}--{}".format(to_email,self.poem)) #so Krishan gets a copy as well.
                # print(response)
                self.text_box.append("{}\n\n\n".format(response))
                emails.append(to_email)
                poems.append(self.poem)
            except AttributeError:
                self.text_box.append("Make sure to generate a poem you like before sending email!\n\n\n")
Example #28
File: mail.py Project: simde-utc/gesassos2
def howto_signup(login_president):
  print("Sending the president an email telling him to go sign the charter")
  to = login_president + "@etu.utc.fr"
  msg = f"""From: SiMDE <*****@*****.**>
To: <{to}>
Subject: Signature de la charte

Bonjour,

Pour poursuivre la creation de ton compte asso, merci de signer la charte informatique en suivant la procedure suivante :
http://assos.utc.fr/simde/wiki/signaturecharte

Une fois cela fait, elle doit etre validee par le BDE, il faut donc le contacter a cette adresse pour faire la demande :
[email protected]

Cordialement,
L'equipe du SiMDE
"""
  send_email.send_email("*****@*****.**", to, msg)
Example #29
def main():
	E_MAIL=''
	download_rpm_dict=get_should_upgrade_rpm()
	group_in_list(download_rpm_dict)
	download_rpm=list(set(pkg_should_upgrade))

	if len(download_rpm) == 0:
		file=open('/home/cuizz/aeromax/other/nodownload.log','a')
		print >> file, time.asctime() + ' : no rpms need to be downloaded'
		file.close()
	else:
		get_last_version(download_rpm)
		E_MAIL=''
		for i in download_rpm:
			E_MAIL = i + "\n" + E_MAIL
		print E_MAIL
		msg=send_email.msg_interg("我们将要下载:"+E_MAIL)
		send_email.send_email(send_email.from_addr,send_email.to_addrs,msg)
		subprocess.Popen('/bin/sh /var/www/cobbler/ks_mirror/rpm_temp/deploy.sh',shell=True) 
Example #30
    def finalized(self):
        logging.info('Finalizing task')
        params = self.args[0]

        if self.was_aborted:    # there's probably a bug in the code
            error = 'An unknown error has occurred. Please try again. ' +\
                    'If this occurs again please contact the developers'
            logging.warning(error)
            elapsed_time = datetime.now() - datetime.fromtimestamp(mktime(
                strptime(params['timestamp'], '%a-%d-%b-%Y-%I:%M:%S-%p')))
            for email in params['emails']:
                send_email(FAILURE_EMAIL_SUBJ % params['run_name'],
                           FAILURE_EMAIL % (
                               params['user_args'],
                               elapsed_time.seconds,
                               elapsed_time.microseconds,
                               error),
                           email)
        self.cleanup()
Example #31
import MySQLdb, MySQLdb.cursors
from send_email import send_email
from const.db import HOST, USER, PASSWD, DB, START_URL_SQL
import random, time

conn = MySQLdb.connect(host=HOST,
                       user=USER,
                       passwd=PASSWD,
                       db=DB,
                       cursorclass=MySQLdb.cursors.DictCursor)
cursor = conn.cursor()
requests = []
cursor.execute(START_URL_SQL)
result = cursor.fetchall()
for r in result:
    url = '<a href="http://www.freeb2bmarket.com/company/%s.html">%s</a>' % (
        r['url_slug'], r['name'])
    person = r['contactperpon']
    email = r['email']
    title = '%s Product details required' % person
    send_email(email, title, person, url)
    print r['name']
    time.sleep(random.choice([1, 3, 5, 7]))
cursor.close()
conn.close()
Example #32
def train(input_dim=INPUT_DIM,
          batch_size=BATCH_SIZE,
          learning_rate=1e-4,
          epochs=ITERS,
          fixed_noise_size=FIXED_NOISE_SIZE,
          n_features_first=N_FEATURES_FIRST,
          n_features_reduction_factor=2,
          min_features=64,
          architecture='JLSWGN',
          init_method='He',
          BN=False,
          JL_dim=None,
          JL_error=None,
          n_projections=10000,
          power=5,
          image_enlarge_method='zoom',
          order=1,
          load_saved=True):
    """
    - this is the function to use to train a Johnson-Lindenstrauss Generative Network model which uses the sliced
      Wasserstein-2 distance as objective function (JLSWGN) on MNIST (which is artificially enlarged to get a
      higher-dimensional data set), with the configuration given by the parameters
    - the function computes losses and auto-saves the model every 100 steps and automatically resumes training where it
      stopped (when load_saved=True)

    :param input_dim: the dimension of the latent space -> Z
    :param batch_size: the batch size, should be a divisor of 50k
    :param learning_rate:
    :param n_features_first: the number of feature maps in the first step of the generator
    :param epochs: the number of iterations to train for (this should be: 50k/batch_size*true_epochs)
    :param fixed_noise_size: the number of pictures that is generated during training for visual progress
    :param n_features_reduction_factor: integer, e.g.: 1: use same number of feature-maps everywhere, 2: half the number
           of feature-maps in every step
    :param min_features: the minimal number of features (if the reduction of features would give something smaller, it
           is set to this number)
    :param architecture: right now only supports 'JLSWGN', 'SWGN', defaults to 'JLSWGN'
    :param init_method: the method with which the variables are initialized, support: 'uniform', 'He', defaults to 'He'
    :param BN: shall batch normalization be used
    :param JL_dim: the target dimension of the JL mapping
    :param JL_error: the max pairwise distance deviation error of the JL mapping, only applies when JL_dim=None
    :param n_projections: number of random projections in sliced Wasserstein-2 distance
    :param power: int, this specifies the picture size as 2**power; training data of this size is produced
           automatically using preprocessing_mnist.py (if it does not exist yet)
    :param image_enlarge_method: whether to 'zoom' the MNIST data or to only 'enlarge' the black border,
           default: 'zoom'
    :param order: only needed when 'zoom', the degree of the spline interpolation used for zooming, int in [0, 5],
           default: 1
    :param load_saved: whether an already existing training progress shall be loaded to continue there (if one exists)
    :return:
    """

    # -------------------------------------------------------
    # setting for sending emails and getting statistics
    send = settings.send_email

    # -------------------------------------------------------
    # picture size
    assert type(power) == int
    size = 2**power
    picture_size = size * size
    picture_dim = [-1, size, size]

    # -------------------------------------------------------
    # image enlarge method default
    if image_enlarge_method in ['enlarge', 'enlarge_border', 'border']:
        image_enlarge_method = 'border'
        order = None
    else:
        image_enlarge_method = 'zoom'
        if (order is None) or (int(order) not in range(6)):
            order = 1
        else:
            order = int(order)

    # -------------------------------------------------------
    # architecture default
    use_JL = True
    if architecture not in ['SWGN']:
        architecture = 'JLSWGN'
    if architecture == 'SWGN':
        use_JL = False
        JL_error = None
        JL_dim = None

    # -------------------------------------------------------
    # init_method default
    if init_method not in ['uniform']:
        init_method = 'He'

    # -------------------------------------------------------
    # JL_dim:
    if JL_dim is None:
        if JL_error is None and use_JL:
            use_JL = False
            architecture = 'SWGN'
            print '\narchitecture changed to SWGN, since JL_dim and JL_error were None\n'
        elif JL_error is not None:
            JL_dim = int(math.ceil(8 * np.log(2 * batch_size) / (JL_error**2)))
            # this uses the constant given on the Wikipedia page of "Johnson-Lindenstrauss Lemma"
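            # worked example with illustrative values: batch_size=64, JL_error=0.1
            # gives JL_dim = ceil(8 * ln(128) / 0.01) = ceil(3881.6...) = 3882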
    else:
        JL_error = np.round(np.sqrt(8 * np.log(2 * batch_size) / JL_dim),
                            decimals=4)

    if use_JL and JL_dim >= picture_size:
        use_JL = False
        architecture = 'SWGN'
        JL_error = None
        JL_dim = None
        print '\nJL mapping is not used, since the target dimension was chosen bigger than the input dimension\n'

    print '\nJL_dim = {}'.format(JL_dim)
    print 'JL_error = {}\n'.format(JL_error)

    # -------------------------------------------------------
    # create unique folder name
    dir1 = 'JLSWGN_mnist/'
    directory = dir1 + str(size) + '_' + str(image_enlarge_method) + '_' + str(order) + '_' + \
                str(input_dim) + '_' + str(batch_size) + '_' + str(learning_rate) + '_' + \
                str(n_features_first) + '_' + str(n_features_reduction_factor) + '_' + str(min_features) + '_' + \
                str(architecture) + '_' + str(init_method) + '_' + str(BN) + '_' + \
                str(JL_dim) + '_' + str(JL_error) + '_' + \
                str(n_projections) + '/'
    samples_dir = directory + 'samples/'
    model_dir = directory + 'model/'

    # create directories if they don't exist
    if not os.path.isdir(dir1):
        call(['mkdir', dir1])

    if not os.path.isdir(directory):
        load_saved = False
        print 'make new directory:', directory
        print
        call(['mkdir', directory])
        call(['mkdir', samples_dir])
        call(['mkdir', model_dir])

    # if directories already exist, but model wasn't saved so far, set load_saved to False
    if 'training_progress.csv' not in os.listdir(directory):
        load_saved = False

    # -------------------------------------------------------
    # initialize a TF session
    config = tf.ConfigProto()
    config.intra_op_parallelism_threads = settings.number_cpus
    config.inter_op_parallelism_threads = settings.number_cpus
    session = tf.Session(config=config)

    # -------------------------------------------------------
    # convenience function to build the model
    def build_model():
        with tf.name_scope('placeholders'):
            real_data_int = tf.placeholder(
                tf.uint8,
                [None, picture_size])  # uint8 with int values in [0, 255]
            x_true = tf.cast(real_data_int,
                             tf.float32) / 255.  # float with values in [0,1]
            z = tf.placeholder(tf.float32, [None, input_dim])
            if use_JL:
                JL = tf.placeholder(tf.float32, [picture_size, JL_dim])
                P_non_normalized = tf.placeholder(tf.float32,
                                                  [JL_dim, n_projections])
                P_non_normalized_SWD = tf.placeholder(
                    tf.float32, [picture_size, n_projections])
            else:
                JL = None
                P_non_normalized = tf.placeholder(
                    tf.float32, [picture_size, n_projections])
                P_non_normalized_SWD = None

        x_generated = generator(
            z,
            n_features_first=n_features_first,
            n_features_reduction_factor=n_features_reduction_factor,
            min_features=min_features,
            BN=BN,
            power=power,
            init_method=init_method)

        # define loss (big part taken from SWG)
        with tf.name_scope('loss'):
            # apply the Johnson-Lindenstrauss map, if wanted, to the flattened arrays
            if use_JL:
                JL_true = tf.matmul(x_true, JL)
                JL_gen = tf.matmul(x_generated, JL)
            else:
                JL_true = x_true
                JL_gen = x_generated

            # next project the samples (images). After being transposed, we have tensors
            # of the format: [[projected_image1_proj1, projected_image2_proj1, ...],
            #                 [projected_image1_proj2, projected_image2_proj2, ...],
            #                 ...]
            # Each row has the projections along one direction. This makes it easier for the sorting that follows.
            # first normalize the random normal vectors to lie in the sphere
            P = tf.nn.l2_normalize(P_non_normalized, axis=0)

            projected_true = tf.transpose(tf.matmul(JL_true, P))
            projected_fake = tf.transpose(tf.matmul(JL_gen, P))

            sorted_true, true_indices = tf.nn.top_k(input=projected_true,
                                                    k=batch_size)
            sorted_fake, fake_indices = tf.nn.top_k(input=projected_fake,
                                                    k=batch_size)

            # For faster gradient computation, we do not use sorted_fake to compute
            # loss. Instead we re-order the sorted_true so that the samples from the
            # true distribution go to the correct sample from the fake distribution.

            # It is less expensive (memory-wise) to rearrange arrays in TF.
            # Flatten the sorted_true from dim [n_projections, batch_size].
            flat_true = tf.reshape(sorted_true, [-1])

            # Modify the indices to reflect this transition to an array.
            # new index = row + index
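            # e.g. batch_size=3, n_projections=2 -> rows = [0, 0, 0, 3, 3, 3]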
            rows = np.asarray([
                batch_size * np.floor(i * 1.0 / batch_size)
                for i in range(n_projections * batch_size)
            ])
            rows = rows.astype(np.int32)
            flat_idx = tf.reshape(fake_indices, [-1, 1]) + np.reshape(
                rows, [-1, 1])

            # The scatter operation takes care of reshaping to the rearranged matrix
            shape = tf.constant([batch_size * n_projections])
            rearranged_true = tf.reshape(
                tf.scatter_nd(flat_idx, flat_true, shape),
                [n_projections, batch_size])

            generator_loss = tf.reduce_mean(
                tf.square(projected_fake - rearranged_true))

            # get for JLSWGN the sliced Wasserstein distance (SWD) (since SWD and JLSWD are not comparable)
            if use_JL:
                P_SWD = tf.nn.l2_normalize(P_non_normalized_SWD, axis=0)

                projected_true_SWD = tf.transpose(tf.matmul(x_true, P_SWD))
                projected_fake_SWD = tf.transpose(tf.matmul(
                    x_generated, P_SWD))

                sorted_true_SWD, true_indices_SWD = tf.nn.top_k(
                    input=projected_true_SWD, k=batch_size)
                sorted_fake_SWD, fake_indices_SWD = tf.nn.top_k(
                    input=projected_fake_SWD, k=batch_size)

                flat_true_SWD = tf.reshape(sorted_true_SWD, [-1])
                flat_idx_SWD = tf.reshape(fake_indices_SWD,
                                          [-1, 1]) + np.reshape(rows, [-1, 1])

                rearranged_true_SWD = tf.reshape(
                    tf.scatter_nd(flat_idx_SWD, flat_true_SWD, shape),
                    [n_projections, batch_size])

                SWD = tf.reduce_mean(
                    tf.square(projected_fake_SWD - rearranged_true_SWD))
            else:
                SWD = generator_loss

        with tf.name_scope('optimizer'):
            generator_vars = tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')
            g_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                                 beta1=0.5)
            g_train = g_optimizer.minimize(generator_loss,
                                           var_list=generator_vars)

        # initialize variables using init_method
        session.run(tf.global_variables_initializer())

        return real_data_int, z, x_generated, JL, P_non_normalized, P_non_normalized_SWD, SWD, g_train

    # -------------------------------------------------------
    # build the model
    real_data_int, z, x_generated, JL, P_non_normalized, P_non_normalized_SWD, SWD, g_train = build_model(
    )

    # -------------------------------------------------------
    # For creating and saving samples (taken from IWGAN)
    fixed_noise = np.random.normal(size=(fixed_noise_size,
                                         input_dim)).astype('float32')

    def generate_image(frame):
        samples = session.run(x_generated, feed_dict={z: fixed_noise})
        samples = (samples * 255.99).astype(
            'uint8')  # transform linearly from [0,1] to int[0,255]
        samples = samples.reshape(picture_dim)
        save_images.save_images(samples,
                                samples_dir + 'iteration_{}.png'.format(frame))

    # -------------------------------------------------------
    # get the dataset as infinite generator
    mem = memory()

    data_dir = '../data/MNIST/'
    if image_enlarge_method == 'zoom':
        if NPY:
            data_file = data_dir + 'mnist{}_zoom_{}_train.npy'.format(
                size, order)
        else:
            data_file = data_dir + 'mnist{}_zoom_{}.pkl.gz'.format(size, order)
        if not os.path.isfile(data_file):
            print '\ndata set not found, creating now ...'
            preprocessing_mnist.zoom(size=size,
                                     interpolation_order=order,
                                     npy=NPY)
        data_file = data_dir + 'mnist{}_zoom_{}'.format(size, order)
    else:
        if NPY:
            data_file = data_dir + 'mnist{}_border_train.npy'.format(size)
        else:
            data_file = data_dir + 'mnist{}_border.pkl.gz'.format(size)
        if not os.path.isfile(data_file):
            print '\ndata set not found, creating now ...'
            preprocessing_mnist.enlarge_border(size=size, npy=NPY)
        data_file = data_dir + 'mnist{}_border'.format(size)

    print 'load data ...'
    train_gen, n_train_samples, dev_gen, n_dev_samples = preprocessing_mnist.load(
        data_file, batch_size, npy=NPY)
    print 'number train samples: {}'.format(n_train_samples)
    print 'number dev samples: {}\n'.format(n_dev_samples)

    def inf_train_gen():
        while True:
            for images, _ in train_gen():
                yield images

    gen = inf_train_gen()

    print 'memory usage before loading data (GB): {}'.format(mem)
    mem = memory()
    print 'memory usage after loading data (GB): {}\n'.format(mem)

    # -------------------------------------------------------
    # for saving the model create a saver
    saver = tf.train.Saver(max_to_keep=1)
    epochs_trained = 0
    tp_columns = [
        'iteration', 'time_for_iterations', 'SWD_approximation',
        'time_for_SWD', 'used_memory_GB'
    ]
    training_progress = pd.DataFrame(data=None, index=None, columns=tp_columns)

    # restore the model:
    if load_saved:
        saver.restore(sess=session, save_path=model_dir + 'saved_model')
        epochs_trained = int(np.loadtxt(fname=model_dir + 'epochs.csv'))
        tp_app = pd.read_csv(filepath_or_buffer=directory +
                             'training_progress.csv',
                             index_col=0,
                             header=0)
        training_progress = pd.concat([training_progress, tp_app],
                                      axis=0,
                                      ignore_index=True)
        print 'loaded training progress, and the model, which was already trained for {} iterations'.format(
            epochs_trained)
        print training_progress
        print

    # if the network is already trained completely, set send to false
    if epochs_trained == epochs:
        send = False

    # -------------------------------------------------------
    # print and get model summary
    n_params_gen = model_summary(scope='generator')[0]
    print

    # -------------------------------------------------------
    # FK: print model config to file
    model_config = [[
        'data_set', 'input_dim', 'batch_size', 'learning_rate',
        'fixed_noise_size', 'n_features_first', 'n_features_reduction_factor',
        'min_features', 'architecture', 'init_method', 'BN', 'JL_dim',
        'JL_error', 'n_projections', 'n_trainable_params_gen'
    ],
                    [
                        data_file[:-7], input_dim, batch_size, learning_rate,
                        fixed_noise_size, n_features_first,
                        n_features_reduction_factor, min_features,
                        architecture, init_method, BN, JL_dim, JL_error,
                        n_projections, n_params_gen
                    ]]
    model_config = np.transpose(model_config)
    model_config = pd.DataFrame(data=model_config)
    model_config.to_csv(path_or_buf=directory + 'model_config.csv')
    print 'saved model configuration'
    print

    # -------------------------------------------------------
    # training loop
    print 'train model with config:'
    print model_config
    print

    t = time.time()  # get start time

    for i in xrange(epochs - epochs_trained):
        # print the current iteration
        print('iteration={}/{}'.format(i + epochs_trained + 1, epochs))

        images = gen.next()
        z_train = np.random.randn(batch_size, input_dim)
        if use_JL:
            JL_train = np.random.randn(picture_size, JL_dim)
            P_train = np.random.randn(JL_dim, n_projections)
            session.run(g_train,
                        feed_dict={
                            real_data_int: images,
                            z: z_train,
                            JL: JL_train,
                            P_non_normalized: P_train
                        })
        else:
            P_train = np.random.randn(picture_size, n_projections)
            session.run(g_train,
                        feed_dict={
                            real_data_int: images,
                            z: z_train,
                            P_non_normalized: P_train
                        })

        mem = memory()
        if not settings.euler:
            print 'memory use (GB): {}'.format(mem)

        # all STEP_SIZE_LOSS_COMPUTATION steps compute the losses and elapsed times, and generate images, and save model
        if (i + epochs_trained) % STEP_SIZE_LOSS_COMPUTATION == (
                STEP_SIZE_LOSS_COMPUTATION - 1):
            # get time for last STEP_SIZE_LOSS_COMPUTATION epochs
            elapsed_time = time.time() - t

            # generate sample images from fixed noise
            generate_image(i + epochs_trained + 1)
            print 'generated images'

            # compute and save losses on dev set, starting after ? iterations
            if i + epochs_trained + 1 >= START_COMPUTING_LOSS:
                t = time.time()
                dev_SWD = []
                print 'compute SWD ...'
                j = 0
                for images_dev, _ in dev_gen():
                    if not settings.euler:
                        # progress bar
                        sys.stdout.write(
                            '\r>> Compute SWD %.1f%%' %
                            (float(j) / float(n_dev_samples / batch_size) *
                             100.0))
                        sys.stdout.flush()
                        j += 1
                    z_train_dev = np.random.randn(batch_size, input_dim)
                    P_train_dev = np.random.randn(picture_size, n_projections)
                    if use_JL:
                        _dev_SWD = session.run(SWD,
                                               feed_dict={
                                                   real_data_int:
                                                   images_dev,
                                                   z:
                                                   z_train_dev,
                                                   P_non_normalized_SWD:
                                                   P_train_dev
                                               })
                    else:
                        _dev_SWD = session.run(SWD,
                                               feed_dict={
                                                   real_data_int: images_dev,
                                                   z: z_train_dev,
                                                   P_non_normalized:
                                                   P_train_dev
                                               })
                    dev_SWD.append(_dev_SWD)
                dev_SWD = np.mean(dev_SWD)
                t_loss = time.time() - t
            else:
                dev_SWD = None
                t_loss = None

            tp_app = pd.DataFrame(data=[[
                i + epochs_trained + 1, elapsed_time, dev_SWD, t_loss, mem
            ]],
                                  index=None,
                                  columns=tp_columns)
            training_progress = pd.concat([training_progress, tp_app],
                                          axis=0,
                                          ignore_index=True)

            # save model
            saver.save(sess=session, save_path=model_dir + 'saved_model')
            # save number of epochs trained
            np.savetxt(fname=model_dir + 'epochs.csv',
                       X=[i + epochs_trained + 1])
            print 'saved model after training epoch {}'.format(i +
                                                               epochs_trained +
                                                               1)
            # save training progress
            training_progress.to_csv(path_or_buf=directory +
                                     'training_progress.csv')

            print 'saved training progress\n'

            # fix new start time
            t = time.time()

    # -------------------------------------------------------
    # after training close the session
    session.close()
    tf.reset_default_graph()

    # -------------------------------------------------------
    # when training is done send email
    if send:
        subject = 'JLSWGN ({}) training finished'.format(data_file[:-7])
        body = 'to download the results of this model use (in the terminal):\n\n'
        body += 'scp -r [email protected]:/cluster/home/fkrach/MasterThesis/MTCode1/' + directory + ' .'
        files = [
            directory + 'model_config.csv',
            directory + 'training_progress.csv',
            samples_dir + 'iteration_{}.png'.format(epochs)
        ]
        send_email.send_email(subject=subject, body=body, file_names=files)

    return directory
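
A minimal usage sketch (hypothetical values, not taken from the source):

# illustrative call: train a JLSWGN on 32x32 zoomed MNIST with JL error 0.5
out_dir = train(input_dim=128, batch_size=100, JL_error=0.5, power=5, load_saved=True)
print 'results stored under:', out_dir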
Example #33
def digital_assistant(data):

    if "who are you" in data:
        var1.set(data)
        window.update()
        var.set("I am Iris, Your pretty intelligent assistant")
        window.update()
        st.respond("I am Iris, Your pretty intelligent assistant")
    elif 'good morning' in data:
        var1.set(data)
        window.update()
        var.set("Good morning dear")
        st.respond("Good morning dear")
        window.update()

    elif "time" in data:
        var1.set(data)
        window.update()
        gf.get_time(var,window)

    elif 'youtube' in data:
        var1.set(data)
        window.update()
        gf.open_youtube(var,window,var1)

    elif 'google' in data:
        var1.set(data)
        window.update()
        gf.open_google(var,window,var1)

    elif "write" in data:
        var1.set(data)
        window.update()
        nf.write_note(var,window,var1)


    elif "show" in data:
        var1.set(data)
        window.update()
        nf.show_note(var,window)

    elif 'joke' in data:
        var1.set(data)
        window.update()
        gf.get_joke(var,window)


    elif 'book' in data:
        var1.set(data)
        window.update()
        bf.read_book(var,window)


    elif 'weather' in data:
        var1.set(data)
        window.update()
        Key = '71b466b89b734b6d8c5566794767010f'
        city_name = 'amman'
        url = f'https://api.weatherbit.io/v2.0/forecast/daily?city={city_name}&key={Key}'
        get_info_as_Json = requests.get(url).json()
        description = get_info_as_Json['data'][0]['weather']['description']
        temperature = get_info_as_Json['data'][0]['high_temp']
        var.set(f'the weather in {city_name} is {description} and the temperature is {temperature}C.')
        window.update()
        st.respond(f'the weather in {city_name} is {description} and the temperature is {temperature}C.')


    elif 'email' in data:
        var1.set(data)
        window.update()
        se.send_email(var,window,var1)

    elif 'sms' in data:
        var1.set(data)
        window.update()
        sf.send_sms()

    elif 'image' in data:
        var1.set(data)
        window.update()
        azure_a.read_image_text()

    elif "stop" in data:
        var1.set(data)
        window.update()
        print('Listening stopped')
        window.destroy()
    else:
        st.respond("Sorry! can you repeat .. ")
        data = st.listen().lower()
        digital_assistant(data)
Example #34
                startTime = str(startTime)[:10]
                endTime = str(endTime)[:10]
                result = getUrl(convent_time(startTime), convent_time(endTime),
                                liveId, stm, cookie, accountId)
                for r in result:
                    insert_table(accountId, liveId, r[0]['raw'], r[1]['raw'],
                                 r[2]['raw'], convent_time(startTime),
                                 convent_time(endTime), stm)
        except:
            print("Unknown error")
            continue


def main():
    mg = multiprocessing.Manager()
    accountLst = mg.list((get_accountIds()))
    jobs = [
        multiprocessing.Process(target=root, args=(accountLst, ))
        for i in range(10)
    ]
    for i in jobs:
        i.start()
    for j in jobs:
        j.join()


if __name__ == '__main__':
    t = time.time()
    main()
    send_email(f"主播销量数据更新完毕 花费 {time.time() - t}s", "直播销量")
Example #35
from feed_process.main_transform import run
from feed_process import FP_ENV
from send_email import send_email
import sys, traceback

if __name__ == "__main__":
    try:
        run()
    except Exception as e:

        tb = traceback.format_exc()
        email_body = "Exception type: {0}\nEnvironment: {3} \n\n\n {2} \n\n\n{1}".format(
            type(e).__name__, tb, "Transform Process", FP_ENV)

        send_email("Feed Process Error", email_body)
Example #36
def generate_graph(stock_name,
                   days_back,
                   num_timesteps,
                   target_len,
                   minimum_days=500,
                   max_tweets=200):
    #get stock data and twitter sentiment
    stock_data = get_stock_data(stock_name, days_back, minimum_days,
                                max_tweets)

    X_train, y_train, X_test, y_test, ref = load_data(stock_data,
                                                      num_timesteps,
                                                      target_len=target_len,
                                                      train_percent=.9)

    # store recent data so that we can get a live prediction
    recent_reference = []
    recent_data = stock_data[-num_timesteps:]
    recent_data = normalize_timestep(recent_data, recent_reference)

    print("    X_train", X_train.shape)
    print("    y_train", y_train.shape)
    print("    X_test", X_test.shape)
    print("    y_test", y_test.shape)

    # setup model
    print("TRAINING")
    model = build_model([6, num_timesteps, target_len])
    model.fit(X_train,
              y_train,
              batch_size=512,
              epochs=1,
              validation_split=0.1,
              verbose=2)

    #evaluate the model
    trainScore = model.evaluate(X_train, y_train, verbose=100)
    print('Train Score: %.2f MSE (%.2f RMSE) (%.2f)' %
          (trainScore[0], math.sqrt(trainScore[0]), trainScore[1]))

    testScore = model.evaluate(X_test, y_test, verbose=100)
    print('Test Score: %.2f MSE (%.2f RMSE) (%.2f)' %
          (testScore[0], math.sqrt(testScore[0]), testScore[1]))

    #make predictions
    print("PREDICTING")
    p = model.predict(X_test)
    # One-sample predictions need a batch dimension: the argument must be 3-D.
    recent_data = np.asarray([recent_data])
    future = model.predict(recent_data)

    # document results in file
    print("WRITING TO LOG")
    file = open("log.txt", "w")
    for i in range(0, len(X_train)):
        for s in range(0, num_timesteps):
            file.write(str(X_train[i][s]) + "\n")
        file.write("Target: " + str(y_train[i]) + "\n")
        file.write("\n")

    for i in range(0, len(X_test)):
        for s in range(0, num_timesteps):
            file.write(str(X_test[i][s]) + "\n")
        file.write("Target: " + str(y_test[i]) + "\n")
        file.write("Prediction: " + str(p[i]) + "\n")
        file.write("\n")

    # de-normalize
    print("DENORMALIZING")
    for i in range(0, len(p)):
        p[i] = (p[i] + 1) * ref[round(.9 * len(ref) + i)]
        y_test[i] = (y_test[i] + 1) * ref[round(.9 * len(ref) + i)]

    future[0] = (future[0] + 1) * recent_reference[0]
    recent_data[0] = (recent_data[0] + 1) * recent_reference[0]

    # plot historical predictions
    print("PLOTTING")
    for i in range(0, len(p)):
        if i % (target_len * 2) == 0:
            plot_index = i  #for filling plot indexes
            plot_indexes = []
            plot_values = p[i]
            for j in range(0, target_len):
                plot_indexes.append(plot_index)
                plot_index += 1
            plt.plot(plot_indexes, plot_values, color="red")

    # plot historical actual
    plt.plot(y_test[:, 0], color='blue',
             label='Actual')  # actual stock price history

    # plot recent prices
    plot_indexes = [len(y_test) - 1]
    plot_values = [y_test[-1, 0]]
    plot_index = None
    for i in range(0, len(recent_data[0])):
        plot_values.append(recent_data[0][i][0])
        plot_index = len(y_test) + i
        plot_indexes.append(len(y_test) + i)
    plt.plot(plot_indexes, plot_values, color='blue')

    # plot future predictions
    plot_indexes = [plot_index]
    plot_values = [recent_data[0][-1][0]]
    for i in range(0, len(future[0])):
        plot_index += 1
        plot_values.append(future[0][i])
        plot_indexes.append(plot_index)
    plt.plot(plot_indexes, plot_values, color="red", label="Prediction")

    #show/save plot
    print("SENDING EMAILS")
    plt.legend(loc="upper left")
    plt.title(stock_name + " Price Predictions")
    plt.xlabel("Days")
    plt.ylabel("Price ($)")
    filename = stock_name + "_" + arrow.utcnow().format("YYYY-MM-DD") + \
        "_" + str(days_back) + "_Sentiment"
    plt.savefig("graphs/" + filename)
    #plt.show()
    plt.close()
    send_email(filename)

    return True
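
# A hypothetical driver for generate_graph(); every value below is an
# illustrative assumption, and the call relies on this module's own helpers
# (get_stock_data, load_data, build_model, send_email).
if __name__ == "__main__":
    generate_graph(
        stock_name="AAPL",  # ticker passed through to get_stock_data
        days_back=730,      # how much history to request
        num_timesteps=50,   # input window length per training sample
        target_len=5,       # prediction horizon in days
        minimum_days=500,
        max_tweets=200,
    )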
Example #37
# __author__ = 'zhouyang'
# -*- coding: utf-8 -*-
import datetime
import sys
from send_email import send_email
from intelligent_select import stock_crawler
from level2 import level2_crawler


def get_subject():
    today = datetime.datetime.now().date()
    subject = "同花顺 智能选股 及 level2每日一星 %s 推荐股票" % str(today)
    return subject


if __name__ == "__main__":
    # set the default encoding to utf8
    reload(sys)
    sys.setdefaultencoding("utf8")
    stock_crawler = stock_crawler()
    level2 = level2_crawler()
    email_text = stock_crawler.get_email_text() + "\n"
    email_text += level2.get_email_text()
    subject = get_subject()
    # send the email
    send_email(subject, email_text)
Example #38
bug_device0 = []
bug_device1 = []
while True:
    count += 1
    to_log('Network cut-off #' + str(count))
    # cut the network
    if set_dongle():
        time.sleep(180)
        # after 3 min, query the device connection status
        for d in data:
            status = device_connect_status(d)
            to_log('Device ' + d + ' connection status: ' + status)
            if status == '1':
                bug_device0.append(d)

        # are any devices still "connected" 3 min after the cut-off?
        if bug_device0:
            # wait another 3 min, then re-check the device connection status
            time.sleep(180)
            for db in bug_device0:
                if device_connect_status(db) == '1':
                    bug_device1.append(db)
        # if devices are still "connected" 6 min after the cut-off, send an email
        if bug_device1:
            send_email('Box offline but platform reports devices as connected',
                       'Problem devices:\n' + str(bug_device1),
                       ['*****@*****.**'])
        # if the device statuses are normal, restore the network, wait 5 min, and cut again
        else:
            set_dongle()
            time.sleep(300)
Example #39
def run():

    history()

    print('\n')
    print('--------------------------')
    print('     Running scrapers')
    print('--------------------------')

    conf = configparser.ConfigParser()
    config_file = os.path.join(os.path.dirname(__file__), "config.ini")
    conf.read(config_file)

    nightly_data_date = conf.get('FBO', 'nightly_data_date')
    if nightly_data_date == 'None':
        nightly_data_date = None

    notice_types_config = conf.get('FBO', 'notice_types')[:-1]
    notice_types = notice_types_config.split(',')

    naics_config = conf.get('FBO', 'naics')[:-1]
    naics = naics_config.split(',')

    agencies_config = conf.get('FBO', 'agencies')[:-1]
    agencies = agencies_config.split(',')

    check_for_phrases_config = conf.get('FBO', 'check_for_phrases')
    check_for_phrases = check_for_phrases_config.split(',')[0] == 'True'

    check_for_agency_config = conf.get('FBO', 'check_for_agency')
    check_for_agency = check_for_agency_config.split(',')[0] == 'True'

    curr = os.getcwd()
    daily_message_dir = os.path.join(curr, 'data', 'FBO', 'daily_message')

    # FBO FTP SCRAPER
    nightly_data = get_nightly_data()
    message_field = get_message_field(nightly_data)
    if len(message_field) > 0:
        now_minus_two = datetime.utcnow() - timedelta(2)
        date = now_minus_two.strftime("%m/%d/%Y")
        subject = 'DOE RFP Alert {0}'.format(date)
        recipients = ['*****@*****.**']

        # Run scrapers now and add any other emails to this one?

        send_email('*****@*****.**', 'Rfpsender1!!', recipients, subject,
                   message_field)
        hasdata = True

    else:

        message_field = 'No new RFP matching criteria'
        hasdata = False

    if not os.path.exists(daily_message_dir):
        os.mkdir(daily_message_dir)
        history('created_dir',
                dir_location=daily_message_dir.split('RFPFinder')[1])
    os.chdir(daily_message_dir)
    with open("daily_message.txt", "w") as text_file:

        now_minus_two = datetime.utcnow() - timedelta(2)
        date = now_minus_two.strftime("%m/%d/%Y")
        text_file.write(date)

    with open("daily_message.txt", "a") as text_file:
        text_file.write('\n')
        text_file.write(message_field)

    history('fbo_daily_message', hasdata=hasdata)
    os.chdir(curr)

    print('   - FBO FTP finished')
    if not hasdata:
        print('      - No new RFP matching criteria')

    # SCRAPERS
    main()
Example #40
import auth
from googleapiclient.discovery import build
from httplib2 import Http


def get_labels():
    # Call the Gmail API
    results = service.users().labels().list(userId='me').execute()
    labels = results.get('labels', [])

    if not labels:
        print('No labels found.')
    else:
        print('Labels:')
        for label in labels:
            print(label['name'])


SCOPES = 'https://mail.google.com/'
authInst = auth.auth(SCOPES)
credentials = authInst.get_credentials()
service = build('gmail', 'v1', http=credentials.authorize(Http()))

import send_email
sendInst = send_email.send_email(service)
message = sendInst.create_message_with_attachment('*****@*****.**',
                                                  '*****@*****.**',
                                                  'test gmail api',
                                                  'hi there!, test email',
                                                  'Capture.PNG')
#message = sendInst.create_message('*****@*****.**', '*****@*****.**', 'test gmail api', 'hi there!')
sendInst.send_message('me', message)
Example #41
def GetInfo():
    global EGRN
    global wb
    global ws
    global tout

    Status = 0

    t0 = time.time()
    while EGRN.find_element_by_css_selector("div.blockGrey").is_displayed():
        time.sleep(1)
        if time.time() - t0 > 60:
            print(
                "The expected page did not appear within 60 seconds. Probably a site error."
            )
            return (9)

    KN = re.search(
        r"\b\d{2}:\d{2}:\d{1,7}:\d{1,}\b",
        EGRN.find_element_by_css_selector("div.header3").get_attribute(
            "innerText"))[0]
    print(f"{KN} - подготовка запроса")
    vapp = EGRN.find_element_by_css_selector("div.v-app")

    CaptchaField = vapp.find_elements_by_xpath(".//input[@type='text']")[0]
    SendButton = vapp.find_element_by_xpath(
        ".//span[contains(@class,'v-button-caption') and contains(text(),'Отправить запрос')]"
    )

    Radio = vapp.find_elements_by_xpath(".//input[@type='radio']")[1]
    EGRN.execute_script("arguments[0].scrollIntoView();", Radio)
    Radio.click()

    # this loop repeats while the captcha is recognized incorrectly
    while True:
        # Обработаем капчу
        captcha = GetCaptcha(vapp)
        if captcha == "00000":
            print("Ошибка при обработке капчи")
            return (9)

        # Enter the captcha and submit the request
        while True:
            try:
                CaptchaField.click()
                break
            except:
                time.sleep(1)
                print("CaptchaField.click() wait")
        time.sleep(1)
        CaptchaField.send_keys(captcha)
        time.sleep(1)
        SendButton.click()

        ## Wait for the popup
        PopUp = None
        while True:
            PopUp = vapp.find_elements_by_xpath("//div[@class='popupContent']")
            if len(PopUp) > 0:
                break
            else:
                time.sleep(5)
                print(".", end=" ")

## The popup appeared; now deal with it.
## Possible variants:
##  1. these cases are clear:
##        confirmation of a successful request
##        "Interval between requests exceeded"
##        "Error while registering the request"
##  2. this one is less clear so far:
##        "Communication problem". Most likely, depending on the error code, work can continue - needs checking.
##          Sometimes it keeps working normally, sometimes it asks to log in again.
##  3. an unexpected variant: an invisible popup saying the captcha was wrong.

        PopUpType = None
        t = PopUp[0].get_attribute("innerText")
        if re.search("Запрос зарегистрирован", t) != None:
            PopUpType = "Normal"
        elif re.search("Превышен интервал между запросами", t) != None:
            PopUpType = "Problem_Timeout"
        elif re.search("Ошибка при регистрации запроса", t) != None:
            PopUpType = "Problem_Registration"
        elif re.search("Ошибка ввода капчи", t) != None:
            PopUpType = "Problem_Captcha"
        elif re.search("Communication problem", t) != None:
            PopUpType = "Problem_Communication"
        else:
            PopUpType = "Problem_Unknown"
        print(f"всё проверили, получилось {PopUpType}")

        if PopUpType == "Normal":
            OkButton = PopUp[0].find_elements_by_xpath(
                ".//span[contains(@class,'v-button-caption') and contains(text(),'Продолжить работу')]"
            )[0]
            NZ = ""
            NZ = re.search(r"\d{2}\-\d{9}",
                           PopUp[0].get_attribute("innerText"))[0]
            OkButton.click()
            print(f"{KN} :: {NZ} - запрос выполнен " +
                  datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
            send_email("EGRN bot OK", f"{KN} :: {NZ}")
            row = ws.max_row + 1
            ws.cell(row=row, column=1).value = KN
            ws.cell(row=row, column=2).value = NZ
            ws.cell(row=row,
                    column=3).value = datetime.now().strftime("%d.%m.%Y")
            wb.save(filename='rq.xlsx')
            return (0)
        elif PopUpType == "Problem_Timeout" or PopUpType == "Problem_Registration":
            PopUpCloseBox = PopUp[0].find_elements_by_xpath(
                ".//div[@class='v-window-closebox']")[0]
            PopUpCloseBox.click()
            time.sleep(10)
            return (1)
        elif PopUpType == "Problem_Captcha":
            pass
        elif PopUpType == "Problem_Communication":
            return (9)
        else:
            return (9)
Example #42
File: index.py  Project: ZzyMy/12306_search
    def accept(self):
        self.loadDialog = loadDialog()
        username = self.lineEdit_1.text()
        password = self.lineEdit_2.text()

        data = {"username": username, "password": password, "appid": "otn"}
        login_res = my_session.post(login_url, data=data)

        data2 = {"appid": "otn"}
        Uamtk_res = my_session.post(Uamtk_url, data=data2)
        Uamtk_res_json = json.loads(Uamtk_res.text)
        umtk_id = Uamtk_res_json["newapptk"]

        data1 = {"tk": umtk_id}
        uamtk_res = my_session.post(uamtk_url, data=data1)
        if uamtk_res:
            uamtk_res_json = json.loads(uamtk_res.text)
            #print(uamtk_res_json)

        data3 = {"_json_att": ""}
        use_res = my_session.post(user_url, data=data3)
        #print(use_res)
        try:
            check_res = my_session.get(checkUser_url, timeout=5)
        except:
            check_res = my_session.get(checkUser_url, timeout=10)

        res_json = json.loads(check_res.text)
        print('res_json', res_json)
        if res_json['data']['flag'] == False:
            QMessageBox.information(self, "提醒", "登录失败,请重新输入账号密码!",
                                    QMessageBox.Yes)
            login_dialog.exec()

        elif res_json['data']['flag'] == True:
            QMessageBox.information(self, "提醒", "登录成功!", QMessageBox.Yes)
            # ignore_discard:  即保存需要被丢弃的cookie。
            # ignore_expires:  即过期的cookie也保存。
            my_session.cookies.save('cookie.txt',
                                    ignore_discard=True,
                                    ignore_expires=True)
            # 	"secretStr" 车次,需要进行解码
            #   "train_date": 出发日期
            #   "back_train_date"  返回日期
            #   "tour_flag": "dc"  单程/ 往返(wc)
            #   "purpose_codes":  "ADULT"  普通/学生(0X00)
            #   "query_from_stati":  出发车站 ,可以在查询车次接口中得到
            #   "query_to_station":  返回车站,  可以在查询车次接口中得到
            #   "undefined": ""  应该是跟返回数据相关
            order_data = {
                "secretStr": urllib.parse.unquote(secretStr),
                "train_date": train_date,
                "back_train_date": back_train_date,
                "tour_flag": "dc",
                "purpose_codes": "ADULT",
                "query_from_station_name": query_from_station_name,
                "query_to_station_name": query_to_station_name
            }
            print('order_data', order_data)
            buy_ticket.postOrder(my_session, order_url, order_data)
            global DTO_res_json
            DTO_res_json, globalRepeatSubmitToken, purpose_codes, key_check_isChange, leftTicketStr, train_location = buy_ticket.get_initDc(
                my_session, initDc_url, DTO_url)
            if DTO_res_json['data']['isExist'] == True:
                self.select_people_dialog = select_people_dialog()
                self.select_people_dialog.exec_()
                passengerTicketStr = getstr_utils.getPassengerTicketStr(
                    people_list, seat_type_codes)
                oldPassengerStr = getstr_utils.getOldPassengerStr(people_list)
                #tour_flag: {dc: "dc", wc: "wc", fc: "fc", gc: "gc", lc: "lc", lc1: "l1", lc2: "l2"},
                checkOrderInfo_data = {
                    'cancel_flag': '2',
                    'bed_level_order_num': '000000000000000000000000000000',
                    'passengerTicketStr': passengerTicketStr,
                    'oldPassengerStr': oldPassengerStr,
                    'tour_flag': 'dc',
                    'randCode': '',
                    'whatsSelect': '1',
                    '_json_att': '',
                    "REPEAT_SUBMIT_TOKEN": globalRepeatSubmitToken
                }

                self.loadDialog.show()
                with ThreadPoolExecutor(max_workers=5) as executor:
                    executor.submit(buy_ticket.checkOrder, my_session,
                                    checkOrderInfo_url, checkOrderInfo_data)
                    # time.sleep(1)

            #{'train_date': 'Mon Jan  1 2018 00:00:00 GMT+0800 (中国标准时间)',
            # 'train_no': '78000K95180E',
            #  'stationTrainCode': 'K9518',
            #  'seatType': '3',
            #  'fromStationTelecode': 'GIW',
            #  'toStationTelecode': 'ZIW',
            #  'leftTicket': 'fRHQaP8JPYKHxyvfihr70ZJYDi2VYncf4DPG%2FWI6ZmA4DYyN',
            # 'purpose_codes': '00',
            # 'train_location': 'W2',
            # '_json_att': '',
            # 'REPEAT_SUBMIT_TOKEN': 'a8faf49bca59d540d55662d1692a7dc4'}
                getQueue_data = {
                    'train_date': time_utils.fmt_time(train_date),
                    'train_no': train_no,
                    'stationTrainCode': stationTrainCode,
                    'seatType': seat_type_codes,
                    'fromStationTelecode': fromStationTelecode,
                    'toStationTelecode': toStationTelecode,
                    'leftTicket': leftTicketStr,
                    'purpose_codes': purpose_codes,
                    'train_location': train_location,
                    '_json_att': '',
                    'REPEAT_SUBMIT_TOKEN': globalRepeatSubmitToken
                }
                buy_ticket.getQueue(my_session, getQueue_url, getQueue_data)
                confirmOrder_data = {
                    'passengerTicketStr': passengerTicketStr,  # passenger info, fetched earlier
                    'oldPassengerStr': oldPassengerStr,  # fetched earlier
                    'randCode': '',  # random value, empty
                    "purpose_codes": purpose_codes,  # fetched earlier
                    "key_check_isChange":
                    key_check_isChange,  # obtained from the same page as REPEAT_SUBMIT_TOKEN
                    "leftTicketStr": leftTicketStr,  # fetched earlier
                    'train_location': train_location,  # fetched earlier
                    'choose_seats': '',  # seat choice, usually for high-speed trains
                    'roomType': '00',  # fixed value
                    'dwAll': 'N',  # fixed value
                    "_json_att": "",  # empty
                    'seatDetailType': '000',  # fixed value
                    'whatsSelect': '1',  # fixed value
                    "REPEAT_SUBMIT_TOKEN": globalRepeatSubmitToken,  # fetched earlier
                }

                buy_ticket.confirmOrder(my_session, confirmOrder_url,
                                        confirmOrder_data)
                self.loadDialog.close()
                if os.path.exists('Email.txt') == True and os.path.getsize(
                        'Email.txt') != 0:
                    host = 'smtp.qq.com'
                    username = '******'
                    passwd = 'gtskanozhqstbfjh'
                    to_list = []
                    subject = "预定成功"
                    content = '您所预定的' + stationTrainCode + '车票已成功'
                    with open('Email.txt', 'r', encoding='utf-8') as f:
                        Email_data = f.read()
                        to_list.append(Email_data)
                    send_email.send_email(host, username, passwd, to_list,
                                          subject, content)
                QMessageBox.information(self, "提醒", "购票成功!", QMessageBox.Yes)
                login_dialog.reject()
Example #43
File: report.py  Project: zQuantz/OscraP
def report(title_modifier, successful, failures, faults_summary, db_flags,
           db_stats):
    def write_str(product):

        if len(faults_summary[product]) > 0:

            df = pd.DataFrame(faults_summary[product]).T
            df.columns = [
                'Lower Bound', 'First Count', 'Second Count', 'Delta'
            ]
            return df.to_html()

        return ""

    options_fault_str = write_str("options")
    analysis_faults_str = write_str("analysis")
    keystats_faults_str = write_str("keystats")

    ohlc_faults_str = ""
    if len(faults_summary['ohlc']) > 0:

        df = pd.DataFrame(faults_summary['ohlc']).T
        df.columns = ['Status', 'New Status']
        ohlc_faults_str = df.to_html()

    ###############################################################################################

    total = successful['options'] + failures['options']

    starts = db_stats[0][0]
    ends = db_stats[-1][1]

    counts = list(zip(starts, ends))
    analysis_counts, keystats_counts, ohlc_counts, options_counts = counts

    adds = [[x2 - x1 for x1, x2 in zip(*batch)] for batch in db_stats]

    df = pd.DataFrame(adds)
    df.columns = [
        'Analysis Adds', 'Key Stats Adds', 'OHLC Adds', 'Option Adds'
    ]
    df = df.set_index([[f"Batch #{i+1}" for i in range(len(df))]])

    db_flag_names = ["Failure", "Successful"]
    db_flags = [db_flag_names[flag] for flag in db_flags]
    df['Indexing Flags'] = db_flags

    ingestion_str = df.to_html()

    ###############################################################################################

    body = f"""
		Ingestion Summary<br>
		{ingestion_str}<br>
		<br>

		Options Summary<br>
		Successful Tickers: {successful['options']}, {round(successful['options'] / total * 100, 2)}%<br>
		Failed Tickers: {failures['options']}, {round(failures['options'] / total * 100, 2)}%<br>
		Starting Row Count: {options_counts[0]}<br>
		Ending Row Count: {options_counts[1]}<br>
		New Rows Added: {options_counts[1] - options_counts[0]}<br>
		<br>

		OHLC Summary<br>
		Successful Tickers: {successful['ohlc']}, {round(successful['ohlc'] / total * 100, 2)}%<br>
		Failed Tickers: {failures['ohlc']}, {round(failures['ohlc'] / total * 100, 2)}%<br>
		Starting Row Count: {ohlc_counts[0]}<br>
		Ending Row Count: {ohlc_counts[1]}<br>
		New Rows Added: {ohlc_counts[1] - ohlc_counts[0]}<br>
		<br>

		Analysis Summary<br>
		Successful Tickers: {successful['analysis']}, {round(successful['analysis'] / total * 100, 2)}%<br>
		Failed Tickers: {failures['analysis']}, {round(failures['analysis'] / total * 100, 2)}%<br>
		Starting Row Count: {analysis_counts[0]}<br>
		Ending Row Count: {analysis_counts[1]}<br>
		New Rows Added: {analysis_counts[1] - analysis_counts[0]}<br>
		<br>

		Key Statistics Summary<br>
		Successful Tickers: {successful['keystats']}, {round(successful['keystats'] / total * 100, 2)}%<br>
		Failed Tickers: {failures['keystats']}, {round(failures['keystats'] / total * 100, 2)}%<br>
		Starting Row Count: {keystats_counts[0]}<br>
		Ending Row Count: {keystats_counts[1]}<br>
		New Rows Added: {keystats_counts[1] - keystats_counts[0]}<br>
		<br>

		Options Fault Summary<br>
		{options_fault_str}<br>
		<br>

		OHLC Fault Summary<br>
		{ohlc_faults_str}<br>
		<br>

		Analysis Fault Summary<br>
		{analysis_faults_str}<br>
		<br>

		Key Statistics Fault Summary<br>
		{keystats_faults_str}<br>
		<br>

		See attached for the log file and the collected data.<br>
	"""

    ###############################################################################################

    attachments = []

    os.system(f"bash {DIR}/utils/truncate_log_file.sh")
    attachments.append({
        "ContentType": "plain/text",
        "filename": "log.log",
        "filepath": f"{DIR}"
    })

    ###############################################################################################

    send_email(CONFIG, f"{title_modifier} Web Scraping Summary", body,
               attachments, logger)
Example #44
File: index.py  Project: ZzyMy/12306_search
    def buy_ticket(self):
        ticket_item = self.dialog_tableWidget.selectedItems()
        if ticket_item == []:
            QMessageBox.information(self, "Notice", "Please select a ticket type!", QMessageBox.Yes)
        elif ticket_item[0].text() == '':
            QMessageBox.information(self, "Notice", "Please select a valid type!", QMessageBox.Yes)
        elif ticket_item[0].text() == '无':
            QMessageBox.information(self, "Notice", "Please select a seat class with tickets left!", QMessageBox.Yes)
        else:
            # business seat (9), premier seat (P), first class (M), second class (O), deluxe soft sleeper (6), soft sleeper (4), hard sleeper (3), soft seat (2), hard seat (1)
            global seat_type_codes
            seat_type_list = [
                'P', 'M', 'O', '6', '4', '5', '3', '2', '1', '1', '0'
            ]
            dialog_count = self.dialog_tableWidget.rowCount()  # total number of rows
            row_count = self.dialog_tableWidget.currentRow(
            ) + 1  # selected row number (1-based)
            for x in range(1, dialog_count):
                if x == row_count:
                    seat_type_codes = seat_type_list[x - 1]

            if os.path.exists('cookie.txt') == True:
                if os.path.getsize('cookie.txt') == 0:
                    img_dialog.show()
                    img_dialog.get_captcha_image()
                else:
                    my_session.cookies.load('cookie.txt',
                                            ignore_discard=True,
                                            ignore_expires=True)
                    handler = urllib.request.HTTPCookieProcessor(
                        my_session.cookies)
                    opener = urllib.request.build_opener(handler)
                    response = opener.open(checkUser_url)
                    text_html = response.read().decode('utf-8')
                    str_json = json.loads(text_html)
                    print(str_json)
                    if str_json['data']['flag'] == False:
                        QMessageBox.information(self, "提醒", "cookie已过期,请重新登录",
                                                QMessageBox.Yes)
                        img_dialog.show()
                        img_dialog.get_captcha_image()
                    if str_json['data']['flag'] == True:
                        QMessageBox.information(self, "提醒",
                                                "cookie登录成功,点击确认开始购票",
                                                QMessageBox.Yes)
                        order_data = {
                            "secretStr": urllib.parse.unquote(secretStr),
                            "train_date": train_date,
                            "back_train_date": back_train_date,
                            "tour_flag": "dc",
                            "purpose_codes": "ADULT",
                            "query_from_station_name": query_from_station_name,
                            "query_to_station_name": query_to_station_name
                        }
                        print(order_data)
                        buy_ticket.postOrder(my_session, order_url, order_data)
                        global DTO_res_json
                        DTO_res_json, globalRepeatSubmitToken, purpose_codes, key_check_isChange, leftTicketStr, train_location = buy_ticket.get_initDc(
                            my_session, initDc_url, DTO_url)
                        if DTO_res_json['data']['isExist'] == True:
                            self.select_people_dialog = select_people_dialog()
                            self.select_people_dialog.exec_()
                            passengerTicketStr = getstr_utils.getPassengerTicketStr(
                                people_list, seat_type_codes)
                            oldPassengerStr = getstr_utils.getOldPassengerStr(
                                people_list)
                            #tour_flag: {dc: "dc", wc: "wc", fc: "fc", gc: "gc", lc: "lc", lc1: "l1", lc2: "l2"},
                            checkOrderInfo_data = {
                                'cancel_flag': '2',
                                'bed_level_order_num':
                                '000000000000000000000000000000',
                                'passengerTicketStr': passengerTicketStr,
                                'oldPassengerStr': oldPassengerStr,
                                'tour_flag': 'dc',
                                'randCode': '',
                                'whatsSelect': '1',
                                '_json_att': '',
                                "REPEAT_SUBMIT_TOKEN": globalRepeatSubmitToken
                            }
                            print(checkOrderInfo_data)
                            checkOrder_res = my_session.post(
                                checkOrderInfo_url, data=checkOrderInfo_data)
                            checkOrder_res_json = json.loads(
                                checkOrder_res.text)
                            #{'train_date': 'Mon Jan  1 2018 00:00:00 GMT+0800 (中国标准时间)',
                            # 'train_no': '78000K95180E',
                            #  'stationTrainCode': 'K9518',
                            #  'seatType': '3',
                            #  'fromStationTelecode': 'GIW',
                            #  'toStationTelecode': 'ZIW',
                            #  'leftTicket': 'fRHQaP8JPYKHxyvfihr70ZJYDi2VYncf4DPG%2FWI6ZmA4DYyN',
                            # 'purpose_codes': '00',
                            # 'train_location': 'W2',
                            # '_json_att': '',
                            # 'REPEAT_SUBMIT_TOKEN': 'a8faf49bca59d540d55662d1692a7dc4'}
                            getQueue_data = {
                                'train_date': time_utils.fmt_time(train_date),
                                'train_no': train_no,
                                'stationTrainCode': stationTrainCode,
                                'seatType': seat_type_codes,
                                'fromStationTelecode': fromStationTelecode,
                                'toStationTelecode': toStationTelecode,
                                'leftTicket': leftTicketStr,
                                'purpose_codes': purpose_codes,
                                'train_location': train_location,
                                '_json_att': '',
                                'REPEAT_SUBMIT_TOKEN': globalRepeatSubmitToken
                            }
                            buy_ticket.getQueue(my_session, getQueue_url,
                                                getQueue_data)
                            confirmOrder_data = {
                                'passengerTicketStr':
                                passengerTicketStr,  # passenger info, fetched earlier
                                'oldPassengerStr': oldPassengerStr,  # fetched earlier
                                'randCode': '',  # random value, empty
                                "purpose_codes": purpose_codes,  # fetched earlier
                                "key_check_isChange":
                                key_check_isChange,  # obtained from the same page as REPEAT_SUBMIT_TOKEN
                                "leftTicketStr": leftTicketStr,  # fetched earlier
                                'train_location': train_location,  # fetched earlier
                                'choose_seats': '',  # seat choice, usually for high-speed trains
                                'roomType': '00',  # fixed value
                                'dwAll': 'N',  # fixed value
                                "_json_att": "",  # empty
                                'seatDetailType': '000',  # fixed value
                                'whatsSelect': '1',  # fixed value
                                "REPEAT_SUBMIT_TOKEN":
                                globalRepeatSubmitToken,  # fetched earlier
                            }

                            buy_ticket.confirmOrder(my_session,
                                                    confirmOrder_url,
                                                    confirmOrder_data)

                            if os.path.exists(
                                    'Email.txt'
                            ) == True and os.path.getsize('Email.txt') != 0:
                                host = 'smtp.qq.com'
                                username = '******'
                                passwd = 'gtskanozhqstbfjh'
                                to_list = []
                                subject = "预定成功"
                                content = '您所预定的' + stationTrainCode + '车票已成功'
                                with open('Email.txt', 'r',
                                          encoding='utf-8') as f:
                                    Email_data = f.read()
                                    to_list.append(Email_data)
                                send_email.send_email(host, username, passwd,
                                                      to_list, subject,
                                                      content)
                            QMessageBox.information(self, "提醒", "购票成功!",
                                                    QMessageBox.Yes)
                            login_dialog.reject()
            else:
                img_dialog.show()
                img_dialog.get_captcha_image()
Example #45
    fh = logging.FileHandler('rsq_s5rt-{0}.log'.format(date.get_curdate2()))
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(logging.Formatter(FILE_FORMAT))
    # add fh to the root logger
    root_logger.addHandler(fh)


if __name__ == '__main__':

    try:
        # set up logging
        setlog()

        # fetch rishiqing data
        LOG.debug("Start fetching Rishiqing data")
        jsondata = rsq_request.get_s5rtdata()

        file_name = "S5-" + date.get_curdate2() + ".xlsx"
        LOG.debug("xlsx文件名称为:" + file_name)
        #生成excel
        write_excel.write_excel(jsondata, file_name)
        LOG.debug("数据已写入xlsx文件:" + file_name)

        #发送email
        LOG.debug("开始发送Email")
        send_email.send_email(file_name)

    except Exception, e:
        LOG.error(e.message)
        raise
Example #46
def main():
    """Main method"""
    url = urlparse.urlparse(os.environ['DATABASE_URL'])
    dbname = url.path[1:]
    user = url.username
    password = url.password
    host = url.hostname
    port = url.port

    con = psycopg2.connect(dbname=dbname,
                           user=user,
                           password=password,
                           host=host,
                           port=port)
    cur = con.cursor()

    cur.execute("SELECT Price FROM Flights WHERE ID=1")
    rdr = cur.fetchall()
    current_price = float(rdr[0][0])

    url = "https://be.wizzair.com/7.0.0/Api/search/search"
    payload = {
        "isFlightChange": False,
        "isSeniorOrStudent": False,
        "flightList": [{
            "departureStation": "ABZ",
            "arrivalStation": "GDN",
            "departureDate": "2017-12-16"
        }],
        "adultCount": 1,
        "childCount": 0,
        "infantCount": 0,
        "wdc": True,
        "rescueFareCode": ""
    }
    headers = {'content-type': 'application/json'}

    response = requests.post(url, data=json.dumps(payload), headers=headers)

    json_response = json.loads(response.text)
    flights = json_response["outboundFlights"]

    admin_price = flights[0]["fares"][0]["administrationFeePrice"]["amount"]
    disc_baseprice = flights[0]["fares"][0]["discountedFarePrice"]["amount"]

    full_price = (float(admin_price) + float(disc_baseprice))

    print("Price check complete. Past price: %s. Current price: %s." %
          (str(current_price), str(full_price)))

    if full_price != current_price:
        cur.execute("UPDATE Flights SET Price='" + str(full_price) +
                    "' WHERE ID=1")
        con.commit()

        username = "******"
        password = os.environ['SMS_API_KEY']
        sms_to = os.environ['DAD_NUMBER']
        message = "WizzAir Alert. Price changed ABZ to GDN. From %s GBP To %s GBP" % (
            str(current_price), str(full_price))
        url = "http://api.smsapi.com/sms.do?username=%s&password=%s&to=%s&message=%s" % (
            username, password, sms_to, message)
        requests.get(url)

        send_email("*****@*****.**", str(full_price), str(current_price))

    cur.close()
    con.close()
Example #47
# create a logger
logger = logging.getLogger()
# set the logger level to INFO
logger.setLevel(level=logging.INFO)

# create a handler; here log rotation is time-based
handler = TimedRotatingFileHandler(LOG_FILE,
                                   when='M',
                                   interval=1,
                                   backupCount=10)
datefmt = '%Y-%m-%d %H:%M:%S'
format_str = '%(asctime)s %(levelname)s %(message)s'
# define the handler's output format
formatter = logging.Formatter(format_str, datefmt)
handler.setFormatter(formatter)
# after creating the handler, attach it to the logger
logger.addHandler(handler)

# create the UDP socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('127.0.0.1', 9981))
while True:
    data, addr = s.recvfrom(1024)
    msg = data.decode(encoding="utf-8")
    if not msg:
        pass
    else:
        logger.info(msg)
        send_email(msg)
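
# A minimal companion sender for the listener above, assuming the same host
# and port; any non-empty UTF-8 payload it sends is logged and forwarded
# through send_email.
import socket

sender = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sender.sendto("disk usage above 90%".encode("utf-8"), ('127.0.0.1', 9981))
sender.close()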
Example #48
def handle_exception(user, exception_msg):
    database_interface.report_non_functional_user(user.user_id)
    body, subject = text_generator.get_email_text_invalid_token()
    send_email.send_email(subject, body, [user.email])
    print(exception_msg)
    return exception_msg
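
# A sketch of a call site for handle_exception(); `users` and `fetch_updates`
# are stand-ins invented for illustration.
from types import SimpleNamespace

users = [SimpleNamespace(user_id=1, email="a@example.com")]

def fetch_updates(user):
    raise PermissionError("invalid token")  # simulate an expired/invalid token

for user in users:
    try:
        fetch_updates(user)
    except PermissionError as e:  # stand-in for an invalid-token error
        handle_exception(user, f"user {user.user_id}: {e}")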
Example #49
                if matched_posts:
                    message += f"{group_name}:\n" + "\n".join(matched_posts)
                    message += "\n\n"
            else:
                print(
                    f"Data not found in {group_name}. Check that posts were downloaded correctly for this group."
                )
        message += "\n\n"
    return message


if __name__ == "__main__":
    group_names = [
        "CityOfLondon", "LewishamUK", "GreenwichUK", "SouthwarkUK",
        "LambethUK", "TowerHamletsUK", "WestminsterUK",
        "HammersmithandFulhamUK", "KensingtonandChelseaUK", "HackneyUK",
        "EalingUK", "KentishtownUK", "BarkingandDagenham"
    ]

    posts_by_group = get_all_posts_for_each_group(group_names)

    stuff_I_want = ["chair", "garden", "kitchen", "whiteboard"]
    main_body_message = perform_multiple_matching(stuff_I_want, group_names,
                                                  posts_by_group)

    subject = "Your FreeCycle Update - (Items Followed: " + " | ".join(
        stuff_I_want).title() + ")"
    main_body_message = "Items followed: " + " | ".join(
        stuff_I_want).title() + "\n\n" + main_body_message
    send_email(subject, main_body_message)
Example #50
        if house_available_date.split('-')[1] == '06':
            print('June!!!')
        elif house_available_date.split('-')[1] == '07':
            print('July!!!')
        elif house_available_date.split('-')[1] == '08':
            print('August!!!!!')
        else:
            print('Wonderful, Even later!!!!!!')
        print('*****')
        # End of For
    # break
    if len(house_list_from_file) != len(
            house_dict_result) or is_two_dict_different(
                house_list_from_file, house_dict_result):
        send_email(house_dict_result, current_time_string)
        with open('house_list.json', 'w') as fp:
            fp.write(json.dumps(house_dict_result, indent=4))
        print('Rewrite json file')
        # Call function
        # fill_input('')

    time.sleep(time_sleep_second)
    print('------------------------')

# For test

# print(ul_product_grid[house_id])
# print(ul_product_grid[house_id].text,ul_product_grid[house_id].tail)
# index=0
# for child in ul_product_grid[house_id][4]:
Example #51
import unittest
from BSTestRunner import BSTestRunner
import time
from send_email import send_email
from latest_report import latest_report

report_dir = './test_report'
test_dir = './test_case'

print("start run test case")
discover = unittest.defaultTestLoader.discover(test_dir, pattern="*.py")

now = time.strftime("%Y-%m-%d %H_%M_%S")
report_name = report_dir + '/' + now + 'result.html'

print("start write report..")
with open(report_name, 'wb') as f:
    runner = BSTestRunner(stream=f,
                          title="Test Report",
                          description="localhost login test")
    runner.run(discover)

print("find latest report")
latest_report = latest_report(report_dir)
print("send email report..")
send_email(latest_report)

print("Test end")
Example #52
def main():
    """
    Main function
    """

    # Parse Command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', '-c',
                        dest='config_file',
                        default="watchdog.yaml",
                        help='Configuration file (required)')
    parser.add_argument('--email',
                        dest='email',
                        action='store_true',
                        help='send report as email')
    parser.add_argument('--output', '-o',
                        dest='report_file',
                        default="report.html",
                        help='Set output file name (default: report.html)')
    parser.add_argument('--no-output', '-n',
                        dest='no_output',
                        action='store_true',
                        help="Don't output report file")
    parser.add_argument('--key', '-k',
                        dest='key_path',
                        help="""Absolute path of service account key (optional). 
                        On default GOOGLE_APPLICATION_CREDENTIALS is used""")

    parser.add_argument('--sendgrid_key', '-sgk',
                        dest='sendgrid_key',
                        help="""Absolute path of Sendgrid API key (optional).
                        On default SENDGRID_API_KEY is used""")

    args = parser.parse_args()

    # Parse watchdog configuration file (watchdog.yaml)
    with open(args.config_file, 'r') as ymlfile:
        cfg = yaml.safe_load(ymlfile)


    # Print header
    print "\n***** Google Cloud Platform Watchdog *****\n"
    print 'Report Title:', cfg['general']['report-title']
    print "Watchdog email: ", cfg['general']['watchdog-email']
    print "Receivers: "
    for email in cfg['general']['receiver-email']:
        print "Email: ", email['email']

    print "\nRequested reports: \n"
    if cfg['compute']['print']:
        print '- compute'
    if cfg['IAM']['print']:
        print '- IAM'
    if cfg['firewall']['print']:
        print '- firewall'

    # Get authentication and GCP APIs
    if args.key_path:
        credentials = ServiceAccountCredentials.from_json_keyfile_name(args.key_path)
    else: 
        credentials = ServiceAccountCredentials.from_json_keyfile_name(
        os.environ.get('GOOGLE_APPLICATION_CREDENTIALS'))

    compute = discovery.build('compute', 'v1', credentials=credentials)
    rm = discovery.build('cloudresourcemanager', 'v1', credentials=credentials)


    # Get data and alerts from GCP
    tables = []
    table_titles = []

    # Get projects
    projects_columns = ['Name', 'Project_ID']
    projects_data = get_projects(credentials)
    projects = pd.DataFrame(projects_data, columns=projects_columns)
    projects, inaccessible_projects = check_projects(compute, rm, projects)

    projects = alert_projects(projects, cfg['general'])
    project_IDs = projects['Project_ID'].tolist()

    # Render table of inaccessible projects
    colors = ['background-color: darkorange' for i in range(inaccessible_projects.shape[1])]
    inaccessible_projects_styler = inaccessible_projects.style
    if inaccessible_projects.shape[0] > 0:
        inaccessible_projects_styler = inaccessible_projects_styler.apply(
            lambda x: colors, axis=1)

    inaccessible_projects_styler.set_table_attributes("border=1")
    tables.append(inaccessible_projects_styler.render())
    table_titles.append('Inaccessible Projects')

    # Get zones
    zone_columns = ['Name']
    zones_data = get_gcp_zones(compute, project_IDs[0])
    zones = pd.DataFrame(zones_data, columns=zone_columns)
    zones = alert_zones(zones, cfg['general'])
    zones = zones['Name'].tolist()

    if cfg['compute']['print']:
        instances_columns = ['Instance', 'Status', 'Machine_type', 'Project_ID', 'Zone']

        instance_data = get_instance_data(compute, project_IDs, zones)
        instances = pd.DataFrame(instance_data, columns=instances_columns)

        # Get alerts and filters
        instances, alerted_instances_styler = alert_instances(instances, cfg['compute'])

        tables.append(alerted_instances_styler.render())
        table_titles.append('Instances')


    if cfg['IAM']['print']:
        iam_columns = ['Project_ID', 'Name', 'Email', 'Email_suffix', 'Account_type', 'Role']

        iam_data = get_people_access(rm, project_IDs)
        iam = pd.DataFrame(iam_data, columns=iam_columns)
        iam = iam.groupby(['Project_ID', 'Name', 'Email', 'Email_suffix', 'Account_type'])['Role']\
        .apply(list).reset_index()

        # Get alerts and filters
        alerted_iam, alerted_iam_styler = alert_iam(iam, cfg['IAM'])

        tables.append(alerted_iam_styler.render())
        table_titles.append('IAM')


    if cfg['firewall']['print']:
        firewalls_columns = ['Project_ID', 'Rule_name', 'Range',
                             'Protocol', 'Port', 'Firewall_type']

        firewall_data = get_firewall_data(compute, project_IDs)
        firewalls = pd.DataFrame(firewall_data, columns=firewalls_columns)

        # Get alerts and filters
        alerted_firewalls, alerted_firewalls_styler = alert_firewalls(firewalls, cfg['firewall'])

        tables.append(alerted_firewalls_styler.render())
        table_titles.append('Firewalls')


    # Create report

    # Construct email content
    templates_folder = pkg_resources.resource_filename('templates','')
    env = Environment(loader=FileSystemLoader(templates_folder))
    content = {'title': cfg['general']['report-title'] + '  ' + str(datetime.datetime.now())[:-7],
               'num_tables': len(tables),
               'table_titles': table_titles,
               'tables' : tables
              }
    # Render HTML
    html = env.get_template('daily_report.html').render(content)
    subject = cfg['general']['report-title']


    # Write report to file
    if not args.no_output:
        report_file_path = args.report_file
        with open(report_file_path, 'w') as the_file:
            the_file.write(html)

    # Send email (needs email client to be configured)
    if args.email:
        for to_email in cfg['general']['receiver-email']:
            send_email(html, cfg['general']['watchdog-email'], to_email['email'], 
                        subject, args.sendgrid_key)
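
# The shape of the configuration that main() reads can be reconstructed from
# the cfg[...] lookups above. A sketch, expressed as the dict yaml.safe_load
# would return; values are made up and the alert_* helpers may expect more keys.
cfg = {
    'general': {
        'report-title': 'GCP Watchdog Report',
        'watchdog-email': 'watchdog@example.com',
        'receiver-email': [{'email': 'admin@example.com'}],
    },
    'compute': {'print': True},
    'IAM': {'print': True},
    'firewall': {'print': True},
}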
Example #53
File: job.py  Project: zQuantz/OscraP
	r_map = df.iloc[-1, 1:].values
	r_map = np.array([0] + r_map.tolist())
	chs = CubicHermiteSpline(t_map, r_map, [0]*len(t_map))

	rm_df = pd.DataFrame()
	rm_df['days_to_expiry'] = np.arange(0, 365 * 10 + 1).astype(int)
	rm_df['rate'] = chs(rm_df.days_to_expiry.values)
	rm_df['date_current'] = DATE

	_connector.write("treasuryratemap", rm_df)

	return df

if __name__ == '__main__':

	try:

		df = collect()
		store()
		send_email(CONFIG, "Interest Rate Summary", df.to_html(), [], logger)
		metric = 1

	except Exception as e:

		logger.warning(e)
		body = f"<p>Process Failed. {e}</p>"
		send_email(CONFIG, "Interest Rate Summary - FAILED", body, [], logger)
		metric = 0

	send_gcp_metric(CONFIG, "rates_success_indicator", "int64_value", metric)
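
# The rate map above pins a cubic Hermite spline flat at every knot by passing
# zero derivatives. A self-contained sketch with made-up tenors and rates:
import numpy as np
from scipy.interpolate import CubicHermiteSpline

t_map = np.array([0, 30, 90, 180, 365, 730])                  # days to expiry
r_map = np.array([0.0, 0.010, 0.012, 0.015, 0.020, 0.025])    # rates at the knots
chs = CubicHermiteSpline(t_map, r_map, np.zeros(len(t_map)))  # zero slope at knots

days_to_expiry = np.arange(0, 731)
rates = chs(days_to_expiry)  # smooth curve through every knot
print(rates[:5])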
Example #54
def send():
    mail = e_email.get()
    send_email(mail)
    messagebox.showinfo("Email", "Email Sent!")
Example #55
def main():
    """sends cfm requests"""
    start_logging('send_cfm_requests')
    logging.debug('start send cfm requests')
    conf = site_conf()
    secret = get_secret(conf.get('files', 'secret'))
    db_params = conf.items('db')

    _db = DBConn(db_params)
    yield from _db.connect()
    data = yield from _db.execute(
        """
        select correspondent, correspondent_email,
            json_agg(json_build_object('callsign', callsign, 
            'stationCallsign', station_callsign, 'rda', rda, 'band', band, 
            'mode', mode, 'tstamp', to_char(tstamp, 'DD mon YYYY HH24:MI'), 
            'rcvRST', rec_rst, 'sntRST', sent_rst)) as qso
        from
            (select * 
            from cfm_request_qso 
            where not sent and correspondent not in  
            (select callsign from cfm_request_blacklist)) as data
        group by correspondent, correspondent_email""", None, True)
    if not data:
        return
    sent_to = []
    for row in data:
        token = create_token(secret, {'callsign': row['correspondent']})
        link_cfm = conf.get('web', 'address') + '/#/cfm_qso/?token=' + token + \
            '&callsign=' + row['correspondent']
        link_blacklist = conf.get('web', 'address') +\
            '/#/cfm_blacklist/?token=' + token
        qso_txt = format_qsos(row['qso'])
        text = ("""
Hello, {correspondent}.
Please support the CFMRDA project, which builds a unified database for the RDA award programme.

You can confirm the specific QSOs that matter to the correspondents who sent requests, or upload your whole log.

""" + qso_txt + """
To confirm the QSOs, open this page - {link_cfm}
If the data shown is correct, tick "Confirm" for each QSO and press "OK".

It would be more convenient if you registered on CFMRDA.ru and uploaded your logs to the site's database.
If you do not want to register, or you run into trouble uploading, send your log, preferably in ADIF format, to the support address [email protected]

Thank you. 73!
The CFMRDA.ru team


If you do not want to receive further QSO confirmation requests, follow this link - {link_blacklist}
And press the button "Do not send me any more requests from CFMRDA.ru"
        """).format_map({'correspondent': row['correspondent'],\
            'link_cfm': link_cfm, 'link_blacklist': link_blacklist})
        retries = 0
        while retries < 3:
            if send_email(text=text,\
                fr=conf.get('email', 'address'),\
                to=row['correspondent_email'],\
                subject="Запрос на подтверждение QSO от CFMRDA.ru"):
                logging.error('cfm request email sent to ' +
                              row['correspondent'])
                sent_to.append(row)
                break
            else:
                retries += 1
                yield from asyncio.sleep(10)
        if retries == 3:
            logging.error('Email delivery failed. Correspondent: ' + row['correspondent']\
                + ', address: ' + row['correspondent_email'])
        yield from asyncio.sleep(10)
    logging.error('all requests were sent')
    if sent_to:
        yield from _db.execute("""
            update cfm_request_qso 
            set sent = true, status_tstamp = now()
            where correspondent = %(correspondent)s and not sent""",\
            sent_to)
        logging.error('cfm_request_qso table updated')
        yield from _db.execute(
            """
            update cfm_requests 
            set tstamp = now()
            where callsign = %(correspondent)s;
            insert into cfm_requests
            select %(correspondent)s, now()
            where not exists
                (select 1 
                from cfm_requests 
                where callsign = %(correspondent)s)
            """, sent_to)
        logging.error('cfm_requests table updated')
Example #56
    r.wc('errorName', 'qa12345678')
    r.finished()
    time.sleep(180)
    # the devices should disconnect within 3 min; otherwise email: platform connection status is wrong
    for d in list(data):
        status1 = device_connect_status(d)
        if status1 == '1':
            # wait 15 min and re-check whether the device is online, because the status read here is a point-in-time value
            time.sleep(15)
            status2 = device_connect_status(d)
            if status2 == '1':
                bad_disconnect.append(d)
                data.remove(d)
    # if any devices failed to disconnect
    if bad_disconnect:
        send_email('Box offline but platform reports devices as connected',
                   'Problem devices:\n' + str(bad_disconnect),
                   ['*****@*****.**'])
    # restore the network
    r = Configuration()
    r.wc('weak', 'qa12345678')
    r.finished()
    time.sleep(180)

    # 3 min after the devices reconnect, the platform should show "connected"; otherwise email: platform connection status is wrong
    for d in data:
        status1 = device_connect_status(d)
        if status1 == '1':
            # wait 15 min and re-check whether the device is online, because the status read here is a point-in-time value
            time.sleep(15)
            status2 = device_connect_status(d)
            if status2 == '1':
                bad_connect.append(d)
Example #57
    def send_to_email(self):
        send_email.send_email("google.fi updated!", self.body)
        print("Email sent to %s" % RECIPIENT)
        logging.info("Email sent to %s", RECIPIENT)
Example #58
# coding=utf-8
import send_email

send_email.send_email()
Example #59
                break

    if upload_date >= ref_date:
        return 0
    return -1


res = {"경력개발센터": [], "컴공 홈페이지": []}

for page in range(1, 10):
    if get_career_data(page, res.get("경력개발센터")) == -1:
        break

for page in range(2, 10):
    if get_cse_data(page, res.get("컴공 홈페이지")) == -1:
        break

resStr = ""
content_format = '<p>{item}\n</p>'

resStr += "<h3>경력개발센터\n" + career_url.format(page=1) + "\n</h3>"
for item in res.get("경력개발센터"):
    resStr += content_format.format(item=item)

resStr += "<h3>컴공 홈페이지\n" + cse_url.format(page=2) + "\n</h3>"

for item in res.get("컴공 홈페이지"):
    resStr += content_format.format(item=item)

send_email(resStr)
Example #60
def main():
    args.cuda = True
    # 1 choose the data you want to use
    using_data = {
        'my_sp': False,
        'my_cm': False,
        'template_casia_casia': False,
        'template_coco_casia': False,
        'cod10k': True,
        'casia': False,
        'coverage': False,
        'columb': False,
        'negative_coco': False,
        'negative_casia': False,
        'texture_sp': False,
        'texture_cm': False,
    }
    using_data_test = {
        'my_sp': False,
        'my_cm': False,
        'template_casia_casia': False,
        'template_coco_casia': False,
        'cod10k': False,
        'casia': False,
        'coverage': True,
        'columb': False,
        'negative_coco': False,
        'negative_casia': False,
    }
    # 2 define 3 types
    trainData = TamperDataset(stage_type='stage2',
                              using_data=using_data,
                              train_val_test_mode='train')
    valData = TamperDataset(stage_type='stage2',
                            using_data=using_data,
                            train_val_test_mode='val')
    testData = TamperDataset(stage_type='stage2',
                             using_data=using_data_test,
                             train_val_test_mode='test')

    # 3 specific dataloader
    trainDataLoader = torch.utils.data.DataLoader(trainData,
                                                  batch_size=args.batch_size,
                                                  num_workers=3,
                                                  shuffle=True,
                                                  pin_memory=False)
    valDataLoader = torch.utils.data.DataLoader(valData,
                                                batch_size=args.batch_size,
                                                num_workers=3)

    testDataLoader = torch.utils.data.DataLoader(testData,
                                                 batch_size=args.batch_size,
                                                 num_workers=0)
    # model
    model1 = Net1()
    model2 = Net2()
    if torch.cuda.is_available():
        model1.cuda()
        model2.cuda()
    else:
        model1.cpu()
        model2.cpu()

    # model initialization
    # without this step, weights would be auto-initialized from a normal distribution
    model1.apply(weights_init)
    model2.apply(weights_init)

    # model checkpointing / persistence
    # configuration from the original tensorflow code: optimizer = Adam(lr=1e-2, beta_1=0.9, beta_2=0.999)
    optimizer1 = optim.Adam(model1.parameters(),
                            lr=1e-5,
                            betas=(0.9, 0.999),
                            eps=1e-8)
    optimizer2 = optim.Adam(model2.parameters(),
                            lr=args.lr,
                            betas=(0.9, 0.999),
                            eps=1e-8)
    if args.resume[0]:
        if isfile(args.resume[0]):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint1 = torch.load(args.resume[0])
            checkpoint2 = torch.load(args.resume[1])
            model1.load_state_dict(checkpoint1['state_dict'])
            # optimizer1.load_state_dict(checkpoint1['optimizer'])
            ################################################
            model2.load_state_dict(checkpoint2['state_dict'])
            # optimizer2.load_state_dict(checkpoint2['optimizer'])
            print("=> loaded checkpoint '{}'".format(args.resume))

        else:
            print("=> Error!!!! checkpoint found at '{}'".format(args.resume))

    else:
        print("=> no checkpoint found at '{}'".format(args.resume))

    # adjust the learning rate
    scheduler1 = lr_scheduler.StepLR(optimizer1,
                                     step_size=args.stepsize,
                                     gamma=args.gamma)
    scheduler2 = lr_scheduler.StepLR(optimizer2,
                                     step_size=args.stepsize,
                                     gamma=args.gamma)
    # scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, 'min', factor=0.5, patience=3, verbose=True)

    # Main training loop
    for epoch in range(args.start_epoch, args.maxepoch):
        train_avg = train(model1=model1,
                          model2=model2,
                          optimizer1=optimizer1,
                          optimizer2=optimizer2,
                          dataParser=trainDataLoader,
                          epoch=epoch)
        # Validation / test passes are currently disabled; only training metrics are logged
        # val_avg = val(model=model1, model2=model2, dataParser=valDataLoader, epoch=epoch)
        # test_avg = test(model=model1, model2=model2, dataParser=testDataLoader, epoch=epoch)
        """""" """""" """""" """""" """"""
        "          写入图             "
        """""" """""" """""" """""" """"""
        try:
            writer.add_scalar('tr_avg_loss_per_epoch',
                              train_avg['loss_avg_stage2'],
                              global_step=epoch)
            writer.add_scalar('tr_avg_f1_per_epoch',
                              train_avg['f1_avg_stage2'],
                              global_step=epoch)
            writer.add_scalar('tr_avg_precision_per_epoch',
                              train_avg['precision_avg_stage2'],
                              global_step=epoch)
            writer.add_scalar('tr_avg_acc_per_epoch',
                              train_avg['accuracy_avg_stage2'],
                              global_step=epoch)
            writer.add_scalar('tr_avg_recall_per_epoch',
                              train_avg['recall_avg_stage2'],
                              global_step=epoch)

            #
            #
            # writer.add_scalar('val_avg_loss_per_epoch', val_avg['loss_avg_stage2'], global_step=epoch)
            # writer.add_scalar('val_avg_f1_per_epoch', val_avg['f1_avg_stage2'], global_step=epoch)
            # writer.add_scalar('val_avg_precision_per_epoch', val_avg['precision_avg_stage2'], global_step=epoch)
            # writer.add_scalar('val_avg_acc_per_epoch', val_avg['accuracy_avg_stage2'], global_step=epoch)
            # writer.add_scalar('val_avg_recall_per_epoch', val_avg['recall_avg_stage2'], global_step=epoch)
            #
            # writer.add_scalar('test_avg_loss_per_epoch', test_avg['loss_avg_stage2'], global_step=epoch)
            # writer.add_scalar('test_avg_f1_per_epoch', test_avg['f1_avg_stage2'], global_step=epoch)
            # writer.add_scalar('test_avg_precision_per_epoch', test_avg['precision_avg_stage2'], global_step=epoch)
            # writer.add_scalar('test_avg_acc_per_epoch', test_avg['accuracy_avg_stage2'], global_step=epoch)
            # writer.add_scalar('test_avg_recall_per_epoch', test_avg['recall_avg_stage2'], global_step=epoch)

            # Log the current learning rates (read from the optimizers' param groups)
            writer.add_scalar('lr_per_epoch_stage1',
                              optimizer1.param_groups[0]['lr'],
                              global_step=epoch)
            writer.add_scalar('lr_per_epoch_stage2',
                              optimizer2.param_groups[0]['lr'],
                              global_step=epoch)
        except Exception as e:
            print(e)
        """""" """""" """""" """""" """"""
        "          写入图            "
        """""" """""" """""" """""" """"""

        # output_name = output_name_file_name % \
        #               (epoch, val_avg['loss_avg'],
        #                val_avg['f1_avg'],
        #                val_avg['precision_avg'],
        #                val_avg['accuracy_avg'],
        #                val_avg['recall_avg'])
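        # The checkpoint filename encodes the epoch and the stage-2 training metrics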
        output_name = output_name_file_name % \
                      (epoch, train_avg['loss_avg_stage2'],
                       train_avg['f1_avg_stage2'],
                       train_avg['precision_avg_stage2'],
                       train_avg['accuracy_avg_stage2'],
                       train_avg['recall_avg_stage2'])
        try:
            # send_msn(epoch, f1=val_avg['f1_avg'])
            send_email(output_name)
        except Exception:
            # A failed notification should not interrupt training
            pass

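        # Save both stage checkpoints every epoch (epoch % 1 == 0 is always true)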
        if epoch % 1 == 0:
            save_model_name_stage1 = os.path.join(args.model_save_dir,
                                                  'stage1' + output_name)
            save_model_name_stage2 = os.path.join(args.model_save_dir,
                                                  'stage2' + output_name)
            torch.save(
                {
                    'epoch': epoch,
                    'state_dict': model1.state_dict(),
                    'optimizer': optimizer1.state_dict()
                }, save_model_name_stage1)
            torch.save(
                {
                    'epoch': epoch,
                    'state_dict': model2.state_dict(),
                    'optimizer': optimizer2.state_dict()
                }, save_model_name_stage2)
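            # Each checkpoint stores the epoch, model weights, and optimizer state,
            # matching the keys read back by the resume block above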

        scheduler1.step()
        scheduler2.step()
    print('Training complete!')