Example #1
    def process_all_blocks(self, wsapi_data, time_to_wait, send_email_after):
        r = requests.get(XML_URL, headers={'Cache-Control': 'no-cache'})
        # Normalize r.text and strip errant non-ASCII characters before XML parsing
        safe_text = unicodedata.normalize('NFKD', r.text).encode('ascii', 'ignore')
        block_xml = ET.fromstring(safe_text)

        paths_to_ignore = ["_shared-content/program-blocks/undergrad"]

        blocks = []
        for block in block_xml.findall('.//system-block'):
            if any(path in block.find('path').text for path in paths_to_ignore):
                continue

            block_id = block.get('id')

            result = self.process_block(wsapi_data, block_id)
            blocks.append(result)
            time.sleep(time_to_wait)

        if send_email_after:
            missing_data_codes = self.missing_data_codes

            caps_gs_sem_email_content = render_template("caps_gs_sem_recipients_email.html", **locals())
            if len(missing_data_codes) > 0:
                send_message("No CAPS/GS Banner Data Found", caps_gs_sem_email_content, html=True, caps_gs_sem=True)

            unused_banner_codes = self.get_unused_banner_codes(wsapi_data)
            caps_gs_sem_recipients = app.config['CAPS_GS_SEM_RECIPIENTS']
            admin_email_content = render_template("admin_email.html", **locals())
            send_message("Readers Digest: Program Sync", admin_email_content, html=True)

            # reset the codes found
            self.codes_found_in_cascade = []
            
        return "Finished sync of all CAPS/GS/SEM programs."
Example #2
def send_email(subject, text):
    print("sending email")

    credentials = auth.get_credentials()
    gmail_service = auth.get_gmail_service(credentials)

    message = mail.create_message("*****@*****.**", subject, text)
    mail.send_message(gmail_service, message)
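The auth and mail helpers are defined elsewhere. A minimal sketch of what mail.create_message and mail.send_message plausibly look like on top of the Gmail API (helper bodies assumed; the API calls themselves are standard):

import base64
from email.mime.text import MIMEText

def create_message(to, subject, text):
    # The Gmail API expects a base64url-encoded RFC 2822 message.
    message = MIMEText(text)
    message['to'] = to
    message['subject'] = subject
    return {'raw': base64.urlsafe_b64encode(message.as_bytes()).decode()}

def send_message(service, message):
    # 'me' means the authenticated account behind the service object.
    return service.users().messages().send(userId='me', body=message).execute()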
Example #3
def contact():
    if request.method == 'POST':
        name = request.form.get('name')
        email = request.form.get('email')
        message = request.form.get('message')
        email_kwargs = {'name': name, 'email': email, 'message': message}
        formatted_email = email_template.substitute(**email_kwargs)
        send_message(formatted_email)
        return redirect('/')
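email_template is defined elsewhere; given the .substitute(**email_kwargs) call it is presumably a string.Template along these lines (placeholder text invented):

from string import Template

email_template = Template(
    'New contact form submission\n'
    'From: $name <$email>\n\n'
    '$message'
)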
Example #4
def api_system():
    """The website API, used for posting and getting comments."""
    data = request.form
    fields = (data['first_name'], data['last_name'], data['email_address'],
              data['comment'])
    cursor.execute(
        'INSERT INTO comments(first_name, last_name, email_address, comment) '
        'VALUES(?, ?, ?, ?)', fields)
    db.commit()
    send_message(
        data['email_address'], 'We have received your comment:\n'
        '"{0}"\nThanks for giving us feedback!'.format(data['comment']))
    return jsonify({'status': 'Comment added successfully!'})
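The module-level db and cursor are assumed to exist; a sketch of the setup this route implies, with column types assumed. Because the INSERT uses ? placeholders, sqlite3 escapes the values itself, which is what keeps user input from being interpreted as SQL:

import sqlite3

# check_same_thread=False lets Flask's worker threads share the connection.
db = sqlite3.connect('comments.db', check_same_thread=False)
cursor = db.cursor()
cursor.execute(
    'CREATE TABLE IF NOT EXISTS comments('
    'first_name TEXT, last_name TEXT, email_address TEXT, comment TEXT)')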
Example #5
    def process_all_blocks(self, time_to_wait, send_email_after):
        new_banner_data = self.get_new_banner_data()

        if len(new_banner_data) == 0:
            return 'Received no data from banner; skipping all blocks'

        r = requests.get(XML_URL, headers={'Cache-Control': 'no-cache'})
        # Normalize r.text and strip errant non-ASCII characters before XML parsing
        safe_text = unicodedata.normalize('NFKD', r.text).encode('ascii', 'ignore')
        block_xml = ET.fromstring(safe_text)

        paths_to_ignore = ['_shared-content/program-blocks/undergrad']

        blocks = []
        for block in block_xml.findall('.//system-block'):
            if any(path in block.find('path').text for path in paths_to_ignore):
                continue

            block_id = block.get('id')

            # gather codes that are in cascade
            concentration_code = block.find('.//concentration_code').text
            if concentration_code not in self.codes_found_in_cascade:
                self.codes_found_in_cascade.append(concentration_code)

            result = self.process_block(new_banner_data, block_id, time_to_wait)
            blocks.append(result)

        if send_email_after:
            codes_not_found_in_banner = self.codes_not_found_in_banner
            caps_gs_sem_email_content = render_template('caps_gs_sem_recipients_email.html', **locals())
            if len(codes_not_found_in_banner) > 0:
                send_message('No CAPS/GS Banner Data Found', caps_gs_sem_email_content, html=True, caps_gs_sem=True)

            unused_banner_codes = self.get_unused_banner_codes(new_banner_data)
            caps_gs_sem_recipients = app.config['CAPS_GS_SEM_RECIPIENTS']
            admin_email_content = render_template('admin_email.html', **locals())

            if codes_not_found_in_banner or unused_banner_codes:
                send_message('Readers Digest: Program Sync', admin_email_content, html=True)

        # reset the codes found
        self.codes_found_in_cascade = []
        self.codes_not_found_in_banner = []

        # publish program feeds
        self.cascade.publish(app.config['PUBLISHSET_ID'], 'publishset')

        return 'Finished sync of all CAPS/GS/SEM programs.'
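Compared with Example #1, this variant skips the sync when Banner returns nothing, only emails the admins when there is something to report, and publishes the program feeds at the end. The get_unused_banner_codes helper is not shown; conceptually it is a set difference between the codes Banner returned and the codes seen in Cascade, roughly:

    def get_unused_banner_codes(self, new_banner_data):
        # Banner codes that no Cascade block referenced during this sync.
        # The key name is assumed from the concentration_code field above.
        banner_codes = {row['concentration_code'] for row in new_banner_data}
        return sorted(banner_codes - set(self.codes_found_in_cascade))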
Example #6
def send():
    if not is_admin():
        return "Not Authorized", 401
    args = request.json
    print(args)
    recipients = args.get("recipients") or ""
    subject = args.get("subject") or ""
    html = args.get("html") or ""
    text = args.get("text") or ""

    to = []
    if(recipients == "org"):
        to = ["*****@*****.**"]
    elif(recipients == "admin"):
        to = ["*****@*****.**"]
    elif(recipients == "all"):
        to = [x["email"] for x in admin(True)]
    elif(recipients == "wpi"):
        to = [ x["email"] for x in admin(True) if "wpi.edu" in x["email"] or \
            (x["school"] and ("WPI" in x["school"]["name"] or "Worcester Polytechnic" in x["school"]["name"])) ]

    # return str(to)
    send_message(to, subject, html, text)
    return "Message sent successfully to {0} recipients".format(len(to))
Example #7
def emailHandle():
    req_data = request.get_json()
    case = req_data["case"]
    bot_response = "Custom Response"

    EMAIL_FROM = '*****@*****.**'
    EMAIL_SUBJECT = "Email from chat bot"
    EMAIL_CONTENT = ""

    email_to = {
        2.1: "*****@*****.**",
        2.2: "*****@*****.**",
        3.3: "*****@*****.**"
    }

    # name, roll, mail
    user_info = (f"Name - {req_data['name']}, "
                 f"Enrollment number - {req_data['roll']}, "
                 f"E-mail ID - {req_data['mail']}, ")

    if case == 2.1:
        EMAIL_CONTENT = (f"Degree Query for course: {req_data['course']}. "
                         f"Batch: {req_data['batch']}. Drop out case: {req_data['drop']}")
    elif case == 2.2:
        EMAIL_CONTENT = f"Bonafide for purpose: {req_data['purpose']}"
    elif case == 3.3:
        EMAIL_CONTENT = (f"Accounts Query for security for batch: {req_data['batch']}. "
                         f"Year: {req_data['year']}")

    EMAIL_CONTENT = user_info + EMAIL_CONTENT

    service = mail.service_account_login()
    message = mail.create_message(EMAIL_FROM, email_to.get(case),
                                  EMAIL_SUBJECT, EMAIL_CONTENT)
    sent = mail.send_message(service, 'me', message)

    bot_response = "Email sent successfully."

    return jsonify({
        "method": "POST",
        "headers": {
            "content-type": "application/json"
        },
        "body": {
            "response": bot_response
        }
    })
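A sample payload for the bonafide branch (values invented). Note that case is compared as a float, so the client has to send 2.2 as a JSON number, not a string:

import requests

payload = {
    "case": 2.2,
    "name": "A. Student",
    "roll": "20XX123",
    "mail": "student@example.edu",
    "purpose": "bank loan",
}
# Hypothetical URL for the route that dispatches to emailHandle().
resp = requests.post("https://example.org/email", json=payload)
print(resp.json()["body"]["response"])  # "Email sent successfully."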
Example #8
    # loop over the facial embeddings
    for encoding in encodings:
        playsound("anythere.wav")
        servo.turnRight()

        # attempt to match each face in the input image to our known
        # encodings
        matches = face_recognition.compare_faces(data["encodings"], encoding)
        name = "Unknown"  # default when the face is not recognized

        # check to see if we have found a match
        if True in matches:
            # find the indexes of all matched faces, then initialize a
            # dictionary to count the total number of times each face
            # was matched
            matchedIdxs = [i for (i, b) in enumerate(matches) if b]
            counts = {}

            # loop over the matched indexes and maintain a count for
            # each recognized face
            for i in matchedIdxs:
                name = data["names"][i]
                counts[name] = counts.get(name, 0) + 1

            # the recognized face is the one with the most votes
            name = max(counts, key=counts.get)

        # react only when the face was not recognized
        if name == "Unknown":
            playsound("pew.wav")
            playsound("pew.wav")
            playsound("pew.wav")
            servo.shoot()
            playsound("lost.wav")
            request = message.send_message()
            # a 200 status code means the email was sent successfully
            print('Status Code: ' + format(request.status_code))
Example #9
        # If the contour isn't large enough, skip it
        if cv2.contourArea(c) < MIN_AREA:
            continue

        if not motion_detected and time_detected is None:
            print("Motion Detected!")
            # Create an image and send it
            cv2.imwrite(
                datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p") +
                ".jpg", frame)
            mes = create_message(
                EMAIL, EMAIL, 'Motion Detected!',
                "Motion has been detected by your security webcam!",
                datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p") +
                ".jpg")
            send_message(service, "me", mes)
            #Flag motion_detection as true and begin video recording
            motion_detected = True
            time_detected = datetime.datetime.now()
            fname = datetime.datetime.now().strftime(
                "%A %d %B %Y %I:%M:%S%p") + '_WebCurity.avi'
            out = cv2.VideoWriter(fname, fourcc, 20.0, (RES_X, RES_Y))

        # Surround the contour with a rectangle
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)

    # Write time and date on the frame
    cv2.putText(frame,
                datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
                (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                (0, 0, 255), 1)  # BGR color and line thickness (assumed values)
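create_message here takes a fifth argument, the JPEG filename, so it presumably builds a multipart message. A sketch of that shape for the Gmail API (helper body assumed; the MIME and base64 calls are standard):

import base64
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

def create_message(sender, to, subject, body, image_path):
    message = MIMEMultipart()
    message['from'] = sender
    message['to'] = to
    message['subject'] = subject
    message.attach(MIMEText(body))
    with open(image_path, 'rb') as fp:
        image = MIMEImage(fp.read())
    image.add_header('Content-Disposition', 'attachment', filename=image_path)
    message.attach(image)
    return {'raw': base64.urlsafe_b64encode(message.as_bytes()).decode()}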
Example #10
def run_seizure_detection(build_target):
    """
    The main entry point for running seizure-detection cross-validation and predictions.
    Directories from settings file are configured, classifiers are chosen, pipelines are
    chosen, and the chosen build_target ('cv', 'predict', 'train_model') is run across
    all combinations of (targets, pipelines, classifiers)
    """

    with open('SETTINGS.json') as f:
        settings = json.load(f)

    data_dir = str(settings['competition-data-dir'])
    cache_dir = str(settings['data-cache-dir'])
    submission_dir = str(settings['submission-dir'])

    makedirs(submission_dir)

    cached_data_loader = CachedDataLoader(cache_dir)

    ts = time.get_millis()

    targets = [
        'Dog_1',
        'Dog_2',
        'Dog_3',
        'Dog_4',
        'Dog_5',
        'Patient_1_downsample',
        'Patient_2_downsample',
    ]
    pipelines = [
        # NOTE(mike): you can enable multiple pipelines to run them all and compare results
        # Pipeline(pipeline=[FFT(), Slice(1, 64), Magnitude(), Log10()]),
        # Pipeline(pipeline=[FFT(), Slice(1, 48), Magnitude(), Log10()]),
        # Pipeline(pipeline=[FFT(), Slice(1, 96), Magnitude(), Log10()]),
        # Pipeline(pipeline=[RFFT(), Slice(1, 48), Magnitude(), Log10()]),
        # Pipeline(pipeline=[FFT(), Slice(1, 128), Magnitude(), Log10()]),
        Pipeline(pipeline=[TimeAliasing(), FFT(), Slice(1, 48), Magnitude(), Log10()]),
        # Pipeline(pipeline=[TimeAliasing(),FFT(), Slice(1, 64), Magnitude(), Log10()]),
        # Pipeline(pipeline=[FFT(), Slice(1, 160), Magnitude(), Log10()]),
        # Pipeline(pipeline=[FFT(), Magnitude(), Log10()]),
        # Pipeline(pipeline=[Stats()]),
        # Pipeline(pipeline=[DaubWaveletStats(4)]),
        # Pipeline(pipeline=[Resample(400), DaubWaveletStats(4)]),
        # Pipeline(pipeline=[Resample(400), MFCC()]),
        # Pipeline(pipeline=[TimeAliasing(),FFTWithTimeFreqCorrelation(1, 48, 400, 'us')]),
        # Pipeline(pipeline=[FFTWithTimeFreqCorrelation(1, 48, 400, 'us')]),
        # Pipeline(pipeline=[TimeAliasing(),FFTWithTimeFreqCorrelation(1, 48, 400, 'none')]), # winning submission
        # Pipeline(pipeline=[FFTWithTimeFreqCorrelation(1, 48, 400, 'usf')]), # higher score than winning submission
        # Pipeline(pipeline=[FFTWithTimeFreqCorrelation(1, 48, 400, 'none')]),
        # Pipeline(pipeline=[FFTWithTimeFreqCorrelation(1, 48, 400, 'none')]),
        # Pipeline(pipeline=[TimeAliasing(),TimeCorrelation(400, 'usf', with_corr=True, with_eigen=True)]),
        # Pipeline(pipeline=[TimeAliasing(),TimeCorrelation(400, 'us', with_corr=True, with_eigen=True)]),
        # Pipeline(pipeline=[TimeCorrelation(400, 'us', with_corr=True, with_eigen=False)]),
        # Pipeline(pipeline=[TimeCorrelation(400, 'us', with_corr=False, with_eigen=True)]),
        # Pipeline(pipeline=[TimeCorrelation(400, 'none', with_corr=True, with_eigen=True)]),
        # Pipeline(pipeline=[TimeAliasing(),FreqCorrelation(1, 48, 'usf', with_corr=True, with_eigen=True,with_fft = True)]),
        # Pipeline(pipeline=[FreqCorrelation(1, 48, 'us', with_corr=True, with_eigen=True)]),
        # Pipeline(pipeline=[FreqCorrelation(1, 48, 'us', with_corr=True, with_eigen=False)]),
        # Pipeline(pipeline=[FreqCorrelation(1, 48, 'us', with_corr=False, with_eigen=True)]),
        # Pipeline(pipeline=[FreqCorrelation(1, 48, 'none', with_corr=True, with_eigen=True)]),
        # Pipeline(pipeline=[TimeFreqCorrelation(1, 48, 400, 'us')]),
        # Pipeline(pipeline=[TimeFreqCorrelation(1, 48, 400, 'usf')]),
        # Pipeline(pipeline=[TimeFreqCorrelation(1, 48, 400, 'none')]),
    ]
    classifiers = [
        # NOTE(mike): you can enable multiple classifiers to run them all and compare results
        (RandomForestClassifier(n_estimators=3, min_samples_split=1, bootstrap=False, n_jobs=4, random_state=0), 'rf3mss1Bfrs0'),
        # (RandomForestClassifier(n_estimators=150, min_samples_split=1, bootstrap=False, n_jobs=4, random_state=0), 'rf150mss1Bfrs0'),
        # (RandomForestClassifier(n_estimators=300, min_samples_split=1, bootstrap=False, n_jobs=4, random_state=0), 'rf300mss1Bfrs0'),
        # (RandomForestClassifier(n_estimators=3000, min_samples_split=1, bootstrap=False, n_jobs=4, random_state=0), 'rf3000mss1Bfrs0'),
        # (GaussianNB(),'gbn'),
        # (BernoulliRBM(n_components=100),'dbn'),
        # (SVC(probability = True),'svc100'),
        # (LDA(),'lda'),
    ]
    cv_ratio = 0.5

    def should_normalize(classifier):
        clazzes = [LogisticRegression]
        return any(isinstance(classifier, clazz) for clazz in clazzes)

    def train_full_model(make_predictions):
        for pipeline in pipelines:
            for (classifier, classifier_name) in classifiers:
                print('Using pipeline %s with classifier %s' % (pipeline.get_name(), classifier_name))
                guesses = ['clip,preictal']
                classifier_filenames = []
                for target in targets:
                    task_core = TaskCore(cached_data_loader=cached_data_loader, data_dir=data_dir,
                                         target=target, pipeline=pipeline,
                                         classifier_name=classifier_name, classifier=classifier,
                                         normalize=should_normalize(classifier), gen_ictal=False,
                                         cv_ratio=cv_ratio)

                    if make_predictions:
                        predictions = MakePredictionsTask(task_core).run()
                        guesses.append(predictions.data)
                    else:
                        task = TrainClassifierTask(task_core)
                        print("training")
                        task.run()
                        print("train_finished")
                        classifier_filenames.append(task.filename())

                if make_predictions:
                    filename = 'submission%d-%s_%s.csv' % (ts, classifier_name, pipeline.get_name())
                    filename = os.path.join(submission_dir, filename)
                    with open(filename, 'w') as f:
                        print('\n'.join(guesses), file=f)
                    print('wrote', filename)
                else:
                    print('Trained classifiers ready in %s' % cache_dir)
                    for filename in classifier_filenames:
                        print(os.path.join(cache_dir, filename + '.pickle'))

    def train_model_with_calib(make_predictions):
        for pipeline in pipelines:
            for (classifier, classifier_name) in classifiers:
                print('Using pipeline %s with classifier %s' % (pipeline.get_name(), classifier_name))
                guesses = ['clip,preictal']
                classifier_filenames = []
                for target in targets:
                    task_core = TaskCore(cached_data_loader=cached_data_loader, data_dir=data_dir,
                                         target=target, pipeline=pipeline,
                                         classifier_name=classifier_name, classifier=classifier,
                                         normalize=should_normalize(classifier), gen_ictal=False,
                                         cv_ratio=cv_ratio)

                    if make_predictions:
                        predictions = MakePredictionswithCalibTask(task_core).run()
                        guesses.append(predictions.data)
                    else:
                        task = TrainClassifierwithCalibTask(task_core)
                        print("training")
                        task.run()
                        print("train_finished")
                        classifier_filenames.append(task.filename())

                if make_predictions:
                    filename = 'submission%d-%s_%s.csv' % (ts, classifier_name, pipeline.get_name())
                    filename = os.path.join(submission_dir, filename)
                    with open(filename, 'w') as f:
                        print('\n'.join(guesses), file=f)
                    print('wrote', filename)
                else:
                    print('Trained classifiers ready in %s' % cache_dir)
                    for filename in classifier_filenames:
                        print(os.path.join(cache_dir, filename + '.pickle'))

    def do_cross_validation_full():
        for pipeline in pipelines:
            for (classifier, classifier_name) in classifiers:
                print('Using pipeline %s with classifier %s' % (pipeline.get_name(), classifier_name))
                y_cv = []
                pred = []
                for target in targets:
                    print('Processing %s (classifier %s)' % (target, classifier_name))
                    task_core = TaskCore(cached_data_loader=cached_data_loader, data_dir=data_dir,
                                         target=target, pipeline=pipeline,
                                         classifier_name=classifier_name, classifier=classifier,
                                         normalize=should_normalize(classifier), gen_ictal=False,
                                         cv_ratio=cv_ratio)

                    data = CrossValidationScoreFullTask(task_core).run()
                    y_cv = np.concatenate((y_cv, data.y_cv), axis=-1)
                    pred = np.concatenate((pred, data.pred), axis=-1)

                # pool the cross-validation predictions across targets and report one AUC
                fpr, tpr, thresholds = metrics.roc_curve(y_cv, pred, pos_label=1)
                print('AUC: %f' % metrics.auc(fpr, tpr))

    def do_cross_validation():
        summaries = []
        for pipeline in pipelines:
            for (classifier, classifier_name) in classifiers:
                print('Using pipeline %s with classifier %s' % (pipeline.get_name(), classifier_name))
                scores = []
                S_scores = []
                for target in targets:
                    print('Processing %s (classifier %s)' % (target, classifier_name))

                    task_core = TaskCore(cached_data_loader=cached_data_loader, data_dir=data_dir,
                                         target=target, pipeline=pipeline,
                                         classifier_name=classifier_name, classifier=classifier,
                                         normalize=should_normalize(classifier), gen_ictal=False,
                                         cv_ratio=cv_ratio)

                    data = CrossValidationScoreTask(task_core).run()
                    score = data.score
                    scores.append(score)

                    print('%.3f' % score, 'S=%.4f' % data.S_auc)
                    S_scores.append(data.S_auc)

                if len(scores) > 0:
                    name = pipeline.get_name() + '_' + classifier_name
                    summary = get_score_summary(name, scores)
                    summaries.append((summary, np.mean(scores)))
                    print(summary)
                if len(S_scores) > 0:
                    name = pipeline.get_name() + '_' + classifier_name
                    summary = get_score_summary(name, S_scores)
                    print('S', summary)

            print_results(summaries)




    if build_target == 'cv':
        do_cross_validation()
    elif build_target == 'train_model':
        train_full_model(make_predictions=False)
    elif build_target == 'make_predictions':
        train_full_model(make_predictions=True)
    elif build_target == 'make_predictions_with_calib':
        train_model_with_calib(make_predictions=True)
    elif build_target == 'cv_full':
        do_cross_validation_full()
    else:
        raise Exception("unknown build target %s" % build_target)

    send_message('your program finished running on mercury')
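A minimal driver for this entry point, assuming it is invoked as a script with the build target on the command line (the original repo's wrapper is not shown):

import sys

if __name__ == '__main__':
    # e.g. python main.py cv | train_model | make_predictions | make_predictions_with_calib | cv_full
    build_target = sys.argv[1] if len(sys.argv) > 1 else 'cv'
    run_seizure_detection(build_target)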
Example #11
def send_email(*args, **kwds):
    mail.send_message(*args, **kwds)
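The straight pass-through matches Flask-Mail's Mail.send_message convenience method; assuming that is what mail is, a call would look like (recipient invented):

send_email('Weekly report',
           recipients=['ops@example.org'],
           body='All systems nominal.')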
Example #12
    def delete_all():
        m.send_message(text.get(0.0, END), self.login, self.password,
                       to.get(0.0, END), subject.get(0.0, END))
        widgets = self.parent.grid_slaves()
        for widget in widgets:
            widget.destroy()
        self.toolbar()
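delete_all reads like a callback nested inside a Tkinter method: text, to, and subject are Text widgets and m is a mail helper from the enclosing scope. A self-contained sketch of its grid_slaves/destroy teardown pattern (mail call omitted):

import tkinter as tk

root = tk.Tk()
text = tk.Text(root)
text.grid(row=0, column=0)

def clear_all():
    # destroy every widget currently managed by grid, as delete_all does
    for widget in root.grid_slaves():
        widget.destroy()

tk.Button(root, text='Send', command=clear_all).grid(row=1, column=0)
root.mainloop()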
Example #13
def update_git_bom(bom):
    for fname in os.listdir(path + '/' + bom):
        r = requests.get('https://raw.githubusercontent.com/hammonja/prod_data/master/BOMS/' + bom + '/' + fname, verify=False)
        with open(path + '/' + bom + '/' + fname, 'w') as out:
            out.write(r.text)

# get current BOMs from Sage
sage_boms = get_sage_boms()

# get all BOMs from git
git_boms = get_git_boms()

# check if there is a mismatch
if sage_boms != git_boms:

    # update BOMs from git that are not equal
    for bom in get_unequal_boms():
        update_git_bom(bom)

    # get all BOMs from git again
    git_boms = get_git_boms()

    # check again
    if sage_boms != git_boms:

        # still unequal, so send an email per out-of-sync BOM
        for bom in get_unequal_boms():
            text = "The following BOM is out of sync with GitHub: "
            send_message(text, bom)
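get_unequal_boms is not shown; given the whole-collection equality checks above, a plausible shape is a comparison keyed by BOM name (the return types of the getters are assumed):

def get_unequal_boms():
    # Assumes get_sage_boms()/get_git_boms() return {bom_name: content_hash}.
    sage = get_sage_boms()
    git = get_git_boms()
    return [bom for bom in sage if git.get(bom) != sage[bom]]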