Example #1
import time
from easygui import multpasswordbox


def main():
    # instruction(), login(), get_first_text_block(), and create_report()
    # are defined elsewhere in this module.
    instruction()
    msg = "Enter your GMAIL Username and Password"
    title = "Welcome to CheckMail"
    fieldNames = [
        "VirusTotal API key",
        "Gmail Username",
        "Gmail Password",
    ]

    # easygui password box to collect the API key, Gmail username, and password
    fieldValues = multpasswordbox(msg, title, fieldNames)
    api = fieldValues[0]
    emailid = fieldValues[1]
    password = fieldValues[2]

    latest_mail_uid = 0
    while True:
        try:
            uid = latest_mail_uid
            latest_mail_uid, from_addr, email_message = login(
                emailid=emailid, password=password)
            if latest_mail_uid > uid:
                get_first_text_block(email_message_instance=email_message,
                                     sender_addr=from_addr,
                                     api_key=api)
                create_report(from_addr=from_addr)
            time.sleep(1)
        except Exception:
            # Keep polling even if login or parsing fails.
            continue
Example #2
def printable(self, *args):
    if not self.data:
        print("No data to print")
        return
    file_name = self.choose_report_name()
    report_data = {k: str(v.get_text()) for k, v in self.fields.items()}
    create_report(report_data, self.data, file_name)
Example #3
def run(strategy, explanation_score_name="mat_score", prediction_score_name="mato_score",
        show_me=False, test_problems=None, test_name="spm", test_anlgs=None, test_trans=None):
    start_time = time.time()

    probs = prob_anlg_tran_new.get_probs(test_problems, test_name)

    for prob in probs:
        print(prob.name)

        # initialize cache
        jaccard.load_jaccard_cache(prob.name)
        asymmetric_jaccard.load_asymmetric_jaccard_cache(prob.name)

        # run strategy
        anlg_tran_data, pred_data, pred_d = strategy(prob,
                                                     explanation_score_name=explanation_score_name,
                                                     prediction_score_name=prediction_score_name,
                                                     test_anlgs=test_anlgs,
                                                     test_trans=test_trans)

        # save data
        prob.data = utils.save_data(prob, anlg_tran_data, pred_data, pred_d,
                                    f"./data/{test_name}_{strategy.__name__}_{prediction_score_name}_{prob.name}",
                                    show_me)

        # update cache
        jaccard.save_jaccard_cache(prob.name)
        asymmetric_jaccard.save_asymmetric_jaccard_cache(prob.name)

    # generate report
    report.create_report(probs, f"{test_name}_{strategy.__name__}_{prediction_score_name}_")

    end_time = time.time()
    print(f"elapsed: {end_time - start_time:.2f} s")
Example #4
def place_reports_only(expnum, start_time, end_time):
    destination = experiment_path[expnum]
    event_data_dicts = smysql.retrieve_event_description(start_time, end_time, list_of_sites=mySQL_sitedef[expnum])
    default_folder = smysql.retrieve_data_folder()

    # Look at every event in the database between time constraints.
    for event in event_data_dicts:
        site_evt_number = event[cfg_evt_siteEvt]
        site_event_id = event[cfg_evt_evid]
        file_data_dicts = smysql.retrieve_file_location(site_evt_number, mySQL_stadef[expnum])
        current_trial = caching.trial_num_from_evid(expnum, site_event_id)
        trial_doc_folder = "%sTrial-%s/Documentation/" % (destination, current_trial)
        report.create_report(trial_doc_folder, event)
        create_filereports(file_data_dicts, event, destination, current_trial, trial_doc_folder, default_folder)
Example #5
def place_trials_default(expnum, start_time, end_time, verbose=False):
    '''This is going to be the primary way of moving processed data from its proper location
    to the PEN tool's subfolder. As long as the data is organized in our standard format, with
    the metadata located on the mysql database, this will handle all the uploading.
    WARNING: currently this will not notice if you've pointed it at a folder it has already uploaded.'''
    destination         = experiment_path[expnum]
    current_trial       = utils.find_last_trial(expnum) + 1                      
    neeshub             = bhi.conn
    mysqldb             = bui.conn
    existing_evid_dict  = caching.load_evid_dictionary(expnum)
    event_data_dicts    = mysqldb.retrieve_event_description(start_time, end_time, list_of_sites=mySQL_sitedef[expnum])
    default_folder      = mysqldb.retrieve_data_folder()
    
    # Look at every event in the database between time constraints.
    for event in event_data_dicts:
        site_evt_number      = event[cfg_evt_siteEvt]
        site_evt_time        = event[cfg_evt_time]
        site_event_id        = event[cfg_evt_evid]
        site_event_dist      = event[cfg_evt_dist]
        site_event_ml        = event[cfg_evt_ml]
        file_data_dicts      = mysqldb.retrieve_file_location(site_evt_number, mySQL_stadef[expnum])
        
        # If this event has already been uploaded, report it and skip this event.
        if site_event_id in existing_evid_dict.values():
            nees_logging.log_existing_evid(site_event_id)
            continue
        
        # Don't do anything if there's no data
        if not file_data_dicts:
            continue
        
        # Generate file structure on NEEShub and local system.
        description          = utils.generate_description(event)
        trialtitle           = datetime.datetime.utcfromtimestamp(site_evt_time).strftime(default_time_format)
        trial_doc_folder     = "%sTrial-%s/Documentation/" % (destination, current_trial)
        report_name          = 'report.csv'
        caching.update_all_cache_dictionaries(expnum, current_trial, site_event_id, site_event_ml, site_event_dist)        
        utils.generate_trial_structure(destination, current_trial)
        report.create_report(trial_doc_folder, event)
        neeshub.post_full_trial(experiment_id[expnum], trialtitle, description, current_trial)
        
        # Find and move every file within an event to the created file structure. 
        move_datafiles(file_data_dicts, event, destination, current_trial, trial_doc_folder, default_folder, expnum)
        upload_and_post_report(expnum, current_trial, trial_doc_folder, report_name)

        # Move on to the next trial for further processing.
        nees_logging.log_goto_nextline(neeshub_log_filename)
        current_trial += 1
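
A hedged usage sketch of the function above: the experiment number and the epoch-second time window are made-up illustration values, and expnum 0 is assumed to be configured in experiment_path, mySQL_sitedef, and mySQL_stadef:

# Hypothetical invocation; 0 and the timestamps are illustration values only.
start_time = 1420070400  # 2015-01-01 00:00:00 UTC
end_time   = 1451606400  # 2016-01-01 00:00:00 UTC
place_trials_default(0, start_time, end_time, verbose=True)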
Example #6
import cv2


def calculate_size(file_path, img_output_path, histogram_path,
                   reports) -> None:
    # crop_image() and watershed() are helpers defined elsewhere in this module.
    img_org = cv2.imread(file_path)
    img, gray = crop_image(img_org)
    img_with_contours, markers = watershed(img, gray)
    cv2.imwrite(img_output_path, img_with_contours)
    report = create_report(markers, gray, histogram_path)
    reports.append(report)  # results are collected by mutating `reports`, not returned
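
A hedged usage sketch for calculate_size: the .jpg paths below are made-up illustration values, and results are collected through the mutated reports list rather than a return value:

# Hypothetical driver loop; the file names are illustration values only.
reports = []
for name in ["cells_01.jpg", "cells_02.jpg"]:
    calculate_size(name, "contours_" + name, "hist_" + name + ".png", reports)
print(len(reports), "size reports collected")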
Example #7
def place_reports_only(expnum, start_time, end_time):
    '''Used when the log warns that individual channel information was missing. This allows
    the user to re-create the report.csv files without having to completely redo the upload process.'''
    destination         = experiment_path[expnum]                    
    mysqldb             = bui.conn
    event_data_dicts    = mysqldb.retrieve_event_description(start_time, end_time, list_of_sites=mySQL_sitedef[expnum])
    default_folder      = mysqldb.retrieve_data_folder() 
    for event in event_data_dicts:    
        site_evt_number      = event[cfg_evt_siteEvt]
        site_event_id        = event[cfg_evt_evid]
        file_data_dicts      = mysqldb.retrieve_file_location(site_evt_number, mySQL_stadef[expnum])
        current_trial        = caching.trial_num_from_evid(expnum, site_event_id)
        trial_doc_folder     = "%sTrial-%s/Documentation/" % (destination, current_trial)
        report.create_report(trial_doc_folder, event)
        create_filereports(file_data_dicts, 
                           event, 
                           destination, 
                           current_trial, 
                           trial_doc_folder, 
                           default_folder) 
Example #8
    # NOTE: DataFrame.append was removed in pandas 2.0; pd.concat is the modern replacement.
    report_data = report_data.append(
        {
            'ticker': ticker,
            'weight': "%.3f" % weight,
            'avg_ret': "%.3f" % avg_ret,
            'alpha': "%.3f" % alpha,
            'beta': "%.3f" % beta,
            'exp_ret': "%.3f" % exp_ret,
            'weightB': "%.3f" % weighted_beta,
            'SumPB': "%.3f" % SumPB,
            'sharpe': "%.3f" % sharpe,
            'reference': reference_ticker
        },
        ignore_index=True)

rpt.create_report(report_directory, report_data)
'''
NOTES:

    Portfolio beta = the sum, over every security in the portfolio, of its weight times its beta:

    Bp = Ba*Wa + Bb*Wb + Bc*Wc + ... + Bn*Wn

    Get all scripts working inside of this script and then remove the defaults.

    The script will:
        -Download, update, or load the data for the desired stocks
        -Generate an optimized portfolio based on the stocks in the file
        -Run the Capital Asset Pricing Model to find the overall beta of the portfolio
        -Save all generated files in an output directory
        -Generate a report with all relevant information
'''
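
A minimal sketch of the portfolio-beta formula from the notes above; the tickers, weights, and betas are made-up illustration values:

# Hypothetical values; real weights and betas come from the optimizer and CAPM steps.
weights = {"AAA": 0.50, "BBB": 0.30, "CCC": 0.20}
betas   = {"AAA": 1.10, "BBB": 0.85, "CCC": 1.40}

# Bp = Ba*Wa + Bb*Wb + ... + Bn*Wn
portfolio_beta = sum(weights[t] * betas[t] for t in weights)
print("%.3f" % portfolio_beta)  # 0.50*1.10 + 0.30*0.85 + 0.20*1.40 = 1.085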
Example #9
# cnx (the database connection) and cargs (parsed arguments) come from the surrounding script.
try:
    lid = insert_launch(cnx)
    obj, sen = Index(), Index()
    stats, top, dl = prepare(cnx, lid, cargs["ngrams"], cargs["test"], obj,
                             sen)
    k = kernel(cargs["kernel"], cargs["sigma"])
    estimate = Estimate()
    estimate.read()

    for mode, (trset, teset) in zip(["objective", "sentiment"], dl):
        svm = SVM(k,
                  c=cargs["c"],
                  tol=cargs["tol"],
                  lpass=cargs["lpass"],
                  liter=cargs["liter"])
        svm = watch(svm, mode, len(trset[0]), estimate, cnx, lid, cargs)
        tr = train(svm, mode, trset[0], trset[1], cnx, lid)
        te = test(svm, mode, teset[0], teset[1])
        svm, ts = flush(svm, mode)
        stats = update(stats, tr, te, ts)

    create_report(cnx, lid, stats, top, cargs)
    insert_index(cnx, lid, "objective", obj)
    insert_index(cnx, lid, "sentiment", sen)
    estimate.train()
    estimate.store()
except:
    # On any failure, remove the partially written launch record, then re-raise.
    delete_launch(cnx, lid)
    raise
finally:
    cnx.close()
Example #10
import pandas as pd
import numpy as np
import report
import copy

pd.set_option('display.max_columns', 500)

test_report = np.load("test_report.npz", allow_pickle=True)
probs = test_report["probs"].tolist()
probs.append(copy.deepcopy(probs[0]))
probs.append(copy.deepcopy(probs[1]))
probs[2].data.get("pred_d")["prob_name"] = "d3"
probs[3].data.get("pred_d")["prob_name"] = "e3"

report.create_report(probs, "mode_")

x = [
    {
        "a": "a1",
        "b": 1,
        "c": 1
    },
    {
        "a": "a2",
        "b": 1,
        "c": 1
    },
    {
        "a": "a3",
        "b": 1,
        "c": 1
    },
]
Example #11
def test_create_report():
    punch = document.Punch004
    filename = 'c:\\alaki\\jafari.docx'
    report.create_report(punch, filename)
Example #12
def run_raven_greedy(show_me=False, test_problems=None):

    start_time = time.time()

    print("run raven in greedy mode.")

    probs = prob_anlg_tran.get_probs(test_problems)

    for prob in probs:

        print(prob.name)

        jaccard.load_jaccard_cache(prob.name)
        asymmetric_jaccard.load_asymmetric_jaccard_cache(prob.name)

        anlgs = prob_anlg_tran.get_anlgs(prob)

        anlg_tran_data = []
        anlg_data = []
        for anlg in anlgs:
            # score all transformations given an analogy
            tran_data = run_prob_anlg(prob, anlg)
            anlg_tran_data.extend(tran_data)

            # optimize w.r.t. transformations for this analogy
            anlg_tran_d = utils.find_best(tran_data, "pat_score")
            anlg_data.append(anlg_tran_d)

        pred_data = []
        for anlg_d in anlg_data:
            # predict with an analogy, and score all options with the prediction
            anlg_pred_data = predict(prob, anlg_d)
            pred_data.extend(anlg_pred_data)

        # optimize w.r.t. options
        pred_d = utils.find_best(pred_data, "pat_score", "pato_score")

        # imaging
        save_image(prob, pred_d.get("pred"),
                   prob.options[pred_d.get("optn") - 1], "greedy", show_me)

        # data aggregation progression, TODO maybe save them as images
        for d in anlg_tran_data:
            del d["diff"]
        for d in anlg_data:
            del d["diff"]
        for d in pred_data:
            del d["diff"]
            del d["pred"]
        del pred_d["diff"]
        del pred_d["pred"]
        aggregation_progression = {
            "anlg_tran_data": anlg_tran_data,
            "anlg_data": anlg_data,
            "pred_data": pred_data,
            "pred_d": pred_d
        }
        with open(f"./data/greedy_{prob.name}.json", "w") as outfile:
            json.dump(aggregation_progression, outfile)

        # update cache
        jaccard.save_jaccard_cache(prob.name)
        asymmetric_jaccard.save_asymmetric_jaccard_cache(prob.name)

        prob.data = aggregation_progression

    # output report
    if test_problems is None:
        report.create_report(probs, "greedy_")

    end_time = time.time()
    print(f"elapsed: {end_time - start_time:.2f} s")