Example #1
0
def register():
    '''Gets all data entered in the register view and validates it. If the
    data is valid, sends it along with the current date to the server to add
    a new user to the database; otherwise an error message is shown.
    '''
    register_view = app.views_dict['register']
    email_address = register_view.get('email_entry').get_var()
    first_name = register_view.get('first_name_entry').get_var()
    last_name = register_view.get('last_name_entry').get_var()
    gender = register_view.get('gender_entry').get_var()
    dob = register_view.get('dob_entry').get_var()
    password = register_view.get('pw_entry').get_var()
    confirm_password = register_view.get('confirm_pw_entry').get_var()

    join_date = util.get_current_date()
    user_data = [
        first_name, last_name, gender, join_date, dob, email_address, password
    ]
    valid_data = validate_user_data(user_data)
    valid_password = validate_password(password, confirm_password)
    if valid_data and valid_password:

        with db_conn as cursor:
            session_id = server.add_user(cursor, user_data)

        app.data['session_id'] = session_id
        if session_id is None:
            show_error_message('Failed to register.')
        else:
            # Log in with the newly registered credentials.
            app.views_dict['login'].get('pw_entry').set_var(password)
            app.views_dict['login'].get('email_entry').set_var(email_address)
            login()
            clear_register_data()
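
The registration flow above depends on validate_user_data and validate_password, which are defined elsewhere. A minimal sketch of what the password check might look like, purely as an illustration (the matching rule is the only thing implied by the call site; the length rule and messages are assumptions):

def validate_password(password, confirm_password):
    # Illustrative sketch only; the project's real rules may differ.
    if password != confirm_password:
        show_error_message('Passwords do not match.')
        return False
    if len(password) < 8:  # assumed minimum length
        show_error_message('Password must be at least 8 characters long.')
        return False
    return True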
Example #2
0
def register_vehicle(owner, vehicle):
    # Reject the request if no admin is currently logged in.
    currAdmin = get_current_admin()
    if currAdmin == '':
        data = {"name": "view.display_not_logged", "param": ()}
        # qs.regFlag = False
        return data
    # Pack the attributes into column-name -> value mappings.
    rowOwner = dict(zip(qs.ATT_OWNER, owner))
    rowVehicle = dict(zip(qs.ATT_VEHICLE, vehicle))

    # Insert into Vehicle and Owner.
    # Check whether the plate is already registered; if so, don't add it again.
    notExist = model.insert_vehicle(qs.it_vehicle,
                                    rowVehicle)  # insert into Vehicle
    lplate = vehicle[0]
    # if not notExist:
    #   model.updateAsReg(lplate)
    print(lplate)  # debug output
    isReg = model.is_registered_plate(lplate)
    print(isReg)  # debug output
    if not isReg:
        ownerExist = model.insert_owner(qs.it_owner, rowOwner)

        # Look up the generated owner and vehicle ids.
        oid = model.retrive_id("Oid", "Owner", qs.ATT_OWNER[1], str(owner[1]))
        oid = oid['Oid']
        strPlate = "'" + vehicle[0] + "'"
        vid = model.retrive_id("Vid", "Vehicle", qs.ATT_VEHICLE[0], strPlate)
        vid = vid['Vid']

        ownership = [oid, vid]
        rowOwnership = dict(zip(qs.ATT_OWNERSHIP, ownership))
        model.insert_ownership(qs.it_ownership, rowOwnership)

        # Registry table: record which admin registered the vehicle and when.
        date = util.get_current_date()
        quotedAdmin = "'" + currAdmin + "'"
        aid = model.retrive_id("Aid", "Admin", qs.ATT_ADMIN[3], quotedAdmin)
        aid = aid['Aid']
        # Retry PIN generation until the registry insert succeeds.
        pinExist = False
        while not pinExist:
            pin = util.genrate_pin()
            registry = [aid, vid, date, pin]
            rowRegistry = dict(zip(qs.ATT_REGISTRY, registry))
            pinExist = model.insert_registery(qs.it_registery, rowRegistry)
        data = {"name": "view.display_pin", "param": (pin, )}
        aw.isReg = False
        return data

    else:
        data = {"name": "view.display_registerd_user", "param": ()}
        aw.isReg = False
        return data
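
The dict(zip(...)) calls above pack positional attribute values into column-name to value mappings before insertion. A small illustration of that pattern (the attribute names and values below are made up, not the real qs.ATT_VEHICLE contents):

ATT_VEHICLE = ['plate', 'make', 'model', 'year']   # hypothetical columns
vehicle = ['ABC-123', 'Toyota', 'Corolla', 2012]   # hypothetical values
rowVehicle = dict(zip(ATT_VEHICLE, vehicle))
# rowVehicle == {'plate': 'ABC-123', 'make': 'Toyota',
#                'model': 'Corolla', 'year': 2012}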
Example #3
0
def add_transaction(cursor, user_email, chosen_product_ids):
    '''Adds a new transaction containing the passed product ids for the
    specified user.

    As this is a verified database call, a valid session id needs to be passed
    in (see the decorator definition) for the database request to succeed.
    '''
    user_id = database.query_user_id_from_user_email(cursor, user_email)
    date = util.get_current_date()
    transaction_data = [user_id, date]
    return database.add_transaction(cursor, transaction_data,
                                    chosen_product_ids)
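
The docstring refers to a decorator that verifies the session before the database call runs; the decorator itself is not part of this snippet. A minimal sketch of such a session check, where the decorator name, the keyword argument, and the database.is_valid_session helper are all assumptions made for illustration:

import functools

def verified_call(func):
    # Illustrative sketch only: refuse the call unless a valid session id
    # accompanies the cursor.
    @functools.wraps(func)
    def wrapper(cursor, *args, session_id=None, **kwargs):
        if session_id is None or not database.is_valid_session(cursor, session_id):
            return None  # assumed failure convention
        return func(cursor, *args, **kwargs)
    return wrapper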
Example #4
0
def update_tracker_prices_and_tweets_and_news():
    LOGGER.info('Scheduled update of tracker prices, tweets and news started')

    if util.is_market_holiday(util.get_current_date(), session):
        return

    connection = util.connect_to_postgres()
    cursor = connection.cursor()
    try:
        cursor.execute("SELECT ticker FROM Trackers")
    except Exception as e:
        cursor.close()
        connection.close()
        return {'error': str(e)}
    tickers = [record[0] for record in cursor]
    cursor.close()
    connection.close()

    print('Scheduler: fetched all tracked tickers')
    for ticker in tickers:
        fundamentals = {}
        connection = util.connect_to_postgres()
        cursor = connection.cursor()
        try:
            cursor.execute("CALL remove_old_daily_price_data(%s);", (ticker, ))
            connection.commit()
        except Exception as e:
            print({'error': e})
            connection.rollback()
        try:
            cursor.execute("CALL remove_old_minute_price_data(%s);",
                           (ticker, ))
        except Exception as e:
            print({'error': e})
            connection.rollback()
        try:
            cursor.execute("SELECT * FROM Fundamentals WHERE ticker = %s",
                           (ticker, ))
        except Exception as e:
            cursor.close()
            connection.close()
            return {'error': str(e)}
        fundamentals = cursor.fetchone()
        connection.commit()
        cursor.close()
        connection.close()

        if fundamentals is None:
            # No Fundamentals row for this ticker; skip it rather than
            # indexing into None below.
            continue

        util.add_daily_closing_price(ticker, session)
        util.add_daily_minute_price(ticker, session)
        update_news(ticker)
        update_tweets(ticker, fundamentals[1])

    print('Scheduler: successfully updated all available data')
    return {
        'success':
        'SUCCESSFULLY UPDATED ALL PRICE AND TWEET DATA FOR ALL TRACKED TICKERS'
    }
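
Each iteration of the loop above opens its own PostgreSQL connection, and an error on the SELECT can return while the connection is still open. A sketch of the same per-ticker work wrapped in try/finally so cleanup always runs (assuming psycopg2-style connection and cursor objects; error handling is simplified to re-raising):

def refresh_ticker(ticker):
    # Illustrative sketch only: same per-ticker statements as above, but
    # with cleanup guaranteed by try/finally.
    connection = util.connect_to_postgres()
    cursor = connection.cursor()
    try:
        cursor.execute("CALL remove_old_daily_price_data(%s);", (ticker, ))
        cursor.execute("CALL remove_old_minute_price_data(%s);", (ticker, ))
        cursor.execute("SELECT * FROM Fundamentals WHERE ticker = %s",
                       (ticker, ))
        fundamentals = cursor.fetchone()
        connection.commit()
        return fundamentals
    except Exception:
        connection.rollback()
        raise
    finally:
        cursor.close()
        connection.close()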
Example #5
0
def run_scraper(debug=False):
    date = util.get_current_date()

    # create the csv file
    csv_name = '{}_data.csv'.format(date)
    log_file = '{}_log.txt'.format(date)
    util.create_csv(csv_name)

    for date_pair in util.get_date_pairs():
        worker = scraper.Scraper(date_pair[0], date_pair[1], debug)
        data = worker.crawl(log_file)

        if not data:
            data_row = [date_pair[0], date_pair[1], None, None, None, None]
            util.append_row(csv_name, data_row)
        else:
            for room_type in sorted(data):
                room_data = data[room_type]

                # Merge the accommodation data from the three price tables.
                for price_key in ('price_bar', 'price_summer', 'price_adv'):
                    for accom_type in sorted(room_data[price_key]):
                        m_price = merge(room_data, accom_type)
                        data_row = [date_pair[0], date_pair[1],
                                    room_type,
                                    accom_type,
                                    m_price['p_bar'],
                                    m_price['p_sum'],
                                    m_price['p_adv']
                                    ]
                        util.append_row(csv_name, data_row)
        worker.clean()
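
merge is defined elsewhere; judging from the keys read back (p_bar, p_sum, p_adv) and the three per-room price tables, it appears to collapse the prices for one accommodation type into a single record. A hedged sketch of that idea, which is an assumption about its shape rather than the actual helper:

def merge(room_data, accom_type):
    # Illustrative sketch only: take the same accommodation type from each
    # price table, using None when a table has no entry for it.
    return {
        'p_bar': room_data['price_bar'].get(accom_type),
        'p_sum': room_data['price_summer'].get(accom_type),
        'p_adv': room_data['price_adv'].get(accom_type),
    }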
Example #6
0
def process_price_data(prices):
    col_ref = mongo_db['Live_Stock_Prices']
    #print(util.get_ema_idx(prices['s']))
    if prices and util.within_trading_hours(prices['s']):
        timestamp = datetime.datetime.strptime(
            util.epoch_to_timestamp_format(prices['s']), '%Y-%m-%d %H:%M:%S')
        new_price = {
            'volume': prices['v'],
            'open': prices['o'],
            'close': prices['c'],
            'high': prices['h'],
            'low': prices['l'],
            'timestamp': timestamp
        }

        col_ref.update_one({'ticker': prices['sym']},
                           {'$push': {
                               'minute_prices': new_price
                           }})

        on_minute(prices)
        is_vol_spike = volume_spike_detection(prices['sym'], prices)
        af2_doc = col_ref.find_one({'usecase': 'af2'})
        trackers_sent_today = af2_doc['trackers_sent_today']
        if is_vol_spike and prices['sym'] not in trackers_sent_today:
            trackers_sent_today.append(prices['sym'])
            col_ref.update_one(
                {'usecase': 'af2'},
                {'$set': {
                    'trackers_sent_today': trackers_sent_today
                }})
            msg = 'There is an EMA volume spike on {} at {}'.format(
                prices['sym'], util.get_current_date())
            message = 'Subject: {}\n\n{}'.format(
                'EMA Volume Spike Notification', msg)
            util.send_volume_spike_notification(message)
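
volume_spike_detection is defined elsewhere, and the notification text suggests it compares the latest minute volume against an exponential moving average (EMA). A minimal sketch of that idea (the window, threshold, and return convention are assumptions):

def detect_volume_spike(prev_ema, current_volume, window=20, threshold=3.0):
    # Illustrative sketch only: update an EMA of per-minute volume and flag
    # a spike when the new bar's volume far exceeds the running average.
    alpha = 2.0 / (window + 1)
    new_ema = alpha * current_volume + (1 - alpha) * prev_ema
    is_spike = prev_ema > 0 and current_volume > threshold * prev_ema
    return new_ema, is_spike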
Example #7
0
        scaler = StandardScaler()
        scaler = scaler.fit(X_test)
        X_test = scaler.transform(X_test)

        train_val_split = X_val.shape[0] / X_fit.shape[0]

        class_nn = ClassifierNN()
        class_nn.train_model(X_fit,
                             Y_fit,
                             100,
                             batch_size=24,
                             training_patience=1000,
                             val_split=train_val_split,
                             file_name=MODEL_PATH +
                             'stock_daily_change_predictor_' +
                             get_current_date() + '.h5')
        test_data = class_nn.test_model(X_test, Y_test, show_plots=False)
        df1 = pd.DataFrame(test_data['Predicted'],
                           index=test_dataset.index.values)
        pos_comb = df1[1].values + df1[2].values
        df1['pos_comb'] = pos_comb
        df2 = pd.DataFrame(test_data['Measured'],
                           index=test_dataset.index.values)
        df = pd.concat((df1, df2), axis=1)
        df.index = test_dataset.index.values
        save_pred_data_as_csv(df, prefix + '_test_results_100')

        # estimator = KerasClassifier(build_fn=class_nn, epochs=200, batch_size=5, verbose=2)
        # kfold = KFold(n_splits=10, shuffle=True, random_state=class_nn.seed)
        #
        # results = cross_val_score(estimator, X_fit, Y_fit, cv=kfold)
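
Note that the scaler above is fitted on X_test itself. The usual scikit-learn pattern is to fit scaling parameters on the training split and reuse them on the test split so that test statistics do not leak into preprocessing; a short sketch of that pattern (this is the standard idiom, not necessarily how X_fit is scaled elsewhere in this project):

from sklearn.preprocessing import StandardScaler

def scale_train_test(X_fit, X_test):
    # Fit on the training split only, then reuse those statistics
    # to transform the test split.
    scaler = StandardScaler()
    return scaler.fit_transform(X_fit), scaler.transform(X_test)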
Example #8
0
def daily_mongo_updates():
    print('Scheduled MongoDB maintenance started')

    if util.is_market_holiday(util.get_current_date(), session):
        return

    connection = util.connect_to_postgres()
    cursor = connection.cursor()
    try:
        cursor.execute("SELECT ticker FROM Trackers")
    except Exception as e:
        cursor.close()
        connection.close()
        return {'error': str(e)}
    tickers = [record[0] for record in cursor]
    cursor.close()
    connection.close()

    col_ref = mongo_db['Live_Stock_Prices']

    # delete daily_prices older than a week and reset every minute_prices array and reset emas
    delete_before_timestamp = datetime.datetime.strptime(
        util.get_date_n_days_ago(7), '%Y-%m-%d')

    for tracker in col_ref.find():
        if 'usecase' not in tracker:
            col_ref.update_one({'ticker': tracker['ticker']}, {
                '$pull': {
                    'daily_prices': {
                        'timestamp': {
                            '$lt': delete_before_timestamp
                        }
                    }
                },
                '$set': {
                    'minute_prices': [],
                    'minute_volume': [-1] * 391,
                    'prev_ema': tracker['ema_volume']
                }
            })
        else:
            col_ref.update_one({'usecase': 'af2'},
                               {'$set': {
                                   'trackers_sent_today': []
                               }})

    print('Scheduler: MongoDB tracker documents reset')
    for ticker in tickers:
        url = '{}/v2/aggs/ticker/{}/prev?apiKey={}'.format(
            POLYGON_BASE_URL, ticker, POLYGON_API_KEY)
        resp = util.polygon_get_request_multithreaded(url, session)
        if not resp or not resp.get('results'):
            continue

        resp = resp['results'][0]
        timestamp = datetime.datetime.strptime(
            util.epoch_to_timestamp_format(resp['t']), '%Y-%m-%d %H:%M:%S')
        new_doc = {
            'volume': resp['v'],
            'open': resp['o'],
            'close': resp['c'],
            'high': resp['h'],
            'low': resp['l'],
            'timestamp': timestamp
        }
        col_ref.update_one({'ticker': ticker},
                           {'$push': {
                               'daily_prices': new_doc
                           }})

    print('Scheduler: MongoDB update complete')
    return {'success': 'SUCCESSFULLY UPDATED ALL PRICE DATA FOR MONGO'}
Example #9
0
    return full_df


if __name__ == "__main__":
    valid_tasks = ['get_data', 'score_data', 'create_training_data', 'predict', 'other']
    task = input('available tasks: ' + '"' + '", "'.join(valid_tasks) + '"\nwhat task would you like to perform? ')
    if task not in valid_tasks:
        raise ValueError(task + ' is not a valid entry')

    if task in ['get_data', 'create_training_data']:
        day = None
    else:
        day = input('\nPlease enter the date in YYYY-MM-DD format. For the current date just press enter.\nWhich date would you like to analyze? ')
        if len(day) == 0:
            day = get_current_date()
            print(day)

    if task == 'get_data':
        tickers = list(PENNY_STOCKS.keys())  # list(ALL_TICKERS.keys())
        create_and_save_data(tickers)

    elif task == 'score_data':
        all_stats = MultiSymbolStats(ALL_TICKERS.keys(), day)
        news_ax, mvmt_ax = all_stats.plot_score()
        all_stats.print_indicators_for_many_symbols()
        plot_informative_lines(news_ax, style='k-')
        plot_informative_lines(mvmt_ax, style='k-')
        plt.show()

    elif task == 'create_training_data':
Example #10
0
def seek():
    """Initiates the collection of search results and returns a dictionary of 
    user-specific dictionary of search results"""
    collection = dict()
    for user, user_attr in USERS.items():
        """Handle reading the "inputs" file of the user from Google Drive"""
        inputs = gs.handle_inputs(user)
        """
        # Default method of reading inputs:
        inputs = read_file_inputs(user)
        """

        if not inputs:
            continue
        """Handle preparing DataFrame out of existing data"""
        existing_user_data = existing_data[existing_data["user"] == user]
        print("\nexisting user data: {}".format(existing_user_data.shape))
        """Initiate saving the output in DB"""
        user_data_collection = dict()
        for sheet_name, inputs_df in inputs.items():
            print("\nsheet_name = {}".format(sheet_name))

            if not inputs_df.empty:
                inputs_df.dropna(inplace=True)
                inputs_df.reset_index(inplace=True)
                data_final = list()
                for i in range(len(inputs_df)):
                    (_, country, search_term) = inputs_df.loc[i]
                    country = country.strip().lower()
                    sub_df = CLM[CLM["country_name"] == country]
                    target_lang = sub_df["language_ISO"].squeeze()
                    country_code = sub_df["country_ISO"].squeeze()
                    search_string = "{} {}".format(country,
                                                   search_term.strip())
                    """Fetch search results"""
                    util.write_print_logs(
                        "search string:{}".format(search_string))

                    # squeeze() returns a scalar on a unique match; if the
                    # country lookup was empty or ambiguous the result is
                    # still a pandas object, which exposes an `empty` attribute.
                    is_target_lang_empty = 'empty' in dir(target_lang)
                    is_country_code_empty = 'empty' in dir(country_code)
                    if is_target_lang_empty or is_country_code_empty:
                        msg_ = "Is target_lang empty: {}\n"
                        msg_ += "Is country code empty: {}\n"
                        msg_ += "Skipping this row!"
                        util.write_print_logs(
                            msg_.format(is_target_lang_empty,
                                        is_country_code_empty))
                        continue
                    search_results = search.get_search_results(
                        query=search_string,
                        lang=target_lang,
                        geoloc=country_code,
                        user_attr=user_attr)

                    if search_results:
                        util.write_print_logs("Found search results")
                        util.write_responses(search_results, search_term)
                        unified_results = util.unify_results(search_results)
                        if unified_results:
                            util.write_print_logs("Fetching Useful Info")
                            search_response = util.fetch_useful_info(
                                unified_results, search_string, user_attr)

                            for response in search_response:
                                data_final.append(response)

                data_final_df = pd.DataFrame(data_final,
                                             columns=data_final_columns)
                data_final_df["user"] = user
                data_final_df["sector"] = sheet_name
                data_final_df["created_date"] = util.get_current_date()
                """Removing duplicate results from the final object"""
                data_final_df.drop_duplicates(subset="link", inplace=True)
                data_final_df.sort_values(by="search_query", inplace=True)
                user_data_collection[sheet_name] = data_final_df
        collection[user] = user_data_collection
    return collection
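
The 'empty' in dir(...) tests above work because squeeze() returns a scalar on a unique match but still returns a pandas object (which has an .empty attribute) when the country lookup is empty or ambiguous. A more explicit way to express the same check, sketched assuming pandas is available as pd:

import pandas as pd

def lookup_failed(value):
    # Illustrative sketch only: True when squeeze() did not reduce the
    # lookup to a single scalar (empty or ambiguous match).
    return isinstance(value, (pd.Series, pd.DataFrame))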