Example #1
0
 def __init__(self, start_date, path_to_save_defaults,
              path_to_save_annualized_returns):
     """Set up the DB connection, report dates and output paths.

     start_date: "YYYY-MM-DD" string marking the first day to report on.
     path_to_save_defaults: file path for the defaults line-graph PNG.
     path_to_save_annualized_returns: file path for the returns PNG.
     """
     self.connect = Connect()
     # Compute "today" exactly once so self.today and the table-name
     # suffix can never disagree (the original called datetime.today()
     # twice, which could straddle midnight and yield mismatched dates).
     self.today = datetime.today().strftime("%Y-%m-%d")
     self.table_name = "daily_notes_metrics_{date}".format(
         date=self.today.replace("-", "_"))
     self.start_date = start_date
     self.path_to_save_defaults = path_to_save_defaults
     self.path_to_save_annualized_returns = path_to_save_annualized_returns
Example #2
0
 def __init__(self):
     """Authenticate against the Prosper API, build the HTTP header, and
     open the database connection and logger used by this tracker."""
     self.config = default.config
     prosper_cfg = self.config['prosper']
     token = TokenGeneration(
         client_id=prosper_cfg['client_id'],
         client_secret=prosper_cfg['client_secret'],
         ps=prosper_cfg['ps'],
         username=prosper_cfg['username']).execute()
     self.access_token = token
     self.header = utils.http_header_build(token)
     Connect.__init__(self)  # Initialize connection (mixin-style init)
     self.logger = log.create_logger(log_name="metrics_app",
                                     logger_name="tracking_metrics_logger")
Example #3
0
 def __init__(self, order_header, listing_header, time_to_run_for,
              filters_dict, bid_amt, available_cash, min_run_time):
     """Hold the order/listing HTTP headers, search filters and bidding
     budget, and fix the wall-clock deadline for the order loop."""
     self.order_header = order_header
     self.listing_header = listing_header
     self.filters_dict = filters_dict
     self.bid_amt = bid_amt
     # Tracked separately from the caller's figure: this is decremented
     # as orders go out.
     self.remaining_cash = available_cash
     self.lock = threading.Lock()
     self.min_run_time = min_run_time
     # NOTE(review): `logging` appears to be a project module exposing
     # create_logger, not the stdlib logging package — confirm.
     self.logger = logging.create_logger(logger_name="search_and_destroy",
                                         log_name="app_run")
     # Absolute timestamp after which workers stop submitting orders.
     self.time_to_continuously_run_submit_orders = time.time() + time_to_run_for
     self.connect = Connect()
Example #4
0
    def display_current_annualized_return(self):
        query = "select annualized_return, annualized_return_late_equals_default from daily_annualized_returns where date in (select max(date) from daily_annualized_returns);"
        results = Connect().execute_select(query)
        for r in results:
            self.message += """
Current annualized_return: {annualized_return}%
annualized_return_minus_late: {annualized_return_late_equals_default}%
""".format(annualized_return=r[0], annualized_return_late_equals_default=r[1])
 def pull_notes_table(self):
     """Return every row from `notes` effective on self.date.

     Rows come back as DictRow objects (column access by name), so callers
     can compare them field-by-field against API records.
     """
     connect = Connect()
     cursor = connect.connection.cursor(
         cursor_factory=psycopg2.extras.DictCursor
     )  # Pulling in extra muscle with DictCursor
     try:
         # Parameterized query: the driver quotes self.date safely instead
         # of interpolating it straight into the SQL string.
         cursor.execute(
             "select * from notes where %s between effective_start_date and effective_end_date;",
             (self.date,))
         notes_data = cursor.fetchall()
     finally:
         cursor.close()  # the original leaked the cursor
     return notes_data
Example #6
0
 def display_notes_purchased_last_X_days_by_rating(self, days_to_query):
     """Append a per-rating count of notes bought in the last N days."""
     query = "select prosper_rating,count(*) from notes where ownership_start_date > current_date - {days_to_query} and latest_record_flag = 't' group by 1 order by 1 asc;".format(
         days_to_query=days_to_query)
     msg = "Notes purchased over the past {days} days:\n".format(
         days=days_to_query)
     for row in Connect().execute_select(query):
         msg += "{prosper_rating}: {count}\n".format(prosper_rating=row[0],
                                                     count=row[1])
     self.message += "\n{loans}".format(loans=msg)
Example #7
0
 def display_average_notes_purchased_last_X_days(self, days_to_query):
     """Append the mean number of notes purchased per day over N days."""
     query = "select count(*) from notes where ownership_start_date > current_date - {days_to_query} AND latest_record_flag='t'".format(
         days_to_query=days_to_query)
     number_of_new_loans = Connect().execute_select(query)[0][0]
     # Zero purchases maps to a flat 0 average (skips the round() call).
     avg_daily_notes_purchased = (
         round(number_of_new_loans / days_to_query, 2)
         if number_of_new_loans != 0 else 0)
     self.message += "\nAn average of {avg} notes have been purchased per day for the past {days} days".format(
         avg=avg_daily_notes_purchased, days=days_to_query)
Example #8
0
 def display_bids_placed_today_by_prosper_rating(self):
     """Append today's bid counts, grouped by prosper rating, to self.message."""
     # Interpolate a plain YYYY-MM-DD string (the convention everywhere
     # else in this file) rather than str(datetime.today()), which embeds
     # a time component and only worked because postgres coerced the
     # literal to a date.
     query = """
     select lfu.prosper_rating, count(*)
     from bid_requests br
     join listings_filters_used lfu
     on br.listing_id = lfu.listing_id
     where br.created_timestamp::date = '{date}'
     group by 1;
     """.format(date=datetime.today().strftime("%Y-%m-%d"))
     msg = "Bids placed today by prosper rating:\n"
     bids = Connect().execute_select(query)
     for b in bids:
         msg += "{prosper_rating}: {count}\n".format(prosper_rating=b[0],
                                                     count=b[1])
     self.message += "\n{bids}".format(bids=msg)
Example #9
0
    def execute_dict_threaded(self, filters_dict):
        """Run every filter query in its own thread.

        Returns (listings_list, track_filters): listing numbers found and
        a dict tracking which filter found which listing; both are filled
        in-place by the workers.
        """
        start_time = time.time()
        listings_list = []
        track_filters = {}
        # TODO put get_bid_listings in a proper location
        already_invested_listings = Connect().get_bid_listings()
        workers = [
            threading.Thread(target=self.thread_worker,
                             args=(query, url, listings_list,
                                   already_invested_listings, track_filters))
            for query, url in filters_dict.items()
        ]
        for worker in workers:
            worker.start()
        # Join before returning, otherwise the result containers would
        # still be getting filled in by the workers.
        for worker in workers:
            worker.join()
        self.logger.info("Run time: {run_time}, Total elapsed time to run execute_dict_threaded in Listing: {time} seconds".format(run_time=datetime.now(), time=time.time() - start_time))
        return listings_list, track_filters
Example #10
0
 def execute_dict_sequential(self, filters_dict):
     """Run each filter query in turn against the listing API.

     Returns (listings_list, track_filters): de-duplicated listing numbers
     found, and a dict mapping listing -> filters that found it.
     """
     start_time = time.time()
     listings_list = []
     track_filters = {}
     already_invested_listings = Connect().get_bid_listings()  # TODO put this function in a proper location
     for query in filters_dict:
         r = requests.get(filters_dict[query], headers=self.header, timeout=30.0)
         query_listing = r.json()
         # Guard against throttled/error responses that carry no 'result'
         # key (the threaded variant of this logic does the same check);
         # the original raised KeyError here when throttled.
         results = query_listing.get('result') or []
         for listing in results:
             listing_number = listing['listing_number']
             if listing_number not in already_invested_listings:
                 # populates track_filters dict to be inserted into psql later
                 self.track_filter(track_filters, listing_number, query)
                 if listing_number not in listings_list:
                     listings_list.append(listing_number)
         print("Ran: {query}, found {result_length} listings".format(query=query, result_length=len(results)))
     print("Total elapsed time to run: {time} seconds".format(time=time.time() - start_time))
     return listings_list, track_filters
Example #11
0
class SearchAndDestroy:
    """Continuously polls the Prosper listing API with a set of filter
    queries and submits buy orders for matching listings until a deadline.

    One worker thread runs per filter query; shared state (remaining_cash
    and the submitted-orders list) is guarded by self.lock.
    """

    def __init__(self, order_header, listing_header, time_to_run_for,
                 filters_dict, bid_amt, available_cash, min_run_time):
        # order_header / listing_header: HTTP headers for the order and
        # listing API calls. time_to_run_for: seconds to keep polling.
        # filters_dict: {query_name: listing-API GET url}. bid_amt: target
        # bid per listing. min_run_time: minimum seconds per poll cycle.
        self.order_header = order_header
        self.listing_header = listing_header
        self.filters_dict = filters_dict
        self.bid_amt = bid_amt
        self.remaining_cash = available_cash  # Renamed variable to avoid confusion as this is being updated with orders
        self.lock = threading.Lock()
        self.min_run_time = min_run_time
        self.logger = logging.create_logger(logger_name="search_and_destroy",
                                            log_name="app_run")
        # Absolute timestamp after which worker threads stop submitting.
        self.time_to_continuously_run_submit_orders = time.time(
        ) + time_to_run_for
        self.connect = Connect()

    def listing_logic(self, query, query_get):
        """Poll the listing API once for `query` (retrying while throttled).

        Returns (listings_found, track_filters, throttled_count).
        """
        already_invested_listings = self.connect.get_bid_listings(
        )  # Takes a fraction of a second, should be ok. Repetitive as submitted_order_listings will handle it, but prefer cutting the listing logic off if not needed
        listings_found = []
        throttled_count = 0
        track_filters = {}  # For tracking of what filters are finding notes
        i_got_throttled = True  # Sometimes get throttled, will run again if throttled
        while i_got_throttled:
            r = requests.get(query_get,
                             headers=self.listing_header,
                             timeout=30.0)
            query_listing = r.json()
            if 'result' in query_listing:  # Can get throttled so only execute if get a result
                # if 'result' may be slow
                result_length = len(query_listing['result'])
                if result_length > 0:
                    for i in range(result_length):
                        listing_number = query_listing['result'][i][
                            'listing_number']
                        prosper_rating = query_listing['result'][i][
                            'prosper_rating']
                        if listing_number not in already_invested_listings:
                            self.track_filter(
                                track_filters, listing_number, query,
                                prosper_rating
                            )  # populates track_filters dict to be inserted into psql later
                            # NOTE(review): this repeats the outer check
                            # verbatim (always True here); the sequential
                            # variant tests "not in listings_list" at this
                            # point — likely meant to dedupe against
                            # listings_found. Confirm before changing.
                            if listing_number not in already_invested_listings:
                                listings_found.append(listing_number)
                                logging.log_it_info(
                                    self.logger,
                                    "filter {query} found listing: {listing} at {current_time}"
                                    .format(query=query,
                                            listing=listing_number,
                                            current_time=datetime.now()))

                i_got_throttled = False
            else:
                if 'errors' in query_listing:
                    # Throttled by the API: count it and retry the GET.
                    throttled_count += 1
                else:
                    logging.log_it_info(
                        self.logger,
                        "not an errors in response, response is: {response}".
                        format(response=query_listing))
        return listings_found, track_filters, throttled_count

    def order_logic(self, listing_list, bid_amt, filters_used):
        """POST one bid per listing to the order API and persist the
        outcome via handle_order_sql."""
        request = {"bid_requests": []}
        for l in listing_list:
            request['bid_requests'].append({
                "listing_id": l,
                "bid_amount": round(bid_amt, 2)
            })
            # TODO What happens if i get throttled!? Is Prosper API post / get throttled different or the same?? Monitor and modify if need error handling like in listing logic
        response = requests.post(
            default.config['prosper']['prosper_order_url'],
            json=request,
            headers=self.order_header).json()
        logging.log_it_info(self.logger,
                            "request = {request}".format(request=request))
        logging.log_it_info(self.logger,
                            "response = {response}".format(response=response))
        self.handle_order_sql(response, filters_used)

    def thread_worker(self, query, query_get, submitted_order_listings):
        """Worker loop for one filter: poll the listing API, dedupe against
        the shared submitted list, size bids to the cash balance, and place
        orders until the run deadline passes.

        submitted_order_listings is shared across all workers and only
        mutated under self.lock.
        """
        logging.log_it_info(
            self.logger,
            "Started running {query} at {time}".format(query=query,
                                                       time=datetime.now()))
        listing_pings = 0
        order_pings = 0
        total_throttle_count = 0
        while time.time() < self.time_to_continuously_run_submit_orders:
            start_time = time.time()
            listings_found, filters_used, throttle_count = self.listing_logic(
                query=query, query_get=query_get)
            listing_pings += 1
            total_throttle_count += throttle_count
            if len(listings_found) > 0:
                # Critical section: dedupe + cash accounting must be atomic
                # across worker threads.
                with self.lock:
                    unique_listings = []
                    for listing in listings_found:
                        if listing not in submitted_order_listings:
                            submitted_order_listings.append(listing)
                            unique_listings.append(listing)
                    listings_to_invest, new_bid_amt, cash_used = self.handle_cash_balance(
                        self.remaining_cash, unique_listings)
                    self.remaining_cash -= cash_used  # Will count cash used towards an expired listing. Calculate cash on fly because get cash from prosper api not always quick enough
                if len(listings_to_invest) > 0:
                    logging.log_it_info(
                        self.logger,
                        "Listings to invest at {current_time}: {listings}".
                        format(listings=listings_to_invest,
                               current_time=datetime.now()))
                    self.order_logic(
                        listing_list=listings_to_invest,
                        bid_amt=new_bid_amt,
                        filters_used=filters_used
                    )  # Put in order, no need to sleep if order placed since that takes time
                    logging.log_it_info(
                        self.logger,
                        "Listings invested at {current_time}: {listings}".
                        format(current_time=datetime.now(),
                               listings=listings_to_invest))
                    # BUG Only inserting filters used if order placed... I prefer to have filters inserted if filter found something but overlaps with a previous filter will not insert... This was not a problem with run.py
                    order_pings += 1
                else:
                    self.wait_for_throttle_cap(start_time, self.min_run_time)
            else:
                self.wait_for_throttle_cap(start_time, self.min_run_time)

        self.connect.close_connection()
        logging.log_it_info(
            self.logger,
            "Ended running {query} at {time}, with {pings} pings to the listing api, and {order_ping} order pings to the order api, and ignored {throttle_count} throttles from api"
            .format(query=query,
                    time=datetime.now(),
                    pings=listing_pings,
                    order_ping=order_pings,
                    throttle_count=total_throttle_count))

    @staticmethod
    def wait_for_throttle_cap(start_time, min_run_time):
        """Sleep out the remainder of min_run_time so the API is polled at
        most once per min_run_time seconds per worker."""
        diff = time.time() - start_time
        if diff < min_run_time:
            time.sleep(min_run_time - diff)

    def execute(self):
        """Spawn one worker thread per filter query and wait for them all."""
        threads = []
        submitted_order_listings = []

        for query in self.filters_dict:
            t = threading.Thread(target=self.thread_worker,
                                 args=(query, self.filters_dict[query],
                                       submitted_order_listings))
            threads.append(t)
            t.start()
            time.sleep(
                0.07143
            )  # Start threads over ~ 1 second to space out threads, hopefully increasing chance of hitting a note fi multiple filters can find it. filters +1 / 1
            # TODO automate sleep
        for thread in threads:
            thread.join()

    """
    Utility function to track filters
    track filters looks like:
    {11762017: ['example_query1'], 11636219: ['example_query1'], 11830273: ['example_query1'], 11641319: ['example_query1'], 11642054: ['example_query1'], 11834419: ['example_query1']}
    """

    @staticmethod
    def track_filter(json, listing_id, filter_used, prosper_rating):
        # `json` here is just the tracking dict parameter (it shadows the
        # common module name; no json module is used in this method).
        if listing_id in json:
            json[listing_id][0].append(filter_used)
            json[listing_id][1].append(prosper_rating)
        else:
            # Value is a 2-tuple of lists: ([filters], [ratings]).
            json[listing_id] = [filter_used], [prosper_rating]

    def handle_order_sql(self, response, filters_used_dict):
        """Persist a successful order response, or record a listing in
        PENDING_COMPLETION status so future runs skip it."""
        # TODO error handling per error code type from prosper
        if "order_id" in response:
            try:
                sql = SQLMetrics()
                sql.run_listing_filters_used(
                    filters_used_dict
                )  # inserts the filters used into listings_filters_used for tracking
                sql.run_insert_bid_request(
                    response
                )  # TODO add error handling. Print the error and continue
                sql.run_insert_orders(response)
                sql.close_connection()
            except:  # TODO make specific for now catch all errors
                e = sys.exc_info()[0]
                logging.log_it_info(self.logger, e)
        if 'code' in response:  # Sometimes a listing_id cannot be invested in
            # Example response {'code': 'ORD0019', 'message': 'Listing [10846973] is in status [PENDING_COMPLETION] and cannot currently accept bids.'}
            try:
                # Slice the listing id out of the message text between
                # "Listing [" (index 9) and the first "]".
                listing_string = response['message']
                end_index = listing_string.find("]")
                pending_completion_listing = listing_string[9:end_index]
                sql = SQLMetrics()
                sql.run_insert_bid_request_pending_completion(
                    pending_completion_listing
                )  # This adds the listing to bid_requests table and therefore will be excluded in the future runs
                logging.log_it_info(
                    self.logger,
                    "Added {listing} to pending_completion_listings list".
                    format(listing=pending_completion_listing))
                sql.close_connection()
            except TypeError as type_error:
                logging.log_it_info(
                    self.logger,
                    "type error: {error}".format(error=type_error))
            except:
                e = sys.exc_info()[0]
                logging.log_it_info(self.logger, e)

    def recalculate_bid_amount(self, cash, listing_list):
        """Drop listings from the front of the list until `cash` covers a
        >= $25 bid for each remaining listing; return (amount, listings).

        NOTE(review): if the loop ever emptied the list before the
        condition held, this would fall through and implicitly return
        None; given the caller's guards (cash >= 25, len > 1) that path
        appears unreachable, but confirm before relying on it.
        """
        while len(listing_list) > 0:
            listing_list.pop(
                0
            )  # Remove an element since this function is only used when aval_amt <= 25 and investment_number > 1. Remove first element, since that index is highest chance of being an expired listing
            aval_bids = cash / 25
            new_listing_length = len(listing_list)
            if aval_bids >= new_listing_length:
                aval_amt = cash / new_listing_length
                return aval_amt, listing_list

    # Add to testing suite
    def handle_cash_balance(self, available_cash, listings_list):
        """Size the bid to the available cash.

        Returns (listings_to_invest, bid_amount, cash_used), where
        cash_used = len(listings_to_invest) * bid_amount (0 when there is
        nothing to invest or cash is under the $25 minimum).
        """
        investment_number = len(listings_list)
        if investment_number == 0:  # Handles no listings
            return listings_list, self.bid_amt, 0  # [], self.bid_amt, no cash used
        else:
            new_listing_list = listings_list
            new_amt = self.bid_amt
            aval_amt = available_cash / investment_number
            if available_cash >= 25:  # min bid for a listing
                if aval_amt < self.bid_amt:
                    # NOTE(review): aval_amt = cash / n is always <= cash
                    # for n >= 1, so this condition looks redundant.
                    if aval_amt <= available_cash:
                        if aval_amt >= 25:
                            situation_one_msg = "1: {cash} is not enough available cash for desired bid amount of {amt}, for {investment_number} listings, modifying to {new_amt}".format(
                                cash=available_cash,
                                investment_number=investment_number,
                                amt=self.bid_amt,
                                new_amt=aval_amt)
                            new_amt = aval_amt
                            logging.log_it_info(self.logger, situation_one_msg)
                        elif aval_amt <= 25 and investment_number > 1:
                            new_amt, new_listing_list = self.recalculate_bid_amount(
                                cash=available_cash,
                                listing_list=listings_list)
                            situation_two_msg = "2: {cash} is not enough available cash for desired bid amount of {amt}, for {investment_number}  listings, modifying to bids of {new_amt} for {listing_num} listings".format(
                                cash=available_cash,
                                investment_number=investment_number,
                                amt=self.bid_amt,
                                new_amt=new_amt,
                                listing_num=len(new_listing_list))
                            logging.log_it_info(self.logger, situation_two_msg)

                else:
                    normal_op_msg = f"available_cash of {available_cash} is enough for normal bidding submission"
                    logging.log_it_info(self.logger, normal_op_msg)

            else:
                situation_three_msg = "Your available cash of {cash} is not enough to invest in anything... Wow dude".format(
                    cash=available_cash)
                logging.log_it_info(self.logger, situation_three_msg)
                new_listing_list = []
            return new_listing_list, new_amt, len(
                new_listing_list) * new_amt  # May be the same as original
Example #12
0
class CreateDailyMetricsTable:
    """Builds daily notes-metrics line graphs (default tracking and
    annualized returns) from the database and saves them as PNG files."""

    def __init__(self, start_date, path_to_save_defaults,
                 path_to_save_annualized_returns):
        """start_date: "YYYY-MM-DD" string for the first day to report on.
        The two paths are where the defaults and annualized-returns graphs
        get written."""
        self.connect = Connect()
        # Compute "today" once so self.today and the table-name suffix
        # cannot disagree (the original called datetime.today() twice,
        # which could straddle midnight).
        self.today = datetime.today().strftime("%Y-%m-%d")
        self.table_name = "daily_notes_metrics_{date}".format(
            date=self.today.replace("-", "_"))
        self.start_date = start_date
        self.path_to_save_defaults = path_to_save_defaults
        self.path_to_save_annualized_returns = path_to_save_annualized_returns

    def build_dates_list(self):
        """Return every date from self.start_date through self.today
        (inclusive) as "YYYY-MM-DD" strings in ascending order."""
        start_date_datetime = datetime.strptime(self.start_date, "%Y-%m-%d")
        end_date_datetime = datetime.strptime(self.today, "%Y-%m-%d")
        days_to_run = (end_date_datetime - start_date_datetime).days
        # Building forward from start_date replaces the original
        # build-backwards-then-reverse dance; output is identical.
        return [
            (start_date_datetime + timedelta(days=d)).strftime("%Y-%m-%d")
            for d in range(days_to_run + 1)
        ]

    def create_default_tracking_line_graph_png(self):
        """Plot projected (ours and prosper's) vs actual default counts for
        every day in range and save the figure to path_to_save_defaults."""
        dates_to_run = self.build_dates_list()
        projected_default = []
        projected_default_prosper = []
        actual_default = []

        def total_defaults(default_dict):
            # Sum the default counts across every key (rating) in the dict.
            return sum(default_dict.values())

        for day in dates_to_run:
            notes = NotesMetrics(day)
            projected_default_dict, projected_default_dict_prosper, actual_default_dict, _, _ = notes.default_rate_tracking(
            )
            projected_default.append(total_defaults(projected_default_dict))
            projected_default_prosper.append(
                total_defaults(projected_default_dict_prosper))
            actual_default.append(total_defaults(actual_default_dict))

        plt.figure(2)
        plt.plot(dates_to_run, projected_default, label="projected_defaults")
        plt.plot(dates_to_run,
                 projected_default_prosper,
                 label="projected_defaults_prosper")
        plt.plot(dates_to_run, actual_default, label="actual_defaults")
        plt.title("Defaults Over Time {start_date} - {end_date}".format(
            start_date=self.start_date, end_date=self.today))
        plt.xlabel("Date")
        plt.ylabel("Number of Defaults")
        plt.legend()

        plt.savefig(self.path_to_save_defaults)

    def create_annualized_returns_line_graph(self):
        """Plot annualized returns (and the late==default variant) over
        time and save the figure to path_to_save_annualized_returns."""
        query = "select * from daily_annualized_returns;"
        results = self.connect.execute_select(query)
        if not results:
            # Nothing recorded yet: the original crashed indexing dates[0]
            # on an empty result set.
            return

        dates = [row[0] for row in results]
        annualized_returns = [row[1] for row in results]
        annualized_returns_late_equals_default = [row[2] for row in results]

        plt.figure(3)
        plt.plot(dates, annualized_returns, label="annualized_returns")
        plt.plot(dates,
                 annualized_returns_late_equals_default,
                 label="annualized_returns_late_equals_default")
        plt.title(
            "annualized_returns Over Time {start_date} - {end_date}".format(
                start_date=dates[0], end_date=dates[-1]))
        plt.xlabel("Date")
        plt.ylabel("annualized_returns %")
        plt.legend()

        plt.savefig(self.path_to_save_annualized_returns)
Example #13
0
class UpdateNotes:
    """Synchronizes the local `notes` table with the Prosper notes API:
    pages through the API, flags stored notes whose fields differ, and
    builds one SQL transaction that closes out stale rows and inserts the
    fresh ones."""

    def __init__(self):
        """Fetch an API token, build the request header, and open the DB
        connection and logger this updater uses."""
        self.config = default.config
        prosper_cfg = self.config['prosper']
        self.access_token = TokenGeneration(
            client_id=prosper_cfg['client_id'],
            client_secret=prosper_cfg['client_secret'],
            ps=prosper_cfg['ps'],
            username=prosper_cfg['username']
        ).execute()
        self.header = utils.http_header_build(self.access_token)
        self.connect = Connect()
        self.logger = log.create_logger(log_name="metrics_app", logger_name="update_notes_logger")

    def check_if_note_needs_update(self, api_record, database_record):
        """Return True when any field of the API record differs from the
        stored record, flagging the note for an update.

        Notes in a terminal status are never flagged for pure ageing
        fields, so finished notes do not churn forever; as in the
        original, hitting such a field aborts the whole comparison.
        accrued_interest is ignored entirely because it changes daily.
        """
        # Fields that keep moving after a note reaches a terminal status.
        frozen_fields = {
            "COMPLETED": {"age_in_months"},
            "CHARGEOFF": {"age_in_months", "days_past_due"},
            "DEFAULTED": {"age_in_months", "days_past_due"},
        }
        for key in api_record:
            for db_row in database_record:  # Still need to loop even though usually one record
                status = db_row['note_status_description']
                if key in frozen_fields.get(status, ()):
                    return False
                if key == "accrued_interest":  # changes daily; ignore
                    continue
                # Compare as strings so DB and API representations line up.
                if str(api_record[key]) != str(db_row[key]):
                    return True
        return False

    def build_notes_to_update_query(self, since_date="2019-11-25", limit=25):
        """Page through the notes API and build the SQL needed to bring the
        database in line with it.

        since_date: only notes from this date onward are requested
            (previously hard-coded inline).
        limit: API page size (previously hard-coded inline).

        Returns (update_query, insert_query): the query closing out stale
        rows and the query inserting the fresh ones.
        """
        list_of_notes_to_update = []
        insert_query = ""
        first_insert_record = True
        offset = 0
        while True:
            response = requests.get(
                note_util.get_url_get_request_notes_by_date(offset, since_date, limit),
                headers=self.header, timeout=30.0).json()['result']
            if response is None:
                break  # past the last page
            for r in response:
                cursor = self.connect.connection.cursor(
                    cursor_factory=psycopg2.extras.DictCursor)  # DictCursor: column access by name
                # Parameterized query instead of string interpolation.
                cursor.execute(
                    "select * from notes where loan_note_id = %s and latest_record_flag='t';",
                    (r['loan_note_id'],))
                note_record = cursor.fetchall()
                cursor.close()  # the original leaked one cursor per record
                if len(note_record) == 0:
                    continue  # TODO raise error flag when the note is missing locally
                if not self.check_if_note_needs_update(api_record=r, database_record=note_record):
                    continue
                # First record uses the full INSERT statement; later ones
                # append additional VALUES rows to it.
                # Fixed: datetime.date.today() raised AttributeError under
                # this file's `from datetime import datetime` style.
                if first_insert_record:
                    insert_query += sql_query_utils.insert_notes_query(
                        response_object=r,
                        effective_start_date=datetime.today().date())
                    first_insert_record = False
                else:
                    insert_query += sql_query_utils.insert_notes_addational_value(
                        response_object=r,
                        effective_start_date=datetime.today().date())
                list_of_notes_to_update.append(r['loan_note_id'])
            offset += limit
        update_query = sql_query_utils.update_notes_query(list_of_notes_to_update)
        msg = "{num} notes to update".format(num=len(list_of_notes_to_update))
        print(msg)
        self.logger.debug(msg)
        return update_query, insert_query

    def build_transaction(self):
        """Wrap the update and insert SQL in a single transaction string."""
        update_query, insert_query = self.build_notes_to_update_query()
        return """ BEGIN TRANSACTION;
        {update_query}
        {insert_query};
        END TRANSACTION;
        """.format(update_query=update_query, insert_query=insert_query)

    def execute(self):
        """Run the full sync transaction against the database."""
        self.connect.execute_insert_or_update(self.build_transaction())
Example #14
0
 def __init__(self, header):
     """Stash today's timestamp and the supplied HTTP header, then open
     the database connection via the Connect initializer."""
     self.header = header
     self.date = datetime.today()
     Connect.__init__(self)  # Initialize connection