Example #1
    def __init__(self, type, symbol):

        # plaintext gRPC channel to the database service
        channel = grpc.insecure_channel(f'{get_grpc_hostname()}:6565')
        self.stub = database_pb2_grpc.DatabaseStub(channel)

        self.type = type
        self.symbol = symbol
        self.api_token = get_api_token()
        self.url = f'https://api.fugle.tw/realtime/v0/intraday/quote?symbolId={self.symbol}&apiToken={self.api_token}'

        self.tick = 30  # call API every 30 seconds

        self.quotes = []

        self.ask_units = 0
        self.bid_units = 0
        self.diff_units = 0

        self.prev_ask = {'price': 0, 'unit': 0}
        self.prev_bid = {'price': 0, 'unit': 0}

        self.on_time = datetime.time(9, 1)     # just after TWSE open (09:00)
        self.off_time = datetime.time(13, 32)  # just after TWSE close (13:30)

        self.is_closed = False
        self.date = None
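A minimal usage sketch for the poller above. The class name Fugle is taken from the import in Example #7; the polling loop itself is an assumption for illustration, since only the constructor is shown here.

    import datetime
    import time

    from quote.fugle import Fugle  # class name taken from Example #7's import

    quote = Fugle(type='EQUITY', symbol='2330')  # 'EQUITY' is an assumed type value
    while not quote.is_closed:
        now = datetime.datetime.now().time()
        if quote.on_time <= now <= quote.off_time:
            pass  # a real loop would fetch quote.url and append to quote.quotes
        time.sleep(quote.tick)  # honor the 30-second tick between API calls
        # (a real loop would also set quote.is_closed once past off_time)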
Example #2
    def __init__(self):

        self.token = get_api_token()

        channel = grpc.insecure_channel(f'{get_grpc_hostname()}:6565')
        self.stub = database_pb2_grpc.DatabaseStub(channel)

        self.__reset__()
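Note that __reset__ is a project-defined helper rather than a Python special method; double-underscore names are conventionally reserved for the interpreter's own protocol methods, so a single-underscore _reset() would be the more idiomatic spelling.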
Example #3
    def __init__(self):

        channel = grpc.insecure_channel(f'{get_grpc_hostname()}:6565')
        self.stub = database_pb2_grpc.DatabaseStub(channel)

        self.parser = ReunionParser()
        self.parser.parse()

        # rank symbols: the first symbol in the parsed list gets the
        # highest weight, descending in steps of 10
        self.dict = {}
        n = len(self.parser.list) * 10
        for symbol in self.parser.list:
            self.dict[symbol] = n
            n -= 10
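The loop above assigns descending weights: the first symbol from ReunionParser scores highest, stepping down by 10. A self-contained illustration with made-up symbols (the real list comes from the parser):

    symbols = ['2330', '2317', '2454']  # sample data, not from ReunionParser
    weights = {s: (len(symbols) - i) * 10 for i, s in enumerate(symbols)}
    print(weights)  # {'2330': 30, '2317': 20, '2454': 10}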
Example #4
    def __init__(self):

        channel = grpc.insecure_channel(f'{get_grpc_hostname()}:6565')
        self.stub = database_pb2_grpc.DatabaseStub(channel)

        self.max_count = 12
        self.url = 'https://www.cnyes.com/twstock/a_institutional7.aspx'
        # XPaths into the institutional-investor table on cnyes.com
        self.symbol_xpath = "//*[contains(@class, 'fLtBx')]//tbody//tr//td[1]//a"
        self.foreign_xpath = "//*[contains(@class, 'fLtBx')]//tbody//tr//td[3]"
        self.quantity_xpath = "//*[contains(@class, 'fLtBx')]//tbody//tr//td[6]"
        self.date_xpath = "//*[contains(@class, 'tydate')]"
        self.dict = {}
        self.date = None
        self.model = 'twse_over_bought'
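A sketch of how these XPaths could be evaluated; the class body that performs the fetch is not shown, so the use of requests and lxml here is an assumption:

    import requests
    from lxml import html

    url = 'https://www.cnyes.com/twstock/a_institutional7.aspx'
    page = html.fromstring(requests.get(url).text)
    # same expressions as symbol_xpath / quantity_xpath above
    symbols = page.xpath("//*[contains(@class, 'fLtBx')]//tbody//tr//td[1]//a")
    quantities = page.xpath("//*[contains(@class, 'fLtBx')]//tbody//tr//td[6]")
    for anchor, qty in zip(symbols, quantities):
        print(anchor.text_content(), qty.text_content())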
Example #5
    def update_db(self):

        if self.dict:
            print('==> update db')
            try:
                channel = grpc.insecure_channel(f'{get_grpc_hostname()}:6565')
                stub = database_pb2_grpc.DatabaseStub(channel)
                # stream one Stock message per dict entry to the server
                rowcount = stub.upsert_stocks(
                    (Stock(symbol=key, name=value)
                     for key, value in self.dict.items()))
                print(rowcount)
            except grpc.RpcError as e:
                status_code = e.code()
                print(e.details())
                print(status_code.name, status_code.value)

        return self
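Passing a generator expression to upsert_stocks suggests a client-streaming RPC on the Database service (the .proto is not shown here, so that is an inference): if so, each Stock message is serialized and sent as the generator yields it, and the whole batch never has to be materialized in memory at once.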
Example #6
    def __init__(self):

        # step 0
        # setup grpc connection
        channel = grpc.insecure_channel(f'{get_grpc_hostname()}:6565')
        self.stub = database_pb2_grpc.DatabaseStub(channel)

        # step 1
        # get stock symbols and names for constructing custom dict later
        stocks = self.stub.get_stocks(Empty())
        # stocks is a streaming iterator; materializing it here (e.g. via
        # len(list(stocks))) would exhaust it before the loop below runs
        self.custom_dict = {}
        self.reverse_custom_dict = {}
        self.custom_words = []
        for stock in stocks:
            self.custom_dict[stock.symbol] = stock.name
            self.reverse_custom_dict[stock.name] = stock.symbol
            self.custom_words.append(stock.symbol)
            self.custom_words.append(stock.name)

        # step 2
        # for removing meaningless words later
        # prevent them from being treated as stock symbols
        self.excludes = ['2020', '2021', '2022', '2023', '2024', '2025', 'DDD', 'VVV', 'RRR', 'ALL']

        # step 3
        # parse ptt articles
        self.parser = PttParser('Stock')
        # article title + pushes
        self.sentence_list = self.parser.get_sentence_list_without_content()
        # article title only
        self.title_list = self.parser.get_title_list()

        # step 4
        # tokenization with jieba
        self.pipeline = JiebaPipeline()
        # calculate word freq for title + pushes
        self.pipeline \
            .set_custom_dict(self.custom_words) \
            .tokenize(self.sentence_list) \
            .remove_words_from_token_list(self.excludes) \
            .keep_words_from_token_list(self.custom_words) \
            .count_tokens()
        self.word_freq = deepcopy(self.pipeline.token_freq)
        print('<== word freq ==>')
        print(self.word_freq)

        # step 5-1
        # word freq for title only
        self.pipeline \
            .tokenize(self.title_list) \
            .remove_words_from_token_list(self.excludes) \
            .keep_words_from_token_list(self.custom_words) \
            .count_tokens()
        self.title_word_freq = deepcopy(self.pipeline.token_freq)

        # step 5-2
        # if an article's title contains a keyword,
        # add that article's push count to the keyword's score
        self.title_word_freq = dict(self.title_word_freq)  # (word, count) pairs -> dict
        for key in self.title_word_freq:
            for article in self.parser.get_articles():
                if key in article.title:
                    self.title_word_freq[key] += article.push_count

        # step 5-3
        # sort title_word_freq
        self.title_word_freq = sorted(self.title_word_freq.items(), key=lambda x: x[1], reverse=True)
        print('<== title word freq ===>')
        print(self.title_word_freq)

        # step 6-1
        # aggregate word_freq and title_word_freq
        self.aggregate_word_freq = {}
        for item in self.word_freq + self.title_word_freq:
            if item[0] in self.aggregate_word_freq:
                self.aggregate_word_freq[item[0]] += item[1]
            else:
                self.aggregate_word_freq[item[0]] = item[1]

        # step 6-2
        # sort aggregate_word_freq
        self.aggregate_word_freq = sorted(self.aggregate_word_freq.items(), key=lambda x: x[1], reverse=True)
        #print(self.aggregate_word_freq)

        # step 7-1
        # normalize word freq
        self.normalize_word_freq = {}
        for item in self.aggregate_word_freq:
            if item[0] in self.custom_dict:
                if item[0] in self.normalize_word_freq:
                    self.normalize_word_freq[item[0]] += item[1]
                else:
                    self.normalize_word_freq[item[0]] = item[1]
            elif item[0] in self.reverse_custom_dict:
                if self.reverse_custom_dict[item[0]] in self.normalize_word_freq:
                    self.normalize_word_freq[self.reverse_custom_dict[item[0]]] += item[1]
                else:
                    self.normalize_word_freq[self.reverse_custom_dict[item[0]]] = item[1]

        # step 7-2
        # sort normalize word freq
        self.normalize_word_freq = sorted(self.normalize_word_freq.items(), key=lambda x: x[1], reverse=True)
        print('<== normalized aggregated word freq ==>')
        print(self.normalize_word_freq)
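JiebaPipeline is project-specific, but steps 4 and 5-1 map onto plain jieba plus collections.Counter. A rough standalone sketch, with the keep/remove semantics inferred from the method names and made-up sample strings:

    import jieba
    from collections import Counter

    custom_words = ['2330', '台積電']  # assumed sample dictionary entries
    excludes = ['2021']

    for w in custom_words:
        jieba.add_word(w)  # plays the role of set_custom_dict()

    tokens = []
    for sentence in ['台積電 2330 盤後心得 2021']:
        tokens.extend(jieba.cut(sentence))  # tokenize()

    tokens = [t for t in tokens if t not in excludes]  # remove_words_from_token_list()
    tokens = [t for t in tokens if t in custom_words]  # keep_words_from_token_list()
    token_freq = Counter(tokens).most_common()  # count_tokens() -> [(word, n), ...]
    print(token_freq)  # e.g. [('台積電', 1), ('2330', 1)]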
Example #7
import asyncio
from datetime import date, timedelta, datetime

import grpc

from api.protos import database_pb2_grpc
from api.protos.database_pb2 import Symbol
from api.protos.protobuf_datatype_utils import datetime_to_timestamp
from quote.fugle import Fugle
from quote.utils import get_grpc_hostname

if __name__ == '__main__':

    channel = grpc.insecure_channel(f'{get_grpc_hostname()}:6565')
    stub = database_pb2_grpc.DatabaseStub(channel)

    over_boughts = []
    over_solds = []
    _datetime = datetime.now()
    retry = 0
    max_retry = 7

    # walk back one day at a time until both queries return rows
    # (skips weekends and holidays), giving up after max_retry days
    while len(over_boughts) == 0 or len(over_solds) == 0:
        print(f'trying to get data for datetime = {_datetime}')
        res = stub.query_twse_over_bought_by_date(datetime_to_timestamp(_datetime))
        over_boughts = list(res)
        res = stub.query_twse_over_sold_by_date(datetime_to_timestamp(_datetime))
        over_solds = list(res)
        _datetime -= timedelta(days=1)
        retry += 1
        if retry > max_retry:
            break