Example #1
    def start(self):
        self.trading_engine.start()

        counter = 0
        while True:
            start_time = time.time()
            logger.info(
                "-------count: {}, algos:{}--------".format(counter, self.algos.keys()))
            for algo in self.algos:
                self.algos[algo].prerun(t=datetime.now(timezone.utc))
                orders = self.algos[algo].run()
                logger.info(
                    "algo: {}, orders: {}".format(
                        algo, [
                            o.__dict__ for o in orders]))
                self._trade(orders)
            counter += 1
            spend_sec = time.time() - start_time
            sleep_sec = self.run_freq_s - spend_sec
            if sleep_sec < 0:
                logger.warning("algo run time + trading time > run_freq_s: {} > {}, "
                               "try to use larger run_freq_s to fix it".format(spend_sec, self.run_freq_s))
                sleep_sec = 0
            else:
                logger.info(
                    "algo run time + traing time: {}, sleep time: {}".format(spend_sec, sleep_sec))
            time.sleep(sleep_sec)
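
The loop above is a simple fixed-interval scheduler: measure how long one pass over the algos took, then sleep for the remainder of run_freq_s, warning when a pass overruns the interval. A minimal sketch of that timing pattern in isolation; run_once is a hypothetical stand-in, while run_freq_s mirrors the attribute used above.

import time
import logging

logger = logging.getLogger(__name__)

def run_loop(run_once, run_freq_s):
    """Call run_once() roughly every run_freq_s seconds, warning on overruns."""
    while True:
        start_time = time.time()
        run_once()
        spend_sec = time.time() - start_time
        sleep_sec = run_freq_s - spend_sec
        if sleep_sec < 0:
            # a single pass took longer than the configured interval
            logger.warning(
                "run time > run_freq_s: {} > {}".format(spend_sec, run_freq_s))
            sleep_sec = 0
        time.sleep(sleep_sec)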
Example #2
 def try_report(self, data):
     if not self.is_send:
         return
     try:
         is_ok, err_str = train_data_report(**data)
     except Exception as err:
         logger.warning(
             'Exception: could not reach RemoteMonitor, exception:{}'.
             format(err))
     else:
         if not is_ok:
             logger.warning("report_train_data error:{}".format(err_str))
Example #3
 def getLast(self):
     ids = []
     for id in [10, 11, 12, 13]:
         url = "https://sportsbox.sa.gov.tw/material/list/" + str(id)
         res = self.session.request("GET", url)
         if res is None:
             logger.warning(url + " returned no response")
             return
         soup = BeautifulSoup(res.content, "html.parser")
         href = soup.select_one(
             "div.itemBox.unit3_v.rounded.box_shadow.itemCard a")["href"]
         ids.append(int(href.replace("/material/detail/", "")))
     return max(ids)
Example #4
 async def job(self, id):
     url = "https://sportsbox.sa.gov.tw/material/detail/" + str(id)
     res = self.session.request("GET", url)
     if res is None:
         logger.warning(url + " returned no response")
         return None
     soup = BeautifulSoup(res.content, "html.parser")
     title = soup.select_one("div.article_titleBox div.h4")
     if title is None:
         logger.warning(url + " skipped because title was not found")
         return None
     logger.info("GET data: " + url)
     p = self.parser(soup, title.text, url)
     return p
Example #5
def stage3():
    answer = input(
        '\nAre you running in a large-scale manner (more than 5 machines in total)? [y/n] '
    )
    if answer == 'y':
        answer = input(
            '\nHave you followed the "A caveat for running in large scale" section in README? (i.e. you are using my public AMI) [y/n] '
        )
        if answer != 'y':
            logger.warning('Please follow its instructions before proceeding.')
            exit(0)
        else:
            cmds = [
                'cd ~/heart_watch && pip3 install -r requirements.txt',
                'cd ~/heart_watch && wget http://apache.mirrors.pair.com/kafka/2.2.0/kafka_2.12-2.2.0.tgz',
                'cd ~/heart_watch && tar -xzf kafka_2.12-2.2.0.tgz && mv kafka_2.12-2.2.0 kafka',
            ]
    else:
        cmds = [
            'curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -',
            'sudo add-apt-repository \'deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial stable\'',
            'sudo add-apt-repository --remove -y ppa:andrei-pozolotin/maven3',
            'sudo apt-get -y update',
            'apt-cache policy docker-ce',
            'sudo kill -9 $(ps aux | grep \'dpkg\' | awk \'{print $2}\')',
            'sudo kill -9 $(ps aux | grep \'apt\' | awk \'{print $2}\')',
            'sudo killall -r dpkg',
            'sudo killall -r apt',
            'sudo dpkg --configure -a',
            'sudo apt-get install -y docker-ce python3-pip libpq-dev python-dev maven awscli',
            'sudo usermod -aG docker ubuntu',
            'cd ~/heart_watch && pip3 install -r requirements.txt',
            'cd ~/ && wget http://apache.mirrors.pair.com/kafka/2.2.0/kafka_2.12-2.2.0.tgz',
            'cd ~/ && tar -xzf kafka_2.12-2.2.0.tgz && mv kafka_2.12-2.2.0 kafka',
            'unzip confluent-3.0.0-2.11.zip && mv confluent-3.0.0 confluent',
            'cd ~/heart_watch && pip3 install -r requirements.txt',
        ]
    for cmd in cmds:
        parallel([
            'peg sshcmd-cluster {} "{}"'.format(key, cmd) for key in CLUSTERS
        ])
    logger.info("""
        If you responded that you are NOT running in large scale manner, please manually ssh to each machine and run the following commands

        sudo curl -L https://github.com/docker/compose/releases/download/1.24.1/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
        sudo chmod +x /usr/local/bin/docker-compose

        Then log out and log back again. Then make sure than you can run docker-compose. If you can't, please manually fix it.
    """)
Example #6
 def request(self, method, url, data=None, delay=0, title=None):
     for i in range(RETRY_CNT):
         try:
             if delay: time.sleep(delay)
             return self.session.request(method,
                                         url,
                                         allow_redirects=False,
                                         data=data,
                                         timeout=self.timeout)
         except (requests.HTTPError, requests.Timeout,
                 requests.ConnectionError) as e:
             logger.warning('Warning: {0}, retrying({1}) ...'.format(
                 str(e), i))
     logger.error("can't get res: {}".format(title or url))
     return None
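
The method above assumes a wrapper class that owns a requests.Session, a timeout, and a RETRY_CNT constant. A minimal sketch of that surrounding context; the class name and the default values here are assumptions:

import logging
import requests

logger = logging.getLogger(__name__)
RETRY_CNT = 3  # assumed retry budget

class HttpClient:
    def __init__(self, timeout=10):
        # a shared Session reuses connections across retries
        self.session = requests.Session()
        self.timeout = timeout

With the request method above attached to such a class, a caller gets back either a requests.Response or None after RETRY_CNT failed attempts.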
Example #7
    async def job(self, id):
        url = "https://ed.arte.gov.tw/ch/content/m_design_content.aspx?AE_SNID=" + str(
            id)
        res = self.session.request("GET", url)
        if res is None:
            logger.warning(url + " returned no response")
            return None
        soup = BeautifulSoup(res.content, "html.parser")
        title_tag = soup.select_one(
            "div.title_wrapper h3.animate.title-c1.title_icon_news")
        if title_tag is None or len(title_tag.text) == 0:
            logger.warning(url + " skipped because title was not found")
            return None
        title = title_tag.text
        logger.info("GET data: " + url)

        p = self.parser(soup, title, url)
        return p
Example #8
 def _polygon_get_prices(self, symbols, freq, start_datetime, end_datetime):
     df_dict = {}
     for symbol in symbols:
         try:
             cur_df = self.polygon.get_historical_data(
                 symbol=symbol,
                 freq=freq,
                 start_date_str=start_datetime.strftime(
                     PolygonGateway.DATE_FMT),
                 end_date_str=end_datetime.strftime(
                     PolygonGateway.DATE_FMT),
                 unadjusted=False)
         except PolygonRequestException as err:
              logger.warning(
                  "polygon error when loading data, err: {}".format(err))
             continue
         gq_cur_df = data_polygon_to_goquant(cur_df)
         df_dict[symbol] = gq_cur_df
     return df_dict
Example #9
    async def job(self, id) -> Plan:
        def func(page_type):
            url = f"https://mlearn.moe.gov.tw/{page_type}/PartData?key={str(id)}"
            res = self.session.request("GET", url)
            if res is None:
                # the request helper returns None after exhausting its retries
                return url, None, None
            soup = BeautifulSoup(res.content, "html.parser")
            title = soup.select_one("div.container.mt-3 h2")
            if title is None:
                return url, soup, None
            return url, soup, title

        url, soup, title = func("TeachingPlan")
        if title is None:
            url, soup, title = func("TopicArticle")
        if title is None:
            logger.warning(url + " skipped because title was not found")
            return None

        logger.info("GET data: " + url)
        p = self.parser(soup, title.text, url)
        return p
Example #10
    def create(self):
        db_connect_url_jdbc = 'jdbc:postgresql://{}:5432/postgres'.format(
            getenv('DB_HOST'))

        payload = {
            'name': self.name,
            'config': {
                'connector.class':
                'io.confluent.connect.jdbc.JdbcSourceConnector',
                'connection.url': db_connect_url_jdbc,
                'connection.user': '******',
                'connection.password': '******',
                'poll.interval.ms': self._poll_interval,
                'numeric.mapping': 'best_fit',
                'mode': 'bulk',
                'transforms': 'createKey,extractInt',
                'transforms.createKey.type':
                'org.apache.kafka.connect.transforms.ValueToKey',
                'transforms.createKey.fields': self._keyfield,
                'transforms.extractInt.type':
                'org.apache.kafka.connect.transforms.ExtractField$Key',
                'transforms.extractInt.field': self._keyfield,
            }
        }
        if self._query:
            payload['config']['query'] = self._query
            payload['config']['topic.prefix'] = self._topic.name
            self._topic.create()
        else:
            logger.warning(
                '"query" parameter is not specified. All messages will be sent to a single partition.'
            )
            payload['config']['table.whitelist'] = self._table_name
            payload['config']['topic.prefix'] = util.naming.jdbc_topic_prefix()
        if self._get():
            self._delete()
        self._create(payload)
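
self._create(payload) is not shown here. With Kafka Connect, a connector payload like the one built above is typically registered by POSTing it to the Connect REST API; below is a minimal sketch under that assumption (the host and port are assumptions, not taken from this project):

import requests

def create_connector(payload, connect_url="http://localhost:8083"):
    # Kafka Connect REST API: POST /connectors with {"name": ..., "config": {...}}
    resp = requests.post(
        connect_url + "/connectors",
        json=payload,
        headers={"Content-Type": "application/json"},
    )
    resp.raise_for_status()
    return resp.json()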
Example #11
    def load_df(self, data_key):
        if not self.check_data_key(data_key):
            logger.warning(
                "missing data key in cache, dt: {}".format(data_key))
            return None

        snapshot_path = self.get_data_snapshot_path(data_key)
        data_files = [
            join(snapshot_path, f) for f in os.listdir(snapshot_path)
            if isfile(join(snapshot_path, f))
        ]
        df = None
        for filepath in data_files:
            logger.debug("loading data from file: {}".format(filepath))
            cur_df = pd.read_csv(filepath, index_col=False)
            if df is None:
                df = cur_df
            else:
                df = pd.concat([df, cur_df], sort=False)
        if df is not None and df.shape[0] > 0:
            df[DATA_DATETIME] = pd.to_datetime(df[DATA_DATETIME])
            df = df.drop_duplicates()
            df.set_index(DATA_DATETIME, inplace=True)
        return df
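
Concatenating inside the loop works, but pandas is usually faster when pd.concat is called once over a list of frames. A short sketch of the same load-and-merge flow written that way; the datetime_col default is an assumption, since the value of the DATA_DATETIME constant is not shown above.

import os
from os.path import isfile, join

import pandas as pd

def load_snapshot(snapshot_path, datetime_col="datetime"):
    # read every file in the snapshot directory, then merge once
    files = [join(snapshot_path, f) for f in os.listdir(snapshot_path)
             if isfile(join(snapshot_path, f))]
    frames = [pd.read_csv(f, index_col=False) for f in files]
    if not frames:
        return None
    df = pd.concat(frames, sort=False)
    df[datetime_col] = pd.to_datetime(df[datetime_col])
    return df.drop_duplicates().set_index(datetime_col)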
Example #12
def book_logger(title, msg):
    with open(LOG_FILE_NAME + ".book.log", 'a', encoding='utf-8') as f:
        f.write(title + "," + msg + "\n")
    logger.warning(title + "," + msg)
Example #13
def on_capture_failure():
    logger.warning('Task failed')
Example #14
 def shutdown(signal, frame):
     logger.warning("Received signal %s: exiting", signal)
     sys.exit(128 + signal)
Example #15
def run(config):
    rank = MPI.COMM_WORLD.Get_rank()
    config.rank = rank
    config.is_chef = rank == 0
    config.seed = config.seed + rank
    config.num_workers = MPI.COMM_WORLD.Get_size()
    config.is_mpi = config.num_workers > 1

    if torch.get_num_threads() != 1:
        fair_num_threads = max(
            int(torch.get_num_threads() / MPI.COMM_WORLD.Get_size()), 1
        )
        torch.set_num_threads(fair_num_threads)

    if config.is_chef:
        logger.warning("Running a base worker.")
        make_log_files(config)
    else:
        logger.warning("Running worker %d and disabling logger", config.rank)
        logger.setLevel(CRITICAL)

        if config.date is None:
            now = datetime.now()
            date = now.strftime("%m.%d")
        else:
            date = config.date
        config.run_name = "rl.{}.{}.{}.{}".format(
            config.env, date, config.prefix, config.seed - rank
        )
        if config.group is None:
            config.group = "rl.{}.{}.{}".format(config.env, date, config.prefix)

        config.log_dir = os.path.join(config.log_root_dir, config.run_name)
        if config.is_train:
            config.record_dir = os.path.join(config.log_dir, "video")
        else:
            config.record_dir = os.path.join(config.log_dir, "eval_video")

    def shutdown(signal, frame):
        logger.warning("Received signal %s: exiting", signal)
        sys.exit(128 + signal)

    signal.signal(signal.SIGHUP, shutdown)
    signal.signal(signal.SIGINT, shutdown)
    signal.signal(signal.SIGTERM, shutdown)

    # set global seed
    np.random.seed(config.seed)
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed_all(config.seed)

    os.environ["DISPLAY"] = ":1"

    if config.gpu is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(config.gpu)
        assert torch.cuda.is_available()
        config.device = torch.device("cuda")
    else:
        config.device = torch.device("cpu")

    # build a trainer
    trainer = Trainer(config)
    if config.is_train:
        trainer.train()
        logger.info("Finish training")
    else:
        trainer.evaluate()
        logger.info("Finish evaluating")
Example #16
def update(feedid: int) -> bool:
    buildinfo = BuildInfo.return_item(feedid)
    scheduler.add_job(update,
                      'interval',
                      args=[feedid],
                      id='feed{}'.format(feedid),
                      replace_existing=True,
                      minutes=int(buildinfo.check_interval))

    rule = Ruleset.return_item(feedid)  # type: Ruleset
    _rmodified = False
    # print("Auth:", rset.getauth, rset.auth1, rset.auth1param)
    # if rset.getauth != '':
    #     _auth = {rset.auth1: rset.auth1param, rset.auth2: rset.auth2param,
    #              rset.auth3: rset.auth3param, rset.auth4: rset.auth4param,
    #              rset.auth5: rset.auth5param, }
    #
    #     makeOpenerWithCookie(rset.getauth, _auth)
    # else:
    makeOpenerWithoutCookie(rule.listUrl)

    # optional
    encoding = rule.encoding
    if encoding == '': encoding = 'utf-8'

    if not rule.maxcheckpage or rule.maxcheckpage < 1:
        logger.warning("*- rset.maxcheckpage is None, set default")
        maxpage = 1
    else:
        maxpage = rule.maxcheckpage

    if not rule.nextpagelink or not len(rule.nextpagelink):
        maxpage = 1

    logger.info("*- feed initializing....: feed_no. {}".format(feedid))

    item_list, listpage = get_pageitems(rule, maxpage, encoding)

    list_url = rule.listUrl
    item_list = list(
        map((lambda i: get_absolute_anchor_reference(list_url, i)), item_list))

    if rule.encoding == '':
        rule.encoding = listpage.original_encoding
        _rmodified = True

    # update_feed and update BuildInfo.lastBuildDate
    if update_feed(feedid, rule, item_list):
        buildinfo.lastBuildDate = time.strftime('%a, %d %b %Y %H:%M:%S GMT',
                                                time.gmtime())
    else:
        logger.error("feed 생성에 실패하였음")
        return False
    # even if the feed list was not updated, still refresh pub_date
    buildinfo.pub_date = time.strftime('%a, %d %b %Y %H:%M:%S GMT',
                                       time.gmtime())

    if _rmodified:
        # rule.save()

        db_session.commit()
    return True