Example 1
    def fetch_ohlcv(self, timeframe, since, instrument):
        # add one time interval because the Bitmex API returns close times instead of open times
        closeDate = since + timedelta(timeframe)
        try:
            result = self._client.Trade.Trade_getBucketed(
                symbol=instrument,
                reverse=False,
                count=self._limit,
                binSize=timeframe,
                startTime=closeDate).result()[0]
        except bravado.exception.HTTPTooManyRequests as err:
            print("To many requests, try again later.")
            result = []

        candles = [candle for candle in result
                   if candle["open"]]  # Filter bad data

        # compute open and close times of each candle
        for candle in candles:
            candle["open_datetime_utc"] = candle['timestamp'] - timedelta(
                timeframe)
            candle["close_datetime_utc"] = candle['timestamp'] - timedelta(
                '1s')
            candle["trade_count"] = candle["trades"]

        return candle_list_to_dataframe(candles)
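
Note: the timedelta(timeframe) and timedelta('1s') calls above are not the standard-library constructor (which would reject a string); they parse timeframe strings such as '1m' or '1h' into durations. A minimal sketch of such a parser, assuming the suffixes s/m/h/d (the exact unit set is an assumption, not this project's code):

# Hypothetical parser for timeframe strings like '5m'; the supported
# suffixes and their meanings are assumptions.
import datetime

_UNITS = {"s": "seconds", "m": "minutes", "h": "hours", "d": "days"}

def timedelta(timeframe):
    value, unit = int(timeframe[:-1]), timeframe[-1]
    return datetime.timedelta(**{_UNITS[unit]: value})

For example, timedelta('1h') yields datetime.timedelta(hours=1), which is what the open/close time arithmetic above relies on.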
Example 2
    def AddFromForm(self, form):
        success = True

        if self.id is None:
            isNew = True 
            db.session.add(self)
            db.session.commit()
        else:
            isNew = False


        self.title = form.title.data 
        self.begin_time = form.begin_time.data - timedelta(hours = 8)
        self.end_time = form.end_time.data - timedelta(hours = 8)

        self.announcement = form.announcement.data

        #check 
        if self.begin_time <= datetime.datetime.utcnow():
            success = False 
            form.begin_time.errors.append("Begin Time must not be earlier than the current time")

        if self.end_time <= self.begin_time:
            success = False 
            form.end_time.errors.append("End Time must not be earlier than Begin Time")

        if not success and isNew:
            db.session.delete(self)
            db.session.commit()
            return success
        
        for cp in self.contest_problems.all():
            db.session.delete(cp)

        problem_id_list = map(int, form.problem_list.data.split(','))
        for index, id in enumerate(problem_id_list):
            prob = Problem.query.get(id)
            if prob is not None:
                cp = ContestProblem(contest_id = self.id, problem_id = id, problem_relative_id = index)
                db.session.add(cp)
            else:
                success = False
                form.problem_list.errors.append("Some problems may not exist")

        try:
            db.session.commit()
        except IntegrityError:
            db.session.rollback()
            success = False

        if not success and isNew:
            db.session.delete(self)
            db.session.commit()

        if success:
            db.session.add(self)
            db.session.commit()

        return success
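
The timedelta(hours = 8) subtractions convert the form's local (UTC+8) wall-clock datetimes into the naive UTC values that datetime.datetime.utcnow() produces, so the validity checks compare like with like. A minimal illustration of the same conversion:

# Convert a naive UTC+8 wall-clock time to naive UTC before comparing
# it against datetime.datetime.utcnow().
import datetime

local_begin = datetime.datetime(2020, 1, 1, 20, 0)     # 20:00 in UTC+8
utc_begin = local_begin - datetime.timedelta(hours=8)  # 12:00 UTC
print(utc_begin <= datetime.datetime.utcnow())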
Example 3
def filter_events(events, indx_date, deliverables_path):
    
    '''
    Consider an observation window (2000 days) and prediction window (30 days). Remove
    the events that occur outside the observation window.

    Suggested steps:
    1. Join indx_date with events on patient_id
    2. Filter events occurring in the observation window (IndexDate-2000 to IndexDate)
    
    Inputs: events is the return value of the read_csv function; indx_date is the return value of the calculate_index_date function
    Return filtered_events 
    
    IMPORTANT:
    Save filtered_events to a csv file in the deliverables folder named as etl_filtered_events.csv. 
    Use the global variable deliverables_path while specifying the filepath. 
    Each row is of the form patient_id, event_id, value.
    The csv file should have a header 
    '''
    
    #Join indx_date with events on patient_id
    joined_events = pd.merge(events, indx_date, on=['patient_id'], how = 'left')
    
    #Filter events occurring in the observation window (IndexDate-2000 to IndexDate)
    mask = np.logical_and(joined_events.timestamp <= joined_events.indx_date,
                          joined_events.timestamp >= joined_events.indx_date - utils.timedelta(2000))
    filtered_events = joined_events.loc[mask, :]
    
    #Save filtered_events to a csv file in the deliverables folder named as etl_filtered_events.csv
    filtered_events.to_csv(deliverables_path + 'etl_filtered_events.csv', columns=['patient_id', 'event_id', 'value'], index=False)
    
    return filtered_events
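
utils.timedelta(2000) is a project helper; given that it bounds a 2000-day observation window, it presumably wraps the standard constructor with a day count. A sketch under that assumption:

# Hypothetical utils.timedelta: a day-count wrapper around the standard
# library, assumed from the 2000-day window above.
import datetime

def timedelta(days):
    return datetime.timedelta(days=days)

Subtracting such a value from a pandas datetime column, as in the mask above, shifts every timestamp back by the given number of days.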
Example 4
    def reschedule_next_adventure(self):
        # This method reschedules the next adventure if config time changes.
        # You are expected to call this on all players if you want to update them.
        minimum = config.delay_min
        maximum = config.delay_max
        minutes = utils.randfloat(minimum, maximum)

        self.scheduled_adventure = self.last_adventure + utils.timedelta(
            minutes=minutes)
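
utils.randfloat, utils.timedelta, and (in Example 7) utils.now mirror standard-library behaviour closely, so they are plausibly thin wrappers. A sketch of such a utils module, assuming that reading:

# Hypothetical utils module; all three helpers are assumed to be thin
# wrappers, which is consistent with the call sites but not confirmed.
import datetime
import random

timedelta = datetime.timedelta  # re-export so utils.timedelta(minutes=...) works

def now():
    return datetime.datetime.utcnow()

def randfloat(minimum, maximum):
    return random.uniform(minimum, maximum)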
Example 5
 def _get_nearest(self, dateStr, dateObj, direction):
    # only call this method if you are sure the requested date passes `self.dateWasLoaded`
    if not self._loaded:
       raise RuntimeError('Data not loaded yet')
    if dateStr in self.__cache:
       return dateStr, self.__cache[dateStr]
    # this date has no data, so we switch to the next/previous date that has results
    d=timedelta(days=direction)
    while True:
       dateObj+=d
       dateStr=dateObj.strftime('%Y-%m-%d')
       if dateStr in self.__cache:
          return dateStr, self.__cache[dateStr]
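
With direction set to 1 or -1, timedelta(days=direction) turns the loop into a day-by-day scan forward or backward through the cache. The stepping idiom in isolation:

# Step a date string one day at a time in either direction.
from datetime import date, timedelta

d = timedelta(days=-1)           # direction = -1: scan backwards
day = date(2021, 3, 1)
day += d
print(day.strftime('%Y-%m-%d'))  # 2021-02-28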
Example 6
    def fetch_ohlcv(self, timeframe, since, instrument):
        # Binance includes the current (unfinished) bar in the fetched data, so we compute endTime to exclude it
        endTime = compute_end_timestamp(self.get_utc_time(), timeframe)
        td = timedelta(timeframe)
        td_1s = timedelta('1s')
        result = self._client.get_product_historic_rates(
            instrument,
            granularity=int(td.total_seconds()),
            start=since.timestamp())
        # Coinbase Pro sends candles from newest to oldest, so we reverse them
        result = reversed(result)

        candles = [{
            "open_datetime_utc": datetime.fromtimestamp(int(data[0]), timezone.utc),
            "close_datetime_utc": datetime.fromtimestamp(int(data[0]), timezone.utc) + td - td_1s,
            "open": data[3],
            "high": data[2],
            "low": data[1],
            "close": data[4],
            "volume": data[5],
        } for data in result]

        # We filter candles manually because the Coinbase Pro API might return candles outside the requested [since, endTime) range
        candles = [c for c in candles if since <= c['open_datetime_utc'] < endTime]

        return candle_list_to_dataframe(candles)
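
compute_end_timestamp is not shown in these snippets; from its uses here and in Example 11, it appears to floor a time to a bar boundary so the current (unfinished) bar can be excluded. A hypothetical sketch, assuming epoch-aligned bars; the helper's exact contract is an assumption:

# Hypothetical compute_end_timestamp: floor `when` to the open of the
# bar that contains it. Epoch alignment is an assumption.
from datetime import datetime, timezone

_UNIT_SECONDS = {"s": 1, "m": 60, "h": 3600, "d": 86400}

def compute_end_timestamp(when, timeframe):
    seconds = int(timeframe[:-1]) * _UNIT_SECONDS[timeframe[-1]]  # e.g. '5m' -> 300
    floored = int(when.timestamp()) // seconds * seconds
    return datetime.fromtimestamp(floored, timezone.utc)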
Example 7
async def check_players_for_adventure():
    # Embarks any player that needs an adventure.
    if world.players is None or len(world.players) == 0:
        return

    for player_id, player_instance in world.players.items():
        if is_player_online(player_id):
            player_instance.last_online = utils.now()

        if player_instance.paused is True:
            continue
        if player_instance.last_online + utils.timedelta(minutes=config.keep_online) < utils.now():
            continue
        if player_instance.scheduled_adventure is None or player_instance.scheduled_adventure < utils.now():
            message = player_instance.embark()
            await broadcast(message)
Example 8
 def _get_nearest_date(self, direction, date):
     dateStep = direction * self._loader.direction if direction else self._loader.direction
     if date is not None and direction:
         date = (to_date(date) + timedelta(days=dateStep)).strftime('%Y-%m-%d')
         if date in self._data:
             dialog = 0 if dateStep < 0 else len(self._data[date]) - 1
             msg = None if direction > 0 or not self.isOpened(date, dialog) \
                 else self._data[date][dialog].messageCount - 1
             return date, dialog, msg
     date, data = self._loader.get(date, dateStep)
     if data is False:
         return None, 0, None
     elif date not in self._data:
         print(f'DIALOG_CACHE_UPDATE {date}')
         self._data[date] = tuple(
             WidgetPlaceholderEx(DialogHeader(None, o)) for o in data)
     dialog = 0 if dateStep < 0 else len(self._data[date]) - 1
     msg = None if direction > 0 or not self.isOpened(date, dialog) \
         else self._data[date][dialog].messageCount - 1
     return date, dialog, msg
Example 9
def calculate_index_date(events, mortality, deliverables_path):
    
    '''
    Index date: The day on which mortality is to be predicted. Index date is evaluated as follows:
        For deceased patients: Index date is 30 days prior to the death date (timestamp field)
        in data/train/mortality events.csv.
        For alive patients: Index date is the last event date in data/train/events.csv for
        each alive patient.
        
    Inputs are the return values of the read_csv function
    Return indx_date
    
    IMPORTANT:
    Save indx_date to a csv file in the deliverables folder named as etl_index_dates.csv. 
    Use the global variable deliverables_path while specifying the filepath. 
    Each row is of the form patient_id, indx_date.
    The csv file should have a header  
    '''
    
    #Last event date 
    alive_date = pd.DataFrame({'timestamp' : events.groupby('patient_id').timestamp.max()}).reset_index()
    
    #30 days prior to the death date for deceased patients
    deceased_date = mortality[["patient_id", "timestamp"]].copy()  # copy to avoid SettingWithCopyWarning
    deceased_date["timestamp"] = deceased_date["timestamp"] - utils.timedelta(30)
    
    #Last event date for alive patients
    still_alive_date = alive_date[~alive_date.patient_id.isin(deceased_date.patient_id)]
    
    #Combine both alive and deceased patients into one dataframe
    indx_date = pd.concat([still_alive_date, deceased_date], axis = 'rows')
    indx_date.columns = ['patient_id','indx_date']
    
    #Save indx_date to a csv file in the deliverables folder named as etl_index_dates.csv
    indx_date.to_csv(deliverables_path + 'etl_index_dates.csv', columns=['patient_id', 'indx_date'], index=False)
    
    return indx_date
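
Equivalently, the 30-day shift can be expressed with pandas' own Timedelta type, assuming the timestamp column is datetime-typed:

# Shift a datetime column back 30 days using pandas' Timedelta.
import pandas as pd

deceased_date = mortality[["patient_id", "timestamp"]].copy()
deceased_date["timestamp"] = pd.to_datetime(deceased_date["timestamp"]) - pd.Timedelta(days=30)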
Example 10
    def train(self):
        self.model.train()

        num_epochs = self.opts.num_epochs
        batch_size = self.opts.batch_size
        num_batch = math.ceil(self.opts.epoch_size / batch_size)
        total_steps = num_epochs * num_batch
        start_step = self.train_states["train_steps"]
        remain_steps = total_steps - start_step
        print("total_steps : {}".format(total_steps))

        # data
        train_iter = self.data.train_iterator(start_step % num_batch)

        if start_step == 0:
            print("{}\tStart training......".format(
                time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())))
        else:
            print("{}\tContinue training......".format(
                time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())))

        # timing
        step_stime = time.time()

        for _ in range(remain_steps):
            self.train_states["train_steps"] += 1

            var_dict = self._forward(train_iter, mode="train")
            cur_ce = var_dict["cur_ce"]
            cur_kld = var_dict["cur_kld"]
            kld_coeff = var_dict["kld_coeff"]
            cur_bow_loss = var_dict["cur_bow_loss"]

            cur_loss = cur_ce * self.opts.ce_coeff + cur_kld * kld_coeff + cur_bow_loss * self.opts.bow_coeff
            if torch.isnan(cur_loss):
                print("ERROR: NAN detected.")
                exit(0)
            self.optim.zero_grad()
            cur_loss.backward()
            nn.utils.clip_grad_norm_(self.model.parameters(),
                                     self.opts.max_gradient_norm)
            self.optim.step()
            self.learning_rate_decay()

            # recompute the loss with the un-annealed KLD term for logging
            cur_loss = cur_ce * self.opts.ce_coeff + cur_kld + cur_bow_loss * self.opts.bow_coeff
            var_dict["cur_loss"] = cur_loss

            # update train_states
            for k, v in var_dict.items():
                if k.startswith("cur_"):
                    if (self.train_states["train_steps"] - 1) % self.opts.logging_rate + self.opts.logging_window \
                            >= self.opts.logging_rate:
                        self.train_states[k]["step"] += v.item()
                    if (self.train_states["train_steps"] - 1) % self.opts.validation_rate + self.opts.validation_window \
                            >= self.opts.validation_rate:
                        self.train_states[k]["mean"] += v.item()

            for k, v in var_dict.items():
                if k.startswith("cur_"):
                    self.metrics["trnes_" + k[4:]].append(v.item())

            # logging
            if self.train_states["train_steps"] % self.opts.logging_rate == 0:
                duration = time.time() - step_stime
                tc = self.metrics["train_time"] + self.metrics["valid_time"] + duration
                rte = (total_steps - self.train_states["train_steps"]) * tc / self.train_states["train_steps"]

                print(
                    "Step {}  progress rate: {:.3f}  loss: {:.8f}  ppl: {:.6f}  "
                    "speed: {:.0f} samples per second  remaining (approximately): {}"
                    .format(
                        self.train_states["train_steps"],
                        self.train_states["train_steps"] / total_steps,
                        self.train_states["cur_loss"]["step"] /
                        self.opts.logging_window,
                        self.train_states["cur_ppl"]["step"] /
                        self.opts.logging_window,
                        self.opts.logging_rate * batch_size / duration,
                        timedelta(rte)))

                # update metrics
                self.metrics["train_steps"].append(
                    self.train_states["train_steps"])
                self.metrics["train_time"] += duration
                self.metrics["kl_coefficient"].append(kld_coeff)
                for k in var_dict.keys():
                    if k.startswith("cur_"):
                        self.metrics["train_" + k[4:]].append(
                            self.train_states[k]["step"] /
                            self.opts.logging_window)

                # update train_states
                for k in var_dict.keys():
                    if k.startswith("cur_"):
                        self.train_states[k]["step"] = 0.0

                # reset timing
                step_stime = time.time()

            del var_dict

            # valid
            if self.train_states["train_steps"] % self.opts.validation_rate == 0 or \
                    self.train_states["train_steps"] in self.opts.specific_validations:
                step_stime += self.validate()

            # after each epoch
            if self.train_states["train_steps"] % num_batch == 0:
                completed_epochs = self.train_states["train_steps"] // num_batch
                print(
                    "-------------------Epoch Completed %d/%d-------------------"
                    % (completed_epochs, num_epochs))

            # save states
            temp_stime = time.time()
            self.maybe_save_checkpoints()
            step_stime += time.time() - temp_stime

        if -1 in self.opts.specific_validations:
            self.validate()
        if -1 in self.opts.specific_checkpoints:
            if self.do_testing:
                self.test()
            self.uninterruptible_save(False)
        else:
            self.uninterruptible_save(True)

        print("{}\tFinished!".format(
            time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())))
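
The timedelta(rte) in the logging call renders the estimated remaining seconds; note that the standard constructor treats a bare positional argument as days, so this is presumably either a project wrapper or shorthand for timedelta(seconds=rte). A sketch of the likely intent:

# Render remaining seconds as H:MM:SS, assuming timedelta(rte) is meant
# as datetime.timedelta(seconds=rte).
import datetime

def format_eta(remaining_seconds):
    return str(datetime.timedelta(seconds=int(remaining_seconds)))

print(format_eta(4521.7))  # 1:15:21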
Example 11
def main():
    # pp = pprint.PrettyPrinter(indent=4)

    args = parse_cli_args()

    exchanges = {
        "bitmex": make_bitmex_exchange,
        "binance": make_binance_exchange,
        "coinbasepro": make_coinbasepro_exchange
    }

    if args.action == "list-exchanges":
        print(to_comma_separated_string(exchanges.keys()))
        exit(0)

    if args.exchange in exchanges.keys():
        exchange = exchanges[args.exchange]()
    else:
        print("Unsupported exchange {}".format(args.exchange))
        exit(-1)

    if args.action == "time":
        print(exchange.get_utc_time())
        exit(0)

    if args.action == "timestamp":
        print(exchange.get_utc_timestamp())
        exit(0)

    if args.action == "list-instruments":
        print(to_comma_separated_string(exchange.get_instruments()))
        exit(0)

    if args.action == "list-timeframes":
        print(to_comma_separated_string(exchange.get_timeframes()))
        exit(0)

    assert args.action == "fetch-ohlcv"

    ensure_mkdir(args.folder)

    timeframes = args.timeframes if args.timeframes else exchange.get_timeframes()
    instruments = args.instruments if args.instruments else exchange.get_instruments()

    exchange_timeframes = exchange.get_timeframes()
    exchange_instruments = exchange.get_instruments()
    exchange_time = exchange.get_utc_time()

    print("Exchange {} at time {}.".format(args.exchange, exchange_time))

    for instrument in instruments:
        if instrument not in exchange_instruments:
            print("[ERROR] Unsupported instrument {} for exchange {}.".format(
                instrument, args.exchange))
            continue

        print("-- Fetching data for instrument {}.".format(instrument))

        for tf in timeframes:
            if tf not in exchange_timeframes:
                print(
                    "[ERROR] Unsupported timeframe {} for exchange {}.".format(
                        tf, args.exchange))
                continue

            print("\t-- Fetching data for timeframe {}.".format(tf))

            path_to_csv_file = os.path.join(
                args.folder,
                args.exchange + "-" + instrument + "-" + tf + ".csv")

            since = origin_of_time
            if os.path.exists(path_to_csv_file):
                print(
                    "\t\t-- Loading existing history from file {} to get next timestamp."
                    .format(path_to_csv_file))
                df = pd.read_csv(path_to_csv_file,
                                 index_col='open_timestamp_utc')
                since = datetime.fromtimestamp(
                    df.close_timestamp_utc.values[-1], timezone.utc)

            next_open_date = compute_end_timestamp(since, tf) + timedelta('1s')
            if exchange_time < next_open_date:
                print(
                    "\t\t-- Exchange time is {} and next candle time is {}, no request needed."
                    .format(exchange_time, next_open_date))
                continue

            while True:
                print("\t\t-- Fetching candles since {}".format(since))
                df = exchange.fetch_ohlcv(timeframe=tf,
                                          since=since,
                                          instrument=instrument)

                if df.empty:
                    print(
                        "\t\t-- No candles received for timeframe {}, work is done."
                        .format(tf))
                    break
                else:
                    print("\t\t-- {} candles received.".format(len(df)))

                df.to_csv(path_to_csv_file,
                          index_label='open_timestamp_utc',
                          mode='a',
                          header=not os.path.exists(path_to_csv_file))
                since = datetime.fromtimestamp(
                    df.close_timestamp_utc.values[-1], timezone.utc)

                time.sleep(args.delay / 1000.0)  # avoid flooding the exchange API with requests
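
The loop appends each fetched batch to the CSV, writing the header only when the file does not yet exist, then resumes from the last saved close timestamp. The append-or-create idiom in isolation:

# Append a DataFrame to a CSV, emitting the header only on first creation.
import os
import pandas as pd

def append_candles(df, path):
    df.to_csv(path, mode='a', index_label='open_timestamp_utc',
              header=not os.path.exists(path))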
Example 12
    async def on_chat_message(self, message):
        longname = '<unknown>'  # fallback so the exception handler below cannot hit an unbound name
        try:
            content_type, chat_type, chat_id, _, msg_id = \
                telepot.glance(message, long=True)

            # we only work with "text" messages
            if content_type != 'text':
                if chat_type in self.PRIVATE_CHATS:
                    # only do this if it's a privmsg
                    await self.send_message(message,
                                            self.INVALID_CMD,
                                            quote_msg_id=msg_id)
                return

            # we only work in private, groups and supergroups
            if chat_type not in self.PRIVATE_CHATS + self.PUBLIC_CHATS:
                return

            # get the name of the user and log it
            name = self.format_name(message)
            if not name:
                utils.logger.warning('Message without a "from" field received')
                return
            longname = '{chat_id} ({name})'.format(chat_id=chat_id, name=name)
            utils.logger.info('Message from %s: "%s"', longname,
                              message['text'])

            # commands need special handling
            if message['text'].startswith('/'):
                command, rest = self._get_command(message['text'])
                akari_commands = ('akari', 'akari@' + self.username)

                if command not in akari_commands:
                    if chat_type in self.PRIVATE_CHATS:
                        if command in ('help', 'start'):
                            msg = self.help_msg_priv
                        else:
                            msg = self.INVALID_CMD
                        await self.send_message(message,
                                                msg,
                                                no_preview=True,
                                                quote_msg_id=msg_id)
                        return
                    elif chat_type in self.PUBLIC_CHATS:
                        return  # unknown cmd, this was meant for another bot
            else:
                rest = message['text'].strip()

            # if the resulting message is empty...
            if not rest:
                if chat_type in self.PUBLIC_CHATS:  # only show this in groups
                    await self.send_message(message,
                                            self.help_msg_group,
                                            quote_msg_id=msg_id)
                return

            # check rate limit if this chat id is not exempt from them
            if chat_id not in self.rate_limit_exemptions:
                rate_limit = utils.ratelimit_hit('telegram', chat_id)
                if not rate_limit['allowed']:
                    msg = 'Message from %s: throttled (resets in %d seconds)'
                    utils.logger.warning(msg, longname, rate_limit['reset'])
                    msg = ('Not so fast! Try again in %s.' %
                           utils.timedelta(rate_limit['reset']))
                    await self.send_message(message, msg, quote_msg_id=msg_id)
                    return

            msg = random.choice(self.COMPOSING_MSGS)
            await self.send_message(message, msg, quote_msg_id=msg_id)

            # first, search...
            try:
                akari = Akari(rest, type='animation', shuffle_results=True)
            except ImageSearchNoResultsError:
                await self.send_message(message,
                                        'No results.',
                                        quote_msg_id=msg_id)
                return

            # then, if successful, send the pic
            utils.logger.info('Sending %s to %s', akari.filename, longname)
            type = 'file' if akari.filename.endswith('.gif') else 'image'
            await self.send_message(message,
                                    type=type,
                                    filename=akari.filename,
                                    quote_msg_id=msg_id)
        except Exception:
            utils.logger.exception('Error handling %s (%s)', longname,
                                   message['chat']['type'])
            await self.send_message(message,
                                    'Sorry, try again.',
                                    quote_msg_id=msg_id)
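
utils.timedelta(rate_limit['reset']) above is interpolated into user-facing text ('Try again in %s.'), so in this bot it is presumably a humanizing formatter rather than the standard constructor. A sketch under that assumption:

# Hypothetical humanizing formatter matching 'Try again in %s.'; the
# exact wording and rounding rules are assumptions.
def timedelta(seconds):
    minutes, secs = divmod(int(seconds), 60)
    if minutes == 0:
        return '%d seconds' % secs
    if secs == 0:
        return '%d minutes' % minutes
    return '%d minutes and %d seconds' % (minutes, secs)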