Example #1
# module-level imports assumed by this excerpt (provided by the original file):
from typing import Any, Dict, Tuple, Union

import arrow
from numpy import ndarray

import jesse.helpers as jh
from jesse.modes.backtest_mode import load_candles


def get_training_and_testing_candles(start_date_str: str, finish_date_str: str) -> Tuple[
    Dict[str, Dict[str, Union[Union[str, ndarray], Any]]], Dict[str, Dict[str, Union[Union[str, ndarray], Any]]]]:
    start_date = jh.arrow_to_timestamp(arrow.get(start_date_str, 'YYYY-MM-DD'))
    finish_date = jh.arrow_to_timestamp(arrow.get(finish_date_str, 'YYYY-MM-DD')) - 60000

    # Load candles (first try cache, then database)
    candles = load_candles(start_date_str, finish_date_str)

    # divide into training (85%) and testing (15%) sets
    training_candles = {}
    testing_candles = {}
    days_diff = jh.date_diff_in_days(jh.timestamp_to_arrow(start_date), jh.timestamp_to_arrow(finish_date))
    divider_index = int(days_diff * 0.85) * 1440
    for key in candles:
        training_candles[key] = {
            'exchange': candles[key]['exchange'],
            'symbol': candles[key]['symbol'],
            'candles': candles[key]['candles'][0:divider_index],
        }

        testing_candles[key] = {
            'exchange': candles[key]['exchange'],
            'symbol': candles[key]['symbol'],
            'candles': candles[key]['candles'][divider_index:],
        }

    return training_candles, testing_candles
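
The split index is measured in 1-minute candles: 85% of the day count, times 1440 candles per day. A quick sanity check with illustrative numbers (assumed, not taken from the source):

days_diff = 100                           # assume a 100-day backtest window
divider_index = int(days_diff * 0.85) * 1440
print(divider_index)                      # 122400 one-minute candles go to training
print(days_diff * 1440 - divider_index)   # 21600 candles (15 days) go to testing
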
Example #2
def get_training_and_testing_candles(start_date_str: str,
                                     finish_date_str: str):
    start_date = jh.arrow_to_timestamp(arrow.get(start_date_str, 'YYYY-MM-DD'))
    finish_date = jh.arrow_to_timestamp(
        arrow.get(finish_date_str, 'YYYY-MM-DD')) - 60000

    # Load candles (first try cache, then database)
    from jesse.modes.backtest_mode import load_candles
    candles = load_candles(start_date_str, finish_date_str)

    # divide into training (85%) and testing (15%) sets
    training_candles = {}
    testing_candles = {}
    days_diff = jh.date_diff_in_days(jh.timestamp_to_arrow(start_date),
                                     jh.timestamp_to_arrow(finish_date))
    divider_index = int(days_diff * 0.85) * 1440
    for key in candles:
        training_candles[key] = {
            'exchange': candles[key]['exchange'],
            'symbol': candles[key]['symbol'],
            'candles': candles[key]['candles'][0:divider_index],
        }

        testing_candles[key] = {
            'exchange': candles[key]['exchange'],
            'symbol': candles[key]['symbol'],
            'candles': candles[key]['candles'][divider_index:],
        }

    return training_candles, testing_candles
Example #3
def candles_info(candles_array: np.ndarray) -> dict:
    period = jh.date_diff_in_days(jh.timestamp_to_arrow(
        candles_array[0][0]), jh.timestamp_to_arrow(candles_array[-1][0])) + 1

    if period > 365:
        duration = f'{period} days ({round(period / 365, 2)} years)'
    elif period > 30:
        duration = f'{period} days ({round(period / 30, 2)} months)'
    else:
        duration = f'{period} days'

    return {
        'duration': duration,
        'starting_time': candles_array[0][0],
        'finishing_time': (candles_array[-1][0] + 60_000),
    }
Example #4
def candles(candles_array: np.ndarray) -> List[List[str]]:
    period = jh.date_diff_in_days(jh.timestamp_to_arrow(
        candles_array[0][0]), jh.timestamp_to_arrow(candles_array[-1][0])) + 1

    if period > 365:
        duration = f'{period} days ({round(period / 365, 2)} years)'
    elif period > 30:
        duration = f'{period} days ({round(period / 30, 2)} months)'
    else:
        duration = f'{period} days'

    return [
        ['period', duration],
        [
            'starting-ending date',
            f'{jh.timestamp_to_time(candles_array[0][0])[:10]} => {jh.timestamp_to_time(candles_array[-1][0] + 60_000)[:10]}'
        ],
    ]
Example #5
def candles(candles_array):
    period = jh.date_diff_in_days(jh.timestamp_to_arrow(
        candles_array[0][0]), jh.timestamp_to_arrow(candles_array[-1][0])) + 1

    if period > 365:
        duration = '{} days ({} years)'.format(period, round(period / 365, 2))
    elif period > 30:
        duration = '{} days ({} months)'.format(period, round(period / 30, 2))
    else:
        duration = '{} days'.format(period)

    return [
        ['period', duration],
        [
            'starting-ending date', '{} => {}'.format(
                jh.timestamp_to_time(candles_array[0][0])[:10],
                jh.timestamp_to_time(candles_array[-1][0] + 60_000)[:10])
        ],
    ]
Example #6
def test_timestamp_to_arrow():
    arrow_time = arrow.get('2015-08-01')
    assert jh.timestamp_to_arrow(1438387200000) == arrow_time
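
The test relies on jh.timestamp_to_arrow converting a millisecond timestamp into a UTC arrow.Arrow object. A minimal sketch of an equivalent conversion (an assumption for illustration, not the actual jesse.helpers source):

import arrow

def ms_timestamp_to_arrow(ms_timestamp: int) -> arrow.Arrow:
    # candle timestamps are stored in milliseconds; arrow expects seconds
    return arrow.Arrow.utcfromtimestamp(ms_timestamp / 1000)

assert ms_timestamp_to_arrow(1438387200000) == arrow.get('2015-08-01')
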
Example #7
    def generate_initial_population(self) -> None:
        """
        generates the initial population
        """
        loop_length = int(self.population_size / self.cpu_cores)

        with click.progressbar(length=loop_length, label='Generating initial population...') as progressbar:
            for i in range(loop_length):
                people = []
                with Manager() as manager:
                    dna_bucket = manager.list([])
                    workers = []

                    def get_fitness(dna: str, dna_bucket: list) -> None:
                        try:
                            fitness_score, fitness_log_training, fitness_log_testing = self.fitness(dna)
                            dna_bucket.append((dna, fitness_score, fitness_log_training, fitness_log_testing))
                        except Exception as e:
                            proc = os.getpid()
                            logger.error(f'process failed - ID: {str(proc)}')
                            logger.error("".join(traceback.TracebackException.from_exception(e).format()))
                            raise e

                    try:
                        for _ in range(self.cpu_cores):
                            dna = ''.join(choices(self.charset, k=self.solution_len))
                            w = Process(target=get_fitness, args=(dna, dna_bucket))
                            w.start()
                            workers.append(w)

                        # join workers
                        for w in workers:
                            w.join()
                            if w.exitcode > 0:
                                logger.error(f'a process exited with exitcode: {str(w.exitcode)}')
                    except KeyboardInterrupt:
                        print(
                            jh.color('Terminating session...', 'red')
                        )

                        # terminate all workers
                        for w in workers:
                            w.terminate()

                        # shut down the manager process manually since garbage collection won't get to do it for us
                        manager.shutdown()

                        # now we can terminate the main session safely
                        jh.terminate_app()
                    except:
                        raise

                    for d in dna_bucket:
                        people.append({
                            'dna': d[0],
                            'fitness': d[1],
                            'training_log': d[2],
                            'testing_log': d[3]
                        })

                # update dashboard
                click.clear()
                progressbar.update(1)
                print('\n')

                table_items = [
                    ['Started at', jh.timestamp_to_arrow(self.start_time).humanize()],
                    ['Index', f'{len(self.population)}/{self.population_size}'],
                    ['errors/info', f'{len(store.logs.errors)}/{len(store.logs.info)}'],
                    ['Trading Route', f'{router.routes[0].exchange}, {router.routes[0].symbol}, {router.routes[0].timeframe}, {router.routes[0].strategy_name}'],
                    # TODO: add generated DNAs?
                    # ['-'*10, '-'*10],
                    # ['DNA', people[0]['dna']],
                    # ['fitness', round(people[0]['fitness'], 6)],
                    # ['training|testing logs', people[0]['log']],
                ]
                if jh.is_debugging():
                    table_items.insert(3, ['Population Size', self.population_size])
                    table_items.insert(3, ['Iterations', self.iterations])
                    table_items.insert(3, ['Solution Length', self.solution_len])
                    table_items.insert(3, ['-' * 10, '-' * 10])

                table.key_value(table_items, 'Optimize Mode', alignments=('left', 'right'))

                # errors
                if jh.is_debugging() and len(report.errors()):
                    print('\n')
                    table.key_value(report.errors(), 'Error Logs')

                for p in people:
                    self.population.append(p)

        # sort the population
        self.population = list(sorted(self.population, key=lambda x: x['fitness'], reverse=True))
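
Each pass of the outer loop spawns cpu_cores worker processes, so the population fills up in population_size / cpu_cores passes. With illustrative numbers (assumed, not from the source):

population_size = 100
cpu_cores = 4
loop_length = int(population_size / cpu_cores)   # 25 passes through the outer loop
# each pass starts `cpu_cores` Process workers, each evaluating one random DNA,
# so loop_length * cpu_cores == population_size individuals get a fitness score
assert loop_length * cpu_cores == population_size
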
Example #8
    def evolve(self) -> List[Any]:
        """
        the main method, that runs the evolutionary algorithm
        """
        # generate the population if starting
        if self.started_index == 0:
            self.generate_initial_population()
            if len(self.population) < 0.5 * self.population_size:
                raise ValueError('Too many errors: less than half of the planned population size could be generated.')

            # if even our best individual is too weak, then we better not continue
            if self.population[0]['fitness'] == 0.0001:
                print(jh.color('Cannot continue because no individual with the minimum fitness-score was found. '
                               'Your strategy seems to be flawed or maybe it requires modifications. ', 'yellow'))
                jh.terminate_app()

        loop_length = int(self.iterations / self.cpu_cores)

        i = self.started_index
        with click.progressbar(length=loop_length, label='Evolving...') as progressbar:
            while i < loop_length:
                with Manager() as manager:
                    people = manager.list([])
                    workers = []

                    def get_baby(people: List) -> None:
                        try:
                            # let's make a baby together LOL
                            baby = self.make_love()
                            # let's mutate baby's genes, who knows, maybe we create a x-man or something
                            baby = self.mutate(baby)
                            people.append(baby)
                        except Exception as e:
                            proc = os.getpid()
                            logger.error(f'process failed - ID: {str(proc)}')
                            logger.error("".join(traceback.TracebackException.from_exception(e).format()))
                            raise e

                    try:
                        for _ in range(self.cpu_cores):
                            w = Process(target=get_baby, args=[people])
                            w.start()
                            workers.append(w)

                        for w in workers:
                            w.join()
                            if w.exitcode > 0:
                                logger.error(f'a process exited with exitcode: {str(w.exitcode)}')
                    except KeyboardInterrupt:
                        print(
                            jh.color('Terminating session...', 'red')
                        )

                        # terminate all workers
                        for w in workers:
                            w.terminate()

                        # shut down the manager process manually since garbage collection won't get to do it for us
                        manager.shutdown()

                        # now we can terminate the main session safely
                        jh.terminate_app()
                    except:
                        raise

                    # update dashboard
                    click.clear()
                    progressbar.update(1)
                    print('\n')

                    table_items = [
                        ['Started At', jh.timestamp_to_arrow(self.start_time).humanize()],
                        ['Index/Total', f'{(i + 1) * self.cpu_cores}/{self.iterations}'],
                        ['errors/info', f'{len(store.logs.errors)}/{len(store.logs.info)}'],
                        ['Route', f'{router.routes[0].exchange}, {router.routes[0].symbol}, {router.routes[0].timeframe}, {router.routes[0].strategy_name}']
                    ]
                    if jh.is_debugging():
                        table_items.insert(
                            3,
                            ['Population Size, Solution Length',
                             f'{self.population_size}, {self.solution_len}']
                        )

                    table.key_value(table_items, 'info', alignments=('left', 'right'))

                    # errors
                    if jh.is_debugging() and len(report.errors()):
                        print('\n')
                        table.key_value(report.errors(), 'Error Logs')

                    print('\n')
                    print('Best DNA candidates:')
                    print('\n')

                    # print fittest individuals
                    if jh.is_debugging():
                        fittest_list = [['Rank', 'DNA', 'Fitness', 'Training log || Testing log'], ]
                    else:
                        fittest_list = [['Rank', 'DNA', 'Training log || Testing log'], ]
                    if self.population_size > 50:
                        number_of_ind_to_show = 15
                    elif self.population_size > 20:
                        number_of_ind_to_show = 10
                    elif self.population_size > 9:
                        number_of_ind_to_show = 9
                    else:
                        raise ValueError('self.population_size cannot be less than 10')

                    for j in range(number_of_ind_to_show):
                        log = f"win-rate: {self.population[j]['training_log']['win-rate']}%, total: {self.population[j]['training_log']['total']}, PNL: {self.population[j]['training_log']['PNL']}% || win-rate: {self.population[j]['testing_log']['win-rate']}%, total: {self.population[j]['testing_log']['total']}, PNL: {self.population[j]['testing_log']['PNL']}%"
                        if (self.population[j]['testing_log']['PNL'] is not None
                                and self.population[j]['training_log']['PNL'] > 0
                                and self.population[j]['testing_log']['PNL'] > 0):
                            log = jh.style(log, 'bold')
                        if jh.is_debugging():
                            fittest_list.append(
                                [
                                    j + 1,
                                    self.population[j]['dna'],
                                    self.population[j]['fitness'],
                                    log
                                ],
                            )
                        else:
                            fittest_list.append(
                                [
                                    j + 1,
                                    self.population[j]['dna'],
                                    log
                                ],
                            )

                    if jh.is_debugging():
                        table.multi_value(fittest_list, with_headers=True, alignments=('left', 'left', 'right', 'left'))
                    else:
                        table.multi_value(fittest_list, with_headers=True, alignments=('left', 'left', 'left'))

                    # one person has to die and be replaced with the newborn baby
                    for baby in people:
                        random_index = randint(1, len(self.population) - 1)  # never kill our best performer
                        try:
                            self.population[random_index] = baby
                        except IndexError:
                            print('=============')
                            print(f'self.population_size: {self.population_size}')
                            print(f'self.population length: {len(self.population)}')
                            jh.terminate_app()

                        self.population = list(sorted(self.population, key=lambda x: x['fitness'], reverse=True))

                        # reaching the fitness goal could also end the process
                        if baby['fitness'] >= self.fitness_goal:
                            progressbar.update(self.iterations - i)
                            print('\n')
                            print(f'fitness goal reached after iteration {i}')
                            return baby

                    # save progress after every n iterations
                    if i != 0 and int(i * self.cpu_cores) % 50 == 0:
                        self.save_progress(i)

                    # store a take_snapshot of the fittest individuals of the population
                    if i != 0 and i % int(100 / self.cpu_cores) == 0:
                        self.take_snapshot(i * self.cpu_cores)

                    i += 1

        print('\n\n')
        print(f'Finished {self.iterations} iterations.')
        return self.population
Example #9
def livetrade() -> List[Union[List[Union[str, Any]], List[str], List[Union[
    str, int]], List[Union[str, Dict[str, Union[str, int]], Dict[
        str, str], Dict[str, bool], Dict[str, Union[Dict[str, Union[
            int, str, List[Dict[str, Union[str, int]]]]], Dict[str, Union[
                float, str, int, List[Dict[str, Union[str, int]]]]]]], Dict[
                    str, int]]]]]:
    # sum up balance of all trading exchanges
    starting_balance = 0
    current_balance = 0
    for e in store.exchanges.storage:
        starting_balance += store.exchanges.storage[e].starting_assets[
            jh.app_currency()]
        current_balance += store.exchanges.storage[e].assets[jh.app_currency()]
    starting_balance = round(starting_balance, 2)
    current_balance = round(current_balance, 2)

    arr = [
        ['started at', jh.timestamp_to_arrow(store.app.starting_time).humanize()],
        ['current time', jh.timestamp_to_time(jh.now_to_timestamp())[:19]],
        ['errors/info', f'{len(store.logs.errors)}/{len(store.logs.info)}'],
        ['active orders', store.orders.count_all_active_orders()],
        ['open positions', store.positions.count_open_positions()],
    ]

    # TODO: for now, we assume that we trade on one exchange only. Later, we need to add support for more than one exchange at a time
    first_exchange = selectors.get_exchange(router.routes[0].exchange)

    if first_exchange.type == 'futures':
        arr.append([
            'started/current balance', f'{starting_balance}/{current_balance}'
        ])
    else:
        # loop all trading exchanges
        for exchange in selectors.get_all_exchanges():
            # loop all assets
            for asset_name, asset_balance in exchange.assets.items():
                if asset_name == jh.base_asset(router.routes[0].symbol):
                    current_price = selectors.get_current_price(
                        router.routes[0].exchange, router.routes[0].symbol)
                    arr.append([
                        f'{asset_name}',
                        f'{round(exchange.available_assets[asset_name], 5)}/{round(asset_balance, 5)} ({jh.format_currency(round(asset_balance * current_price, 2))} { jh.quote_asset(router.routes[0].symbol)})'
                    ])
                else:
                    arr.append([
                        f'{asset_name}',
                        f'{round(exchange.available_assets[asset_name], 5)}/{round(asset_balance, 5)}'
                    ])

    # short trades summary
    if len(store.completed_trades.trades):
        df = pd.DataFrame.from_records(
            [t.to_dict() for t in store.completed_trades.trades])
        total = len(df)
        winning_trades = df.loc[df['PNL'] > 0]
        losing_trades = df.loc[df['PNL'] < 0]
        pnl = round(df['PNL'].sum(), 2)
        pnl_percentage = round((pnl / starting_balance) * 100, 2)

        arr.append([
            'total/winning/losing trades',
            f'{total}/{len(winning_trades)}/{len(losing_trades)}'
        ])
        arr.append(['PNL (%)', f'${pnl} ({pnl_percentage}%)'])

    if config['app']['debug_mode']:
        arr.append(['debug mode', config['app']['debug_mode']])

    if config['app']['is_test_driving']:
        arr.append(['Test Drive', config['app']['is_test_driving']])
    return arr
Example #10
    def evolve(self) -> list:
        """
        the main method, that runs the evolutionary algorithm
        """
        # clear the logs to start from a clean slate
        jh.clear_file('storage/logs/optimize-mode.txt')

        logger.log_optimize_mode('Optimization session started')

        if self.started_index == 0:
            logger.log_optimize_mode(
                f"Generating {self.population_size} population size (random DNAs) using {self.cpu_cores} CPU cores"
            )
            self.generate_initial_population()

            if len(self.population) < 0.5 * self.population_size:
                msg = f'Too many errors! Less than half of the expected population size could be generated. Only {len(self.population)} individuals out of the planned {self.population_size} are usable.'
                logger.log_optimize_mode(msg)
                raise ValueError(msg)

            # if even our best individual is too weak, then we better not continue
            if self.population[0]['fitness'] == 0.0001:
                msg = 'Cannot continue because no individual with the minimum fitness-score was found. Your strategy seems to be flawed or maybe it requires modifications. '
                logger.log_optimize_mode(msg)
                raise exceptions.InvalidStrategy(msg)

        loop_length = int(self.iterations / self.cpu_cores)

        i = self.started_index
        progressbar = Progressbar(loop_length)
        while i < loop_length:
            with Manager() as manager:
                people_bucket = manager.list([])
                workers = []

                try:
                    for _ in range(self.cpu_cores):
                        mommy = self.select_person()
                        daddy = self.select_person()
                        w = Process(
                            target=create_baby,
                            args=(
                                people_bucket, mommy, daddy, self.solution_len, self.charset,
                                jh.get_config('env.optimization'), router.formatted_routes,
                                router.formatted_extra_routes,
                                self.strategy_hp, self.training_candles, self.testing_candles,
                                self.optimal_total
                            )
                        )
                        w.start()
                        workers.append(w)

                    for w in workers:
                        w.join()
                        if w.exitcode > 0:
                            logger.error(f'a process exited with exitcode: {w.exitcode}')
                except exceptions.Termination:
                    self._handle_termination(manager, workers)

                # update dashboard
                click.clear()
                self.update_progressbar(progressbar)
                # general_info streams
                general_info = {
                    'started_at': jh.timestamp_to_arrow(self.start_time).humanize(),
                    'index': f'{(i + 1) * self.cpu_cores}/{self.iterations}',
                    'errors_info_count': f'{len(store.logs.errors)}/{len(store.logs.info)}',
                    'trading_route': f'{router.routes[0].exchange}, {router.routes[0].symbol}, {router.routes[0].timeframe}, {router.routes[0].strategy_name}',
                    'average_execution_seconds': self.average_execution_seconds
                }
                if jh.is_debugging():
                    general_info['population_size'] = self.population_size
                    general_info['iterations'] = self.iterations
                    general_info['solution_length'] = self.solution_len
                sync_publish('general_info', general_info)

                if self.population_size > 50:
                    number_of_ind_to_show = 40
                elif self.population_size > 20:
                    number_of_ind_to_show = 15
                elif self.population_size > 9:
                    number_of_ind_to_show = 9
                else:
                    raise ValueError('self.population_size cannot be less than 10')

                best_candidates = [{
                        'rank': j + 1,
                        'dna': self.population[j]['dna'],
                        'fitness': round(self.population[j]['fitness'], 4),
                        'training_win_rate': self.population[j]['training_log']['win-rate'],
                        'training_total_trades': self.population[j]['training_log']['total'],
                        'training_pnl': self.population[j]['training_log']['PNL'],
                        'testing_win_rate': self.population[j]['testing_log']['win-rate'],
                        'testing_total_trades': self.population[j]['testing_log']['total'],
                        'testing_pnl': self.population[j]['testing_log']['PNL'],
                    } for j in range(number_of_ind_to_show)]
                sync_publish('best_candidates', best_candidates)

                # one person has to die and be replaced with the newborn baby
                for baby in people_bucket:
                    # never kill our best performer
                    random_index = randint(1, len(self.population) - 1)
                    self.population[random_index] = baby
                    self.population = list(sorted(self.population, key=lambda x: x['fitness'], reverse=True))

                    # reaching the fitness goal could also end the process
                    if baby['fitness'] >= self.fitness_goal:
                        self.update_progressbar(progressbar, finished=True)
                        sync_publish('alert', {
                            'message': f'Fitness goal reached after iteration {i*self.cpu_cores}',
                            'type': 'success'
                        })
                        return baby

                # TODO: bring back progress resumption
                # # save progress after every n iterations
                # if i != 0 and int(i * self.cpu_cores) % 50 == 0:
                #     self.save_progress(i)

                # TODO: bring back
                # # store a take_snapshot of the fittest individuals of the population
                # if i != 0 and i % int(100 / self.cpu_cores) == 0:
                #     self.take_snapshot(i * self.cpu_cores)

                i += 1

        sync_publish('alert', {
            'message': f"Finished {self.iterations} iterations. Check your best DNA candidates, "
                       f"if you don't like any of them, try modifying your strategy.",
            'type': 'success'
        })
        return self.population
Example #11
    def generate_initial_population(self) -> None:
        """
        generates the initial population
        """
        length = int(self.population_size / self.cpu_cores)

        progressbar = Progressbar(length)
        for i in range(length):
            people = []
            with Manager() as manager:
                dna_bucket = manager.list([])
                workers = []

                try:
                    for _ in range(self.cpu_cores):
                        dna = ''.join(choices(self.charset, k=self.solution_len))
                        w = Process(
                            target=get_and_add_fitness_to_the_bucket,
                            args=(
                                dna_bucket, jh.get_config('env.optimization'), router.formatted_routes, router.formatted_extra_routes,
                                self.strategy_hp, dna, self.training_candles, self.testing_candles,
                                self.optimal_total
                            )
                        )
                        w.start()
                        workers.append(w)

                    # join workers
                    for w in workers:
                        w.join()
                        if w.exitcode > 0:
                            logger.error(f'a process exited with exitcode: {w.exitcode}')
                except exceptions.Termination:
                    self._handle_termination(manager, workers)

                for d in dna_bucket:
                    people.append({
                        'dna': d[0],
                        'fitness': d[1],
                        'training_log': d[2],
                        'testing_log': d[3]
                    })

            # update dashboard
            self.update_progressbar(progressbar)

            # general_info streams
            general_info = {
                'started_at': jh.timestamp_to_arrow(self.start_time).humanize(),
                'index': f'{(i + 1) * self.cpu_cores}/{self.population_size}',
                'errors_info_count': f'{len(store.logs.errors)}/{len(store.logs.info)}',
                'trading_route': f'{router.routes[0].exchange}, {router.routes[0].symbol}, {router.routes[0].timeframe}, {router.routes[0].strategy_name}',
                'average_execution_seconds': self.average_execution_seconds
            }
            if jh.is_debugging():
                general_info['population_size'] = self.population_size
                general_info['iterations'] = self.iterations
                general_info['solution_length'] = self.solution_len
            sync_publish('general_info', general_info)

            for p in people:
                self.population.append(p)

        sync_publish('progressbar', {
            'current': 100,
            'estimated_remaining_seconds': 0
        })
        # sort the population
        self.population = list(sorted(self.population, key=lambda x: x['fitness'], reverse=True))
Example #12
def _get_candles_from_backup_exchange(exchange: str,
                                      backup_driver: CandleExchange,
                                      symbol: str, start_timestamp: int,
                                      end_timestamp: int):
    total_candles = []

    # try fetching from database first
    backup_candles = Candle.select(
        Candle.timestamp, Candle.open, Candle.close, Candle.high, Candle.low,
        Candle.volume).where(
            Candle.timestamp.between(start_timestamp, end_timestamp),
            Candle.exchange == backup_driver.name,
            Candle.symbol == symbol).order_by(Candle.timestamp.asc()).tuples()
    already_exists = len(
        backup_candles) == (end_timestamp - start_timestamp) / 60_000 + 1
    if already_exists:
        # loop through them and set new ID and exchange
        for c in backup_candles:
            total_candles.append({
                'id': jh.generate_unique_id(),
                'symbol': symbol,
                'exchange': exchange,
                'timestamp': c[0],
                'open': c[1],
                'close': c[2],
                'high': c[3],
                'low': c[4],
                'volume': c[5]
            })

        return total_candles

    # try fetching from market now
    days_count = jh.date_diff_in_days(jh.timestamp_to_arrow(start_timestamp),
                                      jh.timestamp_to_arrow(end_timestamp))
    # round days_count up so that we may import more candles than needed, but never fewer
    if days_count < 1:
        days_count = 1
    if type(days_count) is float and not days_count.is_integer():
        days_count = math.ceil(days_count)
    candles_count = days_count * 1440
    start_date = jh.timestamp_to_arrow(start_timestamp).floor('day')
    for _ in range(candles_count):
        temp_start_timestamp = start_date.int_timestamp * 1000
        temp_end_timestamp = temp_start_timestamp + (backup_driver.count -
                                                     1) * 60000

        # to make sure it won't try to import candles from the future! LOL
        if temp_start_timestamp > jh.now_to_timestamp():
            break

        # prevent duplicates
        count = Candle.select().where(
            Candle.timestamp.between(temp_start_timestamp, temp_end_timestamp),
            Candle.symbol == symbol,
            Candle.exchange == backup_driver.name).count()
        already_exists = count == backup_driver.count

        if not already_exists:
            # if temp_end_timestamp falls in the future (today's candles), clamp it to the last closed minute
            if temp_end_timestamp > jh.now_to_timestamp():
                temp_end_timestamp = arrow.utcnow().floor(
                    'minute').int_timestamp * 1000 - 60000

            # fetch from market
            candles = backup_driver.fetch(symbol, temp_start_timestamp)

            if not len(candles):
                raise CandleNotFoundInExchange(
                    'No candles exists in the market for this day: {} \n'
                    'Try another start_date'.format(
                        jh.timestamp_to_time(temp_start_timestamp)[:10], ))

            # fill absent candles (if there's any)
            candles = _fill_absent_candles(candles, temp_start_timestamp,
                                           temp_end_timestamp)

            # store in the database
            _insert_to_database(candles)

        # shift start_date forward by the driver's candle count (in minutes)
        start_date = start_date.shift(minutes=backup_driver.count)

        # sleep so that the exchange won't get angry at us
        if not already_exists:
            time.sleep(backup_driver.sleep_time)

    # now try fetching from database again. Why? because we might have fetched more
    # than what's needed, but we only want as much as was requested. Don't worry, the next
    # request will probably fetch from database and there won't be any waste!
    backup_candles = Candle.select(
        Candle.timestamp, Candle.open, Candle.close, Candle.high, Candle.low,
        Candle.volume).where(
            Candle.timestamp.between(start_timestamp, end_timestamp),
            Candle.exchange == backup_driver.name,
            Candle.symbol == symbol).order_by(Candle.timestamp.asc()).tuples()
    already_exists = len(
        backup_candles) == (end_timestamp - start_timestamp) / 60_000 + 1
    if already_exists:
        # loop through them and set new ID and exchange
        for c in backup_candles:
            total_candles.append({
                'id': jh.generate_unique_id(),
                'symbol': symbol,
                'exchange': exchange,
                'timestamp': c[0],
                'open': c[1],
                'close': c[2],
                'high': c[3],
                'low': c[4],
                'volume': c[5]
            })

        return total_candles
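
The already_exists check counts on one candle per minute, inclusive of both endpoints, hence the + 1. A quick illustration with assumed timestamps:

start_timestamp = 1438387200000                  # 2015-08-01 00:00 UTC
end_timestamp = start_timestamp + 1439 * 60_000  # 2015-08-01 23:59 UTC
expected = (end_timestamp - start_timestamp) / 60_000 + 1
print(expected)                                  # 1440.0 candles for one full day
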
Example #13
def load_required_candles(exchange: str, symbol: str, start_date_str: str,
                          finish_date_str: str):
    """
    loads the warm-up candles that are required before executing strategies:
    210 candles of the biggest timeframe, which translates to more 1-minute candles
    """
    start_date = jh.arrow_to_timestamp(arrow.get(start_date_str, 'YYYY-MM-DD'))
    finish_date = jh.arrow_to_timestamp(
        arrow.get(finish_date_str, 'YYYY-MM-DD')) - 60000

    # validate
    if start_date == finish_date:
        raise ValueError('start_date and finish_date cannot be the same.')
    if start_date > finish_date:
        raise ValueError('start_date cannot be bigger than finish_date.')
    if finish_date > arrow.utcnow().int_timestamp * 1000:
        raise ValueError('Can\'t backtest the future!')

    max_timeframe = jh.max_timeframe(config['app']['considering_timeframes'])
    short_candles_count = jh.get_config(
        'env.data.warmup_candles_num',
        210) * jh.timeframe_to_one_minutes(max_timeframe)
    pre_finish_date = start_date - 60_000
    pre_start_date = pre_finish_date - short_candles_count * 60_000
    # make sure we start from the beginning of the day instead
    pre_start_date = jh.timestamp_to_arrow(pre_start_date).floor(
        'day').int_timestamp * 1000
    # update short_candles_count so it counts from the beginning of the day instead
    short_candles_count = int((pre_finish_date - pre_start_date) / 60_000)

    key = jh.key(exchange, symbol)
    cache_key = '{}-{}-{}'.format(jh.timestamp_to_date(pre_start_date),
                                  jh.timestamp_to_date(pre_finish_date), key)
    cached_value = cache.get_value(cache_key)

    # if cache exists
    if cached_value:
        candles_tuple = cached_value
    # not cached; fetch from the database and cache it for later calls
    else:
        # fetch from database
        candles_tuple = tuple(
            Candle.select(Candle.timestamp, Candle.open, Candle.close,
                          Candle.high, Candle.low, Candle.volume).where(
                              Candle.timestamp.between(pre_start_date,
                                                       pre_finish_date),
                              Candle.exchange == exchange,
                              Candle.symbol == symbol).order_by(
                                  Candle.timestamp.asc()).tuples())

        # cache it for near future calls
        cache.set_value(cache_key,
                        candles_tuple,
                        expire_seconds=60 * 60 * 24 * 7)

    candles = np.array(candles_tuple)

    if len(candles) < short_candles_count + 1:
        first_existing_candle = tuple(
            Candle.select(Candle.timestamp).where(
                Candle.exchange == exchange, Candle.symbol == symbol).order_by(
                    Candle.timestamp.asc()).limit(1).tuples())

        if not len(first_existing_candle):
            raise CandleNotFoundInDatabase(
                'No candle for {} {} is present in the database. Try importing candles.'
                .format(exchange, symbol))

        first_existing_candle = first_existing_candle[0][0]

        last_existing_candle = tuple(
            Candle.select(Candle.timestamp).where(
                Candle.exchange == exchange, Candle.symbol == symbol).order_by(
                    Candle.timestamp.desc()).limit(1).tuples())[0][0]

        first_backtestable_timestamp = first_existing_candle + (
            pre_finish_date - pre_start_date) + (60_000 * 1440)

        # if first backtestable timestamp is in the future, that means we have some but not enough candles
        if first_backtestable_timestamp > jh.today_to_timestamp():
            raise CandleNotFoundInDatabase(
                'Not enough candles for {} {} are present in the database. Jesse requires "210 * biggest_timeframe" warm-up candles. '
                'Try importing more candles from an earlier date.'.format(
                    exchange, symbol))

        raise CandleNotFoundInDatabase(
            'Not enough candles for {} {} exist to run a backtest from {} => {}. \n'
            'First available date is {}\n'
            'Last available date is {}'.format(
                exchange,
                symbol,
                start_date_str,
                finish_date_str,
                jh.timestamp_to_date(first_backtestable_timestamp),
                jh.timestamp_to_date(last_existing_candle),
            ))

    return candles
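
The warm-up size is warmup_candles_num (default 210) candles of the biggest timeframe, expressed in 1-minute candles. A worked example assuming the biggest timeframe is 4h (illustrative numbers, not from the source):

warmup_candles_num = 210          # default for the 'env.data.warmup_candles_num' config
minutes_in_max_timeframe = 240    # assume the biggest timeframe is '4h'
short_candles_count = warmup_candles_num * minutes_in_max_timeframe
print(short_candles_count)        # 50400 one-minute candles
print(short_candles_count / 1440) # 35.0 days of warm-up data before start_date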