def _resolve_pairs_list(self, config: Dict[str, Any]) -> None:
    """
    Helper for download script.
    Takes first found:
    * -p (pairs argument)
    * --pairs-file
    * whitelist from config
    """
    if "pairs" in config:
        return

    if "pairs_file" in self.args and self.args.pairs_file:
        pairs_file = Path(self.args.pairs_file)
        logger.info(f'Reading pairs file "{pairs_file}".')
        # Download pairs from the pairs file if no config is specified
        # or if the pairs file is specified explicitly
        if not pairs_file.exists():
            raise OperationalException(f'No pairs file found with path "{pairs_file}".')
        with pairs_file.open('r') as f:
            config['pairs'] = json_load(f)
        config['pairs'].sort()
        return

    if "config" in self.args and self.args.config:
        logger.info("Using pairlist from configuration.")
        config['pairs'] = config.get('exchange', {}).get('pair_whitelist')
    else:
        # Fall back to /dl_path/pairs.json
        pairs_file = Path(config['datadir']) / config['exchange']['name'].lower() / "pairs.json"
        if pairs_file.exists():
            with pairs_file.open('r') as f:
                config['pairs'] = json_load(f)
            if 'pairs' in config:
                config['pairs'].sort()
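# Usage sketch (illustrative, not part of the original module): the pairs file read
# above is expected to contain a plain JSON array of pair names, which json_load()
# turns into a list that is then sorted in place. File name and pairs are hypothetical.
import json
from pathlib import Path

pairs_file = Path("pairs.json")
pairs_file.write_text(json.dumps(["ETH/BTC", "XRP/BTC", "ADA/BTC"]))
with pairs_file.open('r') as f:
    pairs = sorted(json.load(f))
print(pairs)  # ['ADA/BTC', 'ETH/BTC', 'XRP/BTC']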
def get_latest_optimize_filename(directory: Union[Path, str], variant: str) -> str:
    """
    Get latest backtest export based on '.last_result.json'.
    :param directory: Directory to search for last result
    :param variant: 'backtest' or 'hyperopt' - the method to return
    :return: string containing the filename of the latest backtest result
    :raises: ValueError in the following cases:
        * Directory does not exist
        * `directory/.last_result.json` does not exist
        * `directory/.last_result.json` has the wrong content
    """
    if isinstance(directory, str):
        directory = Path(directory)
    if not directory.is_dir():
        raise ValueError(f"Directory '{directory}' does not exist.")
    filename = directory / LAST_BT_RESULT_FN

    if not filename.is_file():
        raise ValueError(
            f"Directory '{directory}' does not seem to contain backtest statistics yet."
        )

    with filename.open() as file:
        data = json_load(file)

    if f'latest_{variant}' not in data:
        raise ValueError(f"Invalid '{LAST_BT_RESULT_FN}' format.")

    return data[f'latest_{variant}']
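# Usage sketch (assumed call site, hypothetical directory): resolve the newest
# backtest export recorded in '<directory>/.last_result.json'. The call raises
# ValueError if the directory or the '.last_result.json' file is missing.
latest = get_latest_optimize_filename("user_data/backtest_results", "backtest")
print(f"Latest backtest export: {latest}")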
def load_backtest_data(filename: Union[Path, str]) -> pd.DataFrame:
    """
    Load backtest data file.
    :param filename: pathlib.Path object, or string pointing to the file.
    :return: a dataframe with the analysis results
    """
    if isinstance(filename, str):
        filename = Path(filename)

    if not filename.is_file():
        raise ValueError(f"File {filename} does not exist.")

    with filename.open() as file:
        data = json_load(file)

    df = pd.DataFrame(data, columns=BT_DATA_COLUMNS)

    df['open_time'] = pd.to_datetime(df['open_time'],
                                     unit='s',
                                     utc=True,
                                     infer_datetime_format=True)
    df['close_time'] = pd.to_datetime(df['close_time'],
                                      unit='s',
                                      utc=True,
                                      infer_datetime_format=True)
    df['profitabs'] = df['close_rate'] - df['open_rate']
    df = df.sort_values("open_time").reset_index(drop=True)
    return df
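# Usage sketch (hypothetical filename): load the exported trades and summarise the
# absolute profit column computed above.
trades = load_backtest_data("user_data/backtest_results/backtest-result.json")
print(f"{len(trades)} trades, total profit (abs): {trades['profitabs'].sum():.8f}")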
def load_backtest_metadata(filename: Union[Path, str]) -> Dict[str, Any]:
    """
    Read metadata dictionary from backtest results file without reading and deserializing
    the entire file.
    :param filename: path to backtest results file.
    :return: metadata dict, or an empty dict if metadata is not present.
    """
    filename = get_backtest_metadata_filename(filename)
    try:
        with filename.open() as fp:
            return json_load(fp)
    except FileNotFoundError:
        return {}
    except Exception as e:
        raise OperationalException(
            'Unexpected error while loading backtest metadata.') from e
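# Usage sketch: metadata lookup is best-effort - a missing metadata file simply yields
# an empty dict, so callers can branch on it safely. The filename is hypothetical; the
# actual metadata path is derived by get_backtest_metadata_filename(), not shown here.
metadata = load_backtest_metadata("user_data/backtest_results/backtest-result.json")
if not metadata:
    print("No metadata recorded for this backtest result.")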
def load_backtest_stats(filename: Union[Path, str]) -> Dict[str, Any]:
    """
    Load backtest statistics file.
    :param filename: pathlib.Path object, or string pointing to the file.
    :return: a dictionary containing the resulting file.
    """
    if isinstance(filename, str):
        filename = Path(filename)
    if filename.is_dir():
        filename = filename / get_latest_backtest_filename(filename)
    if not filename.is_file():
        raise ValueError(f"File {filename} does not exist.")
    logger.info(f"Loading backtest result from {filename}")
    with filename.open() as file:
        data = json_load(file)

    return data
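# Usage sketch (hypothetical path): load_backtest_stats() also accepts a directory, in
# which case the newest export is resolved via get_latest_backtest_filename().
stats = load_backtest_stats("user_data/backtest_results")
print(sorted(stats.keys()))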
def load_params_from_file(self) -> Dict:
    """
    Load strategy parameters from a JSON file stored next to the strategy file
    (same base name, '.json' suffix).
    """
    filename_str = getattr(self, '__file__', '')
    if not filename_str:
        return {}
    filename = Path(filename_str).with_suffix('.json')

    if filename.is_file():
        logger.info(f"Loading parameters from file {filename}")
        try:
            with filename.open('r') as file:
                params = json_load(file)
            if params.get('strategy_name') != self.__class__.__name__:
                raise OperationalException('Invalid parameter file provided.')
            return params
        except ValueError:
            logger.warning("Invalid parameter file format.")
            return {}
    logger.info("Found no parameter file.")
    return {}
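# Usage sketch: load_params_from_file() expects a JSON file named after the strategy
# module (e.g. MyStrategy.py -> MyStrategy.json) whose 'strategy_name' entry matches
# the strategy class name. The layout and parameter names below are hypothetical.
import json
from pathlib import Path

Path("MyStrategy.json").write_text(json.dumps({
    "strategy_name": "MyStrategy",  # must equal self.__class__.__name__
    "params": {"buy": {"rsi_buy": 30}, "sell": {"rsi_sell": 70}},  # illustrative content
}))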
def load_cached_data_for_updating(
        filename: Path, ticker_interval: str,
        timerange: Optional[TimeRange]) -> Tuple[List[Any], Optional[int]]:
    """
    Load cached data and choose what part of the data should be updated
    """

    since_ms = None

    # user sets timerange, so find the start time
    if timerange:
        if timerange.starttype == 'date':
            since_ms = timerange.startts * 1000
        elif timerange.stoptype == 'line':
            num_minutes = timerange.stopts * timeframe_to_minutes(ticker_interval)
            since_ms = arrow.utcnow().shift(minutes=num_minutes).timestamp * 1000

    # read the cached file
    if filename.is_file():
        with open(filename, "rt") as file:
            data = misc.json_load(file)
        # remove the last item, as it may be an incomplete candle
        if data:
            data.pop()
    else:
        data = []

    if data:
        if since_ms and since_ms < data[0][0]:
            # Earlier data than existing data requested, redownload all
            data = []
        else:
            # part of the data was already downloaded, so only download the missing part
            since_ms = data[-1][0] + 1

    return (data, since_ms)
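# Usage sketch: how the 'date' branch above derives since_ms. A stand-in namedtuple is
# used here purely for illustration; the real TimeRange class comes from the project.
from collections import namedtuple

FakeTimeRange = namedtuple("FakeTimeRange", ["starttype", "startts", "stoptype", "stopts"])
tr = FakeTimeRange(starttype="date", startts=1546300800, stoptype=None, stopts=0)  # 2019-01-01 UTC
since_ms = tr.startts * 1000 if tr.starttype == "date" else None
print(since_ms)  # 1546300800000 - epoch milliseconds handed to the downloader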