def deploy_results(results: List[APIOutput], output: str, write_csv=False):
    """Deploys results from the top counties to specified output directory.

    Args:
        results: API output rows to serialize and upload.
        output: Output folder to save results in.
        write_csv: If True, also export each row's data as a nested CSV file.

    Raises:
        ValueError: If ``write_csv`` is set but a row's data is not a list.
    """
    # `exist_ok=True` already tolerates a pre-existing directory, so no
    # separate exists() check is needed (it was also race-prone).
    pathlib.Path(output).mkdir(parents=True, exist_ok=True)

    for api_row in results:
        data = remove_root_wrapper(api_row.data.dict())
        # Encoding approach based on Pydantic's implementation of .json():
        # https://github.com/samuelcolvin/pydantic/pull/210/files
        # `json` isn't in `pydantic/__init__py` which I think means it doesn't
        # intend to export it. We use it anyway and pylint started complaining.
        # pylint: disable=no-member
        data_as_json = simplejson.dumps(
            data, ignore_nan=True, default=pydantic.json.pydantic_encoder
        )
        dataset_deployer.upload_json(api_row.file_stem, data_as_json, output)
        if write_csv:
            if not isinstance(data, list):
                raise ValueError("Cannot find list data for csv export.")
            dataset_deployer.write_nested_csv(data, api_row.file_stem, output)
def deploy_results(result: TopCountiesPipelineResult, output: str):
    """Deploys results from the top counties to specified output directory.

    Args:
        result: Top Counties Pipeline result.
        output: output folder to save results in.
    """
    serialized = result.api.json()
    dataset_deployer.upload_json(result.key, serialized, output)
def deploy_results(result: APIGeneration, output: str):
    """Deploys results from the top counties to specified output directory.

    Args:
        result: API generation result whose rows are uploaded one JSON file
            per row, named by each row's key.
        output: Output folder to save results in.
    """
    for api_row in result.api_rows:
        dataset_deployer.upload_json(api_row.key, api_row.api.json(), output)
def deploy_results(results: List[APIOutput], output: str, write_csv=False):
    """Deploys results from the top counties to specified output directory.

    Args:
        results: API output rows to serialize and upload.
        output: Output folder to save results in.
        write_csv: If True, also export each row's data as a nested CSV file;
            rows without a usable list payload are skipped with a warning.
    """
    for api_row in results:
        dataset_deployer.upload_json(api_row.file_stem, api_row.data.json(), output)
        if not write_csv:
            continue
        data = api_row.data.dict()
        if not isinstance(data, list):
            # Most of the API schemas have the lists under the `'data'` key.
            nested = data.get('data')
            if not isinstance(nested, list):
                # Plain string: the original used an f-string with no
                # placeholders (flake8 F541).
                logger.warning("Missing data field with list of data.")
                continue
            data = nested
        dataset_deployer.write_nested_csv(data, api_row.file_stem, output)