def test_exponential_sleep_time(caplog, watchman_service):
    endpoint = _endpoint_metadata("t1", False)
    start, end = (
        isoparse("2016-01-01T00:00:00+00:00"),
        isoparse("2016-01-01T12:00:00+00:00"),
    )

    with caplog.at_level(logging.CRITICAL):
        with patch(
            "gordo_components.client.client.sleep", return_value=None
        ) as time_sleep:
            client = Client(project=tu.GORDO_PROJECT)

            loop = asyncio.get_event_loop()
            loop.run_until_complete(
                client._process_prediction_task(
                    X=pd.DataFrame([123]),
                    y=None,
                    chunk=slice(0, 1),
                    endpoint=endpoint,
                    start=start,
                    end=end,
                )
            )
            loop.close()

            expected_calls = [call(8), call(16), call(32), call(64), call(128)]
            time_sleep.assert_has_calls(expected_calls)
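
# The expected calls above assume the client doubles its sleep time between
# retries, starting at 8 seconds. A minimal sketch of that backoff sequence
# follows; the helper name and the starting value are illustrative assumptions,
# not part of the client API:
def _illustrative_backoff_sleeps(start: int = 8, retries: int = 5) -> typing.List[int]:
    """Return the sleep durations a doubling backoff would produce."""
    return [start * 2 ** i for i in range(retries)]


# _illustrative_backoff_sleeps() == [8, 16, 32, 64, 128], matching expected_calls.

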
def predict(
    ctx: click.Context,
    start: datetime,
    end: datetime,
    data_provider: providers.GordoBaseDataProvider,
    output_dir: str,
    influx_uri: str,
    influx_api_key: str,
    influx_recreate_db: bool,
    forward_resampled_sensors: bool,
    ignore_unhealthy_targets: bool,
    n_retries: int,
    parquet: bool,
):
    """
    Run some predictions against the target
    """
    ctx.obj["kwargs"].update(
        {
            "data_provider": data_provider,
            "forward_resampled_sensors": forward_resampled_sensors,
            "ignore_unhealthy_targets": ignore_unhealthy_targets,
            "n_retries": n_retries,
            "use_parquet": parquet,
        }
    )

    client = Client(*ctx.obj["args"], **ctx.obj["kwargs"])

    if influx_uri is not None:
        client.prediction_forwarder = ForwardPredictionsIntoInflux(
            destination_influx_uri=influx_uri,
            destination_influx_api_key=influx_api_key,
            destination_influx_recreate=influx_recreate_db,
            n_retries=n_retries,
        )

    # Fire off getting predictions
    predictions = client.predict(
        start, end
    )  # type: typing.Iterable[typing.Tuple[str, pd.DataFrame, typing.List[str]]]

    # Loop over all error messages for each result and log them
    click.secho(f"\n{'-' * 20} Summary of failed predictions (if any) {'-' * 20}")
    exit_code = 0
    for (_name, _df, error_messages) in predictions:
        for err_msg in error_messages:
            # Any error message indicates we encountered at least one error
            exit_code = 1
            click.secho(err_msg, fg="red")

    # Shall we write the predictions out?
    if output_dir is not None:
        for (name, prediction_df, _err_msgs) in predictions:
            prediction_df.to_csv(
                os.path.join(output_dir, f"{name}.csv.gz"), compression="gzip"
            )

    sys.exit(exit_code)

def metadata(ctx: click.Context, output_file: typing.Optional[typing.IO[str]]):
    """
    Get metadata from a given endpoint
    """
    client = Client(*ctx.obj["args"], **ctx.obj["kwargs"])
    metadata = client.get_metadata()
    if output_file:
        json.dump(metadata, output_file)
        click.secho(f"Saved metadata json to file: '{output_file}'")
    else:
        pprint(metadata)
    return metadata

def test_client_endpoint_filtering(
    endpoints: typing.List[EndpointMetadata],
    target: typing.Optional[str],
    ignore_unhealthy: typing.Optional[bool],
    expected: typing.List[EndpointMetadata],
):
    if not isinstance(expected, list):
        with pytest.raises(ValueError):
            Client._filter_endpoints(endpoints, target, ignore_unhealthy)
    else:
        filtered_endpoints = Client._filter_endpoints(
            endpoints, target, ignore_unhealthy
        )
        assert (
            expected == filtered_endpoints
        ), f"Not equal: {expected} \n----\n {filtered_endpoints}"

def download_model(ctx: click.Context, output_dir: str):
    """
    Download the actual model from the target and write to an output directory
    """
    client = Client(*ctx.obj["args"], **ctx.obj["kwargs"])
    models = client.download_model()

    # Iterate over mapping of models and save each into its own sub dir of the output_dir
    for target, model in models.items():
        model_out_dir = os.path.join(output_dir, target)
        os.mkdir(model_out_dir)
        click.secho(
            f"Writing model '{target}' to directory: '{model_out_dir}'...", nl=False
        )
        serializer.dump(model, model_out_dir)
        click.secho("done")

    click.secho(f"Wrote all models to directory: {output_dir}", fg="green")

def test_client_get_metadata(watchman_service):
    """Test client's ability to get metadata from some target"""
    client = Client(project=tu.GORDO_PROJECT)

    metadata = client.get_metadata()
    assert isinstance(metadata, dict)

    # Can't get metadata for a non-existent target
    with pytest.raises(ValueError):
        client = Client(project=tu.GORDO_PROJECT, target="no-such-target")
        client.get_metadata()

def test_client_download_model(watchman_service):
    """Test client's ability to download the model"""
    client = Client(project=tu.GORDO_PROJECT, target=tu.GORDO_SINGLE_TARGET)

    models = client.download_model()
    assert isinstance(models, dict)
    assert isinstance(models[tu.GORDO_SINGLE_TARGET], BaseEstimator)

    # Can't download model for a non-existent target
    with pytest.raises(ValueError):
        client = Client(project=tu.GORDO_PROJECT, target="non-existent-target")
        client.download_model()

def test_client_predictions_diff_batch_sizes_and_toggle_data_provider(
    influxdb, watchman_service, use_data_provider: bool, batch_size: int
):
    """
    Run the prediction client with different batch sizes, toggling whether a
    data provider is used or not.
    """
    # Time range used in this test
    start, end = (
        isoparse("2016-01-01T00:00:00+00:00"),
        isoparse("2016-01-01T12:00:00+00:00"),
    )

    # Client only used within this test
    test_client = client_utils.influx_client_from_uri(tu.INFLUXDB_URI)

    # Measurements created by the prediction client in the destination influx
    query = f"""
    SELECT *
    FROM "model-output"
    WHERE ("machine" =~ /^{tu.GORDO_SINGLE_TARGET}$/)
    """

    # Before predicting, influx destination db should be empty for 'predictions' measurement
    vals = test_client.query(query)
    assert len(vals) == 0

    data_provider = (
        providers.InfluxDataProvider(
            measurement=tu.INFLUXDB_MEASUREMENT,
            value_name="Value",
            client=client_utils.influx_client_from_uri(
                uri=tu.INFLUXDB_URI, dataframe_client=True
            ),
        )
        if use_data_provider
        else None
    )

    prediction_client = Client(
        project=tu.GORDO_PROJECT,
        data_provider=data_provider,
        prediction_forwarder=ForwardPredictionsIntoInflux(
            destination_influx_uri=tu.INFLUXDB_URI
        ),
        batch_size=batch_size,
    )

    # Should have discovered machine-1
    assert len(prediction_client.endpoints) == 1

    # All endpoints should be healthy
    assert all(ep.healthy for ep in prediction_client.endpoints)

    # Get predictions
    predictions = prediction_client.predict(start=start, end=end)
    assert isinstance(predictions, list)
    assert len(predictions) == 1

    # First (and only) prediction result
    name, predictions, error_messages = predictions[0]
    assert isinstance(name, str)
    assert isinstance(predictions, pd.DataFrame)
    assert isinstance(error_messages, list)
    assert isinstance(predictions.index, pd.core.indexes.datetimes.DatetimeIndex)

    # This should have resulted in writing predictions to influx, which was
    # verified to be empty before predicting
    vals = test_client.query(query)
    assert (
        len(vals) > 0
    ), f"Expected new values in 'predictions' measurement, but found {vals}"