Code example #1
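The train-and-test entry point: after the GPU check, it logs the gluonts and forecaster versions, builds the forecaster from the environment's hyperparameters, trains it unless it is already a Predictor, serializes the predictor to the model path, and runs evaluation when a test channel is present.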
def run_train_and_test(
        env: TrainEnv, forecaster_type: Type[Union[Estimator,
                                                   Predictor]]) -> None:
    check_gpu_support()

    forecaster_fq_name = fqname_for(forecaster_type)
    forecaster_version = forecaster_type.__version__

    logger.info(f"Using gluonts v{gluonts.__version__}")
    logger.info(f"Using forecaster {forecaster_fq_name} v{forecaster_version}")

    forecaster = forecaster_type.from_hyperparameters(**env.hyperparameters)

    logger.info(
        f"The forecaster can be reconstructed with the following expression: "
        f"{dump_code(forecaster)}")

    if isinstance(forecaster, Predictor):
        predictor = forecaster
    else:
        predictor = run_train(forecaster, env.datasets["train"])

    predictor.serialize(env.path.model)

    if "test" in env.datasets:
        run_test(env, predictor, env.datasets["test"])
Code example #2
File: __init__.py, Project: yuxixin/gluon-ts
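Here the GPU check precedes construction of a gunicorn serving application. The predictor factory is either dynamic (a new predictor built per request from the request's configuration) or static (a single predictor deserialized from the model path), and the SageMaker execution parameters are derived from the settings object.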
def make_gunicorn_app(
    env: ServeEnv,
    forecaster_type: Optional[Type[Union[Estimator, Predictor]]],
    settings: Settings,
) -> Application:
    check_gpu_support()

    if forecaster_type is not None:
        logger.info(f"Using dynamic predictor factory")

        ctor = forecaster_type.from_hyperparameters

        forecaster_fq_name = fqname_for(forecaster_type)
        forecaster_version = forecaster_type.__version__

        def predictor_factory(request) -> Predictor:
            return ctor(**request["configuration"])

    else:
        logger.info(f"Using static predictor factory")

        assert env is not None
        predictor = Predictor.deserialize(env.path.model)

        forecaster_fq_name = fqname_for(type(predictor))
        forecaster_version = predictor.__version__

        def predictor_factory(request) -> Predictor:
            return predictor

    logger.info(f"Using gluonts v{gluonts.__version__}")
    logger.info(f"Using forecaster {forecaster_fq_name} v{forecaster_version}")

    execution_params = {
        "MaxConcurrentTransforms": settings.number_of_workers,
        "BatchStrategy": settings.sagemaker_batch_strategy,
        "MaxPayloadInMB": settings.sagemaker_max_payload_in_mb,
    }

    flask_app = make_app(
        predictor_factory,
        execution_params,
        batch_transform_config=env.batch_config,
        settings=settings,
    )

    gunicorn_app = Application(
        app=flask_app,
        config={
            "bind": settings.sagemaker_server_bind,
            "workers": settings.number_of_workers,
            "timeout": settings.sagemaker_server_timeout,
        },
    )

    return gunicorn_app
Code example #3
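A leaner variant of the same flow: the test dataset is first trimmed to the forecaster's prediction_length via prepare_test_dataset before evaluation.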
def run(env, forecaster):
    check_gpu_support()

    if isinstance(forecaster, Predictor):
        predictor = forecaster
    else:
        predictor = run_train(env, forecaster, env.datasets["train"])

    predictor.serialize(env.path.model)

    if "test" in env.datasets:
        test_dataset = prepare_test_dataset(
            env.datasets["test"],
            prediction_length=forecaster.prediction_length,
        )
        run_test(predictor, test_dataset)
Code example #4
File: train.py, Project: sahand68/gluon-ts
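A class-based runner: it loads datasets from the configured channels, copies the dataset's time granularity into the freq hyperparameter, trains, optionally tests, and serializes the predictor. Exceptions are currently re-raised unchanged (see the TODO).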
    def run(self):
        try:
            check_gpu_support()

            datasets = get_channels(self.paths).get_datasets()
            self.hyperparameters['freq'] = datasets.metadata.time_granularity

            estimator, predictor = self.run_train(datasets.train)

            if datasets.test is not None:
                self.run_test(datasets.test, estimator, predictor)

            predictor.serialize(self.paths.model)

        # TODO: do we want to handle GluonTS exceptions differently?
        except Exception as e:
            # logger.error(e)
            raise e
Code example #5
File: train.py, Project: hanifmahboobi/gluon-ts
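Essentially example #1 without the version logging, again preparing the test dataset to the forecaster's prediction_length before run_test.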
def run_train_and_test(
        env: TrainEnv, forecaster_type: Type[Union[Estimator,
                                                   Predictor]]) -> None:
    check_gpu_support()

    forecaster = forecaster_type.from_hyperparameters(**env.hyperparameters)

    if isinstance(forecaster, Predictor):
        predictor = forecaster
    else:
        predictor = run_train(forecaster, env.datasets["train"])

    predictor.serialize(env.path.model)

    if "test" in env.datasets:
        test_dataset = prepare_test_dataset(
            env.datasets["test"],
            prediction_length=forecaster.prediction_length,
        )
        run_test(env, predictor, test_dataset)
Code example #6
File: train.py, Project: szhengac/gluon-ts
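This version constructs the forecaster with from_inputs instead of from_hyperparameters, logs which of the train/validation/test channels are present, and forwards an optional validation dataset to run_train.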
def run_train_and_test(
        env: TrainEnv, forecaster_type: Type[Union[Estimator,
                                                   Predictor]]) -> None:
    check_gpu_support()

    # train_stats = calculate_dataset_statistics(env.datasets["train"])
    # log_metric("train_dataset_stats", train_stats)

    forecaster_fq_name = fqname_for(forecaster_type)
    forecaster_version = forecaster_type.__version__

    logger.info(f"Using gluonts v{gluonts.__version__}")
    logger.info(f"Using forecaster {forecaster_fq_name} v{forecaster_version}")

    forecaster = forecaster_type.from_inputs(env.datasets["train"],
                                             **env.hyperparameters)

    logger.info(
        f"The forecaster can be reconstructed with the following expression: "
        f"{dump_code(forecaster)}")

    logger.info(
        "Using the following data channels: "
        f"{', '.join(name for name in ['train', 'validation', 'test'] if name in env.datasets)}"
    )

    if isinstance(forecaster, Predictor):
        predictor = forecaster
    else:
        predictor = run_train(
            forecaster=forecaster,
            train_dataset=env.datasets["train"],
            validation_dataset=env.datasets.get("validation"),
            hyperparameters=env.hyperparameters,
        )

    predictor.serialize(env.path.model)

    if "test" in env.datasets:
        run_test(env, predictor, env.datasets["test"])
Code example #7
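In the test suite, check_gpu_support() doubles as a guard: the synthetic-GP jitter test returns early when parameterized with a GPU context but no GPU is actually available. The test then draws noiseless samples from a zero-mean GP with an RBF kernel and asserts the predictive samples contain no NaNs.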
def test_jitter_synthetic_gp(jitter_method, float_type, ctx) -> None:
    # TODO: Enable GPU tests on Jenkins
    if ctx == mx.Context("gpu") and not check_gpu_support():
        return
    # Initialize problem parameters
    batch_size = 1
    prediction_length = 50
    context_length = 5
    num_samples = 3

    # Initialize the test inputs at which the Gaussian process is evaluated
    lb = -5
    ub = 5
    dx = (ub - lb) / (prediction_length - 1)
    x_test = nd.arange(lb, ub + dx, dx, ctx=ctx,
                       dtype=float_type).reshape(-1, 1)
    x_test = nd.tile(x_test, reps=(batch_size, 1, 1))

    # Define the GP hyperparameters
    amplitude = nd.ones((batch_size, 1, 1), ctx=ctx, dtype=float_type)
    length_scale = math.sqrt(0.4) * nd.ones_like(amplitude)
    sigma = math.sqrt(1e-5) * nd.ones_like(amplitude)

    # Instantiate the desired kernel object
    rbf_kernel = RBFKernel(amplitude, length_scale)

    # Set up a zero-mean Gaussian process with the RBF kernel
    gp = GaussianProcess(
        sigma=sigma,
        kernel=rbf_kernel,
        prediction_length=prediction_length,
        context_length=context_length,
        num_samples=num_samples,
        ctx=ctx,
        float_type=float_type,
        jitter_method=jitter_method,
        sample_noise=False,  # Returns sample without noise
    )

    # Generate training set on subset of interval using the sine function
    x_train = nd.array([-4, -3, -2, -1, 1], ctx=ctx,
                       dtype=float_type).reshape(context_length, 1)
    x_train = nd.tile(x_train, reps=(batch_size, 1, 1))
    y_train = nd.sin(x_train.squeeze(axis=2))

    # Predict the exact GP posterior mean and covariance, using the same fixed hyperparameters
    samples, predictive_mean, predictive_std = gp.exact_inference(
        x_train, y_train, x_test)

    assert (np.sum(np.isnan(
        samples.asnumpy())) == 0), "NaNs in predictive samples!"
Code example #8
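The same guard precedes a unit test of the jittered Cholesky factorizations (the "eig" and "iter" methods), which asserts the resulting factor is free of NaNs.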
def test_jitter_unit(jitter_method, float_type, ctx) -> None:
    # TODO: Enable GPU tests on Jenkins
    if ctx == mx.Context("gpu") and not check_gpu_support():
        return
    matrix = nd.array([[[1, 2], [3, 4]], [[10, 100], [-21.5, 41]]],
                      ctx=ctx,
                      dtype=float_type)
    F = mx.nd
    num_data_points = matrix.shape[1]
    if jitter_method == "eig":
        L = jitter_cholesky_eig(F, matrix, num_data_points, ctx, float_type)
    elif jitter_method == "iter":
        L = jitter_cholesky(F, matrix, num_data_points, ctx, float_type)
    assert np.sum(np.isnan(L.asnumpy())) == 0, "NaNs in Cholesky factor!"
Code example #9
File: serve.py, Project: sahand68/gluon-ts
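A self-contained Flask scoring service: GPU support is checked at construction time, a Predictor is deserialized from the model path, and the /ping, /execution-parameters, and /invocations routes are registered before the gunicorn-style bind options are stored.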
    def __init__(
        self,
        paths: PathsEnvironment = PathsEnvironment(),
        port: Optional[int] = None,
        workers: Optional[int] = None,
    ) -> None:
        app = flask.Flask('GluonTS scoring service')

        port = port if port else self.DEFAULT_PORT
        options = {
            "bind": f"0.0.0.0:{port}",
            "workers": workers if workers else number_of_workers(app),
            # "post_worker_init": ScoringService.post_worker_init,
            "timeout": 100,
        }

        check_gpu_support()

        predictor = Predictor.deserialize(paths.model)

        @app.route('/ping')
        def ping():
            return ''

        @app.route("/execution-parameters")
        def execution_parameters():
            return flask.jsonify({
                'MaxConcurrentTransforms': options['workers'],
                'BatchStrategy': 'SINGLE_RECORD',
                'MaxPayloadInMB': SAGEMAKER_MAX_PAYLOAD_IN_MB,
            })

        @app.route('/invocations', methods=['POST'])
        def invocations() -> Any:
            try:
                payload = flask.request.json
                configuration = payload['configuration']
                if 'num_samples' in configuration:
                    configuration['num_eval_samples'] = configuration[
                        'num_samples']
                config = ForecastConfig.parse_obj(configuration)

                def process(forecast: Forecast) -> dict:
                    prediction = {}
                    if 'samples' in config.output_types:
                        if isinstance(forecast, SampleForecast):
                            prediction['samples'] = forecast.samples.tolist()
                        else:
                            prediction['samples'] = []
                    if 'mean' in config.output_types:
                        prediction['mean'] = forecast.mean.tolist()
                    if 'quantiles' in config.output_types:
                        prediction['quantiles'] = {
                            q: forecast.quantile(q).tolist()
                            for q in config.quantiles
                        }
                    return prediction

                dataset = ListDataset(payload['instances'], predictor.freq)

                predictions = list(
                    map(
                        process,
                        predictor.predict(dataset,
                                          no_samples=config.num_eval_samples),
                    ))
                return flask.jsonify(predictions=predictions)

            except Exception as error:
                return flask.jsonify(error=traceback.format_exc()), 500

        # NOTE: Stop Flask application when SIGTERM is received as a result
        # of "docker stop" command.
        signal.signal(signal.SIGTERM, self.stop)

        self.options = options
        self.application = app

        super(DefaultShell, self).__init__()
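Common to all nine examples is a single call to check_gpu_support() at startup, before any training, serving, or test logic touches an MXNet context. Below is a minimal sketch of that pattern; the import path is an assumption, since the function has moved between modules across gluonts releases (gluonts.support.util in older versions, gluonts.mx.util in later ones).

import mxnet as mx

# Import path is an assumption and varies by gluonts version.
from gluonts.support.util import check_gpu_support


def pick_context() -> mx.Context:
    # check_gpu_support() returns a truthy value when MXNet can use a GPU,
    # which is how examples #7 and #8 use it to skip GPU-only tests.
    return mx.gpu() if check_gpu_support() else mx.cpu()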