Example #1
    async def process_element(self, input, job_id, job_args, request_id,
                              element_index) -> np.ndarray:
        image_path = input["image"]
        image_patch = input.get("patch", (0, 0, 1, 1))
        augmentations = input.get("augmentations", {})

        # Download image
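        # Paths that are not full URLs are resolved against the job's GCS input bucket.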
        if "http" not in image_path:
            image_bucket = job_args["input_bucket"]
            image_path = os.path.join(config.GCS_URL_PREFIX, image_bucket,
                                      image_path)
        image_bytes = await self.download_image(image_path)

        # Run inference
        with self.profiler(request_id, "inference_time"):
            model_output_dict = inference.run(
                image_bytes,
                image_patch,
                augmentations,
                self.input_format,
                self.pixel_mean,
                self.pixel_std,
                self.model,
            )

        with self.profiler(request_id, "flatten_time"):
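            # Flatten the (1, C, H, W) spatial embeddings into an (H*W, C) array:
            # one embedding vector per spatial location.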
            spatial_embeddings = next(iter(model_output_dict.values())).numpy()
            n, c, h, w = spatial_embeddings.shape
            assert n == 1
            return np.ascontiguousarray(
                spatial_embeddings.reshape((c, h * w)).T)
Example #2
def estimation(dataset,
               boot_index,
               model_params,
               model_params_grad,
               num_obs_samples,
               num_future_steps,
               category_tt_split,
               num_mc_samples,
               output_file,
               true_model_params=None):
    y, x = dataset
    y_complete = y.clone().detach()
    y_complete = y_complete[0:-num_future_steps]
    category_tt_split = 'session'
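    # Hold out the last num_future_steps observations for forecasting,
    # then split the remainder into train/test sets by session.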
    y, x, y_future, x_future = train_future_split(y, x, num_future_steps)
    y_train, y_test, test_inds = train_test_split(y.cpu(),
                                                  x.cpu(),
                                                  cat=category_tt_split)
    x = x.clone().detach()  #torch.tensor(x, dtype=dtype, device=device)
    y_train = y_train.clone().detach()  #torch.tensor(y_train, dtype=dtype, device=device)
    y_test = torch.tensor(y_test, dtype=dtype, device=device)
    test_inds = torch.tensor(test_inds, dtype=torch.long, device=device)
    y_future = y_future.clone().detach()  #torch.tensor(y_future, dtype=dtype, device=device)
    x_future = x_future.clone().detach()  #torch.tensor(x_future, dtype=dtype, device=device)

    y_train = torch.tensor(y, device=device)
    data = [y_train, x, y_test, test_inds, y_future, x_future, y_complete]

    model = LearningDynamicsModel(dim=dim)

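    # Per-bootstrap output directory with subfolders for model structs, data, and plots.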
    boot_output_file = output_file + '/' + str(boot_index)
    os.makedirs(boot_output_file)
    os.makedirs(boot_output_file + '/model_structs')
    os.makedirs(boot_output_file + '/data')
    os.makedirs(boot_output_file + '/plots')

    inference = Inference(
        data=data,
        model=model,
        model_params=model_params,
        model_params_grad=model_params_grad,
        savedir=boot_output_file,
        num_obs_samples=num_obs_samples,
        num_future_steps=num_future_steps,
        num_mc_samples=num_mc_samples,
        ppc_window=50,
        z_true=z_true,
        true_model_params=true_model_params)  # pass in just for figures

    opt_params = inference.run()
    torch.save(opt_params, boot_output_file + '/model_structs/opt_params.npy')
    torch.save(dataset, boot_output_file + '/data/dataset.npy')
    torch.save(model_params,
               boot_output_file + '/model_structs/model_params.npy')
    return opt_params
Example #3
    torch.save(model_params,
               output_file + '/model_structs/init_model_params.pth')

    model = LearningDynamicsModel(dim)
    inference = Inference(data,
                          model,
                          model_params,
                          model_params_grad,
                          savedir=output_file,
                          num_obs_samples=num_obs_samples,
                          num_future_steps=num_future_steps,
                          num_mc_samples=num_mc_samples,
                          ppc_window=ppc_window,
                          z_true=z_true,
                          true_model_params=None)  # pass in just for figures

    opt_params = inference.run()

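    # Re-evaluate the training objective with 100 Monte Carlo samples for model scoring.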
    final_loss = -inference.vi.forward_multiple_mcs(
        model_params,
        inference.train_data,
        inference.var_params,
        50,
        num_samples=100)  #/ float(inference.num_train)

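    # BIC with k = dim + 1 free parameters, normalized per observation:
    # (2 * loss + k * log(n)) / n, where n = T * num_obs_samples.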
    k = float(dim + 1)
    bic = (2 * final_loss.item() + k * np.log(T * num_obs_samples)) / float(
        T * num_obs_samples)

    np.savetxt(output_file + '/training_bic.txt', np.array([bic]))
    np.savetxt(output_file + '/training_elbo.txt',
               np.array([final_loss.item()]))
Example #4
import json
import logging
import logging.config

import configure as cfg
import inference
import lib.hwinfo.gpu as hwinfo
import lib.rabbitmq as rmq

logging.addLevelName(logging.WARNING, "WARN")
logging.addLevelName(logging.CRITICAL, "FATAL")
with open('./logger.json') as f:
    logging.config.dictConfig(json.load(f))

logger = logging.getLogger(__name__)
logger.info("start inference.executer...", extra={'job_uuid': 'test112233'})

modelName = rmq.GetQueueOrWaiting()['name']

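# Consume jobs from the model's queue: run inference on each job and ack the
# message only on success; an empty job ends the loop.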
for m, p, job in rmq.consume(modelName):
    if job:
        inference.loadModel(modelName)
        if inference.run(job):
            rmq.ack(m)
    else:
        break

rmq.close()