Example #1
import pickle

def _worker_loop(todoq, doneq, wid, proc_class, *args, **kwargs):
    # Single-device setup: each worker pins itself to CUDA device 0.
    # _set_cuda is a pytext helper assumed to be in scope.
    device_id = 0
    world_size = 1
    _set_cuda(True, device_id, world_size)

    # Build the processor (e.g. a custom batch-processor class) with the
    # arguments forwarded from the parent process.
    processor = proc_class(*args, **kwargs)

    # Drain the input queue, pickling each processed batch onto the
    # output queue.
    for raw_batch in todoq:
        processed_batch = processor.process_batch(raw_batch)
        doneq.put(pickle.dumps(processed_batch))

    doneq.close()
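
For context, here is a minimal driver sketch for this worker. Everything beyond _worker_loop itself is invented for illustration: SENTINEL, run_worker, and EchoProcessor are hypothetical names, todoq is assumed to be iterable (the raw multiprocessing.Queue is wrapped with iter(q.get, SENTINEL) to make it so), and the hard-coded _set_cuda(True, ...) in the worker presumes a CUDA-capable machine with pytext available.

import multiprocessing as mp
import pickle

SENTINEL = None  # illustrative end-of-work marker

class EchoProcessor:
    """Hypothetical stand-in for the processor class handed to workers."""
    def process_batch(self, batch):
        return [item * 2 for item in batch]

def run_worker(q_in, q_out, wid, proc_class):
    # _worker_loop iterates over its input, so wrap the raw queue in a
    # callable-iterator that stops once the sentinel is dequeued.
    _worker_loop(iter(q_in.get, SENTINEL), q_out, wid, proc_class)

if __name__ == "__main__":
    todoq, doneq = mp.Queue(), mp.Queue()
    worker = mp.Process(target=run_worker, args=(todoq, doneq, 0, EchoProcessor))
    worker.start()
    for batch in ([1, 2], [3, 4]):
        todoq.put(batch)
    todoq.put(SENTINEL)
    for _ in range(2):
        print(pickle.loads(doneq.get()))  # [2, 4] then [6, 8]
    worker.join()

Because a single worker drains the queue in order, results come back in submission order; with several workers one would tag batches with indices and reorder on the way out.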
Example #2
import logging
from typing import Optional

# DataSource, CAFFE2_DB_TYPE, _set_cuda, load, and create_predictor are
# pytext internals assumed to be in scope.

def batch_predict_caffe2_model(
    pytext_model_file: str,
    caffe2_model_file: str,
    db_type: str = CAFFE2_DB_TYPE,
    data_source: Optional[DataSource] = None,
    use_cuda=False,
):
    logging.info(f"Loading data processing config from {pytext_model_file}")

    _set_cuda(use_cuda)

    # Recover the task and training config saved with the pytext model,
    # then fall back to the task's own data source when none is supplied.
    task, train_config, _ = load(pytext_model_file)

    data_source = data_source or task.data.data_source
    logging.info("Loading Caffe2 model")
    predictor = create_predictor(train_config, caffe2_model_file, db_type, task)
    logging.info(f"Model loaded, start testing")
    predictions = [predictor(example) for example in data_source.test]
    return predictions
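
A hypothetical invocation, with placeholder file paths; the return value is a list with one prediction per example in data_source.test:

# Placeholder paths to a trained pytext snapshot and its Caffe2 export.
predictions = batch_predict_caffe2_model(
    pytext_model_file="/tmp/model.pt",
    caffe2_model_file="/tmp/model.caffe2",
    use_cuda=False,
)
print(len(predictions))  # one entry per test example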
Example #3
import logging
from typing import Optional

# DataSource, NewTask, PyTextConfig, CAFFE2_DB_TYPE, PathManager, _set_cuda,
# load, and create_predictor are pytext internals assumed to be in scope.

def batch_predict_caffe2_model(
    pytext_model_file: str,
    caffe2_model_file: str,
    db_type: str = CAFFE2_DB_TYPE,
    data_source: Optional[DataSource] = None,
    use_cuda=False,
    task: Optional[NewTask] = None,
    train_config: Optional[PyTextConfig] = None,
    cache_size: int = 0,
):
    """
    Gets predictions from a Caffe2 model for a batch of examples.

    Args:
        pytext_model_file: Path to pytext model file (required if task and
            train_config are not specified)
        caffe2_model_file: Path to caffe2 model file
        db_type: DB type to use for caffe2
        data_source: Data source for test examples
        use_cuda: Whether to turn on cuda processing
        task: The pytext task object
        train_config: The pytext training config
        cache_size: The LRU cache size to use for prediction. 0 = no cache,
            -1 = boundless cache, [1, inf) = size of cache
    """
    logging.info(f"Loading data processing config from {pytext_model_file}")

    _set_cuda(use_cuda)
    # Load the saved task and training config only when the caller hasn't
    # supplied them already.
    if task is None or train_config is None:
        task, train_config, _ = load(pytext_model_file)

    data_source = data_source or task.data.data_source
    logging.info(f"Loading Caffe2 model: {caffe2_model_file}")
    predictor = create_predictor(
        train_config,
        PathManager.get_local_path(caffe2_model_file),
        db_type,
        task,
        cache_size,
    )
    logging.info(f"Model loaded, start testing")
    predictions = [predictor(example) for example in data_source.test]
    return predictions
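
The extra parameters in this variant avoid redundant work: passing a preloaded task and train_config skips the load() call, and cache_size turns on an LRU cache over repeated examples. A hedged sketch under those assumptions, again with placeholder paths:

# Load once, then reuse the task/config across repeated prediction runs.
task, train_config, _ = load("/tmp/model.pt")
predictions = batch_predict_caffe2_model(
    pytext_model_file="/tmp/model.pt",  # not re-loaded when task/config are given
    caffe2_model_file="/tmp/model.caffe2",
    task=task,
    train_config=train_config,
    cache_size=-1,  # boundless cache, per the docstring
)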