Code example #1
File: workflow.py Project: tmeng7777/pytext
def get_logits(
    snapshot_path: str,
    use_cuda_if_available: bool,
    output_path: Optional[str] = None,
    test_path: Optional[str] = None,
    field_names: Optional[List[str]] = None,
):
    _set_cuda(use_cuda_if_available)
    task, train_config = load(snapshot_path)
    print(f"Successfully loaded model from {snapshot_path}")
    if isinstance(task, NewTask):
        task.model.eval()
        data_source = _get_data_source(
            test_path,
            getattr(train_config.task.data, "source", None),
            field_names,
            task,
        )
        task.data.batcher = Batcher()
        task.data.sort_key = None
        batches = task.data.batches(Stage.TEST, data_source=data_source)
        results = []
        for (_, tensor_dict) in batches:
            model_inputs = task.model.arrange_model_inputs(tensor_dict)
            model_outputs = task.model(*model_inputs)
            MetricReporter.aggregate_data(results, model_outputs)
        with open(output_path, "w", encoding="utf-8") as fout:
            for row in results:
                fout.write(f"{row}\n")
Code example #2
File: workflow.py Project: aliarain/pytext
def get_logits(
    snapshot_path: str,
    use_cuda_if_available: bool,
    output_path: Optional[str] = None,
    test_path: Optional[str] = None,
    field_names: Optional[List[str]] = None,
    dump_raw_input: bool = False,
    batch_size: int = 16,
    ndigits_precision: int = 0,
    output_columns: Optional[List[int]] = None,
    use_gzip: bool = False,
    device_id: int = 0,
):
    _set_cuda(use_cuda_if_available, device_id)
    task, train_config, _training_state = load(snapshot_path)
    print(f"Successfully loaded model from {snapshot_path}")
    print(f"Model on GPU? {next(task.model.parameters()).is_cuda}")

    if isinstance(task, NewTask):
        task.model.eval()
        data_source = _get_data_source(test_path, train_config.task.data,
                                       field_names, task)
        task.data.batcher = Batcher(test_batch_size=batch_size)
        task.data.sort_key = None
        batches = task.data.batches(Stage.TEST, data_source=data_source)

        mp = torch.multiprocessing.get_context("spawn")

        with torch.no_grad():
            results = mp.Queue()
            logits_writer = LogitsWriter(results, output_path, use_gzip,
                                         output_columns, ndigits_precision)
            logits_writer_ctx = torch.multiprocessing.spawn(logits_writer.run,
                                                            join=False)

            for (raw_batch, tensor_dict) in batches:
                raw_input_tuple = (dict_zip(*raw_batch, value_only=True)
                                   if dump_raw_input else ())

                model_inputs = task.model.arrange_model_inputs(tensor_dict)
                model_outputs = task.model(*model_inputs)

                results.put((raw_input_tuple, model_outputs))

            results.put((None, None))  # sentinel: tells the writer process to stop
            results.close()
            results.join_thread()
            logits_writer_ctx.join()
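This variant moves writing into a separate process: the main loop only produces (raw_input_tuple, model_outputs) pairs onto a multiprocessing queue, and the spawned LogitsWriter consumes them until the (None, None) sentinel arrives. LogitsWriter's implementation is not shown in the source; a consumer compatible with this protocol could look roughly like the following sketch (names and formatting are assumptions):

def writer_loop(process_index, results_queue, output_path):
    # torch.multiprocessing.spawn passes the process index as the first
    # argument; this sketch only assumes the queue protocol used by the
    # producer above, not pytext's actual LogitsWriter.
    with open(output_path, "w", encoding="utf-8") as fout:
        while True:
            raw_input_tuple, model_outputs = results_queue.get()
            if raw_input_tuple is None and model_outputs is None:
                break  # sentinel: producer is done
            fout.write(f"{raw_input_tuple}\t{model_outputs}\n")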
Code example #3
def get_logits(
    snapshot_path: str,
    use_cuda_if_available: bool,
    output_path: Optional[str] = None,
    test_path: Optional[str] = None,
    field_names: Optional[List[str]] = None,
    dump_raw_input: bool = False,
):
    _set_cuda(use_cuda_if_available)
    task, train_config, _training_state = load(snapshot_path)
    print(f"Successfully loaded model from {snapshot_path}")
    print(f"Model on GPU? {next(task.model.parameters()).is_cuda}")

    if isinstance(task, NewTask):
        task.model.eval()
        data_source = _get_data_source(
            test_path, train_config.task.data, field_names, task
        )
        task.data.batcher = Batcher()
        task.data.sort_key = None
        batches = task.data.batches(Stage.TEST, data_source=data_source)

        with PathManager.open(
            output_path, "w", encoding="utf-8"
        ) as fout, torch.no_grad():
            for (raw_batch, tensor_dict) in batches:
                raw_input_tuple = (
                    dict_zip(*raw_batch, value_only=True) if dump_raw_input else ()
                )
                model_inputs = task.model.arrange_model_inputs(tensor_dict)
                model_outputs = task.model(*model_inputs)
                if isinstance(model_outputs, tuple):
                    model_outputs_tuple = tuple(m.tolist() for m in model_outputs)
                    for row in zip(*raw_input_tuple, *model_outputs_tuple):
                        dump_row = "\t".join(json.dumps(r) for r in row)
                        fout.write(f"{dump_row}\n")
                elif isinstance(model_outputs, torch.Tensor):
                    model_outputs_list = model_outputs.tolist()
                    for row in zip(*raw_input_tuple, model_outputs_list):
                        dump_row = "\t".join(json.dumps(r) for r in row)
                        fout.write(f"{dump_row}\n")
                else:
                    raise Exception(
                        "Expecting tuple or torchTensor types for model_outputs"
                    )
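Every output line here is a tab-separated row of JSON values: one column per raw-input field (when dump_raw_input is set), then one per output tensor. The format can therefore be read back column by column; a minimal reader (the file name is a placeholder):

import json

with open("/tmp/logits.txt", encoding="utf-8") as fin:
    for line in fin:
        # each tab-separated column was written with json.dumps,
        # so json.loads recovers it
        columns = [json.loads(col) for col in line.rstrip("\n").split("\t")]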
Code example #4
def get_logits(
    snapshot_path: str,
    use_cuda_if_available: bool,
    output_path: Optional[str] = None,
    test_path: Optional[str] = None,
    field_names: Optional[List[str]] = None,
):
    _set_cuda(use_cuda_if_available)
    task, train_config = load(snapshot_path)
    print(f"Successfully loaded model from {snapshot_path}")
    print(f"Model on GPU? {next(task.model.parameters()).is_cuda}")
    if isinstance(task, NewTask):
        task.model.eval()
        data_source = _get_data_source(
            test_path,
            getattr(train_config.task.data, "source", None),
            field_names,
            task,
        )
        task.data.batcher = Batcher()
        task.data.sort_key = None
        batches = task.data.batches(Stage.TEST, data_source=data_source)

        with open(output_path, "w", encoding="utf-8") as fout, torch.no_grad():
            for (_, tensor_dict) in batches:
                model_inputs = task.model.arrange_model_inputs(tensor_dict)
                model_outputs = task.model(*model_inputs)
                if isinstance(model_outputs, tuple):
                    model_outputs_list = [m.tolist() for m in model_outputs]
                    for row in zip(*model_outputs_list):
                        # row is a tuple of lists
                        dump_row = "\t".join(json.dumps(r) for r in row)
                        fout.write(f"{dump_row}\n")
                elif isinstance(model_outputs, torch.Tensor):
                    model_outputs_list = model_outputs.tolist()
                    for row in model_outputs_list:
                        # iterate rows directly; zip() here would wrap each
                        # row in a 1-tuple and double-nest the JSON output
                        fout.write(f"{json.dumps(row)}\n")
                else:
                    raise Exception(
                        "Expecting tuple or torchTensor types for model_outputs"
                    )
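In the tuple branch, zip(*model_outputs_list) transposes the per-tensor row lists into per-example rows, so each written line covers one example across all encoders. A toy illustration with made-up numbers:

a = [[0.1, 0.2], [0.3, 0.4]]  # encoder 1: logits for two examples
b = [[0.5], [0.6]]            # encoder 2: logits for the same two examples
rows = list(zip(a, b))        # same as list(zip(*[a, b]))
# rows == [([0.1, 0.2], [0.5]), ([0.3, 0.4], [0.6])]
# -> one line per example, one JSON column per encoder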
Code example #5
File: workflow.py Project: xiaohu4liu/pytext
def get_logits(
    snapshot_path: str,
    use_cuda_if_available: bool,
    output_path: Optional[str] = None,
    test_path: Optional[str] = None,
    field_names: Optional[List[str]] = None,
):
    _set_cuda(use_cuda_if_available)
    task, train_config = load(snapshot_path)
    if isinstance(task, NewTask):
        task.model.eval()
        data_source = _get_data_source(test_path, field_names, task)
        task.data.batcher = Batcher()
        batches = task.data.batches(Stage.TEST, data_source=data_source)
        results = []
        for batch in batches:
            model_inputs = task.model.arrange_model_inputs(batch)
            model_outputs = task.model(*model_inputs)
            MetricReporter.aggregate_data(results, model_outputs)
        with open(output_path, "w", encoding="utf-8") as fout:
            for row in results:
                fout.write(f"{row}\n")
Code example #6
File: workflow.py Project: liang-tool/pytext
def get_logits(
    snapshot_path: str,
    use_cuda_if_available: bool,
    output_path: Optional[str] = None,
    test_path: Optional[str] = None,
    field_names: Optional[List[str]] = None,
    dump_raw_input: bool = False,
    batch_size: int = 16,
    ndigits_precision: int = 0,
    output_columns: Optional[List[int]] = None,
    use_gzip: bool = False,
    device_id: int = 0,
    fp16: bool = False,
):
    _set_cuda(use_cuda_if_available, device_id)
    task, train_config, _training_state = load(snapshot_path)
    print(f"Successfully loaded model from {snapshot_path}")
    print(f"Model on GPU? {next(task.model.parameters()).is_cuda}")
    print(f"CUDA device id: {torch.cuda.current_device()}")

    if isinstance(task, NewTask):
        task.model.eval()

        if fp16:
            task.model.half()

        data_source = _get_data_source(test_path, train_config.task.data,
                                       field_names, task)
        task.data.batcher = Batcher(test_batch_size=batch_size)
        task.data.sort_key = None
        batches = task.data.batches(Stage.TEST, data_source=data_source)

        mp = torch.multiprocessing.get_context("spawn")

        with torch.no_grad():
            results = mp.SimpleQueue()
            logits_writer = LogitsWriter(results, output_path, use_gzip,
                                         ndigits_precision)
            logits_writer_ctx = torch.multiprocessing.spawn(logits_writer.run,
                                                            join=False)

            for (raw_batch, tensor_dict) in batches:
                raw_input_tuple = (dict_zip(*raw_batch, value_only=True)
                                   if dump_raw_input else ())

                model_inputs = task.model.arrange_model_inputs(tensor_dict)
                model_outputs = task.model(*model_inputs)

                # multi-encoder output
                if isinstance(model_outputs, tuple):
                    # prevent breaking behaviour in default case
                    output_columns = (range(len(model_outputs)) if
                                      not output_columns else output_columns)
                    model_outputs = tuple(m.tolist()
                                          for i, m in enumerate(model_outputs)
                                          if i in output_columns)
                # single encoder output
                elif isinstance(model_outputs, torch.Tensor):
                    model_outputs = model_outputs.tolist()
                else:
                    raise Exception(
                        "Expecting tuple or tensor types for model_outputs")

                results.put((raw_input_tuple, model_outputs))

            results.put((None, None))  # sentinel: tells the writer process to stop
            logits_writer_ctx.join()
            print(
                f"Finished logits generation for file {test_path} with output {output_path}"
            )
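The output_columns handling above can be read as a small standalone transform: default to every column, then keep only the requested ones. A sketch with an invented helper name:

import torch

def select_output_columns(model_outputs, output_columns=None):
    # Default to all columns so behaviour is unchanged when no filter
    # is given, mirroring the loop in example #6.
    cols = range(len(model_outputs)) if not output_columns else output_columns
    return tuple(m.tolist() for i, m in enumerate(model_outputs) if i in cols)

# Keep only the first encoder's logits:
select_output_columns((torch.zeros(2, 3), torch.ones(2, 3)), [0])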