# Example 1
    def test_doc_export_to_caffe2_with_logits(
        self,
        num_doc_classes,
        test_num_words,
        test_num_dict_feat,
        num_predictions,
        test_num_chars,
    ):
        for config in DOC_CONFIGS_WITH_EXPORT_LOGITS:
            config = self._get_config(DocClassificationTask.Config, config)
            metadata = self._get_metadata(num_doc_classes, 0)
            py_model = create_model(config.model, config.features, metadata)
            exporter = create_exporter(config.exporter, config.features,
                                       config.labels, metadata)

            with tempfile.NamedTemporaryFile(delete=False,
                                             suffix=".predictor") as pred_file:
                print(pred_file.name)
                output_names = exporter.export_to_caffe2(
                    py_model, pred_file.name)
                workspace.ResetWorkspace()
            pred_net = pe.prepare_prediction_net(pred_file.name,
                                                 CAFFE2_DB_TYPE)

            for _i in range(num_predictions):
                test_inputs = self._get_rand_input(
                    config.features,
                    BATCH_SIZE,
                    W_VOCAB_SIZE,
                    DICT_VOCAB_SIZE,
                    CHAR_VOCAB_SIZE,
                    test_num_words,
                    test_num_dict_feat,
                    test_num_chars,
                )
                self._feed_c2_input(
                    workspace,
                    test_inputs,
                    exporter.input_names,
                    metadata.feature_itos_map,
                )
                workspace.RunNetOnce(pred_net)
                c2_out = [
                    list(workspace.FetchBlob(o_name))
                    for o_name in output_names
                ]

                py_model.eval()
                py_outs = py_model(*test_inputs)
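                # The final output blob holds the raw logits; the earlier blobs
                # hold the log-softmax scores compared further below.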
                np.testing.assert_array_almost_equal(
                    py_outs.view(-1).detach().numpy(),
                    np.array(c2_out[-1]).flatten())

                # Apply log_softmax to match the exported predictor net, which
                # includes this step
                py_outs = F.log_softmax(py_outs, 1)
                np.testing.assert_array_almost_equal(
                    py_outs.view(-1).detach().numpy(),
                    np.array(c2_out[:-1]).flatten())
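
A minimal self-contained sketch of the numeric parity check used in the test
above, with random logits standing in for the model output and a copied array
standing in for the fetched Caffe2 blob (all names here are illustrative):

import numpy as np
import torch
import torch.nn.functional as F

torch.manual_seed(0)
logits = torch.randn(4, 3)                     # stand-in for py_model(*test_inputs)
py_scores = F.log_softmax(logits, dim=1)       # same post-processing as the exporter
c2_scores = py_scores.detach().numpy().copy()  # stand-in for workspace.FetchBlob output
np.testing.assert_array_almost_equal(
    py_scores.view(-1).detach().numpy(), c2_scores.flatten()
)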
# Example 2
    @classmethod
    def from_config(cls, task_config, metadata=None, model_state=None):
        """
        Create the task from config, and optionally load metadata/model_state
        This function will create components including :class:`~DataHandler`,
        :class:`~Trainer`, :class:`~Optimizer`, :class:`~Scheduler`,
        :class:`~MetricReporter`, :class:`~Exporter`, and wire them up.

        Args:
            task_config (Task.Config): the config of the current task
            metadata: saved global context of this task, e.g: vocabulary, will be
                generated by :class:`~DataHandler` if it's None
            model_state: saved model parameters, will be loaded into model when given
        """
        print("Task parameters:\n")
        pprint(config_to_json(type(task_config), task_config))
        featurizer = create_featurizer(task_config.featurizer, task_config.features)
        # load data
        data_handler = create_data_handler(
            task_config.data_handler,
            task_config.features,
            task_config.labels,
            featurizer=featurizer,
        )
        print("\nLoading data...")
        if metadata:
            data_handler.load_metadata(metadata)
        else:
            data_handler.init_metadata()

        metadata = data_handler.metadata

        model = create_model(task_config.model, task_config.features, metadata)
        if model_state:
            model.load_state_dict(model_state)
        if cuda_utils.CUDA_ENABLED:
            model = model.cuda()
        metric_reporter = create_metric_reporter(task_config.metric_reporter, metadata)
        optimizer = create_optimizer(task_config.optimizer, model)
        exporter = (
            create_exporter(
                task_config.exporter,
                task_config.features,
                task_config.labels,
                data_handler.metadata,
                task_config.model,
            )
            if task_config.exporter
            else None
        )
        return cls(
            trainer=create_trainer(task_config.trainer),
            data_handler=data_handler,
            model=model,
            metric_reporter=metric_reporter,
            optimizer=optimizer,
            lr_scheduler=Scheduler(
                optimizer, task_config.scheduler, metric_reporter.lower_is_better
            ),
            exporter=exporter,
        )
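
A hedged usage sketch of the factory above, wrapped in a helper so the call
shape is explicit. The config object itself is assumed to be a fully populated
DocClassificationTask.Config built elsewhere (for example, parsed from a JSON
config file):

def build_doc_task(doc_task_config):
    # `doc_task_config` must be a populated DocClassificationTask.Config.
    task = DocClassificationTask.from_config(doc_task_config)
    assert task.model is not None and task.data_handler is not None
    return task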
# Example 3
    @classmethod
    def from_config(cls, task_config, metadata=None, model_state=None):
        """
        Create the task from config, and optionally load metadata/model_state
        This function will create components including :class:`~DataHandler`,
        :class:`~Trainer`, :class:`~MetricReporter`,
        :class:`~Exporter`, and wire them up.

        Args:
            task_config (Task.Config): the config of the current task
            metadata: saved global context of this task, e.g: vocabulary, will be
                generated by :class:`~DataHandler` if it's None
            model_state: saved model parameters, will be loaded into model when given
        """
        if hasattr(task_config.labels, "target_prob"):
            assert task_config.labels.target_prob == isinstance(
                task_config.model.output_layer.loss,
                (
                    KLDivergenceBCELoss.Config,
                    KLDivergenceCELoss.Config,
                    SoftHardBCELoss.Config,
                ),
            ), "target_prob must be set to True for KD losses"
        featurizer = create_featurizer(task_config.featurizer,
                                       task_config.features)
        # load data
        data_handler = create_data_handler(
            task_config.data_handler,
            task_config.features,
            task_config.labels,
            featurizer=featurizer,
        )
        print("\nLoading data...")
        if metadata:
            data_handler.load_metadata(metadata)
        else:
            data_handler.init_metadata()

        metadata = data_handler.metadata

        model = create_model(task_config.model, task_config.features, metadata)
        if model_state:
            model.load_state_dict(model_state)
        if cuda.CUDA_ENABLED:
            model = model.cuda()
        metric_reporter = create_metric_reporter(task_config.metric_reporter,
                                                 metadata)
        exporter = (create_exporter(
            task_config.exporter,
            task_config.features,
            task_config.labels,
            data_handler.metadata,
            task_config.model,
        ) if task_config.exporter else None)
        return cls(
            trainer=create_trainer(task_config.trainer, model),
            data_handler=data_handler,
            model=model,
            metric_reporter=metric_reporter,
            exporter=exporter,
        )
# Example 4
    def test_wordblstm_export_to_caffe2(
        self,
        export_num_words,
        export_num_dict_feat,
        num_word_classes,
        test_num_words,
        test_num_dict_feat,
        num_predictions,
        test_num_chars,
    ):
        for WORD_CONFIG in WORD_CONFIGS:
            config = self._get_config(WordTaggingTask_Deprecated.Config,
                                      WORD_CONFIG)
            metadata = self._get_metadata(0, num_word_classes)
            py_model = create_model(config.model, config.features, metadata)
            exporter = create_exporter(config.exporter, config.features,
                                       config.labels, metadata)
            with tempfile.NamedTemporaryFile(
                    delete=False, suffix=".predictor") as pred_file:
                output_names = exporter.export_to_caffe2(
                    py_model, pred_file.name)
                workspace.ResetWorkspace()
            pred_net = pe.prepare_prediction_net(pred_file.name,
                                                 CAFFE2_DB_TYPE)
            for _i in range(num_predictions):
                test_inputs = self._get_rand_input(
                    config.features,
                    BATCH_SIZE,
                    W_VOCAB_SIZE,
                    DICT_VOCAB_SIZE,
                    CHAR_VOCAB_SIZE,
                    test_num_words,
                    test_num_dict_feat,
                    test_num_chars,
                )
                self._feed_c2_input(
                    workspace,
                    test_inputs,
                    exporter.input_names,
                    metadata.feature_itos_map,
                )
                workspace.RunNetOnce(pred_net)
                c2_out = [
                    list(workspace.FetchBlob(o_name))
                    for o_name in output_names
                ]
                py_model.eval()
                py_outs = py_model(*test_inputs)
                context = {SEQ_LENS: test_inputs[-1]}
                target = None
                pred, score = py_model.get_pred(py_outs, target, context)

                # Transpose the word scores to match the layout of the
                # flattened Caffe2 output.
                np.testing.assert_array_almost_equal(
                    torch.transpose(score, 1, 2).contiguous().view(-1).detach().numpy(),
                    np.array(c2_out).flatten(),
                )
# Example 5
    @classmethod
    def from_config(cls, task_config, metadata=None, model_state=None):
        print("Task parameters:\n")
        pprint(config_to_json(type(task_config), task_config))

        data_handlers = OrderedDict()
        exporters = OrderedDict()
        for name, task in task_config.tasks.items():
            featurizer = create_featurizer(task.featurizer, task.features)
            data_handlers[name] = create_data_handler(task.data_handler,
                                                      task.features,
                                                      task.labels,
                                                      featurizer=featurizer)
        data_handler = DisjointMultitaskDataHandler(task_config.data_handler,
                                                    data_handlers)
        print("\nLoading data...")
        if metadata:
            data_handler.load_metadata(metadata)
        else:
            data_handler.init_metadata()
        metadata = data_handler.metadata
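        # Build one exporter per subtask, keyed by task name; tasks without an
        # exporter config map to None.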
        exporters = {
            name: (create_exporter(
                task.exporter,
                task.features,
                task.labels,
                data_handler.data_handlers[name].metadata,
                task.model,
            ) if task.exporter else None)
            for name, task in task_config.tasks.items()
        }
        metric_reporter = DisjointMultitaskMetricReporter(
            OrderedDict(
                (name,
                 create_metric_reporter(task.metric_reporter, metadata[name]))
                for name, task in task_config.tasks.items()),
            target_task_name=task_config.metric_reporter.target_task_name,
        )

        model = DisjointMultitaskModel(
            OrderedDict(
                (name, create_model(task.model, task.features, metadata[name]))
                for name, task in task_config.tasks.items()))
        if model_state:
            model.load_state_dict(model_state)
        if cuda_utils.CUDA_ENABLED:
            model = model.cuda()

        optimizers = create_optimizer(model, task_config.optimizer)
        return cls(
            exporters=exporters,
            trainer=create_trainer(task_config.trainer),
            data_handler=data_handler,
            model=model,
            metric_reporter=metric_reporter,
            optimizers=optimizers,
            lr_scheduler=Scheduler(optimizers, task_config.scheduler,
                                   metric_reporter.lower_is_better),
        )
# Example 6
def export_saved_model_to_caffe2(saved_model_path: str,
                                 export_caffe2_path: str) -> None:
    task, train_config = load(saved_model_path)
    if task.exporter is None:
        TaskType = type(train_config.task)
        ExporterConfigType = get_type_hints(TaskType)["exporter"].__args__[0]
        task.exporter = create_exporter(
            ExporterConfigType(),
            train_config.task.features,
            train_config.task.labels,
            task.data_handler.metadata,
        )
    task.export(task.model, export_caffe2_path)
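
A usage sketch for the helper above. Both paths are hypothetical placeholders;
the snapshot must have been produced by PyText (the counterpart of the load()
call above):

if __name__ == "__main__":
    export_saved_model_to_caffe2("/tmp/model.snapshot", "/tmp/model.predictor")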
# Example 7
    def test_seq_nn_export_to_caffe2(
        self,
        export_num_words,
        export_num_dict_feat,
        num_doc_classes,
        num_word_classes,
        test_num_words,
        test_num_dict_feat,
        num_predictions,
        test_num_chars,
        test_num_seq,
    ):
        config = self._get_config(SeqNNTask_Deprecated.Config, SEQ_NN_CONFIG)
        metadata = self._get_seq_metadata(num_doc_classes, 0)
        py_model = create_model(config.model, config.features, metadata)
        exporter = create_exporter(
            config.exporter, config.features, config.labels, metadata
        )
        with tempfile.NamedTemporaryFile(
            delete=False, suffix=".predictor"
        ) as pred_file:
            print(pred_file.name)
            output_names = exporter.export_to_caffe2(py_model, pred_file.name)
            workspace.ResetWorkspace()

        pred_net = pe.prepare_prediction_net(pred_file.name, CAFFE2_DB_TYPE)
        for _i in range(num_predictions):
            test_inputs = self._get_seq_nn_rand_input(
                config.features,
                BATCH_SIZE,
                W_VOCAB_SIZE,
                DICT_VOCAB_SIZE,
                CHAR_VOCAB_SIZE,
                test_num_words,
                test_num_dict_feat,
                test_num_chars,
                test_num_seq,
            )
            self._feed_c2_input(
                workspace, test_inputs, exporter.input_names, metadata.feature_itos_map
            )
            workspace.RunNetOnce(pred_net)
            c2_out = [list(workspace.FetchBlob(o_name)) for o_name in output_names]

            py_model.eval()
            py_outs = py_model(*test_inputs)
            # Apply log_softmax to match the exported predictor net, which
            # includes this step
            py_outs = F.log_softmax(py_outs, 1)
            np.testing.assert_array_almost_equal(
                py_outs.view(-1).detach().numpy(), np.array(c2_out).flatten()
            )
# Example 8
def export_saved_model_to_caffe2(
    saved_model_path: str, export_caffe2_path: str, output_onnx_path: str = None
) -> None:
    task, train_config, _training_state = load(saved_model_path)
    if hasattr(task, "exporter") and task.exporter is None:
        TaskType = type(train_config.task)
        ExporterConfigType = get_type_hints(TaskType)["exporter"].__args__[0]
        task.exporter = create_exporter(
            ExporterConfigType(),
            train_config.task.features,
            train_config.task.labels,
            task.data_handler.metadata,
        )
    task.export(task.model, export_caffe2_path, export_onnx_path=output_onnx_path)
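
The same call shape works for the ONNX-enabled variant above; all paths are
hypothetical placeholders:

if __name__ == "__main__":
    export_saved_model_to_caffe2(
        "/tmp/model.snapshot",
        "/tmp/model.predictor",
        output_onnx_path="/tmp/model.onnx",
    )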
# Example 9
    @classmethod
    def from_config(
        cls,
        task_config: Config,
        metadata=None,
        model_state=None,
        tensorizers=None,
        rank=0,
        world_size=1,
    ):
        print("Task parameters:\n")
        pprint(config_to_json(type(task_config), task_config))

        data_handlers = OrderedDict()
        exporters = OrderedDict()
        for name, task in task_config.tasks.items():
            featurizer = create_featurizer(task.featurizer, task.features)
            data_handlers[name] = create_data_handler(
                task.data_handler, task.features, task.labels, featurizer=featurizer
            )
        data_handler = DisjointMultitaskDataHandler(
            task_config.data_handler,
            data_handlers,
            target_task_name=task_config.target_task_name,
        )
        print("\nLoading data...")
        if metadata:
            data_handler.load_metadata(metadata)
        else:
            data_handler.init_metadata()

        metadata = data_handler.metadata
        exporters = {
            name: (
                create_exporter(
                    task.exporter,
                    task.features,
                    task.labels,
                    data_handler.data_handlers[name].metadata,
                    task.model,
                )
                if task.exporter
                else None
            )
            for name, task in task_config.tasks.items()
        }
        task_weights = {
            task_name: task_config.task_weights.get(task_name, 1)
            for task_name in task_config.tasks.keys()
        }
        metric_reporter = DisjointMultitaskMetricReporter(
            OrderedDict(
                (name, create_metric_reporter(task.metric_reporter, metadata[name]))
                for name, task in task_config.tasks.items()
            ),
            loss_weights=task_weights,
            target_task_name=task_config.target_task_name,
            use_subtask_select_metric=(
                task_config.metric_reporter.use_subtask_select_metric
            ),
        )
        model = DisjointMultitaskModel(
            OrderedDict(
                (name, create_model(task.model, task.features, metadata[name]))
                for name, task in task_config.tasks.items()
            ),
            loss_weights=task_weights,
        )
        if model_state:
            model.load_state_dict(model_state)
        if cuda.CUDA_ENABLED:
            model = model.cuda()

        return cls(
            target_task_name=task_config.target_task_name,
            exporters=exporters,
            trainer=create_trainer(task_config.trainer, model),
            data_handler=data_handler,
            model=model,
            metric_reporter=metric_reporter,
        )
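
A minimal self-contained illustration of the task-weight defaulting used in
the example above (all names here are made up): every task gets weight 1
unless the config overrides it.

configured_weights = {"ner": 2.0}      # stand-in for task_config.task_weights
task_names = ["ner", "intent", "doc"]  # stand-in for task_config.tasks.keys()
task_weights = {name: configured_weights.get(name, 1) for name in task_names}
assert task_weights == {"ner": 2.0, "intent": 1, "doc": 1}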
# Example 10
    def test_joint_export_to_caffe2(
        self,
        export_num_words,
        export_num_dict_feat,
        num_doc_classes,
        num_word_classes,
        test_num_words,
        test_num_dict_feat,
        num_predictions,
        test_num_chars,
    ):
        config = self._get_config(JointTextTask.Config, JOINT_CONFIG)
        metadata = self._get_metadata(num_doc_classes, num_word_classes)
        py_model = create_model(config.model, config.features, metadata)
        exporter = create_exporter(config.exporter, config.features,
                                   config.labels, metadata)
        with tempfile.NamedTemporaryFile(
                delete=False, suffix=".predictor") as pred_file:
            exporter.export_to_caffe2(py_model, pred_file.name)
            workspace.ResetWorkspace()

        pred_net = pe.prepare_prediction_net(pred_file.name, CAFFE2_DB_TYPE)

        for _i in range(num_predictions):
            test_inputs = self._get_rand_input(
                config.features,
                BATCH_SIZE,
                W_VOCAB_SIZE,
                DICT_VOCAB_SIZE,
                CHAR_VOCAB_SIZE,
                test_num_words,
                test_num_dict_feat,
                test_num_chars,
            )
            self._feed_c2_input(workspace, test_inputs, exporter.input_names,
                                metadata.feature_itos_map)
            workspace.RunNetOnce(pred_net)
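            # The joint exporter writes one blob per class, named
            # "<prefix>:<class_name>" for each of the doc and word heads.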
            doc_output_names = [
                "{}:{}".format("doc_scores", class_name)
                for class_name in metadata.label_names[0]
            ]
            word_output_names = [
                "{}:{}".format("word_scores", class_name)
                for class_name in metadata.label_names[1]
            ]

            py_model.eval()
            logits = py_model(*test_inputs)
            context = {SEQ_LENS: test_inputs[-1]}
            target = None
            (d_pred, w_pred), (d_score, w_score) = py_model.get_pred(
                logits, target, context)

            c2_doc_out = []
            for o_name in doc_output_names:
                c2_doc_out.extend(list(workspace.FetchBlob(o_name)))
            np.testing.assert_array_almost_equal(
                d_score.view(-1).detach().numpy(),
                np.array(c2_doc_out).flatten())

            c2_word_out = []
            for o_name in word_output_names:
                c2_word_out.extend(list(workspace.FetchBlob(o_name)))

            # Transpose the word scores to match the layout of the
            # flattened Caffe2 output.
            np.testing.assert_array_almost_equal(
                torch.transpose(w_score, 1, 2).contiguous().view(-1).detach().numpy(),
                np.array(c2_word_out).flatten(),
            )