Example #1
    def run_debug_test(self,
                       model,
                       train,
                       batch_size,
                       state_dict=None,
                       input=None,
                       use_gpu=True):
        """
        # TODO: remove this from the final release version
        This test is for our debugging only for the case where
        embed_params=False
        """
        model.train(train)
        if state_dict is not None:
            model.load_state_dict(state_dict)

        # Either user specified input or random (deterministic) input
        if input is None:
            input = Variable(torch.randn(batch_size, 3, 224, 224),
                             requires_grad=True)
        if use_gpu:
            model, input = self.convert_cuda(model, input)

        onnxir, torch_out = do_export(model,
                                      input,
                                      export_params=self.embed_params,
                                      verbose=False)
        if isinstance(torch_out, torch.autograd.Variable):
            torch_out = (torch_out, )

        caffe2_out = run_embed_params(onnxir, model, input, state_dict,
                                      use_gpu)
        # Compare each PyTorch output against the corresponding Caffe2
        # output to three decimal places.
        for x, y in zip(torch_out, caffe2_out):
            np.testing.assert_almost_equal(x.data.cpu().numpy(), y, decimal=3)
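
For reference, a minimal sketch of how this helper might be invoked from a concrete test case. The class name TestCaffe2Backend, the use of torchvision's AlexNet, and the batch size are illustrative assumptions, not part of the example above.

import torch
import torchvision.models as models

# Hypothetical usage sketch: assumes run_debug_test is defined on an
# ONNX-to-Caffe2 comparison test class, called TestCaffe2Backend here.
class TestAlexNetDebug(TestCaffe2Backend):
    def test_alexnet_debug(self):
        # With input=None, run_debug_test generates its own random
        # (batch_size, 3, 224, 224) input.
        model = models.alexnet(pretrained=False)
        self.run_debug_test(model,
                            train=False,
                            batch_size=2,
                            use_gpu=torch.cuda.is_available())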
Example #2
    def _lstm_test(self, layers, bidirectional, initial_state, packed_sequence,
                   dropout):
        model = LstmFlatteningResult(RNN_INPUT_SIZE,
                                     RNN_HIDDEN_SIZE,
                                     layers,
                                     bidirectional=bidirectional,
                                     dropout=dropout)
        # packed_sequence: 0 = plain padded input, 1 = PackedSequence input,
        # 2 = PackedSequence input with batch-first layout
        if packed_sequence == 1:
            model = RnnModelWithPackedSequence(model, False)
        elif packed_sequence == 2:
            model = RnnModelWithPackedSequence(model, True)

        def make_input(batch_size):
            # pack_padded_sequence requires lengths sorted in decreasing order
            seq_lengths = np.random.randint(1,
                                            RNN_SEQUENCE_LENGTH + 1,
                                            size=batch_size)
            seq_lengths = sorted(map(int, seq_lengths), reverse=True)
            inputs = [
                Variable(torch.randn(l, RNN_INPUT_SIZE)) for l in seq_lengths
            ]
            inputs = rnn_utils.pad_sequence(inputs)
            if packed_sequence == 2:
                inputs = inputs.transpose(0, 1)
            inputs = [inputs]

            directions = 2 if bidirectional else 1

            if initial_state:
                h0 = Variable(
                    torch.randn(directions * layers, batch_size,
                                RNN_HIDDEN_SIZE))
                c0 = Variable(
                    torch.randn(directions * layers, batch_size,
                                RNN_HIDDEN_SIZE))
                inputs.append((h0, c0))
            if packed_sequence != 0:
                inputs.append(Variable(torch.IntTensor(seq_lengths)))
            if len(inputs) == 1:
                input = inputs[0]
            else:
                input = tuple(inputs)
            return input

        input = make_input(RNN_BATCH_SIZE)
        self.run_model_test(model,
                            train=False,
                            batch_size=RNN_BATCH_SIZE,
                            input=input,
                            use_gpu=False)

        # test that the model still runs with a different batch size
        onnxir, _ = do_export(model, input)
        other_input = make_input(RNN_BATCH_SIZE + 1)
        _ = run_embed_params(onnxir, model, other_input, use_gpu=False)
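
A sketch of a driver that sweeps this helper's parameter grid. The method name and the grid values are illustrative assumptions; only the argument order comes from _lstm_test above.

import itertools

    # Hypothetical driver, assumed to live on the same test class as
    # _lstm_test; the grid values below are illustrative.
    def test_lstm_variants(self):
        grid = itertools.product(
            [1, 2],         # layers
            [False, True],  # bidirectional
            [False, True],  # initial_state
            [0, 1, 2],      # packed_sequence
            [0.0],          # dropout
        )
        for layers, bidirectional, initial_state, packed_sequence, dropout in grid:
            self._lstm_test(layers, bidirectional, initial_state,
                            packed_sequence, dropout)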
Example #3
    def _elman_rnn_test(self, layers, nonlinearity, bidirectional,
                        initial_state, packed_sequence, dropout):
        model = nn.RNN(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE,
                       layers,
                       nonlinearity=nonlinearity,
                       bidirectional=bidirectional,
                       dropout=dropout)

        # packed_sequence: 0 = plain padded input, 1 = PackedSequence input,
        # 2 = PackedSequence input with batch-first layout
        if packed_sequence == 1:
            model = RnnModelWithPackedSequence(model, False)
        elif packed_sequence == 2:
            model = RnnModelWithPackedSequence(model, True)

        def make_input(batch_size):
            # pack_padded_sequence requires lengths sorted in decreasing order
            seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size)
            seq_lengths = sorted(map(int, seq_lengths), reverse=True)
            inputs = [Variable(torch.randn(l, RNN_INPUT_SIZE)) for l in seq_lengths]
            inputs = rnn_utils.pad_sequence(inputs)
            if packed_sequence == 2:
                inputs = inputs.transpose(0, 1)
            inputs = [inputs]

            directions = 2 if bidirectional else 1

            if initial_state:
                h0 = Variable(torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE))
                inputs.append(h0)
            if packed_sequence != 0:
                inputs.append(Variable(torch.IntTensor(seq_lengths)))
            if len(inputs) == 1:
                input = inputs[0]
            else:
                input = tuple(inputs)
            return input

        input = make_input(RNN_BATCH_SIZE)
        self.run_model_test(model, train=False, batch_size=RNN_BATCH_SIZE,
                            input=input, use_gpu=False, atol=1e-7)

        # test that the model still runs with a different batch size
        onnxir, _ = do_export(model, input)
        other_input = make_input(RNN_BATCH_SIZE + 1)
        _ = run_embed_params(onnxir, model, other_input, use_gpu=False)
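
Likewise, a single-case invocation sketch for the Elman RNN helper; the method name and parameter values are assumptions. Note that nn.RNN accepts only nonlinearity='tanh' or 'relu'.

    # Hypothetical single case, assumed to live on the same test class
    # as _elman_rnn_test.
    def test_elman_rnn_tanh_bidirectional(self):
        self._elman_rnn_test(layers=1,
                             nonlinearity='tanh',
                             bidirectional=True,
                             initial_state=False,
                             packed_sequence=0,
                             dropout=0.0)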