    def test_build_target_simple(self):
        loss = ExplodedLogitLoss()

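        # build_target should return a one-hot permutation matrix:
        # row i is hot at column order[i] - 1 (ranks are 1-based).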
        order = torch.tensor([3, 1, 2, 4], dtype=torch.long)
        expected = torch.tensor([[0., 0., 1., 0.], [1., 0., 0., 0.],
                                 [0., 1., 0., 0.], [0., 0., 0., 1.]],
                                dtype=torch.float64)

        actual = loss.build_target(order)
        self.assertTrue(
            torch.equal(actual, expected),
            "Building simple target matrix failed:\nActual:\n{0}\nExpected:\n{1}"
            .format(actual, expected))

    def test_simple_forward_pass_nll_top(self):
        loss = ExplodedLogitLoss(loss_type='nll', reduction='sum', top_n=3)

        scores = torch.tensor([1.2, 4.8, 0.2, 5.6, 7.4, 0.],
                              dtype=torch.float64)
        order = torch.tensor([6, 5, 3, 4, 2, 1], dtype=torch.long)

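        # Precomputed reference value; with top_n=3 only the top-3
        # ranked positions should contribute to the summed NLL loss.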
        loss_expected = torch.tensor(13.6171, dtype=torch.float64)
        loss_actual = loss(scores, order)
        self.assertTrue(
            torch.isclose(loss_actual, loss_expected, atol=1e-4),
            "Forward pass not valid: {0} != {1}".format(
                loss_actual, loss_expected))

    def test_batch_forward_pass_bce(self):
        loss = ExplodedLogitLoss(loss_type='bce', reduction='sum')

        scores = torch.tensor(
            [[1.2, 4.8, 0.2, 5.6, 7.4, 0.], [1.2, 4.8, 0.2, 5.6, 7.4, 0.]],
            dtype=torch.float64)
        order = torch.tensor([[6, 5, 3, 4, 2, 1], [6, 5, 3, 4, 2, 1]],
                             dtype=torch.long)

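        # The batch repeats the same sample twice, so with
        # reduction='sum' the loss is twice the single-sample value.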
        loss_expected = torch.tensor(17.9922 * 2, dtype=torch.float64)
        loss_actual = loss(scores, order)
        self.assertTrue(
            torch.isclose(loss_actual, loss_expected, atol=1e-4),
            "Forward pass not valid: {0} != {1}".format(
                loss_actual, loss_expected))

    def test_simple_backward_pass(self):
        loss = ExplodedLogitLoss(loss_type='bce', reduction='sum')

        scores = torch.tensor([1.2, 4.8, 0.2, 5.6, 7.4, 0.],
                              dtype=torch.float64,
                              requires_grad=True)
        order = torch.tensor([6, 5, 3, 4, 2, 1], dtype=torch.long)

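        # Gradients of the summed BCE loss w.r.t. the raw scores are
        # compared against a precomputed reference.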
        loss_value = loss(scores, order)
        loss_value.backward()

        grad_expected = torch.tensor(
            [0.0604, 0.4864, -1.0052, 0.3989, 1.0611, -1.0016],
            dtype=torch.float64)
        grad_actual = scores.grad

        self.assertTrue(
            torch.allclose(grad_actual, grad_expected, atol=1e-4),
            "Gradient is not valid:\n{0}\n{1}".format(grad_actual,
                                                      grad_expected))

    def test_single_column_input(self):
        torch.manual_seed(24637882)

        dataset_size = 8000
        test_dataset_size = 1000
        data_columns = 6
        competitors = 8

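        # Synthetic ranking data: each sample orders `competitors`
        # alternatives, each described by `data_columns` features.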
        dataset_generator = ArtificialDataset(dataset_size,
                                              competitors,
                                              data_columns,
                                              rand_eps=1e-3)
        loader_iterator = iter(DataLoader(dataset_generator))

        linear_model = LinearModel(data_columns,
                                   1)  # output size: one score per competitor
        optimizer = torch.optim.Adam(params=linear_model.parameters())
        loss = ExplodedLogitLoss(loss_type='nll', top_n=3)

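        # Online training: draw one sample per step and take a single
        # optimizer step on it.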
        for step in range(dataset_size):
            data, order = next(loader_iterator)
            optimizer.zero_grad()

            score = linear_model(data).squeeze(-1)

            loss_value = loss(score, order)
            loss_value.backward()
            optimizer.step()
            if step % 1000 == 0:
                print("Loss value: {0}".format(loss_value.item()))

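        # Evaluation: the learned scorer must reproduce the exact
        # ground-truth ordering on subsequent samples.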
        with torch.no_grad():
            for _ in range(test_dataset_size):
                data, expected_order = next(loader_iterator)

                score = linear_model(data).squeeze(-1)
                actual_order = get_sort_order(score)

                self.assertTrue(
                    torch.equal(actual_order, expected_order),
                    "Order not equal:\n{0}\n{1}".format(
                        actual_order, expected_order))

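        # Print the learned parameters next to the generator's true
        # coefficients for manual inspection.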
        print("\n\nLinear transformation weights matrix\n--------------------")
        print(linear_model.linear.weight)
        print("Linear transformation bias:")
        print(linear_model.linear.bias)

        print("\n\nOriginal coefficients\n--------------------")
        print(dataset_generator.coeffs[0])
        print("Original bias")
        print(dataset_generator.biases[0])

    def test_without_bias_with_regularization(self):
        torch.manual_seed(24637882)

        dataset_size = 20000
        test_dataset_size = 1000
        data_columns = 1
        random_columns = 15
        competitors = 8
        regularization_lambda = 0.05

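        # One informative column plus 15 random noise columns and no
        # bias; the L1 penalty should push the noise weights toward zero.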
        dataset_generator = ArtificialDataset(
            dataset_size,
            competitors,
            data_columns,
            rand_eps=1e-3,
            number_of_random_columns=random_columns,
            bias=False)
        loader_iterator = iter(DataLoader(dataset_generator))

        linear_model = LinearModel(data_columns + random_columns,
                                   1,  # output size: one score per competitor
                                   bias=False)
        optimizer = torch.optim.Adam(params=linear_model.parameters())
        loss = ExplodedLogitLoss(loss_type='bce')

        for step in range(dataset_size):
            data, order = next(loader_iterator)
            optimizer.zero_grad()

            score = linear_model(data).squeeze(-1)

            loss_value = loss(score, order)

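            # L1 penalty over all model weights to encourage sparsity.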
            l1_loss_value = 0
            for param in linear_model.parameters():
                l1_loss_value += torch.sum(torch.abs(param))
            loss_value += regularization_lambda * l1_loss_value

            loss_value.backward()
            optimizer.step()

            if step % 1000 == 0:
                print("Loss value: {0}".format(loss_value.item()))

        with torch.no_grad():
            for _ in range(test_dataset_size):
                data, expected_order = next(loader_iterator)

                score = linear_model(data).squeeze(-1)
                actual_order = get_sort_order(score)

                self.assertTrue(
                    torch.equal(actual_order, expected_order),
                    "Order not equal:\n{0}\n{1}".format(
                        actual_order, expected_order))

        print("\n\nLinear transformation weights matrix\n--------------------")
        print(linear_model.linear.weight)

        print("\n\nInverted original coefficients\n--------------------")
        print(dataset_generator.coeffs[0])