Code example #1
    def test_mlp(self):
        """
        Train MLP on dataset 1, which is not linearly separable. Tests will try multiple random initializations to
        reduce chance of bad initialization.
        """
        i = 1

        num_hidden_units = [4, 5]

        params = {
            'max_iter': 400,
            'squash_function': logistic,
            'loss_function': nll,
            'num_hidden_units': num_hidden_units,
            'lambda': 0.01
        }

        input_dim = self.train_data[i].shape[0] + 1
        # total number of weights for layer sizes [input_dim] + num_hidden_units + [1];
        # computed here but not used further in this test
        total_weight_length = input_dim * num_hidden_units[0]
        for j in range(len(num_hidden_units) - 1):
            total_weight_length += num_hidden_units[j] * num_hidden_units[j + 1]
        total_weight_length += num_hidden_units[-1]

        # try at most 10 random initializations
        for trial in range(10):
            mlp_model = mlp_train(self.train_data[i], self.train_labels[i],
                                  params)
            predictions, _, _, _ = mlp_predict(self.test_data[i], mlp_model)
            accuracy = np.mean(predictions == self.test_labels[i])
            print(
                "On trial %d, 3-layer MLP had test accuracy %2.3f "
                "(should be around 0.95, depending on random initialization)" %
                (trial, accuracy))

            if accuracy > 0.9:
                return

        assert False, "Accuracy was never above 0.9. Could be a bug, or could be bad luck. " \
                      "Try running again to check."
Code example #2
    def test_mlp(self):
        """
        Train 3-layer MLP on datasets 0 to 9. Tests will try multiple random initialization trials to
        reduce chance of bad initialization.
        """
        # threshold accuracy to beat for each dataset
        threshold_accuracy = [0.95, 0.9, 0.85, 0.9, 0.9, 0.875, 0.9, 0.875, 0.85, 0.85]
        pass_vec = np.zeros(10, dtype=bool)  # whether the test passed on each dataset
        for i in range(10):

            num_hidden_units = [4, 5]
    
            params = {
                'max_iter': 400,
                'activation_function': logistic,
                'loss_function': nll,
                'num_hidden_units': num_hidden_units,
                'lambda': 0.01
            }
    
            input_dim = self.train_data[i].shape[0] + 1
            total_weight_length = input_dim * num_hidden_units[0]
            for j in range(len(num_hidden_units) - 1):
                total_weight_length += num_hidden_units[j] * num_hidden_units[j + 1]
            total_weight_length += num_hidden_units[-1]
    
            # try at most 10 random initializations
            best_accuracy = 0
            for trial in range(10):
                mlp_model = mlp_train(self.train_data[i], self.train_labels[i], params)
                predictions, _, _, _ = mlp_predict(self.test_data[i], mlp_model)
                accuracy = np.mean(predictions == self.test_labels[i])
                if accuracy > best_accuracy:
                    best_accuracy = accuracy
                    
            print("On dataset %d, 3-layer MLP had test accuracy %2.3f (should be greater than %2.3f)" %
                  (i, best_accuracy,threshold_accuracy[i]))
            pass_vec = best_accuracy - threshold_accuracy[i] < 0
    
        assert np.all(pass_vec), "3-layer MLP accuracy was below the threshold for at least one dataset. " \
                                 "Could be a bug, or could be bad luck. Try running again to check."