import time

import numpy as np

# MultilayerPerceptron's import is project-specific; the commented-out
# alternatives in train() are the sklearn estimators KNeighborsRegressor,
# LogisticRegression, SVR and SVC.


class SongPairModel(object):
    """
    Regression model that maps two audio feature sets to a
    probability of co-occurrence in a playlist.

    Uses a MultilayerPerceptron (earlier attempts used k-nearest
    neighbours, L2-regularized logistic regression, and kernelized
    SVR/SVC; see the commented-out lines in train()).
    """
    def __init__(self):
        self.model = None

    def join_feature_sets(self, x, y):
        """
        Concatenate two song audio feature sets.

        :param AudioFeatureSet x: An audio feature set of a song
        :param AudioFeatureSet y: An audio feature set of a song
        :rtype: np.ndarray
        :return: Feature set of x followed by feature set of y
        """
        return np.concatenate((x.get(), y.get()))

    def train(self, data):
        r"""
        Train the regression model on a training data set.

        :param list[tuple(AudioFeatureSet, AudioFeatureSet, float)] data:
            A list of (song A, song B, \hat{Pr}(A|B)) triples
        """
        print("Preparing training set...")
        X = [self.join_feature_sets(e[0], e[1]) for e in data]
        Y = [e[2] for e in data]
        print("Done.")

        print("Training model on data set with %d instances and %d features..."
              % (len(X), len(X[0])))
        start_time = time.time()
        # Earlier attempts, kept for reference:
        #self.model = KNeighborsRegressor(n_neighbors=3)
        #self.model = LogisticRegression()
        #self.model = SVR(kernel='rbf', C=1.0)
        #self.model = SVC(kernel='rbf', C=1.0, probability=True)
        self.model = MultilayerPerceptron()
        self.model.fit(X, Y)
        print("Done in %f seconds." % (time.time() - start_time))

    def log_likelihood(self, x, y):
        """
        Compute the log likelihood of co-occurrence, log Pr(X|Y).

        :param AudioFeatureSet x: An audio feature set of a song
        :param AudioFeatureSet y: An audio feature set of a song
        :rtype: float
        :return: The log of a probability, i.e. a value <= 0
        """
        f = self.join_feature_sets(x, y)
        return self.model.predict_log_proba(f)
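
A minimal usage sketch (hypothetical: this AudioFeatureSet stub and the
training probabilities are stand-ins, not part of the original code):

    class AudioFeatureSet(object):
        """Hypothetical stub for the real feature extractor."""
        def __init__(self, values):
            self._values = np.asarray(values, dtype=float)

        def get(self):
            return self._values

    a = AudioFeatureSet([0.2, 0.7, 0.1])
    b = AudioFeatureSet([0.5, 0.4, 0.9])
    c = AudioFeatureSet([0.3, 0.3, 0.3])

    model = SongPairModel()
    # Each triple is (song A, song B, estimated Pr(A|B)).
    model.train([(a, b, 0.8), (a, c, 0.1), (b, c, 0.4)])
    print(model.log_likelihood(a, b))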
Example #2
    def setUp(self):
        self.nn = NN(seed=0,
                     learning_algorithm=batch,
                     error_calculator=ErrorCalculator.MIS,
                     architecture=MultilayerPerceptron(
                         2,
                         activation=sigmoid,
                         activation_hidden=relu,
                         alambd=0,
                         alpha=0.9,
                         eta=0.9,
                     ))
Example #3
    def setUp(self):
        np.random.seed(seed=1)

        self.nn: MLPNeuralNetwork = MultilayerPerceptron(
            1000, activation=sigmoid, activation_hidden=sigmoid, eta=0.5)()
        self.nn._compute_layers(200, 500)

        output_layer = self.nn.layers[-1]
        self.hidden_layer = self.nn.layers[0]

        self.output_w = output_layer.T[1:].T
        self.hidden_w = self.hidden_layer.T[1:].T

        self.len_input = len(self.hidden_layer[0]) - 1
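
Note the trailing () in the line constructing self.nn: MultilayerPerceptron
appears to act as an architecture factory, and calling the configured
instance materializes the network (the same pattern shows up in Example #4).
A minimal illustration, with hypothetical arguments:

    architecture = MultilayerPerceptron(2, activation=sigmoid)  # configure
    network = architecture()                                    # materialize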
Example #4
    def test_batch_explicit(self):
        nn = MultilayerPerceptron(layers=[[[0, 1.5, 2], [0, 3, 0.5]],
                                          [[0, -1.5, 1.5], [0, -0.5, 2]]],
                                  alambd=0,
                                  alpha=0,
                                  eta=0.5,
                                  activation=sigmoid,
                                  activation_hidden=sigmoid)()

        nn.fit([([1, 1], [0, 1]), ([2, 2], [1, 1])])

        print(nn.layers[1])

        self.assertTrue(
            np.isclose(nn.layers[1],
                       [[0., -1.49911246, 1.50088754],
                        [0.01406306, -0.48615559, 2.01384441]]).all())

        self.assertTrue(
            np.isclose(
                nn.layers[0],
                [[1.18486022e-03, 1.50113909e+00, 2.00113909e+00],
                 [-8.66234240e-04, 2.99918884e+00, 4.99188840e-01]]).all())
Example #6
def grid_search_task(pm: Dict[str, Any]) -> GridSearchResult:
    # Build the network from the hyper-parameter dict; 'architecture'
    # holds the kwargs for MultilayerPerceptron.
    pm_with_architecture = dict(**pm)
    pm_with_architecture['architecture'] = MultilayerPerceptron(**pm['architecture'])
    nn = NeuralNetwork(**pm_with_architecture)
    # dataset_process, cv_params_process and validation_params_process are
    # module-level globals (hence the `# type: ignore` markers), apparently
    # injected into each worker process.
    if validation_params_process is None:  # type: ignore
        kf = k_fold_CV(nn, dataset_process, **cv_params_process)  # type: ignore
    else:
        validation_result = validation(
            nn,
            dataset_process,  # type: ignore
            **validation_params_process,  # type: ignore
        )
        kf = KFoldCVResult(validation_result.score_validation, 0, validation_result.epoch)
    # Re-wrap the raw dicts in their typed counterparts for the result record.
    pm.update(architecture=MLPParams(**pm['architecture']))
    typed_pm: NNParams = NNParams(**pm)

    return GridSearchResult(typed_pm, *kf)
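
A sketch of how such a task might be driven (hypothetical: the grid
contents, the Pool usage, and the layout of GridSearchResult are all
assumptions, not part of the original code):

    from itertools import product
    from multiprocessing import Pool

    # Hypothetical grid: every combination of eta and hidden-layer size.
    # The kwargs are assumed to match NNParams / MLPParams.
    grid = [
        dict(architecture=dict(size_hidden_layers=(h,), eta=eta))
        for eta, h in product((0.1, 0.5), (2, 4))
    ]

    # grid_search_task reads dataset_process etc. from module globals, so a
    # real driver would set those in each worker, e.g. via Pool(initializer=...).
    with Pool(processes=4) as pool:
        results = pool.map(grid_search_task, grid)

    # Assuming GridSearchResult is a tuple of (params, score, ...), pick
    # the parameter set with the lowest validation score.
    best = min(results, key=lambda r: r[1])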
Example #7
    def test_monk1(self):
        nn = NN(seed=4,
                epochs_limit=400,
                learning_algorithm=batch,
                error_calculator=ErrorCalculator.MSE,
                architecture=MultilayerPerceptron(
                    4,
                    activation=sigmoid,
                    activation_hidden=relu,
                    eta=0.5,
                    alambd=0,
                    alpha=0.8,
                ))

        train_data, test_data = read_monk(1)

        nn.fit(train_data)
        train_errs = nn.compute_learning_curve(train_data, ErrorCalculator.MIS)

        test_errs = nn.compute_learning_curve(test_data, ErrorCalculator.MIS)

        error_train = 0
        for x, d in train_data:
            error_train += (round(nn(x)[0][-1]) - d[0])**2

        error_test = 0
        for x, d in test_data:
            error_test += (round(nn(x)[0][-1]) - d[0])**2

        print(
            'train:',
            str(((len(train_data) - error_train) / len(train_data)) * 100) +
            '%')
        print(
            'test: ',
            str(((len(test_data) - error_test) / len(test_data)) * 100) + '%')

        self.assertEqual(error_train, 0)
        self.assertEqual(error_test, 0)

        nn.error_calculator = ErrorCalculator.MIS
        self.assertEqual(nn.compute_error(train_data), 0)
        self.assertEqual(nn.compute_error(test_data), 0)
Example #8
    def test_batch_explicit(self):
        np.random.seed(0)

        nn = NeuralNetwork(
            architecture=MultilayerPerceptron(
                layers=[[[0, 1.5, 2], [0, 3, 0.5]],
                        [[0, -1.5, 1.5], [0, -0.5, 2]]],
                alambd=0,
                alpha=0,
                eta=0.5,
                activation=sigmoid,
                activation_hidden=sigmoid,
            ),
            learning_algorithm=minibatch(.5),
        )

        nn.fit([([1, 1], [0, 1]), ([2, 2], [1, 1])])

        nn = nn._current_network
        np.testing.assert_array_almost_equal(
            nn.layers[0], [[-9.153689e-05, 1.499817e+00, 1.999817e+00],
                           [1.101478e-04, 3.000220e+00, 5.002203e-01]])

        np.testing.assert_array_almost_equal(
            nn.layers[1],
            [[0.0625, -1.437557, 1.562443], [0.013631, -0.486381, 2.013619]])
Example #9
    # Fragment: seed, train_set, validation_set and test_data are defined
    # elsewhere in the source file.
    epochs_limit = 466
    eta = 0.1
    alpha = 0.5
    alambd = 0.001
    validation_error = ErrorCalculator.MSE

    nn = NN(
        seed=seed,
        epochs_limit=epochs_limit,
        learning_algorithm=batch,
        n_init=1,
        error_calculator=ErrorCalculator.MSE,
        architecture=MultilayerPerceptron(
            size_hidden_layers=(2, 2),
            eta=eta,
            alpha=alpha,
            alambd=alambd,
            activation=tanh_classification,
            activation_hidden=relu,
        ),
    )

    nn.fit(train_set)

    nn.error_calculator = ErrorCalculator.MSE
    print('mse', nn.compute_error(train_set), nn.compute_error(validation_set),
          nn.compute_error(test_data))

    nn.error_calculator = ErrorCalculator.MEE
    print('mee', nn.compute_error(train_set), nn.compute_error(validation_set),
          nn.compute_error(test_data))