Example #1
    def train(self, X, y):
        """
        Parameters
        ----------
        X : samples, shape=(num_samples, num_features)
        y : labels, shape=(num_samples, )

        Train this classifier on the sample (X, y). After training, return
        the distribution (weights) over the samples from the last iteration.
        """
        n_samples = X.shape[0]
        # start from the uniform distribution over the samples
        D = np.full(n_samples, 1.0 / n_samples)
        for t in range(self.T):
            gc.log(f'At T = {t}')
            # fit the t-th weak learner on the current distribution
            self.h[t] = self.WL(D, X, y)
            y_hat = self.h[t].predict(X)
            # weighted error of the weak hypothesis
            mask = y != y_hat
            epsilon = np.matmul(D, mask)
            self.w[t] = 0.5 * np.log((1 - epsilon) / epsilon)
            # up-weight misclassified samples, down-weight correct ones, then normalize
            D *= np.exp(-(y * y_hat * self.w[t]))
            D /= np.sum(D)
        return D
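For reference, the loop above implements the standard AdaBoost update, assuming the labels y and the weak-learner predictions take values in {-1, +1}:

        epsilon_t   = sum_i D_t(i) * 1[h_t(x_i) != y_i]          (weighted error)
        w_t         = 0.5 * ln((1 - epsilon_t) / epsilon_t)      (hypothesis weight)
        D_{t+1}(i)  = D_t(i) * exp(-w_t * y_i * h_t(x_i)) / Z_t  (Z_t normalizes D_{t+1})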
    def big_test(self):
        gc.log("Big test")
        fig = plt.figure()
        fig.suptitle("SVM vs Perceptron, Accuracy Test")
        plt.xlabel('Train Set Size')
        plt.ylabel('Accuracy (%)')

        k, n_iter = 10000, 500            # test-set size, repetitions per training-set size
        M = [5, 10, 15, 25, 70]           # training-set sizes to compare
        accurs = [[], []]                 # [SVM accuracies, Perceptron accuracies]
        for m in M:
            svm_accu_sum = 0
            perc_accu_sum = 0
            for i in range(n_iter):
                # redraw until the training sample contains both labels
                while True:
                    train_X = self.draw_m_points(m).T
                    train_y = self.true_label(train_X)
                    if np.unique(train_y).shape[0] == 2:
                        break
                # redraw until the test sample contains both labels
                while True:
                    test_X = self.draw_m_points(k).T
                    test_y = self.true_label(test_X)
                    if np.unique(test_y).shape[0] == 2:
                        break
                svm = self._svm.fit(train_X, np.ravel(train_y.T))
                perc_w = self._perc.fit(train_X, train_y)
                svm_accu = self.get_svm_accu(svm, test_X, test_y)
                perc_accu = self.get_perc_accu(perc_w, test_X, test_y)
                svm_accu_sum += svm_accu
                perc_accu_sum += perc_accu
            svm_accu_avg = svm_accu_sum / n_iter
            perc_accu_avg = perc_accu_sum / n_iter
            accurs[0].append(svm_accu_avg)
            accurs[1].append(perc_accu_avg)
        plt.plot(M, accurs[0], label='SVM')
        plt.plot(M, accurs[1], label='Perceptron')
        plt.legend()
        self.plot_to_file('q5', 2)
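The accuracy helpers used above are not part of this snippet; a minimal sketch of what they might compute, assuming ±1 labels and a perceptron that returns a weight vector w whose prediction is sign(X @ w):

    def get_svm_accu(self, svm_clf, test_X, test_y):
        # fraction of test points the fitted sklearn SVC labels correctly
        return np.mean(svm_clf.predict(test_X) == np.ravel(test_y))

    def get_perc_accu(self, w, test_X, test_y):
        # fraction of test points where sign(<w, x>) agrees with the true label
        return np.mean(np.sign(test_X @ w) == np.ravel(test_y))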
Example #3
    # train/test error of the boosted classifier as a function of the number of hypotheses used
    train_errs = [ada.error(train_images, train_labels, t) for t in T_range]
    test_errs = [ada.error(test_images, test_labels, t) for t in T_range]

    fig = plt.figure()
    fig.suptitle("Train vs Test error, Face Classifier")
    plt.xlabel('# of Hypotheses (T)')
    plt.ylabel('Error rate (%)')
    plt.plot(T_range, train_errs, label='Train Error')
    plt.plot(T_range, test_errs, label='Test Error')
    # plt.ylim(top=0.06)
    plt.legend()
    plt.savefig(FIG_DIR3 + 'q17')
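Q17 relies on an ada.error(X, y, t) method that is not shown here. A plausible sketch, assuming the classifier stores its weak hypotheses in self.h and their weights in self.w as in the train method of Example #1:

    def error(self, X, y, max_t):
        # error rate of the weighted majority vote over the first max_t hypotheses
        votes = sum(self.w[t] * self.h[t].predict(X) for t in range(max_t))
        return np.mean(np.sign(votes) != y)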


def Q18():
    'TODO complete this function'


if __name__ == '__main__':
    start_time = time.time()
    # Q4()
    # Q5()
    # learner, test_X, test_y, train_X, train_y = Q8()
    # Q9(learner, test_X, test_y)
    # Q10(learner, train_X, train_y)
    # Q12()
    Q17()
    gc.log('Execution took %s seconds' % (time.time() - start_time))
    def compare_many(self):
        gc.log("Comparing many")
        for m in [5, 10, 15, 25, 70]:
            self.compare_one(m)

    def __init__(self):
        gc.log("Creating comparer")
        self._perc = pc.Perceptron()
        # a very large C approximates a hard-margin linear SVM
        self._svm = svm.SVC(C=1e10, kernel='linear')
        # standard multivariate-normal sampling distribution over DIM dimensions
        self._mu = np.zeros([DIM])
        self._sig = np.eye(DIM)
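big_test above also relies on draw_m_points and true_label, which this snippet does not show. A minimal sketch, assuming points are drawn from N(self._mu, self._sig) and labeled by the sign of a fixed linear functional (the hyperplane below is a placeholder, not the one used in the exercise):

    def draw_m_points(self, m):
        # m i.i.d. samples from N(self._mu, self._sig), returned as (DIM, m)
        # so that the .T in big_test yields the (m, DIM) layout sklearn expects
        return np.random.multivariate_normal(self._mu, self._sig, m).T

    def true_label(self, X):
        # placeholder labeling rule: sign of a fixed linear functional (assumption)
        w = np.ones(X.shape[1])
        return np.sign(X @ w)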