def dec_or_reg_tree(df_train, df_test, Y):
    """ fit a decision tree when Y is binary, else a regression tree;
    returns [error_train, error_test] """
    binary = utils.check_binary(df_train[Y])
    if binary:
        newtree = treeHW4.TreeOptimal(max_depth=1)
        y = list(df_train[Y])
        nondf_train = utils.pandas_to_data(df_train)
        nondf_test = utils.pandas_to_data(df_test)
        newtree.fit(nondf_train, y)
        predict = newtree.predict(nondf_train)
        error_train = mystats.get_error(predict, y, binary)

        y = list(df_test[Y])
        predict = newtree.predict(nondf_test)
        error_test = mystats.get_error(predict, y, binary)
    else:

        node = mytree.Node(np.ones(len(df_train)))
        hw1.branch_node(node, df_train, 5, Y)
        model = mytree.Tree(node)
        predict = model.predict_obj()
        error_train = mystats.get_error(predict, df_train[Y], binary)

        node.presence = np.ones(len(df_test))
        hw1.test_node(node, df_test, Y)
        test_tree = mytree.Tree(node)
        predict = test_tree.predict_obj()
        error_test = mystats.get_error(predict, df_test[Y], binary)
    return [error_train, error_test]
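
mystats.get_error is called throughout with a binary flag. For reference, a minimal sketch of what such a helper could look like (get_error_sketch is hypothetical; the real mystats implementation may differ), returning misclassification rate for binary targets and mean squared error otherwise:

def get_error_sketch(predictions, truth, binary=False):
    """ hypothetical stand-in for mystats.get_error: misclassification
    rate when binary, mean squared error otherwise """
    truth = list(truth)
    if binary:
        wrong = sum(1 for p, t in zip(predictions, truth) if p != t)
        return float(wrong) / len(truth)
    squared = sum((p - t) ** 2 for p, t in zip(predictions, truth))
    return float(squared) / len(truth)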
def linear_gd(df_train, df_test, Y):
    """ linear gradient descent """
    binary = utils.check_binary(df_train[Y])
    model = gd.gradient(df_train, df_train[Y], 0.00001, max_iterations=50)
    print model
    predict = gd.predict(df_train, model, binary)
    print predict
    error_train = mystats.get_error(predict, df_train[Y], binary)
    predict = gd.predict(df_test, model, binary)
    print predict
    error_test = mystats.get_error(predict, df_test[Y], binary)
    return [error_train, error_test]
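
gd.gradient above presumably performs batch gradient descent for least squares. A rough sketch of that update rule (gradient_sketch is hypothetical and assumes X already carries a bias column; the real gd module may differ):

import numpy as np

def gradient_sketch(X, y, learning_rate, max_iterations=50):
    """ hypothetical batch gradient descent on mean squared error """
    X = np.asarray(X, dtype=float)
    y = np.asarray(y, dtype=float)
    w = np.zeros(X.shape[1])
    for _ in range(max_iterations):
        h = X.dot(w)                    # current predictions
        grad = X.T.dot(h - y) / len(y)  # gradient of MSE / 2
        w -= learning_rate * grad
    return w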
def logistic_gd(df_train, df_test, Y):
    """ logistic gradient descent """
    binary = utils.check_binary(df_train[Y])
    model = gd.logistic_gradient(df_train, df_train[Y], 0.1, max_iterations=5)
    print model
    predict = gd.predict(df_train, model, binary, True)
    print predict
    error_train = mystats.get_error(predict, df_train[Y], binary)
    predict = gd.predict(df_test, model, binary, True)
    print predict
    error_test = mystats.get_error(predict, df_test[Y], binary)
    return [error_train, error_test]
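
The logistic variant differs only in passing the linear score through a sigmoid. A hedged sketch of the update (logistic_gradient_sketch is hypothetical; the real gd.logistic_gradient may differ):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def logistic_gradient_sketch(X, y, learning_rate, max_iterations=5):
    """ hypothetical gradient ascent on the logistic log-likelihood """
    X = np.asarray(X, dtype=float)
    y = np.asarray(y, dtype=float)
    w = np.zeros(X.shape[1])
    for _ in range(max_iterations):
        h = sigmoid(X.dot(w))  # P(y = 1 | x)
        w += learning_rate * X.T.dot(y - h) / len(y)
    return w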
def q1():
    """ Gaussian Discriminant Analysis (GDA).

    Run GDA on the spambase data, using the k-folds from the previous problem
    (1 fold for testing, k-1 for training, for each fold). Since there are 57
    real-valued features, each of the two Gaussians (one for the + class, one
    for the - class) has a mean vector with 57 components, and they have either
    a common (shared) 57x57 covariance matrix, estimated from all training data
    (both classes), or two separate 57x57 covariance matrices (estimated
    separately for each class). You can use a Matlab, Python, or Java built-in
    function to estimate the covariance matrices, but the estimator is easy to
    code up. Looking at the training and testing performance, does it appear
    that the Gaussian assumption (normally distributed data) holds for this
    particular dataset?
    """

    spamData = hw3.pandas_to_data(hw3.load_and_normalize_spambase())  # returns an array of arrays - this is by row
    k = 10
    train_acc_sum = 0
    k_folds = hw3.partition_folds(spamData, k)
    gdas = []
    for ki in range(k - 1):
        gda = hw3.GDA()
        X, truth = hw3.separate_X_and_y(k_folds[ki])
        covariance_matrix = hw3.get_covar(X)  # shared covariance from this fold's features
        gda.p_y = float(sum(truth)) / len(truth)  # class prior P(y = 1)
        gda.train(X, covariance_matrix, truth)
        predictions = gda.predict(X)  # note: evaluated on the same fold it was trained on
        accuracy = mystats.get_error(predictions, truth, True)
        print_output(ki, accuracy)
        gdas.append(gda)
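
With a shared covariance matrix, GDA classifies by comparing Gaussian log-densities plus log-priors (the -0.5*log|Sigma| term cancels between the two classes). A minimal sketch of that decision rule, assuming numpy arrays; gda_predict_sketch is hypothetical, and hw3.GDA may be implemented differently:

import numpy as np

def gda_predict_sketch(X, mu0, mu1, covar, p_y):
    """ hypothetical shared-covariance GDA: pick the class with the
    larger Gaussian log-density plus log-prior """
    inv = np.linalg.inv(covar)
    preds = []
    for x in np.asarray(X, dtype=float):
        d0, d1 = x - mu0, x - mu1
        score0 = -0.5 * d0.dot(inv).dot(d0) + np.log(1.0 - p_y)
        score1 = -0.5 * d1.dot(inv).dot(d1) + np.log(p_y)
        preds.append(1 if score1 > score0 else 0)
    return preds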
def linear_gd_error(df, Y):
    """ linear gradient descent error on a single data set """
    binary = utils.check_binary(df[Y])
    model = gd.gradient(df, df[Y], 0.00001, max_iterations=50)
    print model
    predict = gd.predict(df, model, binary)
    print predict
    error = mystats.get_error(predict, df[Y], binary)
    return error
def testLogisticGradient():
    """ logistic gradient descent """
    df_test, df_train = utils.split_test_and_train(utils.load_and_normalize_spam_data())
    Y = 'is_spam'
    binary = utils.check_binary(df_train[Y])
    model = gd.logistic_gradient(df_train, df_train[Y], .1, max_iterations=5)
    predict = gd.predict(df_train, model, binary, True)
    print predict
    error_train = mystats.get_error(predict, df_train[Y], binary)
    predict = gd.predict(df_test, model, binary, True)
    print predict
    error_test = mystats.get_error(predict, df_test[Y], binary)
    print 'error train {} error_test {}'.format(error_train, error_test)
    return [error_train, error_test]
def debug_print(iters, nc, h, y):
    """ convergence diagnostics: mean absolute distance and error between h and y """
    diffs = 0
    error = mystats.get_error(h, y, 0)
    for i, actual in enumerate(y):
        diffs += abs(actual - h[i])
    distance = float(diffs) / len(h)
    print "actual"
    print y[:5]
    print "predicted"
    print h[:5]
    print 'loop: {} num not converged: {} distance: {} MSE: {}'.format(iters, nc, distance, error)
def k_folds_linear_gd(df_test, df_train, Y):
    """ k-fold linear gradient descent; returns [train_error, test_error] """
    k = 10
    df_test = gd.pandas_to_data(df_test)
    k_folds = partition_folds(df_test, k)
    model = Model_w()
    theta = None
    for ki in range(k - 1):
        print "k fold is {}".format(k)
        data, truth = get_data_and_truth(k_folds[ki])
        binary = True
        model.update(gd.gradient(data, np.array(truth), 0.00001, max_iterations=5, binary=binary))
        print model.w
        if theta is None:
            theta, max_acc = get_best_theta(data, truth, model.w, binary, False)
        predict = gd.predict_data(data, model.w, binary, False, theta)
        error = mystats.get_error(predict, truth, binary)
        print "Error for fold {} is {} with theta =  {}".format(k, error, theta)
    test, truth = get_data_and_truth(k_folds[k - 1])
    predict = gd.predict_data(test, model.w, binary, False, theta)
    test_error = mystats.get_error(predict, truth, binary)
    return [error, test_error]
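
partition_folds above just needs to split the rows into k roughly equal groups. One hypothetical way to do it (partition_folds_sketch is an assumption, not the actual helper):

def partition_folds_sketch(rows, k):
    """ hypothetical fold splitter: deal rows round-robin into k folds """
    folds = [[] for _ in range(k)]
    for i, row in enumerate(rows):
        folds[i % k].append(row)
    return folds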
def get_best_theta(data, truth, model, binary, logistic):
    """ sweep 100 candidate thresholds across [min(w), max(w)] and keep the best """
    best_theta = None
    max_acc = 0
    modmin = min(model)
    modmax = max(model)
    for theta_i in range(100):
        theta = modmin + (float(theta_i) / 100) * (modmax - modmin)
        predict = gd.predict_data(data, model, binary, False, theta)
        acc = mystats.get_error(predict, truth, binary)  # treated as accuracy here
        if best_theta is None or acc > max_acc:
            best_theta = theta
            max_acc = acc
    return best_theta, max_acc
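
The sweep above assumes gd.predict_data thresholds a raw linear score at theta. A hypothetical version of that predictor (predict_data_sketch simplifies away the binary/logistic flags):

def predict_data_sketch(data, w, theta):
    """ hypothetical thresholded linear predictor: 1 when x . w > theta """
    preds = []
    for row in data:
        score = sum(wj * xj for wj, xj in zip(w, row))
        preds.append(1 if score > theta else 0)
    return preds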
def linear_reg(df, Y, binary=False, ridge=False, sigmoid=False):
    """ closed-form linear regression (ridge optional); returns the error """
    means = []
    columns = [col for col in df.columns if col not in ("is_spam", "MEDV", "y")]  # drop known target columns
    if ridge:
        w = mystats.get_linridge_w(df[columns], df[Y], binary)
    else:
        for col in df.columns:
            mean = df[col].mean()
            means.append(mean)
            df[col] -= mean

        w = mystats.get_linreg_w(df[columns], df[Y])

    print ("w:")
    print (w)
    predict = mystats.predict(df[columns], w, binary, means=means)
    error = mystats.get_error(predict, df[Y], binary)
    return error
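
mystats.get_linreg_w and get_linridge_w presumably solve the normal equations. A compact sketch covering both cases (linreg_w_sketch is hypothetical; ridge_lambda = 0 gives plain least squares):

import numpy as np

def linreg_w_sketch(X, y, ridge_lambda=0.0):
    """ hypothetical closed form: w = (X'X + lambda*I)^-1 X'y """
    X = np.asarray(X, dtype=float)
    y = np.asarray(y, dtype=float)
    A = X.T.dot(X) + ridge_lambda * np.eye(X.shape[1])
    return np.linalg.solve(A, X.T.dot(y))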