Example #1
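# Note: SVC, squared_hinge and the optimizer classes used below come from a
# custom optimization library whose imports are not shown in this excerpt;
# OVR is presumably sklearn.multiclass.OneVsRestClassifier, and the data
# utilities (load_iris, MinMaxScaler, train_test_split) are scikit-learn's.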
def test_solve_primal_l2_svc_with_line_search_optimizers():
    X, y = load_iris(return_X_y=True)
    X_scaled = MinMaxScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(X_scaled,
                                                        y,
                                                        train_size=0.75,
                                                        random_state=123456)

    svc = OVR(SVC(loss=squared_hinge, optimizer=SteepestGradientDescent))
    svc = svc.fit(X_train, y_train)
    assert all(
        np.allclose(np.hstack((estimator.coef_, estimator.intercept_)),
                    estimator.loss.x_star())
        for estimator in svc.estimators_)
    assert svc.score(X_test, y_test) >= 0.57

    svc = OVR(SVC(loss=squared_hinge, optimizer=ConjugateGradient))
    svc = svc.fit(X_train, y_train)
    assert all(
        np.allclose(np.hstack((estimator.coef_, estimator.intercept_)),
                    estimator.loss.x_star())
        for estimator in svc.estimators_)
    assert svc.score(X_test, y_test) >= 0.57

    svc = OVR(SVC(loss=squared_hinge, optimizer=Newton))
    svc = svc.fit(X_train, y_train)
    assert all(
        np.allclose(np.hstack((estimator.coef_, estimator.intercept_)),
                    estimator.loss.x_star())
        for estimator in svc.estimators_)
    assert svc.score(X_test, y_test) >= 0.57

    svc = OVR(SVC(loss=squared_hinge, optimizer=BFGS))
    svc = svc.fit(X_train, y_train)
    assert all(
        np.allclose(np.hstack((estimator.coef_, estimator.intercept_)),
                    estimator.loss.x_star())
        for estimator in svc.estimators_)
    assert svc.score(X_test, y_test) >= 0.57
Example #2
def test_solve_dual_l2_svc_with_AdaGrad():
    X, y = load_iris(return_X_y=True)
    X_scaled = MinMaxScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(X_scaled,
                                                        y,
                                                        train_size=0.75,
                                                        random_state=123456)

    svc = OVR(
        SVC(loss=squared_hinge,
            kernel=gaussian,
            reg_intercept=True,
            dual=True,
            optimizer=AdaGrad,
            learning_rate=1.))
    svc = svc.fit(X_train, y_train)
    assert svc.score(X_test, y_test) >= 0.97

    svc = OVR(
        SVC(loss=squared_hinge,
            kernel=gaussian,
            reg_intercept=False,
            dual=True,
            optimizer=AdaGrad,
            learning_rate=1.))
    svc = svc.fit(X_train, y_train)
    assert svc.score(X_test, y_test) >= 0.97
Example #3
def test_solve_dual_l1_svc_with_proximal_bundle():
    X, y = load_iris(return_X_y=True)
    X_scaled = MinMaxScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(X_scaled,
                                                        y,
                                                        train_size=0.75,
                                                        random_state=123456)

    svc = OVR(
        SVC(loss=hinge,
            kernel=gaussian,
            reg_intercept=True,
            dual=True,
            optimizer=ProximalBundle,
            max_iter=150))
    svc = svc.fit(X_train, y_train)
    assert svc.score(X_test, y_test) >= 0.97

    svc = OVR(
        SVC(loss=hinge,
            kernel=gaussian,
            reg_intercept=False,
            dual=True,
            optimizer=ProximalBundle,
            max_iter=150))
    svc = svc.fit(X_train, y_train)
    assert svc.score(X_test, y_test) >= 0.97
Example #4
def test_solve_dual_l1_svc_with_reg_intercept_with_bcqp_optimizers():
    X, y = load_iris(return_X_y=True)
    X_scaled = MinMaxScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(X_scaled,
                                                        y,
                                                        train_size=0.75,
                                                        random_state=123456)

    svc = OVR(
        SVC(loss=hinge,
            kernel=gaussian,
            reg_intercept=True,
            dual=True,
            optimizer=ProjectedGradient))
    svc = svc.fit(X_train, y_train)
    assert svc.score(X_test, y_test) >= 0.97

    svc = OVR(
        SVC(loss=hinge,
            kernel=gaussian,
            reg_intercept=True,
            dual=True,
            optimizer=ActiveSet))
    svc = svc.fit(X_train, y_train)
    assert svc.score(X_test, y_test) >= 0.97

    svc = OVR(
        SVC(loss=hinge,
            kernel=gaussian,
            reg_intercept=True,
            dual=True,
            optimizer=InteriorPoint))
    svc = svc.fit(X_train, y_train)
    assert svc.score(X_test, y_test) >= 0.97

    svc = OVR(
        SVC(loss=hinge,
            kernel=gaussian,
            reg_intercept=True,
            dual=True,
            optimizer=FrankWolfe))
    svc = svc.fit(X_train, y_train)
    assert svc.score(X_test, y_test) >= 0.97
Example #5
def test_solve_dual_l1_svc_with_smo():
    X, y = load_iris(return_X_y=True)
    X_scaled = MinMaxScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(X_scaled,
                                                        y,
                                                        train_size=0.75,
                                                        random_state=123456)
    svc = OVR(SVC(loss=hinge, kernel=gaussian, dual=True, optimizer='smo'))
    svc.fit(X_train, y_train)
    assert svc.score(X_test, y_test) >= 0.97
Example #6
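# Assumed imports (aliases inferred from usage; not part of the original
# excerpt):
import pickle
import numpy as np
import pandas as pd
from sklearn.utils import shuffle
from sklearn.linear_model import LogisticRegression as lr
from sklearn.multiclass import OneVsRestClassifier as OVR
from sklearn.model_selection import cross_val_score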
def create_binary_multi_model():
    DOOM = 42

    # for the small dataset, drop the identifier-like columns (src/dst/id)
    def drop(df):
        for col in list(df):
            if 'src' in col or 'dst' in col or 'id' in col:
                df.drop(col, axis=1, inplace=True)

    df = pd.read_csv('model_data/DATA/trainb.csv')
    df = shuffle(df)

    print('\nabsolute correlation:')
    print(df.corr()['label'].abs().round(4).sort_values(
        ascending=False).head(100))

    drop(df)
    Y = df.label
    df = df.drop('label', axis=1)

    df = df.fillna(-1)
    print(df.shape)
    clf = lr(random_state=DOOM, solver='liblinear', penalty='l1', max_iter=50)

    clf = clf.fit(df, Y)
    with open('binary', 'wb') as f:
        pickle.dump(clf, f)

    print('roc_auc:')
    print(cross_val_score(clf, df, Y, cv=3, verbose=3, scoring='roc_auc'))

    print('Number of nonzero weights:', np.count_nonzero(clf.coef_))

    # multiclass (secondary) model

    df = pd.read_csv('model_data/DATA/trainl.csv')
    df = shuffle(df)
    drop(df)
    Y = pd.read_csv('model_data/DATA/labels.csv')

    df = df.fillna(-1)
    print(df.shape)
    clf = OVR(
        lr(random_state=DOOM, solver='lbfgs', penalty='l2',
           max_iter=2000)).fit(df, Y)
    with open('multi', 'wb') as f:
        pickle.dump(clf, f)
Example #7
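# Note: same custom SVC/optimizer library as Example #1 (imports not shown).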
def test_solve_primal_l1_svc_with_stochastic_optimizers():
    X, y = load_iris(return_X_y=True)
    X_scaled = MinMaxScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(X_scaled,
                                                        y,
                                                        train_size=0.75,
                                                        random_state=123456)

    svc = OVR(SVC(loss=hinge, optimizer=StochasticGradientDescent))
    svc = svc.fit(X_train, y_train)
    assert all(
        np.allclose(np.hstack((estimator.coef_, estimator.intercept_)),
                    estimator.loss.x_star())
        for estimator in svc.estimators_)
    assert svc.score(X_test, y_test) >= 0.57

    svc = OVR(SVC(loss=hinge, optimizer=Adam))
    svc = svc.fit(X_train, y_train)
    assert all(
        np.allclose(np.hstack((estimator.coef_, estimator.intercept_)),
                    estimator.loss.x_star())
        for estimator in svc.estimators_)
    assert svc.score(X_test, y_test) >= 0.57

    svc = OVR(SVC(loss=hinge, optimizer=AMSGrad))
    svc = svc.fit(X_train, y_train)
    assert all(
        np.allclose(np.hstack((estimator.coef_, estimator.intercept_)),
                    estimator.loss.x_star())
        for estimator in svc.estimators_)
    assert svc.score(X_test, y_test) >= 0.57

    svc = OVR(SVC(loss=hinge, optimizer=AdaMax))
    svc = svc.fit(X_train, y_train)
    assert all(
        np.allclose(np.hstack((estimator.coef_, estimator.intercept_)),
                    estimator.loss.x_star())
        for estimator in svc.estimators_)
    assert svc.score(X_test, y_test) >= 0.57

    svc = OVR(SVC(loss=hinge, optimizer=AdaGrad))
    svc = svc.fit(X_train, y_train)
    assert all(
        np.allclose(np.hstack((estimator.coef_, estimator.intercept_)),
                    estimator.loss.x_star())
        for estimator in svc.estimators_)
    assert svc.score(X_test, y_test) >= 0.57

    svc = OVR(SVC(loss=hinge, optimizer=AdaDelta, learning_rate=1.))
    svc = svc.fit(X_train, y_train)
    assert all(
        np.allclose(np.hstack((estimator.coef_, estimator.intercept_)),
                    estimator.loss.x_star())
        for estimator in svc.estimators_)
    assert svc.score(X_test, y_test) >= 0.57

    svc = OVR(SVC(loss=hinge, optimizer=RMSProp))
    svc = svc.fit(X_train, y_train)
    assert all(
        np.allclose(np.hstack((estimator.coef_, estimator.intercept_)),
                    estimator.loss.x_star())
        for estimator in svc.estimators_)
    assert svc.score(X_test, y_test) >= 0.57
Example #8
def test_solve_primal_l1_svc_with_proximal_bundle():
    X, y = load_iris(return_X_y=True)
    X_scaled = MinMaxScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(X_scaled,
                                                        y,
                                                        train_size=0.75,
                                                        random_state=123456)
    svc = OVR(SVC(loss=hinge, optimizer=ProximalBundle))
    svc = svc.fit(X_train, y_train)
    assert all(
        np.allclose(np.hstack((estimator.coef_, estimator.intercept_)),
                    estimator.loss.x_star())
        for estimator in svc.estimators_)
    assert svc.score(X_test, y_test) >= 0.57
Example #9
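# Assumed imports (inferred from usage; not part of the original excerpt):
from sklearn.linear_model import LogisticRegression as LR
from sklearn.multiclass import OneVsRestClassifier as OVR
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report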
    def get_accuracy_report(self, X, y, multiclass=False, test_size=0.1):
        if multiclass:
            estimator = lambda: OVR(LR(), n_jobs=16)
            y_trans = lambda y: y
        else:
            estimator = lambda: LR(n_jobs=16, max_iter=1000)
            y_trans = lambda y: y.argmax(axis=1)

        # stratify on the class labels only in the single-label case
        strat = y_trans(y) if not multiclass else None
        lr = estimator()
        Xtr, Xte, ytr, yte = train_test_split(
            X, y_trans(y),
            stratify=strat,
            test_size=test_size,
            random_state=1337
        )

        lr.fit(Xtr, ytr)
        yprime = lr.predict(Xte)

        # classification_report expects (y_true, y_pred)
        return classification_report(yte, yprime, output_dict=True)
Example #10
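# Assumed imports (inferred from usage; err_plot, C_VALS, gaussian and the
# initial SIGMA are module-level definitions not shown in this excerpt):
import os
import sys
import numpy as np
import pandas as pd
from tqdm import tqdm
from sklearn.decomposition import PCA
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression as LR
from sklearn.multiclass import OneVsOneClassifier as OVO, OneVsRestClassifier as OVR
from sklearn.neighbors import KNeighborsClassifier, NearestNeighbors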
def main():
    # load data
    # training data
    data = pd.read_csv(os.path.join(os.path.dirname(__file__), 'data', 'usps',
                                    'zip.train'),
                       header=None,
                       delimiter=' ').iloc[:, :-1]
    y_train = data.pop(0).values
    X_train = data.values

    # test data
    data = pd.read_csv(os.path.join(os.path.dirname(__file__), 'data', 'usps',
                                    'zip.test'),
                       header=None,
                       delimiter=' ')
    y_test = data.pop(0).values
    X_test = data.values

    # keep enough principal components to explain 95% of the training variance
    pca = PCA(n_components=.95)
    pca.fit(X_train)

    X_train = pca.transform(X_train)
    X_test = pca.transform(X_test)

    svm_errs = []
    with tqdm(desc="Problem 1", total=len(C_VALS)) as pbar:
        for C in C_VALS:
            svm = SVC(C=C, kernel='linear', decision_function_shape='ovo')
            svm.fit(X_train, y_train)
            pbar.update(1)

            svm_errs.append(1 - svm.score(X_test, y_test))

    lr = OVO(LR(solver='lbfgs', max_iter=5000))
    lr.fit(X_train, y_train)
    lr_score = lr.score(X_test, y_test)
    err_plot([svm_errs], ["SVM"],
             lr=1. - lr_score,
             title="One vs. One Linear SVM",
             out='hw7/ovo_linear_svm.pdf')

    ovo_svm_errs = []
    with tqdm(desc="Problem 2", total=len(C_VALS)) as pbar:
        for C in C_VALS:
            svm = OVO(SVC(C=C, kernel='poly', degree=3, gamma='auto'))
            svm.fit(X_train, y_train)
            pbar.update(1)

            ovo_svm_errs.append(1 - svm.score(X_test, y_test))

    err_plot([ovo_svm_errs], ["OvO SVM"],
             lr=1. - lr_score,
             title="One vs. One Cubic SVM",
             out='hw7/ovo_cubic_svm.pdf')

    ovr_svm_errs = []
    with tqdm(desc="Problem 3", total=len(C_VALS)) as pbar:
        for C in C_VALS:
            svm = OVR(SVC(C=C, kernel='poly', degree=3, gamma='auto'))
            svm.fit(X_train, y_train)
            pbar.update(1)

            ovr_svm_errs.append(1 - svm.score(X_test, y_test))

    err_plot([ovo_svm_errs, ovr_svm_errs], ["OvO SVM", "OvR SVM"],
             lr=1. - lr_score,
             title="One vs. Rest Cubic SVM/OvO Cubic",
             out='hw7/ovr_cubic_svm.pdf')

    n = 5
    # ensuring that we have at least n neighbors for all classes in the
    # sample
    while True:
        index = np.random.choice(X_train.shape[0], 100, replace=False)

        X_sample = X_train[index]
        y_sample = y_train[index]

        # accept the sample only if every class has at least n members
        if all(
                len(X_sample[y_sample == y_i]) >= n
                for y_i in np.unique(y_sample)
        ):
            break

    dists = []
    for X_i, y_i in zip(X_sample, y_sample):
        X_cls = X_sample[y_sample == y_i]
        nbrs = NearestNeighbors(n_neighbors=n)
        nbrs.fit(X_cls)
        # reshape is needed because kneighbors expects a 2D array for a
        # single sample
        distances, _ = nbrs.kneighbors(X_i.reshape(1, -1))
        # row of distances to the n nearest same-class neighbors
        dists.append(distances[-1])

    # use the mean neighbor distance as the Gaussian kernel width sigma
    global SIGMA
    SIGMA = np.mean(dists)

    ovo_gauss_svm_errs = []
    with tqdm(desc="Problem 4 (SVM)", total=len(C_VALS),
              file=sys.stdout) as pbar:
        for C in C_VALS:
            svm = OVO(SVC(C=C, kernel='rbf', gamma=1. / (2. * SIGMA**2)))
            #            svm = SVC(C=C, kernel='rbf', gamma=1. / (2. * SIGMA ** 2),
            #                      decision_function_shape='ovo')
            svm.fit(X_train, y_train)
            score = svm.score(X_test, y_test)
            pbar.update(1)

            ovo_gauss_svm_errs.append(1 - score)

    knn_errs = []
    with tqdm(desc="Problem 4 (kNN)",
              total=len(np.arange(3, 11)),
              file=sys.stdout) as pbar:
        for k in np.arange(3, 11):
            knn = KNeighborsClassifier(n_neighbors=k, weights=gaussian)
            knn.fit(X_train, y_train)
            pbar.update(1)

            knn_errs.append((k, 1 - knn.score(X_test, y_test)))

    err_plot([ovo_gauss_svm_errs], ["OvO SVM"],
             knn=knn_errs,
             title="One vs. One Gaussian SVM with kNN",
             out='hw7/ovo_gaussian_svm_knn.pdf')

    ovr_gauss_svm_errs = []
    with tqdm(desc="Problem 5", total=len(C_VALS), file=sys.stdout) as pbar:
        for C in C_VALS:
            svm = OVR(SVC(C=C, kernel='rbf', gamma=1. / (2. * SIGMA**2)))
            #            svm = SVC(C=C, kernel='rbf', gamma=1. / (2. * SIGMA ** 2),
            #                      decision_function_shape='ovr')
            svm.fit(X_train, y_train)
            score = svm.score(X_test, y_test)
            pbar.update(1)

            ovr_gauss_svm_errs.append(1 - score)

    err_plot([ovr_gauss_svm_errs], ["OvR SVM"],
             knn=knn_errs,
             title="One vs. Rest Gaussian SVM with kNN",
             out='hw7/ovr_gaussian_svm_knn.pdf')

    err_plot([
        svm_errs, ovo_svm_errs, ovr_svm_errs, ovo_gauss_svm_errs,
        ovr_gauss_svm_errs
    ], [
        "Linear SVM", "OvO Cubic SVM", "OvR Cubic SVM", "OvO Gaussian SVM",
        "OvR Gaussian SVM"
    ],
             lr=1. - lr_score,
             knn=knn_errs,
             title="Multiclass SVM Kernels",
             out='hw7/all_svm_knn.pdf')

    min_idx = np.argmin(svm_errs)
    min_lin_err = svm_errs[min_idx]
    min_lin_c = np.log2(C_VALS[min_idx])
    print("Min Linear SVM Error = {0:.4f}".format(min_lin_err))
    print("Min Linear SVM log2(C) = {0}".format(min_lin_c))
    print("LR Error = {0:.4f}".format(1. - lr_score))

    min_idx = np.argmin(ovo_svm_errs)
    min_lin_err = ovo_svm_errs[min_idx]
    min_lin_c = np.log2(C_VALS[min_idx])
    print("Min OvO Cubic SVM Error = {0:.4f}".format(min_lin_err))
    print("Min OvO Cubic SVM log2(C) = {0}".format(min_lin_c))

    min_idx = np.argmin(ovr_svm_errs)
    min_lin_err = ovr_svm_errs[min_idx]
    min_lin_c = np.log2(C_VALS[min_idx])
    print("Min OvR Cubic SVM Error = {0:.4f}".format(min_lin_err))
    print("Min OvR Cubic SVM log2(C) = {0}".format(min_lin_c))

    min_idx = np.argmin([err for _, err in knn_errs])
    min_lin_k, min_lin_err = knn_errs[min_idx]
    print("Min kNN Error = {0:.4f}".format(min_lin_err))
    print("Min kNN k = {0}".format(min_lin_k))

    min_idx = np.argmin(ovo_gauss_svm_errs)
    min_lin_err = ovo_gauss_svm_errs[min_idx]
    min_lin_c = np.log2(C_VALS[min_idx])
    print("Min OvO Gaussian SVM Error = {0:.4f}".format(min_lin_err))
    print("Min OvO Gaussian SVM log2(C) = {0}".format(min_lin_c))

    min_idx = np.argmin(ovr_gauss_svm_errs)
    min_lin_err = ovr_gauss_svm_errs[min_idx]
    min_lin_c = np.log2(C_VALS[min_idx])
    print("Min OvR Gaussian SVM Error = {0:.4f}".format(min_lin_err))
    print("Min OvR Gaussian SVM log2(C) = {0}".format(min_lin_c))

    print("sigma = {0:.4f}".format(SIGMA))
Example #11
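    # Assumed context (inferred from usage; not part of the original excerpt):
    # torch, train_test_split, classification_report, LR (sklearn
    # LogisticRegression) and OVR (sklearn OneVsRestClassifier) are imported
    # at module level; self.walker and self.encode_nodes are defined
    # elsewhere in the class.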
    def compare_to_random(self, batch, w2v_params={}, multiclass=False, fast_walks=False):
        if not isinstance(batch, torch.Tensor) and fast_walks:
            batch = torch.tensor(batch)
        
        # Generate policy guided walks
        print("Generating policy guided walks")
        if fast_walks:
            walks = self.walker.fast_walks(batch, egreedy=True, weighted_rand=False)
        else:
            walks = None 
            
        pX, py = self.encode_nodes(batch=batch, w2v_params=w2v_params, walks=walks)
        
        # Test against policy weighted walks
        print("Generating policy weighted walks")
        if fast_walks:
            walks = self.walker.fast_walks(batch, egreedy=False, weighted_rand=True)
        else:
            walks = None 
            
        wX, wy = self.encode_nodes(batch=batch, weighted=True, 
                                   w2v_params=w2v_params, walks=walks)
        
        # Test against random walk embeddings
        print("Generating random walks")
        if not isinstance(batch, torch.Tensor):
            batch = torch.tensor(batch)
            
        walks = self.walker.fast_walks(batch, egreedy=False, weighted_rand=False)
        rX, ry = self.encode_nodes(batch=batch, random=True, w2v_params=w2v_params, walks=walks)

        if multiclass:
            estimator = lambda : OVR(LR(), n_jobs=16)
            y_trans = lambda y : y 
        else:
            estimator = lambda : LR(n_jobs=16, max_iter=1000)
            y_trans = lambda y : y.argmax(axis=1)

        lr = estimator()
        Xtr, Xte, ytr, yte = train_test_split(pX, y_trans(py))
        lr.fit(Xtr, ytr)
        yprime = lr.predict(Xte)
        
        print(yprime)
        print("Policy guided:")
        print(classification_report(yte, yprime))
        
        lr = estimator()
        Xtr, Xte, ytr, yte = train_test_split(wX, y_trans(wy))
        lr.fit(Xtr, ytr)
        yprime = lr.predict(Xte)
        print("Policy weighted:")
        print(classification_report(yte, yprime))
        
        lr = estimator()
        Xtr, Xte, ytr, yte = train_test_split(rX, y_trans(ry))
        lr.fit(Xtr, ytr)
        yprime = lr.predict(Xte)
        print("Random walk:")
        print(classification_report(yte, yprime))
Example #12
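# Assumed imports (inferred from usage; rp, tripletset, infer_abalone and
# build_dag are project-specific helpers defined elsewhere):
import networkx as nx
from sklearn.ensemble import RandomForestClassifier as RFC
from sklearn.multiclass import OneVsRestClassifier as OVR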
###### the main code starts here ################

# generate random features
wx = rp(250, [0.2, 2, 20], 1)
wy = rp(250, [0.2, 2, 20], 1)
wz = rp(250, [0.2, 2, 20], 1)
ww = rp(250, [0.2, 2, 20], 3)

# generate training data
print(':: generating triplet training data...')
(X, Y) = tripletset(5000)

# train the classifier and predict the test data
print(':: training the random forest classifier...')
reg = OVR(RFC(n_estimators=1000, random_state=0, n_jobs=24)).fit(X, Y)

(node_labels, final_scores, permute_idx, num_features,
 desp) = infer_abalone(reg)

print(':: constructing a DAG...')
G = build_dag(node_labels, final_scores, permute_idx, 0.7)

# save dag in dot format (write_dot lives under nx_pydot in modern networkx
# and requires the pydot package)
nx.nx_pydot.write_dot(G, "abalone.dot")

# draw dag
#pos = nx.circular_layout(G,scale=3)
#nx.draw(G, pos, cmap=plt.get_cmap('jet'), node_size=2000, node_color='b', alpha=0.95)
#nx.draw_networkx_labels(G, pos, font_color='w')
#nx.draw_networkx_edges(G, pos, arrows=True)
Example #13
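# Assumed imports (inferred from usage; train_data and train_labels are
# loaded earlier, outside this excerpt):
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression as LR
from sklearn.multiclass import OneVsRestClassifier as OVR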
test_data = np.array(test_data, dtype=float)
test_labels = np.array(test_labels, dtype=int)

# column-wise (per-pixel) means of the training and test images
avgtrain = train_data.mean(axis=0)
avgtest = test_data.mean(axis=0)

# mean-center each image
mstrain = train_data - avgtrain
mstest = test_data - avgtest

# rows of V are the principal directions (eigenfaces)
U, s, V = np.linalg.svd(mstrain)


def eigenface_feature(train_data, test_data, V, r):
    # project onto the first r right singular vectors (eigenfaces)
    return np.dot(train_data, V[:r, :].T), np.dot(test_data, V[:r, :].T)


# classification accuracy as a function of the number of eigenface features
accuracy = []
for r in range(1, 201):
    F, F_test = eigenface_feature(mstrain, mstest, V, r)
    ovr = OVR(LR()).fit(F, train_labels)
    accuracy.append(ovr.score(F_test, test_labels))
plt.plot(range(1, 201), accuracy)
plt.savefig("1h.png")