import autograd.numpy as np
from autograd import grad
from util import shl, shm, shs

from datasets import ToyDataset

np.random.seed(10)

ds = ToyDataset()

xv, yv = ds.next_train_batch()

batch_size = xv.shape[0]


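# cost: squared Frobenius distance between the Gram matrix of the data xv and
# the Gram matrix of the low-dimensional embedding y (a classical-MDS-style objective)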
def cost(y):
    x_gram = np.dot(xv, xv.T)
    y_gram = np.dot(y, y.T)
    return np.sum(np.square(y_gram - x_gram))


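# autograd builds a function that returns the gradient of the cost w.r.t. the embedding y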
grad_cost = grad(cost)

n_comp = 2

yit = np.random.random((batch_size, n_comp))

for it in range(200):
    cost_val = cost(yit)
    dy = grad_cost(yit)

    # plain gradient-descent update on the embedding
    # (the step size here is an assumed value; tune as needed)
    yit = yit - 0.001 * dy
    print("it %d, cost %f" % (it, cost_val))
import numpy as np
import pickle
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import cv2

import torch
import torch.utils.data

from datasets import ToyDataset
from model import ToyNet  # assumed import path for the project's ToyNet

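# train 10 independently initialized networks to form the ensemble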
for j in range(10):
    # NOTE! change this to not overwrite all log data when you train the model:
    model_id = "Ensemble-MAP-Adam-Fixed_%d_M4" % (j + 1)

    num_epochs = 150
    batch_size = 32
    learning_rate = 0.001

    train_dataset = ToyDataset()
    N = float(len(train_dataset))
    print(N)

    alpha = 1.0

    num_train_batches = int(len(train_dataset) / batch_size)
    print("num_train_batches:", num_train_batches)

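    # standard PyTorch DataLoader; reshuffles the toy dataset into mini-batches every epoch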
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True)

    init_param_values = {}
    network = ToyNet(model_id,
                     project_dir="/root/evaluating_bdl/toyRegression").cuda()
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis

# display names for the classifiers below (reconstructed; order matches the list)
names = [
    "Nearest Neighbors", "Linear SVM", "RBF SVM", "Gaussian Process",
    "Decision Tree", "Random Forest", "Neural Net", "AdaBoost",
    "Naive Bayes", "QDA",
]

classifiers = [
    KNeighborsClassifier(3),
    SVC(kernel="linear", C=0.025),
    SVC(gamma=2, C=1),
    GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),
    DecisionTreeClassifier(max_depth=5),
    RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
    MLPClassifier(max_iter=1000),
    AdaBoostClassifier(),
    GaussianNB(),
    QuadraticDiscriminantAnalysis()
]

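# compare a set of off-the-shelf scikit-learn classifiers on the same toy dataset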
ds = ToyDataset()

# model = DecisionTreeClassifier()
for model_name, model in zip(names, classifiers):
    x_train, y_train = ds.next_train_batch()

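    # labels arrive as a 2-D array (one-hot / per-class scores); convert to class indices for scikit-learn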
    y_train = np.argmax(y_train, axis=1)

    x_test, y_test = ds.next_test_batch()
    y_test = np.argmax(y_test, axis=1)

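    # fit on the training split, then measure error on the held-out test batch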
    model.fit(x_train, y_train)

    predicted = model.predict(x_test)

    print("%s, predicting, classification error=%f" % (
        model_name, np.mean(predicted != y_test)))