# --- Train the "everybody" network for a fixed number of epochs, recording
# per-batch training energy plus per-epoch validation energy and accuracy.
val_energy_everybody = []
val_acc_everybody = []
# NOTE(review): train_energy_everybody is appended to but not initialized
# here — presumably created earlier in the file; confirm.
for e in range(n_everybody_epochs):
    train_epoch = []
    train_energy_everybody.append(train_epoch)
    for i in range(n_batches):
        train_epoch.append(train_l2_everybody(i))
    val_energy_everybody.append(val_l2())
    val_acc_everybody.append(val_l2_accuracy())
    # Parenthesized argument: identical behavior under Python 2's print
    # statement, and also valid under Python 3 (the original bare
    # `print "..."` form is a syntax error on Python 3).
    print("Epoch %d: mean train %1.3f, val %1.3f, acc %1.3f" % (
        e, np.mean(train_epoch), val_energy_everybody[-1],
        val_acc_everybody[-1]))

# --- Predict on the held-out test subjects and write a submission file
# with "Id,Prediction" rows.
from utils import load_test_subjects
X_test, labels_test = load_test_subjects()
predictions = f_everything_classify(X_test)

# Build submission ids as 1000 * subject_label + within-subject sample
# index, so ids are unique across subjects.
indices = np.zeros_like(predictions)
for l in np.unique(labels_test):
    indices[labels_test == l] = 1000 * l + np.arange(
        (labels_test == l).sum())

submission_file = "submission_nnet.csv"
with open(submission_file, "w") as f:
    f.write("Id,Prediction\n")
    for ind, pred in zip(indices, predictions):
        f.write("%d,%d\n" % (ind, pred))
# --- Imports and data preparation for the classifier baseline. ---
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from sklearn.pipeline import make_pipeline
from utils import load_train_subjects, load_test_subjects
import matplotlib.pyplot as plt

# Memoize the expensive subject-loading calls on disk.
# NOTE(review): `Memory` is not imported in this chunk — presumably
# joblib.Memory brought in earlier in the file; confirm.
mem = Memory(cachedir="cache", verbose=10)
load_train_subjects = mem.cache(load_train_subjects)
load_test_subjects = mem.cache(load_test_subjects)

all_train_data, all_train_targets, all_train_labels = load_train_subjects()
all_test_data, all_test_labels = load_test_subjects()

# Carve out subject 16 as the validation split.
val_idx = np.where(all_train_labels == 16)[0]
all_val_data = all_train_data[val_idx]
all_val_targets = all_train_targets[val_idx]
all_val_labels = all_train_labels[val_idx]

# Restrict the training split to subjects with label below 15.
# NOTE(review): label 15 lands in neither split (val is == 16, train is
# < 15) — confirm this exclusion is intentional.
train_idx = np.where(all_train_labels < 15)[0]
all_train_data = all_train_data[train_idx]
all_train_targets = all_train_targets[train_idx]
all_train_labels = all_train_labels[train_idx]

# Aliases used by the modelling code downstream.
X_train = all_train_data
y_train = all_train_targets
X_val = all_val_data
y_val = all_val_targets