# -*- coding: utf-8 -*-
from traffic_signs import load_data
import numpy as np
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
#from keras.utils import to_categorical

#---------------------------- Q3 -----------------------------------------
X, y = load_data("GTSRB_subset_2")

# Scale pixel values to [0, 1]
X = X - np.min(X)
X = X / np.max(X)

# Two-column label matrix (one column per class)
y1 = np.int32(y == 0)
y2 = np.transpose(np.array([y, y1]))

X_train, X_test, y_train, y_test = train_test_split(X, y2, test_size=0.2)

#---------------------------- Q4 -----------------------------------------
model = Sequential()

N = 32       # Number of feature maps
w, h = 5, 5  # Conv. window size
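# Hypothetical continuation (a sketch under assumptions: only N and the
# w x h window come from the fragment above; the remaining layers, the
# optimizer, and the training call are illustrative, not the original code):
model.add(Conv2D(N, (w, h), activation='relu',
                 input_shape=X_train.shape[1:], padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(100, activation='relu'))
model.add(Dense(2, activation='softmax'))

model.compile(optimizer='sgd', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(X_train, y_train, epochs=20, batch_size=32,
          validation_data=(X_test, y_test))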
plt.ylabel('w$_1$')
plt.title('Optimization path')

plt.subplot(212)
plt.plot(100.0 * np.array(accuracies), linewidth=2)
plt.ylabel('Accuracy / %')
plt.xlabel('Iteration')

plt.tight_layout()
plt.savefig("log_loss_minimization.pdf", bbox_inches="tight")

####################Q4####################
import traffic_signs
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
import sklearn.model_selection

X, y = traffic_signs.load_data(".")

# Flatten each image matrix into a 1-D feature vector
X_flat = []
for matrix in X:
    X_flat.append(matrix.ravel())
X = np.float64(X_flat)

clf_list = [LogisticRegression(solver='liblinear'), SVC(gamma='scale')]
clf_name = ['LR', 'SVC']
C_range = [1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1]

X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
    X, y, test_size=0.2, random_state=0)

for clf, name in zip(clf_list, clf_name):
    for C in C_range:
        for penalty in ["l1", "l2"]:
            clf.C = C
            clf.penalty = penalty
            clf.fit(X_train, y_train)
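            # Hypothetical continuation (a sketch, not the original author's
            # code): score each fitted model on the held-out split with the
            # accuracy_score imported above. Note that the 'penalty' attribute
            # only affects LogisticRegression; SVC simply ignores it, so the
            # l1/l2 loop is redundant for the 'SVC' case.
            y_pred = clf.predict(X_test)
            print("%s  C=%g  penalty=%s  accuracy=%.3f"
                  % (name, C, penalty, accuracy_score(y_test, y_pred)))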
""" Created on Mon Feb 4 12:52:14 2019 @author: Asier """ import traffic_signs import numpy as np from sklearn.model_selection import train_test_split from sklearn.svm import SVC from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RandomizedSearchCV C_range = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1] features = [] X, y = traffic_signs.load_data('./data') #### Load or Extract features #features = traffic_signs.extract_lbp_features(X) features = np.load('./data/features.npy') #features = np.array(features) #features = features.reshape(features.shape[0], features.shape[1]*features.shape[2]) #X = X.reshape(X.shape[0], X.shape[1]*X.shape[2]) ## Spliting data X_train, X_test, y_train, y_test = train_test_split(features, y) parameters = {'kernel': ('linear', 'rbf'), 'C': C_range} svc = SVC(gamma="scale")
import traffic_signs
import numpy as np
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import to_categorical
from keras.losses import categorical_crossentropy
from keras.optimizers import SGD

num_classes = 2
batch_size = 32
epochs = 20

X, y = traffic_signs.load_data('./input')

# Min-max normalization of the pixel values
X_norm = ((X - np.amin(X)) / (np.amax(X) - np.amin(X)))

X_train, X_test, y_train, y_test = train_test_split(X_norm, y)
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)

model = Sequential()
model.add(
    Conv2D(32, kernel_size=(5, 5), activation='relu',
           input_shape=X_norm.shape[1:], padding='same'))
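# Hypothetical continuation (a sketch assuming a typical small CNN; the
# remaining layers, the learning rate, and the training call are not part of
# the original fragment):
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, kernel_size=(5, 5), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=categorical_crossentropy,
              optimizer=SGD(lr=0.01),
              metrics=['accuracy'])

model.fit(X_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(X_test, y_test))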