def opp_sliding_window(data_x, data_y, ws, ss):
    """Segment data_x / data_y into windows of length ws, sampled every ss steps."""
    data_x = sliding_window(data_x, (ws, data_x.shape[1]), (ss, 1))
    # Use the last label of each window as that window's target.
    data_y = np.asarray([[i[-1]] for i in sliding_window(data_y, ws, ss)])
    data_x, data_y = data_x.astype(np.float32), one_hot(
        data_y.reshape(len(data_y)).astype(np.uint8))
    print(" ..after sliding window (testing): inputs {0}, targets {1}".format(
        data_x.shape, data_y.shape))
    return data_x, data_y
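# NOTE: sliding_window and one_hot are helpers defined elsewhere in the project
# (not shown in this file). Below is a minimal sketch of a compatible
# sliding_window, assuming NumPy >= 1.20; it is an illustrative stand-in for the
# real implementation, covering both call patterns used above (2-D features with
# tuple window/step, 1-D labels with scalar window/step).
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

def sliding_window(data, ws, ss):
    """Return all windows of shape `ws`, keeping every `ss`-th window along each axis."""
    ws = tuple(np.atleast_1d(ws))
    ss = tuple(np.atleast_1d(ss))
    windows = sliding_window_view(data, ws)               # every window, stride 1
    subsample = tuple(slice(None, None, s) for s in ss)   # then keep every ss-th one
    return windows[subsample].reshape((-1,) + ws)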
def load_y(y_path):
    """
    Read the Y file of values to be predicted.
    argument:  y_path  str  path to the 'train' or 'test' Y file
    return:    Y       ndarray / tensor of the 6 one-hot labels of each sample
    """
    file = open(y_path, 'rb')
    # Read dataset from disk, dealing with the text file's syntax.
    y_ = np.array(
        [row.replace('  ', ' ').strip().split(' ') for row in file],
        dtype=np.int32
    )
    file.close()
    # Subtract 1 from each output class for friendly 0-based indexing.
    return one_hot(y_ - 1)
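# one_hot is likewise assumed to be defined elsewhere in the project. A minimal
# sketch of the expected behaviour (integer class labels -> one-hot float rows);
# the optional n_classes argument is an assumption added here for illustration,
# not necessarily part of the original helper.
def one_hot(labels, n_classes=None):
    labels = np.asarray(labels).reshape(-1)
    if n_classes is None:
        n_classes = int(labels.max()) + 1
    return np.eye(n_classes, dtype=np.float32)[labels]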
x_test = np.loadtxt("X_test.CSV", delimiter=",", dtype=np.float32) X_test_t = np.vstack([[x_test[:10, :]], [x_test[10:20, :]], [x_test[20:30, :]], [x_test[30:40, :]], [x_test[40:50, :]], [x_test[50:60, :]], [x_test[60:70, :]], [x_test[70:80, :]], [x_test[80:90, :]], [x_test[90:100, :]], [x_test[100:110, :]], [x_test[110:120, :]], [x_test[120:130, :]], [x_test[130:140, :]], [x_test[140:150, :]], [x_test[150:160, :]], [x_test[160:170, :]], [x_test[170:180, :]]]) X_test = np.transpose(X_test_t, (1, 2, 0)) t_data_1 = np.loadtxt("Y_train.CSV", delimiter=",", dtype=np.int32) Y_train = t_data_1[:] - 1 y_train = one_hot(Y_train) t_data_2 = np.loadtxt("Y_test.CSV", delimiter=",", dtype=np.int32) Y_test = t_data_2[:] - 1 y_test = one_hot(Y_test) #-------------------------------------------- # Training (maybe multiple) experiment(s) #-------------------------------------------- n_layers_in_highway = 0 n_stacked_layers = 3 trial_name = "{}x{}".format(n_layers_in_highway, n_stacked_layers) for learning_rate in [0.001]: # [0.01, 0.007, 0.001, 0.0007, 0.0001]: for lambda_loss_amount in [0.005]: for clip_gradients in [15.0]:
#--------------------------------------------
# Useful Constants
#--------------------------------------------
# Those are separate normalised input features for the neural network.

# Load "X" (the neural network's training and testing inputs)
X_train_signals_paths = "/localSSD/xjc/codalab_train/train/train_train_fc7_feature_new.fea"
X_test_signals_paths = "/localSSD/xjc/codalab_train/train/train_valid_fc7_feature_new.fea"
y_train_path = "/localSSD/xjc/codalab_train/train/train_y_label_2.fea"
y_test_path = "/localSSD/xjc/codalab_train/train/valid_y_label_2.fea"

X_train = load_X_pca(X_train_signals_paths)
X_test = load_X_pca(X_test_signals_paths)
X_train = do_pca(X_train)
X_test = do_pca(X_test)

y_train = one_hot(load_Y_my(y_train_path))
y_test = one_hot(load_Y_my(y_test_path))

#--------------------------------------------
# Training (maybe multiple) experiment(s)
#--------------------------------------------
n_layers_in_highway = 0
n_stacked_layers = 3
trial_name = "{}x{}".format(n_layers_in_highway, n_stacked_layers)

for learning_rate in [0.0001]:  # [0.01, 0.007, 0.001, 0.0007, 0.0001]:
    for lambda_loss_amount in [0.005]:
        for clip_gradients in [15.0]:
            print("learning_rate: {}".format(learning_rate))
            print("lambda_loss_amount: {}".format(lambda_loss_amount))
            print("")
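# load_X_pca, load_Y_my and do_pca are project-specific helpers not shown in
# this file. A minimal sketch of what do_pca might look like, assuming
# scikit-learn is available; n_components=64 is an arbitrary illustrative value,
# not the project's actual setting.
from sklearn.decomposition import PCA

def do_pca(features, n_components=64):
    """Project a 2-D feature matrix (n_samples, n_dims) onto its top principal components."""
    pca = PCA(n_components=n_components)
    return pca.fit_transform(features).astype(np.float32)

# Note: as written above, calling do_pca separately on X_train and X_test fits a
# different projection for each split; fitting PCA on the training data and
# reusing pca.transform on the test data is the more conventional choice.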