# Example 1
# Load training features and labels from comma-delimited CSV files.
train_data = np.genfromtxt('data/train_data.csv', delimiter=',')
train_labels = np.genfromtxt('data/train_labels.csv', delimiter=',')


# Standardize features (zero mean, unit variance) before training.
scaler = preprocessing.StandardScaler()
scaler.fit(train_data)
X = scaler.transform(train_data)
y = train_labels

# Feed-forward classifier: one hidden ReLU layer, 10-way softmax output.
model = Sequential()

# FIX: added the missing comma between activation and input_dim (was a SyntaxError).
# NOTE(review): input_dim=100 is hard-coded — confirm it matches the CSV's
# feature count (X.shape[1]).
model.add(Dense(units=64, activation='relu', input_dim=100))
model.add(Dense(units=10, activation='softmax'))

# FIX: model.compiler -> model.compile (no such attribute), and
# 'categorial_crossentropy' -> 'categorical_crossentropy' (Keras loss name).
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])

# FIX: epocs -> epochs (unknown keyword would raise TypeError).
model.fit(X, y, epochs=5, batch_size=32)

# NOTE(review): x_test / y_test are not defined anywhere in this chunk —
# presumably loaded or split elsewhere; verify before running.
loss_and_metrics = model.evaluate(x_test, y_test, batch_size=128)

# best_score = 0.0

# for j in [10,20,40,80,150,270]:
#     # Use PCA to reduce dimensionality
#     print ('--------------------------')
#     if j == 270:
#         Xn = X
#         print ('Using entire original data')

#     else: