# Feature selection: fit a Lasso model and keep only the features with non-zero coefficients
from sklearn import linear_model
from sklearn.feature_selection import SelectFromModel

clf = linear_model.Lasso(alpha=0.1).fit(train, train_y)
selector = SelectFromModel(clf, prefit=True)
X_new = selector.transform(train)
print(X_new.shape)

test_new = selector.transform(test)
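To check which columns survived the Lasso-based selection, the fitted selector exposes a boolean support mask; a minimal sketch, assuming `train` is a pandas DataFrame:

selected_mask = selector.get_support()           # boolean mask over the input columns
selected_columns = train.columns[selected_mask]
print('Selected features:', list(selected_columns))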

### Build and Evaluate the Dense Network Model
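The `baseline_model` helper called below is not shown in this excerpt; a minimal sketch of what it might look like, assuming a small fully connected regression network (the layer sizes here are illustrative assumptions):

from keras.models import Sequential
from keras.layers import Dense

def baseline_model(train):
    # hypothetical reconstruction: the input layer width follows the number of columns in train
    model = Sequential()
    model.add(Dense(64, input_dim=train.shape[1], activation='relu'))
    model.add(Dense(32, activation='relu'))
    model.add(Dense(1))  # linear output for a regression target
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model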

model = baseline_model(train)
# fit model
history = model.fit(train, train_y, batch_size=100, validation_data=(test, test_y), epochs=100, verbose=1)

# evaluate the model
train_mse = model.evaluate(train, train_y, verbose=0)
test_mse = model.evaluate(test, test_y, verbose=0)
print('Train MSE: %.3f' % train_mse)
print('Test MSE: %.3f' % test_mse)
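The `history` object returned by `fit` is captured above but never used; a minimal sketch for plotting the training and validation loss curves, assuming matplotlib is available:

import matplotlib.pyplot as plt

plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='validation')
plt.xlabel('epoch')
plt.ylabel('MSE')
plt.legend()
plt.show()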

### Load Test Data and Predict

# Load the test file and convert its categorical columns the same way as the training data
TaskTest = pd.read_csv('test.csv', encoding='latin1')

convertedTest = pd.DataFrame(columns=columns)

for cc in columns:
    convertedTest[cc] = convertCategoricalToNumerical(TaskTest, cc, TaskTest[cc])

convertedTest['Length'] = TaskTest['Length']
convertedTest = convertedTest.dropna()
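The `convertCategoricalToNumerical` helper and the final prediction step are not included in this excerpt; a minimal sketch of both, assuming the helper simply maps each distinct category to an integer code (missing or unmapped values become NaN, which the dropna() above removes):

def convertCategoricalToNumerical(df, column, values):
    # hypothetical helper: encode each category of df[column] as an integer
    codes = {cat: i for i, cat in enumerate(df[column].astype('category').cat.categories)}
    return values.map(codes)

# assumed final step for this section: predict on the converted test rows
predictions = model.predict(convertedTest)
print(predictions[:5])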
### Example #2
# 80/20 train/test split (reconstructed; the array and target names are assumptions based on the category-II split below)
train_data = final_array[0:int(0.8 * len(final_array)), :]
train_target = main_target.reshape(-1, 1)[0:int(0.8 * len(final_array)), 0]

test_data = final_array[int(0.8 * len(final_array)):len(final_array), :]
test_target = main_target.reshape(-1, 1)[int(0.8 * len(final_array)):len(final_array), 0]

# First network: fully connected regression model (category I)
import keras
from keras.models import Sequential

model = Sequential()
model.add(
    keras.layers.core.Dense(len(train_data[0]),
                            input_dim=len(train_data[0]),
                            kernel_initializer='uniform',
                            activation='relu',
                            use_bias=True))
model.add(keras.layers.core.Dropout(0.1))  # dropout has to be added to the model to take effect
model.add(
    keras.layers.core.Dense(8, kernel_initializer='uniform', activation='relu', use_bias=True))
model.add(keras.layers.core.Dense(1, kernel_initializer='uniform', use_bias=True))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(train_data, train_target, epochs=150, batch_size=10)
model.evaluate(train_data, train_target, batch_size=10)

# Train the 2nd neural network
# for category II (LOS > 7)

#array_2 = scipy.delete(array_2,0,1);
train_data_2 = array_2[0:int(0.9 * len(array_2)), 0:len(array_2[0])]
train_target_2 = main_target_2.reshape(-1, 1)[0:int(0.9 * len(array_2)), 0]

test_data_2 = array_2[int(0.9 * len(array_2)):len(array_2), :]
test_target_2 = main_target_2.reshape(-1, 1)[int(0.9 * len(array_2)):len(array_2), 0]

# Second network; the architecture is assumed to mirror the first network, trained on the category-II split
model_2 = Sequential()
model_2.add(
    keras.layers.core.Dense(len(train_data_2[0]),
                            input_dim=len(train_data_2[0]),
                            kernel_initializer='uniform',
                            activation='relu',
                            use_bias=True))
model_2.add(
    keras.layers.core.Dense(8, kernel_initializer='uniform', activation='relu', use_bias=True))
model_2.add(keras.layers.core.Dense(1, kernel_initializer='uniform', use_bias=True))
model_2.compile(loss='mean_squared_error', optimizer='adam')
model_2.fit(train_data_2, train_target_2, epochs=150, batch_size=10)
model_2.evaluate(train_data_2, train_target_2, batch_size=10)

### Example #3
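The snippet below prints per-feature importances, but the objects it relies on (`fsel`, `nb_features`, `data`, `features`) are not defined in this excerpt; a minimal sketch of the assumed setup, using a tree-based selector (the file name and column layout are assumptions):

import numpy as np
import pandas as pd
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import SelectFromModel

data = pd.read_csv('data.csv')        # hypothetical input file
X = data.iloc[:, 2:-1].values         # assume the first two columns are identifiers and the last is the label
y = data.iloc[:, -1].values

fsel = ExtraTreesClassifier().fit(X, y)          # per-feature importance estimates
selector = SelectFromModel(fsel, prefit=True)
nb_features = selector.transform(X).shape[1]     # number of features kept by the selector

features = []                                    # filled below with the names of the selected columns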
print('%i features identified as important:' % nb_features)

indices = np.argsort(fsel.feature_importances_)[::-1][:nb_features]
for f in range(nb_features):
    # the 2+ offset skips the leading non-feature (identifier) columns of the DataFrame
    print("%d. feature %s (%f)" % (f + 1, data.columns[2 + indices[f]], fsel.feature_importances_[indices[f]]))

# XXX: take care of the feature order, keep the selected columns in DataFrame order
for f in sorted(np.argsort(fsel.feature_importances_)[::-1][:nb_features]):
    features.append(data.columns[2 + f])
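A plausible next step, not shown in the original, is to restrict the data to the selected columns so that the matrix width matches the `input_dim=54` used by the network below:

# hypothetical: build the training matrix from the selected feature columns
X = data[features].values
y = data.iloc[:, -1].values     # assumed label column, as in the sketch above
print(X.shape)                  # the second dimension should equal input_dim of the first Dense layer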

# Deep learning: build a binary classifier over the selected features
from keras.models import Sequential
from keras.layers import Dense

# create model (input_dim=54 should match the number of selected feature columns)
model = Sequential()
model.add(Dense(12, input_dim=54, activation='relu'))
model.add(Dense(256, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

# Compile model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# Fit the model
model.fit(X, y, epochs=10, batch_size=10)

# evaluate the model
scores = model.evaluate(X, y)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
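Note that the evaluation above runs on the same data the network was trained on, so the printed number is a training accuracy; a held-out evaluation could look like this (a sketch, assuming scikit-learn is available):

from sklearn.model_selection import train_test_split

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=42)
model.fit(X_tr, y_tr, epochs=10, batch_size=10)
loss, acc = model.evaluate(X_te, y_te)
print("held-out %s: %.2f%%" % (model.metrics_names[1], acc * 100))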

# Save model
model.save('C:/Users/Rahul/Desktop/antivirus_demo-master/deep_calssifier/deep_classifier.h5')
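The saved HDF5 file can be reloaded later for inference; a minimal sketch (the path simply mirrors the save call above):

from keras.models import load_model

restored = load_model('C:/Users/Rahul/Desktop/antivirus_demo-master/deep_calssifier/deep_classifier.h5')
predicted_labels = (restored.predict(X) > 0.5).astype(int)   # threshold the sigmoid output at 0.5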