# Compare the cross-validated accuracy of each individual model.
for clf, label in zip([svcmod, knnmod, rfmod, xgbmod, etmod, admod, lr], [
        'SVC', 'KNN', 'Random Forest', 'XGB', 'ExtraTrees', 'AdaBoost',
        'Logistic Regression'
]):
    print("%s Accuracy: %0.3f" % (label, clf.scores.mean()))

# Let's now use **VotingClassifier**. I have decided to drop the SVC (since it didn't give good results) and to give a higher weight to KNN and XGB. The reason for XGB is clear (it normally gets the best results); KNN gets a weight of 2 because its predictions differed the most from the rest, which adds diversity to the ensemble. With voting='soft', the classifiers' predicted class probabilities are averaged using these weights, and the class with the highest weighted average probability wins.

# In[ ]:

votemod = VotingClassifier(estimators=[('xgb', xgbmod), ('et', etmod),
                                       ('knn', knnmod), ('rf', rfmod),
                                       ('ad', admod)],
                           weights=[2, 1, 2, 1, 1],
                           voting='soft')
votemod.scores = cross_val_score(votemod, X, y, cv=5)
print("Voting Accuracy: %0.3f" % votemod.scores.mean())

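# Let's also try **StackingClassifier**: the base classifiers' predictions are used as input features for a meta-classifier (here logistic regression), which learns how to combine them.
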
# In[ ]:

stcmod = StackingClassifier(
    classifiers=[svcmod, xgbmod, knnmod, etmod, admod, rfmod],
    meta_classifier=lr)
stcmod.scores = cross_val_score(stcmod, X, y, cv=5)
print("Stacking Accuracy: %0.3f" % stcmod.scores.mean())

# In[ ]:

# Fit the voting ensemble on the full training set and predict on the test set.
votemod.fit(X, y)
predictionsfinal = votemod.predict(testX)