# --- Feature-importance plot, attribute selection, and evaluation -----------
# NOTE(review): this chunk begins mid-statement. The opening of the bar-chart
# call (presumably ax.barh(..., ecolor='black')) lies outside this view and
# must be restored from the surrounding file; its orphaned tail is kept below
# as a comment.
#   ecolor='black')

ax.set_yticks(y_pos)
ax.set_yticklabels(show_features)
ax.invert_yaxis()  # labels read top-to-bottom
ax.set_xlabel('Gain')
plt.show()

transformer.print_config()
print(transformer.attribute_position)
print(show_features[0])
print(show_features[0].split("#")[0])

# Feature labels appear to be formatted "<attribute-id>#<rest>"; the id of the
# top-ranked feature selects which attribute to transform next.
most_important_attribute = int(show_features[0].split("#")[0])
transformer.next_transformation_for_attribute(
    most_important_attribute)
print("##############")
transformer.print_config()
print(transformer.attribute_position)

# Make predictions using the testing set (datasets[1] / targets[1]).
y_pred = regr.predict(datasets[1])
print("F1: " + str(f1_score(targets[1], y_pred, average='micro')))
# print(explain_prediction_me(X_test[0, :], regr, feature_names))

y_test = targets[1]
# NOTE(review): the loop body lies outside this chunk; the commented-out
# range(len(y_test)) suggests the full test set was iterated at some point.
for record_i in [0]:  # range(len(y_test)):
    pass  # TODO: restore loop body from the rest of the file
# --- Iterative transformation search + model fitting ------------------------
# NOTE(review): this chunk begins mid-statement. The assignment head below is
# a reconstruction: `new_dataframe` is consumed immediately afterwards, so the
# truncated expression was presumably `new_dataframe = pandas_table[[ ... ]]`
# selecting the replaced column and the target column — confirm against the
# surrounding file.
new_dataframe = pandas_table[[
    pandas_table.columns[col_rep],
    pandas_table.columns[target_column],
]]
transformer = Transformer(new_dataframe, 1)

# Repeatedly advance the transformation for attribute 0, refitting a model on
# each transformed view, until the transformer signals exhaustion by returning
# None for the training split.
# NOTE(review): indentation is inferred — the source was collapsed onto one
# line. The statements after the break test dereference datasets[0], so they
# must execute inside the loop (datasets[0] is None once the loop exits).
while True:
    transformer.fit()
    datasets, targets, feature_names = transformer.transform()
    if datasets[0] is None:  # was: type(datasets[0]) == type(None)
        break
    transformer.next_transformation_for_attribute(0)

    print(str(type(datasets[0])))
    print(str(datasets[0].shape))
    # A 1-D split means a single feature column; promote each of the three
    # splits (train/test/validation, presumably) to a 2-D column matrix so
    # the estimator accepts it.
    if len(datasets[0].shape) == 1:
        for data_i in range(3):
            datasets[data_i] = np.matrix(datasets[data_i]).T

    regr = xgb.XGBClassifier(objective='multi:softprob', nthread=4)
    regr.fit(datasets[0], targets[0])

    from sklearn import svm
    # regr = svm.SVC()  # alternative estimator, left disabled in the original