def train():
    """Build a classifier with default settings, train it, persist it to
    disk, and return the trained instance."""
    # 1. Build the model
    print('(TRAINER) Creating model...')
    classifier = Model()

    # 2. Fit the classifier
    print('(TRAINER) Training model...')
    classifier.train()

    # 3. Persist the classifier
    print('(TRAINER) Saving model...')
    classifier.save()
    return classifier
def evaluate(grid_search=False):
    """Train and evaluate every model type in const.MODELS, print metric
    reports sorted by F1 score, then retrain and persist the best model.

    When *grid_search* is True, each model type is first tuned over its
    parameter space and only its best configuration enters the ranking.
    """

    def _print_metrics(accuracy, precision, recall, f1_score):
        # Metric dump shared by the grid-search report and the final report.
        print("-> F1 Score - ", "{0:.2f}".format(f1_score))
        print("-> Precision - ", "{0:.2f}".format(precision))
        print("-> Recall - ", "{0:.2f}".format(recall))
        print("-> Accuracy - ", "{0:.2f}".format(accuracy))
        print()

    # 6-tuples: (model, params, accuracy, precision, recall, f1_score)
    ranking = []

    for model_type in const.MODELS:
        print()
        print('(EVALUATOR) Evaluating model ' + model_type)

        if grid_search:
            # Score every parameter combination for this model type.
            trials = []
            for candidate in get_parameter_space(model_type):
                # 1. Build, 2. train, 3. evaluate one configuration.
                classifier = Model(model=model_type,
                                   params={'model': model_type, 'params': candidate})
                classifier.train()
                accuracy, scores, _, _ = classifier.evaluate()
                trials.append((model_type, candidate, accuracy,
                               scores['precision'], scores['recall'],
                               scores['f1_score']))

            # Best F1 score first.
            trials.sort(key=lambda entry: entry[5], reverse=True)

            print()
            print('(EVALUATOR) Grid search results -> Model - ', model_type)
            for _, candidate, accuracy, precision, recall, f1_score in trials:
                print()
                print("Params - ", candidate)
                _print_metrics(accuracy, precision, recall, f1_score)

            # Only the winning configuration enters the global ranking;
            # trials[0] already carries (model_type, params, metrics...).
            ranking.append(trials[0])
        else:
            # 1. Build, 2. train, 3. evaluate with default parameters.
            classifier = Model(model=model_type)
            classifier.train()
            accuracy, scores, _, _ = classifier.evaluate()
            ranking.append((model_type, None, accuracy,
                            scores['precision'], scores['recall'],
                            scores['f1_score']))

    # Rank every model type by F1 score, best first.
    ranking.sort(key=lambda entry: entry[5], reverse=True)

    print()
    print('(EVALUATOR) Sorted results: ')
    for name, candidate, accuracy, precision, recall, f1_score in ranking:
        print()
        print("Model - ", name)
        if candidate is not None:
            print("Params - ", candidate)
        _print_metrics(accuracy, precision, recall, f1_score)

    # Retrain the winner from scratch and persist it.
    best_name, best_params = ranking[0][0], ranking[0][1]
    winner = Model(model=best_name,
                   params={'model': best_name, 'params': best_params})
    winner.train()
    winner.save()
    print('(EVALUATOR) Trained and saved best model')