def _fit_ranker(self, xtrain, ytrain, xtest, ytest, next_point):
    """Fit the learner with a candidate hyperparameter point and score it.

    Applies ``next_point`` via ``self._set_new_parameters``, fits
    ``self.learner`` on the training split, and evaluates
    ``self.validation_loss`` on the test split.

    Parameters
    ----------
    xtrain, ytrain : training features and targets.
    xtest, ytest : held-out features and targets used for validation.
    next_point : hyperparameter configuration to evaluate
        (format defined by ``self._set_new_parameters`` — not visible here).

    Returns
    -------
    tuple
        ``(loss, time_taken)`` — mean validation loss (``1.00``, the
        assumed maximum, if fitting/evaluation raised) and the elapsed
        time since the start of the call.
    """
    start = datetime.now()
    self._set_new_parameters(next_point)
    try:
        self.learner.fit(xtrain, ytrain, **self._fit_params)
        ypred = self.learner(xtest)
        loss = get_mean_loss(self.validation_loss, ytest, ypred)
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # still propagate; any fit/eval failure is logged and penalized
    # with the maximum loss instead of aborting the search.
    except Exception:
        self.logger.error(traceback.format_exc())
        self.logger.info(
            "For current parameter error occurred so taking loss as maximum value"
        )
        loss = 1.00
    # Measured once here: in the original this was the last statement of
    # both the try and the except branch, so the value is equivalent.
    time_taken = duration_till_now(start)
    return loss, time_taken
'cluster_id': str(cluster_id) } for name, evaluation_metric in lp_metric_dict[ learning_problem].items(): predictions = s_pred if evaluation_metric in metrics_on_predictions: logger.info("Metric on predictions") predictions = y_pred if "NDCG" in name: evaluation_metric = make_ndcg_at_k_loss(k=n_objects) predictions = y_pred if "CategoricalTopK{}" == name: k = int(n_objects / 2) + 1 evaluation_metric = topk_categorical_accuracy_np(k=k) name = name.format(k) metric_loss = get_mean_loss(evaluation_metric, Y_test, predictions) logger.info(ERROR_OUTPUT_STRING % (name, metric_loss)) if np.isnan(metric_loss): results[name] = "\'Infinity\'" else: results[name] = "{0:.4f}".format(metric_loss) dbConnector.insert_results(experiment_schema=experiment_schema, experiment_table=experiment_table, results=results) if fold_id != 0: dbConnector.mark_running_job_finished(current_job_id) dbConnector.mark_running_job_finished(job_id) except Exception as e: if hasattr(e, 'message'): message = e.message else: