def evaluate_AUC(prices, model, score_thresholds=None,
                 window_size=def_prediction_params['window_size'],
                 future_window=def_annotation_params['future_window'],
                 drop_threshold=def_annotation_params['drop_threshold'],
                 drop_window=def_annotation_params['drop_window'],
                 percentage=def_annotation_params['use_percentage']):
    """Sweep decision thresholds and compute a precision-recall AUC for `model`.

    For each threshold, classification counts are obtained from `counts_pn`
    and converted into IR measures by `irutils.calculate_IR`; the collected
    recall/precision points are then folded into a single area-under-curve
    value by `irutils.calculate_AUC`.

    Parameters
    ----------
    prices, model :
        Forwarded unchanged to `counts_pn`.
    score_thresholds : iterable of float, optional
        Decision thresholds to evaluate; defaults to 101 evenly spaced
        values in [0.0, 1.0].
    window_size, future_window, drop_threshold, drop_window, percentage :
        Forwarded to `counts_pn`; defaults come from the module-level
        `def_prediction_params` / `def_annotation_params` dicts.

    Returns
    -------
    dict
        Keys: 'score_thresholds', 'recalls' (ndarray), 'precisions'
        (ndarray), 'AUC'.
    """
    # Use `is None`, not `== None`: if a numpy array is passed as
    # score_thresholds, `== None` would compare element-wise instead of
    # testing for the missing-argument sentinel.
    if score_thresholds is None:
        score_thresholds = np.linspace(0.0, 1.0, 101)
    recalls = []
    precisions = []
    for threshold in score_thresholds:
        ir_measures = irutils.calculate_IR(
            **counts_pn(prices, model,
                        divide_threshold=threshold,
                        window_size=window_size,
                        future_window=future_window,
                        drop_threshold=drop_threshold,
                        drop_window=drop_window,
                        percentage=percentage))
        recalls.append(ir_measures['recall'])
        precisions.append(ir_measures['precision'])
    recalls = np.array(recalls)
    precisions = np.array(precisions)
    evaluation_results = {'score_thresholds': score_thresholds,
                          'recalls': recalls,
                          'precisions': precisions,
                          'AUC': irutils.calculate_AUC(recalls, precisions)}
    return evaluation_results
def evaluate(prices, model, divide_threshold=def_prediction_params['divide_threshold'], window_size=def_prediction_params['window_size'], future_window=def_annotation_params['future_window'], drop_threshold=def_annotation_params['drop_threshold'], drop_window=def_annotation_params['drop_window'], percentage=def_annotation_params['use_percentage']): ir_counts = counts_pn(prices, model, divide_threshold=divide_threshold, window_size=window_size, future_window=future_window, drop_threshold=drop_threshold, drop_window=drop_window, percentage=percentage) print "tp = ", ir_counts['tp'], "fp = ", ir_counts['fp'], "fn = ", ir_counts['fn'], "tn = ", ir_counts['tn'] ir_measures = irutils.calculate_IR(**ir_counts) print "recall = ", ir_measures['recall'] print "precision = ", ir_measures['precision'] print "F-score = ", ir_measures['fscore']