# Example #1 (Beispiel #1)
# 0
def run_instance(instance,
                 sparsity,
                 trade_off,
                 learning_rate,
                 max_iter=1000,
                 epsilon=1e-3,
                 logger=None):
    """Optimize a single instance and log evaluation metrics.

    Runs the optimizer, extracts the predicted subgraph for each step as
    the nonzero support of the solution vector, evaluates against the
    ground truth, then post-processes the predictions on the graph and
    evaluates again.

    Args:
        instance: dict providing at least 'true_subgraphs' (ground truth)
            and 'graph' (used by post-processing).
        sparsity: sparsity parameter forwarded to optimize().
        trade_off: trade-off weight forwarded to optimize().
        learning_rate: optimizer step size.
        max_iter: maximum optimizer iterations.
        epsilon: convergence tolerance.
        logger: logging.Logger used for reporting; if None, the shared
            'fei' logger is used.
    """
    # BUG FIX: logger defaults to None but was dereferenced unconditionally
    # below, so every call omitting `logger` raised AttributeError.
    if logger is None:
        logger = logging.getLogger('fei')

    opt_x_array, _ = optimize(instance, sparsity, trade_off, learning_rate,
                              max_iter, epsilon)

    # Predicted subgraph = support (indices of nonzero entries) of each
    # per-step solution vector.
    raw_pred_subgraphs = [np.nonzero(opt_x)[0] for opt_x in opt_x_array]

    (global_prec, global_rec, global_fm, global_iou,
     valid_global_prec, valid_global_rec, valid_global_fm,
     valid_global_iou, _, _, _, _, _) = evaluate_evo(
        instance['true_subgraphs'], raw_pred_subgraphs)

    logger.debug('-' * 5 + 'performance in the whole interval' + '-' * 5)
    logger.debug('global precision: {:.5f}'.format(global_prec))
    logger.debug('global recall   : {:.5f}'.format(global_rec))
    logger.debug('global f-measure: {:.5f}'.format(global_fm))
    logger.debug('global iou      : {:.5f}'.format(global_iou))
    logger.debug('-' * 5 + 'performance in the interval with signals' +
                 '-' * 5)
    logger.debug('global precision: {:.5f}'.format(valid_global_prec))
    logger.debug('global recall   : {:.5f}'.format(valid_global_rec))
    logger.debug('global f-measure: {:.5f}'.format(valid_global_fm))
    logger.debug('global iou      : {:.5f}'.format(valid_global_iou))

    # Refine the raw predictions on the graph structure and re-evaluate.
    refined_pred_subgraphs = post_process_evo(instance['graph'],
                                              raw_pred_subgraphs)
    (global_prec, global_rec, global_fm, global_iou,
     valid_global_prec, valid_global_rec, valid_global_fm,
     valid_global_iou, _, _, _, _, _) = evaluate_evo(
        instance['true_subgraphs'], refined_pred_subgraphs)

    logger.debug('-' * 5 + ' refined performance ' + '-' * 5)
    logger.debug('refined global precision: {:.5f}'.format(global_prec))
    logger.debug('refined global recall   : {:.5f}'.format(global_rec))
    logger.debug('refined global f-measure: {:.5f}'.format(global_fm))
    logger.debug('refined global iou      : {:.5f}'.format(global_iou))
    logger.debug('-' * 5 + 'refined performance in the interval with signals' +
                 '-' * 5)
    logger.debug('global precision: {:.5f}'.format(valid_global_prec))
    logger.debug('global recall   : {:.5f}'.format(valid_global_rec))
    logger.debug('global f-measure: {:.5f}'.format(valid_global_fm))
    logger.debug('global iou      : {:.5f}'.format(valid_global_iou))
# Example #2 (Beispiel #2)
# 0
def run_dataset(paras):
    """Run the optimizer over every instance in a dataset and log metrics.

    Args:
        paras: tuple of (dataset, sparsity, trade_off, learning_rate,
            max_iter, epsilon, write_to_dir, data_type). When
            write_to_dir is falsy, output goes to the shared 'fei'
            logger; otherwise a per-run log file is created under
            write_to_dir and the run is skipped if that file already
            exists (results presumed already computed).
    """
    (dataset, sparsity, trade_off, learning_rate, max_iter, epsilon,
     write_to_dir, data_type) = paras

    file_handler = None
    if not write_to_dir:
        logger = logging.getLogger('fei')
    else:
        log_fn = '{}_sparsity_{:d}_trade_{}_lr_{}_{}.txt'.format(
            DATASET, sparsity, trade_off, learning_rate, data_type)

        # Skip runs whose log file already exists.
        if os.path.isfile(os.path.join(write_to_dir, log_fn)):
            print('file exist !!!')
            return

        logger = logging.getLogger(log_fn)
        # BUG FIX: without an explicit level the default (WARNING)
        # threshold filtered out every logger.debug() call below,
        # leaving the log file empty.
        logger.setLevel(logging.DEBUG)
        formatter = logging.Formatter('')
        file_handler = logging.FileHandler(
            filename=os.path.join(write_to_dir, log_fn), mode='w')
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)

    try:
        all_performance = []
        logger.debug('-' * 5 + ' setting ' + '-' * 5)
        logger.debug('sparsity: {:d}'.format(sparsity))
        logger.debug('learning rate: {:.5f}'.format(learning_rate))
        logger.debug('trade off: {:.5f}'.format(trade_off))
        for i, instance in enumerate(dataset):

            logger.debug('instance: {:d}'.format(i))

            opt_x_array, run_time = optimize(instance,
                                             sparsity,
                                             trade_off,
                                             learning_rate,
                                             max_iter,
                                             epsilon,
                                             logger=None)

            logger.debug('run time: {:.5f}'.format(run_time))

            # Predicted subgraph = support (nonzero indices) of each
            # per-step solution vector.
            raw_pred_subgraphs = []
            for opt_x in opt_x_array:
                pred_subgraph = np.nonzero(opt_x)[0]
                # Was a stray print(); routed through the logger so the
                # output lands in the run's log file like everything else.
                logger.debug('{}'.format(sorted(pred_subgraph)))
                raw_pred_subgraphs.append(pred_subgraph)

            (global_prec, global_rec, global_fm, global_iou,
             valid_global_prec, valid_global_rec, valid_global_fm,
             valid_global_iou, _, _, _, _, _) = evaluate_evo(
                instance['subgraphs'], raw_pred_subgraphs)

            logger.debug('-' * 5 + ' performance in the whole interval ' +
                         '-' * 5)
            logger.debug('global precision: {:.5f}'.format(global_prec))
            logger.debug('global recall   : {:.5f}'.format(global_rec))
            logger.debug('global f-measure: {:.5f}'.format(global_fm))
            logger.debug('global iou      : {:.5f}'.format(global_iou))
            logger.debug('-' * 5 +
                         ' performance in the interval with signals ' +
                         '-' * 5)
            logger.debug('global precision: {:.5f}'.format(valid_global_prec))
            logger.debug('global recall   : {:.5f}'.format(valid_global_rec))
            logger.debug('global f-measure: {:.5f}'.format(valid_global_fm))
            logger.debug('global iou      : {:.5f}'.format(valid_global_iou))

            # Refine the raw predictions on the graph and re-evaluate.
            refined_pred_subgraphs = post_process_evo(instance['graph'],
                                                      raw_pred_subgraphs,
                                                      dataset=DATASET)
            (refined_global_prec, refined_global_rec, refined_global_fm,
             refined_global_iou, _, _, _, _, _, _, _, _, _) = evaluate_evo(
                instance['subgraphs'], refined_pred_subgraphs)

            logger.debug('-' * 5 + ' refined performance ' + '-' * 5)
            logger.debug(
                'refined global precision: {:.5f}'.format(refined_global_prec))
            logger.debug(
                'refined global recall   : {:.5f}'.format(refined_global_rec))
            logger.debug(
                'refined global f-measure: {:.5f}'.format(refined_global_fm))
            logger.debug(
                'refined global iou      : {:.5f}'.format(refined_global_iou))

            all_performance.append(
                (global_prec, global_rec, global_fm, global_iou,
                 refined_global_prec, refined_global_rec, refined_global_fm,
                 refined_global_iou, run_time))

        # BUG FIX: np.mean over an empty array yields NaN (with a runtime
        # warning); bail out cleanly when the dataset produced no results.
        if not all_performance:
            logger.debug('no instances processed; nothing to average')
            return

        all_performance = np.array(all_performance)
        avg_performance = np.mean(all_performance, axis=0)
        logger.debug('-' * 5 + ' average performance ' + '-' * 5)
        # "presision" typo in the original message fixed.
        logger.debug('average precision: {:.5f}'.format(avg_performance[0]))
        logger.debug('average recall   : {:.5f}'.format(avg_performance[1]))
        logger.debug('average f-measure: {:.5f}'.format(avg_performance[2]))
        logger.debug('average iou      : {:.5f}'.format(avg_performance[3]))
        logger.debug('avg refined prec : {:.5f}'.format(avg_performance[4]))
        logger.debug('avg refined rec  : {:.5f}'.format(avg_performance[5]))
        logger.debug('avg refined fm   : {:.5f}'.format(avg_performance[6]))
        logger.debug('avg refined iou  : {:.5f}'.format(avg_performance[7]))
        logger.debug('average run time : {:.5f}'.format(avg_performance[8]))
    finally:
        # BUG FIX: the FileHandler was never detached or closed, leaking
        # the file handle and accumulating duplicate handlers on the
        # named logger across calls.
        if file_handler is not None:
            logger.removeHandler(file_handler)
            file_handler.close()