Example no. 1
0
def _log_metric_table(logger, header_fmt, values, recall_metrics, iou_metrics):
    """Render one recall-x-IoU metric matrix as a centered ASCII table and log it.

    Args:
        logger: logger to emit the rendered table to.
        header_fmt: format string taking (recall rank, IoU threshold).
        values: tensor [num_recall_metrics, num_iou_metrics] of fractions in [0, 1].
        recall_metrics: iterable of recall ranks (column group labels).
        iou_metrics: iterable of IoU thresholds (column labels).
    """
    num_recall_metrics, num_iou_metrics = len(recall_metrics), len(iou_metrics)
    table = [[header_fmt.format(i, j)
              for i in recall_metrics for j in iou_metrics]]
    table.append(['{:.02f}'.format(values[i][j]*100)
                  for i in range(num_recall_metrics) for j in range(num_iou_metrics)])
    table = AsciiTable(table)
    for i in range(num_recall_metrics*num_iou_metrics):
        table.justify_columns[i] = 'center'
    logger.info('\n' + table.table)


def evaluate_new(dataset_list, predictions_list, nms_thresh, result_output_dir, recall_metrics=(1,5), iou_metrics=(0.1,0.3,0.5,0.7)):
    """Evaluate moment-retrieval predictions on an original/replaced dataset pair.

    Computes Recall@R x IoU tables for the original queries, plus "unbiased"
    variants that only credit a hit when the replaced (counterfactual) query
    fails the same IoU threshold for every one of its top-R predictions.

    Args:
        dataset_list: (dataset_original, dataset_replaced); the original
            dataset must expose get_duration/get_moment/get_vid/get_sentence.
        predictions_list: (predictions_original, predictions_replaced); each is
            list[
                (
                    moments_norm (tensor(num_predictions, 2)),
                    scores (tensor(num_predictions))
                )
            ]
        nms_thresh: IoU threshold used by non-maximum suppression.
        result_output_dir: if truthy, per-sample IoUs are pickled under this
            path prefix.
        recall_metrics: recall ranks R to evaluate (collapsed to (1,) when only
            one candidate per sample is available).
        iou_metrics: IoU thresholds to evaluate at each rank.

    Returns:
        Recall@1 mIoU float
    """
    dataset_original, dataset_replaced = dataset_list
    predictions_original, predictions_replaced = predictions_list
    dataset_name = dataset_original.__class__.__name__

    logger = logging.getLogger("vmr.inference")
    logger.info("Performing {} evaluation (Size: {}).".format(dataset_name, len(dataset_original)))

    # Per-sample details are collected only when an output dir was requested.
    if result_output_dir:
        result_dict = {'vid': [], 'sentence': [], 'iou_o': [], 'iou_rp': []}
    else:
        result_dict = None

    # With a single candidate per sample only Recall@1 is meaningful.
    if predictions_original[0][0].shape[0] == 1:
        recall_metrics = (1,)

    num_recall_metrics, num_iou_metrics = len(recall_metrics), len(iou_metrics)

    recall_metrics = torch.tensor(recall_metrics)
    iou_metrics = torch.tensor(iou_metrics)
    unbiased_recall_x_iou = torch.zeros(num_recall_metrics, num_iou_metrics)
    recall_x_miou = torch.zeros(num_recall_metrics)
    # Number of samples failed to recall in replaced datasets, per threshold.
    miss_x_iou_bias = torch.zeros(num_recall_metrics, num_iou_metrics)

    for idx in tqdm(range(len(predictions_original))):
        duration = dataset_original.get_duration(idx)
        gt_moment = dataset_original.get_moment(idx)

        result_original = predictions_original[idx]
        candidates_original, scores_original = result_original[0]*duration, result_original[1]
        predicted_moments_original = nms(candidates_original, scores_original, topk=recall_metrics[-1], thresh=nms_thresh)
        predicted_ious_original = iou(predicted_moments_original[:max(recall_metrics)], gt_moment)

        result_replaced = predictions_replaced[idx]
        candidates_replaced, scores_replaced = result_replaced[0]*duration, result_replaced[1]
        predicted_moments_replaced = nms(candidates_replaced, scores_replaced, topk=recall_metrics[-1], thresh=nms_thresh)
        predicted_ious_replaced = iou(predicted_moments_replaced[:max(recall_metrics)], gt_moment)

        for i, r in enumerate(recall_metrics):
            ious_o = predicted_ious_original[:r]   # top-r IoUs, original query
            ious_rp = predicted_ious_replaced[:r]  # top-r IoUs, replaced query
            recall_x_miou[i] += ious_o.mean()

            ious_o = iou_padding(ious_o, r)
            ious_rp = iou_padding(ious_rp, r)
            bools_o = ious_o[:,None].expand(r, num_iou_metrics) > iou_metrics   # [r, num_iou_metrics]
            bools_rp = ious_rp[:,None].expand(r, num_iou_metrics) < iou_metrics # [r, num_iou_metrics]
            # "Unbiased" hit: original query recalls the moment while EVERY
            # replaced-query prediction fails the same IoU threshold.
            unbiased_recall_x_iou[i] += bools_rp.all(dim=0)*bools_o.any(dim=0)  # [num_iou_metrics]
            miss_x_iou_bias[i] += bools_rp.any(dim=0)                           # [num_iou_metrics]

            # NOTE(review): details are recorded only for the SECOND recall
            # metric (i == 1, i.e. R@5 with the defaults); with a single-rank
            # recall_metrics the dict stays empty — confirm this is intended.
            if i == 1 and result_dict:
                result_dict['vid'].append(dataset_original.get_vid(idx))
                result_dict['sentence'].append(dataset_original.get_sentence(idx))
                result_dict['iou_o'].append(ious_o[:5])
                result_dict['iou_rp'].append(ious_rp[:5])

    # NOTE(review): miss_x_iou_bias may contain zeros, producing inf entries in
    # the "hard" table; original behavior is kept as-is.
    hard_unbiased_recall_x_iou, unbiased_recall_x_iou = unbiased_recall_x_iou/miss_x_iou_bias, unbiased_recall_x_iou/len(predictions_original)
    recall_x_miou /= len(predictions_original)

    # Print result in table
    _log_metric_table(logger, 'hard_UBRank@{},IoU@{:.1f}', hard_unbiased_recall_x_iou, recall_metrics, iou_metrics)
    _log_metric_table(logger, 'UBRank@{},IoU@{:.1f}', unbiased_recall_x_iou, recall_metrics, iou_metrics)

    if result_output_dir:
        with open(result_output_dir + 'test_results.pkl', 'wb') as F: # DO NOT use join for prefix '/{k}_{epoch}e'
            pickle.dump(result_dict, F)

    # Fix: the docstring promised Recall@1 mIoU but the original returned None.
    return recall_x_miou[0].item()
Example no. 2
0
def process(host, port, user, password, database, table):
    """Print row counts and full contents of MySQL tables as ASCII tables.

    Connects to *database*, prints a summary table of non-empty tables and
    their row counts, then dumps each of those tables in full.

    Args:
        host: MySQL server host.
        port: MySQL server port (fix: previously accepted but never passed
            to connect(), so the default port was always used).
        user: MySQL user name.
        password: MySQL password.
        database: database (schema) to inspect.
        table: if truthy, restrict the dump to this single table; otherwise
            every table in the database is processed.
    """
    connection = connect(host=host,
                         port=port,
                         user=user,
                         passwd=password,
                         db=database,
                         charset='utf8',
                         cursorclass=DictCursor)
    with closing(connection) as connection:
        with closing(connection.cursor()) as cursor:
            cursor.execute('SHOW TABLES')
            rows = cursor.fetchall()

        # SHOW TABLES yields one single-value dict per table; the loop variable
        # no longer shadows the `table` parameter (fix).
        names = []
        for row in rows:
            name = next(iter(row.values()))
            if not table or table == name:
                names.append(name)
        names.sort()

        tables_and_counts = []
        for name in names:
            with closing(connection.cursor()) as cursor:
                # Backtick-quote the identifier so reserved-word or otherwise
                # unusual table names do not break the statement (fix).
                query = 'SELECT COUNT(*) AS count FROM `{table:s}`'.format(
                    table=name)
                cursor.execute(query)
                row = cursor.fetchone()
                count = row['count']
            if not count:
                continue  # skip empty tables entirely
            tables_and_counts.append([name, count])

        ascii_table = AsciiTable([['Table', 'COUNT(*)']] + tables_and_counts)
        ascii_table.justify_columns[1] = 'right'
        print(ascii_table.table)

        dumps = []
        for name, _ in tables_and_counts:
            with closing(connection.cursor()) as cursor:
                query = 'SELECT * FROM `{table:s}`'.format(table=name)
                cursor.execute(query)
                description = cursor.description
                rows = cursor.fetchall()
            body = []
            if description:
                # Header row from the cursor's column metadata.
                body.append([d[0] for d in description])
            for row in rows:
                values = list(row.values())
                for index, value in enumerate(values):
                    # BLOB/binary columns arrive as bytes; show text where
                    # possible, a placeholder otherwise.
                    if isinstance(value, bytes):
                        try:
                            values[index] = value.decode('utf-8')
                        except UnicodeDecodeError:
                            values[index] = 'UnicodeDecodeError'
                body.append(values)
            dumps.append((name, body))

        for name, body in dumps:
            print('')
            print(name)
            print('')
            print(AsciiTable(body).table)