Example #1
def db_save_eval(db_eval_dict, outputdir=cfg.PATH.EVAL_DIR):
    """ Save per-frame evaluation to HDF5 file.

	Arguments:
		db_eval_dict[method][measure][sequence] (dict): evaluation results.
		outputdir: destination folder of the output files (one for each technique).

	"""

    for technique in db_eval_dict.keys():
        outfilename = osp.join(outputdir, technique + ".h5")
        log.info("Saving evaluation in: %s" % outfilename)
        db_hdf5 = h5py.File(outfilename, 'w')
        for measure in db_eval_dict[technique].keys():
            for sequence, val in db_eval_dict[technique][measure].items():
                db_hdf5["%s/%s" % (measure, sequence)] = val

        db_hdf5.close()
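Note: the file written above holds one dataset per measure/sequence pair. A minimal read-back sketch (not part of the original toolkit; "my_technique.h5" is a placeholder filename):

import h5py

# Open one of the files produced by db_save_eval() and walk its groups.
with h5py.File("my_technique.h5", "r") as db_hdf5:
    for measure in db_hdf5.keys():                  # e.g. 'J', 'F', 'T'
        for sequence, dataset in db_hdf5[measure].items():
            per_frame = dataset[...]                # per-frame scores as a NumPy array
            print(measure, sequence, per_frame.shape)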
Example #2
def db_save_eval(db_eval_dict,outputdir=cfg.PATH.EVAL_DIR):

	""" Save per-frame evaluation to HDF5 file.

	Arguments:
		db_eval_dict[method][measure][sequence] (dict): evaluation results.
		outputdir: destination folder of the output files (one for each technique).

	"""


	for technique in db_eval_dict.keys():
		outfilename = osp.join(outputdir,technique + ".h5")
		log.info("Saving evaluation in: %s"%outfilename)
		db_hdf5 = h5py.File(outfilename,'w')
		for measure in db_eval_dict[technique].keys():
			for sequence,val in db_eval_dict[technique][measure].items():
				db_hdf5["%s/%s"%(measure,sequence)] = val

		db_hdf5.close()
Example #3
def db_eval(techniques,
            sequences,
            inputdir=cfg.PATH.SEGMENTATION_DIR,
            metrics=None):
    """ Perform per-frame sequence evaluation.

	Arguments:
		techniques (string,list): name(s) of the method to be evaluated.
		sequences  (string,list): name(s) of the sequence to be evaluated.
		inputdir  (string): path to the technique(s) folder.

	Returns:
		db_eval_dict[method][measure][sequence] (dict): evaluation results.

	"""

    if isinstance(techniques, str): techniques = [techniques]
    if isinstance(sequences, str): sequences = [sequences]

    ndict = lambda: defaultdict(ndict)
    db_eval_dict = ndict()

    # RAW, per-frame evaluation
    timer = Timer()
    log.info("Number of cores allocated: %d" % cfg.N_JOBS)
    for technique in techniques:
        log.info('Evaluating technique: "%s"' % technique)
        timer.tic()

        J,j_M,j_O,j_D,F,f_M,f_O,f_D,T,t_M = \
            zip(*Parallel(n_jobs=cfg.N_JOBS)(delayed(db_eval_sequence)(
                technique,sequence,inputdir,metrics) for sequence in sequences))
        log.info('Processing time: "%.3f"' % timer.toc())

        # STORE RAW EVALUATION
        for seq_id, sequence in enumerate(sequences):
            db_eval_dict[technique]['J'][sequence] = J[seq_id]
            db_eval_dict[technique]['F'][sequence] = F[seq_id]
            db_eval_dict[technique]['T'][sequence] = T[seq_id]

    return db_eval_dict
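The ndict = lambda: defaultdict(ndict) trick above builds nested dictionaries of arbitrary depth on first access, which is what lets db_eval_dict[technique][measure][sequence] be assigned without creating the intermediate levels. A stand-alone sketch (the keys 'osvos' and 'bear' are placeholders):

from collections import defaultdict

# Self-referential factory: every missing key maps to another defaultdict of the same kind.
ndict = lambda: defaultdict(ndict)

results = ndict()
results['osvos']['J']['bear'] = 0.85   # all intermediate levels are created automatically
print(results['osvos']['J']['bear'])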
Example #4
def db_eval(techniques,sequences,inputdir=cfg.PATH.SEGMENTATION_DIR,metrics=None):

	""" Perform per-frame sequence evaluation.

	Arguments:
		techniques (string,list): name(s) of the method to be evaluated.
		sequences  (string,list): name(s) of the sequence to be evaluated.
		inputdir  (string): path to the technique(s) folder.

	Returns:
		db_eval_dict[method][measure][sequence] (dict): evaluation results.

	"""

	if isinstance(techniques,str): techniques = [techniques]
	if isinstance(sequences,str):  sequences  = [sequences]

	ndict        = lambda: defaultdict(ndict)
	db_eval_dict = ndict()

	# RAW, per-frame evaluation
	timer = Timer()
	log.info("Number of cores allocated: %d"%cfg.N_JOBS)
	for technique in techniques:
		log.info('Evaluating technique: "%s"'%technique)
		timer.tic()

		J,j_M,j_O,j_D,F,f_M,f_O,f_D,T,t_M = \
				 zip(*Parallel(n_jobs=cfg.N_JOBS)(delayed(db_eval_sequence)(
			technique,sequence,inputdir,metrics) for sequence in sequences))
		log.info('Processing time: "%.3f"'%timer.toc())

		# STORE RAW EVALUATION
		for seq_id,sequence in enumerate(sequences):
			db_eval_dict[technique]['J'][sequence] = J[seq_id]
			db_eval_dict[technique]['F'][sequence] = F[seq_id]
			db_eval_dict[technique]['T'][sequence] = T[seq_id]

	return db_eval_dict
Example #5
	parser.add_argument(
			dest='input',default=None,type=str,
			help='Path to the technique to be evaluated')

	parser.add_argument(
			dest='output',default=None,type=str,
			help='Output folder')

	parser.add_argument(
			'--metrics',default=None,nargs='+',type=str,choices=['J','F','T'])

	args = parser.parse_args()

	return args

if __name__ == '__main__':

	args       = parse_args()
	
	args.metrics = ['J', 'F']
	args.input = osp.abspath(args.input)

	db_eval_dict = db_eval(osp.basename(args.input),
			os.listdir(args.input),osp.dirname(args.input),args.metrics)

	log.info("Saving results in: %s"%osp.join(
			args.output,osp.basename(args.input))+".h5")

	db_save_eval(db_eval_dict,outputdir=args.output)
Example #6
    return args


if __name__ == '__main__':

    # Parse command-line arguments
    args = parse_args()

    db_sequences = db_read_sequences()
    db_techniques = db_read_techniques()

    A = []

    for attribute in args.attributes:
        # Filter sequences tagged with `attribute`
        log.info("Filtering sequences with attribute: %s" % attribute)
        sequences = filter(lambda s: attribute in s.attributes, db_sequences)

        db_eval_dict = db_read_eval(sequence=[s.name for s in sequences],
                                    measure=args.measure,
                                    raw_eval=False)

        statistics_to_id = {'M': 0, 'O': 1, 'D': 2}

        R = []
        for t in db_techniques:
            R.append(
                np.vstack(db_eval_dict[t.name][args.measure].values())
                [:, statistics_to_id[args.statistic]])

        A.append(np.average(np.array(R).T, axis=0))
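For orientation (illustration only, made-up numbers): R stacks one row per technique, so np.array(R) has shape (num_techniques, num_sequences); averaging R.T along axis 0 therefore yields one mean value per technique for the current attribute.

import numpy as np

R = np.array([[0.6, 0.7, 0.5],     # technique 1 over three sequences
              [0.4, 0.8, 0.9]])    # technique 2
print(np.average(R.T, axis=0))     # -> [0.6, 0.7], one average per technique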
Example #7

def parse_args():
    """
  Parse input arguments.
  """

    parser = argparse.ArgumentParser(
        description="""Print technique results.""")

    parser.add_argument('-i',
                        '--input',
                        required=True,
                        type=str,
                        help='Path to the technique results (yaml)')

    args = parser.parse_args()

    return args


if __name__ == '__main__':

    args = parse_args()

    log.info("Loading evaluation from: {}".format(args.input))
    with open(args.input, 'r') as f:
        evaluation = edict(yaml.safe_load(f))

    print_results(evaluation)
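EasyDict converts nested dictionaries recursively, so the keys loaded from the YAML file are also reachable as attributes. A tiny sketch (the keys 'dataset', 'J' and 'mean' are placeholders, not the toolkit's actual schema):

from easydict import EasyDict as edict

evaluation = edict({'dataset': {'J': {'mean': 0.6}}})   # stand-in for yaml.safe_load(f)
print(evaluation.dataset.J.mean)                        # same as evaluation['dataset']['J']['mean']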
Example #8
  parser.add_argument('--single-object',action='store_true')

  args = parser.parse_args()

  # Cast string to Enum
  args.phase = davis.phase[args.phase.upper()]

  return args


if __name__ == '__main__':

  args = parse_args()

  log.info('Loading DAVIS year: {} phase: {}'.format(
    args.year,args.phase))

  # Load DAVIS
  db = DAVISLoader(args.year,
      args.phase,args.single_object)

  log.info('Loading video segmentations from: {}'.format(args.input))

  # Load segmentations
  segmentations = [Segmentation(
    osp.join(args.input,s),args.single_object) for s in db.iternames()]

  # Evaluate results
  evaluation = db_eval(db,segmentations,args.metrics)

  # Print results
Example #9
	parser = argparse.ArgumentParser(
			description='Perform full evaluation as reported in the paper.')

	parser.add_argument('--compute',
			dest='compute',action='store_true',
			help='Compute results instead of loading from file.')

	# Parse command-line arguments
	return parser.parse_args()

if __name__ == '__main__':

	args = parse_args()

	if args.compute:
		log.info('Running full evaluation on DAVIS')
		log.info('Searching available techniques in: "%s"'%cfg.PATH.SEGMENTATION_DIR)

		# Search available techniques within the default output folder
		techniques = sorted([osp.splitext(osp.basename(t))[0]
				for t in glob.glob(cfg.PATH.SEGMENTATION_DIR+ "/*")])

		log.info('Number of techniques being evaluated: %d'%len(techniques))

		# Read sequences from file
		log.info('Reading sequences from: %s '%osp.basename(cfg.FILES.DB_INFO))
		sequences  = [s.name for s in db_read_sequences()]

		# Compute full evaluation and save results
		for technique in techniques:
			db_save_eval(db_eval(technique,sequences))
Example #10

if __name__ == '__main__':

    # Parse command-line arguments
    args = parse_args()

    db_info = db_read_info()
    db_techniques = db_read_techniques()

    attributes = db_info.attributes
    distr = []
    S = []

    for t_set in db_info.sets:
        log.info("Filtering techniques in: %s" % (t_set))
        # Filter sequences tagged with set=`t_set`
        X = []
        db_sequences = [s for s in db_info.sequences if t_set == s.set]
        for s in db_sequences:
            X.append([1 if attr in s.attributes else 0 for attr in attributes])

        distr.append(
            np.round(np.sum(X, axis=0).astype(np.float32) / np.sum(X), 3))

        db_eval_dict = db_read_eval(sequence=[s.name for s in db_sequences],
                                    measure=args.measure,
                                    raw_eval=False)

        statistics_to_id = {'M': 0, 'O': 1, 'D': 2}
Example #11
			dest='statistic',default='M',
			help='Evaluate results instead of loading from file.')

	# Parse command-line arguments
	return parser.parse_args()


if __name__ == '__main__':

	args = parse_args()

	db_sequences  = db_read_sequences()
	db_techniques = db_read_techniques()

	# Read results from file
	log.info("Reading evaluation from: %s"%cfg.FILES.DB_BENCHMARK)
	db_eval_dict = db_read_eval(
			measure=args.measure,raw_eval=False)

	# Generate table
	statistics_to_id = {'M':0,'O':1,'D':2}

	R = []
	for t in db_techniques:
		R.append(np.vstack(db_eval_dict[t.name][
			args.measure].values())[:,statistics_to_id[args.statistic]])

	R = np.array(R).T

	table = ptable(["Sequence"] +
			[t.name for t in db_techniques])
Example #12
    return parser.parse_args()


if __name__ == '__main__':

    args = parse_args()

    db_techniques = db_read_techniques()

    from prettytable import PrettyTable as ptable

    table = ptable(["Abbr", "Title", "Authors", "Conf", "Year"])

    table.align = 'l'
    technique_table = {}

    for t in db_techniques:
        technique_table[t.name] = edict()
        technique_table[t.name].title = t.title
        technique_table[t.name].authors = t.authors
        technique_table[t.name].conference = t.conference
        technique_table[t.name].year = t.year
        table.add_row([t.name, t.title, t.authors[0], t.conference, t.year])

    print "\n%s\n" % str(table)

    if args.output is not None:
        log.info("Saving list of techniques in: %s" % args.output)
        with open(args.output, 'w') as f:
            f.write(json.dumps(technique_table, indent=2))
Example #13

if __name__ == "__main__":

    # Parse command-line arguments
    args = parse_args()

    db_info = db_read_info()
    db_techniques = db_read_techniques()

    attributes = db_info.attributes
    distr = []
    S = []

    for t_set in db_info.sets:
        log.info("Filtering techniques in: %s" % (t_set))
        # Filter sequences tagged with set=`t_set`
        X = []
        db_sequences = filter(lambda s: t_set == s.set, db_info.sequences)
        for s in db_sequences:
            X.append([1 if attr in s.attributes else 0 for attr in attributes])

        distr.append(np.round(np.sum(X, axis=0).astype(np.float32) / np.sum(X), 3))

        db_eval_dict = db_read_eval(sequence=[s.name for s in db_sequences], measure=args.measure, raw_eval=False)

        statistics_to_id = {"M": 0, "O": 1, "D": 2}

        R = []
        for t in db_techniques:
            R.append(np.vstack(db_eval_dict[t.name][args.measure].values())[:, statistics_to_id[args.statistic]])
Example #14
    parser.add_argument('--single-object', action='store_true')

    args = parser.parse_args()

    # Cast string to Enum
    args.phase = davis.phase[args.phase.upper()]

    return args


if __name__ == '__main__':

    args = parse_args()

    log.info('Loading DAVIS year: {} phase: {}'.format(args.year, args.phase))

    # Load DAVIS
    db = DAVISLoader(args.year, args.phase, args.single_object)

    log.info('Loading video segmentations from: {}'.format(args.input))

    # Load segmentations
    segmentations = [
        Segmentation(osp.join(args.input, s), args.single_object)
        for s in db.iternames()
    ]

    # Evaluate results
    evaluation = db_eval(db, segmentations, args.metrics)
Example #15
from davis         import cfg,log,Timer
from prettytable   import PrettyTable as ptable
from davis.dataset import DAVISAnnotationLoader,DAVISSegmentationLoader

if __name__ == '__main__':

	sequence_name  = 'flamingo'
	technique_name = 'fcp'

	sourcedir = osp.join(cfg.PATH.SEGMENTATION_DIR,'fcp',sequence_name)

	db_annotation   = DAVISAnnotationLoader(cfg,osp.basename(sourcedir))
	db_segmentation = DAVISSegmentationLoader(
			cfg,osp.basename(sourcedir),osp.dirname(sourcedir))

	log.info('Starting evaluation of technique: "%s" on sequence "%s"'%(
		technique_name,sequence_name))


	# Initialize timer
	timer = Timer().tic()

	# Process sequence
	J,Jm,Jo,Jt = db_annotation.eval(db_segmentation,'J')

	# Report results
	log.info("Processing time: %.3f seconds"%timer.toc())

	table = ptable(['Sequence']+['Jm','Jo','Jt'])
	table.add_row([sequence_name]+["{: .3f}".format(f) for f in [Jm,Jo,Jt]])

	print "\n" + str(table)
    parser.add_argument('--compute',
                        dest='compute',
                        action='store_true',
                        help='Compute results instead of loading from file.')

    # Parse command-line arguments
    return parser.parse_args()


if __name__ == '__main__':

    args = parse_args()

    if args.compute:
        log.info('Running full evaluation on DAVIS')
        log.info('Searching available techniques in: "%s"' %
                 cfg.PATH.SEGMENTATION_DIR)

        # Search available techniques within the default output folder
        techniques = sorted([
            osp.splitext(osp.basename(t))[0]
            for t in glob.glob(cfg.PATH.SEGMENTATION_DIR + "/*")
        ])

        log.info('Number of techniques being evaluated: %d' % len(techniques))

        # Read sequences from file
        log.info('Reading sequences from: %s ' %
                 osp.basename(cfg.FILES.DB_INFO))
        sequences = [s.name for s in db_read_sequences()]
Example #17
    return np.nanmean(X, axis=0)[0]


if __name__ == '__main__':

    args = parse_args()

    technique = osp.splitext(osp.basename(args.input))[0]

    seqs = None
    seqs = []
    with open(SEQ_LIST_FILE) as f:
        for seqname in f:
            seqs.append(seqname[:-1])

    log.info("Evaluation sequences: %s" % os.listdir(args.input.split('.')[0]))

    db_eval_dict = db_read_eval(technique,
                                sequence=os.listdir(args.input.split('.')[0]),
                                raw_eval=False,
                                inputdir=osp.dirname(args.input))

    db_benchmark = db_read_benchmark()
    # db_sequences = db_read_sequences()

    log.info("Displaying evaluation of: %s" % osp.basename(args.input))

    table = ptable(["Sequence"] +
                   ['J(M)', 'J(O)', 'J(D)', 'F(M)', 'F(O)', 'F(D)', 'T(M)'])

    X = []
Example #18
from davis import cfg, log, Timer
from prettytable import PrettyTable as ptable
from davis.dataset import DAVISAnnotationLoader, DAVISSegmentationLoader

if __name__ == '__main__':

    sequence_name = 'flamingo'
    technique_name = 'fcp'

    sourcedir = osp.join(cfg.PATH.SEGMENTATION_DIR, 'fcp', sequence_name)

    db_annotation = DAVISAnnotationLoader(cfg, osp.basename(sourcedir))
    db_segmentation = DAVISSegmentationLoader(cfg, osp.basename(sourcedir),
                                              osp.dirname(sourcedir))

    log.info('Starting evaluation of technique: "%s" on sequence "%s"' %
             (technique_name, sequence_name))

    # Initialize timer
    timer = Timer().tic()

    # Process sequence
    J, Jm, Jo, Jt = db_annotation.eval(db_segmentation, 'J')

    # Report results
    log.info("Processing time: %.3f seconds" % timer.toc())

    table = ptable(['Sequence'] + ['Jm', 'Jo', 'Jt'])
    table.add_row([sequence_name] +
                  ["{: .3f}".format(f) for f in [Jm, Jo, Jt]])

    print "\n" + str(table)
Example #19
	return args

if __name__ == '__main__':

	args = parse_args()

	technique = osp.splitext(osp.basename(args.input))[0]

	db_eval_dict = db_read_eval(technique,raw_eval=False,
			inputdir=osp.dirname(args.input))

	db_benchmark = db_read_benchmark()
	db_sequences = db_read_sequences()


	log.info("Displaying evaluation of: %s"%osp.basename(args.input))

	table = ptable(["Sequence"] + ['J(M)','J(O)','J(D)','F(M)','F(O)','F(D)','T(M)'])

	X = []
	for key,values in db_eval_dict[technique].items():
		X.append(db_eval_dict[technique][key].values())

	X = np.hstack(X)[:,:7]
	for s,row in zip(db_sequences,X):
		table.add_row([s.name]+ ["{: .3f}".format(n) for n in row])

	table.add_row(['Average'] +
			["{: .3f}".format(n) for n in np.nanmean(X,axis=0)])

	print "\n" + str(table) + "\n"
Example #20
	return parser.parse_args()

if __name__ == '__main__':

	args =parse_args()

	db_techniques = db_read_techniques()

	from prettytable import PrettyTable as ptable

	table = ptable(["Abbr","Title","Authors","Conf","Year"])

	table.align     = 'l'
	technique_table = {}

	for t in db_techniques:
		technique_table[t.name]            = edict()
		technique_table[t.name].title      = t.title
		technique_table[t.name].authors    = t.authors
		technique_table[t.name].conference = t.conference
		technique_table[t.name].year       = t.year
		table.add_row([t.name,t.title,t.authors[0], t.conference,t.year])

	print "\n%s\n"%str(table)

	if args.output is not None:
		log.info("Saving list of techniques in: %s"%args.output)
		with open(args.output,'w') as f:
			f.write(json.dumps(technique_table,indent=2))
Example #21
    return args


def imshow(im, an, color_palette):
    """ Display image using cv2 as backend."""

    ov = overlay(im, an, color_palette)
    cv2.imshow("Sequence", ov[..., [2, 1, 0]])

    ch = chr(cv2.waitKey())
    return ch


if __name__ == '__main__':

    args = parse_args()

    log.info('Loading DAVIS year: {} phase: {}'.format(args.year, args.phase))

    db = davis.dataset.DAVISLoader(args.year, args.phase, args.single_object)

    if args.input is None:
        # Visualize ground-truth data
        for images, annotations in db.items():
            for im, an in zip(images, annotations):
                ch = imshow(im, an, annotations.color_palette)
                if ch == 'q':
                    sys.exit(0)
                elif ch == 's':
                    break  # skip to next sequence
Example #22
                        default=None,
                        nargs='+',
                        type=str,
                        choices=['J', 'F', 'T'])

    args = parser.parse_args()

    return args


if __name__ == '__main__':

    args = parse_args()
    args.input = osp.abspath(args.input)

    log.info("Evaluation sequences: %s" % os.listdir(args.input))

    import pdb
    pdb.set_trace()

    db_eval_dict = db_eval(osp.basename(args.input), os.listdir(args.input),
                           osp.dirname(args.input), args.metrics)

    import pdb
    pdb.set_trace()

    log.info("Saving results in: %s" %
             osp.join(args.output, osp.basename(args.input)) + ".h5")

    db_save_eval(db_eval_dict, outputdir=args.output)
Example #23
                        default='M',
                        help='Evaluate results instead of loading from file.')

    # Parse command-line arguments
    return parser.parse_args()


if __name__ == '__main__':

    args = parse_args()

    db_sequences = db_read_sequences()
    db_techniques = db_read_techniques()

    # Read results from file
    log.info("Reading evaluation from: %s" % cfg.FILES.DB_BENCHMARK)
    db_eval_dict = db_read_eval(measure=args.measure, raw_eval=False)

    # Generate table
    statistics_to_id = {'M': 0, 'O': 1, 'D': 2}

    R = []
    for t in db_techniques:
        R.append(
            np.vstack(db_eval_dict[t.name][args.measure].values())
            [:, statistics_to_id[args.statistic]])

    R = np.array(R).T

    table = ptable(["Sequence"] + [t.name for t in db_techniques])
Example #24
import numpy as np

from davis import Timer,log,cfg,db_eval,print_results
from easydict import EasyDict as edict

def parse_args():
  """
  Parse input arguments.
  """

  parser = argparse.ArgumentParser(
    description="""Print technique results.""")

  parser.add_argument(
      '-i','--input',required=True,type=str,
      help='Path to the technique results (yaml)')

  args = parser.parse_args()

  return args

if __name__ == '__main__':

  args = parse_args()

  log.info("Loading evaluation from: {}".format(args.input))
  with open(args.input,'r') as f:
    evaluation = edict(yaml.safe_load(f))

  print_results(evaluation)
Example #25
			description="""Evaluate a technique and store results.
			""")

	parser.add_argument(
			dest='input',default=None,type=str,
			help='Path to the technique to be evaluated')

	parser.add_argument(
			dest='output',default=None,type=str,
			help='Output folder')

	parser.add_argument(
			'--metrics',default=None,nargs='+',type=str,choices=['J','F','T'])

	args = parser.parse_args()

	return args

if __name__ == '__main__':

	args       = parse_args()
	args.input = osp.abspath(args.input)

	db_eval_dict = db_eval(osp.basename(args.input),
			os.listdir(args.input),osp.dirname(args.input),args.metrics)

	log.info("Saving results in: %s"%osp.join(
			args.output,osp.basename(args.input))+".h5")

	db_save_eval(db_eval_dict,outputdir=args.output)
Example #26
  return args

def imshow(im,an,color_palette):
  """ Display image using cv2 as backend."""

  ov = overlay(im,an,color_palette)
  cv2.imshow("Sequence",ov[...,[2,1,0]])

  ch = chr(cv2.waitKey())
  return ch

if __name__ == '__main__':

  args = parse_args()

  log.info('Loading DAVIS year: {} phase: {}'.format(
    args.year,args.phase))

  db = davis.dataset.DAVISLoader(args.year,
      args.phase,args.single_object)

  if args.input is None:
    # Visualize ground-truth data
    for images,annotations in db.items():
      for im,an in zip(images,annotations):
        ch = imshow(im,an,annotations.color_palette)
        if  ch == 'q':
          sys.exit(0)
        elif ch == 's':
          break # skip to next sequence
Example #27
	parser.add_argument('--summary',action='store_true',
			help='Print dataset average instead of per-sequence results.')

	# Parse command-line arguments
	args       = parser.parse_args()
	args.input = osp.abspath(args.input)

	return args

if __name__ == '__main__':

	args = parse_args()

	technique = osp.splitext(osp.basename(args.input))[0]

	sequence = None
	if args.eval_set == "val":
		sequence = ['blackswan', 'bmx-trees', 'breakdance', 'camel', 'car-roundabout',
				'car-shadow', 'cows', 'dance-twirl', 'dog', 'drift-chicane',
				'drift-straight', 'goat', 'horsejump-high', 'kite-surf', 'libby',
				'motocross-jump', 'paragliding-launch', 'parkour', 'scooter-black',
				'soapbox']
	else:
		raise ValueError("Eval set must be val, not {}".format(args.eval_set))

	db_eval_dict = db_read_eval(technique,raw_eval=False,
			inputdir=osp.dirname(args.input),
			sequence=sequence)

	log.info("Displaying evaluation of: %s"%osp.basename(args.input))

	db_eval_view(db_eval_dict,
			technique,args.summary,args.eval_set,
			sequence=sequence)
Example #28
	return args

if __name__ == '__main__':

	# Parse command-line arguments
	args = parse_args()

	db_sequences  = db_read_sequences()
	db_techniques = db_read_techniques()

	A = []


	for attribute in args.attributes:
		# Filter sequences tagged with `attribute`
		log.info("Filtering sequences with attribute: %s"%attribute)
		sequences = filter(
				lambda s: attribute in s.attributes,db_sequences)

		db_eval_dict = db_read_eval(sequence=[s.name for s in sequences],
				measure=args.measure,raw_eval=False)

		statistics_to_id = {'M':0,'O':1,'D':2}

		R = []
		for t in db_techniques:
			R.append(np.vstack(db_eval_dict[t.name][
				args.measure].values())[:,statistics_to_id[args.statistic]])

		A.append(np.average(np.array(R).T,axis=0))