Example #1
def db_eval_view(db_eval_dict,technique,
		summary=False,eval_set='all'):

	db_sequences = db_read_sequences()

	from prettytable import PrettyTable as ptable
	table = ptable(["Sequence"] + ['J(M)','J(O)','J(D)','F(M)','F(O)','F(D)','T(M)'])

	X = []
	for key,values in db_eval_dict[technique].iteritems():
		X.append(db_eval_dict[technique][key].values())

	X = np.hstack(X)[:,:7]
	if not summary:
		for s,row in zip(db_sequences,X):
			if eval_set == 'all' or s.set == eval_set:
				table.add_row([s.name]+ ["{: .3f}".format(n) for n in row])


	set_ids = [seq_id for seq_id,seq in enumerate(db_sequences)
			if eval_set == 'all' or seq.set == eval_set]

	print set_ids

	table.add_row(['Average'] + ["{: .3f}".format(n)
		for n in np.nanmean(X[set_ids],axis=0)])

	print "\n" + str(table) + "\n"
	return str(table)
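The loop above only works for one particular nesting of db_eval_dict. Below is a minimal, self-contained sketch of that assumed layout (technique -> measure -> sequence -> statistics, with the M/O/D ordering used by the statistics_to_id mapping in later examples) and of how it is flattened into the table. The technique name, sequence names and numbers are made up for illustration; the real dictionary comes from db_read_eval().

import numpy as np
from collections import OrderedDict
from prettytable import PrettyTable as ptable

# hypothetical layout: technique -> measure -> sequence -> [M, O, D] statistics
results = {'my_technique': {
    'J': OrderedDict([('bear', [0.90, 1.00, 0.05]), ('blackswan', [0.93, 1.00, 0.03])]),
    'F': OrderedDict([('bear', [0.88, 0.99, 0.06]), ('blackswan', [0.91, 1.00, 0.04])]),
    'T': OrderedDict([('bear', [0.20]), ('blackswan', [0.15])]),
}}

table = ptable(["Sequence"] + ['J(M)', 'J(O)', 'J(D)', 'F(M)', 'F(O)', 'F(D)', 'T(M)'])

# stack the per-measure statistics side by side: one row per sequence, seven columns
X = np.hstack([np.vstack(list(results['my_technique'][m].values()))
               for m in ('J', 'F', 'T')])[:, :7]

for name, row in zip(results['my_technique']['J'], X):
    table.add_row([name] + ["{: .3f}".format(n) for n in row])

table.add_row(['Average'] + ["{: .3f}".format(n) for n in np.nanmean(X, axis=0)])
print("\n" + str(table) + "\n")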
Example #2
def db_eval_view(db_eval_dict, technique, summary=False):

    db_sequences = db_read_sequences()

    from prettytable import PrettyTable as ptable
    table = ptable(["Sequence"] +
                   ['J(M)', 'J(O)', 'J(D)', 'F(M)', 'F(O)', 'F(D)', 'T(M)'])

    X = []
    for key, values in db_eval_dict[technique].iteritems():
        X.append(db_eval_dict[technique][key].values())

    X = np.hstack(X)[:, :7]
    if not summary:
        for s, row in zip(db_sequences, X):
            table.add_row([s.name] + ["{: .3f}".format(n) for n in row])

    table.add_row(['Average'] +
                  ["{: .3f}".format(n) for n in np.nanmean(X, axis=0)])

    print "\n" + str(table) + "\n"
    return str(table)
Example #3
def db_eval_view(db_eval_dict,
                 technique,
                 summary=False,
                 eval_set='all',
                 sequence=None):

    if sequence is None:
        db_sequences = db_read_sequences()
    else:
        db_sequences = sequence

    from prettytable import PrettyTable as ptable
    table = ptable(["Sequence"] +
                   ['J(M)', 'J(O)', 'J(D)', 'F(M)', 'F(O)', 'F(D)', 'T(M)'])

    X = []
    for key, values in db_eval_dict[technique].iteritems():
        X.append(db_eval_dict[technique][key].values())

    X = np.hstack(X)[:, :7]
    if not summary:
        for s, row in zip(db_sequences, X):
            #if eval_set == 'all' or s.set == eval_set:
            #table.add_row([s.name]+ ["{: .3f}".format(n) for n in row])
            table.add_row([s] + ["{: .3f}".format(n) for n in row])

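    # note: the leading "True or" in the condition below makes the filter a no-op, so every sequence index is kept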
    set_ids = [
        seq_id for seq_id, seq in enumerate(db_sequences)
        if True or eval_set == 'all' or seq.set == eval_set
    ]

    print set_ids

    table.add_row(
        ['Average'] +
        ["{: .3f}".format(n) for n in np.nanmean(X[set_ids], axis=0)])

    print "\n" + str(table) + "\n"
    return str(table)
Example #4
def db_eval_view(db_eval_dict, summary=True):
    """Modified from DAVIS 2016 evaluation code."""
    from prettytable import PrettyTable as ptable
    table = ptable(["Sequence"] +
                   ['J(M)', 'J(O)', 'J(D)', 'F(M)', 'F(O)', 'F(D)', 'T(M)'])

    X = []
    sequences = []
    for key, values in db_eval_dict.items():
        key_sequences, key_results = zip(*db_eval_dict[key].items())
        sequences.extend(key_sequences)
        X.append(list(key_results))
    X = np.hstack(X)[:, :7]

    for s, row in zip(sequences, X):
        table.add_row([s] + ["{: .3f}".format(n) for n in row])

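    # blank separator row before the average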
    table.add_row(['' for _ in range(1 + X.shape[1])])
    table.add_row(['Average'] +
                  ["{: .3f}".format(n) for n in np.nanmean(X, axis=0)])

    return str(table)
Example #5
        db_eval_dict = db_read_eval(sequence=[s.name for s in db_sequences],
                                    measure=args.measure,
                                    raw_eval=False)

        statistics_to_id = {'M': 0, 'O': 1, 'D': 2}

        R = []
        for t in db_techniques:
            R.append(
                np.vstack(list(db_eval_dict[t.name][args.measure].values()))
                [:, statistics_to_id[args.statistic]])

        S.append(np.average(np.array(R).T, axis=0))

    print("\nAttributes Distribution")

    table = ptable(["Set"] + attributes)
    for attr, row in zip(db_info.sets, distr):
        table.add_row([attr] + \
          ['{: .2f}'.format(np.round(r,2)) for r in row])
    print(table)

    table = ptable(["Set"] + [t.name for t in db_techniques])

    print("\nEvaluation (%s)" % args.measure)
    for attr, row in zip(db_info.sets, S):
        table.add_row([attr] + \
          ['{: .2f}'.format(np.round(r,2)) for r in row])

    print(table)
Example #6
def check_regexp_uptime(log_output, expect_uptime, pre_time, tolerance=0.5):
    '''Extract the uptime matched by each given regexp from the router's
    "show logging" output and compare it with the expected uptime.'''

    # explain how the tolerance check is computed
    log.info(
        banner('The "tolerance check" is computed as:\n'
               '|a - b| <= 0.5 * (a + b) * tolerance'))

    # create table headers
    table = ptable([
        'log pattern', 'expected time', 'actual time', 'tolerance check',
        'result'
    ])

    # overall pass/fail flag
    flag = True

    # check feature uptime
    # setup the regexp pattern
    p = r'.+ +(?P<uptime>\d+\:\d+\:\d+).\d+.+{}.+'

    for item in (expect_uptime or []):
        for regexp, expect_up in item.items():

            # *Dec  6 11:51:37.043: %OSPF-5-ADJCHG: Process 1, Nbr 10.2.2.2 on GigabitEthernet3 from LOADING to FULL, Loading Done
            pattern = re.compile(p.format(regexp))

            # find all matched values
            uptimes = pattern.findall(log_output)

            # find the latest one
            try:
                assert uptimes
            except Exception as e:
                raise AssertionError(
                    'Cannot find log message for {}'.format(regexp)) from e
            else:
                latest_uptime = list(uptimes)[-1]

            # compute how long it took to reach the ready state
            start = pre_time.split(':')
            start = int(start[0]) * 3600 + int(start[1]) * 60 + int(start[2])
            end = latest_uptime.split(':')
            end = int(end[0]) * 3600 + int(end[1]) * 60 + int(end[2])

            # get real uptime
            time_consume = end - start

            # compute the left and right sides of the tolerance inequality
            equal_left = abs(time_consume - expect_up)
            equal_right = 0.5 * tolerance * (time_consume + expect_up)

            # check uptime
            try:
                # check whether the measured time is close enough to the expected value
                assert equal_left <= equal_right
            except Exception:
                flag = False
                table.add_row([
                    regexp, expect_up, time_consume,
                    '{} <= {}'.format(equal_left, equal_right), 'Failed'
                ])
            else:
                table.add_row([
                    regexp, expect_up, time_consume,
                    '{} <= {}'.format(equal_left, equal_right), 'Passed'
                ])

    table_str = table.get_string()
    log.info('\n{}'.format(
        banner('Overall Information', width=len(table._hrule))))
    log.info(table_str)

    if not flag:
        raise Exception(
            'Not all regexp uptimes are close to the values given in the trigger yaml file.\n'
            'Please refer to the table to see what needs to be adjusted in the trigger yaml file.'
        )
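To make the tolerance arithmetic above concrete, here is a small standalone sketch. within_tolerance and hms_to_seconds are hypothetical helpers that mirror the inline calculations in check_regexp_uptime; the pre_time and expected uptimes are made up.

def hms_to_seconds(hms):
    """Convert an 'HH:MM:SS' string (as parsed from the log) to seconds."""
    h, m, s = (int(x) for x in hms.split(':'))
    return h * 3600 + m * 60 + s

def within_tolerance(expected, actual, tolerance=0.5):
    """True when |actual - expected| <= 0.5 * (actual + expected) * tolerance."""
    return abs(actual - expected) <= 0.5 * tolerance * (actual + expected)

# e.g. an adjacency expected after 60 s that actually came up 77 s after pre_time
elapsed = hms_to_seconds('11:51:37') - hms_to_seconds('11:50:20')  # 77 seconds
print(within_tolerance(60, elapsed))  # True:  |77 - 60| = 17 <= 0.25 * 137 = 34.25
print(within_tolerance(60, 200))      # False: |200 - 60| = 140 > 0.25 * 260 = 65.0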
Example #7
	db_sequences  = db_read_sequences()
	db_techniques = db_read_techniques()

	# Read results from file
	log.info("Reading evaluation from: %s"%cfg.FILES.DB_BENCHMARK)
	db_eval_dict = db_read_eval(
			measure=args.measure,raw_eval=False)

	# Generate table
	statistics_to_id = {'M':0,'O':1,'D':2}

	R = []
	for t in db_techniques:
		R.append(np.vstack(db_eval_dict[t.name][
			args.measure].values())[:,statistics_to_id[args.statistic]])

	R = np.array(R).T

	table = ptable(["Sequence"] +
			[t.name for t in db_techniques])

	for n,row in enumerate(R):
		table.add_row([db_sequences[n].name] + \
				['{: .3f}'.format(r) for r in row])

	table.add_row(["Average"] + ['{: .3f}'.format(r)
		for r in np.average(R,axis=0)])

	print "\n" + str(table) + "\n"
Example #8
if __name__ == '__main__':

	args = parse_args()

	technique = osp.splitext(osp.basename(args.input))[0]

	db_eval_dict = db_read_eval(technique,raw_eval=False,
			inputdir=osp.dirname(args.input))

	db_benchmark = db_read_benchmark()
	db_sequences = db_read_sequences()


	log.info("Displaying evaluation of: %s"%osp.basename(args.input))

	table = ptable(["Sequence"] + ['J(M)','J(O)','J(D)','F(M)','F(O)','F(D)','T(M)'])

	X = []
	for key,values in db_eval_dict[technique].iteritems():
		X.append(db_eval_dict[technique][key].values())

	X = np.hstack(X)[:,:7]
	for s,row in zip(db_sequences,X):
		table.add_row([s.name]+ ["{: .3f}".format(n) for n in row])

	table.add_row(['Average'] +
			["{: .3f}".format(n) for n in np.nanmean(X,axis=0)])

	print "\n" + str(table) + "\n"

Example #9
from davis.dataset import DAVISAnnotationLoader, DAVISSegmentationLoader

if __name__ == '__main__':

    sequence_name = 'flamingo'
    technique_name = 'fcp'

    sourcedir = osp.join(cfg.PATH.SEGMENTATION_DIR, 'fcp', sequence_name)

    db_annotation = DAVISAnnotationLoader(cfg, osp.basename(sourcedir))
    db_segmentation = DAVISSegmentationLoader(cfg, osp.basename(sourcedir),
                                              osp.dirname(sourcedir))

    log.info('Starting evaluation of technique: "%s" on sequence "%s"' %
             (technique_name, sequence_name))

    # Initialize timer
    timer = Timer().tic()

    # Process sequence
    J, Jm, Jo, Jt = db_annotation.eval(db_segmentation, 'J')

    # Report results
    log.info("Processing time: %.3f seconds" % timer.toc())

    table = ptable(['Sequence'] + ['Jm', 'Jo', 'Jt'])
    table.add_row([sequence_name] +
                  ["{: .3f}".format(f) for f in [Jm, Jo, Jt]])

    print "\n" + str(table)
Example #10
	A = []


	for attribute in args.attributes:
		# Filter sequences tagged with `attribute`
		log.info("Filtering sequences with attribute: %s"%attribute)
		sequences = filter(
				lambda s: attribute in s.attributes,db_sequences)

		db_eval_dict = db_read_eval(sequence=[s.name for s in sequences],
				measure=args.measure,raw_eval=False)

		statistics_to_id = {'M':0,'O':1,'D':2}

		R = []
		for t in db_techniques:
			R.append(np.vstack(db_eval_dict[t.name][
				args.measure].values())[:,statistics_to_id[args.statistic]])

		A.append(np.average(np.array(R).T,axis=0))

	table = ptable(["Attribute"] +
			[t.name for t in db_techniques])

	for attr,row in zip(args.attributes,A):
		table.add_row([attr] + \
				['{: .2f}'.format(np.round(r,2)) for r in row])

	print "\n" + str(table) + "\n"
Example #11
	parser.add_argument("--output",
			dest='output',default=None,type=str,
			help='Output folder')

	return parser.parse_args()

if __name__ == '__main__':

	args = parse_args()

	db_techniques = db_read_techniques()

	from prettytable import PrettyTable as ptable

	table = ptable(["Abbr","Title","Authors","Conf","Year"])

	table.align     = 'l'
	technique_table = {}

	for t in db_techniques:
		technique_table[t.name]            = edict()
		technique_table[t.name].title      = t.title
		technique_table[t.name].authors    = t.authors
		technique_table[t.name].conference = t.conference
		technique_table[t.name].year       = t.year
		table.add_row([t.name,t.title,t.authors[0], t.conference,t.year])

	print "\n%s\n"%str(table)

	if args.output is not None:
Example #12
                        default=None,
                        type=str,
                        help='Output folder')

    return parser.parse_args()


if __name__ == '__main__':

    args = parse_args()

    db_techniques = db_read_techniques()

    from prettytable import PrettyTable as ptable

    table = ptable(["Abbr", "Title", "Authors", "Conf", "Year"])

    table.align = 'l'
    technique_table = {}

    for t in db_techniques:
        technique_table[t.name] = edict()
        technique_table[t.name].title = t.title
        technique_table[t.name].authors = t.authors
        technique_table[t.name].conference = t.conference
        technique_table[t.name].year = t.year
        table.add_row([t.name, t.title, t.authors[0], t.conference, t.year])

    print "\n%s\n" % str(table)

    if args.output is not None:
Example #13
        db_sequences = filter(lambda s: t_set == s.set, db_info.sequences)
        for s in db_sequences:
            X.append([1 if attr in s.attributes else 0 for attr in attributes])

        distr.append(np.round(np.sum(X, axis=0).astype(np.float32) / np.sum(X), 3))

        db_eval_dict = db_read_eval(sequence=[s.name for s in db_sequences], measure=args.measure, raw_eval=False)

        statistics_to_id = {"M": 0, "O": 1, "D": 2}

        R = []
        for t in db_techniques:
            R.append(np.vstack(db_eval_dict[t.name][args.measure].values())[:, statistics_to_id[args.statistic]])

        S.append(np.average(np.array(R).T, axis=0))

    print "\nAttributes Distribution"

    table = ptable(["Set"] + attributes)
    for attr, row in zip(db_info.sets, distr):
        table.add_row([attr] + ["{: .2f}".format(np.round(r, 2)) for r in row])
    print table

    table = ptable(["Set"] + [t.name for t in db_techniques])

    print "\nEvaluation (%s)" % args.measure
    for attr, row in zip(db_info.sets, S):
        table.add_row([attr] + ["{: .2f}".format(np.round(r, 2)) for r in row])

    print table
Example #14
from davis.dataset import DAVISAnnotationLoader,DAVISSegmentationLoader

if __name__ == '__main__':

	sequence_name  = 'flamingo'
	technique_name = 'fcp'

	sourcedir = osp.join(cfg.PATH.SEGMENTATION_DIR,'fcp',sequence_name)

	db_annotation   = DAVISAnnotationLoader(cfg,osp.basename(sourcedir))
	db_segmentation = DAVISSegmentationLoader(
			cfg,osp.basename(sourcedir),osp.dirname(sourcedir))

	log.info('Starting evaluation of technique: "%s" on sequence "%s"'%(
		technique_name,sequence_name))


	# Initialize timer
	timer = Timer().tic()

	# Process sequence
	J,Jm,Jo,Jt = db_annotation.eval(db_segmentation,'J')

	# Report results
	log.info("Processing time: %.3f seconds"%timer.toc())

	table = ptable(['Sequence']+['Jm','Jo','Jt'])
	table.add_row([sequence_name]+["{: .3f}".format(f) for f in [Jm,Jo,Jt]])

	print "\n" + str(table)
Example #15
        # Read sequences from file
        log.info('Reading sequences from: %s ' %
                 osp.basename(cfg.FILES.DB_INFO))
        sequences = [s.name for s in db_read_sequences()]

        # Compute full evaluation and save results
        for technique in techniques:
            db_save_eval(db_eval(technique, sequences))

        # Read results from file
        db_eval_dict = db_read_eval(raw_eval=False)

        # Save techniques attributes and results
        #db_save_techniques(db_eval_dict)

    log.info('Reading available techniques and results from: %s' %
             osp.basename(cfg.FILES.DB_BENCHMARK))

    db_techniques = db_read_techniques()

    # Display results
    table = ptable(['Measure'] + [t.name for t in db_techniques])

    X = np.array([np.hstack([t.J, t.F, t.T]) for t in db_techniques]).T

    for row, measure in zip(
            X, ['J(M)', 'J(O)', 'J(D)', 'F(M)', 'F(O)', 'F(D)', 'T(M)']):
        table.add_row([measure] + ["{: .3f}".format(r) for r in row])

    print "\n" + str(table) + "\n"
Example #16
    args = parse_args()

    db_sequences = db_read_sequences()
    db_techniques = db_read_techniques()

    # Read results from file
    log.info("Reading evaluation from: %s" % cfg.FILES.DB_BENCHMARK)
    db_eval_dict = db_read_eval(measure=args.measure, raw_eval=False)

    # Generate table
    statistics_to_id = {'M': 0, 'O': 1, 'D': 2}

    R = []
    for t in db_techniques:
        R.append(
            np.vstack(db_eval_dict[t.name][args.measure].values())
            [:, statistics_to_id[args.statistic]])

    R = np.array(R).T

    table = ptable(["Sequence"] + [t.name for t in db_techniques])

    for n, row in enumerate(R):
        table.add_row([db_sequences[n].name] + \
          ['{: .3f}'.format(r) for r in row])

    table.add_row(["Average"] +
                  ['{: .3f}'.format(r) for r in np.average(R, axis=0)])

    print "\n" + str(table) + "\n"
Example #17
		# Read sequences from file
		log.info('Reading sequences from: %s '%osp.basename(cfg.FILES.DB_INFO))
		sequences  = [s.name for s in db_read_sequences()]

		# Compute full evaluation and save results
		for technique in techniques:
			db_save_eval(db_eval(technique,sequences))

		# Read results from file
		db_eval_dict = db_read_eval(raw_eval=False)

		# Save techniques attributes and results
		#db_save_techniques(db_eval_dict)

	log.info('Reading available techniques and results from: %s'%
			osp.basename(cfg.FILES.DB_BENCHMARK))

	db_techniques = db_read_techniques()

	# Display results
	table = ptable(['Measure']+[t.name for t in db_techniques])

	X = np.array([np.hstack([t.J,t.F,t.T])
		for t in db_techniques]).T

	for row,measure in zip(X,['J(M)','J(O)','J(D)','F(M)','F(O)','F(D)','T(M)']):
		table.add_row([measure]+["{: .3f}".format(r) for r in row])

	print "\n" + str(table) + "\n"
Example #18
    db_techniques = db_read_techniques()

    A = []

    for attribute in args.attributes:
        # Filter sequences tagged with `attribute`
        log.info("Filtering sequences with attribute: %s" % attribute)
        sequences = filter(lambda s: attribute in s.attributes, db_sequences)

        db_eval_dict = db_read_eval(sequence=[s.name for s in sequences],
                                    measure=args.measure,
                                    raw_eval=False)

        statistics_to_id = {'M': 0, 'O': 1, 'D': 2}

        R = []
        for t in db_techniques:
            R.append(
                np.vstack(db_eval_dict[t.name][args.measure].values())
                [:, statistics_to_id[args.statistic]])

        A.append(np.average(np.array(R).T, axis=0))

    table = ptable(["Attribute"] + [t.name for t in db_techniques])

    for attr, row in zip(args.attributes, A):
        table.add_row([attr] + \
          ['{: .2f}'.format(np.round(r,2)) for r in row])

    print "\n" + str(table) + "\n"
Example #19
def show_data_shape():
    table = ptable(["", "images", "labels"])
    table.add_row(["training", mnist.train.images.shape, mnist.train.labels.shape])
    table.add_row(["test", mnist.test.images.shape, mnist.test.labels.shape])
    table.add_row(["validation", mnist.validation.images.shape, mnist.validation.labels.shape])
    print(table)
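show_data_shape relies on a module-level mnist object that is not created in this snippet. A minimal sketch of how such an object is typically obtained, assuming the (now deprecated) TensorFlow 1.x tutorial loader; the data directory name is arbitrary.

from prettytable import PrettyTable as ptable
from tensorflow.examples.tutorials.mnist import input_data  # TF 1.x only

# assumption: this is how the module-level `mnist` object was created
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

def show_data_shape():
    table = ptable(["", "images", "labels"])
    table.add_row(["training", mnist.train.images.shape, mnist.train.labels.shape])
    table.add_row(["test", mnist.test.images.shape, mnist.test.labels.shape])
    table.add_row(["validation", mnist.validation.images.shape, mnist.validation.labels.shape])
    print(table)

show_data_shape()
# With the default split this prints (55000, 784)/(55000, 10) for training,
# (10000, 784)/(10000, 10) for test and (5000, 784)/(5000, 10) for validation.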
Example #20
    seqs = None
    seqs = []
    with open(SEQ_LIST_FILE) as f:
        for seqname in f:
            seqs.append(seqname[:-1])

    db_eval_dict = db_read_eval(technique,
                                raw_eval=False,
                                inputdir=osp.dirname(args.input))

    db_benchmark = db_read_benchmark()
    db_sequences = db_read_sequences()

    log.info("Displaying evaluation of: %s" % osp.basename(args.input))

    table = ptable(["Sequence"] +
                   ['J(M)', 'J(O)', 'J(D)', 'F(M)', 'F(O)', 'F(D)', 'T(M)'])

    X = []
    for key, values in db_eval_dict[technique].iteritems():
        X.append(db_eval_dict[technique][key].values())

    X = np.hstack(X)[:, :7]
    for s, row in zip(db_sequences, X):
        table.add_row([s.name] + ["{: .3f}".format(n) for n in row])

    table.add_row(['Average'] +
                  ["{: .3f}".format(n) for n in np.nanmean(X, axis=0)])

    print "\n" + str(table) + "\n"