def check_detection_result(test_file, reference_file):
    """
    Return: [
        [nums of bboxes isequal, conf of bboxes isclose,
         labels of bboxes isequal, bboxes pos isequal],
        ...
    ]
    """
    test_data = json2dict(test_file)
    ref_data = json2dict(reference_file)
    results = None
    for img in test_data:
        if img not in ref_data:
            raise Exception('image %s can not be found in reference data' % img)
        else:
            new_results = isEqualDetection(test_data[img], ref_data[img])
            if not results:
                results = new_results
            else:
                changed = update_result_obj(results, new_results)
                if changed:
                    print img
    return res_as_dict_for_obj(results)
def check_classify_result(test_file, reference_file):
    """
    Return: a list where list[N] is the top-N result, containing the label
    check and the accuracy check, e.g.
    [[True, True], [True, True], [True, False], ...]
    The top-N result covers the first N predictions.
    """
    test_data = json2dict(test_file)
    ref_data = json2dict(reference_file)
    results = None
    for img in test_data:
        if img not in ref_data:
            raise Exception('image %s can not be found in reference data' % img)
        else:
            new_results = isEqualPrediction(test_data[img], ref_data[img])
            if not results:
                results = new_results
            else:
                update_result(results, new_results)
    return res_as_dict(results)
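# Hedged usage sketch (illustrative only, not part of the original module): the JSON
# file names below are hypothetical; both checkers expect result files keyed by
# image name, as described in the docstrings above.
def _demo_accuracy_checks():
    import pprint as _pprint
    cls_res = check_classify_result('alexnet_test.json', 'alexnet_ref.json')
    det_res = check_detection_result('ssd_test.json', 'ssd_ref.json')
    # per the docstrings above, cls_res[N] holds the top-N label/accuracy checks
    # and det_res holds the per-image bbox comparisons
    _pprint.pprint(cls_res)
    _pprint.pprint(det_res)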
def run(config):
    """
    Return:
        Per layer forward performance:  [(layer name, layer type, elapsed_time, FPS), ...]
        Per layer backward performance: [(layer name, layer type, elapsed_time, FPS), ...]
    """
    backend_class = backends_factory(config.backend)
    backend = backend_class(config)
    #backend.prepare_benchmark(config)
    layers_forward_perf, layers_backward_perf = get_layers_perf(backend, config)
    net_forward_perf, net_backward_perf = get_net_perf(backend, config)

    res_dict = {
        'layers_forward_perf': layers_forward_perf,
        'layers_backward_perf': layers_backward_perf,
        'net_forward_perf': net_forward_perf,
        'net_backward_perf': net_backward_perf
    }

    if hasattr(config, 'getReport') and hasattr(config, 'reference'):
        logger.debug('gen report')
        convertToReport(res_dict, config, backend)

    if hasattr(config, 'reference'):
        logger.debug('comparing')
        ref_res_dict = json2dict(
            os.path.join(config.reference.result_dir, 'perf_data.json'))
        for key, value in res_dict.iteritems():
            try:
                ref_value = ref_res_dict[key]
            except KeyError:
                raise Exception('key "%s" is missing in reference result' % key)
            if 'layers' in key:
                ref_perf_dict = dict(ref_value)
                for index, perf_list in enumerate(value):
                    if perf_list[0] == 'summary':
                        ref_time = ref_perf_dict[perf_list[0]][0]
                        diff_time = ref_time - perf_list[1][0]
                    else:
                        ref_time = ref_perf_dict[perf_list[0]]
                        diff_time = ref_time - perf_list[1]
                    # positive diff_time means this run is faster than the reference
                    value[index].append("faster than ref")
                    value[index].append([
                        diff_time,
                        '%.2f' % (100 * diff_time / ref_time) + "%"
                    ])
            else:
                ref_time = ref_value[0]
                diff_time = ref_value[0] - value[0]
                value.append('faster than ref')
                value.append(
                    [diff_time, '%.2f' % (100 * diff_time / ref_time) + "%"])

    logger.debug(pprint.pformat(layers_forward_perf))
    logger.debug(pprint.pformat(layers_backward_perf))
    logger.debug(pprint.pformat(net_forward_perf))
    logger.debug(pprint.pformat(net_backward_perf))

    # write res_dict to file
    out_dir = os.path.expanduser(str(config.out_dir))
    if hasattr(config, 'reference'):
        out_path = os.path.join(out_dir, "perf_cmp.json")
    else:
        out_path = os.path.join(out_dir, "perf_data.json")
    utils.io.dict2json(res_dict, out_path)
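# Hedged sketch (illustrative only, not part of the original module): shows how the
# JSON written by run() could be read back; the out_dir default is hypothetical.
# perf_data.json holds the four keys of res_dict above, and perf_cmp.json
# additionally carries the [diff_time, 'xx.xx%'] entries appended during comparison.
def _demo_load_perf_result(out_dir='~/benchmark_out'):
    perf = json2dict(os.path.join(os.path.expanduser(out_dir), 'perf_data.json'))
    logger.debug(pprint.pformat(perf['net_forward_perf']))
    return perf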
def convertToReport(res_dict, config, backend):
    ref_res_dict = json2dict(
        os.path.join(config.reference.result_dir, 'perf_data.json'))
    aTXT = list()
    net_time = list()
    #aTXT.append("Test engine: {}, reference engine: {}".format(config.backend.engine, config.reference.engine))
    aTXT.append(['-'] * 80)
    aTXT.append('net performance: ')

    net_time.append('forward: ')
    net_time.append('time: {:<9.4f} ms'.format(res_dict['net_forward_perf'][0]))
    net_time.append('reference time: {:<9.4f} ms'.format(
        ref_res_dict['net_forward_perf'][0]))
    net_time.append('Gap: {:<6.2f}'.format(
        -100 * (res_dict['net_forward_perf'][0] -
                ref_res_dict['net_forward_perf'][0]) /
        ref_res_dict['net_forward_perf'][0]) + '%')
    aTXT.append(net_time)

    net_time = list()
    net_time.append('backward: ')
    net_time.append('time: {:<9.4f} ms'.format(res_dict['net_backward_perf'][0]))
    net_time.append('reference time: {:<9.4f} ms'.format(
        ref_res_dict['net_backward_perf'][0]))
    net_time.append('Gap: {:<6.2f}'.format(
        -100 * (res_dict['net_backward_perf'][0] -
                ref_res_dict['net_backward_perf'][0]) /
        ref_res_dict['net_backward_perf'][0]) + '%')
    aTXT.append(net_time)
    aTXT.append(['-'] * 80)

    layers_f_perf = dict(res_dict['layers_forward_perf'])
    layers_b_perf = dict(res_dict['layers_backward_perf'])
    ref_f_perf = dict(ref_res_dict['layers_forward_perf'])
    ref_b_perf = dict(ref_res_dict['layers_backward_perf'])
    fwd_perf_perctg = dict()
    bwd_perf_perctg = dict()
    for key in layers_f_perf.iterkeys():
        if key != 'summary':
            fwd_perf_perctg[key] = -100 * (layers_f_perf[key] -
                                           ref_f_perf[key]) / ref_f_perf[key]
    for key in layers_b_perf.iterkeys():
        if key != 'summary':
            bwd_perf_perctg[key] = -100 * (layers_b_perf[key] -
                                           ref_b_perf[key]) / ref_b_perf[key]

    if config.getReport.reportOrder == 'default':
        orderedKey = sorted(layers_f_perf.iterkeys())
    elif config.getReport.reportOrder == 'forward performance':
        orderedKey = sorted(fwd_perf_perctg.iterkeys(),
                            key=lambda item: fwd_perf_perctg[item])
    elif config.getReport.reportOrder == 'backward performance':
        orderedKey = sorted(bwd_perf_perctg.iterkeys(),
                            key=lambda item: bwd_perf_perctg[item])
    else:
        raise Exception('Unsupported report order, choose "default", '
                        '"forward performance" or "backward performance"')

    layer_id = -1
    aTXT.append('layer by layer performance')
    aTXT.append(['-'] * 80)
    for key in orderedKey:
        if key != 'summary':
            layer_id += 1
            layer_time = list()
            layer_time.append('layer_id: {}'.format(key))
            layer_time.append('layer_name: {}'.format(backend.get_layer_name(key)))
            layer_time.append('layer_type: {}'.format(backend.get_layer_type(key)))
            aTXT.append(layer_time)

            layer_time = list()
            layer_time.append('forward: ')
            layer_time.append('time: {:<9.4f} ms'.format(layers_f_perf[key]))
            layer_time.append('reference time: {:<9.4f} ms'.format(ref_f_perf[key]))
            layer_time.append('Gap: {:<6.2f}'.format(fwd_perf_perctg[key]) + '%')
            aTXT.append(layer_time)

            layer_time = list()
            layer_time.append('backward: ')
            layer_time.append('time: {:<9.4f} ms'.format(layers_b_perf[key]))
            layer_time.append('reference time: {:<9.4f} ms'.format(ref_b_perf[key]))
            layer_time.append('Gap: {:<6.2f}'.format(bwd_perf_perctg[key]) + '%')
            aTXT.append(layer_time)
            aTXT.append(['-'] * 80)

    if not os.path.exists(config.out_dir):
        os.mkdir(config.out_dir)
    with open(os.path.join(config.out_dir, 'test_report.txt'), 'w') as fp:
        for line in aTXT:
            if isinstance(line, list):
                for word in line:
                    fp.write(str(word))
                    # the '-' ruler lines are written without tab separators
                    if isinstance(word, str) and word == '-':
                        continue
                    fp.write('\t')
            else:
                fp.write(line)
            fp.write('\n')
        fp.write('\n')
        fp.write('\n')
def main():
    args = args_process()
    setup_logger()
    # jsonPathList: [[json_path, is_ref], ...]
    jsonPathList = io.genConfFilename(args.config)
    report_path_txt = os.path.join(
        'test-config-debug',
        os.path.splitext(os.path.basename(args.config))[0] + '.txt')
    cases_info_json = os.path.join(
        'test-config-debug',
        os.path.splitext(os.path.basename(args.config))[0] + '_cases_info.json')
    if not os.path.exists(args.parent_dir):
        os.makedirs(args.parent_dir)

    report_path_list = list()
    raw_lines = list()
    if args.run_ref == 'on':
        shutil.rmtree(args.parent_dir)
        os.makedirs(args.parent_dir)

    call(["cp", report_path_txt,
          os.path.join(args.parent_dir, 'find_the_report.txt')])
    with open(os.path.join(args.parent_dir, 'find_the_report.txt')) as fp:
        # skip the first line (title), get the report paths
        raw_lines = fp.readlines()
        report_path_list = [x.split('\t')[-1].strip() for x in raw_lines[1:]]

    # clear stale result directories for the cases that are about to run
    for report_path in report_path_list:
        real_path = os.path.join(args.parent_dir, os.path.dirname(report_path))
        if os.path.exists(real_path):
            shutil.rmtree(real_path)

    for jsonPath, is_ref in jsonPathList:
        if (args.run_ref != 'on' and not is_ref) or \
                (args.run_ref == 'on' and is_ref):
            call(["./bin/run_case.py", "-c", jsonPath,
                  "-p", args.parent_dir, "-pp", args.python_path])

    test_case_successed = 0
    test_case_failed = 0
    if os.path.exists(os.path.join(args.parent_dir, 'test_report')):
        shutil.rmtree(os.path.join(args.parent_dir, 'test_report'))
    os.makedirs(os.path.join(args.parent_dir, 'test_report'))

    cases_info = io.json2dict(cases_info_json)
    cases_info['cpu_type'] = args.cpu_type
    if cases_info['application'] == 'accuracy':
        cases_info_list = cases_info['cases_info']
        for case_info_dict in cases_info_list:
            # find the test result for this case
            if 'report_path' in case_info_dict:
                report_path = case_info_dict['report_path']
                try:
                    report_fp = open(os.path.join(args.parent_dir, report_path))
                except IOError:
                    test_case_failed += 1
                    case_info_dict['test_result'] = 'cannot find test report'
                    case_info_dict['report_path'] = ''
                else:
                    call(["cp", os.path.join(args.parent_dir, report_path),
                          os.path.join(args.parent_dir, 'test_report',
                                       os.path.dirname(report_path) + '.txt')])
                    test_case_successed += 1
                    pass_or_fail = report_fp.readline().strip().split('\t')[-1]
                    case_info_dict['test_result'] = pass_or_fail
                    case_info_dict['report_path'] = os.path.join(
                        args.parent_dir, 'test_report',
                        os.path.dirname(report_path) + '.txt')
                    report_fp.close()
            # redefine the case info with the final paths/results
            case_info_dict = modify_conf(case_info_dict)
    io.dict2json(cases_info, os.path.join(args.parent_dir, 'test_results.json'))
def layer_accuracy_debug(batch_num, img_names, test_result, ref_dir,
                         precision=1e-04):
    this_batch_result = list()
    this_batch_result.append(['batch_num: ', batch_num])
    this_batch_result.append(['-'] * 40)

    ref_json = os.path.join(ref_dir, 'name.json')
    ref_batches_name = json2dict(ref_json)
    if not str(batch_num) in ref_batches_name:
        raise Exception('batch can not be found in reference data')
    for img in img_names:
        if not img in ref_batches_name[str(batch_num)]:
            raise Exception(
                'image in batch %s can not be found in reference data' %
                (batch_num))

    count = 0
    for layer_name, l in test_result.iteritems():
        count += 1
        this_layer_pass = 'pass'
        this_layer_result = list()
        for j, [blob_name, np_list] in enumerate(l):
            ref_sample_list = list()
            sample_list = list()
            blob_title = list()
            for i, np_arry in enumerate(np_list):
                if blob_name == 'params_diff':
                    # parameter blobs: index 0 is the weight (W), index 1 the bias (b)
                    if i == 0:
                        ctx = 'W'
                    else:
                        ctx = 'b'
                else:
                    # top blobs: index 0 is the data, index 1 the diff
                    if i == 0:
                        ctx = 'data'
                    else:
                        ctx = 'diff'
                try:
                    ref_data = np.load(
                        os.path.join(ref_dir, 'batch_' + str(batch_num),
                                     layer_name.replace('/', '-'),
                                     blob_name + '_' + ctx + '.npy'))
                except IOError:
                    logger.error(
                        "layer {} not found in reference, skipping ...".format(
                            layer_name))
                    continue
                isequal = np.allclose(np_arry, ref_data, rtol=1e-04,
                                      atol=precision, equal_nan=True)
                if isequal:
                    this_arry = 'pass'
                else:
                    this_arry = 'fail'
                    this_layer_pass = 'fail'
                blob_title.append(ctx + ': ' + this_arry)
                ref_sample_list.append(ctx + '_ref' + ': ')
                ref_sample_list.append(
                    np.concatenate((ref_data.flatten()[1:6],
                                    ref_data.flatten()[-5:])))
                sample_list.append(ctx + ': ')
                sample_list.append(
                    np.concatenate((np_arry.flatten()[1:6],
                                    np_arry.flatten()[-5:])))
            if blob_name == 'params_diff':
                blob_title.insert(0, ' parameters_diff: ')
            else:
                blob_title.insert(0, ' top_name: ' + blob_name)
            this_layer_result.append(blob_title)
            sample_list.insert(0, ' ')
            ref_sample_list.insert(0, ' ')
            this_layer_result.append(sample_list)
            this_layer_result.append(ref_sample_list)
        this_batch_result.append(['%04d' % count, layer_name, this_layer_pass])
        this_batch_result.extend(this_layer_result)
        this_batch_result.append(['-'] * 40)
    return this_batch_result
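# Hedged sketch (illustrative only, not part of the original module): the layer/blob
# names, shapes and ref_dir below are made up, but the nesting matches what
# layer_accuracy_debug() iterates over:
# {layer_name: [[blob_name, [data, diff]], ..., ['params_diff', [W, b]]]}.
# Actually running it requires a real reference dump (name.json plus per-layer .npy files).
def _demo_layer_accuracy_debug(ref_dir='./reference_dump'):
    fake_result = {
        'conv1': [
            ['conv1_top', [np.zeros((1, 3, 8, 8)), np.zeros((1, 3, 8, 8))]],
            ['params_diff', [np.zeros((3, 3, 3, 3)), np.zeros(3)]],
        ]
    }
    return layer_accuracy_debug(0, ['cat.jpg'], fake_result, ref_dir)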
def check_layer_accuracy_result(batch_name, test_datas, test_diffs, ref_dir,
                                check_result):
    last_res = check_result
    flag = 0
    if len(last_res) == 0:
        first_result = True
    else:
        first_result = False

    ref_json = os.path.join(ref_dir, 'name.json')
    ref_batches_name = json2dict(ref_json)
    for num, img_list in batch_name.iteritems():
        if not num in ref_batches_name:
            raise Exception('batch can not be found in reference data')
        for img in img_list:
            if not img in ref_batches_name[num]:
                raise Exception(
                    'image in batch %s can not be found in reference data' %
                    (num))

        ordered_key = sorted(test_datas.keys(),
                             key=lambda item: item.split('_')[0])
        for key in ordered_key:
            ref_data = np.load(
                os.path.join(ref_dir, num,
                             key.replace('/', '-') + '_datas.npy'))
            data_isequal = np.allclose(test_datas[key], ref_data, rtol=1e-02,
                                       atol=1e-04, equal_nan=False)
            if first_result:
                last_res[key] = data_isequal
            else:
                if not data_isequal and flag == 0:
                    logger.debug('error occurs from ' + key)
                    last_res['first_error'] = key
                    flag = 1
                last_res[key] &= data_isequal

        ordered_key = sorted(test_diffs.keys(),
                             key=lambda item: item.split('_')[0],
                             reverse=True)
        for key in ordered_key:
            ref_weight = np.load(
                os.path.join(ref_dir, num, key + '_diffs.npy'))
            # treat near-zero gradients on both sides as equal
            if np.average(ref_weight) < 1e-06 and \
                    np.average(test_diffs[key]) < 1e-06:
                weight_isequal = True
            else:
                weight_isequal = np.allclose(test_diffs[key], ref_weight,
                                             rtol=1e-02, atol=1e-04,
                                             equal_nan=False)
            if first_result:
                last_res[key] = weight_isequal
            else:
                if not weight_isequal and flag == 0:
                    logger.debug('error occurs from ' + key)
                    last_res['first_error'] = key
                    flag = 1
                last_res[key] &= weight_isequal
    return last_res
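# Hedged sketch (illustrative only, not part of the original module): shows the
# intended incremental use of check_layer_accuracy_result() across batches; the
# batch/blob names and ref_dir are hypothetical. check_result starts empty and is
# folded (&=) batch by batch, with 'first_error' recording the first blob that
# diverged from the reference.
def _demo_check_layer_accuracy(ref_dir='./reference_dump'):
    check_result = {}
    for batch_num in ('0', '1'):
        batch_name = {batch_num: ['cat.jpg']}
        test_datas = {'conv1_top': np.zeros((1, 3, 8, 8))}
        test_diffs = {'conv1_top': np.zeros((1, 3, 8, 8))}
        check_result = check_layer_accuracy_result(batch_name, test_datas,
                                                   test_diffs, ref_dir,
                                                   check_result)
    return check_result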