def __init__(self, input, label, k=1, **kwargs):
    """Evaluator tracking running top-k accuracy across mini-batches.

    Creates two persistable int64 state variables (``total`` / ``correct``)
    and appends the ops that update them to the current main program.
    """
    super(Accuracy, self).__init__("accuracy", **kwargs)
    program = self.helper.main_program
    # Evaluator state may only be created in the root (global) block.
    if program.current_block().idx != 0:
        raise ValueError("You can only invoke Evaluator in root block")
    self.total = self.create_state(dtype='int64', shape=[1], suffix='total')
    self.correct = self.create_state(
        dtype='int64', shape=[1], suffix='correct')
    prog_kw = {'main_program': program}
    batch_total = self.helper.create_tmp_variable(dtype='int')
    batch_correct = self.helper.create_tmp_variable(dtype='int')
    batch_acc = layers.accuracy(
        input=input,
        label=label,
        k=k,
        total=batch_total,
        correct=batch_correct,
        **prog_kw)
    batch_total = layers.cast(x=batch_total, dtype='int64', **prog_kw)
    batch_correct = layers.cast(x=batch_correct, dtype='int64', **prog_kw)
    # Running sums: state += per-batch counters.
    layers.sums(input=[self.total, batch_total], out=self.total, **prog_kw)
    layers.sums(
        input=[self.correct, batch_correct], out=self.correct, **prog_kw)
    self.metrics.append(batch_acc)
def __init__(self, input, label, k=1, **kwargs):
    """Build the accuracy evaluator: per-batch top-k accuracy plus
    persistable running counters of seen / correct samples."""
    super(Accuracy, self).__init__("accuracy", **kwargs)
    main_program = self.helper.main_program
    if main_program.current_block().idx != 0:
        raise ValueError("You can only invoke Evaluator in root block")
    # Persistable accumulators that live across mini-batches.
    self.total = self.create_state(dtype='int64', shape=[1], suffix='total')
    self.correct = self.create_state(
        dtype='int64', shape=[1], suffix='correct')
    kwargs = {'main_program': main_program}
    tmp_total = self.helper.create_tmp_variable(dtype='int')
    tmp_correct = self.helper.create_tmp_variable(dtype='int')
    acc_out = layers.accuracy(
        input=input,
        label=label,
        k=k,
        total=tmp_total,
        correct=tmp_correct,
        **kwargs)
    tmp_total = layers.cast(x=tmp_total, dtype='int64', **kwargs)
    tmp_correct = layers.cast(x=tmp_correct, dtype='int64', **kwargs)
    layers.sums(input=[self.total, tmp_total], out=self.total, **kwargs)
    layers.sums(input=[self.correct, tmp_correct], out=self.correct, **kwargs)
    self.metrics.append(acc_out)
def __init__(self,
             input,
             gt_label,
             gt_box,
             gt_difficult,
             class_num,
             background_label=0,
             overlap_threshold=0.5,
             evaluate_difficult=True,
             ap_version='integral'):
    """Detection mAP evaluator.

    Computes the per-batch mean average precision and an accumulative mAP
    over all batches seen so far (via the accum_* state variables and the
    ``has_state`` flag consumed by ``layers.detection_map``).
    """
    super(DetectionMAP, self).__init__("map_eval")

    # Ground-truth label/difficult flags are concatenated with the boxes,
    # so they must share the box dtype.
    gt_label = layers.cast(x=gt_label, dtype=gt_box.dtype)
    gt_difficult = layers.cast(x=gt_difficult, dtype=gt_box.dtype)
    label = layers.concat([gt_label, gt_difficult, gt_box], axis=1)

    # calculate mean average precision (mAP) of current mini-batch
    # Renamed from `map` to avoid shadowing the builtin.
    batch_map = layers.detection_map(
        input,
        label,
        class_num,
        background_label,
        overlap_threshold=overlap_threshold,
        evaluate_difficult=evaluate_difficult,
        ap_version=ap_version)

    # States accumulated across batches by the second detection_map op.
    self.create_state(dtype='int32', shape=None, suffix='accum_pos_count')
    self.create_state(dtype='float32', shape=None, suffix='accum_true_pos')
    self.create_state(dtype='float32', shape=None, suffix='accum_false_pos')

    # has_state starts at 0 (no accumulated state yet) and is set to 1 below.
    self.has_state = None
    var = self.helper.create_variable(
        persistable=True, dtype='int32', shape=[1])
    self.helper.set_variable_initializer(
        var, initializer=Constant(value=int(0)))
    self.has_state = var

    # calculate accumulative mAP
    accum_map = layers.detection_map(
        input,
        label,
        class_num,
        background_label,
        overlap_threshold=overlap_threshold,
        evaluate_difficult=evaluate_difficult,
        has_state=self.has_state,
        input_states=self.states,
        out_states=self.states,
        ap_version=ap_version)

    # After the first update, mark the accumulated state as valid.
    layers.fill_constant(
        shape=self.has_state.shape,
        value=1,
        dtype=self.has_state.dtype,
        out=self.has_state)

    self.cur_map = batch_map
    self.accum_map = accum_map
def eval(self, executor, eval_program=None):
    """Run the evaluation program and return accuracy = correct / total
    as a numpy array."""
    if eval_program is None:
        eval_program = Program()
    target_block = eval_program.current_block()
    prog_kw = {'main_program': eval_program}
    # Clone the accumulated counters into the evaluation program.
    total_f = _clone_var_(target_block, self.total)
    correct_f = _clone_var_(target_block, self.correct)
    total_f = layers.cast(total_f, dtype='float32', **prog_kw)
    correct_f = layers.cast(correct_f, dtype='float32', **prog_kw)
    ratio = layers.elementwise_div(x=correct_f, y=total_f, **prog_kw)
    fetched = executor.run(eval_program, fetch_list=[ratio])
    return np.array(fetched[0])
def eval(self, executor, eval_program=None):
    """Return (average edit distance, average instance-error rate) over
    all sequences seen so far, both as numpy arrays."""
    if eval_program is None:
        eval_program = Program()
    block = eval_program.current_block()
    with program_guard(main_program=eval_program):
        # Clone accumulated state into the evaluation program.
        dist_sum = _clone_var_(block, self.total_distance)
        num_seq = _clone_var_(block, self.seq_num)
        err_count = _clone_var_(block, self.instance_error)
        num_seq = layers.cast(x=num_seq, dtype='float32')
        err_count = layers.cast(x=err_count, dtype='float32')
        mean_dist = layers.elementwise_div(x=dist_sum, y=num_seq)
        mean_err = layers.elementwise_div(x=err_count, y=num_seq)
        result = executor.run(
            eval_program, fetch_list=[mean_dist, mean_err])
    return np.array(result[0]), np.array(result[1])
def __init__(self, input, label, ignored_tokens=None, **kwargs):
    """Edit-distance evaluator: accumulates total distance, sequence count
    and number of imperfect (non-zero-distance) instances."""
    super(EditDistance, self).__init__("edit_distance", **kwargs)
    main_program = self.helper.main_program
    if main_program.current_block().idx != 0:
        raise ValueError("You can only invoke Evaluator in root block")

    # Persistable accumulators updated on every mini-batch.
    self.total_distance = self._create_state(
        dtype='float32', shape=[1], suffix='total_distance')
    self.seq_num = self._create_state(
        dtype='int64', shape=[1], suffix='seq_num')
    self.instance_error = self._create_state(
        dtype='int64', shape=[1], suffix='instance_error')

    distances, seq_num = layers.edit_distance(
        input=input, label=label, ignored_tokens=ignored_tokens)

    # A sequence is an "error" instance unless its distance equals zero.
    zero = layers.fill_constant(shape=[1], value=0.0, dtype='float32')
    exact_match = layers.equal(distances, zero)
    exact_match_int = layers.cast(x=exact_match, dtype='int')
    seq_right_count = layers.reduce_sum(exact_match_int)
    instance_error_count = layers.elementwise_sub(
        x=seq_num, y=seq_right_count)
    total_distance = layers.reduce_sum(distances)

    layers.sums(
        input=[self.total_distance, total_distance],
        out=self.total_distance)
    layers.sums(input=[self.seq_num, seq_num], out=self.seq_num)
    layers.sums(
        input=[self.instance_error, instance_error_count],
        out=self.instance_error)
    self.metrics.append(total_distance)
    self.metrics.append(instance_error_count)
def eval(self, executor, eval_program=None):
    """Compute and fetch the mean edit distance and mean instance error
    from the accumulated state; returns a pair of numpy arrays."""
    eval_program = Program() if eval_program is None else eval_program
    target_block = eval_program.current_block()
    with program_guard(main_program=eval_program):
        distance_sum = _clone_var_(target_block, self.total_distance)
        sequence_count = _clone_var_(target_block, self.seq_num)
        error_count = _clone_var_(target_block, self.instance_error)
        # Cast integer counters so the divisions below are float ops.
        sequence_count = layers.cast(x=sequence_count, dtype='float32')
        error_count = layers.cast(x=error_count, dtype='float32')
        avg_dist = layers.elementwise_div(x=distance_sum, y=sequence_count)
        avg_err = layers.elementwise_div(x=error_count, y=sequence_count)
        fetched = executor.run(eval_program, fetch_list=[avg_dist, avg_err])
    return np.array(fetched[0]), np.array(fetched[1])
def __init__(self, input, label, ignored_tokens=None, **kwargs):
    """Set up edit-distance accumulation: total distance, sequence count,
    and the count of sequences with non-zero distance."""
    super(EditDistance, self).__init__("edit_distance", **kwargs)
    program = self.helper.main_program
    if program.current_block().idx != 0:
        raise ValueError("You can only invoke Evaluator in root block")
    self.total_distance = self.create_state(
        dtype='float32', shape=[1], suffix='total_distance')
    self.seq_num = self.create_state(
        dtype='int64', shape=[1], suffix='seq_num')
    self.instance_error = self.create_state(
        dtype='int64', shape=[1], suffix='instance_error')
    batch_distances, batch_seq_num = layers.edit_distance(
        input=input, label=label, ignored_tokens=ignored_tokens)
    # Count perfect sequences (distance == 0); the rest are error instances.
    zero_const = layers.fill_constant(shape=[1], value=0.0, dtype='float32')
    is_exact = layers.equal(batch_distances, zero_const)
    is_exact_int = layers.cast(x=is_exact, dtype='int')
    num_right = layers.reduce_sum(is_exact_int)
    num_wrong = layers.elementwise_sub(x=batch_seq_num, y=num_right)
    batch_total_distance = layers.reduce_sum(batch_distances)
    layers.sums(
        input=[self.total_distance, batch_total_distance],
        out=self.total_distance)
    layers.sums(input=[self.seq_num, batch_seq_num], out=self.seq_num)
    layers.sums(
        input=[self.instance_error, num_wrong], out=self.instance_error)
    self.metrics.append(batch_total_distance)
    self.metrics.append(num_wrong)
def _add_average_apply_op(self, block, param_grad):
    """Append ops that overwrite the parameter with its running average.

    The current parameter value is first backed up into the gradient
    variable (so it can be restored later), then
    param = (sum_1 + sum_2 + sum_3) / (num_accumulates + old_num_accumulates).
    """
    param = block.clone_variable(param_grad[0])
    grad = block.clone_variable(param_grad[1])
    sum_1 = block.clone_variable(self._get_accumulator('sum_1', param))
    sum_2 = block.clone_variable(self._get_accumulator('sum_2', param))
    sum_3 = block.clone_variable(self._get_accumulator('sum_3', param))
    num_accumulates = block.clone_variable(
        self._get_accumulator('num_accumulates', param))
    old_num_accumulates = block.clone_variable(
        self._get_accumulator('old_num_accumulates', param))
    # NOTE(review): num_updates is cloned into the block but never used
    # below — presumably kept so the variable exists in the apply program;
    # confirm before removing.
    num_updates = block.clone_variable(
        self._get_accumulator('num_updates', param))
    # backup param value to grad
    layers.assign(input=param, output=grad)
    # param = (sum_1 + sum_2 + sum_3) / (num_accumulates + old_num_accumulates)
    # Locals renamed so the builtin `sum` is no longer shadowed.
    total_count = layers.sum(x=[num_accumulates, old_num_accumulates])
    total_sum = layers.sum(x=[sum_1, sum_2, sum_3])
    total_count = layers.cast(x=total_count, dtype='float32')
    total_sum = layers.cast(x=total_sum, dtype='float32')
    layers.elementwise_div(x=total_sum, y=total_count, out=param)
def __init__(self,
             input,
             gt_label,
             gt_box,
             gt_difficult=None,
             class_num=None,
             background_label=0,
             overlap_threshold=0.5,
             evaluate_difficult=True,
             ap_version='integral'):
    """Detection mAP evaluator with optional ``gt_difficult`` flags.

    Computes the per-batch mean average precision and an accumulative mAP
    over all batches seen so far (via the accum_* state variables and the
    ``has_state`` flag consumed by ``layers.detection_map``).
    """
    super(DetectionMAP, self).__init__("map_eval")

    # Labels are concatenated with the boxes, so they must share the dtype.
    gt_label = layers.cast(x=gt_label, dtype=gt_box.dtype)
    # Explicit None check: truth-testing a fluid Variable is not a
    # reliable way to detect "argument omitted".
    if gt_difficult is not None:
        gt_difficult = layers.cast(x=gt_difficult, dtype=gt_box.dtype)
        label = layers.concat([gt_label, gt_difficult, gt_box], axis=1)
    else:
        label = layers.concat([gt_label, gt_box], axis=1)

    # calculate mean average precision (mAP) of current mini-batch
    # Renamed from `map` to avoid shadowing the builtin.
    batch_map = layers.detection_map(
        input,
        label,
        class_num,
        background_label,
        overlap_threshold=overlap_threshold,
        evaluate_difficult=evaluate_difficult,
        ap_version=ap_version)

    # States accumulated across batches by the second detection_map op.
    self.create_state(dtype='int32', shape=None, suffix='accum_pos_count')
    self.create_state(dtype='float32', shape=None, suffix='accum_true_pos')
    self.create_state(dtype='float32', shape=None, suffix='accum_false_pos')

    # has_state starts at 0 (no accumulated state yet), set to 1 below.
    self.has_state = None
    var = self.helper.create_variable(
        persistable=True, dtype='int32', shape=[1])
    self.helper.set_variable_initializer(
        var, initializer=Constant(value=int(0)))
    self.has_state = var

    # calculate accumulative mAP
    accum_map = layers.detection_map(
        input,
        label,
        class_num,
        background_label,
        overlap_threshold=overlap_threshold,
        evaluate_difficult=evaluate_difficult,
        has_state=self.has_state,
        input_states=self.states,
        out_states=self.states,
        ap_version=ap_version)

    # After the first update, mark the accumulated state as valid.
    layers.fill_constant(
        shape=self.has_state.shape,
        value=1,
        dtype=self.has_state.dtype,
        out=self.has_state)

    self.cur_map = batch_map
    self.accum_map = accum_map