Example #1
    def reset(self, executor, reset_program=None):
        """
        Reset the has_state flag at the beginning of each pass or
        user-specified batch.
        """
        if reset_program is None:
            reset_program = Program()
        with program_guard(main_program=reset_program):
            var = _clone_var_(reset_program.current_block(), self.has_state)
            layers.fill_constant(
                shape=var.shape, value=0, dtype=var.dtype, out=var)
        executor.run(reset_program)
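For context, a minimal standalone sketch of the pattern this reset relies on: a separate program re-declares the persistable variable in its own block (same name, so the executor resolves it to the same tensor in the scope) and overwrites it with fill_constant(out=...). This assumes the legacy paddle.fluid 1.x API; the variable state is ours, standing in for what create_state() produces.

import paddle.fluid as fluid
from paddle.fluid import layers
from paddle.fluid.framework import Program, program_guard

# A persistable state variable, like those held in self.states.
state = layers.create_global_var(
    shape=[1], value=3.0, dtype='float32', persistable=True)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())

# A dedicated reset program: re-declare the variable under the same
# name, then zero it in place with fill_constant.
reset_program = Program()
with program_guard(main_program=reset_program):
    var = reset_program.current_block().create_var(
        name=state.name, shape=state.shape, dtype=state.dtype,
        persistable=True)
    layers.fill_constant(
        shape=var.shape, value=0, dtype=var.dtype, out=var)
exe.run(reset_program)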
Example #2
    def __init__(self,
                 input,
                 gt_label,
                 gt_box,
                 gt_difficult,
                 class_num,
                 background_label=0,
                 overlap_threshold=0.5,
                 evaluate_difficult=True,
                 ap_version='integral'):
        super(DetectionMAP, self).__init__("map_eval")

        gt_label = layers.cast(x=gt_label, dtype=gt_box.dtype)
        gt_difficult = layers.cast(x=gt_difficult, dtype=gt_box.dtype)
        label = layers.concat([gt_label, gt_difficult, gt_box], axis=1)

        # calculate mean average precision (mAP) of current mini-batch
        map = layers.detection_map(input,
                                   label,
                                   class_num,
                                   background_label,
                                   overlap_threshold=overlap_threshold,
                                   evaluate_difficult=evaluate_difficult,
                                   ap_version=ap_version)

        self.create_state(dtype='int32', shape=None, suffix='accum_pos_count')
        self.create_state(dtype='float32', shape=None, suffix='accum_true_pos')
        self.create_state(dtype='float32',
                          shape=None,
                          suffix='accum_false_pos')

        self.has_state = None
        var = self.helper.create_variable(persistable=True,
                                          dtype='int32',
                                          shape=[1])
        self.helper.set_variable_initializer(
            var, initializer=Constant(value=int(0)))
        self.has_state = var

        # calculate accumulative mAP
        accum_map = layers.detection_map(input,
                                         label,
                                         class_num,
                                         background_label,
                                         overlap_threshold=overlap_threshold,
                                         evaluate_difficult=evaluate_difficult,
                                         has_state=self.has_state,
                                         input_states=self.states,
                                         out_states=self.states,
                                         ap_version=ap_version)

        layers.fill_constant(shape=self.has_state.shape,
                             value=1,
                             dtype=self.has_state.dtype,
                             out=self.has_state)

        self.cur_map = map
        self.accum_map = accum_map
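The trailing fill_constant is what flips has_state from 0 to 1 once the main program has run, telling the accumulative detection_map op that its input states are valid. A minimal standalone sketch of that toggle, assuming the legacy paddle.fluid 1.x API (the name flag is ours):

import paddle.fluid as fluid
from paddle.fluid import layers

# Initialized to 0 by the startup program, like has_state above.
flag = layers.create_global_var(
    shape=[1], value=0, dtype='int32', persistable=True)
# Every run of the main program now ends by writing 1 into flag.
layers.fill_constant(
    shape=flag.shape, value=1, dtype=flag.dtype, out=flag)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
print(exe.run(fluid.default_main_program(), fetch_list=[flag])[0])  # [1]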
Example #3
    def reset(self, executor, reset_program=None):
        """
        reset metric states at the begin of each pass/user specified batch
        """
        if reset_program is None:
            reset_program = Program()

        with program_guard(main_program=reset_program):
            for var in self.states:
                assert isinstance(var, Variable)
                g_var = _clone_var_(reset_program.current_block(), var)
                layers.fill_constant(
                    shape=g_var.shape, value=0.0, dtype=g_var.dtype, out=g_var)

        executor.run(reset_program)
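In training code such a reset is typically invoked once per pass, before iterating over the data. A hypothetical usage sketch; evaluator, exe, train_reader, feeder, and avg_cost are placeholders rather than names from the source:

import paddle.fluid as fluid

num_passes = 10  # placeholder
for pass_id in range(num_passes):
    evaluator.reset(exe)  # zero the accumulated metric state
    for data in train_reader():
        loss, cur_map, accum_map = exe.run(
            fluid.default_main_program(),
            feed=feeder.feed(data),
            fetch_list=[avg_cost, evaluator.cur_map, evaluator.accum_map])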
Example #4
    def __init__(self, input, label, ignored_tokens=None, **kwargs):
        super(EditDistance, self).__init__("edit_distance", **kwargs)
        main_program = self.helper.main_program
        if main_program.current_block().idx != 0:
            raise ValueError("You can only invoke Evaluator in root block")

        self.total_distance = self._create_state(dtype='float32',
                                                 shape=[1],
                                                 suffix='total_distance')
        self.seq_num = self._create_state(dtype='int64',
                                          shape=[1],
                                          suffix='seq_num')
        self.instance_error = self._create_state(dtype='int64',
                                                 shape=[1],
                                                 suffix='instance_error')
        distances, seq_num = layers.edit_distance(
            input=input, label=label, ignored_tokens=ignored_tokens)

        zero = layers.fill_constant(shape=[1], value=0.0, dtype='float32')
        compare_result = layers.equal(distances, zero)
        compare_result_int = layers.cast(x=compare_result, dtype='int')
        seq_right_count = layers.reduce_sum(compare_result_int)
        instance_error_count = layers.elementwise_sub(x=seq_num,
                                                      y=seq_right_count)
        total_distance = layers.reduce_sum(distances)
        layers.sums(input=[self.total_distance, total_distance],
                    out=self.total_distance)
        layers.sums(input=[self.seq_num, seq_num], out=self.seq_num)
        layers.sums(input=[self.instance_error, instance_error_count],
                    out=self.instance_error)
        self.metrics.append(total_distance)
        self.metrics.append(instance_error_count)
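A minimal sketch of the distance/exact-match pipeline used above, outside the evaluator class. It assumes the legacy paddle.fluid 1.x API; the data layer names are ours:

import paddle.fluid as fluid
from paddle.fluid import layers

# Variable-length sequences of token ids: hypothesis and reference.
hyp = layers.data(name='hyp', shape=[1], dtype='int64', lod_level=1)
ref = layers.data(name='ref', shape=[1], dtype='int64', lod_level=1)
distances, seq_num = layers.edit_distance(input=hyp, label=ref)

# A sequence is exactly right iff its edit distance is zero.
zero = layers.fill_constant(shape=[1], value=0.0, dtype='float32')
exact = layers.cast(layers.equal(distances, zero), dtype='int64')
seq_right_count = layers.reduce_sum(exact)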
Example #5
    def __init__(self, input, label, ignored_tokens=None, **kwargs):
        super(EditDistance, self).__init__("edit_distance", **kwargs)
        main_program = self.helper.main_program
        if main_program.current_block().idx != 0:
            raise ValueError("You can only invoke Evaluator in root block")

        self.total_distance = self.create_state(
            dtype='float32', shape=[1], suffix='total_distance')
        self.seq_num = self.create_state(
            dtype='int64', shape=[1], suffix='seq_num')
        self.instance_error = self.create_state(
            dtype='int64', shape=[1], suffix='instance_error')
        distances, seq_num = layers.edit_distance(
            input=input, label=label, ignored_tokens=ignored_tokens)

        zero = layers.fill_constant(shape=[1], value=0.0, dtype='float32')
        compare_result = layers.equal(distances, zero)
        compare_result_int = layers.cast(x=compare_result, dtype='int')
        seq_right_count = layers.reduce_sum(compare_result_int)
        instance_error_count = layers.elementwise_sub(
            x=seq_num, y=seq_right_count)
        total_distance = layers.reduce_sum(distances)
        layers.sums(
            input=[self.total_distance, total_distance],
            out=self.total_distance)
        layers.sums(input=[self.seq_num, seq_num], out=self.seq_num)
        layers.sums(
            input=[self.instance_error, instance_error_count],
            out=self.instance_error)
        self.metrics.append(total_distance)
        self.metrics.append(instance_error_count)
Example #6
    def __init__(self, name=None):
        self.helper = LayerHelper('select', name=name)
        self.parent_block = self.helper.main_program.current_block()
        self.cases = []

        super(Select, self).__init__(self.helper.main_program)
        self.case_to_execute = fill_constant(
            shape=[1], dtype=core.VarDesc.VarType.INT32, value=-1)
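A side note on the dtype argument: fill_constant accepts either a dtype string or the core.VarDesc enum used above. Both lines below create the same int32 scalar (a minimal sketch, legacy paddle.fluid 1.x API assumed):

from paddle.fluid import core
from paddle.fluid.layers import fill_constant

a = fill_constant(shape=[1], dtype='int32', value=-1)
b = fill_constant(shape=[1], dtype=core.VarDesc.VarType.INT32, value=-1)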
Example #7
    def construct_op(self):
        main_program = self.helper.main_program
        cases_block = main_program.current_block()

        inner_outputs = set()
        input_set = set()
        params = set()

        for op in self.block.ops:
            # Iterate over all operators, get all the inputs
            # and add as input to the SelectCase operator.
            for iname in op.input_names:
                for in_var_name in op.input(iname):
                    if in_var_name not in inner_outputs:
                        input_set.add(in_var_name)

            for oname in op.output_names:
                for out_var_name in op.output(oname):
                    inner_outputs.add(out_var_name)

        param_list = [
            cases_block.var(each_name) for each_name in params
            if each_name not in input_set
        ]

        # Iterate over all operators, get all the outputs
        # add to the output list of SelectCase operator only if
        # they exist in the parent block.
        out_vars = []
        for inner_out_name in inner_outputs:
            if inner_out_name in cases_block.vars:
                out_vars.append(cases_block.var(inner_out_name))

        # First, create an op that determines whether this is the case
        # selected for execution.
        should_execute_block = equal(
            fill_constant(
                shape=[1], dtype=core.VarDesc.VarType.INT32, value=self.idx),
            self.case_to_execute)

        step_scope = cases_block.create_var(
            type=core.VarDesc.VarType.STEP_SCOPES)

        cases_block.append_op(
            type='conditional_block',
            inputs={'X': [should_execute_block],
                    'Params': param_list},
            outputs={'Out': out_vars,
                     'Scope': [step_scope]},
            attrs={
                'sub_block': self.block,
                'is_scalar_condition': self.is_scalar_condition
            })

        return '%s,%s,%s,%s' % (
            self.idx, self.action,
            self.channel.name if self.channel else '',
            self.value.name if self.value else '')
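The should_execute_block predicate reduces to comparing two int32 scalars at runtime; sketched standalone below (legacy paddle.fluid 1.x API; idx is a placeholder for the case's position):

from paddle.fluid.layers import equal, fill_constant

case_to_execute = fill_constant(shape=[1], dtype='int32', value=-1)
idx = 2  # placeholder: this case's position among the Select cases
# True at runtime only when the selected case index equals idx.
should_execute = equal(
    fill_constant(shape=[1], dtype='int32', value=idx), case_to_execute)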
Example #8
    def process_context(self, context, param, grad):
        if self.group_name not in context:
            context[self.group_name] = []
            context[self.group_name + "_clip_value"] = self.clip_norm
            context[self.group_name + "_clip"] = layers.fill_constant(
                shape=[1], dtype="float32", value=self.clip_norm)
        else:
            if self.clip_norm != context[self.group_name + "_clip_value"]:
                raise ValueError(
                    "All parameters in the same group should share the same "
                    "'clip_norm'")

        local_norm_var = layers.reduce_sum(input=layers.pow(x=grad, factor=2.0))
        context[self.group_name].append(local_norm_var)

        self.context = context
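Each call therefore contributes the sum of squares of one gradient to the group's running list; the global norm is the square root of those terms summed over the whole group. A tiny sketch of the local term, legacy paddle.fluid 1.x API assumed:

from paddle.fluid import layers

g = layers.fill_constant(shape=[3], dtype='float32', value=2.0)
# Sum of squares of one gradient tensor: 3 * 2.0**2 = 12.0.
local_norm_var = layers.reduce_sum(input=layers.pow(x=g, factor=2.0))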
Example #9
    def __init__(self,
                 input,
                 gt_label,
                 gt_box,
                 gt_difficult=None,
                 class_num=None,
                 background_label=0,
                 overlap_threshold=0.5,
                 evaluate_difficult=True,
                 ap_version='integral'):
        super(DetectionMAP, self).__init__("map_eval")

        gt_label = layers.cast(x=gt_label, dtype=gt_box.dtype)
        if gt_difficult:
            gt_difficult = layers.cast(x=gt_difficult, dtype=gt_box.dtype)
            label = layers.concat([gt_label, gt_difficult, gt_box], axis=1)
        else:
            label = layers.concat([gt_label, gt_box], axis=1)

        # calculate mean average precision (mAP) of current mini-batch
        map = layers.detection_map(
            input,
            label,
            class_num,
            background_label,
            overlap_threshold=overlap_threshold,
            evaluate_difficult=evaluate_difficult,
            ap_version=ap_version)

        self.create_state(dtype='int32', shape=None, suffix='accum_pos_count')
        self.create_state(dtype='float32', shape=None, suffix='accum_true_pos')
        self.create_state(dtype='float32', shape=None, suffix='accum_false_pos')

        self.has_state = None
        var = self.helper.create_variable(
            persistable=True, dtype='int32', shape=[1])
        self.helper.set_variable_initializer(
            var, initializer=Constant(value=int(0)))
        self.has_state = var

        # calculate accumulative mAP
        accum_map = layers.detection_map(
            input,
            label,
            class_num,
            background_label,
            overlap_threshold=overlap_threshold,
            evaluate_difficult=evaluate_difficult,
            has_state=self.has_state,
            input_states=self.states,
            out_states=self.states,
            ap_version=ap_version)

        layers.fill_constant(
            shape=self.has_state.shape,
            value=1,
            dtype=self.has_state.dtype,
            out=self.has_state)

        self.cur_map = map
        self.accum_map = accum_map
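The concat builds the row layout detection_map expects, [class, (difficult,) xmin, ymin, xmax, ymax]. A minimal sketch of the no-difficult branch, assuming the legacy paddle.fluid 1.x API (the data layer names are ours):

from paddle.fluid import layers

gt_label = layers.data(
    name='gt_label', shape=[1], dtype='float32', lod_level=1)
gt_box = layers.data(
    name='gt_box', shape=[4], dtype='float32', lod_level=1)
label = layers.concat([gt_label, gt_box], axis=1)  # rows of length 5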