Example #1
def _sparse_categorical_accuracy(y_true, y_pred):
    y_pred_rank = ops.convert_to_tensor(y_pred).shape.ndims
    y_true_rank = ops.convert_to_tensor(y_true).shape.ndims
    # If the shape of y_true is (num_samples, 1), squeeze to (num_samples,)
    if (y_true_rank is not None) and (y_pred_rank is not None) and (len(
            F.int_shape(y_true)) == len(F.int_shape(y_pred))):
        y_true = array_ops.squeeze(y_true, [-1])
    y_pred = math_ops.argmax(y_pred, axis=-1)

    # If the predicted and actual output dtypes don't match,
    # cast the predictions to match.
    if F.dtype(y_pred) != F.dtype(y_true):
        y_pred = math_ops.cast(y_pred, F.dtype(y_true))

    return F.float32(math_ops.equal(y_true, y_pred))
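
For intuition, here is a minimal NumPy-only sketch of the same logic (the backend module F is not shown above, so plain NumPy stands in for it): squeeze a trailing singleton dimension from y_true when its rank matches y_pred's, take the argmax over the last axis, and compare.

import numpy as np

def sparse_categorical_accuracy_np(y_true, y_pred):
    # NumPy illustration of the logic above, not the library's API.
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    # If y_true is (num_samples, 1) while y_pred is (num_samples, num_classes),
    # drop the trailing singleton dimension.
    if y_true.ndim == y_pred.ndim:
        y_true = np.squeeze(y_true, axis=-1)
    # The predicted class is the argmax over the last axis.
    labels = np.argmax(y_pred, axis=-1).astype(y_true.dtype)
    return (y_true == labels).astype(np.float32)

print(sparse_categorical_accuracy_np([[1], [0]], [[0.1, 0.9], [0.2, 0.8]]))
# [1. 0.]
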
Example #2
 def _build_feed_targets(self, targets):
     # We don't check that the number of targets matches self.outputs,
     # because the loss and metrics have already been computed by model_fn.
     self.targets = []
     self._target_names = []
     self._feed_targets = []
     self._feed_target_names = []
     self._feed_target_shapes = []
     for i, x in enumerate(targets):
         name = 'target_%d' % (i + 1)
         self._target_names.append(name)
         if isinstance(x, list):
             x = np.asarray(x)
             if x.ndim == 1:
                 x = np.expand_dims(x, 1)
         if isinstance(x, np.ndarray):
             shape = (None,) + x.shape[1:]
             placeholder = F.placeholder(
                 shape=shape, name=name)
             self.targets.append(placeholder)
             self._feed_targets.append(placeholder)
             self._feed_target_names.append(name)
             self._feed_target_shapes.append(shape)
         else:
             self.targets.append(x)
             if F.is_placeholder(x):
                 self._feed_targets.append(x)
                 self._feed_target_names.append(name)
                 self._feed_target_shapes.append(F.int_shape(x))
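
The shape bookkeeping above (and in _build_feed_inputs in the next example) reduces to two steps: promote a 1-D Python list to a column array, then replace the batch dimension with None. A small stand-alone sketch, assuming nothing beyond NumPy:

import numpy as np

def feed_shape(x):
    # Mirror the handling above: lists become arrays, 1-D data becomes a
    # column vector, and the batch axis is left unspecified as None.
    if isinstance(x, list):
        x = np.asarray(x)
        if x.ndim == 1:
            x = np.expand_dims(x, 1)
    return (None,) + x.shape[1:]

print(feed_shape([1, 2, 3]))           # (None, 1)
print(feed_shape(np.zeros((32, 10))))  # (None, 10)
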
Example #3
 def _build_feed_inputs(self, inputs):
     self._input_names = []
     self._feed_inputs = []
     self._feed_input_names = []
     self._feed_input_shapes = []
     self.inputs = []
     for i, x in enumerate(inputs):
         name = 'input_%d' % (i + 1)
         self._input_names.append(name)
         if isinstance(x, list):
             x = np.asarray(x)
             if x.ndim == 1:
                 x = np.expand_dims(x, 1)
         if isinstance(x, np.ndarray):
             shape = (None,) + x.shape[1:]
             placeholder = F.placeholder(
                 shape=shape, name=name)
             self.inputs.append(placeholder)
             self._feed_inputs.append(placeholder)
             self._feed_input_names.append(name)
             self._feed_input_shapes.append(shape)
         else:
             self.inputs.append(x)
             if F.is_placeholder(x):
                 self._feed_inputs.append(x)
                 self._feed_input_names.append(name)
                 self._feed_input_shapes.append(F.int_shape(x))
Example #4
 def _set_inputs(self, inputs, outputs=None, training=None):
     """
     Subclassed model
      :param inputs: only nested lists and non-nested dicts are supported
     :param outputs:
     :param training:
     :return:
     """
     self._nested_inputs = inputs
     self.inputs = []
     for i, x in enumerate(utils.valid_data(inputs)):
         name = 'input_%d' % (i + 1)
         self._input_names.append(name)
         if isinstance(x, list):
             x = np.asarray(x)
             if x.ndim == 1:
                 x = np.expand_dims(x, 1)
         if isinstance(x, np.ndarray):
             shape = (None,) + x.shape[1:]
             placeholder = F.placeholder(
                 shape=shape, name=name)
             self.inputs.append(placeholder)
             self._feed_inputs.append(placeholder)
             self._feed_input_names.append(name)
             self._feed_input_shapes.append(shape)
         else:
             self.inputs.append(x)
             if F.is_placeholder(x):
                 self._feed_inputs.append(x)
                 self._feed_input_names.append(name)
                 self._feed_input_shapes.append(F.int_shape(x))
     if self.model_fn is None:
         kwargs = {'training': training} if has_arg(self.forward, 'training') else {}
         self._nested_outputs = self(inputs, **kwargs)
         self.outputs = nest.flatten(self._nested_outputs)
     elif outputs is not None:
         logging.info('=>Calling model_fn...')
         result = self.model_fn(
             self, utils.nest_data(
                 self.inputs, x_keys, x),
             utils.nest_data(
                 self.targets, y_keys, y))
         logging.info('=>Finish calling model_fn...')
         if not isinstance(result, EstimatorSpec):
              raise ValueError("Result returned from `model_fn` must be "
                               "an instance of `EstimatorSpec`")
         self.train_hooks.extend(result.train_hooks)
         self.val_hooks.extend(result.val_hooks)
         self.loss = result.loss
         self.metrics = result.metrics
         self.outputs = result.outputs
     self._output_names = [
         'output_%d' % i for i in range(1, len(self.outputs) + 1)]
     self._uses_learning_phase = any(getattr(x, '_uses_learning_phase', False)
                                     for x in self.outputs)
     self.built = True
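
The tail of this method flattens an arbitrarily nested output structure and then names each flat output. nest.flatten comes from imports not shown here; the sketch below uses a hypothetical flatten helper purely to illustrate the idea.

def flatten(structure):
    # Hypothetical stand-in for nest.flatten: depth-first flattening of
    # nested dicts/lists/tuples into a flat list of leaves.
    if isinstance(structure, dict):
        items = [structure[k] for k in sorted(structure)]
    elif isinstance(structure, (list, tuple)):
        items = list(structure)
    else:
        return [structure]
    flat = []
    for item in items:
        flat.extend(flatten(item))
    return flat

outputs = flatten({'logits': [1, 2], 'aux': (3,)})
print(outputs)  # [3, 1, 2]
print(['output_%d' % i for i in range(1, len(outputs) + 1)])
# ['output_1', 'output_2', 'output_3']
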
Example #5
 def _compile_targets(self, targets):
     logging.info("=>Compiling targets...")
     self.targets = []
     self._feed_targets = []
     self._feed_target_names = []
     self._feed_target_shapes = []
     self._feed_loss_fns = []
     targets = self._compile_args(targets, 'targets')
     for i in range(len(self.outputs)):
         if i in self._skip_target_indices:
             self.targets.append(None)
         else:
             name = self.output_names[i]
             output = self.outputs[i]
             target = targets[i]
             loss_fn = self.loss_functions[i]
             if target is None:
                 target = F.placeholder(
                     ndim=len(F.int_shape(output)),
                     name=name + '_target',
                     sparse=F.is_sparse(output),
                     dtype=F.dtype(output))
             elif isinstance(target, list):
                 target = np.asarray(target)
                 if target.ndim == 1:
                     target = np.expand_dims(target, 1)
             if isinstance(target, np.ndarray):
                 shape = (None,) + target.shape[1:]
                 placeholder = F.placeholder(
                     shape=shape, name=name)
                 self.targets.append(placeholder)
                 self._feed_targets.append(placeholder)
                 self._feed_target_names.append(name)
                 self._feed_target_shapes.append(shape)
                 self._feed_loss_fns.append(loss_fn)
             else:
                 self.targets.append(target)
                 if F.is_placeholder(target):
                     self._feed_targets.append(target)
                     self._feed_target_names.append(name)
                     self._feed_target_shapes.append(F.int_shape(target))
                     self._feed_loss_fns.append(loss_fn)
Example #6
 def forward(self, inputs, rois, im_info):
     """
     :param inputs: features
     :param rois: regions of interest, shape with [batch, 5]
         format as (img_id, x0, y0, x1, y1)
         img_id is index of image inside batch
     :param im_info: scales of image, shape with [batch, 2]
         format as (height, width)
     :return:
     """
     assert F.ndim(rois) == 2 and F.int_shape(rois)[-1] == 5
     assert F.ndim(im_info) == 2 and F.int_shape(im_info)[-1] == 2
     indices = F.int32(rois[:, 0])
     boxes = rois[:, 1:]
     norm = F.float32(
         array_ops.stack(
             [im_info[:, 1], im_info[:, 0], im_info[:, 1], im_info[:, 0]],
             axis=1))
     boxes = boxes / norm
     # (x0, y0, x1, y1) -> (y0, x0, y1, x1)
     boxes = array_ops.stack(
         [boxes[:, 1], boxes[:, 0], boxes[:, 3], boxes[:, 2]], axis=1)
     crop_size = array_ops.constant(self.crop_size)
     if self.data_format[-1] == 'C':
         kernel_size = (1, ) + self._kernel_size + (1, )
         strides = (1, ) + self._strides + (1, )
     else:
         kernel_size = (1, 1) + self._kernel_size
         strides = (1, 1) + self._strides
         inputs = F.transpose_to_channels_last(inputs)
     outputs = image_ops.crop_and_resize(image=inputs,
                                         boxes=boxes,
                                         box_ind=indices,
                                         crop_size=crop_size)
     if self.data_format[-1] != 'C':
         outputs = F.transpose_to_channels_first(outputs)
     outputs = nn.max_pool2d(input=outputs,
                             ksize=kernel_size,
                             strides=strides,
                             data_format=self.data_format,
                             padding='SAME')
     return outputs
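
The part of this method worth isolating is the box preprocessing: RoIs arrive as pixel-space (x0, y0, x1, y1), are normalized by each image's (height, width) from im_info, and are reordered to the (y0, x0, y1, x1) layout that crop_and_resize expects. A NumPy-only sketch of just that transform:

import numpy as np

def normalize_boxes(rois, im_info):
    # rois:    (N, 5) rows of (img_id, x0, y0, x1, y1) in pixels
    # im_info: (N, 2) rows of (height, width)
    indices = rois[:, 0].astype(np.int32)
    boxes = rois[:, 1:]
    h, w = im_info[:, 0], im_info[:, 1]
    # Divide x-coordinates by width and y-coordinates by height.
    boxes = boxes / np.stack([w, h, w, h], axis=1)
    # (x0, y0, x1, y1) -> (y0, x0, y1, x1)
    boxes = boxes[:, [1, 0, 3, 2]]
    return indices, boxes

rois = np.array([[0., 10., 20., 110., 220.]])
im_info = np.array([[400., 200.]])  # height=400, width=200
print(normalize_boxes(rois, im_info)[1])  # [[0.05 0.05 0.55 0.55]]
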
Example #7
 def forward(self, inputs):
     rank = F.ndim(inputs)
     if rank > 2:
         outputs = math_ops.tensordot(inputs, self.kernel,
                                      [[rank - 1], [0]])
         outputs.set_shape(F.int_shape(inputs)[:-1] + (self.units, ))
     else:
         outputs = math_ops.matmul(inputs, self.kernel)
     if self.use_bias:
         outputs = nn.bias_add(outputs, self.bias)
     if self.activation:
         outputs = self.activation(outputs)
     return outputs
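
The rank > 2 branch applies the same kernel to every position of a higher-rank input by contracting only the last axis; for rank-2 inputs this collapses to an ordinary matrix product. A minimal NumPy equivalent:

import numpy as np

inputs = np.random.rand(4, 7, 16)  # e.g. (batch, timesteps, features)
kernel = np.random.rand(16, 32)    # (features, units)

# Contract the last axis of `inputs` with the first axis of `kernel`,
# mirroring math_ops.tensordot(inputs, kernel, [[rank - 1], [0]]) above.
outputs = np.tensordot(inputs, kernel, axes=([inputs.ndim - 1], [0]))
print(outputs.shape)  # (4, 7, 32)

# For a rank-2 input the same contraction is just a matrix multiply.
assert np.allclose(np.tensordot(inputs[0], kernel, axes=([1], [0])),
                   inputs[0] @ kernel)
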
Example #8
def graph_scope(name, default_name=None, values=None):
    from tensorlib.engine import Input
    if values is None:
        raise ValueError("Argument `values` can not be None.")
    values = to_list(values)
    for x in values:
        F.assert_tensor_traceable(x)
    with ops.name_scope(name=name, default_name=default_name,
                        values=values) as scope:
        inputs = unpack_singleton([
            Input(batch_input_shape=F.int_shape(x), dtype=x.dtype)
            for x in values
        ])
        handler = GraphScope(scope=scope, inputs=inputs)
        yield handler
    net = Network(inputs=inputs, outputs=handler.outputs, name=scope)
    graph_ops.build_node(net, values, to_list(handler.outputs))
    # print(getattr(handler.outputs, '_anchor')[0])
    del handler
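
graph_scope follows the yield-a-handler pattern (presumably decorated with contextlib.contextmanager in the surrounding module, which is not shown): the caller builds against handler.inputs, assigns handler.outputs, and the network is wired up after the with block exits. A generic, library-agnostic sketch of that pattern, with names invented for illustration:

from contextlib import contextmanager

class _Handler(object):
    def __init__(self, inputs):
        self.inputs = inputs
        self.outputs = None

@contextmanager
def toy_scope(inputs):
    handler = _Handler(inputs)
    yield handler  # the caller fills in handler.outputs inside the block
    if handler.outputs is None:
        raise ValueError("outputs must be set inside the scope")
    print("wiring", handler.inputs, "->", handler.outputs)

with toy_scope("x") as h:
    h.outputs = "f(x)"
# On exit the scope is finalized and prints: wiring x -> f(x)
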
Example #9
 def build_model(self, x, y=None, training=None):
     x_keys, valid_x = utils.valid_data(x)
      y_keys, valid_y = utils.valid_data(y)  # valid_y is [] when y is None
     if self.inputs is None:
         self._build_feed_inputs(valid_x)
         if self.model_fn is None:
             if has_arg(self.forward, 'training'):
                 self._uses_learning_phase = True
                 self.outputs = to_list(self(*self.inputs, training=training))
             else:
                 self.outputs = to_list(self(*self.inputs))
         elif y is not None:
             self._build_feed_targets(valid_y)
             logging.info('=>Calling model_fn...')
             result = self.model_fn(
                 self, utils.nest_data(
                     self.inputs, x_keys, x),
                 utils.nest_data(
                     self.targets, y_keys, y))
             logging.info('=>Finish calling model_fn...')
             if not isinstance(result, EstimatorSpec):
                  raise ValueError("Result returned from `model_fn` must be "
                                   "an instance of `EstimatorSpec`")
             self.train_hooks.extend(result.train_hooks)
             self.val_hooks.extend(result.val_hooks)
             self.loss = result.loss
             self.metrics = result.metrics
             self.outputs = result.outputs
     else:  # graph-model, inputs and outputs already satisfied
         self._input_names = []
         self._feed_inputs = []
         self._feed_input_names = []
         self._feed_input_shapes = []
         for i, x in enumerate(self.inputs):
             name = 'input_%d' % (i + 1)
             self._input_names.append(name)
             self._feed_inputs.append(x)
             self._feed_input_names.append(name)
             self._feed_input_shapes.append(F.int_shape(x))
         if self.model_fn is not None:
             self._build_feed_targets(valid_y)
             logging.info('=>Calling model_fn...')
             result = self.model_fn(
                 self, None, utils.nest_data(
                     self.targets, y_keys, y))
             logging.info('=>Finish calling model_fn...')
             if not isinstance(result, EstimatorSpec):
                  raise ValueError("Result returned from `model_fn` must be "
                                   "an instance of `EstimatorSpec`")
             self.train_hooks.extend(result.train_hooks)
             self.val_hooks.extend(result.val_hooks)
             self.loss = result.loss
             self.metrics = result.metrics
             self.outputs = result.outputs
     self._output_names = [
         'output_%d' % i for i in range(1, len(self.outputs) + 1)]
     if not self.uses_learning_phase:
         self._uses_learning_phase = any(getattr(x, '_uses_learning_phase', False)
                                         for x in self.outputs)
     self._is_built = True
     return valid_x, valid_y