Example #1
 def _standardize_data(self,
                       x,
                       y=None):
     """
     Transform any elements in x and y that are not placeholders
     into placeholders.
     """
     all_inputs = self._build_model_fn(x, y)
     # If `x` and `y` were all symbolic,
     # then the model should not be fed any inputs and targets.
     # Note: in this case, `any` and `all` are equivalent since we disallow
     # mixed symbolic/value inputs.
     if any(F.is_tensor(v) for v in all_inputs):
         return [], []
     # What follows is input validation and standardization to list format,
     # in the case where all inputs are value arrays.
     x = utils.verify_and_normalize_data(
         x,
         self._feed_input_names,
         self._feed_input_shapes)
     if y is not None:
         y = utils.verify_and_normalize_data(
             y,
             self._feed_target_names,
             self._feed_target_shapes)
         utils.check_array_length_consistency(x, y)
     else:
         y = []
     return x, y
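A minimal usage sketch, not from the source: `model` and its feed names/shapes are assumed here. Value arrays come back as normalized lists; fully symbolic inputs would short-circuit to ([], []).

import numpy as np

# Hypothetical call on an assumed `model` instance.
x, y = model._standardize_data(np.zeros((8, 4)), np.zeros((8, 1)))
# x and y are now lists of arrays in the model's feed order.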
Example #2
 def call(self, inputs, all_fetches):
     assert isinstance(inputs, (list, tuple))
     feed_symbols = []
     symbol_values = []
     feed_arrays = []
     array_values = []
     # Partition inputs: symbolic tensors are fed as symbols; value
     # arrays are cast to each placeholder's numpy dtype.
     for tensor, value in zip(self.inputs, inputs):
         if value is None:
             continue
         if is_tensor(value):
             feed_symbols.append(tensor)
             symbol_values.append(value)
         else:
             feed_arrays.append(tensor)
             array_values.append(
                 np.asarray(value, dtype=tf.as_dtype(tensor.dtype).as_numpy_dtype))
     if self.feed_dict:
         for key in sorted(self.feed_dict.keys()):
             array_values.append(
                 np.asarray(self.feed_dict[key], dtype=tf.as_dtype(key.dtype).as_numpy_dtype))
     # Rebuild the session callable only when the feed/fetch signature
     # changes; otherwise the cached callable is reused below.
     if (self._callable_fn is None or
             feed_arrays != self._feed_arrays or
             symbol_values != self._symbol_values or
             feed_symbols != self._feed_symbols or
             all_fetches != self._all_fetches):
         self._make_callable(feed_arrays=feed_arrays,
                             feed_symbols=feed_symbols,
                             symbol_values=symbol_values,
                             all_fetches=all_fetches)
     if self.run_metadata:
         fetched = self._callable_fn(*array_values, run_metadata=self.run_metadata)
     else:
         fetched = self._callable_fn(*array_values)
     return fetched
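A hedged sketch of the caching behaviour, assuming a function object `f` of this class built over a TF1-style graph (all names invented):

# First call: builds a tf.Session callable for this feed/fetch signature.
outs = f.call([x_batch], all_fetches=[loss_op])
# Same placeholders and fetches again: the cached callable is reused,
# only the array values change.
outs = f.call([next_batch], all_fetches=[loss_op])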
Example #3
def verify_and_normalize_data(data,
                              names,
                              shapes=None):
    if not names:
        if data is not None and hasattr(data, '__len__') and len(data):
            raise ValueError('Error when checking model: expected no data, but got:', data)
        return []
    if data is None:
        return [None] * len(names)
    if isinstance(data, list):
        if isinstance(data[0], list):
            data = [np.asarray(d) for d in data]
        elif len(names) == 1 and isinstance(data[0], (float, int)):
            data = [np.asarray(data)]
    else:
        data = [data]
    # data = [normalize_single_array(x) for x in data]
    if len(data) != len(names):
        if data and hasattr(data[0], 'shape'):
            raise ValueError(
                'Error when checking model: the list of Numpy arrays that you are passing to '
                'your model is not the size the model expected. Expected to see ' + str(len(names)) +
                ' array(s), but instead got the following list of ' + str(len(data)) + ' arrays: ' +
                str(data)[:200] + '...')
        elif len(names) > 1:
            raise ValueError(
                'Error when checking model: you are passing a list as input to your model, '
                'but the model expects a list of ' + str(len(names)) + ' Numpy arrays instead. '
                'The list you passed was: ' + str(data)[:200])
        elif len(data) == 1 and not hasattr(data[0], 'shape'):
            raise TypeError(
                'Error when checking model: data should be a Numpy array, or list/dict of '
                'Numpy arrays. Found: ' + str(data)[:200] + '...')
        elif len(names) == 1:
            data = [np.asarray(data)]

    if shapes:
        for i in range(len(names)):
            if shapes[i] is not None and not F.is_tensor(data[i]):
                data_shape = data[i].shape
                shape = shapes[i]
                if data[i].ndim != len(shape):
                    raise ValueError(
                        'Error when checking: expected ' + names[i] + ' to have ' +
                        str(len(shape)) + ' dimensions, but got array with shape ' + str(data_shape))
                data_shape = data_shape[1:]
                shape = shape[1:]
                for dim, ref_dim in zip(data_shape, shape):
                    if ref_dim != dim and ref_dim:
                        raise ValueError(
                            'Error when checking: expected ' + names[i] + ' to have shape ' +
                            str(shape) + ' but got array with shape ' + str(data_shape))
    return data
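A usage sketch, assuming verify_and_normalize_data and its F backend are importable; a bare array is wrapped into a one-element list and checked against the declared feed shape (the batch dimension is excluded):

import numpy as np

x = np.zeros((32, 10))
data = verify_and_normalize_data(x, ['input_1'], [(None, 10)])
print(len(data), data[0].shape)  # 1 (32, 10)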
Example #4
def valid_data(data):
    if data is None:
        return []
    if isinstance(data, dict):
        data = [data[key] for key in sorted(data.keys())]
    else:
        data = to_list(data)
    if not all(isinstance(x, np.ndarray)
               or F.is_tensor(x) for x in data):
        raise ValueError("All elements should be instances"
                         " of numpy.ndarray or tensorflow.Tensor, but"
                         " received: " + str([type(x) for x in data]))
    return data
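A small sketch, assuming valid_data is in scope: dict inputs are flattened to a list ordered by sorted key, and non-array elements raise:

import numpy as np

values = valid_data({'b': np.ones(2), 'a': np.zeros(2)})
print([v[0] for v in values])  # [0.0, 1.0] -- 'a' sorts before 'b'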
Example #5
def normalize_single_array(x):
    if x is None:
        return None
    elif F.is_tensor(x):
        shape = x.get_shape()
        if shape is None or shape[0] is None:
            raise ValueError(
            'When feeding symbolic tensors to a model, we expect the '
            'tensors to have a static batch size. '
                'Got tensor with shape: %s' % str(shape))
        return x
    elif x.ndim == 1:
        x = np.expand_dims(x, 1)
    return x
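A sketch of the 1-D promotion, assuming the function is in scope:

import numpy as np

labels = np.array([0, 1, 1, 0])              # shape (4,)
print(normalize_single_array(labels).shape)  # (4, 1)
print(normalize_single_array(None))          # None passes through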
Example #6
def check_num_samples(samples,
                      batch_size=None,
                      steps=None):
    if steps is not None and batch_size is not None:
        raise ValueError('When `steps` is set, the `batch_size` must be None')
    if not samples or any(F.is_tensor(x) for x in samples):
        if steps is None:
            raise ValueError("When samples from symbolic tensors(e.g. Dataset), argument"
                             " `steps` must be specified instead of batch_size, cause"
                             " symbolic tensors are expected to produce batches of data")
        return None
    if hasattr(samples[0], 'shape'):
        return int(samples[0].shape[0])
    return None
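A usage sketch, assuming the function and its F backend are in scope:

import numpy as np

print(check_num_samples([np.zeros((32, 10))]))  # 32, read from the leading dim
print(check_num_samples([], steps=100))         # None: batching left to the source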
Example #7
 def _valid_data(data, name='data'):
     values = []
     names = []
     if isinstance(data, dict):
         # Use `key` so the `name` parameter is not shadowed.
         for key, value in data.items():
             names.append(key)
             values.append(value)
     else:
         values = to_list(data)
         names = [name + '_%d' % i for i in range(1, len(values) + 1)]
     if not all(isinstance(x, np.ndarray)
                or F.is_tensor(x) for x in values):
         raise ValueError("All elements should be instances"
                          " of numpy.ndarray or tensorflow.Tensor, but"
                          " received: " + str(values))
     return names, values
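A sketch of the generated naming, assuming _valid_data and to_list are in scope:

import numpy as np

names, values = _valid_data([np.zeros(3), np.ones(3)], name='input')
print(names)  # ['input_1', 'input_2']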
Example #8
 def _standardize_data(self,
                       x,
                       y=None):
     """
     Transform any elements in x and y that are not placeholders
     into placeholders.
     """
     # Build the model using the retrieved inputs (value or symbolic).
     # If values, then in symbolic-mode placeholders will be created
     # to match the value shapes.
     if not self.is_built:
         x, y = self.build_model(x, y=y)  # y is [] if y=None
     else:
         _, x = utils.valid_data(x)
         _, y = utils.valid_data(y)  # y is [] if y=None
     # `y is not []` is always True (identity against a fresh list); use `!=`.
     if y is not None and y != [] and not self.is_compiled:
         self.compile(optimizer=self.optimizer,
                      loss=self.loss,
                      loss_weights=self.loss_weights,
                      metrics=self.metrics,
                      checkpoint_dir=self._checkpoint_dir,
                      targets=None if self.model_fn else y,
                      session_cfg=self._session_cfg,
                      **self._function_kwargs)
     # If `x` and `y` were all symbolic,
     # then the model should not be fed any inputs and targets.
     # Note: in this case, `any` and `all` are equivalent since we disallow
     # mixed symbolic/value inputs.
     if any(F.is_tensor(v) for v in x + y):
         return [], []
     # What follows is input validation and standardization to list format,
     # in the case where all inputs are value arrays.
     x = utils.verify_and_normalize_data(
         x,
         self._feed_input_names,
         self._feed_input_shapes)
     if y is not None and y != []:
         y = utils.verify_and_normalize_data(
             y,
             self._feed_target_names,
             self._feed_target_shapes)
         utils.check_array_length_consistency(x, y)
     return x, y
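A hedged call-order sketch (the model object and its arrays are assumed, not shown above): the first call builds and, when targets are present, compiles the model lazily; later calls only validate and normalize data.

x1, y1 = model._standardize_data(x_train, y_train)  # builds + compiles on first use
x2, y2 = model._standardize_data(x_val, y_val)      # already built: validation only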
Example #9
    def get_updates(self, loss, params):
        multiplied_grads_and_vars = []

        def _get_multiplier(name):
            # Return the multiplier whose key is a substring of the op name.
            for key, value in self.lr_multiplier.items():
                if key in name:
                    return value
            return None

        grads_and_vars = self.optimizer.compute_gradients(loss, params)
        base_lr = getattr(self.optimizer, '_lr')
        none_counts = 0
        for grad, var in grads_and_vars:
            multiplier = _get_multiplier(var.op.name)
            if grad is None:
                none_counts += 1
            if multiplier is not None:
                if grad is None:
                    raise ValueError('Requested a multiplier for a `None` gradient.')
                if callable(multiplier):
                    lr = multiplier(self.global_step, base_lr)
                elif not F.is_tensor(multiplier):
                    lr = array_ops.constant(multiplier) * base_lr
                else:
                    lr = multiplier * base_lr
                if isinstance(grad, fops.IndexedSlices):
                    tmp = grad.values * lr
                    grad = fops.IndexedSlices(
                        tmp, grad.indices, grad.dense_shape)
                else:
                    grad *= lr
            multiplied_grads_and_vars.append((grad, var))
        if none_counts == len(multiplied_grads_and_vars):
            raise ValueError(
                "No gradients provided for any variable, check your graph for ops"
                " that do not support gradients, between variables %s and loss %s." %
                ([str(v) for _, v in grads_and_vars], loss))
        opt_update = self.optimizer.apply_gradients(
            multiplied_grads_and_vars, global_step=self.global_step)
        return [opt_update]
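A configuration sketch for lr_multiplier (keys and values invented): keys are matched as substrings of the variable op name; a plain float scales the base rate, while a callable receives (global_step, base_lr):

lr_multiplier = {
    'embedding': 0.1,                               # constant scale
    'dense': lambda step, base_lr: base_lr * 0.5,   # schedule-style callable
}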
Example #10
 def _compile_metrics(self, metrics):
     """
     Compile metrics to desired format
         each output map with a list of metrics
         item inside metrics can be an instance of `training.Metric` or a tensor
     Note:
         when metrics if class-format, we will do formation check between metrics
         and `self.outputs` to make sure enough number of metrics to compatible with
         `self.outputs` and `self.targets`
         when metrics if tensor-format, we will not do formation check, cause metric
         calculation already handled by users themselves inside `model_fn`
     :param metrics: None or a nested list or dict
     """
     logging.info("=>Compiling metrics...")
     is_tensor = False
     if not metrics:
         metrics = [[] for _ in self.outputs]  # independent lists, not aliases
     elif isinstance(metrics, list):
         if not F.is_tensor(metrics[0]):
             if len(metrics) != len(self.outputs):
                 raise ValueError("Number of metric inside `metrics`"
                                  " %d is not compatible with number"
                                  " of `self.outputs` %d" % (
                                      len(metrics), len(self.outputs)))
         else:
             is_tensor = True
             metrics = [('metric_%d' % (i+1), m) for i, m in enumerate(metrics)]
     elif isinstance(metrics, dict):
         if not F.is_tensor(next(iter(metrics.values()))):
             metrics = [metrics.get(name, [])
                        for name in self.output_names]
         else:
             is_tensor = True
             metrics = list(metrics.items())
     else:
         raise TypeError("Unexpected type of metrics: " + str(type(metrics)))
     with ops.name_scope('compile_metric'):
         if is_tensor:
             self._compile_metric_tensors(metrics)
         else:
             # Must handle sparse situation carefully!
             def _compile_metric(m, loss_fn):
                 if isinstance(loss_fn, losses.SparseCategoricalCrossEntropy):
                     if m in {'accuracy', 'acc'}:
                         m = metric_module.SparseCategoricalAccuracy()
                         return m
                 m = metric_module.get(m)
                 return m
             metric_tensors = []
             for i in range(len(self.outputs)):
                 if i in self._skip_target_indices:
                     continue
                 target = self.targets[i]
                 output = self.outputs[i]
                 output_metrics = to_list(metrics[i])
                 loss_function = self.loss_functions[i]
                 for j, metric in enumerate(output_metrics):
                     metric = _compile_metric(metric, loss_function)
                     metric_name = getattr(metric, 'name', 'metric_%d' % j)
                     metric_result = metric(target, output)
                     if len(self.output_names) > 1:
                         metric_name = self.output_names[i] + '_' + metric_name
                     metric_tensors.append((metric_name, metric_result))
             self._compile_metric_tensors(metric_tensors)
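A sketch of the two accepted metric formats (names invented):

# Class/string format: one entry per model output; the layout is checked.
metrics = {'main_output': ['accuracy'], 'aux_output': []}
# Tensor format: precomputed tensors from model_fn; no layout check,
# and dict keys become the reported metric names.
# metrics = {'my_iou': iou_tensor}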