Example #1
    def __init__(self,
                 dataset,
                 dataset_sink_mode=True,
                 sink_size=-1,
                 epoch_num=1,
                 iter_first_order=1):
        dataset_sink_mode = Validator.check_bool(dataset_sink_mode)
        Validator.check_is_int(sink_size)
        if sink_size < -1 or sink_size == 0:
            raise ValueError(
                "The sink_size must be -1 or positive, but got sink_size {}.".
                format(sink_size))

        if dataset_sink_mode:
            if context.get_context("device_target") == "Ascend":
                iterclass = _DatasetIterMSLoopSink
                self.iter = iterclass(dataset, sink_size, epoch_num,
                                      iter_first_order)
            elif context.get_context("device_target") == "GPU":
                iterclass = _DatasetIterMS
                self.iter = iterclass(dataset, sink_size, epoch_num)
            elif context.get_context("device_target") == "CPU":
                raise RuntimeError(
                    "Currently dataset sink mode is not supported when the device target is CPU."
                )
Example #2
    def __init__(self, dataset, dataset_sink_mode=True, sink_size=-1, epoch_num=1):
        dataset_sink_mode = Validator.check_bool(dataset_sink_mode)
        Validator.check_is_int(sink_size)
        if sink_size < -1 or sink_size == 0:
            raise ValueError("The sink_size must be -1 or positive, but got sink_size {}.".format(sink_size))
        if sink_size == -1:
            sink_size = dataset.get_dataset_size()

        if dataset_sink_mode:
            if context.get_context("enable_ge"):
                iterclass = _DatasetIterGE
            else:
                if context.get_context("mode") == context.GRAPH_MODE:
                    ms_role = os.getenv("MS_ROLE")
                    if ms_role in ("MS_PSERVER", "MS_SCHED"):
                        iterclass = _DatasetIterPSServer
                    elif ms_role == "MS_WORKER":
                        iterclass = _DatasetIterPSWork
                    elif (context.get_context("device_target") == "Ascend") or \
                         (context.get_context("device_target") == "GPU"):
                        iterclass = _DatasetIterMSLoopSink
                    elif context.get_context("device_target") == "CPU":
                        raise RuntimeError(
                            "Currently dataset sink mode is not supported when the device target is CPU.")
                else:
                    iterclass = _DatasetIterPyNative
            self.iter = iterclass(dataset, sink_size, epoch_num)
        else:
            iterclass = _DatasetIterNormal
            self.iter = iterclass(dataset, epoch_num=epoch_num)
Example #3
    def __init__(self,
                 dataset,
                 dataset_sink_mode=True,
                 sink_size=-1,
                 epoch_num=1,
                 iter_first_order=0):
        dataset_sink_mode = Validator.check_bool(dataset_sink_mode)
        Validator.check_is_int(sink_size)
        if sink_size < -1 or sink_size == 0:
            raise ValueError(
                "The sink_size must be -1 or positive, but got sink_size {}.".
                format(sink_size))

        if dataset_sink_mode:
            if context.get_context("enable_ge"):
                iterclass = _DatasetIterGE
            else:
                if context.get_context("device_target") == "Ascend":
                    iterclass = _DatasetIterMSLoopSink
                elif context.get_context("device_target") == "GPU":
                    ms_role = os.getenv("MS_ROLE")
                    if ms_role in ("MS_PSERVER", "MS_SCHED"):
                        iterclass = _DatasetIterPSLite
                    else:
                        iterclass = _DatasetIterMS
                elif context.get_context("device_target") == "CPU":
                    raise RuntimeError(
                        "Currently dataset sink mode is not supported when the device target is CPU."
                    )
            self.iter = iterclass(dataset, sink_size, epoch_num,
                                  iter_first_order)
        else:
            iterclass = _DatasetIterNormal
            self.iter = iterclass(dataset)
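
Examples #1-#3 share the same argument-validation preamble before an iterator class is chosen. Below is a minimal standalone sketch of that preamble, with plain Python checks standing in for Validator.check_bool and Validator.check_is_int; the helper name _validate_sink_args is hypothetical and only illustrates the checks the snippets above perform.

def _validate_sink_args(dataset_sink_mode, sink_size):
    # Stand-in for Validator.check_bool: only genuine bools are accepted.
    if not isinstance(dataset_sink_mode, bool):
        raise TypeError("dataset_sink_mode must be a bool, but got {}.".format(type(dataset_sink_mode)))
    # Stand-in for Validator.check_is_int: bool is rejected even though it subclasses int.
    if isinstance(sink_size, bool) or not isinstance(sink_size, int):
        raise TypeError("sink_size must be an int, but got {}.".format(type(sink_size)))
    # Same range rule as Examples #1-#3: only -1 or a positive integer is allowed.
    if sink_size < -1 or sink_size == 0:
        raise ValueError("The sink_size must be -1 or positive, but got sink_size {}.".format(sink_size))
    return dataset_sink_mode, sink_size

_validate_sink_args(True, -1)      # passes
# _validate_sink_args(True, 0)     # would raise ValueError, as in the snippets above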
Example #4
    def train(self,
              epoch,
              train_dataset,
              callbacks=None,
              dataset_sink_mode=True,
              sink_size=-1):
        """
        Training API where the iteration is controlled by the Python front end.

        When PyNative mode is set, training is performed without dataset sinking.

        Note:
            CPU is not supported when dataset_sink_mode is True.
            If dataset_sink_mode is True, the training epoch count should equal the repeat count
            used in dataset processing; otherwise errors can occur because the amount of data
            does not match what training requires.
            If dataset_sink_mode is True, data is sent to the device. If the device is Ascend,
            data features are transferred one by one, and each transfer is limited to 256 MB.

        Args:
            epoch (int): Total number of iterations on the data.
            train_dataset (Dataset): A training dataset iterator. If there is no
                                     loss_fn, a tuple with multiple data items (data1, data2, data3, ...) should be
                                     returned and passed to the network. Otherwise, a tuple (data, label) should
                                     be returned, and the data and label are passed to the network and loss
                                     function respectively.
            callbacks (list): List of callback objects to be executed while training. Default: None.
            dataset_sink_mode (bool): Determines whether to pass the data through the dataset channel.
                                      Default: True. In PyNative mode, training is performed without
                                      dataset sinking.
            sink_size (int): Controls the amount of data sunk in each sink.
                             If sink_size=-1, sink the complete dataset in each epoch.
                             If sink_size>0, sink sink_size pieces of data in each epoch.
                             If dataset_sink_mode is False, sink_size is ignored. Default: -1.

        Examples:
            >>> dataset = get_dataset()
            >>> net = Net()
            >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
            >>> loss_scale_manager = FixedLossScaleManager()
            >>> optim = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
            >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None, loss_scale_manager=loss_scale_manager)
            >>> model.train(2, dataset)
        """
        dataset_sink_mode = Validator.check_bool(dataset_sink_mode)
        Validator.check_is_int(sink_size)
        if sink_size < -1 or sink_size == 0:
            raise ValueError(
                "The sink_size must be -1 or positive, but got sink_size {}.".
                format(sink_size))

        _device_number_check(self._parallel_mode, self._device_number)
        _parameter_broadcast_check(self._parallel_mode,
                                   self._parameter_broadcast)

        self._train(epoch,
                    train_dataset,
                    callbacks=callbacks,
                    dataset_sink_mode=dataset_sink_mode,
                    sink_size=sink_size)
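
The docstring above describes sink_size, but its embedded example only calls model.train(2, dataset). A hedged variant of that example is shown below; it reuses the same hypothetical get_dataset()/Net() setup and assumes an Ascend or GPU target, since CPU does not support sink mode.

# Sink only 100 batches per epoch instead of the full dataset; sink_size is
# ignored when dataset_sink_mode is False.
model.train(2, dataset, dataset_sink_mode=True, sink_size=100)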
Example #5
    def __init__(self, layer, time_axis, reshape_with_axis=None):
        if not isinstance(layer, (Cell, Primitive)):
            raise TypeError(
                "Please initialize TimeDistributed with mindspore.nn.Cell or "
                "mindspore.ops.Primitive instance. You passed: {input}".format(
                    input=layer))
        super(TimeDistributed, self).__init__()
        Validator.check_is_int(time_axis)
        if reshape_with_axis is not None:
            Validator.check_is_int(reshape_with_axis)
        self.layer = layer
        self.time_axis = time_axis
        self.reshape_with_axis = reshape_with_axis
        self.transpose = Transpose()
        self.reshape = Reshape()
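
A hedged usage sketch for the constructor above, assuming the class is exposed as mindspore.nn.TimeDistributed and applies the wrapped layer across the given time axis (the shapes are illustrative only):

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor

# Apply a Dense(3 -> 6) layer to every step along time_axis=1 of a
# (batch, time, feature) input; reshape_with_axis=0 lets the wrapper merge
# batch and time into one reshape instead of looping over time steps.
inner = nn.Dense(3, 6)
net = nn.TimeDistributed(inner, time_axis=1, reshape_with_axis=0)
x = Tensor(np.random.rand(2, 5, 3).astype(np.float32))
out = net(x)  # expected shape: (2, 5, 6)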
Example #6
def test_check_is_int5():
    with pytest.raises(TypeError):
        Validator.check_is_int(True)
    with pytest.raises(TypeError):
        Validator.check_is_int(False)
Example #7
def test_check_int3():
    with pytest.raises(TypeError):
        Validator.check_is_int("str")
Example #8
def test_check_int2():
    with pytest.raises(TypeError):
        Validator.check_is_int(3.3)
Example #9
def test_check_int1():
    a = np.random.randint(-100, 100)
    assert Validator.check_is_int(a) == a
Example #10
def _check_is_int(arg_value, arg_name, op_name):
    arg_value = validator.check_is_int(arg_value, arg_name, op_name)
    return arg_value
Example #11
def triu(x_shape, x_dtype, k):
    Validator.check_int(len(x_shape), 1, Rel.GE, "x rank", "triu")
    Validator.check_is_int(k, "k value", "triu")
    mask = np.triu(np.ones(x_shape), k)
    return Tensor(mask, x_dtype)
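
A hedged call sketch for the triu helper above, reusing the imports the snippet itself relies on (np, Tensor, Validator, Rel) plus MindSpore dtypes; the resulting values follow directly from np.triu:

import mindspore.common.dtype as mstype

# Upper-triangular mask of a 3x4 matrix that keeps the main diagonal (k=0).
mask = triu((3, 4), mstype.float32, 0)
# mask.asnumpy() ->
# [[1. 1. 1. 1.]
#  [0. 1. 1. 1.]
#  [0. 0. 1. 1.]]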
Example #12
def test_check_int_1():
    assert Validator.check_is_int(3) == 3
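
Taken together, the tests in Examples #6-#9 and #12 pin down the contract of Validator.check_is_int: plain ints pass through unchanged, while bool, float and str raise TypeError. Below is a minimal sketch exercising that contract directly; the import path is an assumption based on where MindSpore versions of this era expose Validator.

from mindspore._checkparam import Validator

# A plain int is returned unchanged (Examples #9 and #12).
assert Validator.check_is_int(3) == 3

# bool, float and str are rejected, even though bool subclasses int (Examples #6-#8).
for bad in (True, False, 3.3, "str"):
    try:
        Validator.check_is_int(bad)
    except TypeError:
        pass
    else:
        raise AssertionError("expected TypeError for {!r}".format(bad))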