Example 1
    def __init__(self,
                 dataset,
                 dataset_sink_mode=True,
                 sink_size=-1,
                 epoch_num=1,
                 iter_first_order=0):
        """Select and build the dataset iterator for the current backend.

        Args:
            dataset (Dataset): the dataset to iterate over.
            dataset_sink_mode (bool): whether data is passed through the device
                dataset channel. Default: True.
            sink_size (int): amount of data to sink per step; -1 means the whole
                dataset each epoch. Must be -1 or positive. Default: -1.
            epoch_num (int): number of epochs the iterator covers. Default: 1.
            iter_first_order (int): forwarded to the sink iterator
                (second-order-optimizer support — see iterator classes). Default: 0.

        Raises:
            ValueError: if sink_size is 0 or less than -1.
            RuntimeError: if sink mode is requested on an unsupported device target.
        """
        check_bool(dataset_sink_mode)
        check_int(sink_size)
        if sink_size < -1 or sink_size == 0:
            raise ValueError(
                "The sink_size must be -1 or positive, but got sink_size {}.".
                format(sink_size))

        if not dataset_sink_mode:
            # Non-sink mode: plain host-side iteration.
            self.iter = _DatasetIterNormal(dataset)
            return

        if context.get_context("enable_ge"):
            iterclass = _DatasetIterGE
        else:
            device_target = context.get_context("device_target")
            if device_target == "Ascend":
                iterclass = _DatasetIterMSLoopSink
            elif device_target == "GPU":
                # Parameter-server / scheduler roles use a lightweight iterator.
                if os.getenv("MS_ROLE") in ("MS_PSERVER", "MS_SCHED"):
                    iterclass = _DatasetIterPSLite
                else:
                    iterclass = _DatasetIterMS
            elif device_target == "CPU":
                raise RuntimeError(
                    "Currently dataset sink mode is not supported when the device target is CPU."
                )
            else:
                # Previously an unrecognized target fell through silently and the
                # call below crashed with UnboundLocalError; fail explicitly instead.
                raise RuntimeError(
                    "Dataset sink mode is not supported on device target {}.".format(
                        device_target))
        self.iter = iterclass(dataset, sink_size, epoch_num,
                              iter_first_order)
Example 2
    def train(self,
              epoch,
              train_dataset,
              callbacks=None,
              dataset_sink_mode=True,
              sink_size=-1):
        """Training API where the iteration is controlled by the Python front-end.

        In pynative mode the training process runs with the dataset not sunk.

        Note:
            CPU is not supported when dataset_sink_mode is True.
            With dataset_sink_mode True, the training epoch count should match
            the repeat count in the dataset pipeline, otherwise the amount of
            data will not match what training requires. In that mode data is
            sent to the device; on Ascend, features are transferred one by one
            with a 256M-per-transfer limit.

        Args:
            epoch (int): Total number of iterations on the data.
            train_dataset (Dataset): A training dataset iterator. Without a
                loss_fn, it should yield a tuple of data items passed directly
                to the network; otherwise a (data, label) tuple routed to the
                network and loss function respectively.
            callbacks (list): Callback objects executed while training.
                Default: None.
            dataset_sink_mode (bool): Whether data goes through the dataset
                channel. Default: True. Ignored (no sinking) in pynative mode.
            sink_size (int): Amount of data sunk per epoch: -1 sinks the whole
                dataset, a positive value sinks that many items. Meaningless
                when dataset_sink_mode is False. Default: -1.

        Examples:
            >>> dataset = get_dataset()
            >>> net = Net()
            >>> loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
            >>> loss_scale_manager = FixedLossScaleManager()
            >>> optim = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
            >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None, loss_scale_manager=loss_scale_manager)
            >>> model.train(2, dataset)
        """
        # Validate the sink arguments before touching any device state.
        check_bool(dataset_sink_mode)
        check_int(sink_size)
        sink_size_is_invalid = sink_size == 0 or sink_size < -1
        if sink_size_is_invalid:
            raise ValueError(
                "The sink_size must be -1 or positive, but got sink_size {}.".
                format(sink_size))

        # Sanity-check the distributed configuration.
        _device_number_check(self._parallel_mode, self._device_number)
        _parameter_broadcast_check(self._parallel_mode,
                                   self._parameter_broadcast)

        # Delegate the actual loop to the internal implementation.
        self._train(epoch,
                    train_dataset,
                    callbacks=callbacks,
                    dataset_sink_mode=dataset_sink_mode,
                    sink_size=sink_size)
Example 3
    def __init__(self,
                 dataset,
                 dataset_sink_mode=True,
                 sink_size=-1,
                 epoch_num=1,
                 iter_first_order=1):
        """Build the dataset sink iterator matching the current device target.

        Args:
            dataset (Dataset): the dataset to iterate over.
            dataset_sink_mode (bool): whether data goes through the device
                dataset channel. Default: True.
            sink_size (int): data sunk per step; -1 means the whole dataset.
                Must be -1 or positive. Default: -1.
            epoch_num (int): number of epochs covered. Default: 1.
            iter_first_order (int): forwarded to the Ascend loop-sink iterator.
                Default: 1.
        """
        check_bool(dataset_sink_mode)
        check_int(sink_size)
        if sink_size == 0 or sink_size < -1:
            raise ValueError(
                "The sink_size must be -1 or positive, but got sink_size {}.".
                format(sink_size))

        # NOTE(review): when dataset_sink_mode is False, self.iter is never
        # assigned here — presumably callers handle the non-sink case
        # elsewhere; confirm before relying on self.iter existing.
        if dataset_sink_mode:
            target = context.get_context("device_target")
            if target == "Ascend":
                self.iter = _DatasetIterMSLoopSink(dataset, sink_size,
                                                   epoch_num, iter_first_order)
            elif target == "GPU":
                self.iter = _DatasetIterMS(dataset, sink_size, epoch_num)
            elif target == "CPU":
                raise RuntimeError(
                    "Currently dataset sink mode is not supported when the device target is CPU."
                )
Example 4
def test_check_int_5():
    """Plain ints pass check_int; bools are rejected despite subclassing int."""
    for accepted in (0, 1):
        check_int(accepted)
    for rejected in (True, False):
        with pytest.raises(TypeError):
            check_int(rejected)
Example 5
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 pad_mode,
                 padding,
                 dilation,
                 group,
                 has_bias,
                 weight_init,
                 bias_init):
        """Validate convolution arguments and create the weight/bias parameters.

        Args:
            in_channels (int): positive input channel count; must be divisible
                by `group`.
            out_channels (int): positive output channel count; must be
                divisible by `group`.
            kernel_size (tuple[int, int]): kernel (height, width), each >= 1.
            stride (int): positive convolution stride.
            pad_mode (str): padding mode, stored as-is.
            padding (int): non-negative padding amount.
            dilation (int): dilation factor (any int accepted by check_int).
            group (int): positive group count for grouped convolution.
            has_bias (bool): whether to create a bias parameter.
            weight_init: initializer spec for the weight parameter.
            bias_init: initializer spec for the bias; ignored (with a warning
                unless it is 'zeros') when has_bias is False.

        Raises:
            ValueError: if kernel_size is not a 2-tuple of ints >= 1, or if
                in_channels/out_channels are not divisible by group.
        """
        super(_Conv, self).__init__()
        self.in_channels = check_int_positive(in_channels)
        self.out_channels = check_int_positive(out_channels)
        self.kernel_size = kernel_size
        self.stride = check_int_positive(stride)
        self.pad_mode = pad_mode
        self.padding = check_int_non_negative(padding)
        self.dilation = check_int(dilation)
        self.group = check_int_positive(group)
        self.has_bias = has_bias
        # Only a 2-tuple of positive ints is accepted; scalar kernel sizes must
        # be normalized by the caller. The previous message wrongly claimed a
        # plain int was acceptable here.
        if (not isinstance(kernel_size, tuple)) or len(kernel_size) != 2 or \
            (not isinstance(kernel_size[0], int)) or (not isinstance(kernel_size[1], int)) or \
                kernel_size[0] < 1 or kernel_size[1] < 1:
            raise ValueError("Attr 'kernel_size' of 'Conv2D' Op passed "
                             + str(self.kernel_size)
                             + ", should be a tuple of two int numbers, each equal to or greater than 1.")
        if in_channels % group != 0:
            raise ValueError("Attr 'in_channels' of 'Conv2D' Op must be divisible by "
                             "attr 'group' of 'Conv2D' Op.")
        if out_channels % group != 0:
            raise ValueError("Attr 'out_channels' of 'Conv2D' Op must be divisible by "
                             "attr 'group' of 'Conv2D' Op.")

        # Weight layout: (out_channels, in_channels per group, kh, kw).
        self.weight = Parameter(initializer(weight_init, [out_channels, in_channels // group, *kernel_size]),
                                name='weight')

        if check_bool(has_bias):
            self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias')
        else:
            if bias_init != 'zeros':
                logger.warning("Value of 'has_bias' is False, value of 'bias_init' will be ignored.")
            self.bias = None
Example 6
def test_check_int_4():
    """check_int must reject a bool even though bool subclasses int."""
    bad_value = True
    with pytest.raises(TypeError):
        check_int(bad_value)
Example 7
def test_check_int_3():
    """check_int must reject a string argument."""
    bad_value = "str"
    with pytest.raises(TypeError):
        check_int(bad_value)
Example 8
def test_check_int_2():
    """check_int must reject a float argument."""
    bad_value = 3.3
    with pytest.raises(TypeError):
        check_int(bad_value)
Example 9
def test_check_int_1():
    """check_int returns its argument unchanged for a valid int."""
    value = 3
    assert check_int(value) == value