コード例 #1
0
ファイル: variable.py プロジェクト: tsurumeso/chainer
    def initialize(self, shape):
        """Initializes the uninitialized variable.

        An uninitialized variable holds ``None`` as its data array.  This
        method materializes the data array (and, when a gradient
        initializer was recorded, the gradient array), so the shape may
        stay unknown until the first use.

        Args:
            shape (tuple of int): Shape of the data array.

        """
        # Pick the array module matching the backend recorded at creation.
        if self._initial_backend == 'cuda':
            xp = cuda.cupy
        else:
            xp = numpy
        with cuda.get_device_from_id(self._initial_device):
            data = initializers.generate_array(self.initializer, shape, xp)
            ginit = self._grad_initializer
            if ginit is None:
                grad = None
            else:
                grad = initializers.generate_array(ginit, shape, xp)

        self.data = data
        self.grad = grad

        # iDeep needs an explicit conversion of the freshly created arrays.
        if self._initial_backend == 'intel64':
            self.to_intel64()
コード例 #2
0
    def initialize(self, shape):
        """Initializes the uninitialized variable.

        A variable constructed without data holds ``None`` for its array;
        calling this method allocates the array (and optionally the
        gradient) with the given shape.

        Args:
            shape (tuple of int): Shape of the data array.

        """
        xp = cuda.cupy if self._initial_backend == 'cuda' else numpy
        with cuda.get_device_from_id(self._initial_device):
            data = initializers.generate_array(self.initializer, shape, xp)
            ginit = self._grad_initializer
            grad = (initializers.generate_array(ginit, shape, xp)
                    if ginit is not None else None)

        self.array = data
        self.grad = grad

        # The iDeep backend requires converting the arrays after creation.
        if self._initial_backend == 'intel64':
            self.to_intel64()
コード例 #3
0
    def __init__(self,
                 sample_rate=16000,
                 window_size_sec=0.02,
                 window_stride_sec=0.01,
                 n_fft=512,
                 n_filters=64,
                 preemph=0.97,
                 dither=1.0e-05):
        """Mel-spectrogram extractor configured like NVIDIA NeMo's.

        Args:
            sample_rate (int): Audio sampling rate in Hz.
            window_size_sec (float): STFT window length, in seconds.
            window_stride_sec (float): STFT hop length, in seconds.
            n_fft (int): FFT size.
            n_filters (int): Number of mel filter banks.
            preemph (float): Pre-emphasis coefficient.
            dither (float): Dither amplitude.
        """
        super(NemoMelSpecExtractor, self).__init__()
        # Floor added before taking log to avoid log(0); 2**-24 is NeMo's
        # default guard value.
        self.log_zero_guard_value = 2**-24
        win_length = int(window_size_sec * sample_rate)
        self.hop_length = int(window_stride_sec * sample_rate)
        self.n_filters = n_filters

        from scipy import signal as scipy_signal
        from librosa import stft as librosa_stft
        window_arr = scipy_signal.hann(win_length, sym=True)
        # Bind STFT parameters once; self.stft(x) then only takes the signal.
        self.stft = lambda x: librosa_stft(x,
                                           n_fft=n_fft,
                                           hop_length=self.hop_length,
                                           win_length=win_length,
                                           window=window_arr,
                                           center=True)

        self.dither = dither
        self.preemph = preemph

        # Frame-count padding alignment (used by the forward pass elsewhere).
        self.pad_align = 16

        from librosa.filters import mel as librosa_mel
        # NOTE(review): positional (sample_rate, n_fft) works on librosa<0.10;
        # newer librosa requires keyword arguments (sr=..., n_fft=...) —
        # confirm the pinned librosa version.
        self.fb_arr = librosa_mel(sample_rate,
                                  n_fft,
                                  n_mels=n_filters,
                                  fmin=0,
                                  fmax=(sample_rate / 2))

        with self.init_scope():
            # Persistent buffers registered as zeros; presumably overwritten
            # with window_arr / fb_arr when weights are loaded — TODO confirm.
            self.window = initializers.generate_array(initializer=Constant(
                0, dtype="float32"),
                                                      shape=window_arr.shape,
                                                      xp=self.xp,
                                                      dtype=np.float32)
            self.register_persistent("window")

            self.fb = initializers.generate_array(
                initializer=Constant(0, dtype="float32"),
                shape=np.expand_dims(self.fb_arr, axis=0).shape,
                xp=self.xp,
                dtype="float32")
            self.register_persistent("fb")
コード例 #4
0
 def _init_array(self, initializer, default_value, size):
     """Create a state array of ``size`` on this link's device.

     Falls back to ``default_value`` when no initializer is given; the
     array uses the link's high-precision dtype.
     """
     chosen = default_value if initializer is None else initializer
     init = initializers._get_initializer(chosen)
     return initializers.generate_array(
         init, size, self.xp, dtype=self._highprec_dtype,
         device=self.device)
コード例 #5
0
ファイル: test_uniform.py プロジェクト: zwcdp/chainer
 def check_shaped_initializer(self, backend_config):
     """The generated array must live on the backend's array module and
     have the requested shape and dtype."""
     init = self.target(dtype=self.dtype, **self.target_kwargs)
     backend_xp = backend_config.xp
     arr = initializers.generate_array(init, self.shape, backend_xp)
     self.assertIs(backend.get_array_module(arr), backend_xp)
     self.assertTupleEqual(arr.shape, self.shape)
     self.assertEqual(arr.dtype, self.dtype)
コード例 #6
0
    def __init__(self,
                 in_channels,
                 out_channels,
                 ksize,
                 stride,
                 pad,
                 groups):
        """CondenseNet 'complex' convolution unit: BN -> ReLU -> grouped
        conv -> channel shuffle, plus a persistent channel-selection index.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            ksize (int or tuple): Convolution kernel size.
            stride (int or tuple): Convolution stride.
            pad (int or tuple): Convolution padding.
            groups (int): Number of convolution groups.
        """
        super(CondenseComplexConv, self).__init__()
        with self.init_scope():
            self.bn = L.BatchNormalization(size=in_channels)
            self.activ = F.relu
            self.conv = L.Convolution2D(
                in_channels=in_channels,
                out_channels=out_channels,
                ksize=ksize,
                stride=stride,
                pad=pad,
                nobias=True,
                groups=groups)
            self.c_shuffle = ChannelShuffle(
                channels=out_channels,
                groups=groups)

            # Persistent (saved but non-trainable) per-channel indices,
            # zero-initialized; presumably filled when pretrained weights
            # are loaded — TODO confirm against the loader.
            self.index = initializers.generate_array(
                initializer=initializers._get_initializer(0),
                shape=(in_channels,),
                xp=self.xp,
                dtype=np.int32)
            self.register_persistent('index')
コード例 #7
0
 def _init_array(self, initializer, default_value, size):
     """Generate a device-resident array for a link state, substituting
     ``default_value`` when ``initializer`` is ``None``."""
     if initializer is None:
         initializer = default_value
     return initializers.generate_array(
         initializers._get_initializer(initializer), size, self.xp,
         dtype=self._highprec_dtype, device=self.device)
コード例 #8
0
ファイル: variable.py プロジェクト: tsurumeso/chainer
    def __init__(self, initializer=None, shape=None, name=None):
        """Parameter constructor with three initialization modes.

        Args:
            initializer: ``None`` (NaN-filled), a scalar (constant-filled),
                an initializer object, or a concrete array.
            shape (tuple of int or None): If ``None`` and ``initializer`` is
                not an array, the parameter stays uninitialized until
                :meth:`initialize` is called with a shape.
            name (str or None): Name of the parameter.
        """
        if initializer is None:
            initializer = constant.NaN()
        elif numpy.isscalar(initializer):
            initializer = constant.Constant(initializer)
        if shape is None:
            if isinstance(initializer, (numpy.ndarray, cuda.ndarray)):
                # parameter initialized by the initial array
                super(Parameter, self).__init__(initializer, name=name)
            else:
                # uninitialized parameter
                super(Parameter, self).__init__(name=name)
                # Gradient will be NaN-filled on initialization, matching
                # the initializer's dtype when it declares one.
                dtype = getattr(initializer, 'dtype', None)
                self._grad_initializer = constant.NaN(dtype)
        else:
            # parameter initialized with a given shape
            if isinstance(initializer, (numpy.ndarray, cuda.ndarray)):
                xp = cuda.get_array_module(initializer)
                initializer = constant.Constant(initializer)
            else:
                xp = numpy
            data = initializers.generate_array(initializer, shape, xp)
            # Gradient starts as NaN so uninitialized reads are detectable.
            grad = xp.full_like(data, numpy.nan)
            super(Parameter, self).__init__(data, name=name, grad=grad)

        self.update_rule = None
        self.initializer = initializer
コード例 #9
0
def load_pretrained_embedding_layer(id2unigram,
                                    embed,
                                    external_model,
                                    finetuning=False):
    """Overwrite ``embed.W`` using vectors from ``external_model``.

    Tokens found in the external word-vector model get their pretrained
    vector; unknown tokens get N(0, 1) samples when ``finetuning`` is
    true, zeros otherwise.
    """
    xp = cuda.get_array_module(embed.W)
    n_vocab = len(id2unigram)
    dim = external_model.wv.syn0[0].shape[0]
    initialW = initializers.normal.Normal(1.0)

    weight = []
    count = 0
    for idx in range(n_vocab):
        token = id2unigram[idx]
        if token in external_model.wv.vocab:
            weight.append(external_model.wv[token])
            count += 1
        elif finetuning:
            weight.append(initializers.generate_array(initialW, (dim, ), xp))
        else:
            weight.append(xp.zeros(dim, dtype='f'))

    stacked = xp.reshape(weight, (n_vocab, dim))
    embed.W = chainer.Parameter(initializer=stacked)

    if count >= 1:
        print('Use {} pretrained embedding vectors\n'.format(count),
              file=sys.stderr)
コード例 #10
0
ファイル: variable.py プロジェクト: watarufk/chainer
    def __init__(self, initializer=None, shape=None, name=None):
        """Parameter constructor with three initialization modes.

        Args:
            initializer: ``None`` (NaN-filled), a scalar (constant-filled),
                an initializer object, or a concrete array.
            shape (tuple of int or None): If ``None`` and ``initializer`` is
                not an array, the parameter stays uninitialized until a
                shape is supplied later.
            name (str or None): Name of the parameter.
        """
        if initializer is None:
            initializer = constant.NaN()
        elif numpy.isscalar(initializer):
            initializer = constant.Constant(initializer)
        if shape is None:
            if isinstance(initializer, (numpy.ndarray, cuda.ndarray)):
                # parameter initialized by the initial array
                super(Parameter, self).__init__(initializer, name=name)
            else:
                # uninitialized parameter
                super(Parameter, self).__init__(name=name)
                # The initializer is kept so initialize() can use it later;
                # the gradient will be NaN-filled with a matching dtype.
                self.initializer = initializer
                dtype = getattr(initializer, 'dtype', numpy.float32)
                self._grad_initializer = constant.NaN(dtype)
        else:
            # parameter initialized with a given shape
            if isinstance(initializer, (numpy.ndarray, cuda.ndarray)):
                xp = cuda.get_array_module(initializer)
                initializer = constant.Constant(initializer)
            else:
                xp = numpy
            data = initializers.generate_array(initializer, shape, xp)
            # NaN gradient marks "not yet computed".
            grad = xp.full_like(data, numpy.nan)
            super(Parameter, self).__init__(data, name=name, grad=grad)

        self.update_rule = None
コード例 #11
0
def grow_embedding_layers_with_pretrained_model(n_vocab_org,
                                                n_vocab_grown,
                                                rand_embed,
                                                pretrained_embed,
                                                external_model,
                                                id2unigram_grown,
                                                train=False):
    """Grow both the random and the pretrained embedding matrices to cover
    newly added vocabulary entries (ids ``n_vocab_org .. n_vocab_grown-1``).

    New random rows are N(0, 1) samples when ``train`` is true, otherwise a
    copy of row 0 (the unknown-token row); new pretrained rows come from
    ``external_model`` when available and are zero otherwise.
    """
    diff = n_vocab_grown - n_vocab_org
    d_rand = rand_embed.W.shape[1]
    d_pretrained = pretrained_embed.W.shape[
        1]  #external_model.wv.syn0[0].shape[0]

    count = 0
    initialW = initializers.normal.Normal(1.0)
    w2_rand = []
    w2_pretrained = []

    for i in range(n_vocab_org, n_vocab_grown):
        if train:  # resume training
            # New rows are built on the CPU (np) and moved to GPU below.
            vec_rand = initializers.generate_array(initialW, (d_rand, ), np)
        else:  # test
            vec_rand = rand_embed.W[
                0].data  # use pretrained vector of unknown token
        w2_rand.append(vec_rand)

        key = id2unigram_grown[i]
        if key in external_model.wv.vocab:
            vec_pretrained = external_model.wv[key]
            count += 1
        else:
            vec_pretrained = np.zeros(d_pretrained, dtype='f')
        w2_pretrained.append(vec_pretrained)

    w2_rand = np.reshape(w2_rand, (diff, d_rand))
    if cuda.get_array_module(rand_embed.W) == cuda.cupy:
        w2_rand = chainer.Variable(w2_rand)
        w2_rand.to_gpu()
    w_rand = F.concat((rand_embed.W, w2_rand), axis=0)
    rand_embed.W = chainer.Parameter(initializer=w_rand.data, name='W')

    w2_pretrained = np.reshape(w2_pretrained, (diff, d_pretrained))
    # NOTE(review): this condition re-checks rand_embed.W (just replaced
    # above) rather than pretrained_embed.W — looks like a copy-paste;
    # confirm whether pretrained_embed.W was intended.
    if cuda.get_array_module(rand_embed.W) == cuda.cupy:
        w2_pretrained = chainer.Variable(w2_pretrained)
        w2_pretrained.to_gpu()
    w_pretrained = F.concat((pretrained_embed.W, w2_pretrained), 0)
    pretrained_embed.W = chainer.Parameter(initializer=w_pretrained.data,
                                           name='W')

    print('Grow embedding matrix: {} -> {}'.format(n_vocab_org,
                                                   rand_embed.W.shape[0]),
          file=sys.stderr)
    print('Grow pretrained embedding matrix: {} -> {}'.format(
        n_vocab_org, pretrained_embed.W.shape[0]),
          file=sys.stderr)
    if count >= 1:
        print('Add {} pretrained embedding vectors'.format(count),
              file=sys.stderr)
コード例 #12
0
 def check_shaped_initializer(self, xp):
     """Identity initializer must yield a scaled identity matrix of the
     requested shape and dtype."""
     init = initializers.Identity(scale=self.scale, dtype=self.dtype)
     arr = initializers.generate_array(init, self.shape, xp)
     self.assertIs(backend.get_array_module(arr), xp)
     self.assertTupleEqual(arr.shape, self.shape)
     self.assertEqual(arr.dtype, self.dtype)
     expected = self.scale * numpy.identity(len(self.shape))
     testing.assert_allclose(arr, expected, **self.check_options)
コード例 #13
0
 def check_shaped_initializer(self, xp):
     """Constant initializer must yield a uniformly filled array of the
     requested shape and dtype."""
     init = initializers.Constant(fill_value=self.fill_value,
                                  dtype=self.dtype)
     arr = initializers.generate_array(init, self.shape, xp)
     self.assertIs(backend.get_array_module(arr), xp)
     self.assertTupleEqual(arr.shape, self.shape)
     self.assertEqual(arr.dtype, self.dtype)
     expected = numpy.full(self.shape, self.fill_value)
     testing.assert_allclose(arr, expected, **self.check_options)
コード例 #14
0
ファイル: test_constant.py プロジェクト: Fhrozen/chainer
 def check_shaped_initializer(self, xp):
     """Generated Identity array matches scale, shape and dtype on ``xp``."""
     identity_init = initializers.Identity(
         scale=self.scale, dtype=self.dtype)
     generated = initializers.generate_array(
         identity_init, self.shape, xp)
     self.assertIs(cuda.get_array_module(generated), xp)
     self.assertTupleEqual(generated.shape, self.shape)
     self.assertEqual(generated.dtype, self.dtype)
     testing.assert_allclose(
         generated, self.scale * numpy.identity(len(self.shape)),
         **self.check_options)
コード例 #15
0
ファイル: test_constant.py プロジェクト: Fhrozen/chainer
 def check_shaped_initializer(self, xp):
     """Generated Constant array matches fill value, shape and dtype."""
     const_init = initializers.Constant(
         fill_value=self.fill_value, dtype=self.dtype)
     generated = initializers.generate_array(const_init, self.shape, xp)
     self.assertIs(cuda.get_array_module(generated), xp)
     self.assertTupleEqual(generated.shape, self.shape)
     self.assertEqual(generated.dtype, self.dtype)
     testing.assert_allclose(
         generated, numpy.full(self.shape, self.fill_value),
         **self.check_options)
コード例 #16
0
 def initialize_state(self, shape):
     """Allocate the recurrent state ``s`` (Normal-initialized) and the
     output ``y`` (zeros) for a batch of ``shape[0]`` samples."""
     xp = cuda.cupy if self.gpu else np
     batch = shape[0]
     self.s = initializers.generate_array(
         initializers.Normal(), (batch, self.n_out), xp, dtype=xp.float32)
     self.y = Variable(xp.zeros((batch, self.n_out), dtype=xp.float32))
コード例 #17
0
ファイル: variable.py プロジェクト: zox312/chainer
    def initialize(self, shape):
        """Initializes the uninitialized variable.

        The variable was created with its data array set to ``None``; this
        method generates the data (and, if configured, gradient) array on
        the device recorded at construction time.

        Args:
            shape (tuple of int): Shape of the data array.

        """
        # A recorded device id means the variable belongs on the GPU.
        on_gpu = self._initial_device is not None
        xp = cuda.cupy if on_gpu else numpy
        with cuda.get_device_from_id(self._initial_device):
            data = initializers.generate_array(self.initializer, shape, xp)
            ginit = self._grad_initializer
            if ginit is None:
                grad = None
            else:
                grad = initializers.generate_array(ginit, shape, xp)

        self._data[0] = data
        self._node._grad = grad
コード例 #18
0
    def __init__(self, units, in_units, drop_rate=0.5):
        """Condensed linear block: a Linear layer over a reduced input of
        ``int(in_units * drop_rate)`` features, selected via a persistent
        ``index`` array.

        Args:
            units (int): Number of output units.
            in_units (int): Number of original input units.
            drop_rate (float): Factor applied to ``in_units`` to obtain the
                Linear layer's input size.
        """
        super(CondenseLinear, self).__init__()
        reduced_units = int(in_units * drop_rate)
        with self.init_scope():
            self.dense = L.Linear(in_size=reduced_units, out_size=units)

            # Persistent, non-trainable selection indices, zero-initialized.
            zero_init = initializers._get_initializer(0)
            self.index = initializers.generate_array(
                initializer=zero_init,
                shape=(reduced_units, ),
                xp=self.xp,
                dtype=np.int32)
            self.register_persistent('index')
コード例 #19
0
ファイル: link.py プロジェクト: LiuFang816/SALSTM_py_data
    def add_param(self, name, shape, dtype=numpy.float32, initializer=None):
        """Registers a parameter to the link.

        The registered parameter is saved and loaded on serialization and
        deserialization, and involved in the optimization. The data and
        gradient of the variable are initialized by NaN arrays.
        If ``initializer`` is not ``None``, the data is initialized by
        ``initializer``.

        If the supplied ``name`` argument corresponds to an uninitialized
        parameter (that is, one that was added with the
        :meth:`add_uninitialized_param` method), ``name`` will be removed
        from the set of uninitialized parameters.

        The parameter is set to an attribute of the link with the given name.

        Args:
            name (str): Name of the parameter. This name is also used as the
                attribute name. Any uninitialized parameters with the same
                name will be removed.
            shape (int or tuple of ints): Shape of the parameter array.
            dtype: Data type of the parameter array.
            initializer(chainer.initializer.Initializer): If it is not
                ``None``, the data is initialized with the given initializer.
                Note that in this case ``dtype`` argument is ignored.

        """
        d = self.__dict__
        # Refuse to clobber an existing attribute of the same name.
        if name in d:
            raise AttributeError(
                'cannot register a new parameter %s: attribute exists'
                % name)
        if initializer is None:
            # Default: NaN-filled, so uninitialized reads are detectable.
            data = self.xp.full(shape, numpy.nan, dtype=dtype)
        else:
            data = initializers.generate_array(initializer, shape, self.xp)
        # If this name was declared via add_uninitialized_param, honor the
        # grad state (cleared/zeroed) recorded on the placeholder.
        u = self._uninitialized_params.get(name)
        if u is None:
            grad = self.xp.full_like(data, numpy.nan)
        else:
            if u._cleared:
                grad = None
            elif u._zeroed:
                grad = self.xp.zeros_like(data)
            else:
                grad = self.xp.full_like(data, numpy.nan)
        var = variable.Variable(data, volatile='auto', name=name)
        var.grad = grad
        self._params.append(name)
        d[name] = var
        # The placeholder entry is no longer needed once initialized.
        if name in self._uninitialized_params:
            del self._uninitialized_params[name]
コード例 #20
0
ファイル: variable.py プロジェクト: delta2323/chainer
    def initialize(self, shape):
        """Initializes the uninitialized variable.

        The data (and optional gradient) arrays are always generated on the
        host first and then moved to the GPU recorded at construction time,
        if any.

        Args:
            shape (tuple of int): Shape of the data array.

        """
        data = initializers.generate_array(self.initializer, shape, numpy)

        ginit = self._grad_initializer
        if ginit is None:
            grad = None
        else:
            grad = initializers.generate_array(ginit, shape, numpy)

        device = self._initial_device
        # A non-negative device id means the variable belongs on that GPU.
        if device >= 0:
            data = cuda.to_gpu(data, device=device)
            if grad is not None:
                grad = cuda.to_gpu(grad, device=device)

        self._data[0] = data
        self._node._grad = grad
コード例 #21
0
ファイル: variable.py プロジェクト: tomohideshibata/chainer
    def initialize(self, shape):
        """Initializes the uninitialized variable.

        Generates the data array (and the gradient array when a gradient
        initializer was recorded) on the CPU, then transfers both to the
        initially requested GPU device when one was given.

        Args:
            shape (tuple of int): Shape of the data array.

        """
        data = initializers.generate_array(self.initializer, shape, numpy)
        ginit = self._grad_initializer
        grad = (None if ginit is None
                else initializers.generate_array(ginit, shape, numpy))

        # Move to GPU when a non-negative device id was recorded.
        if self._initial_device >= 0:
            data = cuda.to_gpu(data, device=self._initial_device)
            grad = (None if grad is None
                    else cuda.to_gpu(grad, device=self._initial_device))

        self._data[0] = data
        self._node._grad = grad
コード例 #22
0
def grow_embedding_layer_without_pretrained_model(n_vocab_org,
                                                  n_vocab_grown,
                                                  rand_embed,
                                                  train=False):
    """Extend ``rand_embed.W`` with rows for new vocabulary entries.

    The appended rows are N(0, 1) samples when ``train`` is true and
    zero-filled otherwise; the grown matrix replaces ``rand_embed.W``.
    """
    xp = cuda.get_array_module(rand_embed.W)
    n_new = n_vocab_grown - n_vocab_org
    dim = rand_embed.W.shape[1]
    initialW = initializers.normal.Normal(1.0)

    if train:
        new_rows = initializers.generate_array(initialW, (n_new, dim), xp)
    else:
        new_rows = xp.zeros((n_new, dim), dtype='f')

    grown = F.concat((rand_embed.W, new_rows), axis=0)
    rand_embed.W = chainer.Parameter(initializer=grown.data, name='W')
    print('Grow embedding matrix: {} -> {}'.format(n_vocab_org,
                                                   rand_embed.W.shape[0]),
          file=sys.stderr)
コード例 #23
0
def grow_embedding_layer_with_pretrained_model(n_vocab_org,
                                               n_vocab_grown,
                                               rand_embed,
                                               external_model,
                                               id2unigram_grown,
                                               train=False,
                                               fasttext=False):
    """Grow ``rand_embed.W`` with one row per new vocabulary entry.

    New tokens found in ``external_model`` receive their pretrained vector;
    otherwise the row is an N(0, 1) sample when ``train`` is true, or a
    copy of row 0 (the unknown-token row) at test time.
    """
    diff = n_vocab_grown - n_vocab_org
    d_rand = rand_embed.W.shape[1]

    count = 0
    initialW = initializers.normal.Normal(1.0)
    w2_rand = []

    # [MEMO] the following error happened if fasttext=True:
    #   cupy.cuda.cudnn.CuDNNError: CUDNN_STATUS_INTERNAL_ERROR: b'CUDNN_STATUS_INTERNAL_ERROR'
    # For fastText the membership test is done on wv itself; presumably its
    # `in` operator covers subword-backed tokens — verify against gensim.
    wv_vocab = external_model.wv if fasttext else external_model.wv.vocab
    for i in range(n_vocab_org, n_vocab_grown):
        key = id2unigram_grown[i]
        if key in wv_vocab:
            vec_rand = external_model.wv[key]
            count += 1
        elif train:
            # New rows are built on the CPU (np) and moved to GPU below.
            vec_rand = initializers.generate_array(initialW, (d_rand, ), np)
        else:
            vec_rand = rand_embed.W[
                0].data  # use pretrained vector of unknown token
        w2_rand.append(vec_rand)

    w2_rand = np.reshape(w2_rand, (diff, d_rand))
    if cuda.get_array_module(rand_embed.W) == cuda.cupy:
        w2_rand = chainer.Variable(w2_rand)
        w2_rand.to_gpu()

    w_rand = F.concat((rand_embed.W, w2_rand), axis=0)
    rand_embed.W = chainer.Parameter(initializer=w_rand.data, name='W')

    print('Grow embedding matrix: {} -> {}'.format(n_vocab_org,
                                                   rand_embed.W.shape[0]),
          file=sys.stderr)
    if count >= 1:
        print('Add {} pretrained embedding vectors'.format(count),
              file=sys.stderr)
コード例 #24
0
 def check_shaped_initializer(self, xp):
     """Array from the target initializer matches shape and dtype."""
     init = self.target(scale=0.1, dtype=self.dtype)
     arr = initializers.generate_array(init, self.shape, xp)
     self.assertIs(cuda.get_array_module(arr), xp)
     self.assertTupleEqual(arr.shape, self.shape)
     self.assertEqual(arr.dtype, self.dtype)
コード例 #25
0
ファイル: test_init.py プロジェクト: asi1024/chainer
 def _generate_array(self, xp, dtype=None, device=None):
     """Generate a zero-dimensional array via the Zero initializer."""
     return initializers.generate_array(
         initializers.Zero(dtype), (), xp, device=device)
コード例 #26
0
ファイル: test_normal.py プロジェクト: Fhrozen/chainer
 def check_shaped_initializer(self, xp):
     """Verify module, shape and dtype of the generated array."""
     generated = initializers.generate_array(
         self.target(scale=0.1, dtype=self.dtype), self.shape, xp)
     self.assertIs(cuda.get_array_module(generated), xp)
     self.assertTupleEqual(generated.shape, self.shape)
     self.assertEqual(generated.dtype, self.dtype)
コード例 #27
0
ファイル: batch_normalization.py プロジェクト: mitmul/chainer
def _init_array(initializer, default_value, size, dtype):
    """Build a NumPy array of ``size``/``dtype``, falling back to
    ``default_value`` when ``initializer`` is ``None``."""
    chosen = default_value if initializer is None else initializer
    return initializers.generate_array(
        initializers._get_initializer(chosen), size, numpy, dtype=dtype)
コード例 #28
0
ファイル: test_orthogonal.py プロジェクト: jnishi/chainer
 def check_shaped_initializer(self, xp):
     """Orthogonal initializer yields an array of the requested spec."""
     arr = initializers.generate_array(
         initializers.Orthogonal(scale=2.0, dtype=self.dtype),
         self.shape, xp)
     self.assertIs(backend.get_array_module(arr), xp)
     self.assertTupleEqual(arr.shape, self.shape)
     self.assertEqual(arr.dtype, self.dtype)
コード例 #29
0
ファイル: test_init.py プロジェクト: zwh930712/chainer
 def _generate_array(self, xp, dtype=None, device=None):
     """Return an empty-shape array produced by the Zero initializer."""
     zero_init = initializers.Zero(dtype)
     return initializers.generate_array(zero_init, (), xp, device=device)
コード例 #30
0
ファイル: test_orthogonal.py プロジェクト: hvy/chainer
 def check_shaped_initializer(self, xp):
     """Target initializer's output matches the configured shape/dtype."""
     target_init = self.target(dtype=self.dtype, **self.target_kwargs)
     generated = initializers.generate_array(target_init, self.shape, xp)
     self.assertIs(backend.get_array_module(generated), xp)
     self.assertTupleEqual(generated.shape, self.shape)
     self.assertEqual(generated.dtype, self.dtype)
コード例 #31
0
ファイル: test_init.py プロジェクト: anaruse/chainer
 def _generate_array(self, dtype=None):
     """Create a scalar (shape ``()``) zero array on NumPy."""
     return initializers.generate_array(initializers.Zero(dtype), (), numpy)
コード例 #32
0
def _init_array(initializer, default_value, size, dtype):
    """Materialize a NumPy array for a link state.

    ``default_value`` is used whenever ``initializer`` is ``None``.
    """
    if initializer is None:
        initializer = default_value
    init = initializers._get_initializer(initializer)
    return initializers.generate_array(init, size, numpy, dtype=dtype)
コード例 #33
0
 def check_shaped_initializer(self, xp):
     """Orthogonal(scale=2.0) output has the configured shape and dtype."""
     w = initializers.generate_array(
         initializers.Orthogonal(scale=2.0, dtype=self.dtype),
         self.shape, xp)
     self.assertIs(backend.get_array_module(w), xp)
     self.assertTupleEqual(w.shape, self.shape)
     self.assertEqual(w.dtype, self.dtype)