def test_fill(self):
    """`fill` should overwrite every element with the requested constant."""
    for dtype in float_dtypes:
        w = T.variable([2, 3, 4], dtype=dtype, initializer=0.)
        # sanity check: variable starts out zero-filled
        assert_equal(w, T.full_like(w, 0.))
        tk.init.apply_initializer(w, partial(tk.init.fill, fill_value=123.))
        assert_equal(w, T.full_like(w, 123.))
def test_xavier_initializer(self):
    """Xavier initializers should produce samples whose mean lies within
    5 standard errors of zero, for default and custom gain / fan settings.

    Note: the sample mean of n i.i.d. draws with std sigma has standard
    error sigma / sqrt(n), so the bound is ``5 * std / sqrt(n_samples)``
    (the old code divided by the std, which merely loosened the bound for
    std < 1; this matches the multiplication used in ``test_normal``).
    """
    for dtype, initializer, mode in product(
            float_dtypes,
            (tk.init.xavier_normal, tk.init.xavier_uniform),
            (None, 'fan_in', 'fan_out'),
    ):
        weight = T.variable([n_samples // 50, 50], dtype=dtype, initializer=0.)
        assert_equal(weight, T.full_like(weight, 0.))
        # `mode` is only forwarded when explicitly requested
        mode_arg = {'mode': mode} if mode is not None else {}

        # xavier with default gain and inferred fan_in / fan_out
        fan_in, fan_out = tk.init.calculate_fan_in_and_fan_out(weight)
        xavier_std = np.sqrt(2.0 / float(fan_in + fan_out))
        tk.init.apply_initializer(weight, initializer, **mode_arg)
        self.assertLessEqual(
            np.abs(T.to_numpy(T.reduce_mean(weight))),
            5.0 * xavier_std / np.sqrt(n_samples))

        # xavier with custom gain and explicit fan_in / fan_out
        fan_in, fan_out = 23, 17
        init_gain = 1.5
        xavier_std = init_gain * np.sqrt(2.0 / float(fan_in + fan_out))
        tk.init.apply_initializer(
            weight, initializer,
            fan_in_and_fan_out=(fan_in, fan_out),
            gain=init_gain, **mode_arg)
        self.assertLessEqual(
            np.abs(T.to_numpy(T.reduce_mean(weight))),
            5.0 * xavier_std / np.sqrt(n_samples))
def test_kaming_initializer(self):
    """Kaming initializers should produce samples whose mean lies within
    5 standard errors of zero, and reject an invalid ``mode``.

    Note: the mean of n samples with std sigma has standard error
    sigma / sqrt(n), so the bound is ``5 * std / sqrt(n_samples)`` (the
    old code divided by the std, which merely loosened the bound for
    std < 1; this matches the multiplication used in ``test_normal``).
    """
    for dtype, initializer, mode in product(
            float_dtypes,
            (tk.init.kaming_normal, tk.init.kaming_uniform),
            (None, 'fan_in', 'fan_out'),
    ):
        weight = T.variable([n_samples // 50, 50], dtype=dtype, initializer=0.)
        assert_equal(weight, T.full_like(weight, 0.))
        # `mode` is only forwarded when explicitly requested
        mode_arg = {'mode': mode} if mode is not None else {}

        # kaming with default gain and inferred fan_in / fan_out
        fan_in, fan_out = tk.init.calculate_fan_in_and_fan_out(weight)
        # default mode is 'fan_in'; only 'fan_out' switches the fan used
        fan = fan_out if mode == 'fan_out' else fan_in
        kaming_std = np.sqrt(1.0 / np.sqrt(fan))
        tk.init.apply_initializer(weight, initializer, **mode_arg)
        self.assertLessEqual(
            np.abs(T.to_numpy(T.reduce_mean(weight))),
            5.0 * kaming_std / np.sqrt(n_samples))

        # kaming with custom gain and explicit fan_in / fan_out
        fan_in, fan_out = 23, 17
        init_gain = 1.5
        fan = fan_out if mode == 'fan_out' else fan_in
        kaming_std = init_gain * np.sqrt(1.0 / np.sqrt(fan))
        tk.init.apply_initializer(
            weight, initializer,
            fan_in_and_fan_out=(fan_in, fan_out),
            gain=init_gain, **mode_arg)
        self.assertLessEqual(
            np.abs(T.to_numpy(T.reduce_mean(weight))),
            5.0 * kaming_std / np.sqrt(n_samples))

        # an invalid `mode` must be rejected
        with pytest.raises(
                ValueError, match='`mode` must be either "fan_in" or "fan_out"'):
            weight = T.variable([n_samples // 50, 50], dtype=dtype,
                                initializer=0.)
            tk.init.apply_initializer(weight, initializer, mode='invalid')
def test_normal(self):
    """Normal initializer: sample mean should be within 5 standard errors
    of the configured mean, for both default and customized arguments."""
    for dtype in float_dtypes:
        w = T.variable([n_samples // 50, 50], dtype=dtype, initializer=0.)
        assert_equal(w, T.full_like(w, 0.))

        # normal with default args (mean 0, std 1)
        tk.init.apply_initializer(w, tk.init.normal)
        self.assertLessEqual(
            np.abs(T.to_numpy(T.reduce_mean(w))),
            5.0 / np.sqrt(n_samples))

        # normal with customized mean and std
        tk.init.apply_initializer(w, partial(tk.init.normal, mean=1., std=3.))
        self.assertLessEqual(
            np.abs(T.to_numpy(T.reduce_mean(w)) - 1.),
            5.0 * 3. / np.sqrt(n_samples))
def test_uniform(self):
    """Uniform initializer: sample mean should be within 5 standard errors
    of the interval midpoint (std of U(a, b) is (b - a) / sqrt(12))."""
    for dtype in float_dtypes:
        w = T.variable([n_samples // 50, 50], dtype=dtype, initializer=0.)
        assert_equal(w, T.full_like(w, 0.))

        # uniform with default args: U(0, 1), mean 0.5
        tk.init.apply_initializer(w, tk.init.uniform)
        self.assertLessEqual(
            np.abs(T.to_numpy(T.reduce_mean(w)) - 0.5),
            5.0 / np.sqrt(12.) / np.sqrt(n_samples))

        # uniform with customized bounds: U(-4, 3), mean -0.5, width 7
        tk.init.apply_initializer(
            w, partial(tk.init.uniform, low=-4., high=3.))
        self.assertLessEqual(
            np.abs(T.to_numpy(T.reduce_mean(w)) - (-0.5)),
            5.0 * 7.0 / np.sqrt(12.) / np.sqrt(n_samples))
def test_apply_initializer(self):
    """`apply_initializer` should fill from plain values, forward the
    expected keyword arguments to callable initializers, and reject
    unsupported initializer objects."""
    for dtype in float_dtypes:
        weight = T.variable([5, 3], dtype=dtype)
        default_fans = tk.init.calculate_fan_in_and_fan_out(weight)
        init_fn = Mock()

        # the kwargs `apply_initializer` is expected to pass through
        def expected_kwargs(gain=1.0, mode='fan_in', fans=default_fans):
            return {'gain': gain, 'mode': mode, 'fan_in_and_fan_out': fans}

        # scalar and array values are filled in directly
        tk.init.apply_initializer(weight, 123)
        assert_equal(weight, T.full_like(weight, 123))
        tk.init.apply_initializer(weight, 124.)
        assert_equal(weight, T.full_like(weight, 124.))
        tk.init.apply_initializer(weight, np.array(125.))
        assert_equal(weight, T.full_like(weight, 125.))
        value = np.random.randn(*T.shape(weight)).astype(dtype)
        tk.init.apply_initializer(weight, value)
        assert_equal(weight, value)

        # a callable initializer receives the default kwargs
        init_fn.reset_mock()
        tk.init.apply_initializer(weight, init_fn)
        self.assertEqual(init_fn.call_args, ((weight,), expected_kwargs()))

        # explicit fan_in_and_fan_out overrides the computed one
        init_fn.reset_mock()
        tk.init.apply_initializer(weight, init_fn, fan_in_and_fan_out=(2, 3))
        self.assertEqual(
            init_fn.call_args, ((weight,), expected_kwargs(fans=(2, 3))))

        # explicit mode is forwarded
        init_fn.reset_mock()
        tk.init.apply_initializer(weight, init_fn, mode='fan_out')
        self.assertEqual(
            init_fn.call_args, ((weight,), expected_kwargs(mode='fan_out')))

        # explicit gain is forwarded
        init_fn.reset_mock()
        tk.init.apply_initializer(weight, init_fn, gain=1.5)
        self.assertEqual(
            init_fn.call_args, ((weight,), expected_kwargs(gain=1.5)))

        # an activation (name, class, or instance) selects its gain
        for activation in ['LeakyReLU', tk.layers.ReLU, tk.layers.Tanh()]:
            init_fn.reset_mock()
            init_gain = tk.init.get_activation_gain(activation)
            tk.init.apply_initializer(weight, init_fn, activation=activation)
            self.assertEqual(
                init_fn.call_args, ((weight,), expected_kwargs(gain=init_gain)))

        # an unrecognized initializer object must raise
        with pytest.raises(TypeError, match='Unsupported initializer'):
            tk.init.apply_initializer(weight, object())
def test_ones(self):
    """`ones` should overwrite every element with 1."""
    for dtype in float_dtypes:
        w = T.variable([2, 3, 4], dtype=dtype, initializer=0.)
        # sanity check: variable starts out zero-filled
        assert_equal(w, T.full_like(w, 0.))
        tk.init.apply_initializer(w, tk.init.ones)
        assert_equal(w, T.full_like(w, 1.))