def test_scale_ext(self):
    """A Scale layer with learned blobs is extracted to a ScaleShift op
    carrying those blobs as embedded 'weights'/'biases' inputs."""
    weights = np.array([1., 2.])
    biases = np.array([3., 4.])
    params = {'type': 'Scale', 'axis': 0, 'bias_term': True}
    res = scale_ext(FakeProtoLayer(FakeMultiParam(params)),
                    FakeModelLayer([weights, biases]))
    exp_res = {
        'op': 'ScaleShift',
        'type': 'ScaleShift',
        'axis': 0,
        'infer': copy_shape_infer,
        'weights': weights,
        'biases': biases,
        'embedded_inputs': [
            (1, 'weights', {'bin': 'weights'}),
            (2, 'biases', {'bin': 'biases'}),
        ],
    }
    # numpy arrays need an element-wise comparison; everything else is
    # compared with plain equality.
    for key, expected in exp_res.items():
        if key in ('weights', 'biases'):
            np.testing.assert_array_equal(res[key], expected)
        else:
            self.assertEqual(res[key], expected)
def test_create_default_weights(self):
    """
    There are situations when scale layer doesn't have weights and biases.
    This test checks that if they are not available in the caffemodel file
    then default values [1] and [0] are generated.
    """
    scale_blob = np.array([1])
    bias_blob = np.array([0])
    params = {'type': 'Scale', 'axis': 0, 'bias_term': True}
    res = scale_ext(FakeProtoLayer(FakeMultiParam(params)), None)
    exp_res = {
        'op': 'ScaleShift',
        'type': 'ScaleShift',
        'axis': 0,
        'infer': copy_shape_infer,
        'weights': scale_blob,
        'biases': bias_blob,
        'embedded_inputs': [
            (1, 'weights', {'bin': 'weights'}),
            (2, 'biases', {'bin': 'biases'}),
        ],
    }
    # assertDictEqual would compare the numpy arrays with `==`, which only
    # works by accident for 1-element arrays (and raises ValueError for
    # longer ones). Check the key sets explicitly to keep the "no extra
    # keys" guarantee, then compare arrays element-wise like the sibling
    # tests do.
    self.assertEqual(set(exp_res), set(res))
    for key in exp_res:
        if key in ('weights', 'biases'):
            np.testing.assert_array_equal(res[key], exp_res[key])
        else:
            self.assertEqual(res[key], exp_res[key])
def test_scale_2inputs_ext(self):
    """A two-input Scale layer (no learned blobs, no bias term) still maps
    to a ScaleShift op, but without embedded weights/biases."""
    params = {'type': 'Scale', 'axis': 0, 'bias_term': False}
    res = scale_ext(FakeProtoLayer(FakeMultiParam(params), True), None)
    exp_res = {
        'op': 'ScaleShift',
        'type': 'ScaleShift',
        'axis': 0,
        'infer': copy_shape_infer,
    }
    for key, expected in exp_res.items():
        self.assertEqual(res[key], expected)