def test_shape_and_type_inference(self):
    """Check that IndexHash propagates the input blob's shape and dtype.

    Runs the same inference twice: once on a 1-D int64 blob and once on a
    2-D int32 blob; the output must mirror the input in both cases.
    """
    cases = [
        ("shape_type_inf_int64", [64], core.DataType.INT64),
        ("shape_type_inf_int32", [2, 32], core.DataType.INT32),
    ]
    for ws_name, shape, dtype in cases:
        with hu.temp_workspace(ws_name):
            net = core.Net('test_net')
            net.ConstantFill([], "values", shape=shape, dtype=dtype)
            net.IndexHash(['values'], ['values_output'])
            shapes, types = workspace.InferShapesAndTypes([net], {})
            self.assertEqual(shapes["values_output"], shape)
            self.assertEqual(types["values_output"], dtype)
def ref(data):
    """Reference implementation: cast an integer array to strings.

    The result is fed into and fetched back from a temporary workspace
    because a string blob is fetched as an object array — the round trip
    mimics what the operator under test produces.
    """
    # np.str was deprecated in NumPy 1.20 and removed in 1.24; it was
    # only ever an alias for the builtin str, which we use directly.
    ret = data.astype(dtype=str)
    # the string blob will be fetched as object, we feed and re-fetch
    # to mimic this.
    with hu.temp_workspace('tmp_ref_int_to_string'):
        workspace.FeedBlob('tmp_blob', ret)
        fetched_ret = workspace.FetchBlob('tmp_blob')
        return (fetched_ret, )
def test_shape_and_type_inference(self):
    """Verify GatherRanges shape/type inference.

    values_output should keep the dtype/shape bound of the values input;
    lengths_output should be int32 with one entry per ranges row.
    """
    with hu.temp_workspace("shape_type_inf_int32"):
        net = core.Net("test_net")
        net.ConstantFill(
            [], "ranges", shape=[3, 5, 2], dtype=core.DataType.INT32)
        net.ConstantFill(
            [], "values", shape=[64], dtype=core.DataType.INT64)
        net.GatherRanges(
            ["values", "ranges"], ["values_output", "lengths_output"])

        shapes, types = workspace.InferShapesAndTypes([net], {})

        # Values pass through with their original shape and dtype.
        self.assertEqual(shapes["values_output"], [64])
        self.assertEqual(types["values_output"], core.DataType.INT64)
        # One length per outer ranges entry.
        self.assertEqual(shapes["lengths_output"], [3])
        self.assertEqual(types["lengths_output"], core.DataType.INT32)
def test_shape_and_type_inference(self):
    """IndexHash output must match its input blob's shape and dtype."""
    # 1-D int64 input.
    with hu.temp_workspace("shape_type_inf_int64"):
        net = core.Net('test_net')
        net.ConstantFill(
            [],
            "values",
            shape=[64],
            dtype=core.DataType.INT64,
        )
        net.IndexHash(['values'], ['values_output'])
        shapes, types = workspace.InferShapesAndTypes([net], {})
        self.assertEqual(shapes["values_output"], [64])
        self.assertEqual(types["values_output"], core.DataType.INT64)

    # 2-D int32 input.
    with hu.temp_workspace("shape_type_inf_int32"):
        net = core.Net('test_net')
        net.ConstantFill(
            [],
            "values",
            shape=[2, 32],
            dtype=core.DataType.INT32,
        )
        net.IndexHash(['values'], ['values_output'])
        shapes, types = workspace.InferShapesAndTypes([net], {})
        self.assertEqual(shapes["values_output"], [2, 32])
        self.assertEqual(types["values_output"], core.DataType.INT32)
def test_shape_and_type_inference(self):
    """GatherRanges inference: values keep dtype/shape, lengths are int32."""
    with hu.temp_workspace("shape_type_inf_int32"):
        net = core.Net('test_net')
        net.ConstantFill(
            [], "ranges", shape=[3, 5, 2], dtype=core.DataType.INT32,
        )
        net.ConstantFill(
            [], "values", shape=[64], dtype=core.DataType.INT64,
        )
        net.GatherRanges(
            ['values', 'ranges'], ['values_output', 'lengths_output'])
        shapes, types = workspace.InferShapesAndTypes([net], {})

        expectations = {
            "values_output": ([64], core.DataType.INT64),
            "lengths_output": ([3], core.DataType.INT32),
        }
        for blob, (shape, dtype) in expectations.items():
            self.assertEqual(shapes[blob], shape)
            self.assertEqual(types[blob], dtype)
def run_test(
        size_tuple, means, stds, label_type, num_labels, dc, validator,
        output1=None, output2_size=None):
    """Build an LMDB of synthetic images, run ImageInput over it on every
    device in *dc*, and hand the results to *validator* for checking.

    Args:
        size_tuple: (width, height, minsize, crop) for the test images.
        means/stds: per-channel normalization applied by the operator.
        label_type/num_labels: label encoding passed through to create_test
            and the operator.
        dc: device options to run under.
        validator: callback(expected_images, device_option, count).
        output1/output2_size: optional extra operator outputs.
    """
    # TODO: Does not test on GPU and does not test use_gpu_transform
    # WARNING: Using ModelHelper automatically does NHWC to NCHW
    # transformation if needed.
    width, height, minsize, crop = size_tuple
    means = [float(m) for m in means]
    stds = [float(s) for s in stds]
    out_dir = tempfile.mkdtemp()
    count_images = 2  # One with bounding box and one without
    expected_images = create_test(
        out_dir,
        width=width,
        height=height,
        default_bound=(3, 5, height - 3, width - 5),
        minsize=minsize,
        crop=crop,
        means=means,
        stds=stds,
        count=count_images,
        label_type=label_type,
        num_labels=num_labels,
        output1=output1,
        output2_size=output2_size,
    )
    for device_option in dc:
        with hu.temp_workspace():
            # Open the freshly written LMDB for the operator to read.
            reader_net = core.Net('reader')
            reader_net.CreateDB([], 'DB', db=out_dir, db_type="lmdb")
            workspace.RunNetOnce(reader_net)

            # Optional extra outputs are appended after data/label.
            outputs = ['data', 'label']
            output_sizes = []
            if output1:
                outputs.append('output1')
                output_sizes.append(1)
            if output2_size:
                outputs.append('output2')
                output_sizes.append(output2_size)

            imageop = core.CreateOperator(
                'ImageInput',
                ['DB'],
                outputs,
                batch_size=count_images,
                color=3,
                minsize=minsize,
                crop=crop,
                is_test=True,
                bounding_ymin=3,
                bounding_xmin=5,
                bounding_height=height - 3,
                bounding_width=width - 5,
                mean_per_channel=means,
                std_per_channel=stds,
                use_gpu_transform=(device_option.device_type == 1),
                label_type=label_type,
                num_labels=num_labels,
                output_sizes=output_sizes,
            )
            imageop.device_option.CopyFrom(device_option)
            main_net = core.Net('main')
            main_net.Proto().op.extend([imageop])
            workspace.RunNetOnce(main_net)
            validator(expected_images, device_option, count_images)
    shutil.rmtree(out_dir)
def test_basic_rnn(self, seed, seq_length, batch_size, input_size,
                   hidden_size, drop_states, sequence_lengths, gc, dc):
    """Run a forward-only BasicRNN and compare against the numpy reference.

    All weights and inputs are drawn from a seeded RNG so the run is
    deterministic; the same arrays feed both the operator net and
    basic_rnn_reference.
    """
    np.random.seed(seed)
    # NOTE: generation order matters for reproducibility with the seed.
    seq_lengths_data = np.random.randint(
        1, seq_length + 1, size=(batch_size, )).astype(np.int32)
    input_blob_data = np.random.randn(
        seq_length, batch_size, input_size).astype(np.float32)
    initial_h_data = np.random.randn(
        batch_size, hidden_size).astype(np.float32)
    gates_t_w_data = np.random.randn(
        hidden_size, hidden_size).astype(np.float32)
    gates_t_b_data = np.random.randn(hidden_size).astype(np.float32)
    i2h_w_data = np.random.randn(
        hidden_size, input_size).astype(np.float32)
    i2h_b_data = np.random.randn(hidden_size).astype(np.float32)

    with core.DeviceScope(gc):
        with hu.temp_workspace():
            # Feed every input and parameter blob the cell expects.
            feeds = (
                ('input_blob', input_blob_data),
                ('seq_lengths', seq_lengths_data),
                ('initial_h', initial_h_data),
                ('basic_rnn/gates_t_w', gates_t_w_data),
                ('basic_rnn/gates_t_b', gates_t_b_data),
                ('basic_rnn/i2h_w', i2h_w_data),
                ('basic_rnn/i2h_b', i2h_b_data),
            )
            for blob_name, blob_data in feeds:
                workspace.FeedBlob(blob_name, blob_data, device_option=gc)

            model = ModelHelper(name='model')
            hidden_t_all, _ = rnn_cell.BasicRNN(
                model,
                'input_blob',
                'seq_lengths' if sequence_lengths else None,
                ['initial_h'],
                input_size,
                hidden_size,
                "basic_rnn",
                activation='tanh',
                forward_only=True,
                drop_states=drop_states)

            workspace.RunNetOnce(model.net)
            result = workspace.FetchBlob(hidden_t_all)

            reference = basic_rnn_reference(
                input_blob_data,
                initial_h_data,
                i2h_w_data,
                i2h_b_data,
                gates_t_w_data,
                gates_t_b_data,
                seq_lengths_data if sequence_lengths else None,
                drop_states=drop_states,
                use_sequence_lengths=sequence_lengths)

            np.testing.assert_allclose(
                result, reference, atol=1e-4, rtol=1e-4)
def run_test(
        size_tuple, means, stds, label_type, num_labels, is_test,
        scale_jitter_type, color_jitter, color_lighting, dc, validator,
        output1=None, output2_size=None):
    """Write synthetic images to an LMDB, run ImageInput (with jitter and
    lighting options) on each device in *dc*, and validate the outputs.

    Args:
        size_tuple: (width, height, minsize, crop).
        means/stds: per-channel normalization values.
        label_type/num_labels: label encoding for create_test / operator.
        is_test: forwarded to the operator's is_test argument.
        scale_jitter_type/color_jitter/color_lighting: augmentation knobs.
        dc: device options to iterate over.
        validator: callback(expected_images, device_option, count).
        output1/output2_size: optional extra operator outputs.
    """
    # TODO: Does not test on GPU and does not test use_gpu_transform
    # WARNING: Using ModelHelper automatically does NHWC to NCHW
    # transformation if needed.
    width, height, minsize, crop = size_tuple
    means = [float(m) for m in means]
    stds = [float(s) for s in stds]
    out_dir = tempfile.mkdtemp()
    count_images = 2  # One with bounding box and one without
    expected_images = create_test(
        out_dir,
        width=width,
        height=height,
        default_bound=(3, 5, height - 3, width - 5),
        minsize=minsize,
        crop=crop,
        means=means,
        stds=stds,
        count=count_images,
        label_type=label_type,
        num_labels=num_labels,
        output1=output1,
        output2_size=output2_size,
    )
    for device_option in dc:
        with hu.temp_workspace():
            # Open the database written by create_test.
            reader_net = core.Net('reader')
            reader_net.CreateDB([], 'DB', db=out_dir, db_type="lmdb")
            workspace.RunNetOnce(reader_net)

            # Optional extra outputs follow data/label.
            outputs = ['data', 'label']
            output_sizes = []
            if output1:
                outputs.append('output1')
                output_sizes.append(1)
            if output2_size:
                outputs.append('output2')
                output_sizes.append(output2_size)

            imageop = core.CreateOperator(
                'ImageInput',
                ['DB'],
                outputs,
                batch_size=count_images,
                color=3,
                minsize=minsize,
                crop=crop,
                is_test=is_test,
                bounding_ymin=3,
                bounding_xmin=5,
                bounding_height=height - 3,
                bounding_width=width - 5,
                mean_per_channel=means,
                std_per_channel=stds,
                use_gpu_transform=(device_option.device_type == 1),
                label_type=label_type,
                num_labels=num_labels,
                output_sizes=output_sizes,
                scale_jitter_type=scale_jitter_type,
                color_jitter=color_jitter,
                color_lighting=color_lighting,
            )
            imageop.device_option.CopyFrom(device_option)
            main_net = core.Net('main')
            main_net.Proto().op.extend([imageop])
            workspace.RunNetOnce(main_net)
            validator(expected_images, device_option, count_images)
    shutil.rmtree(out_dir)
def test_basic_rnn(self, seed, seq_length, batch_size, input_size,
                   hidden_size, drop_states, sequence_lengths, gc, dc):
    """Forward-only BasicRNN vs. the numpy reference implementation.

    Inputs and parameters come from a seeded RNG; keep the draw order
    stable so results stay reproducible for a given seed.
    """
    np.random.seed(seed)
    seq_lengths_data = np.random.randint(
        1, seq_length + 1, size=(batch_size,)).astype(np.int32)
    input_blob_data = np.random.randn(
        seq_length, batch_size, input_size).astype(np.float32)
    initial_h_data = np.random.randn(
        batch_size, hidden_size).astype(np.float32)
    gates_t_w_data = np.random.randn(
        hidden_size, hidden_size).astype(np.float32)
    gates_t_b_data = np.random.randn(hidden_size).astype(np.float32)
    i2h_w_data = np.random.randn(
        hidden_size, input_size).astype(np.float32)
    i2h_b_data = np.random.randn(hidden_size).astype(np.float32)

    with core.DeviceScope(gc):
        with hu.temp_workspace():
            # Blob name -> array for everything the cell reads.
            blob_feeds = {
                'input_blob': input_blob_data,
                'seq_lengths': seq_lengths_data,
                'initial_h': initial_h_data,
                'basic_rnn/gates_t_w': gates_t_w_data,
                'basic_rnn/gates_t_b': gates_t_b_data,
                'basic_rnn/i2h_w': i2h_w_data,
                'basic_rnn/i2h_b': i2h_b_data,
            }
            for blob_name, blob_data in blob_feeds.items():
                workspace.FeedBlob(blob_name, blob_data, device_option=gc)

            model = ModelHelper(name='model')
            hidden_t_all, _ = rnn_cell.BasicRNN(
                model,
                'input_blob',
                'seq_lengths' if sequence_lengths else None,
                ['initial_h'],
                input_size,
                hidden_size,
                "basic_rnn",
                activation='tanh',
                forward_only=True,
                drop_states=drop_states)

            workspace.RunNetOnce(model.net)
            result = workspace.FetchBlob(hidden_t_all)

            reference = basic_rnn_reference(
                input_blob_data,
                initial_h_data,
                i2h_w_data,
                i2h_b_data,
                gates_t_w_data,
                gates_t_b_data,
                seq_lengths_data if sequence_lengths else None,
                drop_states=drop_states,
                use_sequence_lengths=sequence_lengths,
            )

            np.testing.assert_allclose(
                result, reference, atol=1e-4, rtol=1e-4)
def test_imageinput(self, size_tuple, means, stds, gc, dc):
    """Round-trip synthetic images through ImageInput and compare pixels.

    Writes two images (one with a bounding box, one without) to an LMDB,
    runs the ImageInput operator on every device in *dc*, and checks that
    each decoded image matches the expected image within +/-1 per pixel.
    """
    # TODO: Does not test on GPU and does not test use_gpu_transform
    # WARNING: Using ModelHelper automatically does NHWC to NCHW
    # transformation if needed.
    width, height, minsize, crop = size_tuple
    means = [float(m) for m in means]
    stds = [float(s) for s in stds]
    out_dir = tempfile.mkdtemp()
    count_images = 2  # One with bounding box and one without
    expected_images = create_test(
        out_dir,
        width=width,
        height=height,
        default_bound=(3, 5, height - 3, width - 5),
        minsize=minsize,
        crop=crop,
        means=means,
        stds=stds,
        count=count_images)
    for device_option in dc:
        with hu.temp_workspace():
            reader_net = core.Net('reader')
            reader_net.CreateDB([], 'DB', db=out_dir, db_type="lmdb")
            workspace.RunNetOnce(reader_net)
            imageop = core.CreateOperator(
                'ImageInput',
                ['DB'],
                ["data", "label"],
                batch_size=count_images,
                color=3,
                minsize=minsize,
                crop=crop,
                is_test=True,
                bounding_ymin=3,
                bounding_xmin=5,
                bounding_height=height - 3,
                bounding_width=width - 5,
                mean_per_channel=means,
                std_per_channel=stds,
                use_gpu_transform=(device_option.device_type == 1))
            imageop.device_option.CopyFrom(device_option)
            main_net = core.Net('main')
            main_net.Proto().op.extend([imageop])
            workspace.RunNetOnce(main_net)
            labels = workspace.FetchBlob('label')
            result = workspace.FetchBlob('data').astype(np.int32)
            # If we don't use_gpu_transform, the output is in NHWC
            # Our reference output is CHW so we swap
            if device_option.device_type != 1:
                expected = [
                    img.swapaxes(0, 1).swapaxes(1, 2)
                    for img in expected_images
                ]
            else:
                expected = expected_images
            for i in range(count_images):
                self.assertEqual(labels[i], i)
                # Compare with the absolute difference: the previous
                # one-sided check (expected - result > 1) never flagged
                # pixels where the result EXCEEDED the expected value,
                # so half the error space went untested.
                self.assertEqual(
                    (np.absolute(expected[i] - result[i]) > 1).sum(), 0)
    shutil.rmtree(out_dir)