def __init__(self, model, input_record, num_splits, axis=1, name='split', **kwargs):
    """Layer that splits a single Scalar input into ``num_splits`` equally
    sized output Scalars along ``axis``.

    Args:
        model: layer-model helper this layer is registered with.
        input_record: schema.Scalar whose ``field_type().shape`` describes a
            single example (the batch dimension is excluded from the shape).
        num_splits: number of equal pieces; must evenly divide the size of
            the split dimension.
        axis: axis in the full (batch-included) tensor to split on; axis 0
            (the batch dimension) is not supported.
        name: base name for the generated output blob references.
    """
    super(Split, self).__init__(model, name, input_record, **kwargs)
    self.axis = axis
    # The schema shape excludes the batch dimension, so the index into the
    # shape tuple is axis - 1.
    axis -= 1
    assert axis >= 0
    assert isinstance(input_record, schema.Scalar), \
        "Incorrect input type. Expected Scalar, but received: {0}".format(
            input_record)

    input_shape = input_record.field_type().shape
    # Fix: axis must be a valid index into the shape. The previous check
    # (`len(input_shape) >= axis`) let axis == len(input_shape) through,
    # which would raise IndexError on the next line instead of a clear
    # assertion failure.
    assert len(input_shape) > axis
    assert input_shape[axis] % num_splits == 0

    output_shape = list(input_shape)
    # Exact integer division; divisibility was asserted above.
    output_shape[axis] = output_shape[axis] // num_splits

    data_type = input_record.field_type().base

    output_scalars = [
        schema.Scalar(
            (data_type, output_shape),
            self.get_next_blob_reference('output_{}'.format(i)),
        )
        for i in range(num_splits)
    ]
    self.output_schema = schema.Tuple(*output_scalars)
def testStructGet(self):
    """Tuple.get should return the named field, or the default when the
    field name is absent."""
    net = core.Net('test_net')
    scalars = [
        schema.NewRecord(net, schema.Scalar(np.float32))
        for _ in range(2)
    ]
    record = schema.Tuple(*scalars)
    # Auto-generated field names are field_0, field_1, ...
    for idx, expected in enumerate(scalars):
        assert record.get('field_{}'.format(idx), None) == expected
    # A name past the last field falls back to the default.
    assert record.get('field_2', None) is None
def testFunctionalLayerHelperAutoInference(self):
    """Functional layer helpers should auto-infer the output type/shape and
    wire the resulting ops into the predict net."""
    softsign = self.model.Softsign(
        schema.Tuple(self.model.input_feature_schema.float_features), 1)
    field = softsign.field_type()
    assert field.base == np.float32
    assert field.shape == (32, )
    self.model.output_schema = self.model.FC(softsign, 2)

    predict_net = layer_model_instantiator.generate_predict_net(self.model)
    ops = predict_net.Proto().op
    assert len(ops) == 2
    softsign_op, fc_op = ops
    assert softsign_op.type == "Softsign"
    assert fc_op.type == "FC"
    assert len(softsign_op.input) == 1
    assert softsign_op.input[0] == \
        self.model.input_feature_schema.float_features()
    # The Softsign output must feed directly into the FC op.
    assert len(softsign_op.output) == 1
    assert softsign_op.output[0] in fc_op.input
def testTuple(self):
    """schema.Tuple should equal a Struct with auto-named fields and
    support single indexing, multi-indexing, and iteration."""
    s = schema.Tuple(np.int32, str, np.float32)
    s2 = schema.Struct(
        ('field_0', schema.Scalar(dtype=np.int32)),
        # Use builtin `str` rather than the `np.str` alias, which was
        # removed in NumPy 1.24 (it was always a plain alias for `str`,
        # matching the Tuple construction above).
        ('field_1', schema.Scalar(dtype=str)),
        ('field_2', schema.Scalar(dtype=np.float32)),
    )
    # assertEqual: `assertEquals` is a deprecated alias, removed in
    # Python 3.12.
    self.assertEqual(s, s2)
    self.assertEqual(s[0], schema.Scalar(dtype=np.int32))
    self.assertEqual(s[1], schema.Scalar(dtype=str))
    self.assertEqual(s[2], schema.Scalar(dtype=np.float32))
    # Multi-index returns a Struct with the selected fields in the
    # requested order.
    self.assertEqual(
        s[2, 0],
        schema.Struct(
            ('field_2', schema.Scalar(dtype=np.float32)),
            ('field_0', schema.Scalar(dtype=np.int32)),
        ))
    # Iteration should visit the fields in declaration order.
    for i, (v1, v2) in enumerate(zip(s, s2)):
        self.assertEqual(v1, v2)
        self.assertEqual(s[i], v1)
        self.assertEqual(s2[i], v1)
def testMergeIdListsLayer(self, num_inputs, batch_size):
    """MergeIdLists should merge several IdList inputs into a single
    IdList-shaped output."""
    inputs = []
    for _ in range(num_inputs):
        # Random per-example lengths and a matching flat values array.
        lengths = np.random.randint(5, size=batch_size).astype(np.int32)
        values = np.random.randint(
            1, 10, size=lengths.sum()).astype(np.int64)
        inputs.extend([lengths, values])

    input_schema = schema.Tuple(*(
        schema.List(
            schema.Scalar(
                dtype=np.int64,
                metadata=schema.Metadata(categorical_limit=20)))
        for _ in range(num_inputs)
    ))
    input_record = schema.NewRecord(self.model.net, input_schema)
    schema.FeedRecord(input_record, inputs)

    output_schema = self.model.MergeIdLists(input_record)
    assert schema.equal_schemas(
        output_schema, IdList, check_field_names=False)
def testFunctionalLayerHelper(self):
    """Generic functional-layer helpers should chain correctly and emit the
    expected ops into the predict net."""
    features = self.model.input_feature_schema.float_features
    mean = self.model.ReduceFrontMean(features, 1)
    normalized = self.model.Sub(
        schema.Tuple(features, mean[0]),
        1, broadcast=1)
    # Attach type/shape metadata to one output so FC can consume it.
    normalized[0].set_type((np.float32, (32, )))
    self.model.FC(normalized[0], 2)

    predict_net = layer_model_instantiator.generate_predict_net(self.model)
    ops = predict_net.Proto().op
    assert len(ops) == 3
    for op, expected_type in zip(ops, ["ReduceFrontMean", "Sub", "FC"]):
        assert op.type == expected_type
    assert len(ops[0].input) == 1
    assert ops[0].input[0] == features()
    # The Sub output must feed directly into the FC op.
    assert len(ops[1].output) == 1
    assert ops[1].output[0] in ops[2].input