def test_c_py_compose_transforms_module():
    """ Test combining Python and C++ transforms """
    ds.config.set_seed(0)

    def test_config(arr, input_columns, output_cols, op_list):
        data = ds.NumpySlicesDataset(arr, column_names=input_columns, shuffle=False)
        data = data.map(operations=op_list, input_columns=input_columns, output_columns=output_cols,
                        column_order=output_cols)
        res = []
        for i in data.create_dict_iterator(output_numpy=True):
            for col_name in output_cols:
                res.append(i[col_name].tolist())
        return res

    arr = [1, 0]
    assert test_config(arr, ["cols"], ["cols"],
                       [py_transforms.OneHotOp(2), c_transforms.Mask(c_transforms.Relational.EQ, 1)]) == \
           [[[False, True]], [[True, False]]]
    assert test_config(arr, ["cols"], ["cols"],
                       [py_transforms.OneHotOp(2), (lambda x: x + x), c_transforms.Fill(1)]) \
           == [[[1, 1]], [[1, 1]]]
    assert test_config(arr, ["cols"], ["cols"],
                       [py_transforms.OneHotOp(2), (lambda x: x + x), c_transforms.Fill(1), (lambda x: x + x)]) \
           == [[[2, 2]], [[2, 2]]]
    assert test_config([[1, 3]], ["cols"], ["cols"],
                       [c_transforms.PadEnd([3], -1), (lambda x: x + x)]) \
           == [[2, 6, -2]]
    arr = ([[1]], [[3]])
    assert test_config(arr, ["col0", "col1"], ["a"],
                       [(lambda x, y: x + y), c_transforms.PadEnd([2], -1)]) == [[4, -1]]
def process_ner_msra_dataset(data_dir, label_list, bert_vocab_path, max_seq_len=128, class_filter=None,
                             split_begin=None, split_end=None):
    """Process MSRA dataset"""
    ### Loading MSRA data with GeneratorDataset
    dataset = ds.GeneratorDataset(process_msra(data_dir, class_filter, split_begin, split_end),
                                  column_names=['text', 'label'])
    ### Processing label
    label_vocab = text.Vocab.from_list(label_list)
    label_lookup = text.Lookup(label_vocab)
    dataset = dataset.map(operations=label_lookup, input_columns="label", output_columns="label_ids")
    dataset = dataset.map(operations=ops.Concatenate(prepend=np.array([0], dtype='i')),
                          input_columns=["label_ids"])
    dataset = dataset.map(operations=ops.Slice(slice(0, max_seq_len)), input_columns=["label_ids"])
    dataset = dataset.map(operations=ops.PadEnd([max_seq_len], 0), input_columns=["label_ids"])
    ### Processing sentence
    vocab = text.Vocab.from_file(bert_vocab_path)
    lookup = text.Lookup(vocab, unknown_token='[UNK]')
    unicode_char_tokenizer = text.UnicodeCharTokenizer()
    dataset = dataset.map(operations=unicode_char_tokenizer, input_columns=["text"], output_columns=["sentence"])
    dataset = dataset.map(operations=ops.Slice(slice(0, max_seq_len - 2)), input_columns=["sentence"])
    dataset = dataset.map(operations=ops.Concatenate(prepend=np.array(["[CLS]"], dtype='S'),
                                                     append=np.array(["[SEP]"], dtype='S')),
                          input_columns=["sentence"])
    dataset = dataset.map(operations=lookup, input_columns=["sentence"], output_columns=["input_ids"])
    dataset = dataset.map(operations=ops.PadEnd([max_seq_len], 0), input_columns=["input_ids"])
    dataset = dataset.map(operations=ops.Duplicate(), input_columns=["input_ids"],
                          output_columns=["input_ids", "input_mask"],
                          column_order=["input_ids", "input_mask", "label_ids"])
    dataset = dataset.map(operations=ops.Mask(ops.Relational.NE, 0, mstype.int32), input_columns=["input_mask"])
    dataset = dataset.map(operations=ops.Duplicate(), input_columns=["input_ids"],
                          output_columns=["input_ids", "segment_ids"],
                          column_order=["input_ids", "input_mask", "segment_ids", "label_ids"])
    dataset = dataset.map(operations=ops.Fill(0), input_columns=["segment_ids"])
    return dataset
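
# Hypothetical invocation sketch (paths and label set are placeholders, not from the original
# source): building the MSRA NER dataset and batching it for training.
# ner_dataset = process_ner_msra_dataset(data_dir="/path/to/msra",
#                                        label_list=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"],
#                                        bert_vocab_path="/path/to/vocab.txt",
#                                        max_seq_len=128)
# ner_dataset = ner_dataset.batch(32, drop_remainder=True)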
def test_random_select_subpolicy():
    """ Test RandomSelectSubpolicy op """
    ds.config.set_seed(0)

    def test_config(arr, policy):
        try:
            data = ds.NumpySlicesDataset(arr, column_names="col", shuffle=False)
            data = data.map(operations=visions.RandomSelectSubpolicy(policy), input_columns=["col"])
            res = []
            for i in data.create_dict_iterator(num_epochs=1, output_numpy=True):
                res.append(i["col"].tolist())
            return res
        except (TypeError, ValueError) as e:
            return str(e)

    # 3 possible outcomes
    policy1 = [[(ops.PadEnd([4], 0), 0.5), (ops.Compose([ops.Duplicate(), ops.Concatenate()]), 1)],
               [(ops.Slice([0, 1]), 0.5), (ops.Duplicate(), 1), (ops.Concatenate(), 1)]]
    res1 = test_config([[1, 2, 3]], policy1)
    assert res1 in [[[1, 2, 1, 2]], [[1, 2, 3, 1, 2, 3]], [[1, 2, 3, 0, 1, 2, 3, 0]]]

    # test exceptions
    assert "policy can not be empty." in test_config([[1, 2, 3]], [])
    assert "policy[0] can not be empty." in test_config([[1, 2, 3]], [[]])
    assert "op of (op, prob) in policy[1][0] is neither a c_transform op (TensorOperation) nor a callable pyfunc" \
           in test_config([[1, 2, 3]], [[(ops.PadEnd([4], 0), 0.5)], [(1, 0.4)]])
    assert "prob of (op, prob) policy[1][0] is not within the required interval of [0, 1]" in test_config(
        [[1]], [[(ops.Duplicate(), 0)], [(ops.Duplicate(), -0.1)]])
def create_ocr_val_dataset(mindrecord_file, batch_size=32, rank_size=1, rank_id=0, num_parallel_workers=4,
                           use_multiprocessing=True):
    ds = de.MindDataset(mindrecord_file,
                        columns_list=["image", "annotation", "decoder_input", "decoder_target"],
                        num_shards=rank_size, shard_id=rank_id,
                        num_parallel_workers=num_parallel_workers, shuffle=False)
    resize_rescale_op = ImageResizeWithRescale(standard_img_height=128, standard_img_width=512)
    transforms = [C.Decode(), resize_rescale_op, C.HWC2CHW()]
    ds = ds.map(operations=transforms, input_columns=["image"],
                python_multiprocessing=use_multiprocessing, num_parallel_workers=num_parallel_workers)
    ds = ds.map(operations=ops.PadEnd([config.max_length], 0), input_columns=["decoder_target"],
                python_multiprocessing=use_multiprocessing, num_parallel_workers=8)
    ds = ds.map(operations=ops.PadEnd([config.max_length], 0), input_columns=["decoder_input"],
                python_multiprocessing=use_multiprocessing, num_parallel_workers=8)
    ds = ds.batch(batch_size, drop_remainder=True)
    print("Val dataset size= %s" % (str(int(ds.get_dataset_size()) * batch_size)))
    return ds
def pad_compare(array, pad_shape, pad_value, res):
    data = ds.NumpySlicesDataset([array])
    if pad_value is not None:
        data = data.map(operations=ops.PadEnd(pad_shape, pad_value))
    else:
        data = data.map(operations=ops.PadEnd(pad_shape))
    for d in data.create_tuple_iterator(output_numpy=True):
        np.testing.assert_array_equal(res, d[0])
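
# Hypothetical usage sketch for the pad_compare helper above (values are illustrative, not from
# the original source): ops.PadEnd([4], 0) pads [1, 2] at the end with zeros up to length 4,
# so the expected result is [1, 2, 0, 0].
def test_pad_end_usage_sketch():
    pad_compare([1, 2], [4], 0, [1, 2, 0, 0])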
def test_random_choice():
    """ Test RandomChoice op """
    ds.config.set_seed(0)

    def test_config(arr, op_list):
        try:
            data = ds.NumpySlicesDataset(arr, column_names="col", shuffle=False)
            data = data.map(operations=ops.RandomChoice(op_list), input_columns=["col"])
            res = []
            for i in data.create_dict_iterator(num_epochs=1, output_numpy=True):
                res.append(i["col"].tolist())
            return res
        except (TypeError, ValueError) as e:
            return str(e)

    # Test whether an operation would be randomly chosen.
    # In order to prevent random failure, both results need to be checked.
    res1 = test_config([[0, 1, 2]], [ops.PadEnd([4], 0), ops.Slice([0, 2])])
    assert res1 in [[[0, 1, 2, 0]], [[0, 2]]]

    # Test nested structure
    res2 = test_config([[0, 1, 2]], [ops.Compose([ops.Duplicate(), ops.Concatenate()]),
                                     ops.Compose([ops.Slice([0, 1]), ops.OneHot(2)])])
    assert res2 in [[[[1, 0], [0, 1]]], [[0, 1, 2, 0, 1, 2]]]

    # Test RandomChoice where there is only 1 operation
    assert test_config([[4, 3], [2, 1]], [ops.Slice([0])]) == [[4], [2]]
def test_eager_pad_end():
    """ Test PadEnd op is callable """
    pad_end_op = data_trans.PadEnd([3], -1)
    expected = np.array([1, 2, -1])
    assert np.array_equal(pad_end_op([1, 2]), expected)
def create_ocr_train_dataset(mindrecord_file, batch_size=32, rank_size=1, rank_id=0, is_training=True,
                             num_parallel_workers=4, use_multiprocessing=True):
    ds = de.MindDataset(mindrecord_file,
                        columns_list=["image", "decoder_input", "decoder_target"],
                        num_shards=rank_size, shard_id=rank_id,
                        num_parallel_workers=num_parallel_workers, shuffle=is_training)
    aug_ops = AugmentationOps()
    transforms = [C.Decode(), aug_ops, C.HWC2CHW()]
    ds = ds.map(operations=transforms, input_columns=["image"],
                python_multiprocessing=use_multiprocessing, num_parallel_workers=num_parallel_workers)
    ds = ds.map(operations=ops.PadEnd([config.max_length], 0), input_columns=["decoder_target"])
    ds = ds.map(operations=random_teacher_force,
                input_columns=["image", "decoder_input", "decoder_target"],
                output_columns=["image", "decoder_input", "decoder_target", "teacher_force"],
                column_order=["image", "decoder_input", "decoder_target", "teacher_force"])
    type_cast_op_bool = ops.TypeCast(mstype.bool_)
    ds = ds.map(operations=type_cast_op_bool, input_columns="teacher_force")
    print("Train dataset size= %s" % (int(ds.get_dataset_size())))
    ds = ds.batch(batch_size, drop_remainder=True)
    return ds
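
# Hypothetical invocation sketch for the OCR pipelines above (file paths are placeholders, not
# from the original source); decoder_input/decoder_target are padded to config.max_length.
# train_ds = create_ocr_train_dataset("/path/to/ocr_train.mindrecord", batch_size=32)
# val_ds = create_ocr_val_dataset("/path/to/ocr_val.mindrecord", batch_size=32)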
def process_tnews_clue_dataset(data_dir, label_list, bert_vocab_path, data_usage='train', shuffle_dataset=False,
                               max_seq_len=128, batch_size=64):
    """Process TNEWS dataset"""
    ### Loading TNEWS from CLUEDataset
    assert data_usage in ['train', 'eval', 'test']
    if data_usage == 'train':
        dataset = ds.CLUEDataset(os.path.join(data_dir, "train.json"), task='TNEWS',
                                 usage=data_usage, shuffle=shuffle_dataset)
    elif data_usage == 'eval':
        dataset = ds.CLUEDataset(os.path.join(data_dir, "dev.json"), task='TNEWS',
                                 usage=data_usage, shuffle=shuffle_dataset)
    else:
        dataset = ds.CLUEDataset(os.path.join(data_dir, "test.json"), task='TNEWS',
                                 usage=data_usage, shuffle=shuffle_dataset)
    ### Processing label
    if data_usage == 'test':
        dataset = dataset.map(input_columns=["id"], output_columns=["id", "label_id"],
                              column_order=["id", "label_id", "sentence"], operations=ops.Duplicate())
        dataset = dataset.map(input_columns=["label_id"], operations=ops.Fill(0))
    else:
        label_vocab = text.Vocab.from_list(label_list)
        label_lookup = text.Lookup(label_vocab)
        dataset = dataset.map(input_columns="label_desc", output_columns="label_id", operations=label_lookup)
    ### Processing sentence
    vocab = text.Vocab.from_file(bert_vocab_path)
    tokenizer = text.BertTokenizer(vocab, lower_case=True)
    lookup = text.Lookup(vocab, unknown_token='[UNK]')
    dataset = dataset.map(input_columns=["sentence"], operations=tokenizer)
    dataset = dataset.map(input_columns=["sentence"], operations=ops.Slice(slice(0, max_seq_len)))
    dataset = dataset.map(input_columns=["sentence"],
                          operations=ops.Concatenate(prepend=np.array(["[CLS]"], dtype='S'),
                                                     append=np.array(["[SEP]"], dtype='S')))
    dataset = dataset.map(input_columns=["sentence"], output_columns=["text_ids"], operations=lookup)
    dataset = dataset.map(input_columns=["text_ids"], operations=ops.PadEnd([max_seq_len], 0))
    dataset = dataset.map(input_columns=["text_ids"], output_columns=["text_ids", "mask_ids"],
                          column_order=["label_id", "text_ids", "mask_ids"], operations=ops.Duplicate())
    dataset = dataset.map(input_columns=["mask_ids"], operations=ops.Mask(ops.Relational.NE, 0, mstype.int32))
    dataset = dataset.map(input_columns=["text_ids"], output_columns=["text_ids", "segment_ids"],
                          column_order=["label_id", "text_ids", "mask_ids", "segment_ids"],
                          operations=ops.Duplicate())
    dataset = dataset.map(input_columns=["segment_ids"], operations=ops.Fill(0))
    dataset = dataset.batch(batch_size)
    label = []
    text_ids = []
    mask_ids = []
    segment_ids = []
    for data in dataset:
        label.append(data[0])
        text_ids.append(data[1])
        mask_ids.append(data[2])
        segment_ids.append(data[3])
    return label, text_ids, mask_ids, segment_ids
def process_cmnli_clue_dataset(data_dir, label_list, bert_vocab_path, data_usage='train', shuffle_dataset=False,
                               max_seq_len=128, batch_size=64, drop_remainder=True):
    """Process CMNLI dataset"""
    ### Loading CMNLI from CLUEDataset
    assert data_usage in ['train', 'eval', 'test']
    if data_usage == 'train':
        dataset = ds.CLUEDataset(os.path.join(data_dir, "train.json"), task='CMNLI',
                                 usage=data_usage, shuffle=shuffle_dataset)
    elif data_usage == 'eval':
        dataset = ds.CLUEDataset(os.path.join(data_dir, "dev.json"), task='CMNLI',
                                 usage=data_usage, shuffle=shuffle_dataset)
    else:
        dataset = ds.CLUEDataset(os.path.join(data_dir, "test.json"), task='CMNLI',
                                 usage=data_usage, shuffle=shuffle_dataset)
    ### Processing label
    if data_usage == 'test':
        dataset = dataset.map(operations=ops.Duplicate(), input_columns=["id"],
                              output_columns=["id", "label_id"],
                              column_order=["id", "label_id", "sentence1", "sentence2"])
        dataset = dataset.map(operations=ops.Fill(0), input_columns=["label_id"])
    else:
        label_vocab = text.Vocab.from_list(label_list)
        label_lookup = text.Lookup(label_vocab)
        dataset = dataset.map(operations=label_lookup, input_columns="label", output_columns="label_id")
    ### Processing sentence pairs
    vocab = text.Vocab.from_file(bert_vocab_path)
    tokenizer = text.BertTokenizer(vocab, lower_case=True)
    lookup = text.Lookup(vocab, unknown_token='[UNK]')
    ### Tokenizing sentences and truncate sequence pair
    dataset = dataset.map(operations=tokenizer, input_columns=["sentence1"])
    dataset = dataset.map(operations=tokenizer, input_columns=["sentence2"])
    dataset = dataset.map(operations=text.TruncateSequencePair(max_seq_len - 3),
                          input_columns=["sentence1", "sentence2"])
    ### Adding special tokens
    dataset = dataset.map(operations=ops.Concatenate(prepend=np.array(["[CLS]"], dtype='S'),
                                                     append=np.array(["[SEP]"], dtype='S')),
                          input_columns=["sentence1"])
    dataset = dataset.map(operations=ops.Concatenate(append=np.array(["[SEP]"], dtype='S')),
                          input_columns=["sentence2"])
    ### Generating segment_ids
    dataset = dataset.map(operations=ops.Duplicate(), input_columns=["sentence1"],
                          output_columns=["sentence1", "type_sentence1"],
                          column_order=["sentence1", "type_sentence1", "sentence2", "label_id"])
    dataset = dataset.map(operations=ops.Duplicate(), input_columns=["sentence2"],
                          output_columns=["sentence2", "type_sentence2"],
                          column_order=["sentence1", "type_sentence1", "sentence2", "type_sentence2", "label_id"])
    dataset = dataset.map(operations=[lookup, ops.Fill(0)], input_columns=["type_sentence1"])
    dataset = dataset.map(operations=[lookup, ops.Fill(1)], input_columns=["type_sentence2"])
    dataset = dataset.map(operations=ops.Concatenate(),
                          input_columns=["type_sentence1", "type_sentence2"],
                          output_columns=["segment_ids"],
                          column_order=["sentence1", "sentence2", "segment_ids", "label_id"])
    dataset = dataset.map(operations=ops.PadEnd([max_seq_len], 0), input_columns=["segment_ids"])
    ### Generating text_ids
    dataset = dataset.map(operations=ops.Concatenate(),
                          input_columns=["sentence1", "sentence2"],
                          output_columns=["text_ids"],
                          column_order=["text_ids", "segment_ids", "label_id"])
    dataset = dataset.map(operations=lookup, input_columns=["text_ids"])
    dataset = dataset.map(operations=ops.PadEnd([max_seq_len], 0), input_columns=["text_ids"])
    ### Generating mask_ids
    dataset = dataset.map(operations=ops.Duplicate(), input_columns=["text_ids"],
                          output_columns=["text_ids", "mask_ids"],
                          column_order=["text_ids", "mask_ids", "segment_ids", "label_id"])
    dataset = dataset.map(operations=ops.Mask(ops.Relational.NE, 0, mstype.int32),
                          input_columns=["mask_ids"])
    dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)
    return dataset