Code example #1
def test_cv_minddataset_random_sampler_basic(add_and_remove_cv_file):
    data = get_data(CV_DIR_NAME, True)
    columns_list = ["data", "file_name", "label"]
    num_readers = 4
    sampler = ds.RandomSampler()
    data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers,
                              sampler=sampler)
    assert data_set.get_dataset_size() == 10
    num_iter = 0
    new_dataset = []
    for item in data_set.create_dict_iterator():
        logger.info(
            "-------------- cv reader basic: {} ------------------------".format(num_iter))
        logger.info(
            "-------------- item[data]: {}  -----------------------------".format(item["data"]))
        logger.info(
            "-------------- item[file_name]: {} ------------------------".format(item["file_name"]))
        logger.info(
            "-------------- item[label]: {} ----------------------------".format(item["label"]))
        num_iter += 1
        new_dataset.append(item['file_name'])
    assert num_iter == 10
    assert new_dataset != [x['file_name'] for x in data]
Code example #2
def create_yolo_dataset(mindrecord_dir, batch_size=32, repeat_num=10, device_num=device_num,
                        rank=rankid, is_training=True, num_parallel_workers=8):
    """Create YOLOv3 dataset with MindDataset."""
    ds = de.MindDataset(mindrecord_dir, columns_list=["image", "annotation"], num_shards=device_num, shard_id=rank,
                        num_parallel_workers=num_parallel_workers, shuffle=is_training)
    decode = C.Decode()
    ds = ds.map(input_columns=["image"], operations=decode)
    compose_map_func = (lambda image, annotation: preprocess_fn(image, annotation, is_training))

    if is_training:
        hwc_to_chw = C.HWC2CHW()
        ds = ds.map(input_columns=["image", "annotation"],
                    output_columns=["image", "bbox_1", "bbox_2", "bbox_3", "gt_box1", "gt_box2", "gt_box3"],
                    columns_order=["image", "bbox_1", "bbox_2", "bbox_3", "gt_box1", "gt_box2", "gt_box3"],
                    operations=compose_map_func, num_parallel_workers=num_parallel_workers)
        ds = ds.map(input_columns=["image"], operations=hwc_to_chw, num_parallel_workers=num_parallel_workers)
        ds = ds.batch(batch_size, drop_remainder=True)
        ds = ds.repeat(repeat_num)
    else:
        ds = ds.map(input_columns=["image", "annotation"],
                    output_columns=["image", "image_shape", "annotation"],
                    columns_order=["image", "image_shape", "annotation"],
                    operations=compose_map_func, num_parallel_workers=num_parallel_workers)
    return ds
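A minimal smoke test of the helper above could look like the sketch below; the path "yolo.mindrecord0" and the batch size are placeholders, device_num=1, rank=0 simply disables sharding, and the module-level names noted in the comment above are assumed to be available.

dataset = create_yolo_dataset("yolo.mindrecord0", batch_size=8, device_num=1, rank=0)
for batch in dataset.create_dict_iterator(output_numpy=True):
    # Each training batch carries the image plus the per-scale bbox and gt_box columns.
    print({name: value.shape for name, value in batch.items()})
    break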
Code example #3
def create_dataset(batch_size, data_path, device_num=1, rank=0, drop=True):
    """
    Create dataset

    Inputs:
        batch_size: batch size
        data_path: path of your MindRecord files
        device_num: total device number
        rank: current rank id
        drop: whether drop remainder

    Returns:
        dataset: the dataset for training or evaluating
    """
    home_path = os.path.join(os.getcwd(), data_path)
    data = [os.path.join(home_path, name) for name in os.listdir(data_path) if name.endswith("mindrecord")]
    print(data)
    dataset = ds.MindDataset(data, columns_list=["input_ids"], shuffle=True, num_shards=device_num, shard_id=rank)
    type_cast_op = C.TypeCast(mstype.int32)
    dataset = dataset.map(input_columns="input_ids", operations=type_cast_op)
    dataset = dataset.batch(batch_size, drop_remainder=drop)
    dataset = dataset.repeat(1)
    return dataset
Code example #4
def test_cv_minddataset_pk_sample_basic(add_and_remove_cv_file):
    """tutorial for cv minderdataset."""
    columns_list = ["data", "file_name", "label"]
    num_readers = 4
    sampler = ds.PKSampler(2)
    data_set = ds.MindDataset(CV_FILE_NAME + "0",
                              columns_list,
                              num_readers,
                              sampler=sampler)

    assert data_set.get_dataset_size() == 6
    num_iter = 0
    for item in data_set.create_dict_iterator():
        logger.info(
            "-------------- cv reader basic: {} ------------------------".
            format(num_iter))
        logger.info("-------------- item[file_name]: \
                {}------------------------".format("".join(
            [chr(x) for x in item["file_name"]])))
        logger.info(
            "-------------- item[label]: {} ----------------------------".
            format(item["label"]))
        num_iter += 1
Code example #5
def test_cv_minddataset_reader_basic_padded_samples(add_and_remove_cv_file):
    """tutorial for cv minderdataset."""
    columns_list = ["label", "file_name", "data"]

    data = get_data(CV_DIR_NAME)
    padded_sample = data[0]
    padded_sample['label'] = -1
    padded_sample['file_name'] = 'dummy.jpg'
    num_readers = 4
    data_set = ds.MindDataset(CV_FILE_NAME + "0",
                              columns_list,
                              num_readers,
                              padded_sample=padded_sample,
                              num_padded=5)
    assert data_set.get_dataset_size() == 15
    num_iter = 0
    num_padded_iter = 0
    for item in data_set.create_dict_iterator():
        logger.info(
            "-------------- cv reader basic: {} ------------------------".
            format(num_iter))
        logger.info(
            "-------------- item[file_name]: {} ------------------------".
            format(item["file_name"]))
        logger.info(
            "-------------- item[label]: {} ----------------------------".
            format(item["label"]))
        if item['label'] == -1:
            num_padded_iter += 1
            assert item['file_name'] == bytes(padded_sample['file_name'],
                                              encoding='utf8')
            assert item['label'] == padded_sample['label']
            assert (item['data'] == np.array(list(
                padded_sample['data']))).all()
        num_iter += 1
    assert num_padded_iter == 5
    assert num_iter == 15
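Padding is typically used so that the total row count divides evenly across shards (the distributed partition tests further below rely on this). A small self-contained sketch of that calculation, with placeholder names raw_count and shard_count:

def compute_num_padded(raw_count, shard_count):
    # Rows to append so that every shard receives the same number of rows.
    remainder = raw_count % shard_count
    return 0 if remainder == 0 else shard_count - remainder

# e.g. 10 raw rows split across 4 shards need 2 padded rows (12 / 4 = 3 per shard).
assert compute_num_padded(10, 4) == 2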
Code example #6
File: dataset.py Project: wangbixing/mindspore
def create_dataset_eval(mindrecord_file_pos, config):
    """
    create an eval dataset

    Args:
        mindrecord_file_pos(string): mindrecord file for positive samples.
        config(dict): config of dataset.

    Returns:
        dataset
    """
    rank_size = int(os.getenv("RANK_SIZE", '1'))
    rank_id = int(os.getenv("RANK_ID", '0'))
    decode = C.Decode()

    data_set = ds.MindDataset(mindrecord_file_pos,
                              columns_list=["image", "label"],
                              num_parallel_workers=1,
                              num_shards=rank_size,
                              shard_id=rank_id,
                              shuffle=False)
    data_set = data_set.map(operations=decode,
                            input_columns=["image"],
                            num_parallel_workers=8)

    global image_height
    global image_width
    image_height = config.im_size_h
    image_width = config.im_size_w
    data_set = data_set.map(operations=resize_image,
                            input_columns=["image", "label"],
                            num_parallel_workers=config.work_nums,
                            python_multiprocessing=False)
    # apply batch operations
    data_set = data_set.batch(1, drop_remainder=True)

    return data_set
Code example #7
File: create_dataset.py Project: xyg320/mindspore
def create_dataset(data_file):
    """create MindDataset"""
    num_readers = 4
    data_set = ds.MindDataset(dataset_file=data_file,
                              num_parallel_workers=num_readers,
                              shuffle=True)
    index = 0
    for item in data_set.create_dict_iterator():
        # print("example {}: {}".format(index, item))
        print("example {}: input_ids: {}".format(index, item['input_ids']))
        print("example {}: input_mask: {}".format(index, item['input_mask']))
        print("example {}: segment_ids: {}".format(index, item['segment_ids']))
        print("example {}: masked_lm_positions: {}".format(
            index, item['masked_lm_positions']))
        print("example {}: masked_lm_ids: {}".format(index,
                                                     item['masked_lm_ids']))
        print("example {}: masked_lm_weights: {}".format(
            index, item['masked_lm_weights']))
        print("example {}: next_sentence_labels: {}".format(
            index, item['next_sentence_labels']))
        index += 1
        if index % 1000 == 0:
            print("read rows: {}".format(index))
    print("total rows: {}".format(index))
Code example #8
def test_cv_minddataset_repeat_reshuffle(add_and_remove_cv_file):
    """tutorial for cv minddataset."""
    columns_list = ["data", "label"]
    num_readers = 4
    data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers)
    decode_op = vision.Decode()
    data_set = data_set.map(input_columns=["data"], operations=decode_op, num_parallel_workers=2)
    resize_op = vision.Resize((32, 32), interpolation=Inter.LINEAR)
    data_set = data_set.map(input_columns="data", operations=resize_op, num_parallel_workers=2)
    data_set = data_set.batch(2)
    data_set = data_set.repeat(2)
    num_iter = 0
    labels = []
    for item in data_set.create_dict_iterator():
        logger.info("-------------- get dataset size {} -----------------".format(num_iter))
        logger.info("-------------- item[label]: {} ---------------------".format(item["label"]))
        logger.info("-------------- item[data]: {} ----------------------".format(item["data"]))
        num_iter += 1
        labels.append(item["label"])
    assert num_iter == 10
    logger.info("repeat shuffle: {}".format(labels))
    assert len(labels) == 10
    # `labels` holds numpy arrays, so compare the two repeat passes as plain lists;
    # the reshuffle between repeats should change the batch order.
    first_pass = [np.array(x).tolist() for x in labels[0:5]]
    second_pass = [np.array(x).tolist() for x in labels[5:10]]
    assert first_pass != second_pass
Code example #9
File: test_minddataset.py Project: zimaxeg/mindspore
def test_cv_minddataset_blockreader_tutorial(add_and_remove_cv_file):
    """tutorial for cv minddataset."""
    columns_list = ["data", "label"]
    num_readers = 4
    data_set = ds.MindDataset(CV_FILE_NAME + "0",
                              columns_list,
                              num_readers,
                              block_reader=True)
    assert data_set.get_dataset_size() == 10
    repeat_num = 2
    data_set = data_set.repeat(repeat_num)
    num_iter = 0
    for item in data_set.create_dict_iterator():
        logger.info(
            "-------------- block reader repeat tow {} -----------------".
            format(num_iter))
        logger.info(
            "-------------- item[label]: {} ----------------------------".
            format(item["label"]))
        logger.info(
            "-------------- item[data]: {} -----------------------------".
            format(item["data"]))
        num_iter += 1
    assert num_iter == 20
Code example #10
def test_cv_lack_mindrecord():
    """tutorial for cv minderdataset."""
    columns_list = ["data", "file_name", "label"]
    num_readers = 4
    with pytest.raises(Exception, match="does not exist or permission denied"):
        _ = ds.MindDataset("no_exist.mindrecord", columns_list, num_readers)
Code example #11
File: dataset.py Project: yrpang/mindspore
def create_train_dataset(mindrecord_file, batch_size=1, shard_id=0, num_shard=1, num_parallel_workers=4):
    data_set = ds.MindDataset(mindrecord_file, columns_list=["lr", "hr"], num_shards=num_shard,
                              shard_id=shard_id, num_parallel_workers=num_parallel_workers, shuffle=True)
    data_set = data_set.batch(batch_size, drop_remainder=True)
    return data_set
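A hypothetical way to exercise the helper above; the file name "sr_train.mindrecord" is a placeholder and the single-device defaults are kept.

train_ds = create_train_dataset("sr_train.mindrecord", batch_size=16)
print("batches per epoch:", train_ds.get_dataset_size())
for lr, hr in train_ds.create_tuple_iterator(output_numpy=True):
    # lr/hr are the low- and high-resolution image batches named in columns_list.
    print(lr.shape, hr.shape)
    break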
Code example #12
File: pet_dataset.py Project: yrpang/mindspore
def create_dataset(dataset_path,
                   do_train,
                   config,
                   platform,
                   repeat_num=1,
                   batch_size=100):
    """
    create a train or eval dataset

    Args:
        dataset_path(string): the path of dataset.
        do_train(bool): whether dataset is used for train or eval.
        config: config object providing image settings such as image_height.
        platform(string): the device target, "Ascend" or "GPU".
        repeat_num(int): the repeat times of dataset. Default: 1
        batch_size(int): the batch size of dataset. Default: 100

    Returns:
        dataset
    """
    if platform == "Ascend":
        rank_size = int(os.getenv("RANK_SIZE"))
        rank_id = int(os.getenv("RANK_ID"))
        if rank_size == 1:
            data_set = ds.MindDataset(dataset_path,
                                      num_parallel_workers=8,
                                      shuffle=True)
        else:
            data_set = ds.MindDataset(dataset_path,
                                      num_parallel_workers=8,
                                      shuffle=True,
                                      num_shards=rank_size,
                                      shard_id=rank_id)
    elif platform == "GPU":
        if do_train:
            from mindspore.communication.management import get_rank, get_group_size
            data_set = ds.MindDataset(dataset_path,
                                      num_parallel_workers=8,
                                      shuffle=True,
                                      num_shards=get_group_size(),
                                      shard_id=get_rank())
        else:
            data_set = ds.MindDataset(dataset_path,
                                      num_parallel_workers=8,
                                      shuffle=False)
    else:
        raise ValueError("Unsupported platform.")

    resize_height = config.image_height
    buffer_size = 1000

    # define map operations
    resize_crop_op = C.RandomCropDecodeResize(resize_height,
                                              scale=(0.08, 1.0),
                                              ratio=(0.75, 1.333))
    horizontal_flip_op = C.RandomHorizontalFlip(prob=0.5)

    color_op = C.RandomColorAdjust(brightness=0.4,
                                   contrast=0.4,
                                   saturation=0.4)
    rescale_op = C.Rescale(1 / 255.0, 0)
    normalize_op = C.Normalize(mean=[0.485, 0.456, 0.406],
                               std=[0.229, 0.224, 0.225])
    change_swap_op = C.HWC2CHW()

    # define python operations
    decode_p = P.Decode()
    resize_p = P.Resize(256, interpolation=Inter.BILINEAR)
    center_crop_p = P.CenterCrop(224)
    totensor = P.ToTensor()
    normalize_p = P.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    composeop = P2.Compose(
        [decode_p, resize_p, center_crop_p, totensor, normalize_p])
    if do_train:
        trans = [
            resize_crop_op, horizontal_flip_op, color_op, rescale_op,
            normalize_op, change_swap_op
        ]
    else:
        trans = composeop
    type_cast_op = C2.TypeCast(mstype.int32)

    data_set = data_set.map(input_columns="image",
                            operations=trans,
                            num_parallel_workers=8)
    data_set = data_set.map(input_columns="label_list",
                            operations=type_cast_op,
                            num_parallel_workers=8)

    # apply shuffle operations
    data_set = data_set.shuffle(buffer_size=buffer_size)

    # apply batch operations
    data_set = data_set.batch(batch_size, drop_remainder=True)

    # apply dataset repeat operation
    data_set = data_set.repeat(repeat_num)

    return data_set
Code example #13
def test_cv_minddataset_split_sharding(add_and_remove_cv_file):
    data = get_data(CV_DIR_NAME, True)
    columns_list = ["data", "file_name", "label"]
    num_readers = 4
    d = ds.MindDataset(CV_FILE_NAME + "0",
                       columns_list,
                       num_readers,
                       shuffle=False)
    # should set seed to avoid data overlap
    ds.config.set_seed(111)
    d1, d2 = d.split([0.8, 0.2])
    assert d.get_dataset_size() == 10
    assert d1.get_dataset_size() == 8
    assert d2.get_dataset_size() == 2
    distributed_sampler = ds.DistributedSampler(2, 0)
    d1.use_sampler(distributed_sampler)
    assert d1.get_dataset_size() == 4

    num_iter = 0
    d1_shard1 = []
    for item in d1.create_dict_iterator():
        logger.info(
            "-------------- item[data]: {}  -----------------------------".
            format(item["data"]))
        logger.info(
            "-------------- item[file_name]: {} ------------------------".
            format(item["file_name"]))
        logger.info(
            "-------------- item[label]: {} ----------------------------".
            format(item["label"]))
        num_iter += 1
        d1_shard1.append(item['file_name'])
    assert num_iter == 4
    assert d1_shard1 != [x['file_name'] for x in data[0:4]]

    distributed_sampler = ds.DistributedSampler(2, 1)
    d1.use_sampler(distributed_sampler)
    assert d1.get_dataset_size() == 4

    d1s = d1.repeat(3)
    epoch1_dataset = []
    epoch2_dataset = []
    epoch3_dataset = []
    num_iter = 0
    for item in d1s.create_dict_iterator():
        logger.info(
            "-------------- item[data]: {}  -----------------------------".
            format(item["data"]))
        logger.info(
            "-------------- item[file_name]: {} ------------------------".
            format(item["file_name"]))
        logger.info(
            "-------------- item[label]: {} ----------------------------".
            format(item["label"]))
        num_iter += 1
        if num_iter <= 4:
            epoch1_dataset.append(item['file_name'])
        elif num_iter <= 8:
            epoch2_dataset.append(item['file_name'])
        else:
            epoch3_dataset.append(item['file_name'])
    assert len(epoch1_dataset) == 4
    assert len(epoch2_dataset) == 4
    assert len(epoch3_dataset) == 4
    inter_dataset = [x for x in d1_shard1 if x in epoch1_dataset]
    assert inter_dataset == []  # intersection of d1's shard1 and d1's shard2
    assert epoch1_dataset not in (epoch2_dataset, epoch3_dataset)
    assert epoch2_dataset not in (epoch1_dataset, epoch3_dataset)
    assert epoch3_dataset not in (epoch1_dataset, epoch2_dataset)

    epoch1_dataset.sort()
    epoch2_dataset.sort()
    epoch3_dataset.sort()
    assert epoch1_dataset != epoch2_dataset
    assert epoch2_dataset != epoch3_dataset
    assert epoch3_dataset != epoch1_dataset
Code example #14
def _load_dataset(input_files, batch_size, sink_mode=False,
                  rank_size=1, rank_id=0, shuffle=True, drop_remainder=True,
                  is_translate=False):
    """
    Load dataset according to passed in params.

    Args:
        input_files (list): Data files.
        batch_size (int): Batch size.
        sink_mode (bool): Whether enable sink mode.
        rank_size (int): Rank size.
        rank_id (int): Rank id.
        shuffle (bool): Whether shuffle dataset.
        drop_remainder (bool): Whether drop the last possibly incomplete batch.
        is_translate (bool): Whether translate the text.

    Returns:
        Dataset, dataset instance.
    """
    if not input_files:
        raise FileNotFoundError("Require at least one dataset.")

    if not isinstance(sink_mode, bool):
        raise ValueError("`sink` must be type of bool.")

    for datafile in input_files:
        print(f" | Loading {datafile}.")

    if not is_translate:
        data_set = ds.MindDataset(
            input_files, columns_list=[
                "src", "src_padding",
                "prev_opt",
                "target", "tgt_padding"
            ], shuffle=False, num_shards=rank_size, shard_id=rank_id,
            num_parallel_workers=8
        )

        ori_dataset_size = data_set.get_dataset_size()
        print(f" | Dataset size: {ori_dataset_size}.")
        if shuffle:
            data_set = data_set.shuffle(buffer_size=ori_dataset_size // 20)
        type_cast_op = deC.TypeCast(mstype.int32)
        data_set = data_set.map(input_columns="src", operations=type_cast_op, num_parallel_workers=8)
        data_set = data_set.map(input_columns="src_padding", operations=type_cast_op, num_parallel_workers=8)
        data_set = data_set.map(input_columns="prev_opt", operations=type_cast_op, num_parallel_workers=8)
        data_set = data_set.map(input_columns="target", operations=type_cast_op, num_parallel_workers=8)
        data_set = data_set.map(input_columns="tgt_padding", operations=type_cast_op, num_parallel_workers=8)

        data_set = data_set.rename(
            input_columns=["src",
                           "src_padding",
                           "prev_opt",
                           "target",
                           "tgt_padding"],
            output_columns=["source_eos_ids",
                            "source_eos_mask",
                            "target_sos_ids",
                            "target_eos_ids",
                            "target_eos_mask"]
        )
        data_set = data_set.batch(batch_size, drop_remainder=drop_remainder)
    else:
        data_set = ds.MindDataset(
            input_files, columns_list=[
                "src", "src_padding"
            ],
            shuffle=False, num_shards=rank_size, shard_id=rank_id,
            num_parallel_workers=8
        )

        ori_dataset_size = data_set.get_dataset_size()
        print(f" | Dataset size: {ori_dataset_size}.")
        if shuffle:
            data_set = data_set.shuffle(buffer_size=ori_dataset_size // 20)
        type_cast_op = deC.TypeCast(mstype.int32)
        data_set = data_set.map(input_columns="src", operations=type_cast_op, num_parallel_workers=8)
        data_set = data_set.map(input_columns="src_padding", operations=type_cast_op, num_parallel_workers=8)

        data_set = data_set.rename(
            input_columns=["src",
                           "src_padding"],
            output_columns=["source_eos_ids",
                            "source_eos_mask"]
        )
        data_set = data_set.batch(batch_size, drop_remainder=drop_remainder)

    return data_set
Code example #15
def create_dataset2(dataset_path, do_train=True, repeat_num=1, batch_size=32, target="gpu", rank=0, size=1):
    """
    create a train or eval imagenet2012 dataset for resnet50

    Args:
        dataset_path(string): the path of dataset.
        do_train(bool): whether dataset is used for train or eval.
        repeat_num(int): the repeat times of dataset. Default: 1
        batch_size(int): the batch size of dataset. Default: 32
        target(str): the device target. Default: "gpu"
        rank(int): rank id used on the non-Ascend branch. Default: 0
        size(int): device number used on the non-Ascend branch. Default: 1

    Returns:
        dataset
    """
    if target == "Ascend":
        device_num, rank_id = _get_rank_info()
    else:
        init()
        rank_id = rank
        device_num = size

    file_list = [os.path.join(dataset_path, f'train-{num:05d}-of-01024') for num in range(1024)]
    if device_num == 1:
        ds = msds.MindDataset(dataset_file=file_list, num_parallel_workers=8, shuffle=True)
    else:
        ds = msds.MindDataset(dataset_file=file_list, num_parallel_workers=8, shuffle=True,
                              num_shards=device_num, shard_id=rank_id)

    image_size = 224
    mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
    std = [0.229 * 255, 0.224 * 255, 0.225 * 255]

    # define map operations
    if do_train:
        trans = [
            C.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
            C.RandomHorizontalFlip(prob=0.5),
            C.Normalize(mean=mean, std=std),
            C.HWC2CHW(),
            C2.TypeCast(mstype.float16)
        ]
    else:
        trans = [
            C.Decode(),
            C.Resize(256),
            C.CenterCrop(image_size),
            C.Normalize(mean=mean, std=std),
            C.HWC2CHW()
        ]

    type_cast_op = C2.TypeCast(mstype.int32)

    ds = ds.map(operations=trans, input_columns="image", num_parallel_workers=8)
    ds = ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=8)

    # apply batch operations
    ds = ds.batch(batch_size, drop_remainder=True)

    # apply dataset repeat operation
    ds = ds.repeat(repeat_num)

    return ds
Code example #16
def test_cv_minddataset_reader_multi_image_and_ndarray_tutorial():
    writer = FileWriter(CV_FILE_NAME, FILES_NUM)
    cv_schema_json = {
        "id": {
            "type": "int32"
        },
        "image_0": {
            "type": "bytes"
        },
        "image_2": {
            "type": "bytes"
        },
        "image_3": {
            "type": "bytes"
        },
        "image_4": {
            "type": "bytes"
        },
        "input_mask": {
            "type": "int32",
            "shape": [-1]
        },
        "segments": {
            "type": "float32",
            "shape": [2, 3]
        }
    }
    writer.add_schema(cv_schema_json, "two_images_schema")
    with open("../data/mindrecord/testImageNetData/images/image_00010.jpg",
              "rb") as file_reader:
        img_data = file_reader.read()
    ndarray_1 = np.array([1, 2, 3, 4, 5], np.int32)
    ndarray_2 = np.array(([2, 3, 1], [7, 9, 0]), np.float32)
    data = []
    for i in range(5):
        item = {
            "id": i,
            "image_0": img_data,
            "image_2": img_data,
            "image_3": img_data,
            "image_4": img_data,
            "input_mask": ndarray_1,
            "segments": ndarray_2
        }
        data.append(item)
    writer.write_raw_data(data)
    writer.commit()
    assert os.path.exists(CV_FILE_NAME)
    assert os.path.exists(CV_FILE_NAME + ".db")
    """tutorial for minderdataset."""
    columns_list = [
        "id", "image_0", "image_2", "image_3", "image_4", "input_mask",
        "segments"
    ]
    num_readers = 1
    data_set = ds.MindDataset(CV_FILE_NAME, columns_list, num_readers)
    assert data_set.get_dataset_size() == 5
    num_iter = 0
    for item in data_set.create_dict_iterator():
        assert len(item) == 7
        logger.info("item: {}".format(item))
        assert item["image_0"].dtype == np.uint8
        assert (item["image_0"] == item["image_2"]).all()
        assert (item["image_3"] == item["image_4"]).all()
        assert (item["image_0"] == item["image_4"]).all()
        assert item["image_2"].dtype == np.uint8
        assert item["image_3"].dtype == np.uint8
        assert item["image_4"].dtype == np.uint8
        assert item["id"].dtype == np.int32
        assert item["input_mask"].shape == (5, )
        assert item["input_mask"].dtype == np.int32
        assert item["segments"].shape == (2, 3)
        assert item["segments"].dtype == np.float32
        num_iter += 1
    assert num_iter == 5

    if os.path.exists("{}".format(CV_FILE_NAME + ".db")):
        os.remove(CV_FILE_NAME + ".db")
    if os.path.exists("{}".format(CV_FILE_NAME)):
        os.remove(CV_FILE_NAME)
Code example #17
File: eval.py Project: peng-zhihui/mindspore
def val(args):
    '''eval'''
    print('=============yolov3 start evaluating==================')

    # logger
    args.batch_size = config.batch_size
    args.input_shape = config.input_shape
    args.result_path = config.result_path
    args.conf_thresh = config.conf_thresh
    args.nms_thresh = config.nms_thresh

    context.set_auto_parallel_context(parallel_mode=ParallelMode.STAND_ALONE,
                                      device_num=args.world_size,
                                      gradients_mean=True)
    mindrecord_path = args.mindrecord_path
    print('Loading data from {}'.format(mindrecord_path))

    num_classes = config.num_classes
    if num_classes > 1:
        raise NotImplementedError(
            'num_classes > 1: Yolov3 postprocess not implemented!')

    anchors = config.anchors
    anchors_mask = config.anchors_mask
    num_anchors_list = [len(x) for x in anchors_mask]

    reduction_0 = 64.0
    reduction_1 = 32.0
    reduction_2 = 16.0
    labels = ['face']
    classes = {0: 'face'}

    # dataloader
    ds = de.MindDataset(
        mindrecord_path + "0",
        columns_list=["image", "annotation", "image_name", "image_size"])

    single_scale_trans = SingleScaleTrans(resize=args.input_shape)

    ds = ds.batch(
        args.batch_size,
        per_batch_map=single_scale_trans,
        input_columns=["image", "annotation", "image_name", "image_size"],
        num_parallel_workers=8)

    args.steps_per_epoch = ds.get_dataset_size()

    # backbone
    network = backbone_HwYolov3(num_classes, num_anchors_list, args)

    # load pretrain model
    if os.path.isfile(args.pretrained):
        param_dict = load_checkpoint(args.pretrained)
        param_dict_new = {}
        for key, values in param_dict.items():
            if key.startswith('moments.'):
                continue
            elif key.startswith('network.'):
                param_dict_new[key[8:]] = values
            else:
                param_dict_new[key] = values
        load_param_into_net(network, param_dict_new)
        print('load model {} success'.format(args.pretrained))
    else:
        print(
            'load model {} failed, please check the path of model, evaluating end'
            .format(args.pretrained))
        exit(0)

    ds = ds.repeat(1)

    det = {}
    img_size = {}
    img_anno = {}

    model_name = args.pretrained.split('/')[-1].replace('.ckpt', '')
    result_path = os.path.join(args.result_path, model_name)
    if not os.path.isdir(result_path):
        os.makedirs(result_path, exist_ok=True)

    # result file
    ret_files_set = {
        'face': os.path.join(result_path, 'comp4_det_test_face_rm5050.txt'),
    }

    test_net = BuildTestNetwork(network, reduction_0, reduction_1, reduction_2,
                                anchors, anchors_mask, num_classes, args)

    print('conf_thresh:', args.conf_thresh)

    eval_times = 0

    for data in ds.create_tuple_iterator(output_numpy=True):
        batch_images = data[0]
        batch_labels = data[1]
        batch_image_name = data[2]
        batch_image_size = data[3]
        eval_times += 1

        img_tensor = Tensor(batch_images, mstype.float32)

        dets = []
        tdets = []

        coords_0, cls_scores_0, coords_1, cls_scores_1, coords_2, cls_scores_2 = test_net(
            img_tensor)

        boxes_0, boxes_1, boxes_2 = get_bounding_boxes(
            coords_0, cls_scores_0, coords_1, cls_scores_1, coords_2,
            cls_scores_2, args.conf_thresh, args.input_shape, num_classes)

        converted_boxes_0, converted_boxes_1, converted_boxes_2 = tensor_to_brambox(
            boxes_0, boxes_1, boxes_2, args.input_shape, labels)

        tdets.append(converted_boxes_0)
        tdets.append(converted_boxes_1)
        tdets.append(converted_boxes_2)

        batch = len(tdets[0])
        for b in range(batch):
            single_dets = []
            for op in range(3):
                single_dets.extend(tdets[op][b])
            dets.append(single_dets)

        det.update({
            batch_image_name[k].decode('UTF-8'): v
            for k, v in enumerate(dets)
        })
        img_size.update({
            batch_image_name[k].decode('UTF-8'): v
            for k, v in enumerate(batch_image_size)
        })
        img_anno.update({
            batch_image_name[k].decode('UTF-8'): v
            for k, v in enumerate(batch_labels)
        })

    print('eval times:', eval_times)
    print('batch size: ', args.batch_size)

    netw, neth = args.input_shape
    reorg_dets = voc_wrapper.reorg_detection(det, netw, neth, img_size)
    voc_wrapper.gen_results(reorg_dets, result_path, img_size, args.nms_thresh)

    # compute mAP
    ground_truth = parse_gt_from_anno(img_anno, classes)

    ret_list = parse_rets(ret_files_set)
    iou_thr = 0.5
    evaluate = calc_recall_presicion_ap(ground_truth, ret_list, iou_thr)

    aps_str = ''
    for cls in evaluate:
        per_line, = plt.plot(evaluate[cls]['recall'],
                             evaluate[cls]['presicion'], 'b-')
        per_line.set_label('%s:AP=%.3f' % (cls, evaluate[cls]['ap']))
        aps_str += '_%s_AP_%.3f' % (cls, evaluate[cls]['ap'])
        plt.plot([i / 1000.0 for i in range(1, 1001)],
                 [i / 1000.0 for i in range(1, 1001)], 'y--')
        plt.axis([0, 1.2, 0, 1.2])
        plt.xlabel('recall')
        plt.ylabel('precision')
        plt.grid()

        plt.legend()
        plt.title('PR')

    # save mAP
    ap_save_path = os.path.join(
        result_path,
        result_path.replace('/', '_') + aps_str + '.png')
    print('Saving {}'.format(ap_save_path))
    plt.savefig(ap_save_path)

    print('=============yolov3 evaluating finished==================')
Code example #18
File: eval.py Project: peixinhou/mindspore
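# Fragment: the lines below are the middle of val(args) from eval.py, starting inside
# the num_classes check; see code example #17 above for the complete function.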
        raise NotImplementedError(
            'num_classes > 1: Yolov3 postprocess not implemented!')

    anchors = config.anchors
    anchors_mask = config.anchors_mask
    num_anchors_list = [len(x) for x in anchors_mask]

    reduction_0 = 64.0
    reduction_1 = 32.0
    reduction_2 = 16.0
    labels = ['face']
    classes = {0: 'face'}

    # dataloader
    ds = de.MindDataset(
        mindrecord_path + "0",
        columns_list=["image", "annotation", "image_name", "image_size"])

    single_scale_trans = SingleScaleTrans(resize=args.input_shape)

    ds = ds.batch(
        args.batch_size,
        per_batch_map=single_scale_trans,
        input_columns=["image", "annotation", "image_name", "image_size"],
        num_parallel_workers=8)

    args.steps_per_epoch = ds.get_dataset_size()

    # backbone
    network = backbone_HwYolov3(num_classes, num_anchors_list, args)
Code example #19
File: test_save_op.py Project: yrpang/mindspore
def test_case_02(add_and_remove_cv_file):  # multi-bytes
    data = [{
        "file_name":
        "001.jpg",
        "label":
        43,
        "float32_array":
        np.array([1.2, 2.78, 3.1234, 4.9871, 5.12341], dtype=np.float32),
        "float64_array":
        np.array([
            48.1234556789, 49.3251241431, 50.13514312414, 51.8971298471,
            123414314.2141243, 87.1212122
        ],
                 dtype=np.float64),
        "float32":
        3456.12345,
        "float64":
        1987654321.123456785,
        "source_sos_ids":
        np.array([1, 2, 3, 4, 5], dtype=np.int32),
        "source_sos_mask":
        np.array([6, 7, 8, 9, 10, 11, 12], dtype=np.int64),
        "image1":
        bytes("image1 bytes abc", encoding='UTF-8'),
        "image2":
        bytes("image1 bytes def", encoding='UTF-8'),
        "image3":
        bytes("image1 bytes ghi", encoding='UTF-8'),
        "image4":
        bytes("image1 bytes jkl", encoding='UTF-8'),
        "image5":
        bytes("image1 bytes mno", encoding='UTF-8')
    }, {
        "file_name":
        "002.jpg",
        "label":
        91,
        "float32_array":
        np.array([1.2, 2.78, 4.1234, 4.9871, 5.12341], dtype=np.float32),
        "float64_array":
        np.array([
            48.1234556789, 49.3251241431, 60.13514312414, 51.8971298471,
            123414314.2141243, 87.1212122
        ],
                 dtype=np.float64),
        "float32":
        3456.12445,
        "float64":
        1987654321.123456786,
        "source_sos_ids":
        np.array([11, 2, 3, 4, 5], dtype=np.int32),
        "source_sos_mask":
        np.array([16, 7, 8, 9, 10, 11, 12], dtype=np.int64),
        "image1":
        bytes("image2 bytes abc", encoding='UTF-8'),
        "image2":
        bytes("image2 bytes def", encoding='UTF-8'),
        "image3":
        bytes("image2 bytes ghi", encoding='UTF-8'),
        "image4":
        bytes("image2 bytes jkl", encoding='UTF-8'),
        "image5":
        bytes("image2 bytes mno", encoding='UTF-8')
    }, {
        "file_name":
        "003.jpg",
        "label":
        61,
        "float32_array":
        np.array([1.2, 2.78, 5.1234, 4.9871, 5.12341], dtype=np.float32),
        "float64_array":
        np.array([
            48.1234556789, 49.3251241431, 70.13514312414, 51.8971298471,
            123414314.2141243, 87.1212122
        ],
                 dtype=np.float64),
        "float32":
        3456.12545,
        "float64":
        1987654321.123456787,
        "source_sos_ids":
        np.array([21, 2, 3, 4, 5], dtype=np.int32),
        "source_sos_mask":
        np.array([26, 7, 8, 9, 10, 11, 12], dtype=np.int64),
        "image1":
        bytes("image3 bytes abc", encoding='UTF-8'),
        "image2":
        bytes("image3 bytes def", encoding='UTF-8'),
        "image3":
        bytes("image3 bytes ghi", encoding='UTF-8'),
        "image4":
        bytes("image3 bytes jkl", encoding='UTF-8'),
        "image5":
        bytes("image3 bytes mno", encoding='UTF-8')
    }, {
        "file_name":
        "004.jpg",
        "label":
        29,
        "float32_array":
        np.array([1.2, 2.78, 6.1234, 4.9871, 5.12341], dtype=np.float32),
        "float64_array":
        np.array([
            48.1234556789, 49.3251241431, 80.13514312414, 51.8971298471,
            123414314.2141243, 87.1212122
        ],
                 dtype=np.float64),
        "float32":
        3456.12645,
        "float64":
        1987654321.123456788,
        "source_sos_ids":
        np.array([31, 2, 3, 4, 5], dtype=np.int32),
        "source_sos_mask":
        np.array([36, 7, 8, 9, 10, 11, 12], dtype=np.int64),
        "image1":
        bytes("image4 bytes abc", encoding='UTF-8'),
        "image2":
        bytes("image4 bytes def", encoding='UTF-8'),
        "image3":
        bytes("image4 bytes ghi", encoding='UTF-8'),
        "image4":
        bytes("image4 bytes jkl", encoding='UTF-8'),
        "image5":
        bytes("image4 bytes mno", encoding='UTF-8')
    }, {
        "file_name":
        "005.jpg",
        "label":
        78,
        "float32_array":
        np.array([1.2, 2.78, 7.1234, 4.9871, 5.12341], dtype=np.float32),
        "float64_array":
        np.array([
            48.1234556789, 49.3251241431, 90.13514312414, 51.8971298471,
            123414314.2141243, 87.1212122
        ],
                 dtype=np.float64),
        "float32":
        3456.12745,
        "float64":
        1987654321.123456789,
        "source_sos_ids":
        np.array([41, 2, 3, 4, 5], dtype=np.int32),
        "source_sos_mask":
        np.array([46, 7, 8, 9, 10, 11, 12], dtype=np.int64),
        "image1":
        bytes("image5 bytes abc", encoding='UTF-8'),
        "image2":
        bytes("image5 bytes def", encoding='UTF-8'),
        "image3":
        bytes("image5 bytes ghi", encoding='UTF-8'),
        "image4":
        bytes("image5 bytes jkl", encoding='UTF-8'),
        "image5":
        bytes("image5 bytes mno", encoding='UTF-8')
    }, {
        "file_name":
        "006.jpg",
        "label":
        37,
        "float32_array":
        np.array([1.2, 2.78, 7.1234, 4.9871, 5.12341], dtype=np.float32),
        "float64_array":
        np.array([
            48.1234556789, 49.3251241431, 90.13514312414, 51.8971298471,
            123414314.2141243, 87.1212122
        ],
                 dtype=np.float64),
        "float32":
        3456.12745,
        "float64":
        1987654321.123456789,
        "source_sos_ids":
        np.array([51, 2, 3, 4, 5], dtype=np.int32),
        "source_sos_mask":
        np.array([56, 7, 8, 9, 10, 11, 12], dtype=np.int64),
        "image1":
        bytes("image6 bytes abc", encoding='UTF-8'),
        "image2":
        bytes("image6 bytes def", encoding='UTF-8'),
        "image3":
        bytes("image6 bytes ghi", encoding='UTF-8'),
        "image4":
        bytes("image6 bytes jkl", encoding='UTF-8'),
        "image5":
        bytes("image6 bytes mno", encoding='UTF-8')
    }]
    schema = {
        "file_name": {
            "type": "string"
        },
        "float32_array": {
            "type": "float32",
            "shape": [-1]
        },
        "float64_array": {
            "type": "float64",
            "shape": [-1]
        },
        "float32": {
            "type": "float32"
        },
        "float64": {
            "type": "float64"
        },
        "source_sos_ids": {
            "type": "int32",
            "shape": [-1]
        },
        "source_sos_mask": {
            "type": "int64",
            "shape": [-1]
        },
        "image1": {
            "type": "bytes"
        },
        "image2": {
            "type": "bytes"
        },
        "image3": {
            "type": "bytes"
        },
        "label": {
            "type": "int32"
        },
        "image4": {
            "type": "bytes"
        },
        "image5": {
            "type": "bytes"
        }
    }
    writer = FileWriter(CV_FILE_NAME1, FILES_NUM)
    writer.add_schema(schema, "schema")
    writer.write_raw_data(data)
    writer.commit()

    d1 = ds.MindDataset(CV_FILE_NAME1, None, num_readers, shuffle=False)
    d1.save(CV_FILE_NAME2, FILES_NUM)
    data_value_to_list = []

    for item in data:
        new_data = {}
        new_data['file_name'] = np.asarray(item["file_name"], dtype='S')
        new_data['float32_array'] = item["float32_array"]
        new_data['float64_array'] = item["float64_array"]
        new_data['float32'] = item["float32"]
        new_data['float64'] = item["float64"]
        new_data['source_sos_ids'] = item["source_sos_ids"]
        new_data['source_sos_mask'] = item["source_sos_mask"]
        new_data['label'] = np.asarray(list([item["label"]]), dtype=np.int32)
        new_data['image1'] = np.asarray(list(item["image1"]), dtype=np.uint8)
        new_data['image2'] = np.asarray(list(item["image2"]), dtype=np.uint8)
        new_data['image3'] = np.asarray(list(item["image3"]), dtype=np.uint8)
        new_data['image4'] = np.asarray(list(item["image4"]), dtype=np.uint8)
        new_data['image5'] = np.asarray(list(item["image5"]), dtype=np.uint8)
        data_value_to_list.append(new_data)

    d2 = ds.MindDataset(dataset_file=CV_FILE_NAME2,
                        num_parallel_workers=num_readers,
                        shuffle=False)
    assert d2.get_dataset_size() == 6
    num_iter = 0
    for item in d2.create_dict_iterator(num_epochs=1, output_numpy=True):
        assert len(item) == 13
        for field in item:
            if isinstance(item[field], np.ndarray):
                if item[field].dtype == np.float32:
                    assert (item[field] == np.array(
                        data_value_to_list[num_iter][field],
                        np.float32)).all()
                else:
                    assert (item[field] == data_value_to_list[num_iter][field]
                            ).all()
            else:
                assert item[field] == data_value_to_list[num_iter][field]
        num_iter += 1
    assert num_iter == 6
Code example #20
"""
######################## read mindrecord example ########################
Read a MindRecord file with MindDataset and print every item.
"""
import argparse

import mindspore.dataset as ds

parser = argparse.ArgumentParser(description='Mind record reader')
parser.add_argument('--path',
                    type=str,
                    default="/tmp/cora/mindrecord/cora_mr",
                    help='data file')
args = parser.parse_args()

data_set = ds.MindDataset(args.path)
num_iter = 0
for item in data_set.create_dict_iterator(output_numpy=True):
    print(item)
    num_iter += 1
print("Total items # is {}".format(num_iter))
Code example #21
    def partitions(num_shards, num_padded, dataset_size):
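        # `columns_list`, `num_readers`, `padded_sample` and `repeat_size` are taken from
        # the enclosing test function in the original file; they are not shown in this snippet.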
        num_padded_iter = 0
        num_iter = 0

        epoch_result = [[["" for i in range(dataset_size)]
                         for i in range(repeat_size)]
                        for i in range(num_shards)]

        for partition_id in range(num_shards):
            data_set = ds.MindDataset(NLP_FILE_NAME + "0",
                                      columns_list,
                                      num_readers,
                                      num_shards=num_shards,
                                      shard_id=partition_id,
                                      padded_sample=padded_sample,
                                      num_padded=num_padded)
            assert data_set.get_dataset_size() == dataset_size
            data_set = data_set.repeat(repeat_size)
            inner_num_iter = 0
            for item in data_set.create_dict_iterator(num_epochs=1,
                                                      output_numpy=True):
                logger.info(
                    "-------------- item[id]: {} ------------------------".
                    format(item["id"]))
                logger.info(
                    "-------------- item[rating]: {} --------------------".
                    format(item["rating"]))
                logger.info(
                    "-------------- item[input_ids]: {}, shape: {} -----------------"
                    .format(item["input_ids"], item["input_ids"].shape))
                if item['id'] == bytes('-1', encoding='utf-8'):
                    num_padded_iter += 1
                    assert item['id'] == bytes(padded_sample['id'],
                                               encoding='utf-8')
                    assert (
                        item['input_ids'] == padded_sample['input_ids']).all()
                    assert (item['rating'] == padded_sample['rating']).all()
                # save epoch result
                epoch_result[partition_id][int(
                    inner_num_iter / dataset_size)][inner_num_iter %
                                                    dataset_size] = item["id"]
                num_iter += 1
                inner_num_iter += 1
            assert epoch_result[partition_id][0] not in (
                epoch_result[partition_id][1], epoch_result[partition_id][2])
            assert epoch_result[partition_id][1] not in (
                epoch_result[partition_id][0], epoch_result[partition_id][2])
            assert epoch_result[partition_id][2] not in (
                epoch_result[partition_id][1], epoch_result[partition_id][0])
            if dataset_size > 2:
                epoch_result[partition_id][0].sort()
                epoch_result[partition_id][1].sort()
                epoch_result[partition_id][2].sort()
                assert epoch_result[partition_id][0] != epoch_result[
                    partition_id][1]
                assert epoch_result[partition_id][1] != epoch_result[
                    partition_id][2]
                assert epoch_result[partition_id][2] != epoch_result[
                    partition_id][0]
        assert num_padded_iter == num_padded * repeat_size
        assert num_iter == dataset_size * num_shards * repeat_size
Code example #22
File: dataset.py Project: yrpang/mindspore
def create_ctpn_dataset(mindrecord_file,
                        batch_size=1,
                        repeat_num=1,
                        device_num=1,
                        rank_id=0,
                        is_training=True,
                        num_parallel_workers=12):
    """Creatr ctpn dataset with MindDataset."""
    ds = de.MindDataset(mindrecord_file, columns_list=["image", "annotation"],
                        num_shards=device_num, shard_id=rank_id,
                        num_parallel_workers=num_parallel_workers, shuffle=is_training)
    decode = C.Decode()
    ds = ds.map(operations=decode,
                input_columns=["image"],
                num_parallel_workers=num_parallel_workers)
    compose_map_func = (lambda image, annotation: preprocess_fn(
        image, annotation, is_training))
    hwc_to_chw = C.HWC2CHW()
    normalize_op = C.Normalize((123.675, 116.28, 103.53),
                               (58.395, 57.12, 57.375))
    type_cast0 = CC.TypeCast(mstype.float32)
    type_cast1 = CC.TypeCast(mstype.float16)
    type_cast2 = CC.TypeCast(mstype.int32)
    type_cast3 = CC.TypeCast(mstype.bool_)
    if is_training:
        ds = ds.map(
            operations=compose_map_func,
            input_columns=["image", "annotation"],
            output_columns=[
                "image", "box", "label", "valid_num", "image_shape"
            ],
            column_order=["image", "box", "label", "valid_num", "image_shape"],
            num_parallel_workers=num_parallel_workers,
            python_multiprocessing=True)
        ds = ds.map(operations=[normalize_op, type_cast0],
                    input_columns=["image"],
                    num_parallel_workers=num_parallel_workers,
                    python_multiprocessing=True)
        ds = ds.map(operations=[hwc_to_chw, type_cast1],
                    input_columns=["image"],
                    num_parallel_workers=num_parallel_workers,
                    python_multiprocessing=True)
    else:
        ds = ds.map(
            operations=compose_map_func,
            input_columns=["image", "annotation"],
            output_columns=[
                "image", "box", "label", "valid_num", "image_shape"
            ],
            column_order=["image", "box", "label", "valid_num", "image_shape"],
            num_parallel_workers=8,
            python_multiprocessing=True)

        ds = ds.map(operations=[normalize_op, hwc_to_chw, type_cast1],
                    input_columns=["image"],
                    num_parallel_workers=8)
    # transpose_column from python to c
    ds = ds.map(operations=[type_cast1], input_columns=["image_shape"])
    ds = ds.map(operations=[type_cast1], input_columns=["box"])
    ds = ds.map(operations=[type_cast2], input_columns=["label"])
    ds = ds.map(operations=[type_cast3], input_columns=["valid_num"])
    ds = ds.batch(batch_size, drop_remainder=True)
    ds = ds.repeat(repeat_num)
    return ds
Code example #23
def create_fasterrcnn_dataset(mindrecord_file,
                              batch_size=2,
                              repeat_num=12,
                              device_num=1,
                              rank_id=0,
                              is_training=True,
                              num_parallel_workers=8):
    """Creatr FasterRcnn dataset with MindDataset."""
    ds = de.MindDataset(mindrecord_file,
                        columns_list=["image", "annotation"],
                        num_shards=device_num,
                        shard_id=rank_id,
                        num_parallel_workers=num_parallel_workers,
                        shuffle=is_training)
    decode = C.Decode()
    ds = ds.map(input_columns=["image"], operations=decode)
    compose_map_func = (lambda image, annotation: preprocess_fn(
        image, annotation, is_training))

    hwc_to_chw = C.HWC2CHW()
    normalize_op = C.Normalize((123.675, 116.28, 103.53),
                               (58.395, 57.12, 57.375))
    horizontally_op = C.RandomHorizontalFlip(1)
    type_cast0 = CC.TypeCast(mstype.float32)
    type_cast1 = CC.TypeCast(mstype.float16)
    type_cast2 = CC.TypeCast(mstype.int32)
    type_cast3 = CC.TypeCast(mstype.bool_)

    if is_training:
        ds = ds.map(input_columns=["image", "annotation"],
                    output_columns=[
                        "image", "image_shape", "box", "label", "valid_num"
                    ],
                    columns_order=[
                        "image", "image_shape", "box", "label", "valid_num"
                    ],
                    operations=compose_map_func,
                    num_parallel_workers=4)

        ds = ds.map(input_columns=["image"],
                    operations=[normalize_op, type_cast0],
                    num_parallel_workers=num_parallel_workers)

        flip = (np.random.rand() < config.flip_ratio)
        if flip:
            ds = ds.map(input_columns=["image"],
                        operations=[horizontally_op],
                        num_parallel_workers=num_parallel_workers)
            ds = ds.map(input_columns=[
                "image", "image_shape", "box", "label", "valid_num"
            ],
                        operations=flipped_generation,
                        num_parallel_workers=4)

        # transpose_column from python to c
        ds = ds.map(input_columns=["image"],
                    operations=[hwc_to_chw, type_cast1])
        ds = ds.map(input_columns=["image_shape"], operations=[type_cast1])
        ds = ds.map(input_columns=["box"], operations=[type_cast1])
        ds = ds.map(input_columns=["label"], operations=[type_cast2])
        ds = ds.map(input_columns=["valid_num"], operations=[type_cast3])
        ds = ds.batch(batch_size, drop_remainder=True)
        ds = ds.repeat(repeat_num)
    else:
        ds = ds.map(input_columns=["image", "annotation"],
                    output_columns=[
                        "image", "image_shape", "box", "label", "valid_num"
                    ],
                    columns_order=[
                        "image", "image_shape", "box", "label", "valid_num"
                    ],
                    operations=compose_map_func,
                    num_parallel_workers=num_parallel_workers)
        # transpose_column from python to c
        ds = ds.map(input_columns=["image"],
                    operations=[hwc_to_chw, type_cast1])
        ds = ds.map(input_columns=["image_shape"], operations=[type_cast1])
        ds = ds.map(input_columns=["box"], operations=[type_cast1])
        ds = ds.map(input_columns=["label"], operations=[type_cast2])
        ds = ds.map(input_columns=["valid_num"], operations=[type_cast3])
        ds = ds.batch(batch_size, drop_remainder=True)
        ds = ds.repeat(repeat_num)
    return ds
Code example #24
    def partitions(num_shards, num_padded, dataset_size):
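        # `columns_list`, `num_readers` and `padded_sample` come from the enclosing test
        # function in the original file; they are not shown in this snippet.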
        repeat_size = 5
        num_padded_iter = 0
        num_iter = 0
        for partition_id in range(num_shards):
            epoch1_shuffle_result = []
            epoch2_shuffle_result = []
            epoch3_shuffle_result = []
            epoch4_shuffle_result = []
            epoch5_shuffle_result = []
            data_set = ds.MindDataset(CV_FILE_NAME + "0",
                                      columns_list,
                                      num_readers,
                                      num_shards=num_shards,
                                      shard_id=partition_id,
                                      padded_sample=padded_sample,
                                      num_padded=num_padded)
            assert data_set.get_dataset_size() == dataset_size
            data_set = data_set.repeat(repeat_size)
            local_index = 0
            for item in data_set.create_dict_iterator():
                logger.info(
                    "-------------- partition : {} ------------------------".
                    format(partition_id))
                logger.info(
                    "-------------- len(item[data]): {} ------------------------"
                    .format(len(item["data"])))
                logger.info(
                    "-------------- item[data]: {} -----------------------------"
                    .format(item["data"]))
                logger.info(
                    "-------------- item[file_name]: {} ------------------------"
                    .format(item["file_name"]))
                logger.info(
                    "-------------- item[label]: {} -----------------------".
                    format(item["label"]))
                if item['label'] == -2:
                    num_padded_iter += 1
                    assert item['file_name'] == bytes(
                        padded_sample['file_name'], encoding='utf8')
                    assert item['label'] == padded_sample['label']
                    assert (item['data'] == np.array(
                        list(padded_sample['data']))).all()
                if local_index < dataset_size:
                    epoch1_shuffle_result.append(item["file_name"])
                elif local_index < dataset_size * 2:
                    epoch2_shuffle_result.append(item["file_name"])
                elif local_index < dataset_size * 3:
                    epoch3_shuffle_result.append(item["file_name"])
                elif local_index < dataset_size * 4:
                    epoch4_shuffle_result.append(item["file_name"])
                elif local_index < dataset_size * 5:
                    epoch5_shuffle_result.append(item["file_name"])
                local_index += 1
                num_iter += 1
            assert len(epoch1_shuffle_result) == dataset_size
            assert len(epoch2_shuffle_result) == dataset_size
            assert len(epoch3_shuffle_result) == dataset_size
            assert len(epoch4_shuffle_result) == dataset_size
            assert len(epoch5_shuffle_result) == dataset_size
            assert local_index == dataset_size * repeat_size

            # When dataset_size is 2, shuffled epochs are too likely to repeat the same order, so skip the comparison
            if dataset_size > 2:
                assert epoch1_shuffle_result != epoch2_shuffle_result
                assert epoch2_shuffle_result != epoch3_shuffle_result
                assert epoch3_shuffle_result != epoch4_shuffle_result
                assert epoch4_shuffle_result != epoch5_shuffle_result
        assert num_padded_iter == num_padded * repeat_size
        assert num_iter == dataset_size * num_shards * repeat_size
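
Code example #24 relies on padded_sample/num_padded so that the padded total divides evenly across the shards. A minimal standalone sketch of those parameters, assuming the same MindRecord file and column layout; the padded values and shard numbers here are illustrative:

columns_list = ["data", "file_name", "label"]
padded_sample = {"file_name": "padded.jpg", "label": -2,
                 "data": bytes("padded bytes", encoding="utf8")}
# pad with 2 extra samples so the total divides evenly across 4 shards
data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, 4,
                          num_shards=4, shard_id=0,
                          padded_sample=padded_sample, num_padded=2)
# each shard sees (real samples + num_padded) / num_shards rows
print(data_set.get_dataset_size())
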
Code example #25
File: test_save_op.py  Project: yrpang/mindspore
def test_case_00(add_and_remove_cv_file):  # only bin data
    data = [{
        "image1": bytes("image1 bytes abc", encoding='UTF-8'),
        "image2": bytes("image1 bytes def", encoding='UTF-8'),
        "image3": bytes("image1 bytes ghi", encoding='UTF-8'),
        "image4": bytes("image1 bytes jkl", encoding='UTF-8'),
        "image5": bytes("image1 bytes mno", encoding='UTF-8')
    }, {
        "image1": bytes("image2 bytes abc", encoding='UTF-8'),
        "image2": bytes("image2 bytes def", encoding='UTF-8'),
        "image3": bytes("image2 bytes ghi", encoding='UTF-8'),
        "image4": bytes("image2 bytes jkl", encoding='UTF-8'),
        "image5": bytes("image2 bytes mno", encoding='UTF-8')
    }, {
        "image1": bytes("image3 bytes abc", encoding='UTF-8'),
        "image2": bytes("image3 bytes def", encoding='UTF-8'),
        "image3": bytes("image3 bytes ghi", encoding='UTF-8'),
        "image4": bytes("image3 bytes jkl", encoding='UTF-8'),
        "image5": bytes("image3 bytes mno", encoding='UTF-8')
    }, {
        "image1": bytes("image5 bytes abc", encoding='UTF-8'),
        "image2": bytes("image5 bytes def", encoding='UTF-8'),
        "image3": bytes("image5 bytes ghi", encoding='UTF-8'),
        "image4": bytes("image5 bytes jkl", encoding='UTF-8'),
        "image5": bytes("image5 bytes mno", encoding='UTF-8')
    }, {
        "image1": bytes("image6 bytes abc", encoding='UTF-8'),
        "image2": bytes("image6 bytes def", encoding='UTF-8'),
        "image3": bytes("image6 bytes ghi", encoding='UTF-8'),
        "image4": bytes("image6 bytes jkl", encoding='UTF-8'),
        "image5": bytes("image6 bytes mno", encoding='UTF-8')
    }]
    schema = {
        "image1": {
            "type": "bytes"
        },
        "image2": {
            "type": "bytes"
        },
        "image3": {
            "type": "bytes"
        },
        "image4": {
            "type": "bytes"
        },
        "image5": {
            "type": "bytes"
        }
    }
    writer = FileWriter(CV_FILE_NAME1, FILES_NUM)
    writer.add_schema(schema, "schema")
    writer.write_raw_data(data)
    writer.commit()

    d1 = ds.MindDataset(CV_FILE_NAME1, None, num_readers, shuffle=False)
    d1.save(CV_FILE_NAME2, FILES_NUM)
    data_value_to_list = []

    for item in data:
        new_data = {}
        new_data['image1'] = np.asarray(list(item["image1"]), dtype=np.uint8)
        new_data['image2'] = np.asarray(list(item["image2"]), dtype=np.uint8)
        new_data['image3'] = np.asarray(list(item["image3"]), dtype=np.uint8)
        new_data['image4'] = np.asarray(list(item["image4"]), dtype=np.uint8)
        new_data['image5'] = np.asarray(list(item["image5"]), dtype=np.uint8)
        data_value_to_list.append(new_data)

    d2 = ds.MindDataset(dataset_file=CV_FILE_NAME2,
                        num_parallel_workers=num_readers,
                        shuffle=False)
    assert d2.get_dataset_size() == 5
    num_iter = 0
    for item in d2.create_dict_iterator(num_epochs=1, output_numpy=True):
        assert len(item) == 5
        for field in item:
            if isinstance(item[field], np.ndarray):
                assert (
                    item[field] == data_value_to_list[num_iter][field]).all()
            else:
                assert item[field] == data_value_to_list[num_iter][field]
        num_iter += 1
    assert num_iter == 5
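
The test above exercises the full FileWriter → MindDataset → save round-trip. A minimal sketch of the same round-trip with a single bytes field; the file names and the field name are illustrative:

from mindspore.mindrecord import FileWriter

writer = FileWriter("./roundtrip.mindrecord", 1)
writer.add_schema({"image1": {"type": "bytes"}}, "schema")
writer.write_raw_data([{"image1": bytes("abc", encoding="UTF-8")}])
writer.commit()

d1 = ds.MindDataset("./roundtrip.mindrecord", shuffle=False)
d1.save("./roundtrip_copy.mindrecord", 1)  # re-export the pipeline output as MindRecord
d2 = ds.MindDataset(dataset_file="./roundtrip_copy.mindrecord", shuffle=False)
assert d2.get_dataset_size() == 1
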
Code example #26
def train(args):
    '''train'''
    print('=============yolov3 start training==================')


    # init distributed
    if args.world_size != 1:
        init()
        args.local_rank = get_rank()
        args.world_size = get_group_size()

    args.batch_size = config.batch_size
    args.warmup_lr = config.warmup_lr
    args.lr_rates = config.lr_rates
    args.lr_steps = config.lr_steps
    args.gamma = config.gamma
    args.weight_decay = config.weight_decay
    args.momentum = config.momentum
    args.max_epoch = config.max_epoch
    args.log_interval = config.log_interval
    args.ckpt_path = config.ckpt_path
    args.ckpt_interval = config.ckpt_interval

    args.outputs_dir = os.path.join(args.ckpt_path, datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))
    print('args.outputs_dir', args.outputs_dir)

    args.logger = get_logger(args.outputs_dir, args.local_rank)

    if args.world_size != 8:
        args.lr_steps = [i * 8 // args.world_size for i in args.lr_steps]

    if args.world_size == 1:
        args.weight_decay = 0.

    if args.world_size != 1:
        parallel_mode = ParallelMode.DATA_PARALLEL
    else:
        parallel_mode = ParallelMode.STAND_ALONE

    context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=args.world_size, gradients_mean=True)
    mindrecord_path = args.mindrecord_path

    num_classes = config.num_classes
    anchors = config.anchors
    anchors_mask = config.anchors_mask
    num_anchors_list = [len(x) for x in anchors_mask]

    momentum = args.momentum
    args.logger.info('train opt momentum:{}'.format(momentum))

    weight_decay = args.weight_decay * float(args.batch_size)
    args.logger.info('real weight_decay:{}'.format(weight_decay))
    lr_scale = args.world_size / 8
    args.logger.info('lr_scale:{}'.format(lr_scale))

    # dataloader
    args.logger.info('start create dataloader')
    epoch = args.max_epoch
    ds = de.MindDataset(mindrecord_path + "0", columns_list=["image", "annotation"], num_shards=args.world_size,
                        shard_id=args.local_rank)

    ds = ds.map(input_columns=["image", "annotation"],
                output_columns=["image", "annotation", 'coord_mask_0', 'conf_pos_mask_0', 'conf_neg_mask_0',
                                'cls_mask_0', 't_coord_0', 't_conf_0', 't_cls_0', 'gt_list_0', 'coord_mask_1',
                                'conf_pos_mask_1', 'conf_neg_mask_1', 'cls_mask_1', 't_coord_1', 't_conf_1',
                                't_cls_1', 'gt_list_1', 'coord_mask_2', 'conf_pos_mask_2', 'conf_neg_mask_2',
                                'cls_mask_2', 't_coord_2', 't_conf_2', 't_cls_2', 'gt_list_2'],
                column_order=["image", "annotation", 'coord_mask_0', 'conf_pos_mask_0', 'conf_neg_mask_0',
                              'cls_mask_0', 't_coord_0', 't_conf_0', 't_cls_0', 'gt_list_0', 'coord_mask_1',
                              'conf_pos_mask_1', 'conf_neg_mask_1', 'cls_mask_1', 't_coord_1', 't_conf_1',
                              't_cls_1', 'gt_list_1', 'coord_mask_2', 'conf_pos_mask_2', 'conf_neg_mask_2',
                              'cls_mask_2', 't_coord_2', 't_conf_2', 't_cls_2', 'gt_list_2'],
                operations=compose_map_func, num_parallel_workers=16, python_multiprocessing=True)

    ds = ds.batch(args.batch_size, drop_remainder=True, num_parallel_workers=8)

    args.steps_per_epoch = ds.get_dataset_size()
    lr = warmup_step_new(args, lr_scale=lr_scale)

    ds = ds.repeat(epoch)
    args.logger.info('args.steps_per_epoch:{}'.format(args.steps_per_epoch))
    args.logger.info('args.world_size:{}'.format(args.world_size))
    args.logger.info('args.local_rank:{}'.format(args.local_rank))
    args.logger.info('end create dataloader')
    args.logger.save_args(args)
    args.logger.important_info('start create network')
    create_network_start = time.time()

    # backbone and loss
    network = backbone_HwYolov3(num_classes, num_anchors_list, args)

    criterion0 = YoloLoss(num_classes, anchors, anchors_mask[0], 64, 0, head_idx=0.0)
    criterion1 = YoloLoss(num_classes, anchors, anchors_mask[1], 32, 0, head_idx=1.0)
    criterion2 = YoloLoss(num_classes, anchors, anchors_mask[2], 16, 0, head_idx=2.0)

    # load pretrain model
    if os.path.isfile(args.pretrained):
        param_dict = load_checkpoint(args.pretrained)
        param_dict_new = {}
        for key, values in param_dict.items():
            if key.startswith('moments.'):
                continue
            elif key.startswith('network.'):
                param_dict_new[key[8:]] = values
            else:
                param_dict_new[key] = values
        load_param_into_net(network, param_dict_new)
        args.logger.info('load model {} success'.format(args.pretrained))

    train_net = BuildTrainNetworkV2(network, criterion0, criterion1, criterion2, args)

    # optimizer
    opt = Momentum(params=train_net.trainable_params(), learning_rate=Tensor(lr), momentum=momentum,
                   weight_decay=weight_decay)

    # package training process
    train_net = TrainOneStepWithLossScaleCell(train_net, opt)
    train_net.set_broadcast_flag()

    # checkpoint
    ckpt_max_num = args.max_epoch * args.steps_per_epoch // args.ckpt_interval
    train_config = CheckpointConfig(save_checkpoint_steps=args.ckpt_interval, keep_checkpoint_max=ckpt_max_num)
    ckpt_cb = ModelCheckpoint(config=train_config, directory=args.outputs_dir, prefix='{}'.format(args.local_rank))
    cb_params = _InternalCallbackParam()
    cb_params.train_network = train_net
    cb_params.epoch_num = ckpt_max_num
    cb_params.cur_epoch_num = 1
    run_context = RunContext(cb_params)
    ckpt_cb.begin(run_context)

    train_net.set_train()
    t_end = time.time()
    t_epoch = time.time()
    old_progress = -1
    i = 0
    scale_manager = DynamicLossScaleManager(init_loss_scale=2 ** 10, scale_factor=2, scale_window=2000)

    for data in ds.create_tuple_iterator(output_numpy=True):

        batch_images = data[0]
        batch_labels = data[1]
        coord_mask_0 = data[2]
        conf_pos_mask_0 = data[3]
        conf_neg_mask_0 = data[4]
        cls_mask_0 = data[5]
        t_coord_0 = data[6]
        t_conf_0 = data[7]
        t_cls_0 = data[8]
        gt_list_0 = data[9]
        coord_mask_1 = data[10]
        conf_pos_mask_1 = data[11]
        conf_neg_mask_1 = data[12]
        cls_mask_1 = data[13]
        t_coord_1 = data[14]
        t_conf_1 = data[15]
        t_cls_1 = data[16]
        gt_list_1 = data[17]
        coord_mask_2 = data[18]
        conf_pos_mask_2 = data[19]
        conf_neg_mask_2 = data[20]
        cls_mask_2 = data[21]
        t_coord_2 = data[22]
        t_conf_2 = data[23]
        t_cls_2 = data[24]
        gt_list_2 = data[25]

        img_tensor = Tensor(batch_images, mstype.float32)
        coord_mask_tensor_0 = Tensor(coord_mask_0.astype(np.float32))
        conf_pos_mask_tensor_0 = Tensor(conf_pos_mask_0.astype(np.float32))
        conf_neg_mask_tensor_0 = Tensor(conf_neg_mask_0.astype(np.float32))
        cls_mask_tensor_0 = Tensor(cls_mask_0.astype(np.float32))
        t_coord_tensor_0 = Tensor(t_coord_0.astype(np.float32))
        t_conf_tensor_0 = Tensor(t_conf_0.astype(np.float32))
        t_cls_tensor_0 = Tensor(t_cls_0.astype(np.float32))
        gt_list_tensor_0 = Tensor(gt_list_0.astype(np.float32))

        coord_mask_tensor_1 = Tensor(coord_mask_1.astype(np.float32))
        conf_pos_mask_tensor_1 = Tensor(conf_pos_mask_1.astype(np.float32))
        conf_neg_mask_tensor_1 = Tensor(conf_neg_mask_1.astype(np.float32))
        cls_mask_tensor_1 = Tensor(cls_mask_1.astype(np.float32))
        t_coord_tensor_1 = Tensor(t_coord_1.astype(np.float32))
        t_conf_tensor_1 = Tensor(t_conf_1.astype(np.float32))
        t_cls_tensor_1 = Tensor(t_cls_1.astype(np.float32))
        gt_list_tensor_1 = Tensor(gt_list_1.astype(np.float32))

        coord_mask_tensor_2 = Tensor(coord_mask_2.astype(np.float32))
        conf_pos_mask_tensor_2 = Tensor(conf_pos_mask_2.astype(np.float32))
        conf_neg_mask_tensor_2 = Tensor(conf_neg_mask_2.astype(np.float32))
        cls_mask_tensor_2 = Tensor(cls_mask_2.astype(np.float32))
        t_coord_tensor_2 = Tensor(t_coord_2.astype(np.float32))
        t_conf_tensor_2 = Tensor(t_conf_2.astype(np.float32))
        t_cls_tensor_2 = Tensor(t_cls_2.astype(np.float32))
        gt_list_tensor_2 = Tensor(gt_list_2.astype(np.float32))

        scaling_sens = Tensor(scale_manager.get_loss_scale(), dtype=mstype.float32)

        loss0, overflow, _ = train_net(img_tensor, coord_mask_tensor_0, conf_pos_mask_tensor_0,
                                       conf_neg_mask_tensor_0, cls_mask_tensor_0, t_coord_tensor_0,
                                       t_conf_tensor_0, t_cls_tensor_0, gt_list_tensor_0,
                                       coord_mask_tensor_1, conf_pos_mask_tensor_1, conf_neg_mask_tensor_1,
                                       cls_mask_tensor_1, t_coord_tensor_1, t_conf_tensor_1,
                                       t_cls_tensor_1, gt_list_tensor_1, coord_mask_tensor_2,
                                       conf_pos_mask_tensor_2, conf_neg_mask_tensor_2,
                                       cls_mask_tensor_2, t_coord_tensor_2, t_conf_tensor_2,
                                       t_cls_tensor_2, gt_list_tensor_2, scaling_sens)

        overflow = np.all(overflow.asnumpy())
        # feed the overflow flag back so the dynamic loss scale can adjust
        scale_manager.update_loss_scale(overflow)
        args.logger.info('rank[{}], iter[{}], loss[{}], overflow:{}, loss_scale:{}, lr:{}, batch_images:{}, '
                         'batch_labels:{}'.format(args.local_rank, i, loss0, overflow, scaling_sens, lr[i],
                                                  batch_images.shape, batch_labels.shape))

        # save ckpt
        cb_params.cur_step_num = i + 1  # current step number
        cb_params.batch_num = i + 2
        if args.local_rank == 0:
            ckpt_cb.step_end(run_context)

        # save Log
        if i == 0:
            time_for_graph_compile = time.time() - create_network_start
            args.logger.important_info('Yolov3, graph compile time={:.2f}s'.format(time_for_graph_compile))

        if i % args.steps_per_epoch == 0:
            cb_params.cur_epoch_num += 1

        if i % args.log_interval == 0 and args.local_rank == 0:
            time_used = time.time() - t_end
            epoch = int(i / args.steps_per_epoch)
            fps = args.batch_size * (i - old_progress) * args.world_size / time_used
            args.logger.info('epoch[{}], iter[{}], loss:[{}], {:.2f} imgs/sec'.format(epoch, i, loss0, fps))
            t_end = time.time()
            old_progress = i

        if i % args.steps_per_epoch == 0 and args.local_rank == 0:
            epoch_time_used = time.time() - t_epoch
            epoch = int(i / args.steps_per_epoch)
            fps = args.batch_size * args.world_size * args.steps_per_epoch / epoch_time_used
            args.logger.info('=================================================')
            args.logger.info('epoch time: epoch[{}], iter[{}], {:.2f} imgs/sec'.format(epoch, i, fps))
            args.logger.info('=================================================')
            t_epoch = time.time()

        i = i + 1

    args.logger.info('=============yolov3 training finished==================')
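
The pretrained-checkpoint block inside train() skips optimizer moments and strips the 'network.' wrapper prefix before loading. A standalone sketch of that filter; the helper name is an illustrative assumption, and the imports reflect the public load_checkpoint/load_param_into_net API:

from mindspore import load_checkpoint, load_param_into_net

def load_backbone_weights(network, ckpt_path):
    """Drop optimizer state and the 'network.' wrapper prefix, then load."""
    param_dict = load_checkpoint(ckpt_path)
    filtered = {}
    for key, value in param_dict.items():
        if key.startswith('moments.'):
            continue  # optimizer moments are not backbone weights
        new_key = key[len('network.'):] if key.startswith('network.') else key
        filtered[new_key] = value
    load_param_into_net(network, filtered)
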