Example #1
    def __init__(self,
                 data_dir,
                 training=True,
                 use_third_trsfm=False,
                 use_auto_augment=False,
                 num_parallel_workers=8,
                 device_num=1,
                 device_id=0):

        if not training:
            trsfm = Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.4914, 0.4822, 0.4465),
                                     (0.2023, 0.1994, 0.2010)),
            ])
        else:
            if not use_third_trsfm:
                trsfm = Compose([
                    transforms.ToPIL(),
                    transforms.RandomResizedCrop(size=32, scale=(0.2, 1.)),
                    transforms.RandomColorAdjust(0.4, 0.4, 0.4, 0.4),
                    transforms.RandomGrayscale(prob=0.2),
                    transforms.RandomHorizontalFlip(),
                    transforms.ToTensor(),
                    transforms.Normalize((0.4914, 0.4822, 0.4465),
                                         (0.2023, 0.1994, 0.2010)),
                ])
            else:
                if use_auto_augment:
                    trsfm = Compose([
                        transforms.ToPIL(),
                        transforms.RandomResizedCrop(size=32, scale=(0.2, 1.)),
                        transforms.RandomHorizontalFlip(),
                        CIFAR10Policy(),
                        transforms.ToTensor(),
                        transforms.Normalize((0.4914, 0.4822, 0.4465),
                                             (0.2023, 0.1994, 0.2010)),
                    ])
                else:
                    rand_augment = RandAugment(n=2, m=10)
                    trsfm = Compose([
                        transforms.ToPIL(),
                        transforms.RandomResizedCrop(size=32, scale=(0.2, 1.)),
                        transforms.RandomHorizontalFlip(),
                        rand_augment,
                        transforms.ToTensor(),
                        transforms.Normalize((0.4914, 0.4822, 0.4465),
                                             (0.2023, 0.1994, 0.2010)),
                    ])

        self.trsfm = trsfm
        self.data_dir = data_dir
        self.num_parallel_workers = num_parallel_workers
        self.device_num = device_num
        self.device_id = device_id
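
A minimal usage sketch for the constructor above (the wrapper class name Cifar10Data and the data path are assumptions, not part of the original snippet):

loader = Cifar10Data(data_dir='./cifar-10-batches-bin',
                     training=True,
                     use_third_trsfm=True,
                     use_auto_augment=False)
# loader.trsfm now holds the RandAugment training pipeline; num_parallel_workers,
# device_num and device_id are kept for later dataset map/shard calls.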
Example #2
    def __getitem__(self, index):
        img_path = self.all_img_paths[index]
        gt_path = self.all_gt_paths[index]

        img = get_img(img_path)
        bboxes, tags = get_bboxes(img, gt_path)

        # multi-scale training
        if self.is_transform:
            img = random_scale(img, min_size=self.img_size[0])

        # get gt_text and training_mask
        img_h, img_w = img.shape[0: 2]
        gt_text = np.zeros((img_h, img_w), dtype=np.float32)
        training_mask = np.ones((img_h, img_w), dtype=np.float32)
        if bboxes.shape[0] > 0:
            bboxes = np.reshape(bboxes * ([img_w, img_h] * 4), (bboxes.shape[0], -1, 2)).astype('int32')
            for i in range(bboxes.shape[0]):
                cv2.drawContours(gt_text, [bboxes[i]], 0, i + 1, -1)
                if not tags[i]:
                    cv2.drawContours(training_mask, [bboxes[i]], 0, 0, -1)

        # get gt_kernels
        gt_kernels = []
        for i in range(1, self.kernel_num):
            rate = 1.0 - (1.0 - self.min_scale) / (self.kernel_num - 1) * i
            gt_kernel = np.zeros(img.shape[0:2], dtype=np.float32)
            kernel_bboxes = shrink(bboxes, rate)
            for j in range(kernel_bboxes.shape[0]):
                cv2.drawContours(gt_kernel, [kernel_bboxes[j]], 0, 1, -1)
            gt_kernels.append(gt_kernel)

        # data augmentation
        if self.is_transform:
            imgs = [img, gt_text, training_mask]
            imgs.extend(gt_kernels)
            imgs = random_horizontal_flip(imgs)
            imgs = random_rotate(imgs)
            imgs = random_crop(imgs, self.img_size)
            img, gt_text, training_mask, gt_kernels = imgs[0], imgs[1], imgs[2], imgs[3:]

        gt_text[gt_text > 0] = 1
        gt_kernels = np.array(gt_kernels)

        if self.is_transform:
            img = Image.fromarray(img)
            img = img.convert('RGB')
            img = py_transforms.RandomColorAdjust(brightness=32.0 / 255, saturation=0.5)(img)
        else:
            img = Image.fromarray(img)
            img = img.convert('RGB')

        img = py_transforms.ToTensor()(img)
        img = py_transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(img)

        gt_text = gt_text.astype(np.float32)
        gt_kernels = gt_kernels.astype(np.float32)
        training_mask = training_mask.astype(np.float32)

        return img, gt_text, gt_kernels, training_mask
Example #3
def util_test_normalize(mean, std, op_type):
    """
    Utility function for testing Normalize. Input arguments are given by other tests
    """
    if op_type == "cpp":
        # define map operations
        decode_op = c_vision.Decode()
        normalize_op = c_vision.Normalize(mean, std)
        # Generate dataset
        data = ds.TFRecordDataset(DATA_DIR,
                                  SCHEMA_DIR,
                                  columns_list=["image"],
                                  shuffle=False)
        data = data.map(operations=decode_op, input_columns=["image"])
        data = data.map(operations=normalize_op, input_columns=["image"])
    elif op_type == "python":
        # define map operations
        transforms = [
            py_vision.Decode(),
            py_vision.ToTensor(),
            py_vision.Normalize(mean, std)
        ]
        transform = mindspore.dataset.transforms.py_transforms.Compose(
            transforms)
        # Generate dataset
        data = ds.TFRecordDataset(DATA_DIR,
                                  SCHEMA_DIR,
                                  columns_list=["image"],
                                  shuffle=False)
        data = data.map(operations=transform, input_columns=["image"])
    else:
        raise ValueError("Wrong parameter value")
    return data
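
A hedged example of how other tests might call this utility (the mean/std values are illustrative; DATA_DIR and SCHEMA_DIR are module-level constants assumed to be defined alongside it):

data = util_test_normalize([0.475, 0.45, 0.392], [0.275, 0.267, 0.278], "python")
for item in data.create_dict_iterator(num_epochs=1, output_numpy=True):
    # each item["image"] is a normalized CHW float array produced by the python pipeline
    print(item["image"].shape)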
Example #4
def data_generator(args):
    '''Build train dataloader.'''
    mindrecord_path = args.mindrecord_path
    dst_w = args.dst_w
    dst_h = args.dst_h
    batch_size = args.per_batch_size
    attri_num = args.attri_num
    max_epoch = args.max_epoch
    transform_img = F2.Compose([
        F.Decode(),
        F.Resize((dst_w, dst_h)),
        F.RandomHorizontalFlip(prob=0.5),
        F.ToTensor(),
        F.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    de_dataset = de.MindDataset(mindrecord_path + "0",
                                columns_list=["image", "label"],
                                num_shards=args.world_size,
                                shard_id=args.local_rank)
    de_dataset = de_dataset.map(input_columns="image",
                                operations=transform_img,
                                num_parallel_workers=args.workers,
                                python_multiprocessing=True)
    de_dataset = de_dataset.batch(batch_size, drop_remainder=True)
    steps_per_epoch = de_dataset.get_dataset_size()
    de_dataset = de_dataset.repeat(max_epoch)
    de_dataloader = de_dataset.create_tuple_iterator(output_numpy=True)

    num_classes = attri_num

    return de_dataloader, steps_per_epoch, num_classes
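
A sketch of consuming the returned tuple iterator (the args fields follow the names used above; the loop body is illustrative only):

de_dataloader, steps_per_epoch, num_classes = data_generator(args)
total_steps = steps_per_epoch * args.max_epoch  # dataset was already repeated max_epoch times
for step, (image, label) in enumerate(de_dataloader):
    # image and label are numpy arrays because output_numpy=True
    if step >= total_steps:
        break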
Example #5
def create_dataset(batch_size,
                   train_data_url='',
                   workers=8,
                   distributed=False,
                   input_size=224,
                   color_jitter=0.4):
    """Create ImageNet training dataset"""
    if not os.path.exists(train_data_url):
        raise ValueError('Path does not exist')
    decode_op = py_vision.Decode()
    type_cast_op = c_transforms.TypeCast(mstype.int32)

    random_resize_crop_bicubic = py_vision.RandomResizedCrop(
        size=(input_size, input_size),
        scale=SCALE,
        ratio=RATIO,
        interpolation=Inter.BICUBIC)
    random_horizontal_flip_op = py_vision.RandomHorizontalFlip(0.5)
    adjust_range = (max(0, 1 - color_jitter), 1 + color_jitter)
    random_color_jitter_op = py_vision.RandomColorAdjust(
        brightness=adjust_range,
        contrast=adjust_range,
        saturation=adjust_range)
    to_tensor = py_vision.ToTensor()
    normalize_op = py_vision.Normalize(IMAGENET_DEFAULT_MEAN,
                                       IMAGENET_DEFAULT_STD)

    # assemble all the transforms
    image_ops = py_transforms.Compose([
        decode_op, random_resize_crop_bicubic, random_horizontal_flip_op,
        random_color_jitter_op, to_tensor, normalize_op
    ])

    rank_id = get_rank() if distributed else 0
    rank_size = get_group_size() if distributed else 1

    dataset_train = ds.ImageFolderDataset(train_data_url,
                                          num_parallel_workers=workers,
                                          shuffle=True,
                                          num_shards=rank_size,
                                          shard_id=rank_id)

    dataset_train = dataset_train.map(input_columns=["image"],
                                      operations=image_ops,
                                      num_parallel_workers=workers)

    dataset_train = dataset_train.map(input_columns=["label"],
                                      operations=type_cast_op,
                                      num_parallel_workers=workers)

    # batch dealing
    ds_train = dataset_train.batch(batch_size,
                                   per_batch_map=split_imgs_and_labels,
                                   input_columns=["image", "label"],
                                   num_parallel_workers=2,
                                   drop_remainder=True)

    ds_train = ds_train.repeat(1)
    return ds_train
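
A hedged sketch of feeding the resulting dataset into mindspore.Model training (network, loss and optimizer construction are outside this snippet and assumed):

from mindspore import Model

ds_train = create_dataset(batch_size=128, train_data_url='/data/imagenet/train', workers=8)
model = Model(network, loss_fn=loss, optimizer=opt, metrics={'acc'})
model.train(epoch=90, train_dataset=ds_train, dataset_sink_mode=True)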
Example #6
def create_dataset_py(dataset_path, do_train, repeat_num=1, batch_size=32, target="Ascend"):
    """
    create a train or eval dataset

    Args:
        dataset_path(string): the path of dataset.
        do_train(bool): whether dataset is used for train or eval.
        repeat_num(int): the repeat times of dataset. Default: 1
        batch_size(int): the batch size of dataset. Default: 32
        target(str): the device target. Default: Ascend

    Returns:
        dataset
    """
    if target == "Ascend":
        device_num = int(os.getenv("RANK_SIZE"))
        rank_id = int(os.getenv("RANK_ID"))
    else:
        init()
        rank_id = get_rank()
        device_num = get_group_size()

    if do_train:
        if device_num == 1:
            ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True)
        else:
            ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True,
                                       num_shards=device_num, shard_id=rank_id)
    else:
        ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=False)

    image_size = 224

    # define map operations
    decode_op = P.Decode()
    resize_crop_op = P.RandomResizedCrop(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333))
    horizontal_flip_op = P.RandomHorizontalFlip(prob=0.5)

    resize_op = P.Resize(256)
    center_crop = P.CenterCrop(image_size)
    to_tensor = P.ToTensor()
    normalize_op = P.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    # define map operations
    if do_train:
        trans = [decode_op, resize_crop_op, horizontal_flip_op, to_tensor, normalize_op]
    else:
        trans = [decode_op, resize_op, center_crop, to_tensor, normalize_op]

    compose = P2.Compose(trans)
    ds = ds.map(operations=compose, input_columns="image", num_parallel_workers=8, python_multiprocessing=True)

    # apply batch operations
    ds = ds.batch(batch_size, drop_remainder=True)

    # apply dataset repeat operation
    ds = ds.repeat(repeat_num)

    return ds
Example #7
def test_normalize_exception_invalid_range_py():
    """
    Test Normalize in python transformation: value is not in range [0,1]
    expected to raise ValueError
    """
    logger.info("test_normalize_exception_invalid_range_py")
    try:
        _ = py_vision.Normalize([0.75, 1.25, 0.5], [0.1, 0.18, 1.32])
    except ValueError as e:
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert "Input mean_value is not within the required interval of [0.0, 1.0]." in str(e)
Example #8
def test_normalize_exception_unequal_size_py():
    """
    Test Normalize in python transformation: len(mean) != len(std)
    expected to raise ValueError
    """
    logger.info("test_normalize_exception_unequal_size_py")
    try:
        _ = py_vision.Normalize([0.50, 0.30, 0.75], [0.18, 0.32, 0.71, 0.72])
    except ValueError as e:
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert str(e) == "Length of mean and std must be equal."
Example #9
def create_imagenet_dataset(imagenet_dir):
    ds = de.ImageFolderDataset(imagenet_dir)

    transform = Compose([
        F.Decode(),
        F.RandomHorizontalFlip(0.5),
        F.ToTensor(),
        F.Normalize((0.491, 0.482, 0.447), (0.247, 0.243, 0.262)),
        F.RandomErasing()
    ])
    ds = ds.map(operations=transform, input_columns="image")
    ds = ds.shuffle(buffer_size=5)
    ds = ds.repeat(3)
    return ds
Example #10
def create_dataset_val(batch_size=128,
                       val_data_url='',
                       workers=8,
                       distributed=False,
                       input_size=224):
    """Create ImageNet validation dataset"""
    if not os.path.exists(val_data_url):
        raise ValueError('Path does not exist')
    rank_id = get_rank() if distributed else 0
    rank_size = get_group_size() if distributed else 1
    dataset = ds.ImageFolderDataset(val_data_url,
                                    num_parallel_workers=workers,
                                    num_shards=rank_size,
                                    shard_id=rank_id)
    scale_size = None

    if isinstance(input_size, tuple):
        assert len(input_size) == 2
        if input_size[-1] == input_size[-2]:
            scale_size = int(math.floor(input_size[0] / DEFAULT_CROP_PCT))
        else:
            scale_size = tuple([int(x / DEFAULT_CROP_PCT) for x in input_size])
    else:
        scale_size = int(math.floor(input_size / DEFAULT_CROP_PCT))

    type_cast_op = c_transforms.TypeCast(mstype.int32)
    decode_op = py_vision.Decode()
    resize_op = py_vision.Resize(size=scale_size, interpolation=Inter.BICUBIC)
    center_crop = py_vision.CenterCrop(size=input_size)
    to_tensor = py_vision.ToTensor()
    normalize_op = py_vision.Normalize(IMAGENET_DEFAULT_MEAN,
                                       IMAGENET_DEFAULT_STD)

    image_ops = py_transforms.Compose(
        [decode_op, resize_op, center_crop, to_tensor, normalize_op])

    dataset = dataset.map(input_columns=["label"],
                          operations=type_cast_op,
                          num_parallel_workers=workers)
    dataset = dataset.map(input_columns=["image"],
                          operations=image_ops,
                          num_parallel_workers=workers)
    dataset = dataset.batch(batch_size,
                            per_batch_map=split_imgs_and_labels,
                            input_columns=["image", "label"],
                            num_parallel_workers=2,
                            drop_remainder=True)
    dataset = dataset.repeat(1)
    return dataset
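
As a worked example of the crop scaling above: with input_size=224 and the conventional DEFAULT_CROP_PCT of 0.875 (an assumption, since the constant is defined outside this snippet), scale_size = floor(224 / 0.875) = 256, so images are resized to 256 and then center-cropped back to 224.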
Example #11
def util_test_normalize_grayscale(num_output_channels, mean, std):
    """
    Utility function for testing Normalize. Input arguments are given by other tests
    """
    transforms = [
        py_vision.Decode(),
        py_vision.Grayscale(num_output_channels),
        py_vision.ToTensor(),
        py_vision.Normalize(mean, std)
    ]
    transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data = data.map(operations=transform, input_columns=["image"])
    return data
Example #12
    def __call__(self, imgs, labels, batchInfo):
        # imgs are expected to be convertible to PIL images
        ret_imgs = []
        ret_labels = []
        py_to_pil_op = P.ToPIL()
        to_tensor = P.ToTensor()
        normalize_op = P.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
        rand_augment_ops = transform_utils.rand_augment_transform(
            self.config_str, self.hparams)
        for i, image in enumerate(imgs):
            img_pil = py_to_pil_op(image)
            img_pil = rand_augment_ops(img_pil)
            img_array = to_tensor(img_pil)
            img_array = normalize_op(img_array)
            ret_imgs.append(img_array)
            ret_labels.append(labels[i])
        return np.array(ret_imgs), np.array(ret_labels)
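
The (imgs, labels, batchInfo) signature matches MindSpore's per_batch_map callback, so an instance of this class would typically be wired into batching roughly as follows (the wrapper name and surrounding variables are assumptions):

per_batch_aug = RandAugmentBatchMap(config_str, hparams)  # hypothetical class exposing the __call__ above
data_set = data_set.batch(batch_size,
                          per_batch_map=per_batch_aug,
                          input_columns=["image", "label"],
                          num_parallel_workers=2,
                          drop_remainder=True)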
Example #13
def test_normalize_op_py(plot=False):
    """
    Test Normalize in python transformations
    """
    logger.info("Test Normalize in python")
    mean = [0.475, 0.45, 0.392]
    std = [0.275, 0.267, 0.278]
    # define map operations
    transforms = [py_vision.Decode(), py_vision.ToTensor()]
    transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
    normalize_op = py_vision.Normalize(mean, std)

    #  First dataset
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data1 = data1.map(operations=transform, input_columns=["image"])
    data1 = data1.map(operations=normalize_op, input_columns=["image"])

    #  Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data2 = data2.map(operations=transform, input_columns=["image"])

    num_iter = 0
    for item1, item2 in zip(
            data1.create_dict_iterator(num_epochs=1, output_numpy=True),
            data2.create_dict_iterator(num_epochs=1, output_numpy=True)):
        image_de_normalized = (item1["image"].transpose(1, 2, 0) * 255).astype(
            np.uint8)
        image_np_normalized = (
            normalize_np(item2["image"].transpose(1, 2, 0), mean, std) *
            255).astype(np.uint8)
        image_original = (item2["image"].transpose(1, 2, 0) * 255).astype(
            np.uint8)
        mse = diff_mse(image_de_normalized, image_np_normalized)
        logger.info("image_{}, mse: {}".format(num_iter + 1, mse))
        assert mse < 0.01
        if plot:
            visualize_image(image_original, image_de_normalized, mse,
                            image_np_normalized)
        num_iter += 1
Example #14
def load_images(paths, batch_size=128):
    '''Load images.'''
    ll = []
    resize = V.Resize((96, 64))
    transform = T.Compose(
        [V.ToTensor(),
         V.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
    for i, _ in enumerate(paths):
        im = Image.open(paths[i])
        im = resize(im)
        img = np.array(im)
        ts = transform(img)
        ll.append(ts[0])
        if len(ll) == batch_size:
            yield np.stack(ll, axis=0)
            ll.clear()
    if ll:
        yield np.stack(ll, axis=0)
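
A hedged usage sketch for the generator above (the glob import and the image paths are illustrative):

import glob

paths = sorted(glob.glob('./images/*.jpg'))
for batch in load_images(paths, batch_size=128):
    # batch has shape (N, 96, 64): only the first (normalized) channel of each image is kept
    print(batch.shape)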
Example #15
def IC15_TEST_Generator():
    ic15_test_data_dir = config.TEST_ROOT_DIR + 'ch4_test_images/'
    img_size = config.INFER_LONG_SIZE

    img_size = img_size if (img_size is None
                            or isinstance(img_size, tuple)) else (img_size,
                                                                  img_size)

    data_dirs = [ic15_test_data_dir]
    all_img_paths = []

    for data_dir in data_dirs:
        img_names = [
            i for i in os.listdir(data_dir)
            if os.path.splitext(i)[-1].lower() in ['.jpg', '.jpeg', '.png']
        ]

        img_paths = []
        for _, img_name in enumerate(img_names):
            img_path = data_dir + img_name
            img_paths.append(img_path)

        all_img_paths.extend(img_paths)

    dataset_length = len(all_img_paths)

    for index in range(dataset_length):
        img_path = all_img_paths[index]
        img_name = np.array(os.path.split(img_path)[-1])
        img = get_img(img_path)

        long_size = max(img.shape[:2])
        img_resized = np.zeros((long_size, long_size, 3), np.uint8)
        img_resized[:img.shape[0], :img.shape[1], :] = img
        img_resized = cv2.resize(img_resized, dsize=img_size)

        img_resized = Image.fromarray(img_resized)
        img_resized = img_resized.convert('RGB')
        img_resized = py_transforms.ToTensor()(img_resized)
        img_resized = py_transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                              std=[0.229, 0.224,
                                                   0.225])(img_resized)

        yield img, img_resized, img_name
Example #16
def get_de_dataset(args):
    '''get_de_dataset'''
    lbl_transforms = [F.ToType(np.int32)]
    transform_label = F2.Compose(lbl_transforms)

    drop_remainder = False

    transforms = [
        F.ToPIL(),
        F.RandomHorizontalFlip(),
        F.ToTensor(),
        F.Normalize(mean=[0.5], std=[0.5])
    ]
    transform = F2.Compose(transforms)
    cache_path = os.path.join('cache', os.path.basename(args.data_dir),
                              'data_cache.pkl')
    print(cache_path)
    if not os.path.exists(os.path.dirname(cache_path)):
        os.makedirs(os.path.dirname(cache_path))
    dataset = CustomDataset(args.data_dir, cache_path, args.is_distributed)
    args.logger.info("dataset len:{}".format(dataset.__len__()))
    sampler = DistributedCustomSampler(dataset,
                                       num_replicas=args.world_size,
                                       rank=args.local_rank,
                                       is_distributed=args.is_distributed)
    de_dataset = de.GeneratorDataset(dataset, ["image", "label"],
                                     sampler=sampler)
    args.logger.info("after sampler de_dataset datasize :{}".format(
        de_dataset.get_dataset_size()))
    de_dataset = de_dataset.map(input_columns="image", operations=transform)
    de_dataset = de_dataset.map(input_columns="label",
                                operations=transform_label)
    de_dataset = de_dataset.project(columns=["image", "label"])
    de_dataset = de_dataset.batch(args.per_batch_size,
                                  drop_remainder=drop_remainder)
    num_iter_per_npu = math.ceil(
        len(dataset) * 1.0 / args.world_size / args.per_batch_size)
    num_classes = len(dataset.classes)

    return de_dataset, num_iter_per_npu, num_classes
Example #17
def data_generator_eval(args):
    '''Build eval dataloader.'''
    mindrecord_path = args.mindrecord_path
    dst_w = args.dst_w
    dst_h = args.dst_h
    batch_size = 1
    attri_num = args.attri_num
    transform_img = F2.Compose([F.Decode(),
                                F.Resize((dst_w, dst_h)),
                                F.ToTensor(),
                                F.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    de_dataset = de.MindDataset(mindrecord_path + "0", columns_list=["image", "label"])
    de_dataset = de_dataset.map(input_columns="image", operations=transform_img, num_parallel_workers=args.workers,
                                python_multiprocessing=True)
    de_dataset = de_dataset.batch(batch_size)

    de_dataloader = de_dataset.create_tuple_iterator(output_numpy=True)
    steps_per_epoch = de_dataset.get_dataset_size()
    print("image number:{0}".format(steps_per_epoch))
    num_classes = attri_num

    return de_dataloader, steps_per_epoch, num_classes
Example #18
    best_acc = 0  # best test accuracy
    start_epoch = 0
    feature_dim = args.low_dim
    wG = 0
    start_time = time.time()

    print("==> Loading data")
    # Data Loading code

    transform_train = Compose([
        decode,
        py_trans.Pad(10),
        py_trans.RandomCrop((args.img_h, args.img_w)),
        py_trans.RandomHorizontalFlip(),
        py_trans.ToTensor(),
        py_trans.Normalize(mean=[0.485, 0.456, 0.406],
                           std=[0.229, 0.224, 0.225])
    ])

    transform_test = Compose([
        decode,
        py_trans.Resize((args.img_h, args.img_w)),
        py_trans.ToTensor(),
        py_trans.Normalize(mean=[0.485, 0.456, 0.406],
                           std=[0.229, 0.224, 0.225])
    ])

    ifDebug_dic = {"yes": True, "no": False}
    if dataset_type == "SYSU":
        # train_set
        ifDebug = {}
        trainset_generator = SYSUDatasetGenerator(data_dir=data_path,
Example #19
def main(args):
    if not os.path.exists(args.test_dir):
        args.logger.info('ERROR, test_dir does not exist, please set test_dir in config.py.')
        return 0
    all_start_time = time.time()

    net = get_model(args)
    compile_time_used = time.time() - all_start_time
    args.logger.info('INFO, graph compile finished, time used:{:.2f}s, start calculate img embedding'.
                     format(compile_time_used))

    img_transforms = transforms.Compose([vision.ToTensor(), vision.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    # for test images
    args.logger.info('INFO, start step1, calculate test img embedding, weight file = {}'.format(args.weight))
    step1_start_time = time.time()

    ds, img_tot, all_labels = get_dataloader(args.test_img_predix, args.test_img_list,
                                             args.test_batch_size, img_transforms)
    args.logger.info('INFO, dataset total test img:{}, total test batch:{}'.format(img_tot, ds.get_dataset_size()))
    test_embedding_tot_np = np.zeros((img_tot, args.emb_size))
    test_img_labels = all_labels
    data_loader = ds.create_dict_iterator(output_numpy=True, num_epochs=1)
    for i, data in enumerate(data_loader):
        img, idxs = data["image"], data["index"]
        out = net(Tensor(img)).asnumpy().astype(np.float32)
        embeddings = l2normalize(out)
        for batch in range(embeddings.shape[0]):
            test_embedding_tot_np[idxs[batch]] = embeddings[batch]
    try:
        check_minmax(args, np.linalg.norm(test_embedding_tot_np, ord=2, axis=1))
    except ValueError:
        return 0

    test_embedding_tot = {}
    for idx, label in enumerate(test_img_labels):
        test_embedding_tot[label] = test_embedding_tot_np[idx]

    step2_start_time = time.time()
    step1_time_used = step2_start_time - step1_start_time
    args.logger.info('INFO, step1 finished, time used:{:.2f}s, start step2, calculate dis img embedding'.
                     format(step1_time_used))

    # for dis images
    ds_dis, img_tot, _ = get_dataloader(args.dis_img_predix, args.dis_img_list, args.dis_batch_size, img_transforms)
    dis_embedding_tot_np = np.zeros((img_tot, args.emb_size))
    total_batch = ds_dis.get_dataset_size()
    args.logger.info('INFO, dataloader total dis img:{}, total dis batch:{}'.format(img_tot, total_batch))
    start_time = time.time()
    img_per_gpu = int(math.ceil(1.0 * img_tot / args.world_size))
    delta_num = img_per_gpu * args.world_size - img_tot
    start_idx = img_per_gpu * args.local_rank - max(0, args.local_rank - (args.world_size - delta_num))
    data_loader = ds_dis.create_dict_iterator(output_numpy=True, num_epochs=1)
    for idx, data in enumerate(data_loader):
        img = data["image"]
        out = net(Tensor(img)).asnumpy().astype(np.float32)
        embeddings = l2normalize(out)
        dis_embedding_tot_np[start_idx:(start_idx + embeddings.shape[0])] = embeddings
        start_idx += embeddings.shape[0]
        if args.local_rank % 8 == 0 and idx % args.log_interval == 0 and idx > 0:
            speed = 1.0 * (args.dis_batch_size * args.log_interval * args.world_size) / (time.time() - start_time)
            time_left = (total_batch - idx - 1) * args.dis_batch_size * args.world_size / speed
            args.logger.info('INFO, processed [{}/{}], speed: {:.2f} img/s, left:{:.2f}s'.
                             format(idx, total_batch, speed, time_left))
            start_time = time.time()
    try:
        check_minmax(args, np.linalg.norm(dis_embedding_tot_np, ord=2, axis=1))
    except ValueError:
        return 0

    step3_start_time = time.time()
    step2_time_used = step3_start_time - step2_start_time
    args.logger.info('INFO, step2 finished, time used:{:.2f}s, start step3, calculate top1 acc'.format(step2_time_used))

    # clear npu memory

    img = None
    net = None

    dis_embedding_tot_np = np.transpose(dis_embedding_tot_np, (1, 0))
    args.logger.info('INFO, calculate top1 acc dis_embedding_tot_np shape:{}'.format(dis_embedding_tot_np.shape))

    # find best match
    assert len(args.test_img_list) % 2 == 0
    task_num = int(len(args.test_img_list) / 2)
    correct = np.array([0] * (2 * task_num))
    tot = np.array([0] * task_num)

    for i in range(int(len(args.test_img_list) / 2)):
        jk_list = args.test_img_list[2 * i]
        zj_list = args.test_img_list[2 * i + 1]
        zj2jk_pairs = sorted(generate_test_pair(jk_list, zj_list))
        sampler = DistributedSampler(zj2jk_pairs)
        args.logger.info('INFO, calculate top1 acc sampler len:{}'.format(len(sampler)))
        for idx in sampler:
            out1, out2 = cal_topk(args, idx, zj2jk_pairs, test_embedding_tot, dis_embedding_tot_np)
            correct[2 * i] += out1[0]
            correct[2 * i + 1] += out1[1]
            tot[i] += out2[0]

    args.logger.info('local_rank={},tot={},correct={}'.format(args.local_rank, tot, correct))

    step3_time_used = time.time() - step3_start_time
    args.logger.info('INFO, step3 finished, time used:{:.2f}s'.format(step3_time_used))
    args.logger.info('weight:{}'.format(args.weight))

    for i in range(int(len(args.test_img_list) / 2)):
        test_set_name = 'test_dataset'
        zj2jk_acc = correct[2 * i] / tot[i]
        jk2zj_acc = correct[2 * i + 1] / tot[i]
        avg_acc = (zj2jk_acc + jk2zj_acc) / 2
        results = '[{}]: zj2jk={:.4f}, jk2zj={:.4f}, avg={:.4f}'.format(test_set_name, zj2jk_acc, jk2zj_acc, avg_acc)
        args.logger.info(results)
    args.logger.info('INFO, tot time used: {:.2f}s'.format(time.time() - all_start_time))
    return 0
Example #20
def create_dataset_py(dataset_path,
                      do_train,
                      config,
                      device_target,
                      repeat_num=1,
                      batch_size=32):
    """
    create a train or eval dataset

    Args:
        dataset_path(string): the path of dataset.
        do_train(bool): whether dataset is used for train or eval.
        repeat_num(int): the repeat times of dataset. Default: 1.
        batch_size(int): the batch size of dataset. Default: 32.

    Returns:
        dataset
    """
    if device_target == "Ascend":
        rank_size = int(os.getenv("RANK_SIZE"))
        rank_id = int(os.getenv("RANK_ID"))
        if do_train:
            if rank_size == 1:
                data_set = ds.ImageFolderDataset(dataset_path,
                                                 num_parallel_workers=8,
                                                 shuffle=True)
            else:
                data_set = ds.ImageFolderDataset(dataset_path,
                                                 num_parallel_workers=8,
                                                 shuffle=True,
                                                 num_shards=rank_size,
                                                 shard_id=rank_id)
        else:
            data_set = ds.ImageFolderDataset(dataset_path,
                                             num_parallel_workers=8,
                                             shuffle=False)
    else:
        raise ValueError("Unsupported device target.")

    resize_height = 224

    if do_train:
        buffer_size = 20480
        # apply shuffle operations
        data_set = data_set.shuffle(buffer_size=buffer_size)

    # define map operations
    decode_op = P.Decode()
    resize_crop_op = P.RandomResizedCrop(resize_height,
                                         scale=(0.08, 1.0),
                                         ratio=(0.75, 1.333))
    horizontal_flip_op = P.RandomHorizontalFlip(prob=0.5)

    resize_op = P.Resize(256)
    center_crop = P.CenterCrop(resize_height)
    to_tensor = P.ToTensor()
    normalize_op = P.Normalize(mean=[0.485, 0.456, 0.406],
                               std=[0.229, 0.224, 0.225])

    if do_train:
        trans = [
            decode_op, resize_crop_op, horizontal_flip_op, to_tensor,
            normalize_op
        ]
    else:
        trans = [decode_op, resize_op, center_crop, to_tensor, normalize_op]

    compose = P2.Compose(trans)

    data_set = data_set.map(operations=compose,
                            input_columns="image",
                            num_parallel_workers=8,
                            python_multiprocessing=True)

    # apply batch operations
    data_set = data_set.batch(batch_size, drop_remainder=True)

    # apply dataset repeat operation
    data_set = data_set.repeat(repeat_num)

    return data_set
Example #21
def create_dataset(dataset_path,
                   do_train,
                   config,
                   platform,
                   repeat_num=1,
                   batch_size=100):
    """
    create a train or eval dataset

    Args:
        dataset_path(string): the path of dataset.
        do_train(bool): whether dataset is used for train or eval.
        repeat_num(int): the repeat times of dataset. Default: 1
        batch_size(int): the batch size of dataset. Default: 32

    Returns:
        dataset
    """
    if platform == "Ascend":
        rank_size = int(os.getenv("RANK_SIZE"))
        rank_id = int(os.getenv("RANK_ID"))
        if rank_size == 1:
            data_set = ds.MindDataset(dataset_path,
                                      num_parallel_workers=8,
                                      shuffle=True)
        else:
            data_set = ds.MindDataset(dataset_path,
                                      num_parallel_workers=8,
                                      shuffle=True,
                                      num_shards=rank_size,
                                      shard_id=rank_id)
    elif platform == "GPU":
        if do_train:
            from mindspore.communication.management import get_rank, get_group_size
            data_set = ds.MindDataset(dataset_path,
                                      num_parallel_workers=8,
                                      shuffle=True,
                                      num_shards=get_group_size(),
                                      shard_id=get_rank())
        else:
            data_set = ds.MindDataset(dataset_path,
                                      num_parallel_workers=8,
                                      shuffle=False)
    else:
        raise ValueError("Unsupported platform.")

    resize_height = config.image_height
    buffer_size = 1000

    # define map operations
    resize_crop_op = C.RandomCropDecodeResize(resize_height,
                                              scale=(0.08, 1.0),
                                              ratio=(0.75, 1.333))
    horizontal_flip_op = C.RandomHorizontalFlip(prob=0.5)

    color_op = C.RandomColorAdjust(brightness=0.4,
                                   contrast=0.4,
                                   saturation=0.4)
    rescale_op = C.Rescale(1 / 255.0, 0)
    normalize_op = C.Normalize(mean=[0.485, 0.456, 0.406],
                               std=[0.229, 0.224, 0.225])
    change_swap_op = C.HWC2CHW()

    # define python operations
    decode_p = P.Decode()
    resize_p = P.Resize(256, interpolation=Inter.BILINEAR)
    center_crop_p = P.CenterCrop(224)
    totensor = P.ToTensor()
    normalize_p = P.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    composeop = P2.Compose(
        [decode_p, resize_p, center_crop_p, totensor, normalize_p])
    if do_train:
        trans = [
            resize_crop_op, horizontal_flip_op, color_op, rescale_op,
            normalize_op, change_swap_op
        ]
    else:
        trans = composeop
    type_cast_op = C2.TypeCast(mstype.int32)

    data_set = data_set.map(input_columns="image",
                            operations=trans,
                            num_parallel_workers=8)
    data_set = data_set.map(input_columns="label_list",
                            operations=type_cast_op,
                            num_parallel_workers=8)

    # apply shuffle operations
    data_set = data_set.shuffle(buffer_size=buffer_size)

    # apply batch operations
    data_set = data_set.batch(batch_size, drop_remainder=True)

    # apply dataset repeat operation
    data_set = data_set.repeat(repeat_num)

    return data_set