Example #1
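All of the examples below appear to come from the same PySyft-based federated-learning project. A plausible shared preamble (a sketch: `datasets` is a project-local module providing VMDataset and load_default_transform, so its import path is an assumption):

import numpy as np
import syft as sy  # PySyft 0.2.x-style API (TorchHook, VirtualWorker, FederatedDataLoader)
from torch.utils.data import DataLoader

import datasets  # project-local module (assumed): VMDataset, load_default_transform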
def create_ps_test_loader(args, kwargs, vm_instance, test_dataset):
    if not isinstance(test_dataset.targets, np.ndarray):
        test_dataset.targets = np.array(test_dataset.targets)

    if not isinstance(test_dataset.data, np.ndarray):
        test_dataset.data = np.array(test_dataset.data)

    if args.dataset_type in ('CIFAR10', 'CIFAR100'):
        # CIFAR arrays are stored NHWC; convert to NCHW for torch
        test_dataset.data = np.transpose(test_dataset.data, (0, 3, 1, 2))

    data_transform = datasets.load_default_transform(args.dataset_type)

    vm_dataset_instance = datasets.VMDataset(np.float32(test_dataset.data),
                                             np.int64(test_dataset.targets),
                                             data_transform).federate(
                                                 [vm_instance])

    test_loader = sy.FederatedDataLoader(  # <--this is now a FederatedDataLoader
        vm_dataset_instance,
        shuffle=False,
        batch_size=args.test_batch_size,
        **kwargs)

    return test_loader
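A hypothetical invocation, assuming the preamble above plus a PySyft virtual worker; the two `args` fields are the only ones the function reads:

import torch
import torchvision
from types import SimpleNamespace

hook = sy.TorchHook(torch)
server = sy.VirtualWorker(hook, id="ps")

args = SimpleNamespace(dataset_type='CIFAR10', test_batch_size=64)
test_set = torchvision.datasets.CIFAR10('./data', train=False, download=True)
test_loader = create_ps_test_loader(args, {}, server, test_set)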
Example #2
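A local (non-federated) variant: `create_random_selected_data`, defined elsewhere in the project, presumably draws `num_data` random samples, and the result is wrapped in a plain torch `DataLoader` rather than a PySyft `FederatedDataLoader`.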
def create_random_loader(args, kwargs, tx2_idx, num_data, is_train, dataset):
    print('--[Debug] tx2:{} num_data:{}'.format(tx2_idx, num_data))

    selected_data, selected_targets = create_random_selected_data(
        args, num_data, dataset)

    data_transform = datasets.load_default_transform(args.dataset_type)

    vm_dataset_instance = datasets.VMDataset(selected_data, selected_targets,
                                             data_transform)
    if is_train:
        vm_loader = DataLoader(  # <--this is now a DataLoader
            vm_dataset_instance,
            shuffle=True,
            batch_size=args.batch_size,
            **kwargs)
    else:
        vm_loader = DataLoader(  # <--this is now a DataLoader
            vm_dataset_instance,
            shuffle=False,
            batch_size=args.test_batch_size,
            **kwargs)

    return vm_loader
Example #3
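Builds one `FederatedDataLoader` per virtual worker from caller-supplied sample indices (`selected_idxs`), which allows a deliberately biased, non-IID split; `create_bias_selected_data` is a project-local helper.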
def create_bias_federated_loader(args, kwargs, vm_list, is_train, dataset,
                                 selected_idxs):
    vm_loaders = list()
    for vm_idx in range(0, args.vm_num):
        selected_data, selected_targets = create_bias_selected_data(
            args, selected_idxs[vm_idx], dataset)
        if args.dataset_type in ('CIFAR10', 'CIFAR100'):
            # CIFAR arrays are stored NHWC; convert to NCHW for torch
            selected_data = np.transpose(selected_data, (0, 3, 1, 2))

        data_len = len(selected_data)
        if not args.train_flag:
            print('--[Debug] vm:{}-data len:{}'.format(vm_idx, data_len))

        data_transform = datasets.load_default_transform(args.dataset_type)
        vm_dataset_instance = datasets.VMDataset(
            selected_data, selected_targets,
            data_transform).federate([vm_list[vm_idx]])

        if is_train:
            vm_loader_instance = sy.FederatedDataLoader(  # <--this is now a FederatedDataLoader
                vm_dataset_instance,
                shuffle=True,
                batch_size=args.batch_size,
                **kwargs)
        else:
            vm_loader_instance = sy.FederatedDataLoader(  # <--this is now a FederatedDataLoader
                vm_dataset_instance,
                shuffle=False,
                batch_size=args.test_batch_size,
                **kwargs)

        vm_loaders.append(vm_loader_instance)

    return vm_loaders
Example #4
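Partitions the dataset label by label: samples are first grouped per class, each class is sliced according to `partition_ratios`, and `create_labelwise_selected_data` (another project-local helper) then assembles each worker's share.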
def create_labelwise_federated_loader(args, kwargs, vm_list, is_train, dataset,
                                      partition_ratios):
    vm_loaders = list()
    class_num = len(dataset.classes)
    label_wise_data = [[] for _ in range(class_num)]
    label_wise_targets = [[] for _ in range(class_num)]
    targets_array = np.array(dataset.targets)
    for c_idx in range(class_num):
        label_targets = targets_array[targets_array == c_idx]
        label_data = dataset.data[targets_array == c_idx]
        label_item_num = len(label_targets)
        begin_idx = 0
        for pr_idx in range(len(partition_ratios)):
            if pr_idx == len(partition_ratios) - 1:
                end_idx = label_item_num
            else:
                end_idx = min(
                    begin_idx +
                    int(np.floor(label_item_num * partition_ratios[pr_idx])),
                    label_item_num)
            print('--[Debug] begin_idx: {} end_idx: {}'.format(
                begin_idx, end_idx))
            label_wise_targets[c_idx].append(label_targets[begin_idx:end_idx])
            label_wise_data[c_idx].append(label_data[begin_idx:end_idx])
            print('--[Debug] label_data len:',
                  len(label_data[begin_idx:end_idx]))
            begin_idx = end_idx

    for vm_idx in range(len(vm_list)):
        selected_data, selected_targets = create_labelwise_selected_data(
            args, label_wise_data, label_wise_targets)
        print('--[Debug] vm:{}-data len:{}'.format(vm_idx, len(selected_data)))

        data_transform = datasets.load_default_transform(args.dataset_type)

        vm_dataset_instance = datasets.VMDataset(
            selected_data, selected_targets,
            data_transform).federate([vm_list[vm_idx]])

        if is_train:
            vm_loader_instance = sy.FederatedDataLoader(  # <--this is now a FederatedDataLoader
                vm_dataset_instance,
                shuffle=True,
                batch_size=args.batch_size,
                **kwargs)
        else:
            vm_loader_instance = sy.FederatedDataLoader(  # <--this is now a FederatedDataLoader
                vm_dataset_instance,
                shuffle=False,
                batch_size=args.test_batch_size,
                **kwargs)

        vm_loaders.append(vm_loader_instance)

    return vm_loaders
Example #5
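Splits the dataset into contiguous, equal-sized segments, one per worker, with the last worker taking any remainder.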
def create_segment_federated_loader(args, kwargs, vm_list, is_train, dataset):
    vm_loaders = list()
    data_len = len(dataset.targets)
    inter_num = data_len // len(vm_list)  # segment size per worker
    for vm_idx in range(len(vm_list)):
        begin_idx = vm_idx * inter_num
        if vm_idx != len(vm_list) - 1:
            end_idx = (vm_idx + 1) * inter_num
        else:
            end_idx = data_len

        print('--[Debug] vm:{}-begin idx:{}'.format(vm_idx, begin_idx))
        print('--[Debug] vm:{}-end idx:{}'.format(vm_idx, end_idx))

        selected_data, selected_targets = create_segment_selected_data(
            args, begin_idx, end_idx, dataset)

        print('--[Debug] vm:{}-piece len:{}'.format(vm_idx,
                                                    len(selected_targets)))

        data_transform = datasets.load_default_transform(args.dataset_type)

        vm_dataset_instance = datasets.VMDataset(
            selected_data, selected_targets,
            data_transform).federate([vm_list[vm_idx]])
        if is_train:
            vm_loader_instance = sy.FederatedDataLoader(  # <--this is now a FederatedDataLoader
                vm_dataset_instance,
                shuffle=True,
                batch_size=args.batch_size,
                **kwargs)
        else:
            vm_loader_instance = sy.FederatedDataLoader(  # <--this is now a FederatedDataLoader
                vm_dataset_instance,
                shuffle=False,
                batch_size=args.test_batch_size,
                **kwargs)

        vm_loaders.append(vm_loader_instance)

    return vm_loaders
Example #6
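Sends an entire dataset to a single worker, apparently as a centralized train/test baseline; note the special case that skips the default transform for FashionMNIST.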
def create_centralized_train_test_loader(args,
                                         kwargs,
                                         vm_instance,
                                         vm_dataset,
                                         is_test=False):
    if not isinstance(vm_dataset.targets, np.ndarray):
        vm_dataset.targets = np.array(vm_dataset.targets)

    if not isinstance(vm_dataset.data, np.ndarray):
        vm_dataset.data = np.array(vm_dataset.data)

    if args.dataset_type == 'FashionMNIST':
        data_transform = None
    else:
        data_transform = datasets.load_default_transform(args.dataset_type)

    vm_dataset_instance = datasets.VMDataset(np.float32(vm_dataset.data),
                                             np.int64(vm_dataset.targets),
                                             data_transform).federate(
                                                 [vm_instance])

    if is_test:
        vm_loader = sy.FederatedDataLoader(  # <--this is now a FederatedDataLoader
            vm_dataset_instance,
            shuffle=False,
            batch_size=args.test_batch_size,
            **kwargs)
    else:
        vm_loader = sy.FederatedDataLoader(  # <--this is now a FederatedDataLoader
            vm_dataset_instance,
            shuffle=True,
            batch_size=args.batch_size,
            **kwargs)

    return vm_loader
Example #7
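A local, single-device counterpart of Example #5: the dataset is cut into `num_tx2` contiguous segments and this device's segment is wrapped in a plain torch `DataLoader`.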
def create_segment_loader(args, kwargs, num_tx2, tx2_idx, is_train, dataset):
    data_len = len(dataset.targets)
    inter_num = data_len // num_tx2  # segment size per device
    tx2_idx = tx2_idx - 1  # incoming tx2 indices appear to be 1-based
    begin_idx = tx2_idx * inter_num
    if tx2_idx != num_tx2 - 1:
        end_idx = (tx2_idx + 1) * inter_num
    else:
        end_idx = data_len

    print('--[Debug] tx2:{}-begin idx:{}'.format(tx2_idx, begin_idx))
    print('--[Debug] tx2:{}-end idx:{}'.format(tx2_idx, end_idx))

    selected_data, selected_targets = create_segment_selected_data(
        args, begin_idx, end_idx, dataset)

    print('--[Debug] tx2:{}-piece len:{}'.format(tx2_idx,
                                                 len(selected_targets)))

    data_transform = datasets.load_default_transform(args.dataset_type)

    vm_dataset_instance = datasets.VMDataset(selected_data, selected_targets,
                                             data_transform)
    if is_train:
        vm_loader = DataLoader(  # <--this is now a DataLoader
            vm_dataset_instance,
            shuffle=True,
            batch_size=args.batch_size,
            **kwargs)
    else:
        vm_loader = DataLoader(  # <--this is now a DataLoader
            vm_dataset_instance,
            shuffle=False,
            batch_size=args.test_batch_size,
            **kwargs)

    return vm_loader