Example #1
    def load_dataset(self, last_training_class_index):
        # Old .npy-based loading path, kept commented out for reference:
        # x = np.load("datasets/DAGAN_ped_database.npy")
        # x_temp = []
        # for i in range(x.shape[0]):
        #     choose_samples = np.random.choice([i for i in range(1, 15)])
        #     x_temp.append(x[i, :choose_samples])
        # self.x = np.array(x_temp)
        # # print(np.max(self.x))
        # # self.x = self.x / np.max(self.x)
        # x_train, x_test, x_val = self.x[:25], self.x[25:35], self.x[35:45]
        # x_train = x_train[:last_training_class_index]
        batch_size, num_gpus, args = get_args()
        # print(args.data_dir)
        data_dir = args.data_dir
        im_size = args.im_size

        dataloader = data_loader(data_dir,
                                 grayScale=False, labels=False, img_rows=im_size, img_cols=im_size)
        num_samples, x_train, x_val, x_test = dataloader.loadImages(train_val_test_split=[0.96, 0.02, 0.02])

        print('---------> DATA LOADED')
        print(x_train.shape, x_val.shape, x_test.shape)
        return x_train, x_test, x_val
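
The data_loader class and its loadImages method are not shown above. As a rough illustration of what a train_val_test_split=[0.96, 0.02, 0.02] split can amount to, here is a minimal NumPy sketch; the name split_images and the contiguous-slice behaviour are assumptions for illustration, not the repository's actual loader.

import numpy as np

def split_images(x, train_val_test_split=(0.96, 0.02, 0.02)):
    # Hypothetical helper (assumption): slice an image array into
    # train/val/test parts according to the given fractions.
    n = x.shape[0]
    n_train = int(n * train_val_test_split[0])
    n_val = int(n * train_val_test_split[1])
    x_train = x[:n_train]
    x_val = x[n_train:n_train + n_val]
    x_test = x[n_train + n_val:]
    return n, x_train, x_val, x_test
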
Example #2
import data as dataset
from experiment_builder import ExperimentBuilder
from utils.parser_util import get_args

batch_size, num_gpus, args = get_args()
# Set the data provider to use for the experiment
data = dataset.VGGFaceDAGANDataset(batch_size=batch_size,
                                   last_training_class_index=1600,
                                   reverse_channels=True,
                                   num_of_gpus=num_gpus,
                                   gen_batches=10)
# Init the experiment
experiment = ExperimentBuilder(args, data=data)
# Run the experiment
experiment.run_experiment()
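
Examples #1 and #2 unpack a (batch_size, num_gpus, args) tuple from utils.parser_util.get_args, and Example #3 below unpacks an additional support_num. The parser itself is not part of these snippets; the following is a minimal argparse-based sketch of what such a helper might look like, with flag names and defaults chosen for illustration only.

import argparse

def get_args():
    # Hypothetical minimal utils/parser_util.py; the real flag set is larger.
    parser = argparse.ArgumentParser(description='DAGAN experiment settings')
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--num_of_gpus', type=int, default=1)
    parser.add_argument('--data_dir', type=str, default='datasets/')
    parser.add_argument('--im_size', type=int, default=64)
    args = parser.parse_args()
    # Return the two most frequently used values separately, plus the full namespace.
    return args.batch_size, args.num_of_gpus, args
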
Example #3
import data_with_matchingclassifier as dataset
from generation_builder_with_matchingclassifier import ExperimentBuilder
from utils.parser_util import get_args

batch_size, num_gpus, support_num, args = get_args()
# Set the data provider to use for the experiment

if args.dataset == 'omniglot':
    print('omniglot')
    data = dataset.OmniglotDAGANDataset(batch_size=batch_size, last_training_class_index=900,
                                        reverse_channels=True, num_of_gpus=num_gpus, gen_batches=1000,
                                        support_number=support_num, is_training=args.is_training,
                                        general_classification_samples=args.general_classification_samples,
                                        selected_classes=args.selected_classes, image_size=args.image_width)

elif args.dataset == 'vggface':
    print('vggface')
    data = dataset.VGGFaceDAGANDataset(batch_size=batch_size, last_training_class_index=1600,
                                       reverse_channels=True, num_of_gpus=num_gpus, gen_batches=1000,
                                       support_number=support_num, is_training=args.is_training,
                                       general_classification_samples=args.general_classification_samples,
                                       selected_classes=args.selected_classes, image_size=args.image_width)

elif args.dataset == 'miniimagenet':
    print('miniimagenet')
    data = dataset.miniImagenetDAGANDataset(batch_size=batch_size, last_training_class_index=900,
                                            reverse_channels=True, num_of_gpus=num_gpus, gen_batches=1000,
                                            support_number=support_num, is_training=args.is_training,
                                            general_classification_samples=args.general_classification_samples,
                                            selected_classes=args.selected_classes, image_size=args.image_width)

elif args.dataset == 'emnist':
    print('emnist')
    data = dataset.emnistDAGANDataset(batch_size=batch_size, last_training_class_index=900,
                                      reverse_channels=True, num_of_gpus=num_gpus, gen_batches=1000,
                                      support_number=support_num, is_training=args.is_training,
                                      general_classification_samples=args.general_classification_samples,
                                      selected_classes=args.selected_classes, image_size=args.image_width)

elif args.dataset == 'figr':
    print('figr')
    data = dataset.FIGRDAGANDataset(batch_size=batch_size, last_training_class_index=900, reverse_channels=True,