p=args.slabeled) fold = 1 for i in six.moves.range(fold): train_idx, test_idx = next(iter(lplo)) labeled_data_dict = { 'data': unlabeled_data_dict['data'][test_idx].astype(np.float32), 'target': dataset['train']['target'][test_idx].astype(np.int32) } # labeled_data = datafeeders.SiameseFeeder(labeled_data_dict, batchsize=args.lbatch) unlabeled_data = DataFeeder(unlabeled_data_dict, batchsize=args.ubatch) test_data = datafeeders.SiameseFeeder(test_data_dict, batchsize=args.valbatch) labeled_data.hook_preprocess(mnist_preprocess) unlabeled_data.hook_preprocess(mnist_preprocess_u) test_data.hook_preprocess(mnist_preprocess) # Model Setup h_units = 1200 model = models.SiameseModel( Mlp(labeled_data['data'][0].size, h_units, h_units, np.max(labeled_data['target']) + 1)) if args.gpu >= 0: cuda.get_device(args.gpu).use() model.to_gpu() # Opimizer Setup alpha = 0.002 optimizer = optimizers.Adam(alpha=alpha) optimizer.setup(model)
# Configure GPU device
if args.gpu >= 0:
    cuda.check_cuda_available()
xp = cuda.cupy if args.gpu >= 0 else np

# Load MNIST and flatten each image into a 1-D float32 vector.
dataset = mnist.load()
dim = dataset['train']['data'][0].size
N_train = len(dataset['train']['target'])
N_test = len(dataset['test']['target'])
train_data_dict = {
    'data': dataset['train']['data'].reshape(N_train, dim).astype(np.float32)}
test_data_dict = {
    'data': dataset['test']['data'].reshape(N_test, dim).astype(np.float32)}
train_data = DataFeeder(train_data_dict, batchsize=args.batch)
test_data = DataFeeder(test_data_dict, batchsize=args.valbatch)
train_data.hook_preprocess(mnist_preprocess)
test_data.hook_preprocess(mnist_preprocess)

# Model setup: one-hidden-layer autoencoder (ReLU encoder, linear decoder).
h_units = 1200
model = models.AutoencoderModel(
    Perceptrons(train_data['data'][0].size, h_units, activation=F.relu),
    Perceptrons(h_units, train_data['data'][0].size))
if args.gpu >= 0:
    cuda.get_device(args.gpu).use()
    model.to_gpu()

# Optimizer setup
test_data_dict = {
    'data': dataset['test']['data'].reshape(N_test, dim).astype(np.float32),
    'target': dataset['test']['target'].astype(np.int32)}
unlabeled_data_dict = {
    'data': dataset['train']['data'].reshape(N_train, dim).astype(np.float32)}

# Build the labeled subset: hold out args.slabeled samples per fold.
# NOTE(review): iter(lplo) is re-created each iteration, so every fold
# would see the same split -- harmless here because fold == 1.
lplo = cross_validation.LeavePLabelOut(labels=six.moves.range(N_train),
                                       p=args.slabeled)
fold = 1
for i in six.moves.range(fold):
    train_idx, test_idx = next(iter(lplo))
    labeled_data_dict = {
        'data': unlabeled_data_dict['data'][test_idx].astype(np.float32),
        'target': dataset['train']['target'][test_idx].astype(np.int32)}
    labeled_data = DataFeeder(labeled_data_dict, batchsize=args.lbatch)
    unlabeled_data = DataFeeder(unlabeled_data_dict, batchsize=args.ubatch)
    test_data = DataFeeder(test_data_dict, batchsize=args.valbatch)
    labeled_data.hook_preprocess(mnist_preprocess)
    unlabeled_data.hook_preprocess(mnist_preprocess)
    test_data.hook_preprocess(mnist_preprocess)

    # Model setup: MLP classifier; output size = number of classes.
    h_units = 1200
    model = models.ClassifierModel(
        Mlp(labeled_data['data'][0].size, h_units, h_units,
            np.max(labeled_data['target']) + 1))
    if args.gpu >= 0:
        cuda.get_device(args.gpu).use()
        model.to_gpu()

    # Optimizer setup
    optimizer = optimizers.Adam()
    optimizer.setup(model)
# Making labeled data: hold out args.slabeled samples for the labeled set.
lplo = cross_validation.LeavePLabelOut(labels=six.moves.range(N_train),
                                       p=args.slabeled)
fold = 1
for i in six.moves.range(fold):
    train_idx, test_idx = next(iter(lplo))
    labeled_data_dict = {
        'data': unlabeled_data_dict['data'][test_idx].astype(np.float32),
        'target': dataset['train']['target'][test_idx].astype(np.int32)}
    labeled_data = DataFeeder(labeled_data_dict, batchsize=args.lbatch)
    unlabeled_data = DataFeeder(unlabeled_data_dict, batchsize=args.ubatch)
    test_data = DataFeeder(test_data_dict, batchsize=args.valbatch)
    labeled_data.hook_preprocess(mnist_preprocess)
    unlabeled_data.hook_preprocess(mnist_preprocess)
    test_data.hook_preprocess(mnist_preprocess)

    # Model setup: MLP classifier; output size = number of classes.
    h_units = 1200
    model = models.ClassifierModel(
        Mlp(labeled_data['data'][0].size, h_units, h_units,
            np.max(labeled_data['target']) + 1))
    if args.gpu >= 0:
        cuda.get_device(args.gpu).use()
        model.to_gpu()

    # Optimizer setup
    optimizer = optimizers.Adam()
    optimizer.setup(model)
xp = cuda.cupy if args.gpu >= 0 else np

# Load CIFAR-10; images are kept in their original shape for the conv net.
dataset = cifar10.load()
dim = dataset['train']['data'][0].size
N_train = len(dataset['train']['target'])
N_test = len(dataset['test']['target'])
train_data_dict = {
    'data': dataset['train']['data'].astype(np.float32),
    'target': dataset['train']['target'].astype(np.int32)}
test_data_dict = {
    'data': dataset['test']['data'].astype(np.float32),
    'target': dataset['test']['target'].astype(np.int32)}
train_data = DataFeeder(train_data_dict, batchsize=args.batch)
test_data = DataFeeder(test_data_dict, batchsize=args.valbatch)
train_data.hook_preprocess(cifar_preprocess)
test_data.hook_preprocess(cifar_preprocess)

# Model setup
model = models.ClassifierModel(AllConvNet())
# model = models.ClassifierModel(AllConvNetBN())
if args.gpu >= 0:
    cuda.get_device(args.gpu).use()
    model.to_gpu()

# Optimizer setup: Adam with light weight decay.
optimizer = optimizers.Adam()
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(0.00002))
unlabeled_data_dict = {
    'data': dataset['train']['data'].reshape(N_train, dim).astype(np.float32)}

# Making labeled data: hold out args.slabeled samples for the labeled set.
lplo = cross_validation.LeavePLabelOut(labels=six.moves.range(N_train),
                                       p=args.slabeled)
fold = 1
for i in six.moves.range(fold):
    train_idx, test_idx = next(iter(lplo))
    labeled_data_dict = {
        'data': unlabeled_data_dict['data'][test_idx].astype(np.float32),
        'target': dataset['train']['target'][test_idx].astype(np.int32)}
    labeled_data = datafeeders.TripletFeeder(labeled_data_dict, batchsize=args.lbatch)
    unlabeled_data = DataFeeder(unlabeled_data_dict, batchsize=args.ubatch)
    test_data = datafeeders.TripletFeeder(test_data_dict, batchsize=args.valbatch)
    labeled_data.hook_preprocess(mnist_preprocess)
    unlabeled_data.hook_preprocess(mnist_preprocess_u)
    test_data.hook_preprocess(mnist_preprocess)

    # Model setup: triplet loss over an MLP embedding; output size =
    # number of classes.
    h_units = 1200
    model = models.TripletModel(
        Mlp(labeled_data['data'][0].size, h_units, h_units,
            np.max(labeled_data['target']) + 1))
    if args.gpu >= 0:
        cuda.get_device(args.gpu).use()
        model.to_gpu()

    # Optimizer setup
    alpha = 0.002
dim = dataset["train"]["data"][0].size
N_train = len(dataset["train"]["target"])
N_test = len(dataset["test"]["target"])

# Feed CIFAR-10 as float32 images with int32 class targets.
train_data_dict = {
    "data": dataset["train"]["data"].astype(np.float32),
    "target": dataset["train"]["target"].astype(np.int32),
}
test_data_dict = {
    "data": dataset["test"]["data"].astype(np.float32),
    "target": dataset["test"]["target"].astype(np.int32),
}
train_data = DataFeeder(train_data_dict, batchsize=args.batch)
test_data = DataFeeder(test_data_dict, batchsize=args.valbatch)
train_data.hook_preprocess(cifar_preprocess)
test_data.hook_preprocess(cifar_preprocess)

# Model setup
model = models.ClassifierModel(AllConvNet())
# model = models.ClassifierModel(AllConvNetBN())
if args.gpu >= 0:
    cuda.get_device(args.gpu).use()
    model.to_gpu()

# Optimizer setup: Adam with light weight decay.
optimizer = optimizers.Adam()
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(0.00002))