# argparse
# Fix: the description said "Supervised Multi Layer Perceptron Example",
# but this script is the autoencoder example (logger 'MNIST AE', data dicts
# carry no 'target' key, loss-only log modes).
parser = argparse.ArgumentParser(description='MNIST Autoencoder Example')
parser.add_argument('--epoch', '-e', type=int, default=10,
                    help='training epoch (default: 10)')
parser.add_argument('--batch', '-b', type=int, default=300,
                    help='training batchsize (default: 300)')
parser.add_argument('--valbatch', '-v', type=int, default=1000,
                    help='validation batchsize (default: 1000)')
parser.add_argument('--gpu', '-g', type=int, default=-1,
                    help='GPU device #, if you want to use cpu, use -1 (default: -1)')
args = parser.parse_args()


def mnist_preprocess(data):
    """Scale the batch's raw pixel values from [0, 255] into [0, 1] in place."""
    data['data'] /= 255.
    return data


# Logger setup
logger = Logger('MNIST AE',
                train_log_mode='TRAIN_LOSS_ONLY',
                test_log_mode='TEST_LOSS_ONLY')

# Configure GPU Device
if args.gpu >= 0:
    cuda.check_cuda_available()
xp = cuda.cupy if args.gpu >= 0 else np

# loading dataset
dataset = mnist.load()
dim = dataset['train']['data'][0].size
N_train = len(dataset['train']['target'])
N_test = len(dataset['test']['target'])
# Flatten each image to a dim-length float32 vector. No 'target' key:
# the autoencoder reconstructs its own input.
train_data_dict = {'data': dataset['train']['data'].reshape(N_train, dim).astype(np.float32)}
test_data_dict = {'data': dataset['test']['data'].reshape(N_test, dim).astype(np.float32)}
train_data = DataFeeder(train_data_dict, batchsize=args.batch)
parser.add_argument('--epoch', '-e', type=int, default=3,
                    help='training epoch (default: 3)')
parser.add_argument('--batch', '-b', type=int, default=64,
                    help='training batchsize (default: 64)')
parser.add_argument('--valbatch', '-v', type=int, default=64,
                    help='validation batchsize (default: 64)')
parser.add_argument('--model', '-m', type=str, default='baseline',
                    choices=cnn_models.keys(),
                    help='Model you train (default: baseline)')
parser.add_argument('--output', '-o', type=str, default='mnist_baseline.h5',
                    help='Name of trained model file (default: mnist_baseline.h5)')
parser.add_argument('--gpu', '-g', type=int, default=-1,
                    help='GPU device #, if you want to use cpu, use -1 (default: -1)')
args = parser.parse_args()


def mnist_preprocess(data):
    """Scale pixels into [0, 1] and restore the image shape for the CNN."""
    data['data'] /= 255.
    # NOTE(review): reshapes a flattened sample to (1, 28, 28) — assumes the
    # feeder applies this preprocessor per sample, not per batch; confirm.
    data['data'] = data['data'].reshape(1, 28, 28)
    return data


# Experiment logger (default log modes).
logger = Logger('MNIST CNN')

# Choose the array backend: CuPy when a GPU id was given, NumPy otherwise.
if args.gpu >= 0:
    cuda.check_cuda_available()
xp = cuda.cupy if args.gpu >= 0 else np

# Fetch MNIST and flatten every image into a dim-length float32 vector;
# labels become int32 class ids.
dataset = mnist.load()
dim = dataset['train']['data'][0].size
N_train = len(dataset['train']['target'])
N_test = len(dataset['test']['target'])
train_data_dict = {
    'data': dataset['train']['data'].reshape(N_train, dim).astype(np.float32),
    'target': dataset['train']['target'].astype(np.int32),
}
test_data_dict = {
    'data': dataset['test']['data'].reshape(N_test, dim).astype(np.float32),
    'target': dataset['test']['target'].astype(np.int32),
}
parser.add_argument(
    '--gpu', '-g', type=int, default=-1,
    help='GPU device #, if you want to use cpu, use -1 (default: -1)')
args = parser.parse_args()


def mnist_preprocess(data):
    """Normalize pixel intensities to [0, 1] in place and return the dict."""
    data['data'] /= 255.
    return data


# Set up the experiment logger.
logger = Logger('MNIST MLP')

# Array backend selection: CuPy for GPU runs, NumPy for CPU runs.
if args.gpu >= 0:
    cuda.check_cuda_available()
xp = cuda.cupy if args.gpu >= 0 else np

# Load MNIST; each image is flattened to a dim-length float32 vector and
# labels are cast to int32.
dataset = mnist.load()
dim = dataset['train']['data'][0].size
N_train = len(dataset['train']['target'])
N_test = len(dataset['test']['target'])
train_data_dict = {
    'data': dataset['train']['data'].reshape(N_train, dim).astype(np.float32),
    'target': dataset['train']['target'].astype(np.int32),
}
def mnist_preprocess_u(data): data['data'] /= 255. return data # Logger setup def vat_train_log(res): log_str = '{0:d}, loss={1:.5f}, lds={2:.5f}'.format( res['iteration'], res['loss'], res['lds']) return log_str logger = Logger('MNIST SIAMESE MLP VAT', train_log_mode='TRAIN_VAT', test_log_mode='TEST_LOSS_ONLY') logger.mode['TRAIN_VAT'] = vat_train_log # Configure GPU Device if args.gpu >= 0: cuda.check_cuda_available() xp = cuda.cupy if args.gpu >= 0 else np # loading dataset dataset = mnist.load() dim = dataset['train']['data'][0].size N_train = len(dataset['train']['target']) N_test = len(dataset['test']['target']) test_data_dict = { 'data': dataset['test']['data'].reshape(N_test, dim).astype(np.float32),
parser.add_argument('--ubatch', '-u', type=int, default=250,
                    help='unlabeled training batchsize (default: 250)')
parser.add_argument('--valbatch', '-v', type=int, default=1000,
                    help='validation batchsize (default: 1000)')
parser.add_argument('--slabeled', '-s', type=int, default=100,
                    help='size of labeled training samples (default: 100)')
parser.add_argument('--gpu', '-g', type=int, default=-1,
                    help='GPU device #, if you want to use cpu, use -1 (default: -1)')
args = parser.parse_args()


def mnist_preprocess(data):
    """Normalize pixel intensities to [0, 1] in place and return the dict."""
    data['data'] /= 255.
    return data


# Logger setup with a custom formatter for the VAT training log line.
def vat_train_log(res):
    """Render iteration, loss, LDS and accuracy from a result dict."""
    return '{0:d}, loss={1:.5f}, lds={2:.5f}, accuracy={3:.5f}'.format(
        res['iteration'], res['loss'], res['lds'], res['accuracy'])


logger = Logger('MNIST MLP', train_log_mode='TRAIN_VAT')
logger.mode['TRAIN_VAT'] = vat_train_log

# Pick the array module: CuPy when a GPU id was given, NumPy otherwise.
if args.gpu >= 0:
    cuda.check_cuda_available()
xp = cuda.cupy if args.gpu >= 0 else np

# Load MNIST; images are flattened to dim-length float32 vectors. The whole
# training split doubles as the unlabeled pool (no 'target' key there).
dataset = mnist.load()
dim = dataset['train']['data'][0].size
N_train = len(dataset['train']['target'])
N_test = len(dataset['test']['target'])
test_data_dict = {
    'data': dataset['test']['data'].reshape(N_test, dim).astype(np.float32),
    'target': dataset['test']['target'].astype(np.int32),
}
unlabeled_data_dict = {
    'data': dataset['train']['data'].reshape(N_train, dim).astype(np.float32),
}
args = parser.parse_args() def mnist_preprocess(data): data['data'] /= 255. return data # Logger setup def vat_train_log(res): log_str = '{0:d}, loss={1:.5f}, lds={2:.5f}, accuracy={3:.5f}'.format( res['iteration'], res['loss'], res['lds'], res['accuracy']) return log_str logger = Logger('MNIST MLP', train_log_mode='TRAIN_VAT') logger.mode['TRAIN_VAT'] = vat_train_log # Configure GPU Device if args.gpu >= 0: cuda.check_cuda_available() xp = cuda.cupy if args.gpu >= 0 else np # loading dataset dataset = mnist.load() dim = dataset['train']['data'][0].size N_train = len(dataset['train']['target']) N_test = len(dataset['test']['target']) test_data_dict = { 'data': dataset['test']['data'].reshape(N_test, dim).astype(np.float32), 'target': dataset['test']['target'].astype(np.int32)
# argparse parser = argparse.ArgumentParser(description='All Convolutional Network Example on CIFAR-10') parser.add_argument('--epoch', '-e', type=int, default=300, help='training epoch (default: 100)') parser.add_argument('--batch', '-b', type=int, default=500, help='training batchsize (default: 100)') parser.add_argument('--valbatch', '-v', type=int, default=1000, help='validation batchsize (default: 100)') parser.add_argument('--gpu', '-g', type=int, default=-1, help='GPU device #, if you want to use cpu, use -1 (default: -1)') args = parser.parse_args() def cifar_preprocess(data): data['data'] /= 255. return data # Logger setup logger = Logger('CIFAR10 AllConvNet') # Configure GPU Device if args.gpu >= 0: cuda.check_cuda_available() xp = cuda.cupy if args.gpu >= 0 else np # loading dataset dataset = cifar10.load() dim = dataset['train']['data'][0].size N_train = len(dataset['train']['target']) N_test = len(dataset['test']['target']) train_data_dict = {'data':dataset['train']['data'].astype(np.float32), 'target':dataset['train']['target'].astype(np.int32)} test_data_dict = {'data':dataset['test']['data'].astype(np.float32),
'-g', type=int, default=-1, help='GPU device #, if you want to use cpu, use -1 (default: -1)') args = parser.parse_args() def cifar_preprocess(data): data['data0'] /= 255. data['data1'] /= 255. return data # Logger setup logger = Logger('CIFAR SIAMESE', train_log_mode='TRAIN_LOSS_ONLY', test_log_mode='TEST_LOSS_ONLY') # Configure GPU Device if args.gpu >= 0: cuda.check_cuda_available() xp = cuda.cupy if args.gpu >= 0 else np # loading dataset dataset = cifar10.load() dim = dataset['train']['data'][0].size N_train = len(dataset['train']['target']) N_test = len(dataset['test']['target']) train_data_dict = { 'data': dataset['train']['data'].astype(np.float32),