def main():

    batch_size = 32

    # train
    cifar_train = datasets.CIFAR10('cifar',
                                   train=True,
                                   transform=transforms.Compose([
                                       transforms.Resize((32, 32)),
                                       transforms.ToTensor()
                                   ]),
                                   download=True)

    cifar_train = dataloader(cifar_train, batch_size=batch_size, shuffle=True)

    # validation
    cifar_test = datasets.CIFAR10('cifar',
                                  train=False,
                                  transform=transforms.Compose([
                                      transforms.Resize((32, 32)),
                                      transforms.ToTensor()
                                  ]),
                                  download=True)

    cifar_test = dataloader(cifar_test, batch_size=batch_size, shuffle=False)

    x, label = next(iter(cifar_train))
    print('x: ', x.shape, 'label: ', label.shape)
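These snippets are clipped from larger files, so their imports are omitted. Judging from the later examples (which import DataLoader under the alias dataloader), a preamble along these lines is assumed throughout the listing:

import torch
import torch.nn as nn
from torchvision import datasets, transforms
from torch.utils.data import DataLoader as dataloader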
Example 2
def main():
    # arg parser
    args = arg_parser()

    # set seed
    set_seed(args.seed)

    # dataset
    id_traindata = datasets.CIFAR10('./data/', train=True, download=True)
    id_testdata = datasets.CIFAR10('./data/', train=False, download=True)

    id_traindata = RotDataset(id_traindata, train_mode=True)
    id_testdata = RotDataset(id_testdata, train_mode=False)

    # data loader
    if args.method == 'rot' or args.method == 'msp':
        train_loader = dataloader(id_traindata,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.num_workers,
                                  pin_memory=True)
    else:
        raise ValueError(args.method)

    test_loader = dataloader(id_testdata,
                             batch_size=args.batch_size,
                             num_workers=args.num_workers,
                             pin_memory=True)

    # model
    num_classes = 10
    model = WideResNet(args.layers,
                       num_classes,
                       args.widen_factor,
                       dropRate=args.droprate)
    model.rot_head = nn.Linear(128, 4)
    model = model.cuda()

    # optimizer
    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.decay,
                                nesterov=True)

    # training
    for epoch in range(1, args.epochs + 1):

        train_loss = train(args,
                           epoch,
                           model,
                           train_loader,
                           optimizer,
                           lr_scheduler=None)
        test_loss, test_acc = test(args, model, test_loader)

        print('epoch:{}, train_loss:{}, test_loss:{}, test_acc:{}'.format(
            epoch, round(train_loss.item(), 4), round(test_loss.item(), 4),
            round(test_acc, 4)))
        torch.save(model.state_dict(),
                   './trained_model_{}.pth'.format(args.method))
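RotDataset is a project-local wrapper that the listing never shows. Based on how Example 13 unpacks its batches (four rotated views plus the class label), a minimal sketch might look like this; the real implementation is an assumption:

# Hypothetical sketch of RotDataset: yields the image rotated by
# 0/90/180/270 degrees together with the original class label.
import torch
from torch.utils.data import Dataset
from torchvision import transforms

class RotDataset(Dataset):
    def __init__(self, base_dataset, train_mode=True):
        self.base = base_dataset
        self.train_mode = train_mode  # the real class may branch on this
        self.to_tensor = transforms.ToTensor()

    def __len__(self):
        return len(self.base)

    def __getitem__(self, idx):
        img, label = self.base[idx]  # PIL image, int label
        x = self.to_tensor(img)
        # torch.rot90 rotates in the H/W plane (dims 1 and 2 of a C,H,W tensor)
        return (x,
                torch.rot90(x, 1, dims=(1, 2)),
                torch.rot90(x, 2, dims=(1, 2)),
                torch.rot90(x, 3, dims=(1, 2)),
                label)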
Example 3
def main():
    # arg parser 
    args = arg_parser()
    
    # set seed 
    set_seed(args.seed)  
    
    # dataset 
    id_testdata = datasets.CIFAR10('./data/', train=False, download=True)
    id_testdata = RotDataset(id_testdata, train_mode=False)

    if args.ood_dataset == 'cifar100':
        ood_testdata = datasets.CIFAR100('./data/', train=False, download=True)
    elif args.ood_dataset == 'svhn':
        ood_testdata = datasets.SVHN('./data/', split='test', download=True)
    else:
        raise ValueError(args.ood_dataset)
    ood_testdata = RotDataset(ood_testdata, train_mode=False)
    
    # data loader  
    id_test_loader = dataloader(id_testdata, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=True)
    ood_test_loader = dataloader(ood_testdata, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=True)
  
    # load model
    num_classes = 10
    model = WideResNet(args.layers, num_classes, args.widen_factor, dropRate=args.droprate)
    model.rot_head = nn.Linear(128, 4)
    model = model.cuda()
    model.load_state_dict(torch.load('./models/trained_model_{}.pth'.format(args.method)))

    # TODO: score calculation continues in Example 13 below
Example 4
def train(**kwargs):
	for k_, v_ in kwargs.items():
		setattr(config, k_, v_)
	train_dataset, val_dataset, test_dataset = Ali(config)
	train_loader = dataloader(dataset=train_dataset, batch_size=config.batch_size, 
		shuffle=True, drop_last=True)
	val_loader = dataloader(dataset=val_dataset, batch_size=config.batch_size)

	model = DeepFM()

	# testing 
	if config.test_flag:
		test_loader = dataloader(dataset=test_dataset, batch_size=config.batch_size)
		model.load_state_dict(torch.load(os.path.join(config.model_path, '_best')))
		test(model, test_loader, config.output_path)
		return

	criterion = torch.nn.BCELoss()
	optimizer = Adam(model.parameters(), lr=config.lr, betas=(config.beta1, config.beta2))
	best_val_loss = 1e6
	if torch.cuda.is_available():
		model.cuda()
		criterion.cuda()
	
	# resume training
	start = 0
	if config.resume:
		model_epoch = [int(fname.split('_')[-1]) for fname in os.listdir(config.model_path) 
			if 'best' not in fname]
		start = max(model_epoch)
		model.load_state_dict(torch.load(os.path.join(config.model_path, f'_epoch_{start}')))
	if start >= config.epochs:
		print('Training already Done!')
		return 

	for i in range(start, config.epochs):
		for ii, (c_data, labels) in enumerate(train_loader):
			c_data = to_var(c_data)
			labels = to_var(labels)

			pred = model(c_data)
			loss = criterion(pred, labels)
			optimizer.zero_grad()
			loss.backward()
			optimizer.step()

			if (ii + 1) % config.eval_every == 0:
				val_loss = val(model, val_loader)
				print(f'epochs: {i + 1}/{config.epochs} batch: {ii + 1}/{len(train_loader)}\t'
					f'train_loss: {loss.item() / c_data.size(0)}, val_loss: {val_loss}')

				torch.save(model.state_dict(), os.path.join(config.model_path, f'_epoch_{i}'))
				if val_loss < best_val_loss:
					best_val_loss = val_loss
					torch.save(model.state_dict(), os.path.join(config.model_path, '_best'))
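The to_var and val helpers are not defined anywhere in the listing. A plausible to_var for this era of PyTorch (an assumption, not the project's code):

# Hypothetical helper: move a tensor to the GPU when one is available.
# Older PyTorch code also wrapped the tensor in autograd.Variable here.
def to_var(x):
    if torch.cuda.is_available():
        x = x.cuda()
    return x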
Example 5
def split_data(datasets,
               train_idx,
               test_idx,
               samplers=torch.utils.data.SubsetRandomSampler,
               batchsize=200):

    train_smp, test_smp = samplers(train_idx), samplers(test_idx)

    # returns train_loader, test_loader
    train_loader = dataloader(datasets, sampler=train_smp, batch_size=batchsize)
    test_loader = dataloader(datasets, sampler=test_smp, batch_size=batchsize)
    return train_loader, test_loader
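A usage sketch for split_data; the dataset and the 90/10 index split are assumptions, since the listing never calls the function:

# Usage sketch (assumed): split off the last 10% of indices for testing.
dataset = datasets.CIFAR10('cifar', train=True,
                           transform=transforms.ToTensor(), download=True)
indices = list(range(len(dataset)))
cut = int(0.9 * len(indices))
train_loader, test_loader = split_data(dataset,
                                       train_idx=indices[:cut],
                                       test_idx=indices[cut:])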
Example 6
def main():

    opt = options.parse()
    # initialize train or test working directory
    opt.model_dir = os.path.join("results", opt.name)
    logging.info("model directory %s" % opt.model_dir)
    if not os.path.exists(opt.model_dir):
        os.makedirs(opt.model_dir)
    log_dir = opt.model_dir
    log_path = os.path.join(log_dir, "train.log")
    util.opt2file(opt, os.path.join(log_dir, "opt.txt"))
    # log setting
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    formatter = logging.Formatter(log_format)
    fh = logging.FileHandler(log_path, 'a')
    fh.setFormatter(formatter)
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    logging.getLogger().addHandler(fh)
    logging.getLogger().addHandler(ch)
    log_level = logging.INFO
    logging.getLogger().setLevel(log_level)

    # define dataset
    indices = list(range(opt.num_example))
    rng = np.random.RandomState(0)
    rng.shuffle(indices)
    split = int(0.9 * len(indices))  # slice bounds must be integers
    train_idx = indices[:split]
    valid_idx = indices[split:]
    ds_train = Try_On_dataset(root=opt.data_dir,
                              indices=train_idx,
                              data_aug=opt.data_aug,
                              img_size=opt.img_size,
                              crop_size=opt.crop_size)
    ds_valid = Try_On_dataset(root=opt.data_dir,
                              indices=valid_idx,
                              data_aug=opt.data_aug,
                              img_size=opt.img_size,
                              crop_size=opt.crop_size)
    loader_train = dataloader(ds_train, shuffle=True,
                              batch_size=opt.batch_size,
                              num_workers=opt.num_workers)
    loader_valid = dataloader(ds_valid, shuffle=False,
                              batch_size=opt.batch_size,
                              num_workers=opt.num_workers)
Example 7
train_dataset = datasets.MNIST(root='./data',
                               train=True,
                               transform=transforms.Compose([
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.1307, ), (0.3081, ))
                               ]),
                               download=True)

test_dataset = datasets.MNIST(root='./data',
                              train=False,
                              transform=transforms.Compose([
                                  transforms.ToTensor(),
                                  transforms.Normalize((0.1307, ), (0.3081, ))
                              ]),
                              download=True)

train_loader = dataloader(dataset=train_dataset,
                          batch_size=batch_size,
                          shuffle=True)

test_loader = dataloader(dataset=test_dataset,
                         batch_size=batch_size,
                         shuffle=False)

model = LogisticRegression(input_dim, output_dim)
model.cuda()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=lr_rate)

# start training
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # minimal completion of the truncated loop: flatten, forward, backward
        images = images.view(-1, input_dim).cuda()
        labels = labels.cuda()
        outputs = model(images)
        loss = criterion(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

Example 8
import torch
import torch.nn as nn
from torchvision import datasets, transforms
from torch.utils.data import DataLoader as dataloader
from torch.autograd import Variable
mean_grey = 0.1307
stddev_grey = 0.3081
batch_size = 100
transforms = transforms.Compose([transforms.Resize((28, 28)),
                                 transforms.ToTensor(),
                                 transforms.Normalize((mean_grey, ), (stddev_grey, ))])


test_datasets = datasets.MNIST(root=r'./data',
                               train=False,
                               transform=transforms)
test_loader = dataloader(dataset=test_datasets,
                         shuffle=False,
                         batch_size=batch_size)


net = CNN()
CUDA = torch.cuda.is_available()
if CUDA:
    net = net.cuda()

loss_function = nn.CrossEntropyLoss()

weights = torch.load('./weights/mnist_weights.pth.tar', map_location='cpu')

net.load_state_dict(weights)
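The snippet ends after loading the weights; a minimal evaluation pass over test_loader (assumed, not part of the original) would be:

net.eval()
correct = total = 0
with torch.no_grad():
    for images, labels in test_loader:
        if CUDA:
            images, labels = images.cuda(), labels.cuda()
        _, predicted = torch.max(net(images), 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('test accuracy: {:.2f}%'.format(100 * correct / total))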

Example 9
import torch
import torch.nn as nn
from torch.nn import functional as F
from torchvision import datasets, transforms
from tqdm.notebook import tqdm
from torch.utils.data import DataLoader as dataloader

# Loading Up the data from the dataset
Train_Data = datasets.MNIST(root="./datasets",
                            train=True,
                            transform=transforms.ToTensor(),
                            download=True)
Test_Data = datasets.MNIST(root="./datasets",
                           train=False,
                           transform=transforms.ToTensor(),
                           download=True)

Train_Loader = dataloader(Train_Data, batch_size=200, shuffle=True)
Test_Loader = dataloader(Test_Data, batch_size=200, shuffle=False)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


# Building the model class
class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.Layer1 = nn.Linear(784, 500)
        self.Layer2 = nn.Linear(500, 10)

    def forward(self, x):
        x = x.view(-1, 784)
        x = F.relu(self.Layer1(x))
        return self.Layer2(x)
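The snippet cuts off inside forward; with the return added above, a minimal training step (assumed, not part of the listing) looks like:

model = Net().to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
criterion = nn.CrossEntropyLoss()

images, labels = next(iter(Train_Loader))
loss = criterion(model(images.to(device)), labels.to(device))
optimizer.zero_grad()
loss.backward()
optimizer.step()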
Example 10
num_epochs = 50
batch_size = 100  # assumed: not defined in this snippet; matches the test-side snippet in Example 8
mean_grey = 0.1307
stddev_grey = 0.3081
transforms = transforms.Compose([
    transforms.Resize((28, 28)),
    transforms.ToTensor(),
    transforms.Normalize((mean_grey, ), (stddev_grey, ))
])

train_datasets = datasets.MNIST(root=r'./data',
                                train=True,
                                transform=transforms,
                                download=True)

train_loader = dataloader(dataset=train_datasets,
                          shuffle=True,
                          batch_size=batch_size)

net = CNN()
CUDA = torch.cuda.is_available()
if CUDA:
    net = net.cuda()

optimizer = torch.optim.SGD(net.parameters(), lr=0.01)

loss_function = nn.CrossEntropyLoss()

train_loss = []
train_accuracy = []

for epoch in range(num_epochs):
    # minimal completion of the truncated loop: one pass over the training set
    correct = 0
    total = 0
    running_loss = 0.0
    for images, labels in train_loader:
        if CUDA:
            images, labels = images.cuda(), labels.cuda()
        outputs = net(images)
        loss = loss_function(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    train_loss.append(running_loss / len(train_loader))
    train_accuracy.append(100 * correct / total)
Example 11
    def load_data_dataloader(self, batch_size):

        x = []
        y = []

        itr = 0
        with open(self.data_path) as file:
            for line in file:

                # skip first line (title)
                if itr == 0:
                    itr += 1
                    continue

                if itr % 10000 == 0:
                    print("Loaded: ", itr)

                if itr >= self.num_data_points:
                    break

                line_vars = line.split('\t')

                peptide_sequence = line_vars[0]
                retention_time = float(line_vars[1].rstrip('\n'))

                # skip len > 20
                if len(peptide_sequence) > self.max_length_sequence:
                    continue

                peptide_matrix = self.peptide_seq_to_onehot_matrix(
                    peptide_sequence)

                x.append(peptide_matrix)
                y.append(retention_time)

                if self.backwards:
                    x.append(peptide_matrix[::-1])
                    y.append(retention_time)

                itr += 1

            print("loaded: {}".format(self.data_path))

            # train/test split
            cutoff = math.floor(len(x) * .9)

            X_train = torch.Tensor(x[:cutoff])
            Y_train = torch.Tensor(y[:cutoff])

            X_val = torch.Tensor(x[cutoff:])
            Y_val = torch.Tensor(y[cutoff:])

            my_dataset = torch.utils.data.TensorDataset(
                X_train, Y_train)  # create your dataset
            my_dataloader = dataloader(my_dataset,
                                       batch_size=batch_size,
                                       shuffle=True)  # create your dataloader

            my_dataset_val = torch.utils.data.TensorDataset(
                X_val, Y_val)  # create your dataset
            my_dataloader_val = dataloader(
                my_dataset_val, batch_size=batch_size,
                shuffle=False)  # create your dataloader (shuffling is unnecessary for validation)

            return my_dataloader, my_dataloader_val
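peptide_seq_to_onehot_matrix is defined elsewhere in that class; a minimal sketch, assuming the standard 20-letter amino-acid alphabet and zero-padding to the maximum sequence length:

import numpy as np

AMINO_ACIDS = 'ACDEFGHIKLMNPQRSTVWY'  # assumed alphabet

def peptide_seq_to_onehot_matrix(seq, max_len=20):
    # one row per residue position, one column per amino acid
    matrix = np.zeros((max_len, len(AMINO_ACIDS)), dtype=np.float32)
    for row, residue in enumerate(seq[:max_len]):
        matrix[row, AMINO_ACIDS.index(residue)] = 1.0
    return matrix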
Example 12
def create_dataloader(config):
    dataloader = find_model_using_name("data_loader.data_loaders",
                                       config['data_loader']['type'])
    instance = dataloader(config)
    print("dataset [%s] was created" % (config['data_loader']['type']))
    return instance
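find_model_using_name comes from elsewhere in that project. A common implementation pattern for this kind of string-to-class lookup (a sketch, not the project's actual code) uses importlib:

import importlib

def find_model_using_name(module_name, class_name):
    # import the module and look the class up by name
    module = importlib.import_module(module_name)
    cls = getattr(module, class_name, None)
    if cls is None:
        raise NotImplementedError(
            'class %s not found in module %s' % (class_name, module_name))
    return cls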
Example 13
def main(args):
    # set seed
    set_seed(args.seed)

    # dataset
    id_testdata = datasets.CIFAR10('./data/', train=False, download=True)
    id_testdata = RotDataset(id_testdata, train_mode=False)

    if args.ood_dataset == 'cifar100':
        ood_testdata = datasets.CIFAR100('./data/', train=False, download=True)
    elif args.ood_dataset == 'svhn':
        ood_testdata = datasets.SVHN('./data/', split='test', download=True)
    else:
        raise ValueError(args.ood_dataset)
    ood_testdata = RotDataset(ood_testdata, train_mode=False)

    # data loader
    id_test_loader = dataloader(id_testdata,
                                batch_size=args.batch_size,
                                num_workers=args.num_workers,
                                pin_memory=True)
    ood_test_loader = dataloader(ood_testdata,
                                 batch_size=args.batch_size,
                                 num_workers=args.num_workers,
                                 pin_memory=True)

    # load model
    num_classes = 10
    model = WideResNet(args.layers,
                       num_classes,
                       args.widen_factor,
                       dropRate=args.droprate)
    model.rot_head = nn.Linear(128, 4)
    model = model.to(device)
    model.load_state_dict(
        torch.load('./models/trained_model_{}.pth'.format(args.method),
                   map_location=device))

    # TODO:
    ## 1. calculate ood score by two methods(MSP, Rot)
    model.eval()
    id_testdata_score, ood_testdata_score = [], []

    for idx, loader in enumerate([id_test_loader, ood_test_loader]):
        for x_tf_0, x_tf_90, x_tf_180, x_tf_270, batch_y in tqdm(loader):
            batch_size = x_tf_0.shape[0]
            batch_x = torch.cat([x_tf_0, x_tf_90, x_tf_180, x_tf_270],
                                0).to(device)
            batch_y = batch_y.to(device)
            batch_rot_y = torch.cat(
                (torch.zeros(batch_size), torch.ones(batch_size),
                 2 * torch.ones(batch_size), 3 * torch.ones(batch_size)),
                0).long().to(device)

            logits, pen = model(batch_x)

            classification_probabilities = F.softmax(logits[:batch_size],
                                                     dim=-1)
            rot_logits = model.rot_head(pen)

            classification_loss = torch.max(classification_probabilities,
                                            dim=-1)[0].data.cpu()
            rotation_loss = F.cross_entropy(rot_logits,
                                            batch_rot_y,
                                            reduction='none').data

            uniform_distribution = torch.zeros_like(
                classification_probabilities).fill_(1 / num_classes)
            kl_divergence_loss = nn.KLDivLoss(reduction='none')(
                classification_probabilities.log(), uniform_distribution).data

            for i in range(batch_size):
                if args.method == 'msp':
                    score = -classification_loss[i]
                elif args.method == 'rot':
                    rotation_loss_tensor = torch.tensor([
                        rotation_loss[i], rotation_loss[i + batch_size],
                        rotation_loss[i + 2 * batch_size],
                        rotation_loss[i + 3 * batch_size]
                    ])
                    score = -torch.sum(kl_divergence_loss[i]) + torch.mean(
                        rotation_loss_tensor)

                if idx == 0:
                    id_testdata_score.append(score)
                elif idx == 1:
                    ood_testdata_score.append(score)

    y_true = torch.cat((torch.zeros(
        len(id_testdata_score)), torch.ones(len(ood_testdata_score))), 0)

    y_score = torch.cat(
        (torch.tensor(id_testdata_score), torch.tensor(ood_testdata_score)),
        0).float()

    ## 2. calculate AUROC by using ood scores
    print(f"dataset : {args.ood_dataset}, method : {args.method}")
    print(roc_auc_score(y_true, y_score))
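Example 13 relies on names it never imports (F, tqdm, roc_auc_score, device) plus the project-local WideResNet, RotDataset, and set_seed helpers. A plausible preamble (assumed):

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader as dataloader
from torchvision import datasets
from tqdm import tqdm
from sklearn.metrics import roc_auc_score

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')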
Example 14
batch_size = 64
learning_rate = 1e-2
num_epoches = 20

data_tf = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize([0.5], [0.5])])

train_dataset = datasets.MNIST(root='./MNIST_data',
                               train=True,
                               transform=data_tf,
                               download=True)
test_dataset = datasets.MNIST(root='./MNIST_data',
                              train=False,
                              transform=data_tf)
train_loader = dataloader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = dataloader(test_dataset, batch_size=batch_size, shuffle=False)

model = simplenn.Net(28 * 28, 300, 100, 10)
if torch.cuda.is_available():
    model = model.cuda()

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate)

model.eval()
eval_loss = 0
eval_acc = 0

# (the training loop is clipped from this snippet; what follows is the
# evaluation pass, completed minimally)
for data in test_loader:
    img, label = data
    img = img.view(img.size(0), -1)  # flatten the 28x28 images for the MLP
    if torch.cuda.is_available():
        img, label = img.cuda(), label.cuda()
    out = model(img)
    loss = criterion(out, label)
    eval_loss += loss.item() * label.size(0)
    _, pred = torch.max(out, 1)
    eval_acc += (pred == label).sum().item()

print('Test Loss: {:.6f}, Acc: {:.6f}'.format(
    eval_loss / len(test_dataset), eval_acc / len(test_dataset)))