Example #1
0
                            help='data repeat num')
        self.parser = parser

    def parse(self):
        """Parse an empty argv through the stored parser and attach
        derived `cuda` / `device` fields to the resulting namespace."""
        opts = self.parser.parse_args(args=[])
        # Use CUDA only when the hardware supports it and the user
        # did not opt out via --no-cuda.
        use_cuda = torch.cuda.is_available() and not opts.no_cuda
        opts.cuda = use_cuda
        opts.device = torch.device('cuda' if use_cuda else 'cpu')
        return opts


if __name__ == '__main__':
    args = Options().parse()

    # Active architecture; alternatives tried previously include
    # DenseNet, LeNet5 and ShallowConvNet — swap the constructor here.
    args.model = DenseNet_CNN(args.device)

    loaders = get_train_valid_loader(
        './',
        batch_size=args.batch_size,
        random_seed=123,
        valid_ratio=0.1,
        shuffle=True)
    args.train_loader, args.valid_loader = loaders
    Trainer(args).train()

    # Evaluate on a fixed, unshuffled test loader after training.
    args.test_loader = get_test_loader('./', batch_size=8, shuffle=False)
    Tester(args).test()
Example #2
0
# Seed every RNG source for reproducible runs.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
use_cuda = torch.cuda.is_available()
if use_cuda:
    torch.cuda.manual_seed(args.seed)

device = torch.device("cuda:0" if use_cuda else "cpu")

# Pixel permutation: random for permuted-MNIST, identity otherwise.
perm = torch.randperm(784) if args.pmnist else torch.arange(0, 784).long()

train_loader, valid_loader = get_train_valid_loader(
    args.data, args.batchsize, perm, shuffle=True)
test_loader = get_test_loader(args.data, args.batchsize, perm)

# Build the MNIST model. NOTE(review): this rebinds the (presumed) imported
# `model` module name to the model instance — confirm that is intentional.
model = model.mnistModel(args.model,
                         args.ninp,
                         args.nhid,
                         args.nlayers,
                         args,
                         quantize=args.quantize)
model.to(device)
criterion = nn.CrossEntropyLoss()
criterion.to(device)
# Parameter groups, presumably for distinct optimizer settings
# (full-precision vs. quantization-invariant) — TODO confirm downstream use.
params_fp = []
params_invariant = []
for name, param in model.named_parameters():
    # NOTE(review): the loop body below does not match the loop header — it
    # references `gpu`, `net`, `load`, `batch_size`, `lr`, none of which are
    # defined above, and never uses `name`/`param`. This looks like two
    # separate snippets spliced together (a missing "Example #3" boundary);
    # verify against the original sources before relying on any of it.
    criterion = nn.CrossEntropyLoss()
    if gpu:
        #net.cuda()
        #net = nn.DataParallel(net)
        net = nn.DataParallel(net, device_ids=device_ids)
        net = net.cuda()
        #net.cuda(device_ids[0])
        #net = net.cuda(device_ids[0])
        #criterion.cuda()

    # Optionally resume from a fixed checkpoint path.
    if load:
        net.load_state_dict(torch.load('checkpoints/CP26.pth'))

    train_loader, valid_loader, num_train, num_valid = get_train_valid_loader(
        data_dir='inflammation/',
        batch_size=batch_size,
        num_workers=0,
        pin_memory=False)

    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.85,
                          weight_decay=0.0005)
    '''
	for index_batch, (img, label) in enumerate(train_loader):
		print(index_batch)
		print(img.size())
		print(label.size())

	train(criterion, epoch, writer, net, train_loader, optimizer, batch_size=batch_size,
		  check_point_dir=check_point_dir, gpu=gpu)
Example #4
0
# either train

if config.is_train:
    if config.cv:
        # K-fold cross-validation: train once per fold and log validation
        # accuracy (per fold + mean) to cross_val.txt.
        cv_folds = 5
        trans = transforms.Compose([transforms.ToTensor()])
        # Load the full dataset once; folds reuse/update the same index list.
        dataset = ToxicDataset(csv_file="aggregate_tox.csv", root_dir=Config().data_dir)
        num_train = len(dataset)
        indices = list(range(num_train))
        values = []
        for fold in range(cv_folds):
            kwargs['fold'] = fold
            data_loader, indices = get_train_valid_loader(
                dataset, indices, config.data_dir, config.batch_size,
                config.random_seed, config.valid_size,
                config.shuffle, config.show_sample, config.cv, **kwargs
            )
            trainer = Trainer(config, data_loader)
            valid_acc = trainer.train()
            values.append(valid_acc)
            # Append the per-fold accuracy; `with` guarantees the handle is
            # closed (and data flushed) even if a write raises.
            with open("cross_val.txt", "a+") as cross_file:
                cross_file.write(str(valid_acc))
                cross_file.write("\n")
        # Record the mean validation accuracy across all folds.
        with open("cross_val.txt", "a+") as cross_file:
            cross_file.write(str(np.mean(np.array(values))))
            cross_file.write("\n")
    else:
        save_config(config)