Code example #1
File: subspace_sgd.py  Project: yyht/drbayes
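# Assumed preamble (not shown in this snippet): `args` comes from an argparse parser
# defined earlier in subspace_sgd.py, and roughly these imports are in scope:
#   import os, sys
#   import torch
#   from swag import data, models            # package name as used in Example #2
#   from swag.posteriors import SWAG         # assumed import path for the SWAG class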
with open(os.path.join(args.dir, 'command.sh'), 'w') as f:
    f.write(' '.join(sys.argv))
    f.write('\n')

torch.backends.cudnn.benchmark = True
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)

print('Using model %s' % args.model)
model_cfg = getattr(models, args.model)

print('Loading dataset %s from %s' % (args.dataset, args.data_path))
loaders, num_classes = data.loaders(args.dataset,
                                    args.data_path,
                                    args.batch_size,
                                    args.num_workers,
                                    model_cfg.transform_train,
                                    model_cfg.transform_test,
                                    use_validation=not args.use_test,
                                    split_classes=args.split_classes)

print('Preparing model')
print(*model_cfg.args)

model = model_cfg.base(*model_cfg.args,
                       num_classes=num_classes,
                       **model_cfg.kwargs)
model.cuda()

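# Wrap the base architecture in a SWAG posterior that builds a PCA subspace
# over the collected SGD iterates.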
swag_model = SWAG(model_cfg.base,
                  num_classes=num_classes,
                  subspace_type='pca',
Code example #2
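# Dump the raw CIFAR-10 arrays to .npy files, including a copy of the training
# labels shuffled in place (e.g. for random-label experiments).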
from swag import data, models, utils, losses
import numpy as np

loaders, num_classes = data.loaders("CIFAR10",
                                    "~/datasets/",
                                    1000,
                                    4,
                                    None,
                                    None,
                                    use_validation=False,
                                    split_classes=None,
                                    shuffle_train=False)

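# torchvision's CIFAR10 dataset exposes the raw images as `.data` and the labels as `.targets`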
train_x = loaders['train'].dataset.data
train_y = np.array(loaders['train'].dataset.targets)

np.save("train_x", train_x)
np.save("train_y", train_y)
np.random.shuffle(train_y)
np.save("shuffled_train_y", train_y)

test_x = loaders['test'].dataset.data
test_y = np.array(loaders['test'].dataset.targets)
np.save("test_x", test_x)
np.save("test_y", test_y)
Code example #3
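# Assumed preamble (not shown): the snippet opens inside a parser.add_argument call
# that defines the --loss option (args.loss is read below); earlier lines presumably
# also define --data_path, --batch_size, --seed and --swa_resume, and import
# argparse, torch, and `data`, `models`, `losses` from the swag package.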
                    choices=["cross_entropy", "aleatoric"],
                    default="cross_entropy")

args = parser.parse_args()

torch.backends.cudnn.benchmark = True
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)

model_cfg = getattr(models, "FCDenseNet67")
loaders, num_classes = data.loaders(
    "CamVid",
    args.data_path,
    args.batch_size,
    4,
    ft_batch_size=1,
    transform_train=model_cfg.transform_train,
    transform_test=model_cfg.transform_test,
    joint_transform=model_cfg.joint_transform,
    ft_joint_transform=model_cfg.ft_joint_transform,
    target_transform=model_cfg.target_transform,
)

# criterion = nn.NLLLoss(weight=camvid.class_weight[:-1].cuda(), reduction='none').cuda()
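# Pick the segmentation loss: plain cross-entropy, or a variant that also
# models per-pixel aleatoric uncertainty.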
if args.loss == "cross_entropy":
    criterion = losses.seg_cross_entropy
else:
    criterion = losses.seg_ale_cross_entropy

# construct and load model
if args.swa_resume is not None:
    checkpoint = torch.load(args.swa_resume)
Code example #4
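# Assumed preamble (not shown): `args`, `train_utils`, and imports of os, sys,
# and the `data`/`models` modules come from earlier in the script.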
print('Preparing directory %s' % args.dir)
os.makedirs(args.dir, exist_ok=True)
with open(os.path.join(args.dir, 'command.sh'), 'w') as f:
    f.write(' '.join(sys.argv))
    f.write('\n')

print('Using model %s' % args.model)
model_cfg = getattr(models, args.model)

loaders, num_classes = data.loaders(
    args.dataset,
    args.data_path,
    args.batch_size,
    args.num_workers,
    ft_batch_size=args.ft_batch_size,
    transform_train=model_cfg.transform_train,
    transform_test=model_cfg.transform_test,
    joint_transform=model_cfg.joint_transform,
    ft_joint_transform=model_cfg.ft_joint_transform,
    target_transform=model_cfg.target_transform)
print('Beginning with cropped images')
train_loader = loaders['train']

print('Preparing model')
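# use_aleatoric presumably enables the extra variance output required by the aleatoric loss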
model = model_cfg.base(*model_cfg.args,
                       num_classes=num_classes,
                       **model_cfg.kwargs,
                       use_aleatoric=args.loss == 'aleatoric')
model.cuda()
model.apply(train_utils.weights_init)
Code example #5
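# Assumed preamble (not shown): `args` comes from an argparse parser, with os, sys,
# torch, and the `data`/`models` modules imported as in the earlier examples. The
# snippet begins inside the same command.sh-logging block as Examples #1 and #4:
with open(os.path.join(args.dir, 'command.sh'), 'w') as f:  # as in Examples #1 and #4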
    f.write(' '.join(sys.argv))
    f.write('\n')

torch.backends.cudnn.benchmark = True
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)

print('Using model %s' % args.model)
model_cfg = getattr(models, args.model)

print('Loading dataset %s from %s' % (args.dataset, args.data_path))
loaders, num_classes = data.loaders(args.dataset,
                                    args.data_path,
                                    args.batch_size,
                                    args.num_workers,
                                    transform_train=model_cfg.transform_test,  # evaluate without augmentation (intentional)
                                    transform_test=model_cfg.transform_test,
                                    shuffle_train=False,
                                    use_validation=not args.use_test,
                                    split_classes=args.split_classes)

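# A second set of loaders with training-time augmentation and shuffling enabled,
# presumably used only to refresh batch-norm statistics (utils.bn_update) before evaluation.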
loaders_bn, _ = data.loaders(args.dataset,
                             args.data_path,
                             args.batch_size,
                             args.num_workers,
                             transform_train=model_cfg.transform_train,
                             transform_test=model_cfg.transform_test,
                             shuffle_train=True,
                             use_validation=not args.use_test,
                             split_classes=args.split_classes)