elif args.channels == 1: normalize = transforms.Normalize(mean=[0.5], std=[0.5]) # create model avg_pool_size = (args.avg_pooling_height, args.avg_pooling_width) model = densenet_multi(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), num_classes=args.num_classes, channels=args.channels, avg_pooling_size=avg_pool_size) train_transforms = transforms.Compose([transforms.ToTensor(), normalize]) val_transforms = transforms.Compose([transforms.ToTensor(), normalize]) # create optimizer optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.5, 0.999)) # start main loop main(args, model, pil_loader, pil_loader, normalize, train_dataset=MultiTaskDataset, train_model=train_model, validate_model=validate_model, train_transforms=train_transforms, val_transforms=val_transforms, optimizer=optimizer)
# Build the model; the final average-pooling size comes from CLI arguments.
avg_pool_size = (args.avg_pooling_height, args.avg_pooling_width)

if args.pretrained:
    # Pretrained DenseNet-121 weights assume 3-channel RGB input and a
    # fixed 7x7 final average pool; reject incompatible settings early.
    if args.channels != 3:
        print('(pretrained) Must fix channels == 3')
        exit()
    # BUG FIX: the original test used `and`, which only rejected the
    # configuration when BOTH dimensions differed from 7 — e.g. a 7x5
    # pool slipped through. Either dimension differing is incompatible.
    if args.avg_pooling_width != 7 or args.avg_pooling_height != 7:
        print('(pretrained) Must fix avg_pooling_size == 7')
        exit()
    # Pretrained topology is fixed by the published weights:
    # num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16).
    model = densenet121_pretrained(num_classes=args.num_classes)
else:
    # Smaller, trained-from-scratch variant with configurable channels/pool.
    model = DenseNet(num_init_features=32,
                     growth_rate=16,
                     block_config=(6, 12, 24, 16),
                     num_classes=args.num_classes,
                     channels=args.channels,
                     avg_pooling_size=avg_pool_size)

# Random-crop augmentation for training; no val_transforms are passed, so
# validation uses whatever default main() applies.
train_transforms = transforms.Compose([
    transforms.RandomCrop((args.image_height, args.image_width)),
    transforms.ToTensor(),
    normalize,
])

optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

# Hand everything to the shared training driver.
main(args, model, train_image_loader, valid_image_loader, normalize, optimizer,
     train_dataset=TrainDataset,
     valid_dataset=ValDataset,
     train_model=train_model,
     validate_model=validate_model,
     train_transforms=train_transforms)
# Hard-wired ImageNet-style defaults for this entry point.
args.image_height = 224
args.num_classes = 1000
args.channels = 3


def pil_loader(path):
    """Open the image at *path*, convert it to RGB (grayscale 'L' when
    args.channels != 3) and resize it to the configured dimensions."""
    with open(path, 'rb') as handle:
        with Image.open(handle) as image:
            mode = 'RGB' if args.channels == 3 else 'L'
            converted = image.convert(mode)
            return converted.resize(
                (args.resize_image_width, args.resize_image_height))


if __name__ == '__main__':
    # Standard ImageNet per-channel normalization statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # Look up the requested architecture by name in torchvision's model
    # zoo and load its pretrained weights.
    model = models.__dict__[args.arch](num_classes=args.num_classes,
                                       pretrained=True)

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Hand everything to the shared training driver.
    main(args, model, pil_loader, pil_loader, normalize, optimizer)
# Derive categorical cardinalities / embedding sizes from the training CSV.
cat_dims, emb_dims = get_dims(find_csv(args.data, 'train'))

# Tabular feed-forward net: 5 continuous features, two hidden layers,
# 2-way output, light dropout on embeddings and hidden layers.
model = FeedForwardNN(emb_dims,
                      no_of_cont=5,
                      lin_layer_sizes=[50, 100],
                      output_size=2,
                      emb_dropout=0.04,
                      lin_layer_dropouts=[0.2, 0.2])

optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)

# BUG FIX: the original built `transforms = transforms.Compose(
# transforms.ToTensor())`, which shadowed the `transforms` module, passed
# a bare transform where Compose requires a list, and was never used —
# removed. Tabular data needs no image transforms; pass an identity.


def _identity(x):
    """Pass inputs through unchanged (tabular data needs no image transforms)."""
    return x


# Hand everything to the shared training driver; loaders and normalization
# are placeholders since no images are involved.
main(args, model, dummyImageLoader, dummyImageLoader, None, optimizer,
     train_dataset=DummyDataset,
     valid_dataset=DummyDataset,
     train_model=train_model,
     validate_model=validate_model,
     train_transforms=_identity,
     val_transforms=_identity)
if __name__ == '__main__':
    # Per-channel ImageNet statistics for RGB; symmetric 0.5/0.5 for grayscale.
    if args.channels == 3:
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    elif args.channels == 1:
        normalize = transforms.Normalize(mean=[0.5], std=[0.5])
    else:
        # BUG FIX: previously any other channel count fell through and the
        # script crashed later with a confusing NameError on `normalize`;
        # fail fast with a clear message instead.
        raise ValueError(
            'Unsupported number of channels: {} (expected 1 or 3)'.format(
                args.channels))

    # Select the backbone; pooling size comes from CLI arguments.
    avg_pool_size = (args.avg_pooling_height, args.avg_pooling_width)
    if args.simple_resnet:
        model = resnet18(num_classes=args.num_classes,
                         channels=args.channels,
                         avg_pooling_size=avg_pool_size)
    else:
        model = densenet121(num_classes=args.num_classes,
                            channels=args.channels,
                            avg_pooling_size=avg_pool_size)

    # Train and validation share the same pipeline.
    train_transforms = transforms.Compose([transforms.ToTensor(), normalize])
    val_transforms = transforms.Compose([transforms.ToTensor(), normalize])

    # NOTE(review): unlike the sibling entry points, no optimizer (and no
    # train_model) is passed to main() — confirm main() supplies defaults
    # or that this script is validation-only.
    main(args, model, pil_loader, pil_loader, normalize,
         validate_model=validate_model,
         train_transforms=train_transforms,
         val_transforms=val_transforms)