def main():
    # Build training and validation loaders from the training data file.
    train_loader, val_loader = create_train_val_dataloaders(
        args.train_data_fp, batch_size=args.batch_size)

    # Instantiate the model with dropout disabled and move it to the GPU if available.
    model = DenseNet(drop_prob=0)
    if use_cuda:
        model = model.cuda()

    train(model, train_loader, val_loader)
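# A minimal sketch (not part of the original file) of how the module-level
# `args` and `use_cuda` that main() reads might be defined. The flag names
# mirror the attributes accessed above; the default batch size and the rest
# of the setup are illustrative assumptions.
import argparse

import torch

parser = argparse.ArgumentParser()
parser.add_argument('--train_data_fp', type=str, required=True)
parser.add_argument('--batch_size', type=int, default=32)
args = parser.parse_args()

use_cuda = torch.cuda.is_available()

if __name__ == '__main__':
    main()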
elif helper.params['dataset'] == 'dif':
    num_classes = len(helper.labels)
else:
    # Any other dataset falls back to 10 classes.
    num_classes = 10

reseed(5)

# Instantiate the network named in the config; classifier heads are resized
# to num_classes where the backbone is pretrained.
if helper.params['model'] == 'densenet':
    net = DenseNet(num_classes=num_classes, depth=helper.params['densenet_depth'])
elif helper.params['model'] == 'resnet':
    logger.info(f'Model size: {num_classes}')
    net = models.resnet18(num_classes=num_classes)
elif helper.params['model'] == 'PretrainedRes':
    net = models.resnet18(pretrained=True)
    net.fc = nn.Linear(512, num_classes)
    net = net.cuda()
elif helper.params['model'] == 'FlexiNet':
    net = FlexiNet(3, num_classes)
elif helper.params['model'] == 'dif_inception':
    net = inception_v3(pretrained=True, dif=True)
    net.fc = nn.Linear(768, num_classes)
    net.aux_logits = False
elif helper.params['model'] == 'inception':
    net = inception_v3(pretrained=True)
    net.fc = nn.Linear(2048, num_classes)
    net.aux_logits = False
    # model = torch.nn.DataParallel(model).cuda()
elif helper.params['model'] == 'mobilenet':
    net = MobileNetV2(n_class=num_classes, input_size=64)
elif helper.params['model'] == 'word':
    net = RNNModel(rnn_type='LSTM',