def load_checkpoint(name):
    """Load a trained SE-DenseNet checkpoint from ``../models/`` and return it in eval mode.

    Args:
        name: Checkpoint filename inside ``../models/`` (e.g. ``"model.dat"``).

    Returns:
        The ``densenet121`` model with restored weights, moved to ``device``
        and switched to ``eval()`` mode.
    """
    # BUG FIX: the original read ``torch.load("../models/" + model)`` — `model`
    # was referenced before assignment (NameError); the `name` parameter was
    # clearly the intended value.
    check = torch.load("../models/" + name, map_location="cuda")
    # Best validation loss recorded in the checkpoint; read here so a missing
    # key fails fast (the value itself is not used by this function).
    best_val_loss = check["val_best_loss"]
    # NOTE(review): the original built an se_resnet34 and immediately
    # overwrote it with densenet121; the dead se_resnet34 allocation is removed.
    model = densenet121(if_selayer=True).to(device)
    model.load_state_dict(check["model"])
    model = model.to(device)
    model.eval()
    return model
# --- Inference configuration and backbone selection (script-level fragment) ---
size = args.size
# Augmentation probabilities — presumably consumed by a training/eval loop
# elsewhere in this file; not used in the visible lines. TODO confirm.
cutmix_prob = 0.5
mixup_prob = 0.1
cutout_prob = 0.5
random_erasing_prob = 0.5
print("Running device: ", device)
print("batchsize: ", batchsize)
# Names of the EfficientNet variants b0..b7; efficientnet_name is presumably
# assigned later from this list — verify against the rest of the file.
efficientnet_name = ""
efficientnets = ["efficientnet-b" + str(i) for i in range(8)]
# Pick the backbone from the CLI argument; every branch builds a 2-head
# (multi_output=True) model on `device`.
if args.model == "resnet34":
    # model = se_resnet34(num_classes=2).to(device)
    model = se_resnet34(num_classes=2, multi_output=True).to(device)
elif args.model == "bengali_resnet34":
    model = model_bengali.se_resnet34(num_classes=2, multi_output=True).to(device)
elif args.model == "bengali_resnext50":
    model = model_bengali.se_resnext50_32x4d(num_classes=2, multi_output=True).to(device)
elif args.model == "resnet152":
    model = se_resnet152(num_classes=2, multi_output=True).to(device)
elif args.model == "resnext50":
    model = se_resnext50_32x4d(num_classes=2, multi_output=True).to(device)
elif args.model == "resnext101":
    model = se_resnext101_32x8d(num_classes=2, multi_output=True).to(device)
elif args.model == "densenet":
    model = densenet121(if_selayer=True).to(device)
elif args.model == "inception_v3":
    # NOTE(review): this branch's body continues past the visible chunk —
    # intentionally left truncated here.
transforms = transforms.Compose( [transforms.ToPILImage(mode=None), transforms.ToTensor()]) # load test_all = load_test_df() test_dataset = BengalDataset(df=test_all, transform=transforms, test_dataset_flag=True) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batchsize, shuffle=False) model = se_resnet34(num_classes=2).to(device) loss_fn = torch.nn.CrossEntropyLoss() # model load fn = "../models/model.dat" checkpoint = torch.load(fn) model.load_state_dict(checkpoint["model"]) model = model.to(device) print("model loaded.") model.eval() pred1, pred2, pred3 = [], [], [] with torch.no_grad(): print("start inference...") for idx, (inputs, _, _, _) in tqdm(enumerate(test_loader),
# --- Training configuration for the single 168-class model (label 2) ---
lr = args.lr
device = "cuda:0" if torch.cuda.is_available() else "cpu"
# Raw input image dimensions and the square crop size fed to the network.
height = 137
width = 236
size = 128
# Augmentation probabilities (presumably consumed by the training loop below
# this fragment — TODO confirm).
cutmix_prob = 0.1
mixup_prob = 0.1
cutout_prob = 0.5
random_erasing_prob = 0.5
print("Running device: ", device)

# train model for label2
if args.model == "resnet34":
    # model = se_resnet34(num_classes=2).to(device)
    model = se_resnet34(num_classes=168, multi_output=False).to(device)
elif args.model == "resnet152":
    model = se_resnet152(num_classes=168, multi_output=False).to(device)
elif args.model == "densenet":
    # BUG FIX: original assigned `mode2` (typo), leaving `model` undefined on
    # the densenet path and crashing any later use of `model`.
    model = densenet121(if_selayer=True).to(device)
elif args.model == "resnext50":
    # NOTE(review): the resnext branches pass multi_output=True while the
    # other branches use False — kept as-is; confirm against the training loop.
    model = se_resnext50_32x4d(num_classes=168, multi_output=True).to(device)
elif args.model == "resnext101":
    model = se_resnext101_32x8d(num_classes=168, multi_output=True).to(device)
else:
    raise ValueError()

# train_all = load_train_df()
_, vowels, graphemes, consonants = load_pickle_images()
imgs = np.asarray(pd.read_pickle(os.path.join(data_folder, "cropped_imgs.pkl")))
# --- Training configuration: three independent per-label classifiers ---
seed = args.seed
epoch_num = args.epoch
batchsize = args.batchsize
lr = args.lr
device = "cuda:0" if torch.cuda.is_available() else "cpu"
# Raw input image dimensions.
height = 137
width = 236
# Augmentation probabilities (presumably consumed by the training loop below
# this fragment — TODO confirm).
cutmix_prob = 0.1
mixup_prob = 0.1
print("Running device: ", device)

# train models for each label
# model1: 11 classes, model2: 168 classes, model3: 7 classes — presumably the
# vowel-diacritic / grapheme-root / consonant-diacritic heads; verify.
if args.model == "resnet34":
    # model = se_resnet34(num_classes=2).to(device)
    model1 = se_resnet34(num_classes=11, multi_output=False).to(device)
    model2 = se_resnet34(num_classes=168, multi_output=False).to(device)
    model3 = se_resnet34(num_classes=7, multi_output=False).to(device)
elif args.model == "resnet152":
    model1 = se_resnet152(num_classes=11, multi_output=False).to(device)
    model2 = se_resnet152(num_classes=168, multi_output=False).to(device)
    model3 = se_resnet152(num_classes=7, multi_output=False).to(device)
elif args.model == "densenet":
    # BUG FIX: original assigned `model`, `mode2`, `mode3` here, leaving the
    # model1/model2/model3 names used by every other branch undefined on the
    # densenet path.
    model1 = densenet121(if_selayer=True).to(device)
    model2 = densenet121(if_selayer=True).to(device)
    model3 = densenet121(if_selayer=True).to(device)
else:
    raise ValueError()

# train_all = load_train_df()
_, vowels, graphemes, consonants = load_pickle_images()