del model torch.cuda.empty_cache() return probs, preds, labels if __name__ == '__main__': ''' Example: python test.py --tta True --csv_out results/submission_UW_galdran_11Mar.csv ''' data_path = 'data' use_cuda = torch.cuda.is_available() device = torch.device("cuda:0" if use_cuda else "cpu") # reproducibility seed_value = 0 set_seeds(seed_value, use_cuda) # gather parser parameters args = parser.parse_args() model_name = args.model_name load_path_od = args.load_path_od load_path_mac = args.load_path_mac pretrained = args.pretrained bs = args.batch_size csv_test_od = args.csv_test_od csv_test_mac = args.csv_test_mac n_classes = args.n_classes tta = args.tta csv_out = args.csv_out ####################################################################################################################
if args.device.startswith("cuda"):
    # In case one has multiple devices, we must first set the one
    # we would like to use so pytorch can find it.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.device.split(":", 1)[1]
    if not torch.cuda.is_available():
        raise RuntimeError("cuda is not currently available!")
    print(f"* Training on device '{args.device}'...")
    device = torch.device("cuda")
else:
    # cpu
    device = torch.device(args.device)

# reproducibility: fix all RNG seeds before anything stochastic runs
seed_value = 0
set_seeds(seed_value, args.device.startswith("cuda"))

# gather parser parameters (parser/args come from earlier in this file)
model_name = args.model_name
max_lr, min_lr, bs, grad_acc_steps = args.max_lr, args.min_lr, args.batch_size, args.grad_acc_steps
cycle_lens, metric = args.cycle_lens.split('/'), args.metric
cycle_lens = list(map(int, cycle_lens))

if len(cycle_lens) == 2:
    # handles option of specifying cycles as pair (n_cycles, cycle_len)
    cycle_lens = cycle_lens[0] * [cycle_lens[1]]

# Image size may be given as "H,W" or a single "S" (meaning S x S).
im_size = tuple([int(item) for item in args.im_size.split(',')])
if isinstance(im_size, tuple) and len(im_size) == 1:
    # NOTE(review): the two-element case presumably follows past this chunk
    # boundary — confirm tg_size is also set when len(im_size) == 2.
    tg_size = (im_size[0], im_size[0])