def test():
    submit_path = config.submit + config.model_name + os.sep + config.description + os.sep + str(config.fold) + os.sep  # save submitted csv results
    weight_path = config.weights + config.model_name + os.sep + config.description + os.sep + str(config.fold) + os.sep
    csv_map = OrderedDict({'cls': [], 'label': [], 'probability': []})
    test_loader = DataLoader(customDataset(config.test_data, train=False),
                             batch_size=config.batch_size * 2,
                             shuffle=False,
                             pin_memory=True)
    model = get_net()
    model = DataParallel(model.cuda(), device_ids=config.gpus)
    checkpoint = torch.load(weight_path + 'model_best.pth.tar')
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()
    with torch.no_grad():
        for index, (data, file_paths) in enumerate(test_loader):
            labels = [int(path.split('/')[-2]) for path in file_paths]
            data = Variable(data).cuda()
            output = model(data)
            smax = nn.Softmax(1)
            smax_out = smax(output)
            _, cls = torch.max(smax_out, 1)
            csv_map['cls'].extend(cls)
            csv_map['label'].extend(labels)
            for output in smax_out:
                prob = ";".join([str(i) for i in output.data.tolist()])
                csv_map['probability'].append(prob)
    result = pd.DataFrame(csv_map)
    result.to_csv(submit_path + 'submit.csv', index=False, header=None)

def __init__(self, model_name, batch_size, gpu_memory):
    super().__init__(batch_size, gpu_memory)
    if model_name in ['pt_vgg', 'pt_resnet', 'pt_inception', 'pt_densenet']:
        model = model_class_dict[model_name](pretrained=True)
        self.mean = np.reshape([0.485, 0.456, 0.406], [1, 3, 1, 1])
        self.std = np.reshape([0.229, 0.224, 0.225], [1, 3, 1, 1])
        model = DataParallel(model.cuda())
    else:
        model = model_class_dict[model_name]()
        if model_name in ['pt_post_avg_cifar10', 'pt_post_avg_imagenet']:
            # checkpoint = torch.load(model_path_dict[model_name])
            self.mean = np.reshape([0.485, 0.456, 0.406], [1, 3, 1, 1])
            self.std = np.reshape([0.229, 0.224, 0.225], [1, 3, 1, 1])
        else:
            model = DataParallel(model).cuda()
            checkpoint = torch.load(model_path_dict[model_name] + '.pth')
            self.mean = np.reshape([0.485, 0.456, 0.406], [1, 3, 1, 1])
            self.std = np.reshape([0.225, 0.225, 0.225], [1, 3, 1, 1])
            model.load_state_dict(checkpoint)
    model.float()
    self.mean, self.std = self.mean.astype(np.float32), self.std.astype(np.float32)
    model.eval()
    self.model = model

class Person_Attribute(object): def __init__(self, weights="resnest50.pth"): self.device = torch.device("cuda") self.net = resnest50().to(self.device) self.net = DataParallel(self.net) self.weights = weights self.net.load_state_dict(torch.load(self.weights)) TRAIN_MEAN = [0.485, 0.499, 0.432] TRAIN_STD = [0.232, 0.227, 0.266] self.transforms = transforms.Compose([ transforms.ToCVImage(), transforms.Resize((128, 256)), transforms.ToTensor(), transforms.Normalize(TRAIN_MEAN, TRAIN_STD) ]) def recog(self, img_path): img = cv2.imread(img_path) img = self.transforms(img) img = img.unsqueeze(0) with torch.no_grad(): self.net.eval() img_input = img.to(self.device) outputs = self.net(img_input) results = [] for output in outputs: output = torch.softmax(output, 1) output = np.array(output[0].cpu()) label = np.argmax(output) score = output[label] results.append((label, score)) return results
def test_pretrain_model():
    device = torch.device("cuda")
    # Configuration parameters
    opt = config.MobileNetV3Config()
    # Validation set
    identity_list = dataset.get_lfw_list(opt.lfw_test_list)
    lfw_img_paths = [os.path.join(opt.lfw_root, each) for each in identity_list]  # paths of all images
    # Build the model
    model = mobileNetV3.MobileNetV3(n_class=opt.embedding,
                                    input_size=opt.input_shape[2],
                                    dropout=opt.dropout_rate)
    model.to(device)
    model = DataParallel(model)
    # Load the pretrained weights
    state = torch.load(MODEL)
    model.load_state_dict(state['state_dict'])
    # Evaluate on the LFW dataset
    accuracy, threshold = lfw_test(model, lfw_img_paths, identity_list, opt)

class CainTester:
    def __init__(self, opt):
        self.opt = opt
        self.model = CainGAN(opt)
        self.model = DataParallel(self.model)
        if opt.device != 'cpu':
            self.model = self.model.cuda()
        self.generated = None

    def infer(self, *data):
        self.generated = self.model(*data, infer=True)
        return self.generated

    def get_latest_generated(self):
        return self.generated

    def init_networks(self):
        if self.opt.resume is None:
            raise Exception("Need resume checkpoint for testing")
        current_epoch = self.opt.resume
        checkpoint = torch.load(self.opt.weight_path + "-" + str(current_epoch),
                                map_location='cpu')
        self.model.load_state_dict(checkpoint['GAN_state_dict'])
        current_epoch = checkpoint['epoch']
        self.model.module.print_network()
        return current_epoch

def infer(model, rank=0):
    model = model.cuda()
    model = DataParallel(model)
    model.load_state_dict(torch.load(model_state_dict))
    model.eval()
    if rank == 0:
        print('preparing dataset...')
    data_iterator = DataIterator(coco_dir,
                                 resize=resize,
                                 max_size=max_size,
                                 batch_size=batch_size,
                                 stride=stride,
                                 training=training,
                                 dist=dist)
    if rank == 0:
        print('finish loading dataset!')
    results = []
    with torch.no_grad():
        for i, (data, ids, ratios) in enumerate(data_iterator, start=1):
            scores, boxes, classes = model(data)
            results.append([scores, boxes, classes, ids, ratios])
            if rank == 0:
                size = len(data_iterator.ids)
                msg = '[{:{len}}/{}]'.format(min(i * batch_size, size), size,
                                             len=len(str(size)))
                print(msg, flush=True)
    results = [torch.cat(r, dim=0) for r in zip(*results)]
    results = [r.cpu() for r in results]

def load_model(model, bin_file, use_dataparallel):
    """
    Given a model instance, loads the weights from bin_file.
    Handles cuda & DataParallel stuff.
    """
    print(bin_file, use_dataparallel, use_cuda)
    if not use_cuda and use_dataparallel:
        warnings.warn("Cuda not available. Model can not be made Data Parallel.")
    state_dict = torch.load(bin_file, map_location=lambda storage, loc: storage)
    new_state_dict = OrderedDict()
    if use_cuda:
        for k, v in state_dict.items():
            if k[:7] == 'module.':
                if use_dataparallel:
                    model = DataParallel(model)
                    break
                else:
                    name = k[7:]  # remove `module.`
                    new_state_dict[name] = v
        model = model.cuda()
    else:
        for k, v in state_dict.items():
            if k[:7] == 'module.':
                name = k[7:]  # remove `module.`
                new_state_dict[name] = v
    if len(new_state_dict.keys()) > 0:
        model.load_state_dict(new_state_dict)
    else:
        model.load_state_dict(state_dict)
    return model

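# --- Illustrative sketch, not taken from the snippet above ---
# Several snippets in this collection handle the same problem by hand: a checkpoint saved
# from a DataParallel-wrapped model has every key prefixed with 'module.'. Assuming the
# checkpoint is a plain state_dict, a small generic helper (strip_module_prefix is a
# hypothetical name) can remove that prefix so the weights load into an unwrapped model:
from collections import OrderedDict


def strip_module_prefix(state_dict):
    """Return a copy of state_dict with a leading 'module.' removed from each key."""
    return OrderedDict((k[7:] if k.startswith('module.') else k, v)
                       for k, v in state_dict.items())
# Usage sketch: model.load_state_dict(strip_module_prefix(torch.load(bin_file)))
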
def main():
    os.system('cp -r ../ConvTasNet "{0}"'.format(config.basePath + '/savedCode'))
    model = DataParallel(ConvTasNet(C=2))
    dataloader = AVSpeech('test')
    dataloader = DataLoader(dataloader,
                            batch_size=config.batchsize['test'],
                            num_workers=config.num_workers['test'],
                            worker_init_fn=init_fn)
    loss_func = SISNRPIT()
    if config.use_cuda:
        model = model.cuda()
    config.pretrained_test = [
        '/home/SharedData/Pragya/ModelsToUse/AudioOnlyConvTasNet.pth',
    ]
    for cur_test in config.pretrained_test:
        print('Currently working on: ', cur_test.split('/')[-1])
        model.load_state_dict(torch.load(cur_test)['model_state_dict'])
        total_loss = test(cur_test.split('/')[-1].split('.')[0], model,
                          dataloader, loss_func)
        torch.cuda.empty_cache()
        print('Average Loss for ', cur_test.split('/')[-1], 'is: ',
              np.mean(total_loss))

def main():
    os.system('cp -r ../ConvTasNet "{0}"'.format(config.basePath + '/savedCode'))
    model = DataParallel(ConvTasNet(C=2))
    print('Total Parameters: ', sum(p.numel() for p in model.parameters()))
    dataloader = AVSpeech('train')
    loss_func = SISNRPIT()
    if config.use_cuda:
        model = model.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=config.lr[1])
    if config.pretrained:
        saved_model = torch.load(config.pretrained_train)
        model.load_state_dict(saved_model['model_state_dict'])
        optimizer.load_state_dict(saved_model['optimizer_state_dict'])
        saved_loss = np.load(config.loss_path).tolist()
    else:
        saved_loss = None
    dataloader = DataLoader(dataloader,
                            batch_size=config.batchsize['train'],
                            num_workers=config.num_workers['train'],
                            worker_init_fn=init_fn)
    train(model, dataloader, optimizer, loss_func, saved_loss)

class Feature_extract(object):
    def __init__(self):
        self.device = torch.device("cuda")
        self.model = resnet.resnet_face18(opt.use_se)
        self.model = DataParallel(self.model)
        self.model.load_state_dict(torch.load(opt.test_model_path))
        self.model.to(self.device)
        normalize = T.Normalize(mean=[0.485, 0.456, 0.406],
                                std=[0.229, 0.224, 0.225])
        self.transforms = T.Compose([T.ToTensor(), normalize])

    def feature_extract(self, img_path):
        img = Image.open(img_path)
        img = img.resize((112, 112))
        img = self.transforms(img)
        img = img.unsqueeze(0)
        with torch.no_grad():
            self.model.eval()
            data_input = img.to(self.device)
            feature = self.model(data_input)
            feature = np.array(feature.cpu())[0, :].tolist()
        vector = np.mat(feature)
        denom = np.linalg.norm(vector)
        return (np.array(feature) / denom).tolist()

def main():
    os.system('cp -r ../Oracle "{0}"'.format(config.basePath + '/savedCode'))
    model = DataParallel(ConvTasNet(C=2, test_with_asr=True))
    dataloader = AVSpeech('test')
    dataloader = DataLoader(dataloader,
                            batch_size=config.batchsize['test'],
                            num_workers=config.num_workers['test'],
                            worker_init_fn=init_fn)
    if config.use_cuda:
        model = model.cuda()
    config.pretrained_test = [
        '/home/SharedData/Pragya/Experiments/Oracle/2020-05-20 15:23:34.411560/116662.pth'
    ]
    for cur_test in config.pretrained_test:
        print('Currently working on: ', cur_test.split('/')[-1])
        model.load_state_dict(torch.load(cur_test)['model_state_dict'])
        total_loss = test(model, dataloader)
        torch.cuda.empty_cache()
        print('Average Loss for ', cur_test.split('/')[-1], 'is: ',
              np.mean(total_loss))

def convert_weights_to_generic_format(model, src_path, dst_path):
    weights = torch.load(src_path)
    # for macbook
    # weights = torch.load(src_path, map_location=torch.device('cpu'))
    if list(weights.keys())[0].startswith('module.'):
        model_parallel = DataParallel(model)
        model_parallel.load_state_dict(weights)
        torch.save(model_parallel.module.state_dict(), dst_path)

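# Hypothetical usage note for convert_weights_to_generic_format (the constructor and paths
# below are placeholders, not from the original repo): when the checkpoint keys carry the
# 'module.' prefix, the function re-wraps the model in DataParallel so the checkpoint loads,
# then saves the unwrapped module's state_dict, which can later be loaded into the bare
# model without DataParallel. Checkpoints without the prefix are left untouched.
#
#   model = build_model()  # placeholder constructor
#   convert_weights_to_generic_format(model, 'ckpt_dataparallel.pth', 'ckpt_plain.pth')
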
class Person_Attribute(object):
    def __init__(self, weights="resnest50.pth"):
        self.device = torch.device("cuda")
        self.net = resnest50().to(self.device)
        self.net = DataParallel(self.net)
        self.weights = weights
        self.net.load_state_dict(torch.load(self.weights))
        TRAIN_MEAN = [0.485, 0.499, 0.432]
        TRAIN_STD = [0.232, 0.227, 0.266]
        self.size = (128, 256)
        self.transforms = transforms.Compose([
            transforms.ToCVImage(),
            transforms.Resize((128, 256)),
            transforms.ToTensor(),
            transforms.Normalize(TRAIN_MEAN, TRAIN_STD)
        ])
        self.mean = torch.tensor([0.485, 0.499, 0.432], dtype=torch.float32)
        self.std = torch.tensor([0.232, 0.227, 0.266], dtype=torch.float32)
        self.atts = [
            "gender", "age", "orientation", "hat", "glasses", "handBag",
            "shoulderBag", "backBag", "upClothing", "downClothing"
        ]

    def detect(self, img):
        # imgss = self.transforms(img)
        image = img.astype('uint8')
        # Pass the flag as the interpolation keyword; positionally it would be taken as `dst`.
        image = cv2.resize(image, self.size, interpolation=cv2.INTER_LINEAR)
        image = image.transpose(2, 0, 1)
        image = torch.from_numpy(image)
        image = image.float() / 255.0
        image = image.sub_(self.mean[:, None, None]).div_(self.std[:, None, None])
        image = image.unsqueeze(0)
        with torch.no_grad():
            self.net.eval()
            img_input = image.to(self.device)
            outputs = self.net(img_input)
            results = []
            for output in outputs:
                output = torch.softmax(output, 1)
                output = np.array(output[0].cpu())
                label = np.argmax(output)
                score = output[label]
                results.append((label, score))
        labels = [i[0] for i in results]
        dict_result = {}
        for att, label in zip(self.atts, labels):
            if label == -1:
                continue
            dict_result.update({str(att): name_dict[att][label]})
        return dict_result

def main(args):
    # Select the hardware device to use for inference.
    if torch.cuda.is_available():
        device = torch.device('cuda', torch.cuda.current_device())
        torch.backends.cudnn.benchmark = True
    else:
        device = torch.device('cpu')

    # Disable gradient calculations.
    torch.set_grad_enabled(False)

    pretrained = not args.model_file
    if pretrained:
        print('No model weights file specified, using pretrained weights instead.')

    # Create the model, downloading pretrained weights if necessary.
    if args.arch == 'hg1':
        model = hg1(pretrained=pretrained)
    elif args.arch == 'hg2':
        model = hg2(pretrained=pretrained)
    elif args.arch == 'hg8':
        model = hg8(pretrained=pretrained)
    else:
        raise Exception('unrecognised model architecture: ' + args.model)
    model = model.to(device)

    if not pretrained:
        assert os.path.isfile(args.model_file)
        print('Loading model weights from file: {}'.format(args.model_file))
        checkpoint = torch.load(args.model_file)
        state_dict = checkpoint['state_dict']
        if sorted(state_dict.keys())[0].startswith('module.'):
            model = DataParallel(model)
        model.load_state_dict(state_dict)

    # Initialise the MPII validation set dataloader.
    # val_dataset = Mpii(args.image_path, is_train=False)
    # val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False,
    #                         num_workers=args.workers, pin_memory=True)

    # Generate predictions for the validation set.
    # _, _, predictions = do_validation_epoch(val_loader, model, device, Mpii.DATA_INFO, args.flip)

    model = hg1(pretrained=True)
    predictor = HumanPosePredictor(model, device='cpu')
    # my_image = image_loader("../inference-img/1.jpg")
    # joints = image_inference(predictor, image_path=None, my_image=my_image)
    # imshow(my_image, joints=joints)
    if args.camera == False:
        inference_video(predictor, "../inference-video/R6llTwEh07w.mp4")
    elif args.camera:
        inference_video(predictor, 0)

def setARCFACE(self):
    model = resnet_face18(False)
    model = DataParallel(model).to(self.device)
    model.load_state_dict(
        torch.load(
            os.path.join(settings.BASE_DIR, 'static/') +
            'src/weights/resnet18_pretrain.pth'))
    # model.load_state_dict(torch.load(os.path.join(settings.BASE_DIR, 'static/') + 'src/weights/resnet18_KFace.pth'))
    model.eval()
    return model

def initialize_model(self, model_type):
    '''
    Loads and initializes the model based on model size
    '''
    g = Generator(200, 64)
    g_model = DataParallel(g)
    g_checkpoint = load(
        'plane_generator.tar' if model_type == 'Plane' else 'chair_generator.tar',
        map_location=DEVICE)
    g_model.load_state_dict(g_checkpoint["model_state_dict"])
    return g_model

def test_model(test_dataset):
    num_test = len(test_dataset)
    test_useful_end_idx = get_useful_end_idx(sequence_length, num_test)
    test_idx = []
    for i in test_useful_end_idx:
        for j in range(sequence_length):
            test_idx.append(i - j * srate)
    test_idx.reverse()
    test_loader = DataLoader(
        test_dataset,
        batch_size=test_batch_size,
        sampler=SeqSampler(test_dataset, test_idx),
        # sampler=test_idx,
        num_workers=0,
        pin_memory=False)
    model = res34_tcn()
    model = DataParallel(model)
    model.load_state_dict(torch.load(model_name))
    # model = model.module
    # model = DataParallel(model)
    if use_gpu:
        model = model.cuda()
    # model = DataParallel(model)
    # model = model.module
    model.eval()
    all_preds_s = []
    num = 0
    with torch.no_grad():
        for data in test_loader:
            num = num + 1
            inputs, _, kdatas = data
            if use_gpu:
                inputs = Variable(inputs.cuda())
                kdatas = Variable(kdatas.cuda())
            else:
                inputs = Variable(inputs)
                kdatas = Variable(kdatas)
            outputs_s = model.forward(inputs, kdatas)
            # outputs_s = outputs_s[-1, (sequence_length - 1):: sequence_length]
            outputs_s = outputs_s[-1]
            outputs_s = F.softmax(outputs_s, dim=-1)
            _, preds_s = torch.max(outputs_s.data, -1)
            for j in range(preds_s.shape[0]):
                all_preds_s.append(preds_s[j].data.item())
    return all_preds_s

def create_model(load_name: str, n_classes: int) -> nn.Module:
    # We take RGB images as input and predict the target class against the background.
    model = DataParallel(UNet(n_channels=3, n_classes=n_classes, bilinear=True))
    if load_name:
        print(f"Loading {load_name}")
        model.load_state_dict(
            torch.load(f"results/unet/{load_name}/checkpoints/model_latest.pth"))
    return model

def _set_model(self, model_weights_path):
    """
    A function which instantiates the model and loads the weights
    :param model_weights_path: str, path to the model weights
    :return: None
    """
    model = resnet_face18(False)
    model = DataParallel(model)
    model.load_state_dict(torch.load(model_weights_path,
                                     map_location=self.torch_device))
    model.to(self.torch_device)
    model.eval()
    self.model = model

def main(args):
    # Select the hardware device to use for inference.
    if torch.cuda.is_available():
        device = torch.device('cuda', torch.cuda.current_device())
        torch.backends.cudnn.benchmark = True
    else:
        device = torch.device('cpu')

    # Disable gradient calculations.
    torch.set_grad_enabled(False)

    pretrained = not args.model_file
    if pretrained:
        print('No model weights file specified, using pretrained weights instead.')

    # Create the model, downloading pretrained weights if necessary.
    if args.arch == 'hg1':
        model = hg1(pretrained=pretrained)
    elif args.arch == 'hg2':
        model = hg2(pretrained=pretrained)
    elif args.arch == 'hg8':
        model = hg8(pretrained=pretrained)
    else:
        raise Exception('unrecognised model architecture: ' + args.model)
    model = model.to(device)

    if not pretrained:
        assert os.path.isfile(args.model_file)
        print('Loading model weights from file: {}'.format(args.model_file))
        checkpoint = torch.load(args.model_file)
        state_dict = checkpoint['state_dict']
        if sorted(state_dict.keys())[0].startswith('module.'):
            model = DataParallel(model)
        model.load_state_dict(state_dict)

    # Initialise the MPII validation set dataloader.
    val_dataset = Mpii(args.image_path, is_train=False)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False,
                            num_workers=args.workers, pin_memory=True)

    # Generate predictions for the validation set.
    _, _, predictions = do_validation_epoch(val_loader, model, device,
                                            Mpii.DATA_INFO, args.flip)

    # Report PCKh for the predictions.
    print('\nFinal validation PCKh scores:\n')
    print_mpii_validation_accuracy(predictions)

def load_generator(path: Path, channels: int, img_size: int, n_classes: int,
                   latent_dim: int) -> nn.Module:
    model = DataParallel(
        Generator(n_channels=channels,
                  depth=9,
                  n_classes=n_classes,
                  latent_size=latent_dim))
    model.load_state_dict(torch.load(path))
    model.eval()
    return model

def main():
    args = get_parser()
    with open(args.cfg_path) as f:
        cfg = json.load(f)
    model = MODELS[cfg['model']](num_nodes=cfg['grid_size'],
                                 use_crf=cfg['use_crf'])
    model = DataParallel(model, device_ids=None)
    checkpoint = torch.load(args.load_path)
    model.load_state_dict(checkpoint['state_dict'])
    model = model.cuda()
    model.eval()
    x = torch.ones((2, 4, 3, 224, 224))
    y = model(x)
    print(y.size())

def load_model(base_dir, run_name, experiment_mode="", device=None, force_multiple_gpu=False): model_fname = get_model_fname(base_dir, run_name, experiment_mode=experiment_mode) checkpoint = torch.load(model_fname, map_location=device) hparams = checkpoint["hparams"] model_name = checkpoint.get("model_name", "v0") chosen_diseases = hparams["diseases"].split(",") train_resnet = hparams["train_resnet"] multiple_gpu = hparams.get("multiple_gpu", False) def extract_params(name): params = {} prefix = name + "_" for key, value in hparams.items(): if key.startswith(prefix): key = key[len(prefix):] params[key] = value return params opt_params = extract_params("opt") # Load model model = init_empty_model(model_name, chosen_diseases, train_resnet) # NOTE: this force param has to be used for cases when the hparam was not saved if force_multiple_gpu or multiple_gpu: model = DataParallel(model) if device: model = model.to(device) # Load optimizer opt_name = hparams["opt"] OptClass = optimizers.get_optimizer_class(opt_name) optimizer = OptClass(model.parameters(), **opt_params) model.load_state_dict(checkpoint["model_state_dict"]) optimizer.load_state_dict(checkpoint["optimizer_state_dict"]) # Load loss loss_name = hparams["loss"] loss_params = extract_params("loss") # TODO: make a class to hold all of these values (and avoid changing a lot of code after any change here) return model, model_name, optimizer, opt_name, loss_name, loss_params, chosen_diseases
def main():
    opt = Config(os.getcwd())
    if opt.backbone == 'resnet18':
        model = resnet_face18(opt.use_se)
    elif opt.backbone == 'resnet34':
        model = resnet34()
    elif opt.backbone == 'resnet50':
        model = resnet50()

    model = DataParallel(model)
    # load_model(model, opt.test_model_path)
    model.load_state_dict(
        torch.load(opt.test_model_path, map_location={'cuda:0': 'cpu'}))
    model.to(torch.device(device))
    model.eval()

    global args
    train_dataset = Dataset(opt.train_root,
                            opt.train_list,
                            phase='train',
                            input_shape=opt.input_shape)
    trainloader = data.DataLoader(train_dataset,
                                  batch_size=opt.train_batch_size,
                                  shuffle=True,
                                  num_workers=opt.num_workers)
    # centroid_map = create_centroid(model, trainloader)
    test_dataset = Dataset(opt.test_root,
                           opt.test_list,
                           phase='test',
                           input_shape=opt.input_shape)
    test_loader = data.DataLoader(
        test_dataset,
        batch_size=1000,
        # batch_size=opt.test_batch_size,
        shuffle=True,
        num_workers=opt.num_workers)
    for x, y in test_loader:
        latent_vecs = model(x)
        print(latent_vecs.shape, y.shape)
        target = y
        plot3d_tsne(latent_vecs, target)
        show_umap(latent_vecs, target)
        t_sne(latent_vecs, target)

def load_model_by_state_dict(model, state_dict_fn):
    from torch.nn import DataParallel
    import torch
    model_dict = torch.load(state_dict_fn)
    try:
        model.load_state_dict(model_dict['state_dict'])
    except:
        try:
            model.load_state_dict(model_dict)
        except:
            model = DataParallel(model.cuda())
            if 'state_dict' in model_dict:
                model.load_state_dict(model_dict['state_dict'])
            else:
                model.load_state_dict(model_dict)
    return model

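# --- Illustrative variant (an assumption, not code from the repo above) ---
# The bare try/except chain above can mask unrelated errors. A sketch that inspects the
# checkpoint keys first and only wraps the model in DataParallel when the 'module.' prefix
# is actually present (load_state_dict_flexibly is a hypothetical name):
import torch
from torch.nn import DataParallel


def load_state_dict_flexibly(model, state_dict_fn, device='cuda'):
    checkpoint = torch.load(state_dict_fn, map_location=device)
    # Unwrap a {'state_dict': ...} container if the checkpoint uses one.
    state_dict = checkpoint['state_dict'] if 'state_dict' in checkpoint else checkpoint
    if next(iter(state_dict)).startswith('module.'):
        # Keys were saved from a DataParallel model, so wrap before loading.
        model = DataParallel(model.to(device))
    model.load_state_dict(state_dict)
    return model
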
def load_model(backbone, device_ids, test_model_path, use_se):
    if backbone == 'resnet18_finger':
        model = resnet.resnet18_finger(use_se)
    elif backbone == 'resnet18':
        model = resnet.resnet18(pretrained=False)
    elif backbone == 'resnet34':
        model = resnet.resnet34(pretrained=False)
    elif backbone == 'resnet50':
        model = resnet.resnet50(pretrained=False)

    if opt.multi_gpus:
        model = DataParallel(model, device_ids=device_ids)
    model.load_state_dict(torch.load(test_model_path))
    # model.to(torch.device("cuda"))
    if torch.cuda.is_available():
        model = model.cuda()
    model.eval()
    return model

class UNetTester(object):
    def __init__(self, model, devices_num=2, color_dim=1, num_classes=2):
        self.net = UNet(color_dim=color_dim, num_classes=num_classes)
        checkpoint = torch.load(model)
        self.color_dim = color_dim
        self.num_classes = num_classes
        self.net.load_state_dict(checkpoint['state_dir'])
        self.net = self.net.cuda()
        if devices_num == 2:
            self.net = DataParallel(self.net, device_ids=[0, 1])
        self.net.eval()

    def test(self, folder, target_dir):
        mkdir(target_dir)
        cracks_files = glob.glob(os.path.join(folder, "*.jpg"))
        print(len(cracks_files), "imgs.")
        for crack_file in tqdm(cracks_files):
            name = os.path.basename(crack_file)
            save_path = os.path.join(target_dir, name)
            data = cv2.imread(crack_file, cv2.IMREAD_GRAYSCALE)
            output = self._test(data)  # predicted mask image
            cv2.imwrite(save_path, output)

    def _test(self, data):
        data = data.astype(np.float32) / 255.
        data = np.expand_dims(data, 0)
        data = np.expand_dims(data, 0)
        input = torch.from_numpy(data)
        height = input.size()[-2]
        width = input.size()[-1]
        # `volatile` is deprecated in modern PyTorch; torch.no_grad() is the current equivalent.
        input = Variable(input, volatile=True).cuda()
        batch_size = 1
        output = self.net(input)
        output = output.transpose(1, 3).transpose(1, 2).contiguous().view(
            -1, self.num_classes)
        _, output = output.data.max(dim=1)
        output[output > 0] = 255
        output = output.view(height, width)
        output = output.cpu().numpy()
        return output

def test(best_roc, fold, device, val_loader, test_loader, val_d, test_df,
         meta_features, oof):
    best_model_path = model_dir + [
        file for file in os.listdir(model_dir)
        if str(round(best_roc, 3)) in file and "Fold" + str(fold) in file
    ][0]
    preds = torch.zeros((len(test_df), 1), dtype=torch.float32, device=device)

    # add meta feature from the csv file
    model = DataParallel(
        EfficientNetwork(1, args.arch, meta_features).to(device))
    model.load_state_dict(torch.load(best_model_path))  # Loading best model of this fold
    model.eval()  # switch model to the evaluation mode
    with torch.no_grad():
        # Predicting on validation set once again to obtain data for OOF
        # print(f"-------------saving results to oof---------------")
        # val_preds = torch.zeros((len(val_d), 1), dtype=torch.float32, device=device)
        # for j, (x_val, y_val) in tqdm(enumerate(val_loader), total=len(val_loader)):
        #     y_val = y_val.to(device)
        #     if args.use_meta_features:
        #         l = x_val[0].shape[0]
        #     else:
        #         l = x_val.shape[0]
        #     z_val = model(x_val)
        #     val_pred = torch.sigmoid(z_val)
        #     val_preds[j * l:j * l + l] = val_pred
        # oof[val_idx] = val_preds.cpu().numpy()

        # Predicting on test set
        # tta_preds = torch.zeros((len(test_df), 1), dtype=torch.float32, device=device)
        for j in range(args.TTA):
            print(f"processing {j + 1}th TTA")
            for i, x_test in tqdm(enumerate(test_loader), total=len(test_loader)):
                if args.use_meta_features:
                    l = x_test[0].shape[0]
                else:
                    l = x_test.shape[0]
                z_test = model(x_test)
                z_test = torch.sigmoid(z_test)
                preds[i * test_loader.batch_size:i * test_loader.batch_size + l] += z_test
    preds /= args.TTA
    return preds

def main(args=None):
    parser = argparse.ArgumentParser(
        description='Testing script for face identification.')
    parser.add_argument(
        '--depth',
        help='Resnet depth, must be one of 18, 34, 50, 101, 152 or 20 for sphere',
        type=int,
        default=50)
    parser.add_argument('--parallel',
                        help='Run training with DataParallel',
                        dest='parallel',
                        default=False,
                        action='store_true')
    parser.add_argument('--model', help='Path to model')
    parser.add_argument('--batch_size',
                        help='Batch size (default 50)',
                        type=int,
                        default=50)
    parser.add_argument('--lfw_root', help='Path to LFW dataset')
    parser.add_argument('--lfw_pair_list', help='Path to LFW pair list file')
    parser = parser.parse_args(args)

    is_cuda = torch.cuda.is_available()
    print('CUDA available: {}'.format(is_cuda))

    model = get_net_by_depth(parser.depth)
    if parser.parallel:
        model = DataParallel(model)
    # load_model(model, opt.test_model_path)
    model.load_state_dict(torch.load(parser.model))
    if is_cuda:
        model.cuda()

    identity_list = get_pair_list(parser.lfw_pair_list)
    img_data = load_img_data(identity_list, parser.lfw_root)
    model.eval()
    lfw_test2(model, identity_list, img_data, is_cuda=is_cuda)

def pascal_main():
    data_root = 'data/'
    pascal_label_list = [
        'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
        'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
        'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
    ]
    trainloader, testloader = load_pascal(data_root, pascal_label_list)
    net = DataParallel(ColorizationNet(len(pascal_label_list)))
    model_path = 'colorization.pth'
    if os.path.exists(model_path):
        net.load_state_dict(torch.load(model_path))
    train(trainloader, 100, len(pascal_label_list), net=net, lr=1, alpha=1 / 300)