def __init__(self, input_size, hidden_size, top_num_classes, mid_num_classes, bot_num_classes, device='cuda'):
    """Attention decoder head predicting top-level classes.

    Args:
        input_size: feature size fed to the attention cell.
        hidden_size: hidden-state size of the attention cell.
        top_num_classes: number of top-level classes (this head's target).
        mid_num_classes: number of mid-level classes (forwarded to the cell).
        bot_num_classes: number of bottom-level classes (forwarded to the cell).
        device: device identifier string, defaults to 'cuda'.
    """
    super(Attention_top, self).__init__()
    self.device = device
    self.attention_cell = AttentionCell_top(input_size, hidden_size, top_num_classes, mid_num_classes, bot_num_classes)
    self.hidden_size = hidden_size
    self.top_num_classes = top_num_classes
    self.mid_num_classes = mid_num_classes
    self.bot_num_classes = bot_num_classes
    # Project the hidden state to a fixed 512-dim embedding before the
    # classification and ArcFace heads (removed earlier commented-out
    # variants that mapped hidden_size directly to the class count).
    self.embedding = nn.Linear(hidden_size, 512)
    self.pred_gen = nn.Linear(512, top_num_classes)
    self.arc_loss_gen = metrics.ArcMarginProduct(512, top_num_classes, s=30, m=0.35)
def main():
    """Train a model with an ArcFace margin head on CIFAR-10, checkpointing every epoch."""
    args = parse_args()

    # Prefer the GPU when one is available.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Data.
    train_loader, test_loader, class_names = cifar10.load_data(args.data_dir)

    # Model.
    model = get_model(args.model_name, args.n_feats).to(device)
    print(model)

    # ArcFace margin head.
    metric = metrics.ArcMarginProduct(args.n_feats, len(class_names), s=args.norm, m=args.margin, easy_margin=args.easy_margin)
    metric.to(device)

    # Loss and optimizer over both the model's and the metric head's parameters.
    criterion = nn.CrossEntropyLoss()
    param_groups = [{'params': model.parameters()}, {'params': metric.parameters()}]
    optimizer = optim.SGD(param_groups, lr=args.lr, weight_decay=args.weight_decay)

    for epoch in range(args.n_epoch):
        # One epoch of training followed by evaluation.
        train_acc, train_loss = train(device, train_loader, model, metric, criterion, optimizer)
        test_acc, test_loss = test(device, test_loader, model, metric, criterion)

        print(f'epoch: {epoch + 1:>3}, train acc: {train_acc:<8}, train loss: {train_loss:<8}, test acc: {test_acc:<8}, test loss: {test_loss:<8}')

        # Persist a checkpoint for this epoch.
        ckpt_path = args.model_ckpt_path_temp.format(args.dataset_name, args.model_name, epoch + 1)
        torch.save(model.state_dict(), ckpt_path)
        print(f'Saved a model checkpoint at {ckpt_path}')
        print('')
def __init__(self, input_size, hidden_size, num_classes, device='cuda'):
    """Attention decoder with a 512-dim embedding feeding linear and ArcFace heads."""
    super(Attention, self).__init__()
    self.device = device
    self.hidden_size = hidden_size
    self.num_classes = num_classes
    self.attention_cell = AttentionCell(input_size, hidden_size, num_classes)
    # Fixed embedding width shared by both prediction heads.
    emb_dim = 512
    self.embedding = nn.Linear(hidden_size, emb_dim)
    self.pred_gen = nn.Linear(emb_dim, num_classes)
    self.arc_loss_gen = metrics.ArcMarginProduct(emb_dim, num_classes, s=30, m=0.35)
def main():
    """Evaluate a fine-tuned ResNet-50 with an ArcFace head on Market-1501.

    Restores model weights from ``args.model_path`` and prints test accuracy
    and loss.
    """
    args = parse_args()

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Data: Market-1501 train / gallery / query splits.
    train_loader, gallery_loader, query_loader, class_names = market1501.load_data(args.anno_path)

    # Model: ResNet-50 with its classifier replaced by an n_feats embedding,
    # restored from a saved checkpoint.
    model = models.resnet50(pretrained=True)
    model.fc = nn.Linear(2048, args.n_feats)
    model.load_state_dict(torch.load(args.model_path))
    model = model.to(device)
    print(model)
    # Removed a leftover `1 / 0` debug crash that made everything below unreachable.

    # ArcFace margin head.
    metric = metrics.ArcMarginProduct(args.n_feats, len(class_names), s=args.norm, m=args.margin, easy_margin=args.easy_margin)
    metric.to(device)

    criterion = nn.CrossEntropyLoss()

    # NOTE(review): the original referenced an undefined `test_loader`;
    # evaluating on the query split here — confirm this is the intended split.
    test_loader = query_loader
    test_acc, test_loss = test(model, device, test_loader, criterion)

    print('test acc: {:<8}, test loss: {:<8}'.format(test_acc, test_loss))
# Select the compute device from the CLI-provided GPU index.
device = torch.device("cuda:" + opt.gpu)

# Backbone selection: DenseNet-121, a face-oriented ResNet-18, or MSCANet (default).
if opt.backbone == 'dense':
    model = DenseNet.DenseNet121(embedding_size)
elif opt.backbone == 'res':
    model = resnet.resnet_face18(embedding=embedding_size)
else:
    model = MSCANet(input_dim=1, hidden_dim=32, reduction=4)
model.to(device)

# Margin-based classification head selection; falls back to a plain linear layer.
if opt.metric == 'add':
    metric_fc = (metrics.AddMarginProduct(embedding_size, opt.num_classes, s=30, m=0.30))
elif opt.metric == 'arc':
    metric_fc = (metrics.ArcMarginProduct(embedding_size, opt.num_classes))
    pass
elif opt.metric == 'sphere':
    metric_fc = (metrics.SphereProduct(embedding_size, opt.num_classes))
    pass
else:
    metric_fc = (nn.Linear(embedding_size, opt.num_classes))
metric_fc.to(device)

models_list = []
# Load file lists and labels for the two evaluation sets from descriptor files.
# NOTE(review): presumably XR/XT are two test partitions — verify against load_descfile.
XR_files, XR_labels = load_descfile(opt.FilenameXR)
XT_files, XT_labels = load_descfile(opt.FilenameXT)

# Walk the checkpoint directory; the loop body continues beyond this chunk.
for root, dirs, files in os.walk(opt.test_model_list):
    for file in files:
def objective(trial):
    """Optuna objective: train ResNet-50 + ArcFace on Market-1501.

    Samples the embedding size, ArcFace scale/margin, learning rate, and
    weight decay from `trial`, trains for `args.n_epoch` epochs, and returns
    the final epoch's training accuracy as the value to maximize.
    """
    args = parse_args()

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Build the training annotation file on first run, then load the data.
    if not os.path.exists(args.anno_path):
        market1501.make_train_anno(args.data_dir, args.anno_path)
    train_loader, class_names = market1501.load_train_data(args.anno_path, args.n_batch)

    # Model: ResNet-50 with its classifier replaced by a trial-sized embedding.
    # cf. https://qiita.com/perrying/items/857df46bb6cdc3047bd8
    n_feats = trial.suggest_categorical('n_feats', [256 * 1, 256 * 2])
    model = models.resnet50(pretrained=True)
    model.fc = nn.Linear(2048, n_feats)
    model = model.to(device)

    # ArcFace head with trial-sampled scale and margin.
    norm = trial.suggest_int('norm', 0, 30)
    margin = trial.suggest_uniform('margin', 0.0, 1e-3)
    easy_margin = trial.suggest_categorical('easy_margin', [0, 1])
    metric = metrics.ArcMarginProduct(n_feats, len(class_names), s=norm, m=margin, easy_margin=easy_margin)
    metric.to(device)

    # Loss, optimizer over both parameter groups, and a step LR schedule.
    lr = trial.suggest_uniform('lr', 1e-3, 1e-1)
    weight_decay = trial.suggest_uniform('weight_decay', 1e-3, 1e-1)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD([{'params': model.parameters()}, {'params': metric.parameters()}], lr=lr, weight_decay=weight_decay)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)

    for epoch in range(args.n_epoch):
        train_acc, train_loss = train(device, train_loader, args.n_batch, model, metric, criterion, optimizer, scheduler)
        print('epoch: {:>3}, train acc: {:<8}, train loss: {:<8}'.format(epoch + 1, train_acc, train_loss))

    # Last epoch's training accuracy is the optimization target.
    return train_acc
# Validation data: batch size 1, original order preserved.
transform = ImageTransform(size)
valid_dataset = CreateDataset(valid_path, office, transform, 'valid')
valid_dataloader = data.DataLoader(valid_dataset, batch_size=1, shuffle=False)

# ResNet-34 feature extractor (return_GAP=True), with weights restored
# from the epoch-30 checkpoint.
model = Resnet34(classes=classes, return_GAP=True)
model = model.to(device)
model.load_state_dict(
    torch.load(weight_folder_model.joinpath('model_weight_epoch30.pth')))

# Matching ArcFace head checkpoint (512-dim input features).
metric = metrics.ArcMarginProduct(512, classes, s=30, m=0.5, easy_margin=True)
metric.to(device)
metric.load_state_dict(
    torch.load(weight_folder_metrics.joinpath('model_weight_epoch30.pth')))

images = []
features = []
model.eval()
# Extract one feature vector per validation image.
# NOTE(review): no torch.no_grad() here — autograd state is tracked needlessly
# during inference; confirm and wrap the loop if memory matters.
for (inputs, target) in tqdm(valid_dataloader, desc='valid'):
    inputs = inputs.to(device)
    target = target.to(device).long()
    feature = model(inputs)
    # Move to CPU, detach from the graph, and keep the single sample in the batch.
    feature = feature.to('cpu').detach().numpy().copy()[0]
    features.append(feature)
def main():
    """Train ResNet-50 + ArcFace on Market-1501, checkpointing both model and metric head each epoch."""
    args = parse_args()

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Build the training annotation file on first run, then load the data.
    if not os.path.exists(args.anno_path):
        market1501.make_train_anno(args.data_dir, args.anno_path)
    train_loader, gallery_loader, query_loader, class_names = market1501.load_train_data(args.anno_path, args.n_batch)

    # Model: ResNet-50 with its classifier replaced by an n_feats embedding.
    # cf. https://qiita.com/perrying/items/857df46bb6cdc3047bd8
    model = models.resnet50(pretrained=True)
    model.fc = nn.Linear(2048, args.n_feats)
    model = model.to(device)
    print(model)

    # ArcFace margin head.
    metric = metrics.ArcMarginProduct(args.n_feats, len(class_names), s=args.norm, m=args.margin, easy_margin=args.easy_margin)
    metric.to(device)

    # Loss, optimizer over both parameter groups, and a step LR schedule.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD([{'params': model.parameters()}, {'params': metric.parameters()}], lr=args.lr, weight_decay=args.weight_decay)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)

    for epoch in range(args.n_epoch):
        train_acc, train_loss = train(device, train_loader, args.n_batch, model, metric, criterion, optimizer, scheduler)
        print('epoch: {:>3}, train acc: {:<8}, train loss: {:<8}'.format(epoch + 1, train_acc, train_loss))

        # Save both the model and the metric head so evaluation can restore the pair.
        model_ckpt_path = args.model_ckpt_path_temp.format(args.dataset_name, args.model_name, epoch + 1)
        metric_ckpt_path = args.metric_ckpt_path_temp.format(args.dataset_name, args.model_name, epoch + 1)
        torch.save(model.state_dict(), model_ckpt_path)
        torch.save(metric.state_dict(), metric_ckpt_path)
        print('Saved a model checkpoint at {}'.format(model_ckpt_path))
        print('Saved a metric checkpoint at {}'.format(metric_ckpt_path))
        print('')
def __init__(self, param):
    """Face-recognition model wrapper; configures training or test mode from `param`."""
    super(FRModel, self).__init__()
    self.device = param['device']
    self.label_features_path = param['label_features_path']
    self.model_path = param['model_path']
    self.param_path = param['param_path']
    # Training-mode configuration.
    if param['mode'] == 'train':
        self.img_size = param['img_size']
        # Preprocessing applied to training face images.
        self.train_data_transform = transforms.Compose([
            utils.transforms.Resize(param['img_size']),
            utils.transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        # Dataset and data-loader setup.
        self.dataset_path = param['dataset_path']
        self.batch_size = param['batch_size']
        self.train_dataset = dataset.FRDataset(
            dataset_path=param['dataset_path'],
            transform=self.train_data_transform)
        self.train_loader = torch.utils.data.DataLoader(
            self.train_dataset, batch_size=self.batch_size, shuffle=True)
        # Network (model) construction.
        self.num_fout1 = param['num1']
        self.num_fout2 = param['num2']
        self.num_fout3 = param['num3']
        self.num_features = param['num_features']
        self.num_classes = param['num_classes']
        self.net = nets.FRNet(param['device'], param['img_size'],
                              param['num_features'], param['num1'],
                              param['num2'], param['num3']).to(param['device'])
        self.metric_net = metrics.ArcMarginProduct(
            in_features=param['num_features'],
            out_features=param['num_classes']).to(param['device'])
        # Learning rate, optimizer (over net + metric head), and loss for training.
        self.learning_rate = param['lr']
        self.optimizer = optim.Adam([{
            'params': self.net.parameters()
        }, {
            'params': self.metric_net.parameters()
        }], param['lr'])
        self.criterion = nn.CrossEntropyLoss()
        # Persist the parameter settings used for this training run.
        self.save_train_parameters()
    # Test (inference) configuration: restore settings saved at training time.
    elif param['mode'] == 'test':
        t_param = self.load_train_parameters()
        self.dataset_path = t_param['dataset_path']
        # Preprocessing for a target person's face image (includes face extraction).
        self.test_data_transform = transforms.Compose([
            utils.transforms.GetFace(),
            utils.transforms.Resize(t_param['img_size']),
            utils.transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        self.net = nets.FRNet(param['device'], t_param['img_size'],
                              t_param['num_features'], t_param['num1'],
                              t_param['num2'], t_param['num3']).to(param['device'])