def get_features(net, name, feature_dim=1024):
    """Extract L2-normalized feature vectors for every image in a dataset.

    Args:
        net: feature-extraction network; assumed to live on the GPU and to
            output ``(batch, feature_dim)`` tensors -- TODO confirm.
        name: identifier forwarded to ``dataset.FeatureDataset``.
        feature_dim: width of the vectors produced by ``net``. Was
            hard-coded to 1024; kept as the default for compatibility.

    Returns:
        Tuple ``(features, image_list)``: a ``(len(dataset), feature_dim)``
        CUDA tensor of unit-norm features, and the matching list of integer
        image ids, in dataset order.
    """
    dt = dataset.FeatureDataset(name)
    loader = data.DataLoader(dt, batch_size=100, shuffle=False,
                             num_workers=8, pin_memory=True,
                             drop_last=False)
    # Pre-allocate the output buffer. Every row is overwritten below:
    # drop_last=False guarantees the loader covers the whole dataset.
    features = torch.empty(len(dt), feature_dim).cuda()
    image_list = []
    num = 0  # rows filled so far
    for image, image_ids in loader:
        image = image.cuda()
        with torch.no_grad():  # inference only -- no autograd graph
            feature = net(image)
            # L2-normalize each row; epsilon guards against zero vectors.
            feature /= (feature.norm(dim=1, keepdim=True) + 1e-6)
        batch_num = len(image)
        features[num:num + batch_num] = feature
        num += batch_num
        image_list.extend(int(image_id) for image_id in image_ids)
    # NOTE(review): consider net.eval() before extraction so BatchNorm/
    # Dropout behave deterministically -- left to the caller to preserve
    # the original behavior.
    return features, image_list
def train(**kwargs):
    """Train FeatureNet with SGD, logging to Visdom and checkpointing.

    Any keyword argument overrides the matching attribute on the global
    ``opt`` config object (e.g. ``train(lr=0.01, max_epoch=20)``).
    Saves a checkpoint and runs validation once per epoch, then decays
    the learning rate by 0.9.
    """
    # Override config with caller-supplied options.
    for k, v in kwargs.items():
        setattr(opt, k, v)
    vis = Visualizer(opt.env, opt.port)
    device = t.device('cuda') if opt.use_gpu else t.device('cpu')
    lr = opt.lr

    # Network setup.
    featurenet = FeatureNet(4, 5)
    if opt.model_path:
        # map_location keeps checkpoints loadable on CPU-only machines.
        featurenet.load_state_dict(
            t.load(opt.model_path, map_location=lambda _s, _: _s))
    featurenet.to(device)

    # Data loading.
    data_set = dataset.FeatureDataset(root=opt.data_root,
                                      train=True,
                                      test=False)
    dataloader = DataLoader(data_set,
                            batch_size=opt.batch_size,
                            shuffle=True,
                            num_workers=opt.num_workers)
    val_dataset = dataset.FeatureDataset(root=opt.data_root,
                                         train=False,
                                         test=False)
    val_dataloader = DataLoader(val_dataset,
                                opt.batch_size,
                                shuffle=False,
                                num_workers=opt.num_workers)

    # Optimizer and loss function.
    optimizer = t.optim.SGD(featurenet.parameters(), lr)
    criterion = t.nn.CrossEntropyLoss().to(device)

    # Running metric.
    loss_meter = AverageValueMeter()

    # Training loop.
    for epoch in range(opt.max_epoch):
        loss_meter.reset()
        for ii, (inputs, label) in enumerate(dataloader):
            feature = inputs.to(device)
            target = label.to(device)

            optimizer.zero_grad()
            prob = featurenet(feature)
            loss = criterion(prob, target)
            loss.backward()
            optimizer.step()

            loss_meter.add(loss.item())

            # BUG FIX: the original condition `% opt.plot_every` was truthy
            # on every iteration EXCEPT multiples of plot_every -- inverted.
            if (ii + 1) % opt.plot_every == 0:
                vis.plot('train_loss', loss_meter.value()[0])
                # Drop into a debugger when the sentinel file exists.
                if os.path.exists(opt.debug_file):
                    import ipdb
                    ipdb.set_trace()

        # Checkpoint once per epoch (original indentation was lost in the
        # source; per-epoch saving matches the surrounding template).
        t.save(
            featurenet.state_dict(),
            'checkpoints/{epoch}_{time}_{loss}.pth'.format(
                epoch=epoch,
                time=time.strftime('%m%d_%H_%M_%S'),
                loss=loss_meter.value()[0]))

        # Validation and visualization. val() presumably switches the net
        # to eval mode, so restore train mode afterwards.
        accu, loss = val(featurenet, val_dataloader, criterion)
        featurenet.train()
        vis.plot('val_loss', loss)
        vis.log('epoch: {epoch}, loss: {loss}, accu: {accu}'.format(
            epoch=epoch, loss=loss, accu=accu))

        # Exponential learning-rate decay, applied manually.
        lr = lr * 0.9
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr