# Tail of the dataset-conversion script, collapsed onto a single physical line
# (original line breaks/indentation were lost in a paste). What it does, in order:
#   * inside a `call_back(...)` closure whose `def` is above this chunk:
#     builds <dst_path>/<split>/<basename>.npz via save_npz(image, lines, centers, cfg),
#     and when plot=True overlays lines (blue) and control points (red) with
#     bez.insert_line / bez.insert_point, then shows the image in an OpenCV
#     window and blocks on cv2.waitKey();
#   * `parmap(call_back, dataset)` maps that closure over the dataset in parallel;
#   * the __main__ guard chdirs to the parent dir, parses cfg, creates the
#     destination directory, converts the 'train' and 'test' splits with
#     json2npz, and prints the elapsed wall-clock time.
# NOTE(review): this line is not valid Python as written — the original
# statement boundaries and indentation must be restored before running.
prefix = os.path.join(dst_path, split, filename.split('.')[0]) save_npz(f'{prefix}', image, lines.copy(), centers, cfg) if plot: bez.insert_line(image, lines, color=[0, 0, 255]) bez.insert_point(image, lines, color=[255, 0, 0], thickness=2) cv2.namedWindow('image', 0) cv2.imshow('image', image) cv2.waitKey() parmap(call_back, dataset) if __name__ == "__main__": os.chdir('..') # Parameter cfg = parse() print(cfg) # Path src_path = cfg.raw_dataset_path dst_path = cfg.train_dataset_path os.makedirs(dst_path, exist_ok=True) start = time.time() for split in ['train', 'test']: json2npz(src_path, dst_path, split, cfg, plot=False) end = time.time() print('Time: %f s' % (end - start))
def main(**kwargs):
    """Train a ResNet-18 on CIFAR-10 and checkpoint the best model.

    Keyword arguments override fields of the module-global ``cfg`` (applied
    via ``cfg.parse``). Side effects: downloads CIFAR-10 into ``data/`` if
    absent, prints training progress, and calls ``net.save()`` every time
    validation accuracy improves.
    """
    # Step 0: fold keyword overrides into the global config.
    cfg.parse(kwargs)

    # Step 1: build the network.
    net = models.resnet18()
    net.init_pm()  # project-specific weight initialization
    if cfg.use_gpu:
        net = net.cuda()

    # Step 2: data pipelines (constants are the standard CIFAR-10 channel stats).
    normalize = transforms.Normalize((0.4914, 0.4822, 0.4465),
                                     (0.2023, 0.1994, 0.2010))
    # NOTE(review): CIFAR-10 images are 32x32; RandomCrop(34, 4) produces 34x34
    # training inputs while the test set stays 32x32 — confirm this is intended
    # (the common recipe is RandomCrop(32, 4)).
    train_tf = transforms.Compose([
        transforms.RandomCrop(34, 4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    train_dataset = tv.datasets.CIFAR10(root='data/', train=True,
                                        download=True, transform=train_tf)
    train_dataloader = t.utils.data.DataLoader(train_dataset,
                                               batch_size=cfg.batch_size,
                                               shuffle=True,
                                               num_workers=cfg.num_workers)
    test_tf = transforms.Compose([transforms.ToTensor(), normalize])
    test_dataset = tv.datasets.CIFAR10(root='data/', train=False,
                                       download=True, transform=test_tf)
    test_dataloader = t.utils.data.DataLoader(test_dataset, batch_size=100,
                                              shuffle=False,
                                              num_workers=cfg.num_workers)

    # Loss and optimizer.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=cfg.lr,
                          momentum=0.9, weight_decay=5e-4)
    lr = cfg.lr
    best_accuracy = 0.0  # best validation accuracy seen so far

    for epoch in range(cfg.epoch):
        total = 0
        correct = 0
        total_loss = 0
        for i, (inputs, labels) in enumerate(train_dataloader):
            if cfg.use_gpu:
                inputs = inputs.cuda()
                labels = labels.cuda()

            optimizer.zero_grad()
            # Outputs are already on the net's device; the original's extra
            # `outputs = outputs.cuda()` was a redundant no-op and is removed.
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # Running statistics for this epoch.
            total_loss += loss.item()
            _, predicted = t.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            if i % cfg.print_freq == cfg.print_freq - 1:
                print('epoch %d,batch %d: Loss:%0.3f ' %
                      (epoch + 1, i + 1, total_loss / (i + 1)))

        print('epoch %d, accuracy:%0.3f%%' % (epoch + 1, 100. * correct / total))

        # Validate on the test set; checkpoint whenever accuracy improves.
        valid_accuracy = valid(net, test_dataloader)
        if valid_accuracy > best_accuracy:
            best_accuracy = valid_accuracy
            net.save()

        # Step-decay the learning rate after epochs 150 and 250 (0-based 149/249).
        if epoch == 149 or epoch == 249:
            lr = lr * cfg.lr_decay
            for pmg in optimizer.param_groups:
                pmg['lr'] = lr