import os
import time

import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
from torch.optim import lr_scheduler

# Project-local helpers (CRAFT, copyStateDict, Maploss, custom_dataset,
# adjust_learning_rate, test_net, imgproc, file_utils, image_list, train_loader)
# are assumed to be imported or defined elsewhere in this repository.


def test(modelpara, args=None, result_folder=None):
    # load net
    net = CRAFT()  # initialize
    print('Loading weights from checkpoint {}'.format(modelpara))
    if args.cuda:
        net.load_state_dict(copyStateDict(torch.load(modelpara)))
        net = net.cuda()
        net = torch.nn.DataParallel(net)
        cudnn.benchmark = False
    else:
        net.load_state_dict(
            copyStateDict(torch.load(modelpara, map_location='cpu')))

    net.eval()
    t = time.time()

    # load data
    for k, image_path in enumerate(image_list):
        print("Test image {:d}/{:d}: {:s}".format(k + 1, len(image_list), image_path), end='\r')
        image = imgproc.loadImage(image_path)

        with torch.no_grad():
            bboxes, polys, score_text = test_net(net, image, args.text_threshold,
                                                 args.link_threshold, args.low_text,
                                                 args.cuda, args.poly, args)

        # save score text
        filename, file_ext = os.path.splitext(os.path.basename(image_path))
        mask_file = result_folder + "/res_" + filename + '_mask.jpg'
        # cv2.imwrite(mask_file, score_text)

        file_utils.saveResult(image_path, image[:, :, ::-1], polys, dirname=result_folder)

    net.train()  # restore training mode so interleaved train/eval cycles keep working
    print("elapsed time : {}s".format(time.time() - t))
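# --- Hedged usage sketch (not in the original source): one way to build the
# `args` namespace that test() reads. Only cuda/text_threshold/link_threshold/
# low_text/poly are actually referenced above; the checkpoint path and result
# folder are hypothetical placeholders, and `image_list` must be a module-level
# list of image paths populated elsewhere in the script. ---
#
# from types import SimpleNamespace
# args = SimpleNamespace(cuda=torch.cuda.is_available(), text_threshold=0.7,
#                        link_threshold=0.4, low_text=0.4, poly=False)
# test('weights/craft_mlt_25k.pth', args=args, result_folder='./result')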
cudnn.benchmark = True

# realdata = ICDAR2015(net, '/data/CRAFT-pytorch/icdar2015', target_size=768)
# real_data_loader = torch.utils.data.DataLoader(
#     realdata,
#     batch_size=10,
#     shuffle=True,
#     num_workers=0,
#     drop_last=True,
#     pin_memory=True)

optimizer = optim.Adam(net.parameters(), lr=args.lr, weight_decay=args.weight_decay)
criterion = Maploss()
# criterion = torch.nn.MSELoss(reduce=True, size_average=True)

net.train()
step_index = 0
loss_time = 0
loss_value = 0
compare_loss = 1
for epoch in range(1000):
    loss_value = 0
    # if epoch % 50 == 0 and epoch != 0:
    #     step_index += 1
    #     adjust_learning_rate(optimizer, args.gamma, step_index)

    st = time.time()
    for index, (images, gh_label, gah_label, mask) in enumerate(train_loader):
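        # --- Hedged sketch (the loop body is truncated in the original): a
        # minimal CRAFT-style training step. It assumes net(images) returns
        # (y, feature) with y[:, :, :, 0] the region score and y[:, :, :, 1]
        # the affinity score, and that Maploss compares both predicted maps
        # against gh_label/gah_label under the confidence mask. ---
        images = images.cuda() if args.cuda else images
        gh_label = gh_label.cuda() if args.cuda else gh_label
        gah_label = gah_label.cuda() if args.cuda else gah_label
        mask = mask.cuda() if args.cuda else mask

        out, _ = net(images)
        out1 = out[:, :, :, 0]  # region score map
        out2 = out[:, :, :, 1]  # affinity score map

        optimizer.zero_grad()
        loss = criterion(gh_label, gah_label, out1, out2, mask)
        loss.backward()
        optimizer.step()
        loss_value += loss.item()

        if index % 2 == 0 and index > 0:
            print('epoch {}: batch {} || time per 2 batches: {:.3f}s || training loss: {:.6f}'.format(
                epoch, index, time.time() - st, loss_value / 2))
            loss_time = 0
            loss_value = 0
            st = time.time()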
def train(train_img_path, train_gt_path, pths_path, batch_size, lr, num_workers,
          epoch_iter, save_interval):
    file_num = len(os.listdir(train_img_path))
    trainset = custom_dataset(train_img_path, train_gt_path)
    train_loader = data.DataLoader(trainset, batch_size=batch_size,
                                   shuffle=True, num_workers=num_workers, drop_last=True)
    criterion = Maploss()

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = CRAFT()
    data_parallel = False
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
        data_parallel = True
    model.to(device)

    # NOTE: weight_decay and gamma are read from a module-level `args` namespace.
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=args.weight_decay)
    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[epoch_iter // 2], gamma=0.1)

    step_index = 0
    for epoch in range(epoch_iter):
        if epoch % 50 == 0 and epoch != 0:
            step_index += 1
            adjust_learning_rate(optimizer, args.gamma, step_index)

        model.train()
        scheduler.step()  # decay the learning rate once per epoch
        epoch_loss = 0
        epoch_time = time.time()
        for i, (img, gt_score, gt_geo, ignored_map) in enumerate(train_loader):
            start_time = time.time()
            img, gt_score, gt_geo, ignored_map = (img.to(device), gt_score.to(device),
                                                  gt_geo.to(device), ignored_map.to(device))
            pred_score, pred_geo = model(img)
            loss = criterion(gt_score, pred_score, gt_geo, pred_geo, ignored_map)

            epoch_loss += loss.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            print('Epoch is [{}/{}], mini-batch is [{}/{}], time consumption is {:.8f}, '
                  'batch_loss is {:.8f}'.format(epoch + 1, epoch_iter, i + 1,
                                                int(file_num / batch_size),
                                                time.time() - start_time, loss.item()))

        print('epoch_loss is {:.8f}, epoch_time is {:.8f}'.format(
            epoch_loss / int(file_num / batch_size), time.time() - epoch_time))
        print(time.asctime(time.localtime(time.time())))
        print('=' * 50)

        if (epoch + 1) % save_interval == 0:
            state_dict = model.module.state_dict() if data_parallel else model.state_dict()
            torch.save(state_dict,
                       os.path.join(pths_path, 'model_epoch_{}.pth'.format(epoch + 1)))
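# --- Hedged entry-point sketch (not in the original): an example invocation of
# train(). The paths and hyperparameter values below are placeholders chosen
# for illustration, not the authors' settings. ---
if __name__ == '__main__':
    train(train_img_path='./data/train_img',
          train_gt_path='./data/train_gt',
          pths_path='./pths',
          batch_size=8,
          lr=1e-3,
          num_workers=4,
          epoch_iter=600,
          save_interval=5)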