def main_worker(args):
    """Train the license-plate detector described by ``cfg_plate``.

    Sets up TensorBoard + file logging, builds the CCPD training DataLoader,
    optionally resumes from ``args.resume``, moves the model to GPU (single
    GPU when ``args.gpu`` is set, otherwise DataParallel over all GPUs),
    then runs the epoch loop, logging loss/LR and checkpointing every epoch.

    Args:
        args: parsed CLI namespace; fields read here include gpu, batch_size,
            workers, resume, start_epoch, epochs, num_classes, lr,
            weight_decay, save_folder, network.

    Side effects: creates a timestamped ``logs/train_*`` directory (raises
    NameError if it already exists), writes TensorBoard events, and saves one
    ``<network>_<epoch>.pth`` checkpoint per epoch under
    ``args.save_folder/args.network``.
    """
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    # TensorBoard event writer (closed at the end so events are flushed).
    writer = SummaryWriter()

    # Timestamped log directory; refuse to clobber an existing run.
    save_dir = os.path.join(
        'logs', 'train' + '_' + datetime.now().strftime('%Y%m%d_%H%M%S'))
    if os.path.exists(save_dir):
        raise NameError('model dir exists!')
    os.makedirs(save_dir)
    logger = init_log(save_dir)

    # NOTE(review): dataset path is hard-coded to a local CCPD checkout.
    train_dataset = labelFpsDataLoader(
        "/home/can/AI_Camera/Dataset/License_Plate/CCPD2019/ccpd_base",
        preproc=preproc(cfg_plate['image_size'], (104, 117, 123)))
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, collate_fn=detection_collate,
        pin_memory=True)

    # Initialize model, optionally resuming weights/epoch from a checkpoint.
    model = BaseModel(cfg=cfg_plate)
    if args.resume is not None:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                # Map model to be loaded to the specified single GPU.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch'] + 1
            model.load_state_dict(checkpoint['state_dict'])
            # Free the (potentially large) checkpoint dict before training.
            del checkpoint

    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        model = model.cuda()
        print('Run with DataParallel ....')
        model = torch.nn.DataParallel(model).cuda()

    # Anchor boxes are fixed for the whole run; compute once without grads.
    priorbox = PriorBox(cfg_plate)
    with torch.no_grad():
        priors = priorbox.forward()
        priors = priors.cuda()

    criterion = MultiBoxLoss(args.num_classes, 0.35, True, 0, True, 7, 0.35,
                             False)
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr,
                                  weight_decay=args.weight_decay)
    # LR is reduced when the (training) loss plateaus; stepped per epoch.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='min', factor=0.1, patience=5, verbose=True)

    logger.info('Steps per epoch: {}'.format(len(train_loader)))

    # Checkpoints go here; create it up front so torch.save cannot fail
    # with a missing-directory error on the first epoch.
    ckpt_dir = os.path.join(args.save_folder, args.network)
    os.makedirs(ckpt_dir, exist_ok=True)

    # Start training per epoch.
    for epoch in range(args.start_epoch, args.epochs):
        train_loss = train(train_loader, model, priors, criterion, optimizer,
                           scheduler, epoch, logger, args)

        # Log to TensorBoard.
        lr = optimizer.param_groups[0]['lr']
        writer.add_scalar('model/train_loss', train_loss, epoch)
        writer.add_scalar('model/learning_rate', lr, epoch)

        # ReduceLROnPlateau needs the monitored metric, not a bare step().
        scheduler.step(train_loss)

        state = {
            'epoch': epoch,
            'parser': args,
            'state_dict': get_state_dict(model)
        }
        torch.save(
            state,
            os.path.join(ckpt_dir, "{}_{}.pth".format(args.network, epoch)))

    # Flush and close the TensorBoard writer so no events are lost.
    writer.close()
checkpoint_path = "weights/CCPD/CCPD_150.pth" img_dir = [ "/home/can/AI_Camera/Dataset/License_Plate/CCPD2019/ccpd_weather", "/home/can/AI_Camera/Dataset/License_Plate/CCPD2019/ccpd_blur", "/home/can/AI_Camera/Dataset/License_Plate/CCPD2019/ccpd_tilt", "/home/can/AI_Camera/Dataset/License_Plate/CCPD2019/ccpd_db", "/home/can/AI_Camera/Dataset/License_Plate/CCPD2019/ccpd_fn", "/home/can/AI_Camera/Dataset/License_Plate/CCPD2019/ccpd_rotate", # "/home/can/AI_Camera/Dataset/License_Plate/CCPD2019/ccpd_np", "/home/can/AI_Camera/Dataset/License_Plate/CCPD2019/ccpd_challenge" ] print("loading model") # Initialize model model = BaseModel(cfg=cfg_plate) checkpoint = torch.load(checkpoint_path, map_location='cuda') model.load_state_dict(checkpoint['state_dict']) del checkpoint model.eval() model.to(device) for i in np.linspace(0.5, 0.9, 8): print("############################") print("threshold: " + str(i)) for index, path in enumerate(img_dir): print("**************************") print(path) val_dataset = ChaLocDataLoader([path], imgSize=320) valid_loader = torch.utils.data.DataLoader( val_dataset, batch_size=256, shuffle=False,