def main():
    """Evaluate a trained model on the test split defined in CONFIGS.

    Builds the network, optionally loads a checkpoint from ``args.model``,
    runs ``test`` over the test loader, and logs throughput.
    """
    logger.info(args)

    model = Net(numAngle=CONFIGS["MODEL"]["NUMANGLE"],
                numRho=CONFIGS["MODEL"]["NUMRHO"],
                backbone=CONFIGS["MODEL"]["BACKBONE"])
    model = model.cuda(device=CONFIGS["TRAIN"]["GPU_ID"])

    if args.model:
        if isfile(args.model):
            logger.info("=> loading pretrained model '{}'".format(args.model))
            checkpoint = torch.load(args.model)
            model.load_state_dict(checkpoint['state_dict'])
            logger.info("=> loaded checkpoint '{}' (epoch {})".format(
                args.model, checkpoint['epoch']))
        else:
            logger.info("=> no pretrained model found at '{}'".format(
                args.model))
            # Fix: abort instead of silently evaluating randomly
            # initialized weights (the sibling test script exits here too).
            return

    # dataloader
    test_loader = get_loader(CONFIGS["DATA"]["TEST_DIR"],
                             CONFIGS["DATA"]["TEST_LABEL_FILE"],
                             batch_size=1,
                             num_thread=CONFIGS["DATA"]["WORKERS"],
                             test=True)
    logger.info("Data loading done.")

    logger.info("Start testing.")
    total_time = test(test_loader, model, args)
    logger.info(
        "Test done! Total %d imgs at %.4f secs without image io, fps: %.3f"
        % (len(test_loader), total_time, len(test_loader) / total_time))
def main():
    """Run test-set inference with the released pretrained DHT weights.

    The published checkpoint is fetched via ``torch.hub``; a user-supplied
    checkpoint (``args.model``) may then overwrite those weights.
    """
    logger.info(args)

    model = Net(numAngle=CONFIGS["MODEL"]["NUMANGLE"],
                numRho=CONFIGS["MODEL"]["NUMRHO"],
                backbone=CONFIGS["MODEL"]["BACKBONE"])
    model = model.cuda(device=CONFIGS["TRAIN"]["GPU_ID"])

    # load the pretrained model (you are free to load your own models)
    state_dict = torch.hub.load_state_dict_from_url(
        "http://data.kaizhao.net/projects/deep-hough-transform/dht_r50_fpn_sel-c9a29d40.pth",
        check_hash=True)
    model.load_state_dict(state_dict)

    if args.model:
        # Guard clause: a missing checkpoint path is fatal.
        if not isfile(args.model):
            logger.info("=> no pretrained model found at '{}'".format(args.model))
            exit()
        logger.info("=> loading pretrained model '{}'".format(args.model))
        ckpt = torch.load(args.model)
        model.load_state_dict(ckpt)
        logger.info("=> loaded checkpoint '{}'".format(args.model))

    # dataloader
    test_loader = get_loader(CONFIGS["DATA"]["TEST_DIR"],
                             CONFIGS["DATA"]["TEST_LABEL_FILE"],
                             batch_size=1,
                             num_thread=CONFIGS["DATA"]["WORKERS"],
                             test=True)
    logger.info("Data loading done.")

    logger.info("Start testing.")
    total_time = test(test_loader, model, args)
    logger.info("Test done! Total %d imgs at %.4f secs without image io, fps: %.3f"
                % (len(test_loader), total_time, len(test_loader) / total_time))
def train(args):
    """Train the line-detection network configured by *args*.

    Creates a timestamped save folder, builds the data loaders and model,
    and runs the Adam/CrossEntropy training loop for ``args.max_epoch``
    epochs, logging every ``args.show_interval`` iterations.

    Fixes vs. original: the ``iter`` counter no longer shadows the builtin,
    input/target tensors are moved to the training device before the
    forward pass (the original computed the loss between a GPU prediction
    and a CPU target), and folder creation uses ``os.makedirs``.
    """
    # CONFIGS = yaml.load(open(args.config))  # deprecated, please set the configs in parse_args()

    # Set device: honor args.device via CUDA_VISIBLE_DEVICES, else fall back to CPU.
    if torch.cuda.is_available():
        os.environ["CUDA_VISIBLE_DEVICES"] = args.device.strip()
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")  # Not suggested

    # Set save folder & logging config: <save_folder>/<timestamp>/
    subfolder = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(time.time()))
    if not args.save_folder or (not os.path.isdir(args.save_folder)):
        print("Warning: invalid value of 'save_folder', set as default value: './save_folder'..")
        save_folder = "./save_folder"
    else:
        save_folder = args.save_folder
    save_folder = os.path.join(save_folder, subfolder)
    # makedirs creates parent and run folder in one call and never races
    # against a concurrent run with exist_ok=True.
    os.makedirs(save_folder, exist_ok=True)
    # TODO: logging

    # Load Dataset
    trainloader = get_loader(args.train_gtfile, batch_size=args.batch_size,
                             num_thread=args.num_workers)
    valloader = get_loader(args.val_gtfile, batch_size=args.batch_size,
                           num_thread=args.num_workers)

    # Init Net
    model = Net(numAngle=args.num_angle, numRho=args.num_rho, backbone=args.backbone)
    if args.resume:
        model.load_state_dict(torch.load(args.resume))
    model = torch.nn.DataParallel(model).to(device)

    # Optimizer
    optimizer = optim.Adam(model.parameters())
    # Loss
    criterion = torch.nn.CrossEntropyLoss()
    losses = AverageMeter()

    # Start Training
    model.train()
    step = 0  # global iteration id, starts from 1
    for epoch in range(args.max_epoch):
        for batch in trainloader:
            start = time.time()
            step += 1
            img_tensor, gt_tensor = batch
            # Fix: put data on the same device as the model so the
            # CrossEntropy loss does not mix CPU and GPU tensors.
            img_tensor = img_tensor.to(device)
            gt_tensor = gt_tensor.to(device)

            optimizer.zero_grad()
            # Forwarding
            preds = model(img_tensor)
            # Calculate Loss
            loss = criterion(preds, gt_tensor)
            loss.backward()
            optimizer.step()

            losses.update(loss.item(), args.batch_size)
            if step % args.show_interval == 0:
                logging.info(
                    f"Training [{epoch}/{args.max_epoch}][{step}] Loss:{losses.avg} Time:{time.time()-start:.1f}s")
            if step % args.val_interval == 0:
                pass  # TODO: run validation on valloader
# --- Detection test-set inference: runtime setup -------------------------
torch.set_default_tensor_type('torch.cuda.FloatTensor')
torch.backends.cudnn.benchmark = True

logger = Logger('./logs')
batch_size = 1
load_checkpoint = True

print('%s: calling main function ... ' % os.path.basename(__file__))

# --- Data: sequential pass over the detection test CSV -------------------
csv_path = '../data/detection_test.csv'
img_path = '../data/detection_test_t'
dataset = Rand_num(csv_path, img_path, 112 * 4, None)
sampler = SequentialSampler(dataset)
loader = DataLoader(dataset, batch_size=batch_size, sampler=sampler,
                    shuffle=False, num_workers=2)

# --- Model: optionally restore weights from SAVE_PATH --------------------
net = Net(14)
if load_checkpoint:
    net.load_state_dict(torch.load(SAVE_PATH))
net.cuda()

# Per-image accumulators for true positives, false positives and IoU.
accu_tp = []
accu_fp = []
accu_iou = []

for epoch in range(1):
    for num, data in enumerate(loader, 0):
        # get the inputs
        images, inputs, labels = data
        inputs, labels = inputs.float()[0] / 256, labels.float()
        # # # wrap them in Variable
        # inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
def main():
    """Training entry point driven by CONFIGS.

    Seeds RNGs, builds the model/optimizer/scheduler, optionally resumes
    from ``args.resume``, then trains for CONFIGS["TRAIN"]["EPOCHS"]
    epochs with per-epoch validation, best-checkpoint tracking and an
    ETA log line.

    Fix vs. original: a resumed run now actually continues from the
    checkpoint's epoch — the original stored it in ``args.start_epoch``
    but then looped from a hard-coded ``start_epoch = 0``.
    """
    logger.info(args)
    assert os.path.isdir(CONFIGS["DATA"]["DIR"])

    if CONFIGS['TRAIN']['SEED'] is not None:
        random.seed(CONFIGS['TRAIN']['SEED'])
        torch.manual_seed(CONFIGS['TRAIN']['SEED'])
        cudnn.deterministic = True

    model = Net(numAngle=CONFIGS["MODEL"]["NUMANGLE"],
                numRho=CONFIGS["MODEL"]["NUMRHO"],
                backbone=CONFIGS["MODEL"]["BACKBONE"])
    if CONFIGS["TRAIN"]["DATA_PARALLEL"]:
        logger.info("Model Data Parallel")
        model = nn.DataParallel(model).cuda()
    else:
        model = model.cuda(device=CONFIGS["TRAIN"]["GPU_ID"])

    # optimizer
    optimizer = torch.optim.Adam(
        model.parameters(),
        lr=CONFIGS["OPTIMIZER"]["LR"],
        weight_decay=CONFIGS["OPTIMIZER"]["WEIGHT_DECAY"])

    # learning rate scheduler
    scheduler = lr_scheduler.MultiStepLR(
        optimizer,
        milestones=CONFIGS["OPTIMIZER"]["STEPS"],
        gamma=CONFIGS["OPTIMIZER"]["GAMMA"])

    best_acc1 = 0
    start_epoch = 0
    if args.resume:
        if isfile(args.resume):
            logger.info("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            # Fix: resume the epoch counter instead of restarting at 0.
            start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            model.load_state_dict(checkpoint['state_dict'])
            # optimizer.load_state_dict(checkpoint['optimizer'])
            logger.info("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            logger.info("=> no checkpoint found at '{}'".format(args.resume))

    # dataloader
    train_loader = get_loader(CONFIGS["DATA"]["DIR"],
                              CONFIGS["DATA"]["LABEL_FILE"],
                              batch_size=CONFIGS["DATA"]["BATCH_SIZE"],
                              num_thread=CONFIGS["DATA"]["WORKERS"],
                              split='train')
    val_loader = get_loader(CONFIGS["DATA"]["VAL_DIR"],
                            CONFIGS["DATA"]["VAL_LABEL_FILE"],
                            batch_size=1,
                            num_thread=CONFIGS["DATA"]["WORKERS"],
                            split='val')
    logger.info("Data loading done.")

    # Tensorboard summary
    writer = SummaryWriter(log_dir=os.path.join(CONFIGS["MISC"]["TMP"]))

    best_acc = best_acc1
    is_best = False
    start_time = time.time()

    if CONFIGS["TRAIN"]["RESUME"] is not None:
        raise NotImplementedError

    if CONFIGS["TRAIN"]["TEST"]:
        validate(val_loader, model, 0, writer, args)
        return

    logger.info("Start training.")
    for epoch in range(start_epoch, CONFIGS["TRAIN"]["EPOCHS"]):
        train(train_loader, model, optimizer, epoch, writer, args)
        acc = validate(val_loader, model, epoch, writer, args)
        scheduler.step()

        if best_acc < acc:
            is_best = True
            best_acc = acc
        else:
            is_best = False

        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc,
                'optimizer': optimizer.state_dict()
            },
            is_best,
            path=CONFIGS["MISC"]["TMP"])

        # ETA bookkeeping: average seconds/epoch over this run's epochs.
        t = time.time() - start_time
        elapsed = DayHourMinute(t)
        t /= (epoch + 1) - start_epoch  # seconds per epoch
        t = (CONFIGS["TRAIN"]["EPOCHS"] - epoch - 1) * t
        remaining = DayHourMinute(t)
        logger.info(
            "Epoch {0}/{1} finishied, auxiliaries saved to {2} .\t"
            "Elapsed {elapsed.days:d} days {elapsed.hours:d} hours {elapsed.minutes:d} minutes.\t"
            "Remaining {remaining.days:d} days {remaining.hours:d} hours {remaining.minutes:d} minutes."
            .format(epoch, CONFIGS["TRAIN"]["EPOCHS"], CONFIGS["MISC"]["TMP"],
                    elapsed=elapsed, remaining=remaining))

    logger.info("Optimization done, ALL results saved to %s." %
                CONFIGS["MISC"]["TMP"])
fold_number = config["train_config"]["fold"] nclasses = config["train_config"]["nclasses"] mode = config["train_config"]["mode"] if "mode" in config[ "train_config"] else None multiclass_df = config["train_config"][ "multiclass_df"] if "multiclass_df" in config["train_config"] else None device = 'cuda' AUGMENTATIONS_TRAIN, AUGMENTATIONS_TEST = get_transforms() model = Net(num_classes=nclasses, config=config) if torch.cuda.device_count() > 1 and device == 'cuda': print("Let's use", torch.cuda.device_count(), "GPUs!") model = nn.DataParallel(model).to(device) checkpoint = torch.load(config["test_config"]["checkpoint"]) model.load_state_dict(checkpoint['model_state_dict']) model.eval() test_ids = os.listdir(config['test_config']['input_path']) for i in range(len(test_ids)): test_ids[i] = os.path.join(config['test_config']['input_path'], test_ids[i]) dataset = DatasetSubmissionRetriever( image_names=np.array(test_ids), transforms=AUGMENTATIONS_TEST, ) data_loader = torch.utils.data.DataLoader( dataset, batch_size=1,