import os

# Logger and Testcase are project-local classes imported elsewhere in the repo.


class Testset:
    """Top-level class for a testset, a collection of testcases."""

    def __init__(self, working_dir: str = None, testcases: list = None,
                 runner: str = None, logger: Logger = None, testset_name: str = ""):
        """Testset constructor."""
        self.working_dir = working_dir if working_dir is not None else ""
        # Default to None and build a fresh list here: a mutable default
        # argument ([]) would be shared across all Testset instances.
        self.testcases = testcases if testcases is not None else []
        self.logger = Logger() if logger is None else logger
        self.testset_name = testset_name

    def append(self, testcase: Testcase):
        """Append a testcase to the testset."""
        self.testcases.append(testcase)

    def run(self):
        """Run all testcases in the testset and return their results."""
        logname = os.path.join(self.working_dir, self.testset_name + ".log")
        self.logger.open_file_log(filename=logname)
        self.results = []
        n_testcases = len(self.testcases)
        for i, testcase in enumerate(self.testcases, start=1):
            testcase.set_logger(self.logger)
            print("Running test %d / %d ..." % (i, n_testcases))
            if testcase.expected_to_fail:
                # An expected failure counts as a pass, so invert the result.
                self.results.append(not testcase.run())
            else:
                self.results.append(testcase.run())
        self.log_results()
        if False in self.results:
            print("%d / %d testcase(s) FAILED" % (self.results.count(False), n_testcases))
        else:
            print("All %d testcase(s) PASSED" % n_testcases)
        self.logger.close()
        return self.results

    def log_results(self, results=None):
        """Log all testcases in the testset and their pass / fail status."""
        results = results if results is not None else self.results
        # Width of the longest testcase name, used to align the output columns.
        name_width = max(len(t.testcase_name) for t in self.testcases)
        result_fmt_str = " %%-%ds : %%s" % name_width
        self.logger.log("Results:")
        for testcase, passed in zip(self.testcases, results):
            self.logger.log(result_fmt_str % (testcase.testcase_name,
                                              "PASSED" if passed else "FAILED"))
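# Usage sketch. DummyTestcase is a hypothetical stand-in that implements only
# the interface Testset actually uses (testcase_name, expected_to_fail,
# set_logger(), run()); a real Testcase would do actual work in run().
class DummyTestcase:
    def __init__(self, name, should_fail=False):
        self.testcase_name = name
        self.expected_to_fail = should_fail
        self.logger = None

    def set_logger(self, logger):
        self.logger = logger

    def run(self):
        return True


testset = Testset(working_dir=".", testset_name="smoke")
testset.append(DummyTestcase("sanity"))
testset.append(DummyTestcase("known_bug", should_fail=True))
results = testset.run()
# -> [True, False]: "known_bug" ran successfully but was expected to fail,
#    so its result is inverted and it is reported as FAILED.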
import datetime
import os

import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torchvision.transforms as transforms

# Project-local helpers (ImageFolder, ResUNetG, NetD, init_weights, Logger,
# train, validate, save_checkpoint, savefig) are imported elsewhere in the repo.


def main(args):
    print("==> using settings {}".format(args))

    num_workers = 8
    num_epochs = args.num_epochs
    img_dir_path = args.img_dir_path
    cudnn.benchmark = True
    device = torch.device("cuda")
    h_dim = args.h_dim
    img_size = args.img_size
    batch_size = args.batch_size
    lr = 0.0001
    betas = (0.0, 0.9)

    transform = transforms.Compose([
        # transforms.Scale was deprecated and later removed; Resize is its replacement.
        transforms.Resize(img_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    train_loader = torch.utils.data.DataLoader(
        ImageFolder(img_dir_path, 'train.txt', transform),
        batch_size=batch_size, num_workers=num_workers,
        shuffle=True, pin_memory=True, drop_last=True)
    val_loader = torch.utils.data.DataLoader(
        ImageFolder(img_dir_path, 'test.txt', transform, step=32),
        batch_size=batch_size, num_workers=num_workers,
        shuffle=False, pin_memory=True, drop_last=True)

    model_gen = ResUNetG(img_size, h_dim, img_dim=3, norm_dim=3)
    model_dis = NetD(img_size, input_dim=6)
    model_gen = torch.nn.DataParallel(model_gen).to(device)
    model_dis = torch.nn.DataParallel(model_dis).to(device)
    model_gen.apply(init_weights)
    model_dis.apply(init_weights)
    optim_gen = optim.Adam(model_gen.parameters(), lr=lr, betas=betas)
    optim_dis = optim.Adam(model_dis.parameters(), lr=lr, betas=betas)

    # Optionally resume from a checkpoint.
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            start_epoch = checkpoint['epoch']
            model_gen.load_state_dict(checkpoint['gen_state_dict'])
            model_dis.load_state_dict(checkpoint['dis_state_dict'])
            optim_gen.load_state_dict(checkpoint['gen_optim'])
            optim_dis.load_state_dict(checkpoint['dis_optim'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
            out_dir_path = os.path.dirname(args.resume)
            logger = Logger(os.path.join(out_dir_path, 'log.txt'), resume=True)
        else:
            raise RuntimeError("=> no checkpoint found at '{}'".format(args.resume))
    else:
        start_epoch = 0
        out_dir_path = os.path.join('checkpoints', datetime.datetime.now().isoformat())
        if not os.path.exists(out_dir_path):
            os.makedirs(out_dir_path)
            print('Make output dir: {}'.format(out_dir_path))
        logger = Logger(os.path.join(out_dir_path, 'log.txt'))
        logger.set_names(['Epoch', 'Train Loss G', 'Train Loss D'])

    for epoch in range(start_epoch, start_epoch + num_epochs):
        print('\nEpoch: %d | LR: %.8f' % (epoch + 1, lr))
        # Train for one epoch.
        loss_gen, loss_dis = train(train_loader, model_gen, model_dis,
                                   optim_gen, optim_dis, device)
        # Append this epoch's losses to the logger file.
        logger.append([epoch + 1, loss_gen, loss_dis])
        if (epoch + 1) % args.snapshot == 0:
            # Validate.
            validate(val_loader, model_gen, device,
                     os.path.join(out_dir_path, 'epoch_{:04d}'.format(epoch + 1)))
            # Save a checkpoint.
            save_checkpoint({
                'epoch': epoch + 1,
                'gen_state_dict': model_gen.state_dict(),
                'dis_state_dict': model_dis.state_dict(),
                'gen_optim': optim_gen.state_dict(),
                'dis_optim': optim_dis.state_dict()
            }, checkpoint=out_dir_path)

    logger.close()
    logger.plot(['Train Loss G', 'Train Loss D'])
    savefig(os.path.join(out_dir_path, 'log.eps'))
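# A sketch of the command-line entry point that would drive main(). The flag
# names mirror the attributes the function reads (num_epochs, img_dir_path,
# h_dim, img_size, batch_size, resume, snapshot); the defaults are assumptions.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='ResUNetG GAN training')
    parser.add_argument('--num-epochs', dest='num_epochs', type=int, default=100)
    parser.add_argument('--img-dir-path', dest='img_dir_path', type=str, required=True)
    parser.add_argument('--h-dim', dest='h_dim', type=int, default=64)
    parser.add_argument('--img-size', dest='img_size', type=int, default=256)
    parser.add_argument('--batch-size', dest='batch_size', type=int, default=16)
    parser.add_argument('--resume', type=str, default='',
                        help='path to a checkpoint to resume from')
    parser.add_argument('--snapshot', type=int, default=10,
                        help='validate and save a checkpoint every N epochs')
    main(parser.parse_args())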
# Excerpt from inside the capture-and-detect loop; the frame-grabbing code
# at the top of the loop body is omitted in this excerpt.
try:
    while run_loop:
        # Break if a frame couldn't be grabbed
        # (frame capture happens here)

        # Get the box corners for the detected objects
        objRects = detector.detect()

        # If desired, display the frame
        if args.display:
            res = detector.display_frame(objRects)
            if not res:
                run_loop = False

        # We have ground-truth data for every 10th frame; we could log those
        # frames to later calculate accuracy:
        # if frame_ctr % 10 == 0:
        #     logger.write_log(frame=frame_ctr, detected=len(objRects))

        # Approximate FPS
        fps = detector.approx_fps()
        if fps:
            kw = {
                "width": width,
                "fps": fps,
                "breceived": req.breceived,
                "frame": frame_ctr,
                "detected": len(objRects),
            }
            req.breceived = 0
            logger.write_log(kw)
except Exception:
    # Stop the loop and re-raise; a bare raise preserves the original traceback.
    run_loop = False
    raise
finally:
    req.close()
    logger.close()
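# logger.write_log(kw) above receives a plain dict. A minimal sketch of such a
# logger, assuming (not confirmed by the source) that each call appends one
# CSV row with a fixed set of fields:
import csv

class CsvLogger:
    def __init__(self, path, fieldnames):
        self._fh = open(path, "w", newline="")
        self._writer = csv.DictWriter(self._fh, fieldnames=fieldnames)
        self._writer.writeheader()

    def write_log(self, kw):
        self._writer.writerow(kw)

    def close(self):
        self._fh.close()

# logger = CsvLogger("detect.csv", ["width", "fps", "breceived", "frame", "detected"])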