def plotDifferences():
    bigDictionary: typing.Dict = train.loadModel()
    epoch: int = bigDictionary["startingEpoch"]
    step: int = min(epoch // 25 + 1, 10)
    trainingDiff: typing.List[typing.Tuple[float, float]] = \
        bigDictionary["trainingDifference"][::step]
    validationDiff: typing.List[typing.Tuple[float, float]] = \
        bigDictionary["validationDifference"][::step]
    trainingDiffPos: typing.List[float] = [diff[0] for diff in trainingDiff]
    validationDiffPos: typing.List[float] = [
        diff[0] for diff in validationDiff
    ]
    trainingDiffOri: typing.List[float] = [diff[1] for diff in trainingDiff]
    validationDiffOri: typing.List[float] = [
        diff[1] for diff in validationDiff
    ]
    epochs: np.ndarray = np.arange(0, epoch, step)

    figure, (axisPos, axisOri) = plt.subplots(2, 1, sharex=True)
    axisPos.plot(epochs, trainingDiffPos, label="Training", color='blue')
    plotTrendLine(axisPos, epochs, trainingDiffPos, color='blue')
    axisPos.plot(epochs, validationDiffPos, label="Validation", color='yellow')
    plotTrendLine(axisPos, epochs, validationDiffPos, color='yellow')
    axisPos.set(ylabel="Position Difference (m)")
    plt.suptitle("Training and Validation Difference")
    fontdict: typing.Dict = {
        'fontsize': 10,
        'fontweight': matplotlib.rcParams['axes.titleweight'],
        'verticalalignment': 'baseline',
        'horizontalalignment': "center"
    }
    axisPos.set_title("v{} Model #{:02} Epoch {}".format(
        Config.version, Config.getArgs().model_number, epoch),
                      fontdict=fontdict)
    axisPos.legend()

    axisOri.plot(epochs, trainingDiffOri, label="Training", color='blue')
    plotTrendLine(axisOri, epochs, trainingDiffOri, color='blue')
    axisOri.plot(epochs, validationDiffOri, label="Validation", color='yellow')
    plotTrendLine(axisOri, epochs, validationDiffOri, color='yellow')
    axisOri.set(xlabel="Epochs", ylabel="Orientation Difference")
    axisOri.legend()

    figure.savefig("plots/diff-v{}-E{:04}-N{:02}.png".format(
        Config.version, epoch, Config.getArgs().model_number),
                   bbox_inches="tight",
                   pad_inches=.2)
    plt.show()
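
# `plotTrendLine` is called by the plotting helpers in this file but its body
# is not shown in this snippet. A minimal sketch, assuming it fits and draws a
# first-order (linear) least-squares trend as a translucent dashed line:
def plotTrendLine(axis, xs, ys, color):
    # np.polyfit returns coefficients from highest degree to lowest.
    slope, intercept = np.polyfit(xs, ys, 1)
    axis.plot(xs,
              slope * np.asarray(xs) + intercept,
              linestyle='--',
              color=color,
              alpha=0.5)
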
def parse_video(i_file, o_file):
    """Run the vehicle detection pipeline on a video.

    i_file: /path/to/input_video.mp4
    o_file: /path/to/output_video.mp4
    """
    model = loadModel()
    clip = VideoFileClip(i_file)
    # Run the frame interpreter over every frame of the clip.
    new_clip = clip.fl_image(lambda x: interpreteFrame(x, model))
    new_clip.write_videofile(o_file, audio=False)
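
# Example usage (hypothetical file names), relying on moviepy's VideoFileClip:
#
#   parse_video('project_video.mp4', 'project_video_out.mp4')
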
def plotLosses():
    bigDictionary: typing.Dict = train.loadModel()
    epoch: int = bigDictionary["startingEpoch"]
    step: int = min(epoch // 25 + 1, 10)
    trainingLoss: typing.List[float] = bigDictionary["trainingLoss"][::step]
    validationLoss: typing.List[float] = bigDictionary[
        "validationLoss"][::step]
    beta: int = Config.getArgs().beta
    epochs: np.ndarray = np.arange(0, epoch, step)
    betas: np.ndarray = np.array([beta for _ in range(0, epoch, step)])

    figure, axis = plt.subplots()
    axis.plot(epochs, trainingLoss, label="Training Loss", color="blue")
    plotTrendLine(axis, epochs, trainingLoss, color='blue')
    axis.plot(epochs, validationLoss, label="Validation Loss", color='yellow')
    plotTrendLine(axis, epochs, validationLoss, color='yellow')
    subtitle: str = "Beta: {:03} v{} Model #{:02} Epochs {}".format(
        beta, Config.version, Config.getArgs().model_number, epoch)
    # Only draw the beta reference line when it is on the same scale as the
    # losses (it would dwarf very small losses). When the line is drawn, drop
    # the "Beta: XXX " prefix (10 characters) from the subtitle, since the
    # line itself is labelled.
    if beta // 4 <= trainingLoss[0]:
        axis.plot(epochs, betas, 'r--,', label="Beta")
        subtitle = subtitle[10:]
    axis.set(xlabel='Epochs', ylabel='Loss')
    plt.title(subtitle, fontsize=10)
    plt.suptitle("Training and Validation Loss")
    axis.legend()
    axis.grid()
    figure.savefig("plots/loss-v{}-E{:04}-N{:02}.png".format(
        Config.version, epoch, Config.getArgs().model_number))
    plt.show()
def main():
    #### VARIOUS SETTINGS ####
    # Load options from .yml
    opt = initialize.InitParamsYaml()
    if opt['train']['evaluation']:
        # The whole opt dict is saved into the checkpoint, so for evaluation
        # we can reuse it; just make sure opt['name'] is correct.
        ckpt = torch.load(os.path.join(opt['path']['ckptDir'],
                                       opt['train']['ckptName']),
                          map_location='cpu')
        opt['network'] = ckpt['opt']['network']
        opt['train']['evaluation'] = True
        print(initialize.dict2str(opt))

    # Loggers
    initialize.setup_logger('base', opt['path']['logDir'], 'train',
                            screen=True, tofile=True)
    initialize.setup_logger('test', opt['path']['logDir'], 'test',
                            screen=True, tofile=True)
    logger = logging.getLogger('base')
    logger.info(initialize.dict2str(opt))

    # Tensorboard settings
    version = float(torch.__version__[0:3])
    if version >= 1.1:  # PyTorch >= 1.1 ships its own SummaryWriter
        from torch.utils.tensorboard import SummaryWriter
    else:
        logger.info('You are using PyTorch {}. Tensorboard will use '
                    '[tensorboardX]'.format(version))
        from tensorboardX import SummaryWriter
    writer = SummaryWriter(log_dir=opt['path']['tensorboardDir'])

    # Convert to NoneDict, which returns None for missing keys.
    opt = initialize.dict_to_nonedict(opt)

    # CUDA things
    logger.info('Let us use {} GPUs!'.format(int(torch.cuda.device_count())))
    torch.backends.cudnn.benchmark = True

    #### DATASET & MODEL ####
    # Dataset
    if opt['datasets']['name'] == 'LCTSC':
        if opt['datasets']['synthesizeDataset']:
            import noise
            noise.LCTSCDicom2Numpy(opt)
    if opt['datasets']['name'] == 'AAPM':
        if opt['datasets']['buildNumpyDataset']:
            import noise
            noise.AAPMDicom2Numpy(opt)
    npyTrain = np.load(os.path.join(opt['datasets']['npyDir'],
                                    opt['datasets']['train']['fileName']))
    testpath = os.path.join(opt['datasets']['npyDir'],
                            opt['datasets']['test']['fileName'])
    print("testfile path:", testpath)
    npyTest = np.load(testpath)
    print(npyTest['LD'].shape)
    logger.info('finished loading the numpy dataset!')

    trainDataset = dataset.LowDoseCTDataset(
        npyTrain['LD'], npyTrain['FD'],
        transforms=Option2Transforms(opt, train=True),
        splitname="train")
    testDataset = dataset.LowDoseCTDataset(
        npyTest['LD'], npyTest['FD'],
        transforms=Option2Transforms(opt, train=False),
        splitname="test")
    trainLoader, testLoader = dataset.CreateDataLoader(opt, trainDataset,
                                                       testDataset)
    logger.info('finished building the datasets!')

    # Create the model and (optionally) load a checkpoint.
    model = create_model(opt)
    if opt['train']['evaluation'] or opt['train']['resume']:
        ckpt = torch.load(os.path.join(opt['path']['ckptDir'],
                                       opt['train']['ckptName']),
                          map_location='cpu')
        logger.info('loading the model from epoch: {}\t iteration: {}'.format(
            ckpt['epoch'], ckpt['iters']))
        logger.info('the model has psnr: {}\t ssim: {}; loss: {}'.format(
            ckpt['psnr'], ckpt['ssim'], ckpt['loss']))
        model = train.loadModel(ckpt['state_dict'], model)
        logger.info('state dict has been loaded into the model successfully')

    # Data parallel, GPU
    model = torch.nn.DataParallel(model).cuda()

    # Evaluate the model
    if opt['train']['evaluation']:
        logger.info('Evaluate the model')
        #### Check the psnr of the test data
        idx = 0
        LDpsnrs = []
        psnrs = []
        ssims = []
        model.eval()
        start = time.time()
        with torch.no_grad():
            for _, (LD, FD, _, _) in enumerate(testLoader):
                print('evaluating image ', idx)
                LD = LD.cuda()
                FDRecon = model.forward(LD)
                LD = LD.detach().cpu().numpy()
                FD = FD.detach().cpu().numpy()
                FDRecon = FDRecon.detach().cpu().numpy()
                psnrs.extend(visualize.compare_psnr(FDRecon, FD))
                ssims.extend(visualize.compare_ssim(FDRecon, FD))
                LDpsnrs.extend(visualize.compare_psnr(LD, FD))
                for i in range(LD.shape[0]):
                    idx += 1
                    plt.imsave(opt['path']['debugDir'] +
                               '/{:06d}_ld_{}.png'.format(idx, opt['name']),
                               np.squeeze(LD[i]), cmap='gray')
                    plt.imsave(opt['path']['debugDir'] +
                               '/{:06d}_fd_{}.png'.format(idx, opt['name']),
                               np.squeeze(FD[i]), cmap='gray')
                    plt.imsave(opt['path']['debugDir'] +
                               '/{:06d}_fdrecon_{}.png'.format(
                                   idx, opt['name']),
                               np.squeeze(FDRecon[i]), cmap='gray')
        logger.info('psnr: {}\t ssim: {}\t time(s): {}\t'.format(
            sum(psnrs) / len(psnrs), sum(ssims) / len(ssims),
            time.time() - start))
        #### For patient L506 (the last 211 test slices).
        print('Patient L506\t psnr: {}\t ssim: {}'.format(
            sum(psnrs[1011:]) / 211, sum(ssims[1011:]) / 211))
        sys.exit()

    start_epoch = opt['train']['start_epoch']
    iters = opt['train']['start_iter']
    best_psnr = -1

    # Resume training
    if opt['train']['resume']:
        start_epoch = ckpt['epoch']
        iters = ckpt['iters']
        logger.info('resume the model training from epoch: {}\t '
                    'iteration: {}'.format(start_epoch, iters))
        best_psnr = ckpt['psnr']

    # Optimizer, scheduler, criterion
    criterion = train.SetCriterion(opt)
    optimizer = train.SetOptimizer(opt, model)
    scheduler = train.SetLRScheduler(opt, optimizer)

    # Training
    if not opt['train']['no_train']:
        for epoch in range(start_epoch + 1,
                           start_epoch + opt['train']['epochs'] + 1):
            start = time.time()
            model.train()
            losses = []
            for _, (LD, FD, STRS, COHERS) in enumerate(
                    tqdm(trainLoader, desc="Iteration")):
                iters += 1
                LD = LD.cuda()
                FD = FD.cuda()
                STRS = STRS.cuda()
                COHERS = COHERS.cuda()
                FDRecon = model.forward(LD)
                if 'asymmetric' in opt['train']['criterion']:
                    loss = criterion(FDRecon, FD, LD)
                elif 'triple' in opt['train']['criterion']:
                    if 'tripleasym' in opt['train']['criterion']:
                        loss = criterion(FDRecon, FD, LD, STRS, COHERS)
                    else:
                        loss = criterion(FDRecon, FD, STRS, COHERS)
                else:
                    loss = criterion(FDRecon, FD)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                scheduler.step()
                # Store a detached scalar so the list does not keep the whole
                # autograd graph alive across the epoch.
                losses.append(loss.item())
                writer.add_scalar('train_loss/iterations', loss, iters)
            writer.add_scalar('train_loss/epochs',
                              sum(losses) / len(losses), epoch)
            logger.info('epoch: {}\t loss: {}\t time(s): {}\t'.format(
                epoch, sum(losses) / len(losses), time.time() - start))

            if epoch % opt['train']['valFreq'] == 0:
                start = time.time()
                LDpsnrs = []
                psnrs = []
                ssims = []
                model.eval()
                with torch.no_grad():
                    for _, (LD, FD, _, _) in enumerate(testLoader):
                        LD = LD.cuda()
                        FDRecon = model.forward(LD)
                        LD = LD.detach().cpu().numpy()
                        FD = FD.detach().cpu().numpy()
                        FDRecon = FDRecon.detach().cpu().numpy()
                        psnrs.extend(visualize.compare_psnr(FDRecon, FD))
                        ssims.extend(visualize.compare_ssim(FDRecon, FD))
                        LDpsnrs.extend(visualize.compare_psnr(LD, FD))
                psnrAvg = sum(psnrs) / len(psnrs)
                ssimAvg = sum(ssims) / len(ssims)
                writer.add_scalar('test_psnr/epochs', psnrAvg, epoch)
                writer.add_scalar('test_ssim/epochs', ssimAvg, epoch)
                logger.info('Low-dose image psnr: {}'.format(
                    sum(LDpsnrs) / len(LDpsnrs)))
                logger.info(
                    'epoch: {}\t iterations: {}\t psnr: {}\t ssim: {}\t '
                    'time(s): {}\t'.format(epoch, iters, psnrAvg, ssimAvg,
                                           time.time() - start))
                logger.info('saving the latest model')
                torch.save(
                    {
                        'state_dict': model.state_dict(),
                        'epoch': epoch,
                        'iters': iters,
                        'psnr': psnrAvg,
                        'ssim': ssimAvg,
                        'loss': sum(losses) / len(losses),
                        'opt': opt
                    }, opt['path']['ckptDir'] + '/latest_ckpt.t7')
                if psnrAvg > best_psnr:
                    logger.info('saving the best model')
                    torch.save(
                        {
                            'state_dict': model.state_dict(),
                            'epoch': epoch,
                            'iters': iters,
                            'psnr': psnrAvg,
                            'ssim': ssimAvg,
                            'loss': sum(losses) / len(losses),
                            'opt': opt
                        }, opt['path']['ckptDir'] + '/ckpt.t7')
                    best_psnr = psnrAvg

    if opt['train']['evaluation']:
        logger.info('Evaluate the model')
        #### Check the psnr of the training data
        testTrainDataset = dataset.LowDoseCTDataset(
            npyTrain['LD'], npyTrain['FD'],
            transforms=Option2Transforms(opt, train=False, reduce=True))
        _, testTrainLoader = dataset.CreateDataLoader(opt, trainDataset,
                                                      testTrainDataset)
        testTrainpsnrs = []
        testTrainssims = []
        testTrainInputPatches = []
        testTrainInputKernels = []
        RECORD_LIMIT = 1500
        record_count = 0
        model.eval()
        start = time.time()
        with torch.no_grad():
            for _, (LD, FD, _, _) in enumerate(testTrainLoader):
                LD = LD.cuda()
                if 'KPN' in opt['network']['whichModel']:
                    FDRecon, kernels = model.forward(x=LD, verbose=True)
                    if record_count < RECORD_LIMIT:
                        record_count += 1
                        # Record (input patch, predicted kernel) pairs for
                        # offline kernel statistics.
                        testTrainInputPatches.append(
                            input2col(
                                LD,
                                patch_size=opt['network']['KPNKernelSize']
                            ).detach().cpu().numpy().astype(np.float16))
                        ksize = kernels.shape[1]
                        testTrainInputKernels.append(
                            kernels.permute(0, 2, 3, 1).contiguous().view(
                                -1,
                                ksize).contiguous().detach().cpu().numpy())
                else:
                    FDRecon = model.forward(LD)
                LD = LD.detach().cpu().numpy()
                FD = FD.detach().cpu().numpy()
                FDRecon = FDRecon.detach().cpu().numpy()
                testTrainpsnrs.extend(visualize.compare_psnr(FDRecon, FD))
                testTrainssims.extend(visualize.compare_ssim(FDRecon, FD))
        # Patch-kernel pairs are only recorded for KPN models; skip the save
        # otherwise so np.concatenate is not called on empty lists.
        if testTrainInputPatches:
            testTrainInputPatches_total = np.concatenate(
                testTrainInputPatches, axis=0)
            testTrainInputKernels_total = np.concatenate(
                testTrainInputKernels, axis=0)
            logger.info('Saving patch-kernel pairs!')
            np.savez_compressed(
                os.path.join(opt['path']['logDir'],
                             'kernel_stats_cropped_small.npz'),
                inputs=testTrainInputPatches_total,
                kernels=testTrainInputKernels_total)
        logger.info(
            'On the training data, psnr: {}\t ssim: {}\t time(s): {}\t'.
            format(sum(testTrainpsnrs) / len(testTrainpsnrs),
                   sum(testTrainssims) / len(testTrainssims),
                   time.time() - start))

    # Save test images
    logger.info('saving reconstruction results')
    idx = 0
    if opt['logger']['save_best']:
        model = create_model(opt)
        ckpt = torch.load(os.path.join(opt['path']['ckptDir'],
                                       opt['train']['ckptName']),
                          map_location='cpu')
        logger.info('loading the model from epoch: {}\t iteration: {}'.format(
            ckpt['epoch'], ckpt['iters']))
        logger.info('the model has psnr: {}\t ssim: {}; loss: {}'.format(
            ckpt['psnr'], ckpt['ssim'], ckpt['loss']))
        model = train.loadModel(ckpt['state_dict'], model)
        logger.info('state dict has been loaded into the model successfully')
        model.eval()
        model = torch.nn.DataParallel(model).cuda()

    save_grp = [888]
    if opt['network']['temperature'] is not None:
        temperature = opt['network']['temperature']
    else:
        temperature = 1.0
    LDs = []
    FDs = []
    FDRecons = []
    cpu_device = torch.device('cpu')
    model_cpu = model.module.to(cpu_device)
    for _, (LD, FD, _, _) in enumerate(testLoader):
        print('evaluating image ', idx)
        # LD = LD.cuda()
        if opt['train']['no_train']:
            FDRecon, kernels = model_cpu.forward(x=LD, verbose=True)
        else:
            FDRecon = model_cpu.forward(x=LD, temperature=temperature)
        LD = LD.detach().cpu().numpy()
        FD = FD.detach().cpu().numpy()
        FDRecon = FDRecon.detach().cpu().numpy()
        LDs.append(LD)
        FDs.append(FD)
        FDRecons.append(FDRecon)
        for i in range(LD.shape[0]):
            idx += 1
            # `(1 > 0)` is always true, so every image is saved, not just
            # those listed in save_grp.
            if (idx in save_grp) or (1 > 0):
                cv2.imwrite(
                    opt['path']['debugDir'] +
                    '/{:06d}_fdrecon_{}.png'.format(idx, opt['name']),
                    np.squeeze(FDRecon[i]) * 255.)
                cv2.imwrite(
                    opt['path']['debugDir'] +
                    '/{:06d}_fd_{}.png'.format(idx, opt['name']),
                    np.squeeze(FD[i]) * 255.)
                cv2.imwrite(
                    opt['path']['debugDir'] +
                    '/{:06d}_ld_{}.png'.format(idx, opt['name']),
                    np.squeeze(LD[i]) * 255.)
            if opt['train']['no_train'] and (idx in save_grp):
                kernels = kernels.view(kernels.shape[0], 21, 21,
                                       kernels.shape[2],
                                       kernels.shape[3]).permute(
                                           0, 3, 4, 1, 2)
                (B, H, W, X, Y) = kernels.shape
                disp_kernels = np.zeros((32, 32, 21, 21), dtype=np.float32)
                kernels = kernels.detach().cpu().contiguous().numpy()
                for id1 in range(H):
                    for id2 in range(W):
                        if (id1 % 16 == 1) and (id2 % 16 == 1):
                            disp_kernels[id1 // 16, id2 // 16, :, :] = \
                                np.squeeze(kernels[i][id1][id2])
                np.savez(opt['path']['debugDir'] +
                         '/kernels888_{}.npz'.format(opt['name']),
                         kernels=disp_kernels)

    np.savez(opt['path']['debugDir'] + '/results_{}.npz'.format(opt['name']),
             LD=LDs, FD=FDs, FDRecon=FDRecons)
    logger.info('finished saving all the test images')
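
# `input2col` (used above to record patch-kernel pairs) is not defined in this
# snippet. A minimal sketch, assuming a single-channel input and that each
# pixel should yield one row of patch_size**2 values, matching the
# (-1, ksize) layout of the flattened kernels:
import torch.nn.functional as F


def input2col(x, patch_size):
    # Zero-pad so every pixel gets a full neighborhood, then unfold to
    # (B, patch_size**2, H*W) and flatten to (B*H*W, patch_size**2).
    cols = F.unfold(x, kernel_size=patch_size, padding=patch_size // 2)
    return cols.permute(0, 2, 1).reshape(-1, patch_size ** 2)
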
if __name__ == '__main__':
    if len(sys.argv) < 3:
        ID = input('Student ID: ')
        passwd = getpass('Password: ')
    else:
        # Assumed: credentials may also be passed on the command line.
        ID, passwd = sys.argv[1], sys.argv[2]
    # The browser setup was redacted in the source; `driver` is assumed to be
    # a WebDriver instance created here (e.g. by an openBrowser() helper).
    driver = openBrowser()
    print('Browser opened')
    screenshot = getScreenshot(driver)
    print('Screenshot')
    # Crop the CAPTCHA region out of the screenshot.
    x, y, w, h = [836, 494, 150, 75]
    CAPTCHA = screenshot[y:y + h, x:x + w]
    # cv2.imshow('', CAPTCHA)
    # cv2.waitKey()
    print('Start parsing')
    digits = parse.parseImage(CAPTCHA, False, False)
    print('Finish parsing')
    model = train.loadModel('SVM_v2.sav')
    print(digits[0].shape)
    answer = getAnswer(model, digits)
    print(answer)
    login(driver, ID, passwd, answer)
    print('Logged in')
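
# `getAnswer` is not shown in this snippet. A minimal sketch, assuming the
# model is a scikit-learn classifier (loaded from 'SVM_v2.sav') and each entry
# in `digits` is one segmented digit image:
def getAnswer(model, digits):
    # Flatten each digit image into a feature vector, classify it, and join
    # the predicted digits into the CAPTCHA answer string.
    return ''.join(str(model.predict(d.reshape(1, -1))[0]) for d in digits)
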
    # Thresholding: zero out weak detections in the heat map.
    lastHeatMap = heatMap  # alias: the display below shows the thresholded map
    heatMap[heatMap <= 0.13] = 0
    boxes = heatBoxes(heatMap)
    img = draw_boxes(img, boxes, 'random')
    # Pin one pixel so the 'hot' colormap scale stays comparable across frames.
    heatMap[0, 0] = 2

    plt.ion()
    ax1 = plt.subplot(211)
    ax1.imshow(img)
    ax2 = plt.subplot(212)
    ax2.imshow(lastHeatMap, cmap='hot')
    plt.pause(0.001)
    ax1.cla()
    ax2.cla()
    return img


if __name__ == "__main__":
    # showAll = True
    debug = True
    test_images = glob.glob('./test_images/test*.jpg')
    # Load the model once instead of reloading it for every image.
    model = loadModel()
    for path in test_images:
        img = cv2.imread(path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        interpreteFrame(img, model)
        input()
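
# `heatBoxes` is not defined in this snippet. A minimal sketch, assuming it
# labels the connected non-zero regions of the thresholded heat map and
# returns one bounding box per region as ((x1, y1), (x2, y2)):
from scipy.ndimage import label


def heatBoxes(heat_map):
    labeled, count = label(heat_map)
    boxes = []
    for region in range(1, count + 1):
        ys, xs = np.nonzero(labeled == region)
        boxes.append(((xs.min(), ys.min()), (xs.max(), ys.max())))
    return boxes
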
def plotOutput():
    bigDictionary = train.loadModel()
    network: torch.nn.Module = bigDictionary["network"]
    epoch: int = bigDictionary["startingEpoch"]

    # Load data
    image: Image = Image.open(Config.getArgs().image)
    transforms = tv.transforms.Compose([
        # Resize the image to 256x256.
        tv.transforms.Resize((256, 256)),
        # Crop the image to 224x224.
        tv.transforms.RandomCrop((224, 224)),
        tv.transforms.ToTensor()
    ])
    output, outputs, uncertPos, uncertOri = train.testSingleImage(
        network=network, image=image, transforms=transforms)

    def getPos(theta):
        return np.array([np.cos(theta), np.sin(theta)])

    # The position predicted by the network.
    realPos = output[:2]
    # The different positions from dropout.
    positions = outputs[:, :2]
    # The uncertainty (covariance) matrix for positions.
    certPos = uncertPos[:2, :2]
    # The scalar uncertainty value for position (trace of the covariance).
    certPosValue = certPos.diagonal().sum()
    U, S, _ = np.linalg.svd(certPos)

    fig = plt.figure(0)
    axis = fig.add_subplot(111, aspect='equal')
    for pos in positions:
        axis.scatter(pos[0], pos[1])
    # Trace out the uncertainty ellipse from the SVD of the covariance.
    precision = 500
    theta = np.linspace(0.0, 2.0 * np.pi, precision)
    positionForEllipse = U * np.sqrt(S) @ getPos(theta)
    axis.scatter(realPos[0], realPos[1], c='k')
    for pos in positionForEllipse.T:
        axis.scatter(pos[0] + realPos[0], pos[1] + realPos[1], c='k')

    phases = ["train", "validate", "test"]
    for phase in phases:
        data = dataset.PoseNetDataSet(transform=transforms,
                                      phase=phase,
                                      dataroot=Config.getArgs().database_root)
        label: np.ndarray = data[Config.getArgs().image]
        if label is not None:
            break
    if label is None:
        Logger.warn(
            "Could not find label for given image. Plotting without it.")
    else:
        actualPos = label[:2]
        axis.scatter(actualPos[0], actualPos[1], marker='*')
        errorVector = np.subtract(realPos, actualPos)
        anees = train.ANEES(errorVector.reshape(1, 2),
                            certPos.reshape(1, 2, 2), 1)
        bottom, top, left, right, height, width = getBounds(axis=axis)
        center = ((left + right) / 2.0, (bottom + top) / 2.0)
        # Make numpy print 2 significant digits.
        np.set_printoptions(
            formatter={'float_kind': lambda x: "{:.2}".format(x)},
            suppress=True)
        data_text = "Uncert: {:.2} Error: {} ANEES: {:.2}".format(
            certPosValue, errorVector, anees)
        # Reset the print options: `formatter` is reset by any call to
        # set_printoptions, but `suppress` must be reset explicitly.
        np.set_printoptions(suppress=False)
        font_size = 10
        axis.text(center[0], bottom - height / 2, data_text,
                  fontsize=font_size,
                  horizontalalignment='center',
                  verticalalignment='bottom')

    plt.suptitle("Uncertainty In Detail")
    plt.title("v{} E{:04} #{:02}".format(Config.version, epoch,
                                         Config.getArgs().model_number))
    axis.update_datalim(positionForEllipse.T)
    axis.grid(True)
    plt.show()
    fig.savefig("plots/uncert-v{}-E{:04}-N{:02}.png".format(
        Config.version, epoch, Config.getArgs().model_number))
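
# `getBounds` is not shown in this snippet. A minimal sketch, assuming it
# returns the axis view limits plus the height and width of the view box, in
# the unpacking order used above:
def getBounds(axis):
    left, right = axis.get_xlim()
    bottom, top = axis.get_ylim()
    return bottom, top, left, right, top - bottom, right - left
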
def plotTestData():
    bigDictionary: typing.Dict = train.loadModel()
    epoch: int = bigDictionary["startingEpoch"]
    step: int = min(3, epoch // 25 + 1)
    testingDiff: typing.List[typing.Tuple[float, float]] = \
        bigDictionary["testingDifference"][::step]
    uncertainty: typing.List[typing.Tuple[float, float]] = \
        bigDictionary["uncertainty"][::step]
    anees: typing.List[typing.Tuple[float, float]] = \
        bigDictionary["anees"][::step]
    diffPos: typing.List[float] = [diff[0] for diff in testingDiff]
    certPos: typing.List[float] = [cert[0] for cert in uncertainty]
    diffOri: typing.List[float] = [diff[1] for diff in testingDiff]
    certOri: typing.List[float] = [cert[1] for cert in uncertainty]
    aneesPos: typing.List[float] = [err[0] for err in anees]
    aneesOri: typing.List[float] = [err[1] for err in anees]
    epochs: np.ndarray = np.arange(0, epoch, epoch / len(testingDiff))

    figure, axis = plt.subplots(2, 3, sharex=True)
    axisPos: np.ndarray = axis[0]
    axisOri: np.ndarray = axis[1]
    axisDiffPos: Axes = axisPos[0]
    axisDiffOri: Axes = axisOri[0]
    axisCertPos: Axes = axisPos[1]
    axisCertOri: Axes = axisOri[1]
    axisANEESPos: Axes = axisPos[2]
    axisANEESOri: Axes = axisOri[2]

    axisDiffPos.plot(epochs, diffPos, color='blue')
    plotTrendLine(axisDiffPos, epochs, diffPos, color='blue')
    axisDiffPos.set(ylabel="Position Difference (m)")
    plt.suptitle("v{} Model #{:02} Epoch {}".format(
        Config.version, Config.getArgs().model_number, epoch))
    axisDiffPos.set_title("Testing Difference")

    axisDiffOri.plot(epochs, diffOri, color='green')
    plotTrendLine(axisDiffOri, epochs, diffOri, color='green')
    axisDiffOri.set(xlabel="Epochs", ylabel="Orientation Difference")

    axisCertPos.plot(epochs, certPos, color='blue')
    plotTrendLine(axisCertPos, epochs, certPos, color='blue')
    axisCertPos.set(ylabel="Position Uncertainty")
    axisCertPos.set_title("Testing Uncertainty")

    axisCertOri.plot(epochs, certOri, color='green')
    plotTrendLine(axisCertOri, epochs, certOri, color='green')
    axisCertOri.set(xlabel="Epochs", ylabel="Orientation Uncertainty")

    axisANEESPos.plot(epochs, aneesPos, color='blue')
    plotTrendLine(axisANEESPos, epochs, aneesPos, color='blue')
    axisANEESPos.set(ylabel="ANEES (Position)")
    axisANEESPos.set_title("ANEES")

    axisANEESOri.plot(epochs, aneesOri, color='green')
    plotTrendLine(axisANEESOri, epochs, aneesOri, color='green')
    axisANEESOri.set(ylabel="ANEES (Orientation)")

    # wspace is space between each subplot in the width
    # while hspace is space between each subplot in the height.
    figure.subplots_adjust(wspace=0.8, hspace=0.3)
    figure.savefig('plots/test-v{}-E{:04}-N{:02}.png'.format(
        Config.version, epoch, Config.getArgs().model_number),
                   bbox_inches="tight",
                   pad_inches=.4)
    plt.show()