def executor_derived(file_path_list: List[Path], output_path: Path, **kwargs) -> Dict:
    """Run Squeeze root-cause localization on a pair of derived-measure CSVs.

    :param file_path_list: two CSV paths; index 0 is the ``*.a.csv`` file and
        index 1 its ``b`` counterpart. Each must have ``real`` and ``predict``
        columns (parsed as float).
    :param output_path: directory where per-timestamp debug figures are saved.
    :param kwargs: forwarded to :class:`SqueezeOption`; ``debug`` is popped here.
    :return: dict with ``timestamp``, ``elapsed_time`` (seconds) and
        ``root_cause`` (string, empty if localization produced nothing).
    """
    # BUG FIX: original read `debug = kwargs.pop('debug', False),` -- the
    # trailing comma made `debug` a one-element tuple, which is truthy even
    # when the popped value is False.
    debug = kwargs.pop('debug', False)
    logger.remove()
    # BUG FIX: original used .rstrip('.a.csv'), which strips any trailing run
    # of the characters {., a, c, s, v} -- e.g. "12a.a.csv" would also lose
    # its final "a". Remove the literal suffix exactly once instead.
    ts = file_path_list[0].name
    if ts.endswith('.a.csv'):
        ts = ts[:-len('.a.csv')]
    logger.add(
        sys.stdout, level='DEBUG',
        format=f"<yellow>{ts}</yellow> - {LOGURU_FORMAT}",
        backtrace=True
    )
    logger.info(f"running squeeze for {ts}")
    # dtype='str' + explicit astype keeps non-numeric columns as strings while
    # coercing only the two measure columns to float.
    dfa = pd.read_csv(file_path_list[0].resolve(), engine='python',
                      dtype='str', delimiter=r"\s*,\s*")
    dfa['real'] = dfa['real'].astype(float)
    dfa['predict'] = dfa['predict'].astype(float)
    dfb = pd.read_csv(file_path_list[1].resolve(), engine='python',
                      dtype='str', delimiter=r"\s*,\s*")
    dfb['real'] = dfb['real'].astype(float)
    dfb['predict'] = dfb['predict'].astype(float)
    # Drop rows that are all-zero in both files: they carry no signal for the
    # derived measure and would only produce 0/0 divisions.
    zero_index = (dfa.real == 0) & (dfa.predict == 0) & (dfb.real == 0) & (dfb.predict == 0)
    dfa = dfa[~zero_index]
    dfb = dfb[~zero_index]
    try:
        timestamp = int(ts)
    except ValueError:
        timestamp = ts
        logger.warning(f"Unresolved timestamp: {timestamp}")
    tic = time.time()
    # Element-wise safe division: yields 0 wherever the denominator is 0.
    divide = lambda x, y: np.divide(x, y, out=np.zeros_like(x), where=y != 0)
    model = Squeeze(
        data_list=[dfa, dfb],
        op=divide,
        option=SqueezeOption(
            debug=debug,
            fig_save_path=f"{output_path.resolve()}/{timestamp}" + "{suffix}" + ".pdf",
            enable_filter=True,
            **kwargs,
        )
    )
    model.run()
    logger.info("\n" + model.report)
    try:
        # Union all root-cause sets, then render them as one string.
        root_cause = AC.batch_to_string(
            frozenset(reduce(lambda x, y: x.union(y), model.root_cause, set())))
    except IndexError:
        root_cause = ""
    toc = time.time()
    elapsed_time = toc - tic
    return {
        'timestamp': timestamp,
        'elapsed_time': elapsed_time,
        'root_cause': root_cause,
    }
def main(device=torch.device('cuda:0')):
    """Evaluate a user-selected depth model on the NYU test set.

    Prompts for the model type on stdin, restores its latest checkpoint and
    prints test error and loss.
    """
    # CLI arguments
    parser = arg.ArgumentParser(
        description='We all know what we are doing. Fighting!')
    parser.add_argument("--datasize", "-d", default="small", type=str,
                        help="data size you want to use, small, medium, total")
    # Parsing
    args = parser.parse_args()
    # Model selection comes interactively, not from the CLI.
    modelSelection = input(
        'Please input the type of model to be used(res50,dense121,dense169,mob_v2,mob):'
    )
    datasize = args.datasize
    filename = "nyu_new.zip"
    # NOTE(review): the original literal here was garbled ("data/(unknown)");
    # reconstructed as the data archive path built from `filename` -- confirm.
    pathname = f"data/{filename}"
    csv = "data/nyu_csv.zip"
    te_loader = getTestingData(datasize, csv, pathname,
                               batch_size=config(modelSelection + ".batch_size"))
    # Model
    if modelSelection.lower() == 'res50':
        model = Res50()
    elif modelSelection.lower() == 'dense121':
        model = Dense121()
    elif modelSelection.lower() == 'mob_v2':
        model = Mob_v2()
    elif modelSelection.lower() == 'dense169':
        model = Dense169()
    elif modelSelection.lower() == 'mob':
        model = Net()
    elif modelSelection.lower() == 'squeeze':
        model = Squeeze()
    else:
        assert False, 'Wrong type of model selection string!'
    model = model.to(device)
    # BUG FIX: original printed f"Loading {mdoelSelection}..." -- a misspelled
    # name that would raise NameError at runtime.
    print(f"Loading {modelSelection}...")
    # Attempts to restore the latest checkpoint if exists
    model, start_epoch, stats = utils.restore_checkpoint(
        model, utils.config(modelSelection + ".checkpoint"))
    acc, loss = utils.evaluate_model(model, te_loader, device, test=True)
    print(f'Test Error:{acc}')
    print(f'Test Loss:{loss}')
def executor(file_path: Path, output_path: Path, **kwargs) -> Dict:
    """Run Squeeze root-cause localization on a single fundamental-measure CSV.

    :param file_path: CSV with ``real`` and ``predict`` columns (parsed as float).
    :param output_path: directory where per-timestamp debug figures are saved.
    :param kwargs: forwarded to :class:`SqueezeOption`; ``debug`` is popped here.
    :return: dict with ``timestamp``, ``elapsed_time`` (seconds) and
        ``root_cause`` (string, empty if localization produced nothing).
    """
    # BUG FIX: original read `debug = kwargs.pop('debug', False),` -- the
    # trailing comma made `debug` a one-element tuple, which is truthy even
    # when the popped value is False.
    debug = kwargs.pop('debug', False)
    logger.remove()
    logger.add(
        sys.stdout, level='DEBUG',
        format=f"<yellow>{file_path.name}</yellow> - {LOGURU_FORMAT}",
        backtrace=True
    )
    logger.info(f"running squeeze for {file_path}")
    df = pd.read_csv(file_path.resolve(), engine='python',
                     dtype='str', delimiter=r"\s*,\s*")
    df['real'] = df['real'].astype(float)
    df['predict'] = df['predict'].astype(float)
    # BUG FIX: original used .rstrip('.csv'), which strips any trailing run of
    # the characters {., c, s, v} -- e.g. "123c.csv" would also lose its final
    # "c". Remove the literal suffix exactly once instead.
    stem = file_path.name
    if stem.endswith('.csv'):
        stem = stem[:-len('.csv')]
    try:
        timestamp = int(stem)
    except ValueError:
        timestamp = stem
        logger.warning(f"Unresolved timestamp: {timestamp}")
    tic = time.time()
    model = Squeeze(
        data_list=[df],
        op=lambda x: x,  # single measure: identity, no derived combination
        option=SqueezeOption(
            debug=debug,
            fig_save_path=f"{output_path.resolve()}/{timestamp}" + "{suffix}" + ".pdf",
            **kwargs,
        )
    )
    model.run()
    logger.info("\n" + model.report)
    try:
        # Union all root-cause sets, then render them as one string.
        root_cause = AC.batch_to_string(
            frozenset(reduce(lambda x, y: x.union(y), model.root_cause, set())))
    except IndexError:
        root_cause = ""
    toc = time.time()
    elapsed_time = toc - tic
    return {
        'timestamp': timestamp,
        'elapsed_time': elapsed_time,
        'root_cause': root_cause,
    }
def main():
    """Smoke-test the Squeeze model: push one random NYU-sized batch through
    it and print the input/output tensor shapes."""
    # IDIOM FIX: the local was named `input`, shadowing the builtin.
    x = torch.randn(5, 3, 480, 640)
    print(x.size())
    # Other backbones kept for quick manual switching:
    # model = Net()
    # model = Dense169()
    # model = Dense121()
    # model = Res50()
    # model = Mob_v2()
    model = Squeeze()
    output = model(x)
    print(output.size())
def main(device=torch.device('cuda:0')):
    """Run a restored depth model over the example PNGs and save colorized
    depth maps next to them (``<name>_<model>_result.png``)."""
    # Model selection comes interactively.
    modelSelection = input(
        'Please input the type of model to be used(res50,dense121,dense169,dense161,mob_v2,mob):'
    )
    if modelSelection.lower() == 'res50':
        model = Res50()
    elif modelSelection.lower() == 'dense121':
        model = Dense121()
    elif modelSelection.lower() == 'dense161':
        model = Dense161()
    elif modelSelection.lower() == 'mob_v2':
        model = Mob_v2()
    elif modelSelection.lower() == 'dense169':
        model = Dense169()
    elif modelSelection.lower() == 'mob':
        model = Net()
    elif modelSelection.lower() == 'squeeze':
        model = Squeeze()
    else:
        assert False, 'Wrong type of model selection string!'
    # Attempts to restore the latest checkpoint if exists
    print("Loading unet...")
    model, start_epoch, stats = utils.restore_checkpoint(
        model, utils.config(modelSelection + ".checkpoint"))
    # Get Test Images
    img_list = glob("examples/" + "*.png")
    # Set model to eval mode and move it to the target device once.
    # FIX: the original called `model = model.to(device)` twice (before the
    # checkpoint restore and again after eval()); one move suffices.
    model.eval()
    model = model.to(device)
    # Begin testing loop
    print("Begin Test Loop ...")
    for idx, img_name in enumerate(img_list):
        img = load_images([img_name])
        img = torch.Tensor(img).float().to(device)
        print("Processing {}, Tensor Shape: {}".format(img_name, img.shape))
        with torch.no_grad():
            preds = model(img).squeeze(0)
        output = colorize(preds.data)
        # (C, H, W) -> (H, W, C) for image writing.
        output = output.transpose((1, 2, 0))
        cv2.imwrite(
            img_name.split(".")[0] + "_" + modelSelection + "_result.png",
            output)
        print("Processing {} done.".format(img_name))
def main(device, tr_loader, va_loader, te_loader, modelSelection):
    """Train the selected CNN, checkpointing each epoch, and show training plots.

    :param device: torch device to train on.
    :param tr_loader: training data loader.
    :param va_loader: validation data loader.
    :param te_loader: test data loader (unused here; kept for interface parity).
    :param modelSelection: model-type key, e.g. ``'res50'`` or ``'mob'``.
    """
    # Model
    if modelSelection.lower() == 'res50':
        model = Res50()
    elif modelSelection.lower() == 'dense121':
        model = Dense121()
    elif modelSelection.lower() == 'dense161':
        model = Dense161()
    elif modelSelection.lower() == 'mobv2':
        model = Mob_v2()
    elif modelSelection.lower() == 'dense169':
        model = Dense169()
    elif modelSelection.lower() == 'mob':
        model = Net()
    elif modelSelection.lower() == 'squeeze':
        model = Squeeze()
    else:
        # FIX: was `assert False, ...` -- asserts are stripped under `python -O`,
        # which would leave `model` unbound and crash later with a NameError.
        raise ValueError('Wrong type of model selection string!')
    model = model.to(device)
    # Loss function and optimizer
    learning_rate = utils.config(modelSelection + ".learning_rate")
    criterion = DepthLoss(0.1).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    number_of_epoches = 10
    # Attempts to restore the latest checkpoint if exists
    print("Loading unet...")
    model, start_epoch, stats = utils.restore_checkpoint(
        model, utils.config(modelSelection + ".checkpoint"))
    # IDIOM: `stats.get(k, [])` replaces `[] if k not in stats else stats[k]`.
    running_va_loss = stats.get('va_loss', [])
    running_va_acc = stats.get('va_err', [])
    running_tr_loss = stats.get('tr_loss', [])
    running_tr_acc = stats.get('tr_err', [])
    # Record the restored model's metrics as the starting point of the curves.
    tr_acc, tr_loss = utils.evaluate_model(model, tr_loader, device)
    acc, loss = utils.evaluate_model(model, va_loader, device)
    running_va_acc.append(acc)
    running_va_loss.append(loss)
    running_tr_acc.append(tr_acc)
    running_tr_loss.append(tr_loss)
    stats = {
        'va_err': running_va_acc,
        'va_loss': running_va_loss,
        'tr_err': running_tr_acc,
        'tr_loss': running_tr_loss,
    }
    # Loop over the entire dataset multiple times
    epoch = start_epoch
    while epoch < number_of_epoches:
        # Train model
        utils.train_epoch(device, tr_loader, model, criterion, optimizer)
        # Save checkpoint
        utils.save_checkpoint(model, epoch + 1,
                              utils.config(modelSelection + ".checkpoint"), stats)
        # Evaluate model
        tr_acc, tr_loss = utils.evaluate_model(model, tr_loader, device)
        va_acc, va_loss = utils.evaluate_model(model, va_loader, device)
        running_va_acc.append(va_acc)
        running_va_loss.append(va_loss)
        running_tr_acc.append(tr_acc)
        running_tr_loss.append(tr_loss)
        epoch += 1
    print("Finished Training")
    utils.make_plot(running_tr_loss, running_tr_acc, running_va_loss,
                    running_va_acc)
def main(device, tr_loader, va_loader, te_loader, modelSelection):
    """Train the selected CNN, checkpointing each epoch, and show training plots.

    :param device: torch device to train on.
    :param tr_loader: training data loader.
    :param va_loader: validation data loader.
    :param te_loader: test data loader (unused here; kept for interface parity).
    :param modelSelection: model-type key, e.g. ``'res50'`` or ``'mob'``.
    """
    # Model (note: this variant has no 'dense161' option and expects 'mobv2'
    # without the underscore).
    if modelSelection.lower() == 'res50':
        model = Res50()
    elif modelSelection.lower() == 'dense121':
        model = Dense121()
    elif modelSelection.lower() == 'mobv2':
        model = Mob_v2()
    elif modelSelection.lower() == 'dense169':
        model = Dense169()
    elif modelSelection.lower() == 'mob':
        model = Net()
    elif modelSelection.lower() == 'squeeze':
        model = Squeeze()
    else:
        # FIX: was `assert False, ...` -- asserts are stripped under `python -O`,
        # which would leave `model` unbound and crash later with a NameError.
        raise ValueError('Wrong type of model selection string!')
    model = model.to(device)
    # Loss function and optimizer
    learning_rate = utils.config(modelSelection + ".learning_rate")
    criterion = DepthLoss(0.1).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    number_of_epoches = 10
    # Attempts to restore the latest checkpoint if exists
    print("Loading unet...")
    model, start_epoch, stats = utils.restore_checkpoint(
        model, utils.config(modelSelection + ".checkpoint"))
    # IDIOM: `stats.get(k, [])` replaces `[] if k not in stats else stats[k]`.
    running_va_loss = stats.get('va_loss', [])
    running_va_acc = stats.get('va_err', [])
    running_tr_loss = stats.get('tr_loss', [])
    running_tr_acc = stats.get('tr_err', [])
    # Record the restored model's metrics as the starting point of the curves.
    tr_acc, tr_loss = utils.evaluate_model(model, tr_loader, device)
    acc, loss = utils.evaluate_model(model, va_loader, device)
    running_va_acc.append(acc)
    running_va_loss.append(loss)
    running_tr_acc.append(tr_acc)
    running_tr_loss.append(tr_loss)
    stats = {
        'va_err': running_va_acc,
        'va_loss': running_va_loss,
        'tr_err': running_tr_acc,
        'tr_loss': running_tr_loss,
    }
    # Loop over the entire dataset multiple times
    epoch = start_epoch
    while epoch < number_of_epoches:
        # Train model
        utils.train_epoch(device, tr_loader, model, criterion, optimizer)
        # Save checkpoint
        utils.save_checkpoint(model, epoch + 1,
                              utils.config(modelSelection + ".checkpoint"), stats)
        # Evaluate model
        tr_acc, tr_loss = utils.evaluate_model(model, tr_loader, device)
        va_acc, va_loss = utils.evaluate_model(model, va_loader, device)
        running_va_acc.append(va_acc)
        running_va_loss.append(va_loss)
        running_tr_acc.append(tr_acc)
        running_tr_loss.append(tr_loss)
        epoch += 1
    print("Finished Training")
    utils.make_plot(running_tr_loss, running_tr_acc, running_va_loss,
                    running_va_acc)
def main(device=torch.device('cuda:0')):
    """Run a restored depth model over the example PNGs and save colorized
    depth maps next to them (``<name>_<model>_result.png``)."""
    # CLI arguments
    parser = arg.ArgumentParser(
        description='We all know what we are doing. Fighting!')
    parser.add_argument("--datasize", "-d", default="small", type=str,
                        help="data size you want to use, small, medium, total")
    # Parsing
    args = parser.parse_args()
    # Data loaders
    datasize = args.datasize
    pathname = "data/nyu.zip"
    # Model selection comes interactively.
    modelSelection = input(
        'Please input the type of model to be used(res50,dense121,dense169,mob_v2,mob):'
    )
    if modelSelection.lower() == 'res50':
        model = Res50()
    elif modelSelection.lower() == 'dense121':
        model = Dense121()
    elif modelSelection.lower() == 'mob_v2':
        model = Mob_v2()
    elif modelSelection.lower() == 'dense169':
        model = Dense169()
    elif modelSelection.lower() == 'mob':
        model = Net()
    elif modelSelection.lower() == 'squeeze':
        model = Squeeze()
    else:
        assert False, 'Wrong type of model selection string!'
    # Attempts to restore the latest checkpoint if exists
    print("Loading unet...")
    model, start_epoch, stats = utils.restore_checkpoint(
        model, utils.config(modelSelection + ".checkpoint"))
    # Get Test Images
    img_list = glob("examples/" + "*.png")
    # Set model to eval mode and move it to the target device once.
    # FIX: the original called `model = model.to(device)` twice (before the
    # checkpoint restore and again after eval()); one move suffices.
    model.eval()
    model = model.to(device)
    # Begin testing loop
    print("Begin Test Loop ...")
    for idx, img_name in enumerate(img_list):
        img = load_images([img_name])
        img = torch.Tensor(img).float().to(device)
        print("Processing {}, Tensor Shape: {}".format(img_name, img.shape))
        with torch.no_grad():
            preds = model(img).squeeze(0)
        output = colorize(preds.data)
        # (C, H, W) -> (H, W, C) for image writing.
        output = output.transpose((1, 2, 0))
        cv2.imwrite(
            img_name.split(".")[0] + "_" + modelSelection + "_result.png",
            output)
        print("Processing {} done.".format(img_name))