def main(device=torch.device('cuda:0')):
    """Evaluate a Dense169 depth model on the test set and render example predictions.

    Parses the ``--datasize`` CLI flag, builds the data loaders, restores the
    latest checkpoint, reports test error/loss, then colorizes predictions for
    every PNG under ``examples/``.
    """
    # CLI arguments
    parser = arg.ArgumentParser(
        description='We all know what we are doing. Fighting!')
    parser.add_argument("--datasize", "-d", default="small", type=str,
                        help="data size you want to use, small, medium, total")
    args = parser.parse_args()

    # Data loaders
    datasize = args.datasize
    pathname = "data/nyu.zip"
    tr_loader, va_loader, te_loader = getTrainingValidationTestingData(
        datasize, pathname, batch_size=config("unet.batch_size"))

    # Model (the alternative backbones previously kept as commented-out code
    # were removed; this script is Dense169-specific).
    model = Dense169().to(device)

    # Attempt to restore the latest checkpoint if one exists.
    # BUG FIX: the log message said "unet" while the model being loaded is Dense169.
    print("Loading Dense169...")
    model, start_epoch, stats = utils.restore_checkpoint(
        model, utils.config("unet.checkpoint"))

    acc, loss = utils.evaluate_model(model, te_loader, device)
    print(f'Test Error:{acc}')
    print(f'Test Loss:{loss}')

    # Gather the example images and run inference on each.
    img_list = glob("examples/" + "*.png")
    model.eval()  # model is already on `device`; the redundant second .to() was dropped

    print("Begin Test Loop ...")
    for img_name in img_list:
        img = torch.Tensor(load_images([img_name])).float().to(device)
        print("Processing {}, Tensor Shape: {}".format(img_name, img.shape))
        with torch.no_grad():
            preds = model(img).squeeze(0)
        # Colorize the predicted depth map and convert CHW -> HWC for OpenCV.
        output = colorize(preds.data).transpose((1, 2, 0))
        cv2.imwrite(img_name.split(".")[0] + "_result.png", output)
        print("Processing {} done.".format(img_name))
def main(device=torch.device('cuda:0')):
    """Evaluate a user-selected depth model on the testing data.

    Reads the model type interactively, builds the testing loader, restores
    the latest checkpoint, and prints test error and loss.
    """
    # CLI arguments
    parser = arg.ArgumentParser(
        description='We all know what we are doing. Fighting!')
    parser.add_argument("--datasize", "-d", default="small", type=str,
                        help="data size you want to use, small, medium, total")
    args = parser.parse_args()

    # Model selection (interactive).
    modelSelection = input(
        'Please input the type of model to be used(res50,dense121,dense169,mob_v2,mob):'
    )

    # Data loaders
    datasize = args.datasize
    filename = "nyu_new.zip"
    # BUG FIX: `filename` was assigned but never interpolated into the path
    # (the original f-string contained no placeholder).
    pathname = f"data/{filename}"
    csv = "data/nyu_csv.zip"
    te_loader = getTestingData(datasize,
                               csv,
                               pathname,
                               batch_size=config(modelSelection + ".batch_size"))

    # Map the selection string to a model constructor.
    factories = {
        'res50': Res50,
        'dense121': Dense121,
        'mob_v2': Mob_v2,
        'dense169': Dense169,
        'mob': Net,
        'squeeze': Squeeze,
    }
    key = modelSelection.lower()
    assert key in factories, 'Wrong type of model selection string!'
    model = factories[key]().to(device)

    # Attempt to restore the latest checkpoint if one exists.
    # BUG FIX: `mdoelSelection` was a typo that raised NameError at runtime.
    print(f"Loading {modelSelection}...")
    model, start_epoch, stats = utils.restore_checkpoint(
        model, utils.config(modelSelection + ".checkpoint"))

    acc, loss = utils.evaluate_model(model, te_loader, device, test=True)
    print(f'Test Error:{acc}')
    print(f'Test Loss:{loss}')
def main(device=torch.device('cuda:0')):
    """Run a user-selected depth model on the example images and save colorized results.

    Restores the model's latest checkpoint, then writes a
    ``<name>_<model>_result.png`` next to each PNG found under ``examples/``.
    """
    # Model selection (interactive).
    modelSelection = input(
        'Please input the type of model to be used(res50,dense121,dense169,dense161,mob_v2,mob):'
    )
    # Map the selection string to a model constructor.
    factories = {
        'res50': Res50,
        'dense121': Dense121,
        'dense161': Dense161,
        'mob_v2': Mob_v2,
        'dense169': Dense169,
        'mob': Net,
        'squeeze': Squeeze,
    }
    key = modelSelection.lower()
    assert key in factories, 'Wrong type of model selection string!'
    model = factories[key]().to(device)

    # Attempt to restore the latest checkpoint if one exists.
    # BUG FIX: the message said "unet" regardless of which model was selected.
    print(f"Loading {modelSelection}...")
    model, start_epoch, stats = utils.restore_checkpoint(
        model, utils.config(modelSelection + ".checkpoint"))

    # Gather the example images and run inference on each.
    img_list = glob("examples/" + "*.png")
    model.eval()  # already on `device`; the redundant second .to() was dropped

    print("Begin Test Loop ...")
    for img_name in img_list:
        img = torch.Tensor(load_images([img_name])).float().to(device)
        print("Processing {}, Tensor Shape: {}".format(img_name, img.shape))
        with torch.no_grad():
            preds = model(img).squeeze(0)
        # Colorize the predicted depth map and convert CHW -> HWC for OpenCV.
        output = colorize(preds.data).transpose((1, 2, 0))
        cv2.imwrite(
            img_name.split(".")[0] + "_" + modelSelection + "_result.png",
            output)
        print("Processing {} done.".format(img_name))
def __init__(self, imgH, nc, nclass, nh, n_rnn=2, leakyRelu=False):
    """Build the CRNN: a Dense169 CNN backbone feeding a 2-layer bidirectional LSTM.

    Args:
        imgH: input image height; must be a multiple of 16.
        nc: input channel count (unused now that the backbone is Dense169;
            kept for interface compatibility).
        nclass: number of output classes produced by the final LSTM layer.
        nh: hidden size of the recurrent layers.
        n_rnn: kept for interface compatibility; the RNN depth is fixed at 2.
        leakyRelu: kept for interface compatibility; unused with Dense169.
    """
    super(CRNN, self).__init__()
    assert imgH % 16 == 0, 'imgH has to be a multiple of 16'
    # The original hand-built 7-conv backbone (and its kernel/stride/padding
    # tables ks/ps/ss/nm) was dead, commented-out code and has been removed;
    # Dense169 now serves as the feature extractor.
    self.cnn = Dense169(32)
    # Two stacked BiLSTMs: 512-dim features -> nh hidden -> nclass outputs.
    self.rnn = nn.Sequential(BidirectionalLSTM(512, nh, nh),
                             BidirectionalLSTM(nh, nh, nclass))
def main(device, tr_loader, va_loader, te_loader, modelSelection):
    """Train the selected CNN, checkpointing each epoch, and show training plots.

    Args:
        device: torch device to train on.
        tr_loader / va_loader / te_loader: training / validation / test loaders.
        modelSelection: model-type string; also keys the config entries.
    """
    # Map the selection string to a model constructor.
    factories = {
        'res50': Res50,
        'dense121': Dense121,
        'dense161': Dense161,
        'mobv2': Mob_v2,
        'mob_v2': Mob_v2,  # accept both spellings used across the scripts
        'dense169': Dense169,
        'mob': Net,
        'squeeze': Squeeze,
    }
    key = modelSelection.lower()
    assert key in factories, 'Wrong type of model selection string!'
    model = factories[key]().to(device)

    # Loss function and optimizer.
    learning_rate = utils.config(modelSelection + ".learning_rate")
    criterion = DepthLoss(0.1).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    number_of_epoches = 10

    # Attempt to restore the latest checkpoint if one exists.
    # BUG FIX: the message said "unet" regardless of which model was selected.
    print(f"Loading {modelSelection}...")
    model, start_epoch, stats = utils.restore_checkpoint(
        model, utils.config(modelSelection + ".checkpoint"))

    # Resume the running statistics from the checkpoint when available.
    running_va_loss = [] if 'va_loss' not in stats else stats['va_loss']
    running_va_acc = [] if 'va_err' not in stats else stats['va_err']
    running_tr_loss = [] if 'tr_loss' not in stats else stats['tr_loss']
    running_tr_acc = [] if 'tr_err' not in stats else stats['tr_err']

    # Record the pre-training performance as the starting point.
    tr_acc, tr_loss = utils.evaluate_model(model, tr_loader, device)
    acc, loss = utils.evaluate_model(model, va_loader, device)
    running_va_acc.append(acc)
    running_va_loss.append(loss)
    running_tr_acc.append(tr_acc)
    running_tr_loss.append(tr_loss)
    stats = {
        'va_err': running_va_acc,
        'va_loss': running_va_loss,
        'tr_err': running_tr_acc,
        'tr_loss': running_tr_loss,
    }

    # Train until the target epoch count is reached, saving after every epoch.
    epoch = start_epoch
    while epoch < number_of_epoches:
        utils.train_epoch(device, tr_loader, model, criterion, optimizer)
        utils.save_checkpoint(model, epoch + 1,
                              utils.config(modelSelection + ".checkpoint"),
                              stats)
        tr_acc, tr_loss = utils.evaluate_model(model, tr_loader, device)
        va_acc, va_loss = utils.evaluate_model(model, va_loader, device)
        running_va_acc.append(va_acc)
        running_va_loss.append(va_loss)
        running_tr_acc.append(tr_acc)
        running_tr_loss.append(tr_loss)
        epoch += 1

    print("Finished Training")
    utils.make_plot(running_tr_loss, running_tr_acc, running_va_loss,
                    running_va_acc)
def main(device, tr_loader, va_loader, te_loader, modelSelection):
    """Train the selected CNN, checkpointing each epoch, and show training plots.

    Args:
        device: torch device to train on.
        tr_loader / va_loader / te_loader: training / validation / test loaders.
        modelSelection: model-type string; also keys the config entries.
    """
    # (The dead, commented-out argparse/data-loader scaffolding was removed;
    # the loaders are injected by the caller.)

    # Map the selection string to a model constructor.
    factories = {
        'res50': Res50,
        'dense121': Dense121,
        'mobv2': Mob_v2,
        'mob_v2': Mob_v2,  # accept both spellings used across the scripts
        'dense169': Dense169,
        'mob': Net,
        'squeeze': Squeeze,
    }
    key = modelSelection.lower()
    assert key in factories, 'Wrong type of model selection string!'
    model = factories[key]().to(device)

    # Loss function and optimizer.
    learning_rate = utils.config(modelSelection + ".learning_rate")
    criterion = DepthLoss(0.1).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    number_of_epoches = 10

    # Attempt to restore the latest checkpoint if one exists.
    # BUG FIX: the message said "unet" regardless of which model was selected.
    print(f"Loading {modelSelection}...")
    model, start_epoch, stats = utils.restore_checkpoint(
        model, utils.config(modelSelection + ".checkpoint"))

    # Resume the running statistics from the checkpoint when available.
    running_va_loss = [] if 'va_loss' not in stats else stats['va_loss']
    running_va_acc = [] if 'va_err' not in stats else stats['va_err']
    running_tr_loss = [] if 'tr_loss' not in stats else stats['tr_loss']
    running_tr_acc = [] if 'tr_err' not in stats else stats['tr_err']

    # Record the pre-training performance as the starting point.
    tr_acc, tr_loss = utils.evaluate_model(model, tr_loader, device)
    acc, loss = utils.evaluate_model(model, va_loader, device)
    running_va_acc.append(acc)
    running_va_loss.append(loss)
    running_tr_acc.append(tr_acc)
    running_tr_loss.append(tr_loss)
    stats = {
        'va_err': running_va_acc,
        'va_loss': running_va_loss,
        'tr_err': running_tr_acc,
        'tr_loss': running_tr_loss,
    }

    # Train until the target epoch count is reached, saving after every epoch.
    epoch = start_epoch
    while epoch < number_of_epoches:
        utils.train_epoch(device, tr_loader, model, criterion, optimizer)
        utils.save_checkpoint(model, epoch + 1,
                              utils.config(modelSelection + ".checkpoint"),
                              stats)
        tr_acc, tr_loss = utils.evaluate_model(model, tr_loader, device)
        va_acc, va_loss = utils.evaluate_model(model, va_loader, device)
        running_va_acc.append(va_acc)
        running_va_loss.append(va_loss)
        running_tr_acc.append(tr_acc)
        running_tr_loss.append(tr_loss)
        epoch += 1

    print("Finished Training")
    utils.make_plot(running_tr_loss, running_tr_acc, running_va_loss,
                    running_va_acc)
import torch.nn as nn
from dense169 import Dense169
#from .Dense169 import Dense169
# NOTE(review): stray module-level call below — instantiates a Dense169 at
# import time and discards it; presumably a leftover smoke test. Confirm
# whether it can be removed.
Dense169()


class BidirectionalLSTM(nn.Module):
    # Sequence model: a bidirectional LSTM whose concatenated forward/backward
    # hidden states are projected to `nOut` features per time step.

    def __init__(self, nIn, nHidden, nOut):
        # nIn: input feature size; nHidden: LSTM hidden size per direction;
        # nOut: output feature size after the linear projection.
        super(BidirectionalLSTM, self).__init__()
        self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True)
        # Bidirectional output is 2 * nHidden wide, hence the linear input size.
        self.embedding = nn.Linear(nHidden * 2, nOut)

    def forward(self, input):
        # Assumes `input` is (T, b, nIn) — seq-first, the nn.LSTM default.
        recurrent, _ = self.rnn(input)
        T, b, h = recurrent.size()
        # Flatten time and batch so one Linear call projects every step.
        t_rec = recurrent.view(T * b, h)
        output = self.embedding(t_rec)  # [T * b, nOut]
        output = output.view(T, b, -1)
        return output


class CRNN(nn.Module):
    # NOTE(review): this class is truncated in the visible source — its
    # __init__ body is cut off below; the remainder is outside this chunk.

    def __init__(self, imgH, nc, nclass, nh, n_rnn=2, leakyRelu=False):
        super(CRNN, self).__init__()
        assert imgH % 16 == 0, 'imgH has to be a multiple of 16'
        ks = [3, 3, 3, 3, 3, 3, 2]
def main(device=torch.device('cuda:0')):
    """Load a user-selected depth model and render predictions for the example images.

    Parses the CLI (for interface compatibility), restores the model's latest
    checkpoint, then writes a ``<name>_<model>_result.png`` next to each PNG
    under ``examples/``.
    """
    # CLI arguments (still parsed so existing invocations keep working; the
    # unused `datasize`/`pathname` locals from the original were removed —
    # this script builds no data loaders).
    parser = arg.ArgumentParser(
        description='We all know what we are doing. Fighting!')
    parser.add_argument("--datasize", "-d", default="small", type=str,
                        help="data size you want to use, small, medium, total")
    args = parser.parse_args()

    # Model selection (interactive).
    modelSelection = input(
        'Please input the type of model to be used(res50,dense121,dense169,mob_v2,mob):'
    )
    factories = {
        'res50': Res50,
        'dense121': Dense121,
        'mob_v2': Mob_v2,
        'dense169': Dense169,
        'mob': Net,
        'squeeze': Squeeze,
    }
    key = modelSelection.lower()
    assert key in factories, 'Wrong type of model selection string!'
    model = factories[key]().to(device)

    # Attempt to restore the latest checkpoint if one exists.
    # BUG FIX: the message said "unet" regardless of which model was selected.
    print(f"Loading {modelSelection}...")
    model, start_epoch, stats = utils.restore_checkpoint(
        model, utils.config(modelSelection + ".checkpoint"))

    # Gather the example images and run inference on each.
    img_list = glob("examples/" + "*.png")
    model.eval()  # already on `device`; the redundant second .to() was dropped

    print("Begin Test Loop ...")
    for img_name in img_list:
        img = torch.Tensor(load_images([img_name])).float().to(device)
        print("Processing {}, Tensor Shape: {}".format(img_name, img.shape))
        with torch.no_grad():
            preds = model(img).squeeze(0)
        # Colorize the predicted depth map and convert CHW -> HWC for OpenCV.
        output = colorize(preds.data).transpose((1, 2, 0))
        cv2.imwrite(
            img_name.split(".")[0] + "_" + modelSelection + "_result.png",
            output)
        print("Processing {} done.".format(img_name))