def __init__(self, model_id, project_dir):
    super(DeepLab_Plus, self).__init__()
    self.model_id = model_id
    self.project_dir = project_dir
    self.create_model_dirs()
    self.network = DeepLabV3(self.model_id, self.project_dir)
    self.network.load_state_dict(
        torch.load("pretrained_models/model_13_2_2_2_epoch_580.pth",
                   map_location='cpu'))
    self.conv1 = nn.Conv2d(in_channels=512, out_channels=8, kernel_size=3, padding=1)
    self.fc = nn.Linear(in_features=1296, out_features=2)
def main():
    dataloaders = {
        'train': torch.utils.data.DataLoader(image_datasets['train'],
                                             batch_size=1, shuffle=True, num_workers=8),
        'val': torch.utils.data.DataLoader(image_datasets['val'],
                                           batch_size=1, shuffle=True, num_workers=8)
    }
    device = torch.device("cuda:" + str(gpu))

    #################################
    # labels = [0, 1]
    nclasses = len(labels)
    # model = Unet2D(1, nclasses)
    model = DeepLabV3(len(labels))
    model = model.to(device)
    # model.half()  # convert to half precision
    # for layer in model.modules():
    #     if isinstance(layer, nn.BatchNorm2d):
    #         layer.float()
    #################################

    # weights = None
    criterion = DiceLoss()
    # criterion = nn.CrossEntropyLoss()
    optimizer_conv = optim.Adam(model.parameters(), lr=0.01)
    # optimizer_conv = Adam16(model.parameters(), lr=1e-3)
    exp_lr_scheduler = lr_scheduler.ReduceLROnPlateau(optimizer_conv)
    # print(model)
    epochs = 20
    train_model(model, criterion, optimizer_conv, exp_lr_scheduler,
                dataloaders, device, num_epochs=epochs)
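# A minimal sketch (an assumption, not the source's train_model) of how a ReduceLROnPlateau
# scheduler is usually driven: step it with the monitored validation loss so the learning
# rate drops only when that loss plateaus. Assumes the loaders yield (image, mask) pairs
# and that the criterion takes (prediction, target).
def train_model_sketch(model, criterion, optimizer, scheduler, dataloaders, device, num_epochs=20):
    for epoch in range(num_epochs):
        model.train()
        for images, masks in dataloaders['train']:
            images, masks = images.to(device), masks.to(device)
            optimizer.zero_grad()
            loss = criterion(model(images), masks)
            loss.backward()
            optimizer.step()

        model.eval()
        val_loss, n_batches = 0.0, 0
        with torch.no_grad():
            for images, masks in dataloaders['val']:
                images, masks = images.to(device), masks.to(device)
                val_loss += criterion(model(images), masks).item()
                n_batches += 1
        scheduler.step(val_loss / max(n_batches, 1))  # ReduceLROnPlateau expects the monitored metric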
def segmentation(filename, output_file="output.jpg"):
    """
    Input:
        filename: location of a (1024x2048x3) png input image
        output_file: filename for the segmented output image,
            size (1024x2048x1) with labels 0-19 for each pixel
    Outputs:
        output file saved under the given name
    """
    # Load pre-trained network
    model = DeepLabV3(1, ".")
    if torch.cuda.is_available():
        model.load_state_dict(torch.load("model_13_2_2_2_epoch_580.pth"))
        model = model.cuda()
    else:
        model.load_state_dict(
            torch.load("model_13_2_2_2_epoch_580.pth",
                       map_location=lambda storage, loc: storage))
    model.eval()

    # Preprocess input
    transformation = transforms.Compose([
        transforms.ToTensor(),
    ])
    # image = Image.open("stuttgart_00_000000_000080_leftImg8bit.png")
    image = Image.open(filename)
    image_tensor = transformation(image).float()
    image_tensor = image_tensor.unsqueeze_(0)
    if torch.cuda.is_available():
        image_tensor = image_tensor.cuda()  # .cuda() is not in-place, so reassign

    # Run model
    input = Variable(image_tensor)
    output = model(input)

    # Output
    out_img = np.argmax(output.detach().cpu().numpy(), axis=1).astype(np.uint8)[0, :, :]
    np.save("output.npy", out_img)
    imsave(output_file, out_img)
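# Example usage; the input frame name comes from the commented-out line inside
# segmentation() above, the output name is a placeholder:
if __name__ == "__main__":
    segmentation("stuttgart_00_000000_000080_leftImg8bit.png",
                 output_file="stuttgart_00_seg.jpg")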
parser = argparse.ArgumentParser(description='Compute Confusion Matrix')
print(os.getcwd())
parser.add_argument('--model_path', type=str,
                    default=os.getcwd() + '/pretrained_models/model_13_2_2_2_epoch_580.pth')
parser.add_argument('--fog_scale', type=float, default=0.005)
parser.add_argument('--dataset_path', type=str, default='data/cityscapes')
parser.add_argument('--compute_unperturbed', action='store_true')
args = parser.parse_args()

device = 'cuda' if torch.cuda.is_available() else 'cpu'
model_name = '0'
batch_size = 8

model = DeepLabV3(model_name, project_dir=os.getcwd()).to(device)
model.load_state_dict(torch.load(args.model_path, map_location=device))  # map_location lets the checkpoint load on CPU-only machines too
print(model)

test_datasets1 = DATASET_CITYSCAPES(cityscapes_path='data/cityscapes', split='val')
test_datasets2 = DATASET_CITYSCAPES_FOGGY(cityscapes_path='data/cityscapes', split='val',
                                          fog_scale=args.fog_scale)
test_loader1 = torch.utils.data.DataLoader(dataset=test_datasets1, batch_size=batch_size,
                                           shuffle=False, num_workers=4)
test_loader2 = torch.utils.data.DataLoader(dataset=test_datasets2, batch_size=batch_size,
                                           shuffle=False, num_workers=4)
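# A minimal sketch (an assumption, not from this script) of accumulating a per-pixel
# confusion matrix over one of the loaders; num_classes is a placeholder, and the loader
# is assumed to yield (image, label_map) pairs.
num_classes = 20
conf_mat = torch.zeros(num_classes, num_classes, dtype=torch.int64)
model.eval()
with torch.no_grad():
    for imgs, label_imgs in test_loader1:
        preds = model(imgs.to(device)).argmax(dim=1).cpu()
        valid = (label_imgs >= 0) & (label_imgs < num_classes)
        conf_mat += torch.bincount(
            num_classes * label_imgs[valid].long() + preds[valid],
            minlength=num_classes ** 2).reshape(num_classes, num_classes)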
import torch.utils.data
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F

import numpy as np
import pickle
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import cv2

batch_size = 2

network = DeepLabV3("eval_val", project_dir="..").cuda()
network.load_state_dict(
    torch.load("../pretrained_models/model_13_2_2_2_epoch_580.pth"))

val_dataset = DatasetVal(cityscapes_data_path="../data/cityscapes",
                         cityscapes_meta_path="../data/cityscapes/meta")

num_val_batches = int(len(val_dataset) / batch_size)
print("num_val_batches:", num_val_batches)

val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
                                         batch_size=batch_size, shuffle=False,
                                         num_workers=1)

with open("../data/cityscapes/meta/class_weights.pkl", "rb") as file:  # (needed for python3)
    class_weights = np.array(pickle.load(file))
class_weights = torch.from_numpy(class_weights)
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='Scratch segmentation Example')
    parser.add_argument('--batch-size', type=int, default=1, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=30, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    args = parser.parse_args()

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")
    print('my device is :', device)

    kwargs = {'num_workers': 0, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(FarmDataset(istrain=True),
                                               batch_size=args.batch_size,
                                               shuffle=True, drop_last=True,
                                               **kwargs)

    startepoch = 0
    # model = torch.load('./tmp/model{}'.format(startepoch)) if startepoch else segnet(3, 4).to(device)
    # model = torch.load('./tmp/model{}'.format(startepoch)) if startepoch else unet(3, 4).to(device)
    # model = torch.load('./tmp/model{}'.format(startepoch)) if startepoch else nested_unet(3, 4).to(device)
    model = torch.load('./tmp/model{}'.format(startepoch)) if startepoch else DeepLabV3().to(device)

    args.epochs = 5
    # optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
    learning_rate = 1e-4
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    train_losses, train_mious = [], []
    val_losses, val_mious = [], []
    for epoch in range(startepoch, args.epochs + 1):  # relu+bn
        train_loss, train_miou = train(args, model, device, train_loader, optimizer, epoch)
        val_loss, val_miou = val(args, model, device,
                                 valDataset(istrain=True, isaug=False), issave=False)
        train_losses.append(train_loss)
        train_mious.append(train_miou)
        val_losses.append(val_loss)
        val_mious.append(val_miou)
        if epoch % 3 == 0:
            print(epoch)
            val(args, model, device, valDataset(istrain=True, isaug=False), issave=False)
            torch.save(model, './tmp/model{}'.format(epoch))

    plt.plot(range(1, len(train_losses) + 1), train_losses, 'bo', label='train loss')
    plt.plot(range(1, len(val_losses) + 1), val_losses, 'r', label='val loss')
    plt.legend()
    plt.show()

    plt.plot(range(1, len(train_mious) + 1), train_mious, 'bo', label='train miou')
    plt.plot(range(1, len(val_mious) + 1), val_mious, 'r', label='val miou')
    plt.legend()
    plt.show()
rows, cols = 5, 2  # Show 10 images in the dataset along with target and predicted masks
device = torch.device("cuda")  # if torch.cuda.is_available() else "cpu")
num_gpu = list(range(torch.cuda.device_count()))

testloader, test_dst = load_dataset(batch_size, num_workers, split=test_split)

# Creating an instance of the model
# model = Segnet(n_classes)                             # Fully Convolutional Networks
# model = U_Net(img_ch=3, output_ch=n_classes)          # U Network
# model = R2U_Net(img_ch=3, output_ch=n_classes, t=2)   # Residual Recurrent U Network, R2Unet (t=2)
# model = R2U_Net(img_ch=3, output_ch=n_classes, t=3)   # Residual Recurrent U Network, R2Unet (t=3)
# model = RecU_Net(img_ch=3, output_ch=n_classes, t=2)  # Recurrent U Network, RecUnet (t=2)
# model = ResU_Net(img_ch=3, output_ch=n_classes)       # Residual U Network, ResUnet
# model = DeepLabV3(n_classes, 'vgg')                   # DeepLabV3, VGG backbone
model = DeepLabV3(n_classes, 'resnet')                  # DeepLabV3, ResNet backbone

print('Evaluation logs for model: {}'.format(model.__class__.__name__))
model = nn.DataParallel(model, device_ids=num_gpu).to(device)
model_params = torch.load(os.path.join(expt_logdir, "{}".format(ckpt_name)))
model.load_state_dict(model_params)

# Visualization of test data
test_vis = Vis(test_dst, expt_logdir, rows, cols)
# Metrics calculator for test data
test_metrics = Metrics(n_classes, testloader, test_split, device, expt_logdir)

model.eval()
for i, (inputs, labels) in enumerate(testloader):
    inputs = inputs.to(device)
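    # A minimal sketch (an assumption, not from this script) of how the loop body could
    # continue; the exact Vis/Metrics update calls are project-specific and omitted.
    labels = labels.to(device)
    with torch.no_grad():
        outputs = model(inputs)               # (N, n_classes, H, W) logits
    preds = torch.argmax(outputs, dim=1)      # (N, H, W) predicted label map
    # e.g. feed preds/labels into test_metrics and test_vis here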
import matplotlib.pyplot as plt
import cv2
import glob


def getEpoch(checkpoint_name):
    filename_w_ext = os.path.basename(checkpoint_name)
    filename, file_extension = os.path.splitext(filename_w_ext)
    filenames = filename.split("_")
    return filenames[3]


batch_size = 2
model_id = 1

network = DeepLabV3("eval_val", project_dir=default_path).cuda()
# network.load_state_dict(torch.load(os.path.join(default_path, 'training_logs/model_1/checkpoints/model_1_epoch_251.pth')))

# check last checkpoint
data_list = glob.glob(
    os.path.join(
        default_path, 'training_logs/model_' + str(model_id) + '/checkpoints/model_' +
        str(model_id) + '_*.pth'))

# find latest checkpoint
latest_epoch = 0
for name in list(data_list):
    if latest_epoch < int(getEpoch(name)):
        latest_epoch = int(getEpoch(name))

if latest_epoch != 0:
    network.load_state_dict(
        torch.load(
            os.path.join(
                default_path, 'training_logs/model_' + str(model_id) + '/checkpoints/model_' +
                str(model_id) + '_epoch_' + str(latest_epoch) + '.pth')))
    train_data = VOCSegmentation('data/')
    val_data = VOCSegmentation(
        'data/',
        image_set='val',
    )
elif dataset_type == 'cityscapes':
    train_data = Cityscapes('data/', h=256, w=512)
    val_data = Cityscapes('data/', split='val', h=256, w=512)
elif dataset_type == 'fss1000':
    train_data = FSS1000('data/', h=256, w=256)
    val_data = FSS1000('data', image_set='val', h=256, w=256)

train_loader = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True)
val_loader = DataLoader(val_data, batch_size=BATCH_SIZE, shuffle=False)

model = DeepLabV3(num_classes=NUM_CLASSES)
if resume_training:
    if dataset_type == 'voc':
        model = torch.load(f'{checkpoint_dir}/voc.pt')
    elif dataset_type == 'cityscapes':
        model = torch.load(f'{checkpoint_dir}/cityscapes.pt')
    elif dataset_type == 'fss1000':
        model = torch.load(f'{checkpoint_dir}/fss1000.pt')
model.to(device)

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)
    10: 23,
    11: 24,
    12: 25,
    13: 26,
    14: 27,
    15: 28,
    16: 31,
    17: 32,
    18: 33,
    19: 0
}
trainId_to_id_map_func = np.vectorize(trainId_to_id.get)

batch_size = 2

network = DeepLabV3("val_result", project_dir="..").cuda()
network.load_state_dict(
    torch.load("../pretrained_models/model_13_2_2_2_epoch_580.pth"))

val_dataset = DatasetVal(cityscapes_data_path="../data/cityscapes",
                         cityscapes_meta_path="../data/cityscapes/meta")

num_val_batches = int(len(val_dataset) / batch_size)
print("num_val_batches:", num_val_batches)

val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
                                         batch_size=batch_size, shuffle=False,
                                         num_workers=1)

with open("../data/cityscapes/meta/class_weights.pkl", "rb") as file:  # (needed for python3)
    class_weights = np.array(pickle.load(file))
class_weights = torch.from_numpy(class_weights)
import numpy as np

from dataset import CityScapeDataSet
from deeplabv3 import DeepLabV3
from score import SegmentationMetric

# dataset
batch_size = 2
dataset = CityScapeDataSet()
data_loader = data.DataLoader(dataset, batch_size, shuffle=True, drop_last=True)

# model
model = DeepLabV3()
model.cuda()

# define optimizer
optimizer = optim.Adam(model.parameters())

# define loss function
loss_fn = nn.CrossEntropyLoss()

# Metric
metric = SegmentationMetric(model.num_classes)

for epoch in range(100):
    metric.reset()
    for step, (input_tensor, label_tensor) in enumerate(data_loader):
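        # A minimal sketch of what the inner training step above might look like; the
        # SegmentationMetric update call below is an assumption, not from the source.
        input_tensor = input_tensor.cuda()
        label_tensor = label_tensor.long().cuda()  # CrossEntropyLoss expects int64 (N, H, W) labels

        optimizer.zero_grad()
        logits = model(input_tensor)          # (N, num_classes, H, W)
        loss = loss_fn(logits, label_tensor)
        loss.backward()
        optimizer.step()

        # hypothetical metric update; adapt to the actual SegmentationMetric API
        # metric.update(logits.argmax(dim=1), label_tensor)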
import pickle
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import cv2
import time

# NOTE! NOTE! change this to not overwrite all log data when you train the model:
model_id = "1"

num_epochs = 100
batch_size = 8
learning_rate = 0.0001

network = DeepLabV3(model_id, project_dir=".").cuda()

train_dataset = DatasetTrain(cityscapes_data_path="./data/cityscapes",
                             cityscapes_meta_path="./data/cityscapes/meta")
val_dataset = DatasetVal(cityscapes_data_path="./data/cityscapes",
                         cityscapes_meta_path="./data/cityscapes/meta")

num_train_batches = int(len(train_dataset) / batch_size)
num_val_batches = int(len(val_dataset) / batch_size)
print("num_train_batches:", num_train_batches)
print("num_val_batches:", num_val_batches)

train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size, shuffle=True,
                                           num_workers=4)
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import cv2
import time

# NOTE! NOTE! change this to not overwrite all log data when you train the model:
model_id = "1"

num_epochs = 1000
batch_size = 3
learning_rate = 0.0001  # should not stay fixed: add learning-rate scheduling and decay it once around iteration 400

network = DeepLabV3(
    model_id,
    project_dir="/workspace/mnt/group/other/luchao/deeplabv3/deeplabv3").cuda()

train_dataset = DatasetTrain(
    cityscapes_data_path="/workspace/mnt/group/other/luchao/deeplabv3/deeplabv3/data/cityscapes",
    cityscapes_meta_path="/workspace/mnt/group/other/luchao/deeplabv3/deeplabv3/data/cityscapes/meta")
val_dataset = DatasetVal(
    cityscapes_data_path="/workspace/mnt/group/other/luchao/deeplabv3/deeplabv3/data/cityscapes",
    cityscapes_meta_path="/workspace/mnt/group/other/luchao/deeplabv3/deeplabv3/data/cityscapes/meta")
    for x in ["train", "validation"]
}
dataloaders = {
    x: torch.utils.data.DataLoader(
        image_datasets[x],
        batch_size=batch_size,
        # shuffle=x == "train",
        sampler=data_sampler[x],
        num_workers=2)
    for x in ['train', 'validation']
}

use_gpu = torch.cuda.is_available()

model = DeepLabV3(model_id="1", project_dir=current)
model.load_state_dict(
    torch.load(
        os.path.join(current, "pretrained_models/model_13_2_2_2_epoch_580.pth")))
model.aspp.conv_1x1_4 = nn.Conv2d(256, 5, kernel_size=1)  # replace the final ASPP classifier to output 5 classes
# model.load_state_dict()
"""
for idx, p in enumerate(model.parameters()):
    if idx != 0:
        p.requires_grad = False
"""
if use_gpu:
    # torch.distributed.init_process_group(backend="nccl")
    model = nn.DataParallel(model).to(device)
    # model = model.cuda()
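# A minimal sketch (an assumption, not from this script) of fine-tuning only the replaced
# 5-class head while keeping the pretrained backbone frozen; the learning rate is a placeholder.
for p in model.parameters():
    p.requires_grad = False
head = model.module.aspp.conv_1x1_4 if isinstance(model, nn.DataParallel) else model.aspp.conv_1x1_4
for p in head.parameters():
    p.requires_grad = True
optimizer = torch.optim.Adam(head.parameters(), lr=1e-4)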
import torch.utils.data
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F

import numpy as np
import pickle
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import cv2

batch_size = 2

network = DeepLabV3("eval_val", project_dir="/root/deeplabv3").cuda()
network.load_state_dict(
    torch.load("/root/deeplabv3/pretrained_models/model_13_2_2_2_epoch_580.pth"))

val_dataset = DatasetVal(cityscapes_data_path="/root/deeplabv3/data/cityscapes",
                         cityscapes_meta_path="/root/deeplabv3/data/cityscapes/meta")

num_val_batches = int(len(val_dataset) / batch_size)
print("num_val_batches:", num_val_batches)

val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
                                         batch_size=batch_size, shuffle=False,
                                         num_workers=1)

with open("/root/deeplabv3/data/cityscapes/meta/class_weights.pkl", "rb") as file:  # (needed for python3)
    class_weights = np.array(pickle.load(file))
class_weights = torch.from_numpy(class_weights)
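# A minimal sketch (not part of the snippet above) of how the loaded class weights are
# typically consumed: cast to float, move to the GPU, and pass them as the per-class
# weight of the cross-entropy loss.
class_weights = class_weights.float().cuda()
loss_fn = nn.CrossEntropyLoss(weight=class_weights)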
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F

import numpy as np
import pickle
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import cv2
import os

batch_size = 2  # very small

network = DeepLabV3("eval_seq", project_dir="/root/deeplabv3").cuda()  # move the nn.Module onto the CUDA GPU
network.load_state_dict(
    torch.load("/root/deeplabv3/pretrained_models/model_13_2_2_2_epoch_580.pth"))  # load Cityscapes-trained weights

for sequence in ["00", "01", "02"]:
    print(sequence)

    val_dataset = DatasetSeq(cityscapes_data_path="/data/cityscapes_dataset",
                             cityscapes_meta_path="/data/cityscapes_dataset/meta",
                             sequence=sequence)

    num_val_batches = int(len(val_dataset) / batch_size)
    print("num_val_batches:", num_val_batches)

    val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
                                             batch_size=batch_size, shuffle=False,
                                             num_workers=1)
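    # A minimal sketch (an assumption, not from this script) of running inference over one
    # sequence; DatasetSeq is assumed to yield (image_batch, image_id_batch) pairs.
    network.eval()
    with torch.no_grad():
        for imgs, img_ids in val_loader:
            imgs = imgs.cuda()
            outputs = network(imgs)        # (N, num_classes, H, W) logits
            preds = outputs.argmax(dim=1)  # (N, H, W) predicted trainId label maps
            # colorize / save preds per img_id here as needed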
import pickle
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import cv2
import time

# NOTE! NOTE! change this to not overwrite all log data when you train the model:
model_id = "1"

num_epochs = 1000
batch_size = 3
learning_rate = 0.0001

network = DeepLabV3(model_id, project_dir="/root/deeplabv3").cuda()

train_dataset = DatasetTrain(
    cityscapes_data_path="/root/deeplabv3/data/cityscapes",
    cityscapes_meta_path="/root/deeplabv3/data/cityscapes/meta")
val_dataset = DatasetVal(
    cityscapes_data_path="/root/deeplabv3/data/cityscapes",
    cityscapes_meta_path="/root/deeplabv3/data/cityscapes/meta")

num_train_batches = int(len(train_dataset) / batch_size)
num_val_batches = int(len(val_dataset) / batch_size)
print("num_train_batches:", num_train_batches)
print("num_val_batches:", num_val_batches)

train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size, shuffle=True,
                                           num_workers=4)
    print(model)
    return model


def create_model(opt, segment_network):
    model = get_model(opt.model)
    instance = model(opt, segment_network)
    print("Model [%s] was created" % type(instance).__name__)
    return instance


if __name__ == '__main__':
    with open('config.yml', 'r') as cfg_file:
        opt = yaml.load(cfg_file, Loader=yaml.FullLoader)  # PyYAML >= 5.1 expects an explicit Loader
    opt = Namespace(**opt)

    segment_network = DeepLabV3("eval_val", project_dir="drive_augumentation")
    segment_network.load_state_dict(
        torch.load("pretrained_models/model_13_2_2_2_epoch_580.pth",
                   map_location='cpu'))
    segment_network.eval()
    if len(opt.gpu_ids) > 1:
        segment_network = segment_network.to('cuda:1')
    else:
        segment_network = segment_network.cuda('cuda:{}'.format(opt.gpu_ids[0]))

    model = create_model(opt, segment_network)
    dataset = SegmentationUnpairedDataset(opt.root_path, opt.load_size, opt.seg_size)
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=opt.batch_size,
                                             num_workers=8)
    11: 24,
    12: 25,
    13: 26,
    14: 27,
    15: 28,
    16: 31,
    17: 32,
    18: 33,
    19: 0
}
trainId_to_id_map_func = np.vectorize(trainId_to_id.get)

batch_size = 2
model_id = 1

network = DeepLabV3("eval_val_for_metrics", project_dir=default_path).cuda()
# network.load_state_dict(torch.load(os.path.join(default_path, 'pretrained_models/model_13_2_2_2_epoch_580.pth')))

# check last checkpoint
data_list = glob.glob(
    os.path.join(
        default_path, 'training_logs/model_' + str(model_id) + '/checkpoints/model_' +
        str(model_id) + '_*.pth'))

# find latest checkpoint
latest_epoch = 0
for name in list(data_list):
    if latest_epoch < int(getEpoch(name)):
        latest_epoch = int(getEpoch(name))

if latest_epoch != 0:
    network.load_state_dict(
        torch.load(
            os.path.join(
                default_path, 'training_logs/model_' + str(model_id) + '/checkpoints/model_' +
                str(model_id) + '_epoch_' + str(latest_epoch) + '.pth')))
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 19 12:45:05 2021

@author: Admin
"""
import torch
from load_data import load_data
from learning_function import learning_function
from torchsummary import summary
from plot import plot
from deeplabv3 import DeepLabV3
from config import config

#####################################################################################################
######################################## load data ##################################################
#####################################################################################################
l_train = load_data(r"D:/MHS data segmentation labeling/data/l_train.npy")
test = load_data(r"D:/MHS data segmentation labeling/data/test.npy")

#####################################################################################################
#################################### student model ##################################################
#####################################################################################################
model = DeepLabV3(config["number_classes"])
train_ious, test_ious = learning_function(model, l_train, test)
plot(train_ious, test_ious)