# --- Example 1 (score: 0) ---
# from networks.kd_model import NetModel
from networks.kd_model_wo_paral import NetModel
import logging
import warnings
warnings.filterwarnings("ignore")
from torch.utils import data
from dataset.datasets import CSDataSet
import numpy as np
from tqdm import tqdm
import os
from prefetch_generator import BackgroundGenerator
from PIL import Image as PILImage
from networks.evaluate import id_to_trainid, id_to_trainid, id2trainId, get_palette, get_confusion_matrix
# Per-channel pixel means subtracted during input normalisation by CSDataSet
# (presumably Caffe-style BGR channel order — confirm against the dataset code).
IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434),
                    dtype=np.float32)
# Command-line options; `input_size` is an "H,W" string.
# NOTE(review): TrainOptions is not imported in this fragment — confirm the
# import exists in the full file.
args = TrainOptions().initialize()
h, w = map(int, args.input_size.split(','))
# trainloader = data.DataLoader(CSDataSet(args.data_dir, './dataset/list/cityscapes/train.lst', max_iters=args.num_steps*args.batch_size, crop_size=(h, w),
#                 scale=args.random_scale, mirror=args.random_mirror, mean=IMG_MEAN),
#                 batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True)


class DataLoaderX(data.DataLoader):
    """DataLoader variant whose iterator is wrapped in a background
    prefetch thread (prefetch_generator.BackgroundGenerator), so the
    next batch is fetched while the current one is being consumed."""

    def __iter__(self):
        underlying = super().__iter__()
        return BackgroundGenerator(underlying)


# Use the background-prefetching loader ("2:10" appears to be a timing note
# from a benchmark run — confirm).
dataloader = DataLoaderX  # 2:10
# dataloader = data.DataLoader #

valset = CSDataSet(args.data_dir,
# --- Example 2 (score: 0) ---
# Seed every RNG source so training runs are reproducible.
# These modules are used here but were not imported earlier in this fragment,
# so they are imported locally to keep the script self-contained.
import os
import random

import numpy as np
import torch

seed = 0
os.environ['PYTHONHASHSEED'] = str(seed)
torch.manual_seed(seed)          # CPU RNG (original called this twice; once suffices)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)  # if you are using multi-GPU.
np.random.seed(seed)  # Numpy module.
random.seed(seed)  # Python random module.
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False      # disable autotuner: keeps runs deterministic
torch.backends.cudnn.deterministic = True

# Per-channel pixel means subtracted during input normalisation
# (presumably Caffe-style BGR channel order — confirm against the dataset code).
IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434),
                    dtype=np.float32)

# Command-line options; `input_size` is an "H,W" string.
args = TrainOptions().initialize()
h, w = map(int, args.input_size.split(','))

# class data_prefetcher():
#     def __init__(self, loader):
#         self.loader = iter(loader)
#         self.stream = torch.cuda.Stream()
#
#         # With Amp, it isn't necessary to manually convert data to half.
#         # if args.fp16:
#         #     self.mean = self.mean.half()
#         #     self.std = self.std.half()
#         self.preload()
#
#     def next(self):
#         # torch.cuda.current_stream().wait_stream(self.stream)
import sys
from utils.train_options import TrainOptions
from networks.kd_model import NetModel
import logging
import warnings
warnings.filterwarnings("ignore")
from torch.utils import data
from dataset.datasets import CSDataSet
import numpy as np
# Per-channel pixel means subtracted by CSDataSet (presumably Caffe-style BGR
# order — confirm).
IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434), dtype=np.float32)

args = TrainOptions().initialize()
h, w = map(int, args.input_size.split(','))  # training crop size, "H,W"

trainloader = data.DataLoader(
    CSDataSet(args.data_dir, './dataset/list/cityscapes/train.lst',
              max_iters=args.num_steps*args.batch_size, crop_size=(h, w),
              scale=args.random_scale, mirror=args.random_mirror, mean=IMG_MEAN),
    batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True)
valloader = data.DataLoader(
    CSDataSet(args.data_dir, './dataset/list/cityscapes/val.lst',
              crop_size=(1024, 2048), mean=IMG_MEAN, scale=False, mirror=False),
    batch_size=1, shuffle=False, pin_memory=True)

# 2975 is presumably the Cityscapes training-set size, so this checkpoints
# roughly once per epoch worth of steps — confirm.
save_steps = int(2975/args.batch_size)
model = NetModel(args)
for epoch in range(args.start_epoch, args.epoch_nums):
    # Loop variable renamed from `data` to `batch`: the original shadowed the
    # `torch.utils.data` module imported above.
    for step, batch in enumerate(trainloader, args.last_step+1):
        model.adjust_learning_rate(args.lr_g, model.G_solver, step)
        model.adjust_learning_rate(args.lr_d, model.D_solver, step)
        model.set_input(batch)
        model.optimize_parameters()
        model.print_info(epoch, step)
        # Evaluate/checkpoint: every `save_steps` once inside the final 1000
        # steps, and always at the very last step. Parentheses make the
        # original's `(A and B) or C` precedence explicit.
        should_eval = ((step > 1) and (step % save_steps == 0)
                       and (step > args.num_steps - 1000)) or (step == args.num_steps - 1)
        if should_eval:
            mean_IU, IU_array = model.evalute_model(model.student, valloader, '0', '512,512', 19, True)
            model.save_ckpt(epoch, step, mean_IU, IU_array)
            logging.info('[val 512,512] mean_IU:{:.6f}  IU_array:{}'.format(mean_IU, IU_array))
# --- Example 4 (score: 0) ---
import logging
import warnings
warnings.filterwarnings("ignore")
from torch.utils import data
from dataset.datasets import CSDataSet
from dataset.camvid import CamVid
import numpy as np
import os
import torch
import dataset.transforms as my_trans
import torchvision.transforms as transforms

# Per-channel pixel means for input normalisation
# (presumably Caffe-style BGR channel order — confirm).
IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434),
                    dtype=np.float32)

# Command-line options; `input_size` is an "H,W" string.
# NOTE(review): TrainOptions is not imported in this fragment — confirm the
# import exists in the full file.
args = TrainOptions().initialize()
h, w = map(int, args.input_size.split(','))
# trainloader = torch.utils.data.DataLoader(CSDataSet(args.data_dir, './dataset/list/cityscapes/train.lst', max_iters=args.num_steps*args.batch_size, crop_size=(h, w),
#                 scale=args.random_scale, mirror=args.random_mirror, mean=IMG_MEAN),
#                 batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True)
# valloader = torch.utils.data.DataLoader(CSDataSet(args.data_dir, './dataset/list/cityscapes/val.lst', crop_size=(1024, 2048), mean=IMG_MEAN, scale=False, mirror=False),
#                                 batch_size=2, shuffle=False, pin_memory=True)

# ImageNet RGB statistics rescaled from [0, 1] to the 0-255 pixel range.
# Fix: the original defined `value_scale` but then hard-coded the literal 255
# in both comprehensions, so changing `value_scale` had no effect.
value_scale = 255
mean = [0.485, 0.456, 0.406]
mean = [item * value_scale for item in mean]
std = [0.229, 0.224, 0.225]
std = [item * value_scale for item in std]
train_transform = my_trans.Compose([
    # my_trans.Resize((args.height, args.width)),
    # my_trans.RandScale([0.5, 2.0]),
# --- Example 5 (score: 0) ---
        item['sample_index'] = index
        item['dataset_name'] = self.dataset

        try:
            item['maskname'] = self.maskname[index]
        except AttributeError:
            item['maskname'] = ''
        try:
            item['partname'] = self.partname[index]
        except AttributeError:
            item['partname'] = ''

        return item

    def __len__(self):
        """Return the dataset size: one sample per image-name entry."""
        names = self.imgname
        return len(names)


if __name__ == '__main__':
    from utils.train_options import TrainOptions

    # Quick smoke test: parse minimal options and build a 3DPW eval dataset.
    option_parser = TrainOptions()
    options = option_parser.parse_args(['--name', 'hc'])

    ds = BaseDataset(options, '3dpw', is_train=False)

    print("done")