def main():
    """Create the model and start the evaluation process."""
    torch.multiprocessing.set_start_method("spawn", force=True)
    args = get_arguments()

    os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu
    gpus = [int(i) for i in args.gpu.split(',')]
    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)

    deeplab = CorrPM_Model(args.num_classes, args.num_points)
    if len(gpus) > 1:
        model = DataParallelModel(deeplab)
    else:
        model = deeplab

    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    if args.data_name == 'lip':
        lip_dataset = LIPDataSet(args.data_dir, VAL_POSE_ANNO_FILE, args.dataset, crop_size=input_size, transform=transform)
        num_samples = len(lip_dataset)
        valloader = data.DataLoader(lip_dataset, batch_size=args.batch_size * len(gpus),
                                    shuffle=False, num_workers=4, pin_memory=True)

    restore_from = args.restore_from
    state_dict = model.state_dict().copy()
    state_dict_old = torch.load(restore_from)

    for key in state_dict.keys():
        if key not in state_dict_old.keys():
            print(key)
    for key, nkey in zip(state_dict_old.keys(), state_dict.keys()):
        if key != nkey:
            state_dict[key[7:]] = deepcopy(state_dict_old[key])
        else:
            state_dict[key] = deepcopy(state_dict_old[key])

    model.load_state_dict(state_dict)
    model.eval()
    model.cuda()

    parsing_preds, scales, centers = valid(model, valloader, input_size, num_samples, len(gpus))

    mIoU = compute_mean_ioU(parsing_preds, scales, centers, args.num_classes, args.data_dir, input_size, args.dataset)
    print(mIoU)

    end = datetime.datetime.now()
    print(end - start, 'seconds')
    print(end)
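
The key-remapping loop in the example above (and in several of the evaluation scripts below) pairs state_dict_old.keys() with state_dict.keys() via zip, which only works when both state dicts iterate in the same order and differ only by the 'module.' prefix that DataParallel adds. A minimal, order-independent sketch of the same idea; the helper name strip_module_prefix is an assumption for illustration, not part of any of the projects listed here:

from collections import OrderedDict

import torch


def strip_module_prefix(checkpoint_path):
    """Sketch: load a checkpoint and drop the 'module.' prefix added by DataParallel.

    Assumes the checkpoint is either a raw state dict or nests one under 'state_dict'.
    """
    state_dict_old = torch.load(checkpoint_path, map_location='cpu')
    if isinstance(state_dict_old, dict) and 'state_dict' in state_dict_old:
        state_dict_old = state_dict_old['state_dict']
    state_dict = OrderedDict()
    for key, value in state_dict_old.items():
        # match keys by name instead of relying on dict ordering
        new_key = key[len('module.'):] if key.startswith('module.') else key
        state_dict[new_key] = value
    return state_dict
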
def main():
    """Create the model and start the evaluation process."""
    args = get_arguments()
    update_config(config, args)
    print (args)
    gpus = [int(i) for i in args.gpu.split(',')]
    if not args.gpu == 'None':
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    h, w = map(int, args.input_size.split(','))
    
    input_size = (h, w)

    model = get_seg_model(cfg=config, num_classes=args.num_classes,is_train=False)

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    lip_dataset = LIPDataSet(args.data_dir, 'val',args.list_path, crop_size=input_size, transform=transform)
    num_samples = len(lip_dataset)

    valloader = data.DataLoader(lip_dataset, batch_size=args.batch_size * len(gpus),
                                shuffle=False, pin_memory=True)

    restore_from = args.restore_from

    state_dict = model.state_dict().copy()
    state_dict_old = torch.load(restore_from)
    state_dict_old = state_dict_old['state_dict']

    for key, nkey in zip(state_dict_old.keys(), state_dict.keys()):
        if key != nkey:
            # remove the 'module.' in the 'key'
            state_dict[key[7:]] = deepcopy(state_dict_old[key])
        else:
            state_dict[key] = deepcopy(state_dict_old[key])

    model.load_state_dict(state_dict)
    model = DataParallelModel(model)

    model.eval()
    model.cuda()

    parsing_preds, scales, centers,time_list= valid(model, valloader, input_size, num_samples, len(gpus))
    mIoU = compute_mean_ioU(parsing_preds, scales, centers, args.num_classes, args.data_dir, input_size,args.dataset,args.list_path)
    # write_results(parsing_preds, scales, centers, args.data_dir, 'val', args.save_dir, input_size=input_size)
    # write_logits(parsing_logits, scales, centers, args.data_dir, 'val', args.save_dir, input_size=input_size)
    
    

    print(mIoU)
    print('total time is ',sum(time_list))
    print('avg time is ',sum(time_list)/len(time_list))
Example #4
File: train.py Project: wuqiangch/CADLab
def main():
    writer = SummaryWriter(args.snapshot_dir)

    if not args.gpu == 'None':
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)

    cudnn.enabled = True

    xlsor = XLSor(num_classes=args.num_classes)
    print(xlsor)

    saved_state_dict = torch.load(args.restore_from)
    new_params = xlsor.state_dict().copy()
    for i in saved_state_dict:
        i_parts = i.split('.')
        if not i_parts[0] == 'fc':
            new_params['.'.join(i_parts[0:])] = saved_state_dict[i]

    xlsor.load_state_dict(new_params)

    model = DataParallelModel(xlsor)
    model.train()
    model.float()
    model.cuda()

    criterion = Criterion()
    criterion = DataParallelCriterion(criterion)
    criterion.cuda()

    cudnn.benchmark = True

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    trainloader = data.DataLoader(XRAYDataSet(args.data_dir,
                                              args.data_list,
                                              max_iters=args.num_steps *
                                              args.batch_size,
                                              crop_size=input_size,
                                              scale=args.random_scale,
                                              mirror=args.random_mirror,
                                              mean=IMG_MEAN),
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=16,
                                  pin_memory=True)

    optimizer = optim.SGD(
        [{
            'params': filter(lambda p: p.requires_grad, xlsor.parameters()),
            'lr': args.learning_rate
        }],
        lr=args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay)
    optimizer.zero_grad()

    interp = nn.Upsample(size=input_size, mode='bilinear', align_corners=True)

    for i_iter, batch in enumerate(trainloader):
        i_iter += args.start_iters
        images, labels, _, _ = batch
        images = images.cuda()
        labels = labels.float().cuda()
        if torch_ver == "0.3":
            images = Variable(images)
            labels = Variable(labels)

        optimizer.zero_grad()
        lr = adjust_learning_rate(optimizer, i_iter)
        preds = model(images, args.recurrence)

        loss = criterion(preds, labels)
        loss.backward()
        optimizer.step()

        if i_iter % 100 == 0:
            writer.add_scalar('learning_rate', lr, i_iter)
            writer.add_scalar('loss', loss.data.cpu().numpy(), i_iter)

        if i_iter % 100 == 0:
            images_inv = inv_preprocess(images, args.save_num_images, IMG_MEAN)
            if isinstance(preds, list):
                preds = preds[0]
            if isinstance(preds, list):
                preds = preds[0]
            preds = interp(preds)
            for index, img in enumerate(images_inv):
                writer.add_image('Images/' + str(index),
                                 torch.from_numpy(img / 255.).permute(2, 0, 1),
                                 i_iter)
                writer.add_image('Labels/' + str(index), labels[index], i_iter)
                writer.add_image('preds/' + str(index),
                                 (preds[index] > 0.5).float(), i_iter)

        print('iter = {} of {} completed, loss = {}'.format(
            i_iter, args.num_steps,
            loss.data.cpu().numpy()))

        if i_iter >= args.num_steps - 1:
            print('save model ...')
            torch.save(
                xlsor.state_dict(),
                osp.join(args.snapshot_dir,
                         'XLSor_' + str(args.num_steps) + '.pth'))
            break

        if i_iter % args.save_pred_every == 0:
            print('taking snapshot ...')
            torch.save(
                xlsor.state_dict(),
                osp.join(args.snapshot_dir, 'XLSor_' + str(i_iter) + '.pth'))

    end = timeit.default_timer()
    print(end - start, 'seconds')
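
adjust_learning_rate is imported from elsewhere in these training scripts and is not shown; DeepLab-style segmentation code of this kind typically applies a polynomial ("poly") decay over the total number of iterations. A minimal sketch under that assumption; the power of 0.9, the base_lr default, and updating every parameter group are guesses rather than the original implementation:

def adjust_learning_rate(optimizer, i_iter, total_iters, base_lr=0.007, power=0.9):
    """Sketch of a polynomial ('poly') learning-rate schedule, common in DeepLab-style training."""
    lr = base_lr * ((1.0 - float(i_iter) / total_iters) ** power)
    for param_group in optimizer.param_groups:
        # the real helper may scale some parameter groups differently
        param_group['lr'] = lr
    return lr

The version called in the example above takes only (optimizer, i_iter), so it presumably reads the total step count from args rather than from an argument.
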
Example #5
class SGNetAgent(BaseAgent):
    """
    This class will be responsible for handling the whole process of our architecture.
    """
    def __init__(self, config):
        super().__init__(config)
        ## Select network
        if config.spatial_information == 'depth' and config.os == 16 and config.network == "SGNet" and config.mode != "measure_speed":
            from graphs.models.SGNet.SGNet import SGNet
        elif config.spatial_information == 'depth' and config.os == 16 and config.network == "SGNet":
            from graphs.models.SGNet.SGNet_fps import SGNet
        elif config.spatial_information == 'depth' and config.os == 16 and config.network == "SGNet_ASPP" and config.mode != "measure_speed":
            from graphs.models.SGNet.SGNet_ASPP import SGNet
        elif config.spatial_information == 'depth' and config.os == 16 and config.network == "SGNet_ASPP":
            from graphs.models.SGNet.SGNet_ASPP_fps import SGNet

        random.seed(self.config.seed)
        os.environ['PYTHONHASHSEED'] = str(self.config.seed)
        np.random.seed(self.config.seed)
        torch.manual_seed(self.config.seed)
        torch.cuda.manual_seed(self.config.seed)
        torch.cuda.manual_seed_all(self.config.seed)
        cudnn.enabled = True
        cudnn.benchmark = True
        cudnn.deterministic = True
        os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu
        # create data loader
        if config.dataset == "NYUD":
            self.testloader = data.DataLoader(NYUDataset_val_full(
                self.config.val_list_path),
                                              batch_size=1,
                                              shuffle=False,
                                              pin_memory=True)
        # Create an instance from the Model
        self.logger.info("Loading encoder pretrained in imagenet...")
        self.model = SGNet(self.config.num_classes)
        print(self.model)

        self.model.cuda()
        self.model.train()
        self.model.float()
        print(config.gpu)
        if config.mode != 'measure_speed':
            self.model = DataParallelModel(self.model, device_ids=[0])
            print('parallel....................')

        total = sum([param.nelement() for param in self.model.parameters()])
        print('  + Number of params: %.2fM' % (total / 1e6))
        print_cuda_statistics()

    def load_checkpoint(self, filename):
        try:
            self.logger.info("Loading checkpoint '{}'".format(filename))
            checkpoint = torch.load(filename)

            self.current_epoch = checkpoint['epoch']
            self.current_iteration = checkpoint['iteration']
            self.model.load_state_dict(checkpoint['state_dict'])

            # self.optimizer.load_state_dict(checkpoint['optimizer'])

            self.logger.info(
                "Checkpoint loaded successfully from '{}' at (epoch {}) at (iteration {})\n"
                .format(filename, checkpoint['epoch'],
                        checkpoint['iteration']))
        except OSError as e:
            self.logger.info(
                "No checkpoint exists from '{}'. Skipping...".format(
                    self.config.checkpoint_dir))
            self.logger.info("**First time to train**")

    def run(self):
        """
        Run the operator according to the configured mode
        :return:
        """
        assert self.config.mode in [
            'train', 'test', 'measure_speed', 'train_iters'
        ]
        try:
            if self.config.mode == 'test':
                self.test()
            elif self.config.mode == 'measure_speed':
                with torch.no_grad():
                    self.measure_speed(input_size=[1, 3, 480, 640])
        except KeyboardInterrupt:
            self.logger.info("You have entered CTRL+C.. Wait to finalize")

    def test(self):

        tqdm_batch = tqdm(self.testloader,
                          total=len(self.testloader),
                          desc="Testing...")
        self.model.eval()
        metrics = IOUMetric(self.config.num_classes)
        palette = get_palette(256)
        # if (not os.path.exists(self.config.output_img_dir)):
        #     os.mkdir(self.config.output_img_dir)
        # if (not os.path.exists(self.config.output_gt_dir)):
        #     os.mkdir(self.config.output_gt_dir)
        if (not os.path.exists(self.config.output_predict_dir)):
            os.mkdir(self.config.output_predict_dir)
        self.load_checkpoint(self.config.trained_model_path)
        index = 0
        for batch_val in tqdm_batch:
            image = batch_val['image'].cuda()
            label = batch_val['seg'].cuda()
            label = torch.squeeze(label, 1).long()
            HHA = batch_val['HHA'].cuda()
            depth = batch_val['depth'].cuda()
            size = np.array([label.size(1), label.size(2)])
            input_size = (label.size(1), label.size(2))

            with torch.no_grad():
                if self.config.ms:
                    output = predict_multiscale(self.model, image, depth,
                                                input_size, [0.8, 1.0, 2.0],
                                                self.config.num_classes, False)
                else:
                    output = predict_multiscale(self.model, image, depth,
                                                input_size, [1.0],
                                                self.config.num_classes, False)
                seg_pred = np.asarray(np.argmax(output, axis=2), dtype=np.int64)
                output_im = Image.fromarray(
                    np.asarray(np.argmax(output, axis=2), dtype=np.uint8))
                output_im.putpalette(palette)
                output_im.save(self.config.output_predict_dir + '/' +
                               str(index) + '.png')
                seg_gt = np.asarray(label[0].cpu().numpy(), dtype=np.int64)

                ignore_index = seg_gt != 255
                seg_gt = seg_gt[ignore_index]
                seg_pred = seg_pred[ignore_index]

                metrics.add_batch(seg_pred, seg_gt, ignore_index=255)

                index = index + 1
        acc, acc_cls, iu, mean_iu, fwavacc = metrics.evaluate()
        print({
            'meanIU': mean_iu,
            'IU_array': iu,
            'acc': acc,
            'acc_cls': acc_cls
        })
        pass

    def finalize(self):
        """
        Finalize all the operations of the two main classes of the process: the operator and the data loader
        :return:
        """
        # TODO
        pass

    def measure_speed(self, input_size, iteration=500):
        """
        Measure the speed of the model
        :return: speed_time
                 fps
        """
        self.model.eval()
        input = torch.randn(*input_size).cuda()
        depth = torch.randn(*input_size).cuda()
        HHA = torch.randn(*input_size).cuda()

        for _ in range(100):
            self.model(input, depth)
        print('=========Speed Testing=========')
        torch.cuda.synchronize()
        torch.cuda.synchronize()
        t_start = time.time()

        for _ in range(iteration):
            x = self.model(input, depth)
        torch.cuda.synchronize()
        elapsed_time = time.time() - t_start
        speed_time = elapsed_time / iteration * 1000
        fps = iteration / elapsed_time
        print(iteration)
        print('Elapsed Time: [%.2f s / %d iter]' % (elapsed_time, iteration))
        print('Speed Time: %.2f ms / iter   FPS: %.2f' % (speed_time, fps))
        return speed_time, fps
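
The measure_speed method above follows the usual CUDA benchmarking recipe: warm-up passes so cuDNN can select its algorithms, torch.cuda.synchronize() around the timed loop because CUDA kernels launch asynchronously, and an average over many iterations. A stripped-down sketch of the same pattern for an arbitrary callable; the helper name and defaults are assumptions, not part of the SGNet code:

import time

import torch


def benchmark_cuda(fn, *inputs, warmup=100, iters=500):
    """Sketch: time a CUDA callable and return (ms per iteration, FPS)."""
    with torch.no_grad():
        for _ in range(warmup):
            fn(*inputs)                  # warm-up: cuDNN autotuning, allocator caches
        torch.cuda.synchronize()         # wait for queued kernels before starting the clock
        t_start = time.time()
        for _ in range(iters):
            fn(*inputs)
        torch.cuda.synchronize()         # wait for the timed kernels before stopping the clock
    elapsed = time.time() - t_start
    return elapsed / iters * 1000.0, iters / elapsed
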
Example #6
def main():
    writer = SummaryWriter(args.snapshot_dir)
    
    if not args.gpu == 'None':
        os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu
    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)

    cudnn.enabled = True

    xlsor = XLSor(num_classes=args.num_classes)
    print(xlsor)

    saved_state_dict = torch.load(args.restore_from)
    new_params = xlsor.state_dict().copy()
    for i in saved_state_dict:
        i_parts = i.split('.')
        if not i_parts[0] == 'fc':
            new_params['.'.join(i_parts[0:])] = saved_state_dict[i]
    
    xlsor.load_state_dict(new_params)


    model = DataParallelModel(xlsor)
    model.train()
    model.float()
    model.cuda()    

    criterion = Criterion()
    criterion = DataParallelCriterion(criterion)
    criterion.cuda()
    
    cudnn.benchmark = True

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)


    trainloader = data.DataLoader(XRAYDataSet(args.data_dir, args.data_list, max_iters=args.num_steps*args.batch_size, crop_size=input_size,
                    scale=args.random_scale, mirror=args.random_mirror, mean=IMG_MEAN), 
                    batch_size=args.batch_size, shuffle=True, num_workers=16, pin_memory=True)

    optimizer = optim.SGD([{'params': filter(lambda p: p.requires_grad, xlsor.parameters()), 'lr': args.learning_rate}],
                          lr=args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
    optimizer.zero_grad()

    interp = nn.Upsample(size=input_size, mode='bilinear', align_corners=True)


    for i_iter, batch in enumerate(trainloader):
        i_iter += args.start_iters
        images, labels, _, _ = batch
        images = images.cuda()
        labels = labels.float().cuda()
        if torch_ver == "0.3":
            images = Variable(images)
            labels = Variable(labels)

        optimizer.zero_grad()
        lr = adjust_learning_rate(optimizer, i_iter)
        preds = model(images, args.recurrence)

        loss = criterion(preds, labels)
        loss.backward()
        optimizer.step()

        if i_iter % 100 == 0:
            writer.add_scalar('learning_rate', lr, i_iter)
            writer.add_scalar('loss', loss.data.cpu().numpy(), i_iter)

        if i_iter % 100 == 0:
            images_inv = inv_preprocess(images, args.save_num_images, IMG_MEAN)
            if isinstance(preds, list):
                preds = preds[0]
            if isinstance(preds, list):
                preds = preds[0]
            preds = interp(preds)
            for index, img in enumerate(images_inv):
                writer.add_image('Images/' + str(index), torch.from_numpy(img / 255.).permute(2, 0, 1), i_iter)
                writer.add_image('Labels/' + str(index), labels[index], i_iter)
                writer.add_image('preds/' + str(index), (preds[index] > 0.5).float(), i_iter)

        print('iter = {} of {} completed, loss = {}'.format(i_iter, args.num_steps, loss.data.cpu().numpy()))

        if i_iter >= args.num_steps - 1:
            print('save model ...')
            torch.save(xlsor.state_dict(), osp.join(args.snapshot_dir, 'XLSor_' + str(args.num_steps) + '.pth'))
            break

        if i_iter % args.save_pred_every == 0:
            print('taking snapshot ...')
            torch.save(xlsor.state_dict(), osp.join(args.snapshot_dir, 'XLSor_' + str(i_iter) + '.pth'))

    end = timeit.default_timer()
    print(end - start, 'seconds')
def main():
    """Create the model and start the training."""
    print(args)
    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    writer = SummaryWriter(args.snapshot_dir)
    gpus = [int(i) for i in args.gpu.split(',')]
    if not args.gpu == 'None':
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    h, w = map(int, args.input_size.split(','))
    input_size = [h, w]

    # cudnn related setting
    cudnn.enabled = True
    cudnn.benchmark = True
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.enabled = True

    deeplab = get_cls_net(config=config,
                          num_classes=args.num_classes,
                          is_train=True)

    print('-------Load Weight', args.restore_from)
    saved_state_dict = torch.load(args.restore_from)

    if args.start_epoch > 0:
        model = DataParallelModel(deeplab)
        model.load_state_dict(saved_state_dict['state_dict'])
    else:
        new_params = deeplab.state_dict().copy()
        state_dict_pretrain = saved_state_dict
        for state_name in state_dict_pretrain:
            if state_name in new_params:
                new_params[state_name] = state_dict_pretrain[state_name]
            else:
                print('NOT LOAD', state_name)
        deeplab.load_state_dict(new_params)
        model = DataParallelModel(deeplab)
    print('-------Load Weight Finish', args.restore_from)

    model.cuda()

    criterion0 = CriterionAll(loss_type='ohem')
    criterion0 = DataParallelCriterion(criterion0)
    criterion0.cuda()

    criterion1 = LovaszSoftmax(input_size=input_size)
    criterion1 = DataParallelCriterion(criterion1)
    criterion1.cuda()

    transform = build_transforms(args)

    print("-------Loading data...")
    parsing_dataset = WYDataSet(args.data_dir,
                                args.dataset,
                                crop_size=input_size,
                                transform=transform)
    print("Data dir : ", args.data_dir)
    print("Dataset : ", args.dataset, "Sample Number: ",
          parsing_dataset.number_samples)
    trainloader = data.DataLoader(parsing_dataset,
                                  batch_size=args.batch_size * len(gpus),
                                  shuffle=True,
                                  num_workers=8,
                                  collate_fn=fast_collate_fn_mask,
                                  pin_memory=True)

    optimizer = optim.SGD(model.parameters(),
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    if args.start_epoch > 0:
        optimizer.load_state_dict(saved_state_dict['optimizer'])
        print('========Load Optimizer', args.restore_from)

    total_iters = args.epochs * len(trainloader)
    for epoch in range(args.start_epoch, args.epochs):
        model.train()
        tng_prefetcher = data_prefetcher_mask(trainloader)
        batch = tng_prefetcher.next()
        n_batch = 0
        while batch[0] is not None:
            #         for i_iter, batch in enumerate(trainloader):
            i_iter = n_batch + len(trainloader) * epoch
            lr = adjust_learning_rate(optimizer, i_iter, total_iters)

            images, labels, _ = batch
            labels = labels.squeeze(1)
            labels = labels.long().cuda(non_blocking=True)
            preds = model(images)

            loss0 = criterion0(preds, labels)
            loss1 = criterion1(preds, labels)
            loss = loss0 + loss1

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            batch = tng_prefetcher.next()
            n_batch += 1

            if i_iter % 1 == 0:
                writer.add_scalar('learning_rate', lr, i_iter)
                writer.add_scalar('loss', loss.data.cpu().numpy(), i_iter)
                writer.add_scalar('loss0', loss0.data.cpu().numpy(), i_iter)
                writer.add_scalar('loss1', loss1.data.cpu().numpy(), i_iter)

            print(
                f'epoch = {epoch}, iter = {i_iter}/{total_iters}, lr={lr:.6f}, \
                  loss = {loss.data.cpu().numpy():.6f}, \
                  loss0 = {loss0.data.cpu().numpy():.6f}, \
                  loss1 = {loss1.data.cpu().numpy():.6f}')

        if (epoch + 1) % args.save_step == 0 or epoch == args.epochs:
            time.sleep(10)
            print("-------Saving checkpoint...")
            save_checkpoint(model, epoch, optimizer)

    time.sleep(10)
    save_checkpoint(model, epoch, optimizer)
    end = timeit.default_timer()
    print(end - start, 'seconds')
Example #8
def main():
    """Create the model and start the training."""
    print(args)
    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    writer = SummaryWriter(args.snapshot_dir)
    gpus = [int(i) for i in args.gpu.split(',')]
    if not args.gpu == 'None':
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    h, w = map(int, args.input_size.split(','))
    input_size = [h, w]

    cudnn.enabled = True
    # cudnn related setting
    cudnn.benchmark = True
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.enabled = True

    deeplab = get_cls_net(config=config,
                          num_classes=args.num_classes,
                          is_train=True)
    model = DataParallelModel(deeplab)

    saved_state_dict = torch.load(args.restore_from)

    if args.start_epoch > 0:
        model = DataParallelModel(deeplab)
        model.load_state_dict(saved_state_dict['state_dict'])
    else:
        new_params = model.state_dict().copy()
        state_dict_pretrain = saved_state_dict['state_dict']
        for state_name in state_dict_pretrain:
            if state_name in new_params:
                new_params[state_name] = state_dict_pretrain[state_name]
                #print ('LOAD',state_name)
            else:
                print('NOT LOAD', state_name)
        model.load_state_dict(new_params)

    print('-------Load Weight', args.restore_from)

    model.cuda()

    criterion = CriterionAll2()
    criterion = DataParallelCriterion(criterion)
    criterion.cuda()

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    trainloader = data.DataLoader(LIPDataSet(args.data_dir,
                                             args.dataset,
                                             crop_size=input_size,
                                             transform=transform),
                                  batch_size=args.batch_size * len(gpus),
                                  shuffle=True,
                                  num_workers=4,
                                  pin_memory=True)

    num_samples = 5000
    '''
    list_map = []

    for part in deeplab.path_list:
        list_map = list_map + list(map(id, part.parameters()))
    
    base_params = filter(lambda p: id(p) not in list_map,
                         deeplab.parameters())
    params_list = []
    params_list.append({'params': base_params, 'lr':args.learning_rate*0.1})
    for part in deeplab.path_list:
        params_list.append({'params': part.parameters()})
    print ('len(params_list)',len(params_list))
    '''

    list_map = []

    for part in deeplab.path_list:
        list_map = list_map + list(map(id, part.parameters()))

    base_params = filter(lambda p: id(p) not in list_map, deeplab.parameters())
    params_list = []
    params_list.append({'params': base_params, 'lr': 1e-6})
    for part in deeplab.path_list:
        params_list.append({'params': part.parameters()})
    print('len(params_list)', len(params_list))
    optimizer = torch.optim.SGD(params_list,
                                lr=args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    if args.start_epoch > 0:
        optimizer.load_state_dict(saved_state_dict['optimizer'])
        print('========Load Optimizer', args.restore_from)

    optimizer.zero_grad()

    total_iters = args.epochs * len(trainloader)
    for epoch in range(args.start_epoch, args.epochs):
        model.train()
        for i_iter, batch in enumerate(trainloader):
            i_iter += len(trainloader) * epoch
            #lr = adjust_learning_rate(optimizer, i_iter, total_iters)
            lr = adjust_learning_rate_parsing(optimizer, epoch)

            images, labels, _ = batch
            labels = labels.long().cuda(non_blocking=True)
            preds = model(images)

            loss = criterion(preds, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if i_iter % 100 == 0:
                writer.add_scalar('learning_rate', lr, i_iter)
                writer.add_scalar('loss', loss.data.cpu().numpy(), i_iter)

            print('epoch = {}, iter = {} of {} completed, lr = {}, loss = {}'.
                  format(epoch, i_iter, total_iters, lr,
                         loss.data.cpu().numpy()))
        if epoch % 2 == 0 or epoch == args.epochs:
            time.sleep(10)
            save_checkpoint(model, epoch, optimizer)

        # parsing_preds, scales, centers = valid(model, valloader, input_size,  num_samples, len(gpus))

        # mIoU = compute_mean_ioU(parsing_preds, scales, centers, args.num_classes, args.data_dir, input_size)

        # print(mIoU)
        # writer.add_scalars('mIoU', mIoU, epoch)
    time.sleep(10)
    save_checkpoint(model, epoch, optimizer)
    end = timeit.default_timer()
    print(end - start, 'seconds')
Example #9
File: train.py Project: huangxi6/EEN
def main():
    """Create the model and start the training."""

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)
    f = open('./tlip.txt', 'w')
    f.write(str(args) + '\n')

    gpus = [int(i) for i in args.gpu.split(',')]
    if not args.gpu == 'None':
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    h, w = map(int, args.input_size.split(','))
    input_size = [h, w]

    cudnn.enabled = True
    #cudnn.benchmark = False
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.enabled = True
 

    deeplab = EEN(num_classes=args.num_classes)

    # Initialize the model with resnet101-imagenet.pth
    saved_state_dict = torch.load(args.restore_from)
    new_params = deeplab.state_dict().copy()
    for i in saved_state_dict:
        i_parts = i.split('.')
        if not i_parts[0] == 'fc':
            new_params['.'.join(i_parts[0:])] = saved_state_dict[i]
    deeplab.load_state_dict(new_params)
    
    # Initialize the model with cihp_11.pth
    """args.start_epoch = 11
    res = './scihp/cihp_11.pth'
    state_dict = deeplab.state_dict().copy()
    state_dict_old = torch.load(res)
    for key, nkey in zip(state_dict_old.keys(), state_dict.keys()):
        if key != nkey:
            state_dict[key[7:]] = deepcopy(state_dict_old[key])
        else:
            state_dict[key] = deepcopy(state_dict_old[key])
    deeplab.load_state_dict(state_dict)"""
    #########
   
    model = DataParallelModel(deeplab)
    model.cuda()

    criterion = CriterionAll()
    criterion = DataParallelCriterion(criterion)
    criterion.cuda()

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    trainloader = data.DataLoader(HumanDataSet(args.data_dir, args.dataset, crop_size=input_size, transform=transform),
                                  batch_size=args.batch_size * len(gpus), shuffle=True, num_workers=2,
                                  pin_memory=True)

    optimizer = optim.SGD(
        model.parameters(),
        lr=args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )
    optimizer.zero_grad()

    total_iters = args.epochs * len(trainloader)
    print(len(trainloader))

    for epoch in range(args.start_epoch, args.epochs):
        start_time = timeit.default_timer()
        model.train()        
        for i_iter, batch in enumerate(trainloader):
            i_iter += len(trainloader) * epoch
            lr = adjust_learning_rate(optimizer, i_iter, total_iters)

            images, labels, edges, _ = batch
            #pdb.set_trace()
            labels = labels.long().cuda(non_blocking=True)
            edges = edges.long().cuda(non_blocking=True)

            preds = model(images)
            #pdb.set_trace()
            loss = criterion(preds, [labels, edges])
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if i_iter % 100 == 0:
                print('iter = {} of {} completed, loss = {}'.format(i_iter, total_iters, loss.data.cpu().numpy()))
                f.write('iter = ' + str(i_iter) + ', loss = ' + str(loss.data.cpu().numpy()) + ', lr = ' + str(lr) + '\n')
        torch.save(model.state_dict(), osp.join(args.snapshot_dir, 'lip_' + str(epoch) + '.pth'))
        end_time = timeit.default_timer()
        print('epoch: ', epoch, ', the time is: ', (end_time - start_time))


    end = timeit.default_timer()
    print(end - start, 'seconds')
    f.close()
Example #10
def main():
    args.time = get_currect_time()

    visualizer = Visualizer(args)
    log = Log(args)
    log.record_sys_param()
    log.record_file()

    """Set GPU Environment"""
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    trainloader = data.DataLoader(NYUDataset_crop_fast(args.data_list, args.random_scale, args.random_mirror, args.random_crop,
                args.batch_size, args.colorjitter),batch_size=args.batch_size,
                shuffle=True, num_workers=4, pin_memory=True)
    valloader = data.DataLoader(NYUDataset_val_full(args.data_val_list, args.random_scale, args.random_mirror, args.random_crop,
                       1), batch_size=8, shuffle=False, pin_memory=True)

    """Create Network"""
    deeplab = Res_Deeplab(num_classes=args.num_classes)
    print(deeplab)

    """Load pretrained Network"""
    saved_state_dict = torch.load(args.restore_from)
    print(args.restore_from)
    new_params = deeplab.state_dict().copy()
    for i in saved_state_dict:
        # Scale.layer5.conv2d_list.3.weight
        i_parts = i.split('.')
        # print i_parts
        # if not i_parts[1]=='layer5':
        if not i_parts[0] == 'fc':
            new_params['.'.join(i_parts[0:])] = saved_state_dict[i]

    deeplab.load_state_dict(new_params)

    model = deeplab
    model.cuda()
    model.train()
    model = model.float()
    model = DataParallelModel(model, device_ids=[0, 1])

    criterion = CriterionDSN()
    criterion = DataParallelCriterion(criterion)

    optimizer = optim.SGD([{'params': filter(lambda p: p.requires_grad, model.parameters()), 'lr': args.learning_rate}],
                          lr=args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)

    optimizer.zero_grad()

    i_iter = 0
    args.num_steps = len(trainloader) * args.epoch
    best_iou = 0.0
    total = sum([param.nelement() for param in model.parameters()])
    print('  + Number of params: %.2fM' % (total / 1e6))

    for epoch in range(args.epoch):
        ## Train one epoch
        model.train()
        for batch in trainloader:
            start = timeit.default_timer()
            i_iter = i_iter + 1
            images = batch['image'].cuda()
            labels = batch['seg'].cuda()
            HHAs = batch['HHA'].cuda()
            depths = batch['depth'].cuda()
            labels = torch.squeeze(labels,1).long()
            if (images.size(0) != args.batch_size):
                break
            optimizer.zero_grad()
            preds = model(images, HHAs, depths)
            loss = criterion(preds, labels)
            loss.backward()
            optimizer.step()
            if i_iter % 100 == 0:
                visualizer.add_scalar('learning_rate', args.learning_rate, i_iter)
                visualizer.add_scalar('loss', loss.data.cpu().numpy(), i_iter)

            current_lr = optimizer.param_groups[0]['lr']
            end = timeit.default_timer()
            log.log_string(
                '====================> epoch=%03d/%d, iter=%05d/%05d, loss=%.3f, %.3fs/iter, %02d:%02d:%02d, lr=%.6f' % (
                    epoch, args.epoch, i_iter, len(trainloader)*args.epoch, loss.data.cpu().numpy(), (end - start),
                    (int((end - start) * (args.num_steps - i_iter)) // 3600),
                    (int((end - start) * (args.num_steps - i_iter)) % 3600 // 60),
                    (int((end - start) * (args.num_steps - i_iter)) % 3600 % 60), current_lr))
        if (epoch+1) % 40 == 0:
            adjust_learning_rate(optimizer, i_iter, args)

        if epoch % 5 == 0:
            model.eval()
            confusion_matrix = np.zeros((args.num_classes, args.num_classes))
            loss_val = 0
            log.log_string("====================> evaluating")
            for batch_val in valloader:
                images_val = batch_val['image'].cuda()
                labels_val = batch_val['seg'].cuda()
                labels_val = torch.squeeze(labels_val,1).long()
                HHAs_val = batch_val['HHA'].cuda()
                depths_val = batch_val['depth'].cuda()

                with torch.no_grad():
                    preds_val = model(images_val, HHAs_val, depths_val)
                    loss_val += criterion(preds_val, labels_val)
                    preds_val = torch.cat([preds_val[i][0] for i in range(len(preds_val))], 0)
                    preds_val = F.interpolate(input=preds_val, size=(480, 640), mode='bilinear', align_corners=True)

                    preds_val = np.asarray(np.argmax(preds_val.cpu().numpy(), axis=1), dtype=np.uint8)

                    labels_val = np.asarray(labels_val.cpu().numpy(), dtype=np.int64)
                    ignore_index = labels_val != 255

                    labels_val = labels_val[ignore_index]
                    preds_val = preds_val[ignore_index]

                    confusion_matrix += get_confusion_matrix(labels_val, preds_val, args.num_classes)
            loss_val = loss_val / len(valloader)
            pos = confusion_matrix.sum(1)
            res = confusion_matrix.sum(0)
            tp = np.diag(confusion_matrix)

            IU_array = (tp / np.maximum(1.0, pos + res - tp))
            mean_IU = IU_array.mean()

            # getConfusionMatrixPlot(confusion_matrix)
            log.log_string('val loss ' + str(loss_val.cpu().numpy()) + ' meanIU ' + str(mean_IU) + ' IU_array ' + str(IU_array))

            visualizer.add_scalar('val loss', loss_val.cpu().numpy(), epoch)
            visualizer.add_scalar('meanIU', mean_IU, epoch)

            if mean_IU > best_iou:
                best_iou = mean_IU
                log.log_string('save best model ...')
                torch.save(deeplab.state_dict(),
                           osp.join(args.snapshot_dir, 'model', args.dataset + NAME + 'best_iu' + '.pth'))

        if epoch % 5 == 0:
            log.log_string('save model ...')
            torch.save(deeplab.state_dict(), osp.join(args.snapshot_dir, 'model', args.dataset + NAME + str(epoch) + '.pth'))
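
get_confusion_matrix above comes from the project's utilities and is not shown; a minimal sketch of what such a helper typically computes, consistent with how the matrix is consumed above (rows indexed by ground truth, columns by prediction). The implementation below is an assumption for illustration, not the project's code:

import numpy as np


def get_confusion_matrix(gt_label, pred_label, num_classes):
    """Sketch: accumulate a (num_classes x num_classes) confusion matrix, rows = ground truth."""
    gt = gt_label.astype(np.int64).ravel()
    pred = pred_label.astype(np.int64).ravel()
    index = gt * num_classes + pred                      # joint (gt, pred) bin index
    counts = np.bincount(index, minlength=num_classes ** 2)
    return counts.reshape(num_classes, num_classes)

With this layout, confusion_matrix.sum(1) gives per-class ground-truth counts, sum(0) the predicted counts, and the diagonal the true positives, which matches the IoU computation in the validation loop above.
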
Example #11
def main():
    """Create the model and start the evaluation process."""
    args = get_arguments()
    update_config(config, args)
    print(args)
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    gpus = [int(i) for i in args.gpu.split(',')]

    h, w = map(int, args.input_size.split(','))
    
    input_size = (h, w)

    model = get_cls_net(config=config, num_classes=args.num_classes, is_train=False)

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    print('-------Load Data', args.data_dir)
    if 'vehicle_parsing_dataset' in args.data_dir:
        parsing_dataset = VPDataSet(args.data_dir, args.dataset, crop_size=input_size, transform=transform)
    elif 'LIP' in args.data_dir:
        parsing_dataset = LIPDataSet(args.data_dir, args.dataset, crop_size=input_size, transform=transform)
    elif 'WeiyiAll' in args.data_dir:
        parsing_dataset = WYDataSet(args.data_dir, args.dataset, crop_size=input_size, transform=transform)
    
    num_samples = len(parsing_dataset)
    valloader = data.DataLoader(parsing_dataset, batch_size=args.batch_size * len(gpus), shuffle=False, pin_memory=True)

    print('-------Load Weight', args.restore_from)
    restore_from = args.restore_from
    state_dict = model.state_dict().copy()
    state_dict_old = torch.load(restore_from)
    state_dict_old = state_dict_old['state_dict']

    for key, nkey in zip(state_dict_old.keys(), state_dict.keys()):
        if key != nkey:
            # remove the 'module.' in the 'key'
            state_dict[key[7:]] = deepcopy(state_dict_old[key])
        else:
            state_dict[key] = deepcopy(state_dict_old[key])

    model.load_state_dict(state_dict)
    model = DataParallelModel(model)

    model.eval()
    model.cuda()

    print('-------Start Evaluation...')
    parsing_preds, scales, centers, time_list = valid(model, valloader, input_size, num_samples, len(gpus))
    mIoU, no_test_class = compute_mean_ioU(parsing_preds, scales, centers, args.num_classes, args.data_dir, list_path, input_size, dataset=args.dataset)
    print(mIoU)
    print('No test class : ', no_test_class)

    print('-------Saving Results', args.save_dir)
    write_results(parsing_preds, scales, centers, args.data_dir, args.dataset, args.save_dir, input_size=input_size)

    print('total time is ', sum(time_list))
    print('avg time is ', sum(time_list) / len(time_list))
Example #12
def main():
    """Create the model and start the training."""
    print(args)
    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    writer = SummaryWriter(args.snapshot_dir)
    gpus = [int(i) for i in args.gpu.split(',')]
    if not args.gpu == 'None':
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    h, w = map(int, args.input_size.split(','))
    input_size = [h, w]

    # cudnn related setting
    cudnn.enabled = True
    cudnn.benchmark = True
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.enabled = True

    deeplab = get_cls_net(config=config, is_train=True)
    # model = DataParallelModel(deeplab)

    print('-------Load Weight', args.restore_from)
    saved_state_dict = torch.load(args.restore_from)

    if args.start_epoch > 0:
        model = DataParallelModel(deeplab)
        model.load_state_dict(saved_state_dict['state_dict'])
    else:
        new_params = deeplab.state_dict().copy()
        state_dict_pretrain = saved_state_dict
        for state_name in state_dict_pretrain:
            if state_name in new_params:
                new_params[state_name] = state_dict_pretrain[state_name]
                print('LOAD', state_name)
            else:
                print('NOT LOAD', state_name)
        deeplab.load_state_dict(new_params)
        model = DataParallelModel(deeplab)
    print('-------Load Weight', args.restore_from)

    model.cuda()

    criterion = CriterionAll(loss_type=args.loss)
    criterion = DataParallelCriterion(criterion)
    criterion.cuda()

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    print("-------Loading data...")
    if 'vehicle_parsing_dataset' in args.data_dir:
        parsing_dataset = VPDataSet(args.data_dir,
                                    args.dataset,
                                    crop_size=input_size,
                                    transform=transform)
    elif 'LIP' in args.data_dir:
        parsing_dataset = LIPDataSet(args.data_dir,
                                     args.dataset,
                                     crop_size=input_size,
                                     transform=transform)
    print("Data dir : ", args.data_dir)
    print("Dataset : ", args.dataset)
    trainloader = data.DataLoader(parsing_dataset,
                                  batch_size=args.batch_size * len(gpus),
                                  shuffle=True,
                                  num_workers=8,
                                  pin_memory=True)
    '''
    list_map = []

    for part in deeplab.path_list:
        list_map = list_map + list(map(id, part.parameters()))
    
    base_params = filter(lambda p: id(p) not in list_map,
                         deeplab.parameters())
    params_list = []
    params_list.append({'params': base_params, 'lr':args.learning_rate*0.1})
    for part in deeplab.path_list:
        params_list.append({'params': part.parameters()})
    print ('len(params_list)',len(params_list))
    '''

    optimizer = optim.SGD(model.parameters(),
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    if args.start_epoch > 0:
        optimizer.load_state_dict(saved_state_dict['optimizer'])
        print('========Load Optimizer', args.restore_from)

    total_iters = args.epochs * len(trainloader)
    for epoch in range(args.start_epoch, args.epochs):
        model.train()
        for i_iter, batch in enumerate(trainloader):
            i_iter += len(trainloader) * epoch
            lr = adjust_learning_rate(optimizer, i_iter, total_iters)

            images, labels, _ = batch
            labels = labels.long().cuda(non_blocking=True)
            preds = model(images)

            loss = criterion(preds, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if i_iter % 100 == 0:
                writer.add_scalar('learning_rate', lr, i_iter)
                writer.add_scalar('loss', loss.data.cpu().numpy(), i_iter)

            print(
                f'epoch = {epoch}, iter = {i_iter}/{total_iters}, lr={lr:.6f}, loss = {loss.data.cpu().numpy():.6f}'
            )

        if (epoch + 1) % args.save_step == 0 or epoch == args.epochs:
            time.sleep(10)
            print("-------Saving checkpoint...")
            save_checkpoint(model, epoch, optimizer)

    time.sleep(10)
    save_checkpoint(model, epoch, optimizer)
    end = timeit.default_timer()
    print(end - start, 'seconds')
Example #13
def main():
    writer = SummaryWriter(args.snapshot_dir)

    if not args.gpu == 'None':
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    cudnn.enabled = True

    deeplab = Res_Deeplab(num_classes=args.num_classes)
    print(deeplab)

    saved_state_dict = torch.load(args.restore_from)
    new_params = deeplab.state_dict().copy()
    for i in saved_state_dict:
        i_parts = i.split('.')
        if not i_parts[0] == 'fc':
            new_params['.'.join(i_parts[0:])] = saved_state_dict[i]

    deeplab.load_state_dict(new_params)

    model = DataParallelModel(deeplab)
    model.train()
    model.float()
    # model.apply(set_bn_momentum)
    model.cuda()

    criterion = CriterionDSN()  # CriterionCrossEntropy()
    criterion = DataParallelCriterion(criterion)
    criterion.cuda()

    cudnn.benchmark = True

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    trainloader = data.DataLoader(
        ModaDataset(
            args.data_dir,
            args.list_path,
            max_iters=args.num_steps * args.batch_size,
            # mirror=args.random_mirror,
            mirror=True,
            rotate=True,
            mean=IMG_MEAN),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=4,
        pin_memory=True)

    optimizer = optim.SGD(
        [{
            'params': filter(lambda p: p.requires_grad, deeplab.parameters()),
            'lr': args.learning_rate
        }],
        lr=args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay)
    optimizer.zero_grad()

    print('start training!')
    for i_iter, batch in enumerate(trainloader):
        i_iter += args.start_iters
        images, labels, _, _ = batch
        images = images.cuda()
        labels = labels.long().cuda()

        optimizer.zero_grad()
        lr = adjust_learning_rate(optimizer, i_iter)
        # preds = model(images, args.recurrence)
        preds = model(images)

        loss = criterion(preds, labels)
        loss.backward()
        optimizer.step()

        if i_iter % 100 == 0:
            writer.add_scalar('learning_rate', lr, i_iter)
            writer.add_scalar('loss', loss.data.cpu().numpy(), i_iter)

        # if i_iter % 5000 == 0:
        #     images_inv = inv_preprocess(images, args.save_num_images, IMG_MEAN)
        #     labels_colors = decode_labels(labels, args.save_num_images, args.num_classes)
        #     if isinstance(preds, list):
        #         preds = preds[0]
        #     preds_colors = decode_predictions(preds, args.save_num_images, args.num_classes)
        #     for index, (img, lab) in enumerate(zip(images_inv, labels_colors)):
        #         writer.add_image('Images/'+str(index), img, i_iter)
        #         writer.add_image('Labels/'+str(index), lab, i_iter)
        #         writer.add_image('preds/'+str(index), preds_colors[index], i_iter)

        print('iter = {} of {} completed, loss = {}'.format(
            i_iter, args.num_steps,
            loss.data.cpu().numpy()))

        if i_iter >= args.num_steps - 1:
            print('save model ...')
            torch.save(
                deeplab.state_dict(),
                osp.join(args.snapshot_dir,
                         'CS_scenes_' + str(args.num_steps) + '.pth'))
            break

        if i_iter % args.save_pred_every == 0:
            print('taking snapshot ...')
            torch.save(
                deeplab.state_dict(),
                osp.join(args.snapshot_dir,
                         'CS_scenes_' + str(i_iter) + '.pth'))

    end = timeit.default_timer()
    print(end - start, 'seconds')
Example #14
def main():
    """Create the model and start the training."""

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    timestamp = args.date
    writer = SummaryWriter(os.path.join(args.snapshot_dir, timestamp))
    gpus = [int(i) for i in args.gpu.split(',')]
    if not args.gpu == 'None':
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    h, w = map(int, args.input_size.split(','))
    input_size = [h, w]

    cudnn.enabled = True
    # cudnn related setting
    cudnn.benchmark = True
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.enabled = True

    deeplab = Res_Deeplab(num_classes=args.num_classes)

    # dump_input = torch.rand((args.batch_size, 3, input_size[0], input_size[1]))
    # writer.add_graph(deeplab.cuda(), dump_input.cuda(), verbose=False)

    model = DataParallelModel(deeplab)
    if args.resume:
        # when restore form the same network, it is useful here
        checkpoint = torch.load(args.restore_from)
        model.load_state_dict(checkpoint['net'])
        args.start_epoch = checkpoint['epoch']
    else:
        saved_state_dict = torch.load(args.restore_from)
        new_params = deeplab.state_dict().copy()
        for i in saved_state_dict:
            i_parts = i.split('.')
            if not i_parts[0] == 'fc':
                new_params['.'.join(i_parts[0:])] = saved_state_dict[i]
        deeplab.load_state_dict(new_params)

    model.cuda()

    criterion = CriterionAll()
    criterion = DataParallelCriterion(criterion)
    criterion.cuda()

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    trainloader = data.DataLoader(LIPDataSet(args.data_dir,
                                             args.dataset,
                                             crop_size=input_size,
                                             transform=transform),
                                  batch_size=args.batch_size * len(gpus),
                                  shuffle=True,
                                  num_workers=2,
                                  pin_memory=True)
    lip_dataset = LIPDataSet(args.data_dir,
                             'val',
                             crop_size=input_size,
                             transform=transform)
    num_samples = len(lip_dataset)

    valloader = data.DataLoader(lip_dataset,
                                batch_size=args.batch_size * len(gpus),
                                shuffle=False,
                                pin_memory=True)

    optimizer = optim.SGD(model.parameters(),
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    optimizer.zero_grad()

    total_iters = args.epochs * len(trainloader)
    log = Logger(os.path.join(args.log_dir, '{}_train.log'.format(timestamp)),
                 level='debug')
    for epoch in range(args.start_epoch, args.epochs):
        model.train()
        #lr = adjust_learning_rate_pose(optimizer, epoch)
        for i_iter, batch in enumerate(trainloader):
            i_iter += len(trainloader) * epoch
            lr = adjust_learning_rate(optimizer, i_iter, total_iters)

            images, labels, r1, r2, r3, r4, l0, l1, l2, l3, l4, l5, _ = batch
            labels = labels.long().cuda(non_blocking=True)
            r1 = r1.long().cuda(non_blocking=True)
            r2 = r2.long().cuda(non_blocking=True)
            r3 = r3.long().cuda(non_blocking=True)
            r4 = r4.long().cuda(non_blocking=True)
            l0 = l0.long().cuda(non_blocking=True)
            l1 = l1.long().cuda(non_blocking=True)
            l2 = l2.long().cuda(non_blocking=True)
            l3 = l3.long().cuda(non_blocking=True)
            l4 = l4.long().cuda(non_blocking=True)
            l5 = l5.long().cuda(non_blocking=True)

            preds = model(images)

            loss = criterion(
                preds, [[labels], [r1, r2, r3, r4], [l0, l1, l2, l3, l4, l5]])
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if i_iter % 100 == 0:
                writer.add_scalar('learning_rate', lr, i_iter)
                writer.add_scalar('loss', loss.data.cpu().numpy(), i_iter)

            if i_iter % 10 == 0:
                log.logger.info(
                    'epoch = {} iter = {} of {} completed, lr = {}, loss = {}'.
                    format(epoch, i_iter, total_iters, lr,
                           loss.data.cpu().numpy()))
        parsing_preds, scales, centers = valid(model, valloader, input_size,
                                               num_samples, len(gpus))
        mIoU = compute_mean_ioU(parsing_preds, scales, centers,
                                args.num_classes, args.data_dir, input_size)

        log.logger.info('epoch = {}'.format(epoch))
        log.logger.info(str(mIoU))
        writer.add_scalars('mIoU', mIoU, epoch)

        # save the model snapshot
        state = {"net": model.module.state_dict(), "epoch": epoch}

        torch.save(
            state,
            osp.join(args.snapshot_dir, timestramp,
                     'LIP_epoch_' + str(epoch) + '.pth'))

    end = timeit.default_timer()
    print(end - start, 'seconds')
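
# NOTE (sketch): the training loop above calls adjust_learning_rate(), which is not
# defined in this listing. Below is a minimal sketch of the commonly used polynomial
# ("poly") decay schedule; the base_lr default and power=0.9 are assumptions, not
# values taken from this repository.
def adjust_learning_rate_sketch(optimizer, i_iter, total_iters, base_lr=0.007, power=0.9):
    """Polynomial learning-rate decay applied to every parameter group (sketch)."""
    lr = base_lr * ((1.0 - float(i_iter) / total_iters) ** power)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr
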
Example #15
def main():
    """Create the model and start the training."""
    print(args)
    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    writer = SummaryWriter(args.snapshot_dir)
    gpus = [int(i) for i in args.gpu.split(',')]
    if not args.gpu == 'None':
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    h, w = map(int, args.input_size.split(','))
    input_size = [h, w]

    # cudnn related setting
    cudnn.enabled = True
    cudnn.benchmark = True
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.enabled = True

    deeplab = get_resnet101_asp_oc_dsn(num_classes=args.num_classes)

    # dump_input = torch.rand((args.batch_size, 3, input_size[0], input_size[1]))
    # writer.add_graph(deeplab.cuda(), dump_input.cuda(), verbose=False)

    saved_state_dict = torch.load(args.restore_from)

    if args.start_epoch > 0:
        model = DataParallelModel(deeplab)
        #model = torch.nn.parallel.DistributedDataParallel(deeplab)
        model.load_state_dict(saved_state_dict['state_dict'])
    else:
        new_params = deeplab.state_dict().copy()
        state_dict_pretrain = saved_state_dict  #['state_dict']

        for state_name in state_dict_pretrain:
            # splits = i.split('.')
            # state_name = '.'.join(splits[1:])
            if state_name in new_params:
                new_params[state_name] = state_dict_pretrain[state_name]
            else:
                print('NOT LOAD', state_name)
        deeplab.load_state_dict(new_params)
        model = DataParallelModel(deeplab)
        #model = torch.nn.parallel.DistributedDataParallel(deeplab)
    print('-------Load Weight', args.restore_from)

    model.cuda()

    criterion = LovaszSoftmaxDSN(input_size)
    print('LOSS1: LovaszSoftmaxDSN')
    criterion = DataParallelCriterion(criterion)
    criterion.cuda()

    criterion_softmax = CriterionDSN()
    print('LOSS2: CriterionDSN')
    criterion_softmax = DataParallelCriterion(criterion_softmax)
    criterion_softmax.cuda()

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    trainloader = data.DataLoader(LIPDataSet(args.data_dir,
                                             args.dataset,
                                             crop_size=input_size,
                                             transform=transform,
                                             list_path=args.list_path),
                                  batch_size=args.batch_size * len(gpus),
                                  shuffle=True,
                                  num_workers=4,
                                  pin_memory=True)

    num_samples = 5000
    '''
    list_map = []

    for part in deeplab.path_list:
        list_map = list_map + list(map(id, part.parameters()))
    
    base_params = filter(lambda p: id(p) not in list_map,
                         deeplab.parameters())
    params_list = []
    params_list.append({'params': base_params, 'lr':args.learning_rate*0.1})
    for part in deeplab.path_list:
        params_list.append({'params': part.parameters()})
    print ('len(params_list)',len(params_list))
    '''
    optimizer = optim.SGD(model.parameters(),
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    if args.start_epoch > 0:
        optimizer.load_state_dict(saved_state_dict['optimizer'])
        print('========Load Optimizer', args.restore_from)

    total_iters = args.epochs * len(trainloader)
    for epoch in range(args.start_epoch, args.epochs):
        model.train()
        for i_iter, batch in enumerate(trainloader):
            i_iter += len(trainloader) * epoch
            lr = adjust_learning_rate(optimizer, i_iter, total_iters)

            images, labels, _ = batch
            labels = labels.long().cuda(non_blocking=True)
            preds = model(images)

            loss1 = criterion(preds, labels)
            loss2 = criterion_softmax(preds, labels)
            loss = loss1 + loss2
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if i_iter % 100 == 0:
                writer.add_scalar('learning_rate', lr, i_iter)
                writer.add_scalar('loss', loss.data.cpu().numpy(), i_iter)

            # if i_iter % 500 == 0:

            # images_inv = inv_preprocess(images, args.save_num_images)
            # labels_colors = decode_parsing(labels, args.save_num_images, args.num_classes, is_pred=False)
            # edges_colors = decode_parsing(edges, args.save_num_images, 2, is_pred=False)

            # if isinstance(preds, list):
            # preds = preds[0]
            # preds_colors = decode_parsing(preds[0][-1], args.save_num_images, args.num_classes, is_pred=True)
            # pred_edges = decode_parsing(preds[1][-1], args.save_num_images, 2, is_pred=True)

            # img = vutils.make_grid(images_inv, normalize=False, scale_each=True)
            # lab = vutils.make_grid(labels_colors, normalize=False, scale_each=True)
            # pred = vutils.make_grid(preds_colors, normalize=False, scale_each=True)
            # edge = vutils.make_grid(edges_colors, normalize=False, scale_each=True)
            # pred_edge = vutils.make_grid(pred_edges, normalize=False, scale_each=True)

            # writer.add_image('Images/', img, i_iter)
            # writer.add_image('Labels/', lab, i_iter)
            # writer.add_image('Preds/', pred, i_iter)
            # writer.add_image('Edges/', edge, i_iter)
            # writer.add_image('PredEdges/', pred_edge, i_iter)

            print(
                'epoch = {}, iter = {} of {} completed, lr = {:.4f}, loss = {:.4f}, IoU_loss = {:.4f}, BCE_loss = {:.4f}'
                .format(epoch, i_iter, total_iters, lr,
                        loss.data.cpu().numpy(),
                        loss1.data.cpu().numpy(),
                        loss2.data.cpu().numpy()))
        if epoch % args.save_step == 0 or epoch == args.epochs:
            time.sleep(10)
            save_checkpoint(model, epoch, optimizer)

        # parsing_preds, scales, centers = valid(model, valloader, input_size,  num_samples, len(gpus))
        # mIoU = compute_mean_ioU(parsing_preds, scales, centers, args.num_classes, args.data_dir, input_size)
        # print(mIoU)
        # writer.add_scalars('mIoU', mIoU, epoch)

    time.sleep(10)
    save_checkpoint(model, epoch, optimizer)
    end = timeit.default_timer()
    print(end - start, 'seconds')
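
# NOTE (sketch): save_checkpoint() is called above but its definition is not part of
# this listing. A minimal version, assuming it stores the model and optimizer state
# plus the epoch index (the snapshot_dir default below is illustrative):
import os
import torch

def save_checkpoint_sketch(model, epoch, optimizer, snapshot_dir='./snapshots'):
    """Persist model/optimizer state so training can be resumed later (sketch)."""
    os.makedirs(snapshot_dir, exist_ok=True)
    state = {
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'current_epoch': epoch,
    }
    torch.save(state, os.path.join(snapshot_dir, 'checkpoint_epoch_{}.pth'.format(epoch)))
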
def main():
    """Create the model and start the evaluation process."""
    args = get_arguments()
    update_config(config, args)
    print(args)
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    gpus = [int(i) for i in args.gpu.split(',')]

    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)
    image_size = (2788, 1400)

    model = get_cls_net(config=config,
                        num_classes=args.num_classes,
                        is_train=False)

    transform = build_transforms(args)

    print('-------Load Data : ', args.data_dir)
    parsing_dataset = WYDataSet(args.data_dir,
                                args.dataset,
                                crop_size=input_size,
                                transform=transform)
    list_path = os.path.join(args.data_dir, parsing_dataset.list_file)

    num_samples = len(parsing_dataset)
    if 'test_no_label' not in args.dataset:
        valloader = data.DataLoader(parsing_dataset,
                                    batch_size=args.batch_size * len(gpus),
                                    shuffle=False,
                                    collate_fn=fast_collate_fn_mask,
                                    pin_memory=True)
    else:
        valloader = data.DataLoader(parsing_dataset,
                                    batch_size=args.batch_size * len(gpus),
                                    shuffle=False,
                                    collate_fn=fast_collate_fn,
                                    pin_memory=True)

    print('-------Load Weight', args.restore_from)
    restore_from = args.restore_from
    state_dict = model.state_dict().copy()
    state_dict_old = torch.load(restore_from)
    state_dict_old = state_dict_old['state_dict']

    for key, nkey in zip(state_dict_old.keys(), state_dict.keys()):
        if key != nkey:
            # remove the 'module.' in the 'key'
            state_dict[key[7:]] = deepcopy(state_dict_old[key])
        else:
            state_dict[key] = deepcopy(state_dict_old[key])

    model.load_state_dict(state_dict)
    model = DataParallelModel(model)

    model.eval()
    model.cuda()

    print('-------Start Evaluation...')
    parsing_preds, is_rotated, during_time = valid(args, model, valloader,
                                                   image_size, input_size,
                                                   num_samples, len(gpus))
    if 'test_no_label' not in args.dataset:
        mIoU, no_test_class = compute_mean_ioU_wy(parsing_preds,
                                                  is_rotated,
                                                  args.num_classes,
                                                  args.data_dir,
                                                  input_size,
                                                  dataset=args.dataset,
                                                  list_path=list_path)
        print(mIoU)
        print('No test class : ', no_test_class)

    print('-------Saving Results', args.save_dir)
    write_results_wy(parsing_preds,
                     is_rotated,
                     args.data_dir,
                     args.dataset,
                     args.save_dir,
                     input_size=input_size,
                     list_path=list_path)

    print('total time is ', during_time)
    print('avg time is ', during_time / num_samples)
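
# NOTE (sketch): the key-remapping loop above relies on the old and new state dicts
# iterating in the same order. An order-independent alternative that simply strips an
# optional 'module.' prefix left by DataParallel could look like this (the function
# name is illustrative, not from this repository):
def strip_module_prefix(state_dict_old):
    """Return a copy of state_dict_old with any leading 'module.' removed from keys."""
    new_state = {}
    for key, value in state_dict_old.items():
        new_key = key[len('module.'):] if key.startswith('module.') else key
        new_state[new_key] = value
    return new_state
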
def main():
    """Create the model and start the training."""

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    writer = SummaryWriter(args.snapshot_dir)
    gpus = [int(i) for i in args.gpu.split(',')]
    if not args.gpu == 'None':
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    h, w = map(int, args.input_size.split(','))
    input_size = [h, w]

    cudnn.enabled = True
    # cudnn related setting
    cudnn.benchmark = True
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.enabled = True
 

    deeplab = Res_Deeplab(num_classes=args.num_classes)
    print(type(deeplab))
    

    # dump_input = torch.rand((args.batch_size, 3, input_size[0], input_size[1]))
    # writer.add_graph(deeplab.cuda(), dump_input.cuda(), verbose=False)


    """
    HOW DOES IT LOAD ONLY RESNET101 AND NOT THE REST OF THE NET?
    """
    # UNCOMMENT THE FOLLOWING BLOCK TO INITIALIZE THE WEIGHTS
    
    # Load resnet101 weights trained on imagenet and copy it in new_params
    saved_state_dict = torch.load(args.restore_from)
    new_params = deeplab.state_dict().copy()

    # CHECK IF WEIGHTS BELONG OR NOT TO THE MODEL
    # belongs = 0
    # doesnt_b = 0
    # for key in saved_state_dict:
    #     if key in new_params:
    #         belongs+=1 
    #         print('key=', key)
    #     else:
    #         doesnt_b+=1
    #         # print('key=', key)
    # print('belongs = ', belongs, 'doesnt_b=', doesnt_b)
    # print('res101 len',len(saved_state_dict))
    # print('new param len',len(new_params))


    for i in saved_state_dict:
        i_parts = i.split('.')
        # print('i_parts:', i_parts)
        # exp : i_parts: ['layer2', '3', 'bn2', 'running_mean']

        # The deeplab module names differ from the ones in the args.restore_from checkpoint
        if i_parts[0] == 'module' and not i_parts[1] == 'fc' :
            if new_params['.'.join(i_parts[1:])].size() == saved_state_dict[i].size():
                new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
        else:
            if not i_parts[0] == 'fc':
                if new_params['.'.join(i_parts[0:])].size() == saved_state_dict[i].size():
                    new_params['.'.join(i_parts[0:])] = saved_state_dict[i]
 
    deeplab.load_state_dict(new_params)
    
    # UNCOMMENT UNTIL HERE

    model = DataParallelModel(deeplab)
    model.cuda()

    criterion = CriterionAll()
    criterion = DataParallelCriterion(criterion)
    criterion.cuda()

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    trainloader = data.DataLoader(cartoonDataSet(args.data_dir, args.dataset, crop_size=input_size, transform=transform),
                                  batch_size=args.batch_size * len(gpus), shuffle=True, num_workers=8,
                                  pin_memory=True)

    #mIoU for Val set
    val_dataset = cartoonDataSet(args.data_dir, 'val', crop_size=input_size, transform=transform)
    numVal_samples = len(val_dataset)
    
    valloader = data.DataLoader(val_dataset, batch_size=args.batch_size * len(gpus),
                                shuffle=False, pin_memory=True)

    #mIoU for trainTest set
    trainTest_dataset = cartoonDataSet(args.data_dir, 'trainTest', crop_size=input_size, transform=transform)
    numTest_samples = len(trainTest_dataset)
    
    testloader = data.DataLoader(trainTest_dataset, batch_size=args.batch_size * len(gpus),
                                shuffle=False, pin_memory=True)


    optimizer = optim.SGD(
        model.parameters(),
        lr=args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )
    optimizer.zero_grad()
    # valBatch_idx = 0
    total_iters = args.epochs * len(trainloader)
    for epoch in range(args.start_epoch, args.epochs):
        model.train()
        for i_iter, batch in enumerate(trainloader):
            i_iter += len(trainloader) * epoch
            lr = adjust_learning_rate(optimizer, i_iter, total_iters)
            images, labels, _, _ = batch
            labels = labels.long().cuda(non_blocking=True)
            preds = model(images)
            # print('preds size in batch', len(preds))
            # print('Size of Segmentation1 tensor output:',preds[0][0].size())
            # print('Segmentation2 tensor output:',preds[0][-1].size())
            # print('Size of Edge tensor output:',preds[1][-1].size())
            loss = criterion(preds, [labels])
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if i_iter % 100 == 0:
                writer.add_scalar('learning_rate', lr, i_iter)
                writer.add_scalar('loss', loss.data.cpu().numpy(), i_iter)

            if i_iter % 500 == 0:
                # print('In iter%500 Size of Segmentation2 GT: ', labels.size())
                # print('In iter%500 Size of edges GT: ', edges.size())
                images_inv = inv_preprocess(images, args.save_num_images)
                # print(labels[0])
                labels_colors = decode_parsing(labels, args.save_num_images, args.num_classes, is_pred=False)
               
                # if isinstance(preds, list):
                #     print(len(preds))
                #     preds = preds[0]
                
                # val_images, _ = valloader[valBatch_idx]
                # valBatch_idx += 1
                # val_sampler = torch.utils.data.RandomSampler(val_dataset,replacement=True, num_samples=args.batch_size * len(gpus))
                # sample_valloader = data.DataLoader(val_dataset, batch_size=args.batch_size * len(gpus),
                #                 shuffle=False, sampler=val_sampler , pin_memory=True)
                # val_images, _ = sample_valloader
                # preds_val = model(val_images)

                # With multiple GPUs, preds is returned as a list, so extract the tensor from it
                if len(gpus) > 1:
                    preds = preds[0]
                    # preds_val = preds_val[0]

                # print('In iter%500 Size of Segmentation2 tensor output:',preds[0][0][-1].size())
                # preds[0][-1] cause model returns [[seg1, seg2], [edge]]
                preds_colors = decode_parsing(preds[0][-1], args.save_num_images, args.num_classes, is_pred=True)
                # preds_val_colors = decode_parsing(preds_val[0][-1], args.save_num_images, args.num_classes, is_pred=True)
                # print("preds type:",type(preds)) #list
                # print("preds shape:", len(preds)) #2
                # hello = preds[0][-1]
                # print("preds type [0][-1]:",type(hello)) #<class 'torch.Tensor'>
                # print("preds len [0][-1]:", len(hello)) #12
                # print("preds len [0][-1]:", hello.shape)#torch.Size([12, 8, 96, 96])
                # print("preds color's type:",type(preds_colors))#torch.tensor
                # print("preds color's shape:",preds_colors.shape) #([2,3,96,96])

                # print('IMAGE', images_inv.size())
                img = vutils.make_grid(images_inv, normalize=False, scale_each=True)
                lab = vutils.make_grid(labels_colors, normalize=False, scale_each=True)
                pred = vutils.make_grid(preds_colors, normalize=False, scale_each=True)
                
                
                # print("preD type:",type(pred)) #<class 'torch.Tensor'>
                # print("preD len:", len(pred))# 3
                # print("preD shape:", pred.shape)#torch.Size([3, 100, 198])

                # 1=head red, 2=body green, 3=left_arm yellow, 4=right_arm blue, 5=left_leg pink
                # 6=right_leg sky blue, 7=tail grey

                writer.add_image('Images/', img, i_iter)
                writer.add_image('Labels/', lab, i_iter)
                writer.add_image('Preds/', pred, i_iter)
                
               
            print('iter = {} of {} completed, loss = {}'.format(i_iter, total_iters, loss.data.cpu().numpy()))
        
        print('end epoch:', epoch)
        
        if epoch%99 == 0:
            torch.save(model.state_dict(), osp.join(args.snapshot_dir, 'DFPnet_epoch_' + str(epoch) + '.pth'))
        
        if epoch%5 == 0 and epoch<500:
            # mIou for Val set
            parsing_preds, scales, centers = valid(model, valloader, input_size,  numVal_samples, len(gpus))
            '''
            Insert a sample prediction of a val image into TensorBoard
            '''
            # generate a random index in [0, len(parsing_preds) - 1]
            sample = random.randint(0, len(parsing_preds) - 1)

            # loader resizes the image and converts it to a tensor
            loader = transforms.Compose([
                transforms.Resize(input_size),
                transforms.ToTensor()
            ])

            # get val segmentation path and open the file
            list_path = os.path.join(args.data_dir, 'val' + '_id.txt')
            val_id = [i_id.strip() for i_id in open(list_path)]
            gt_path = os.path.join(args.data_dir, 'val' + '_segmentations', val_id[sample] + '.png')
            gt = Image.open(gt_path)
            gt = loader(gt)
            # scale gt back from [0, 1] to [0, 255]
            gt = (gt * 255).int()
            # convert the prediction from ndarray to a PIL image, then to a tensor
            display_preds = Image.fromarray(parsing_preds[sample])
            tensor_display_preds = transforms.ToTensor()(display_preds)
            # scale the prediction back from [0, 1] to [0, 255]
            tensor_display_preds = (tensor_display_preds * 255).int()
            # colorize them
            val_preds_colors = decode_parsing(tensor_display_preds, num_images=1, num_classes=args.num_classes, is_pred=False)
            gt_color = decode_parsing(gt, num_images=1, num_classes=args.num_classes, is_pred=False)
            # put in grid 
            pred_val = vutils.make_grid(val_preds_colors, normalize=False, scale_each=True)
            gt_val = vutils.make_grid(gt_color, normalize=False, scale_each=True)
            writer.add_image('Preds_val/', pred_val, epoch)
            writer.add_image('Gt_val/', gt_val, epoch)

            mIoUval = compute_mean_ioU(parsing_preds, scales, centers, args.num_classes, args.data_dir, input_size, 'val')

            print('For val set', mIoUval)
            writer.add_scalars('mIoUval', mIoUval, epoch)

            # mIou for trainTest set
            parsing_preds, scales, centers = valid(model, testloader, input_size,  numTest_samples, len(gpus))

            mIoUtest = compute_mean_ioU(parsing_preds, scales, centers, args.num_classes, args.data_dir, input_size, 'trainTest')

            print('For trainTest set', mIoUtest)
            writer.add_scalars('mIoUtest', mIoUtest, epoch)

        else:
            if epoch%20 == 0 and epoch>=500:
                # mIou for Val set
                parsing_preds, scales, centers = valid(model, valloader, input_size,  numVal_samples, len(gpus))
                '''
                Insert a sample prediction of a val image into TensorBoard
                '''
                # generate a random index in [0, len(parsing_preds) - 1]
                sample = random.randint(0, len(parsing_preds) - 1)

                # loader resizes the image and converts it to a tensor
                loader = transforms.Compose([
                    transforms.Resize(input_size),
                    transforms.ToTensor()
                ])

                # get val segmentation path and open the file
                list_path = os.path.join(args.data_dir, 'val' + '_id.txt')
                val_id = [i_id.strip() for i_id in open(list_path)]
                gt_path = os.path.join(args.data_dir, 'val' + '_segmentations', val_id[sample] + '.png')
                gt = Image.open(gt_path)
                gt = loader(gt)
                # scale gt back from [0, 1] to [0, 255]
                gt = (gt * 255).int()
                # convert the prediction from ndarray to a PIL image, then to a tensor
                display_preds = Image.fromarray(parsing_preds[sample])
                tensor_display_preds = transforms.ToTensor()(display_preds)
                # scale the prediction back from [0, 1] to [0, 255]
                tensor_display_preds = (tensor_display_preds * 255).int()
                # colorize them
                val_preds_colors = decode_parsing(tensor_display_preds, num_images=1, num_classes=args.num_classes, is_pred=False)
                gt_color = decode_parsing(gt, num_images=1, num_classes=args.num_classes, is_pred=False)
                # put in grid 
                pred_val = vutils.make_grid(val_preds_colors, normalize=False, scale_each=True)
                gt_val = vutils.make_grid(gt_color, normalize=False, scale_each=True)
                writer.add_image('Preds_val/', pred_val, epoch)
                writer.add_image('Gt_val/', gt_val, epoch)

                mIoUval = compute_mean_ioU(parsing_preds, scales, centers, args.num_classes, args.data_dir, input_size, 'val')

                print('For val set', mIoUval)
                writer.add_scalars('mIoUval', mIoUval, epoch)

                # mIou for trainTest set
                parsing_preds, scales, centers = valid(model, testloader, input_size,  numTest_samples, len(gpus))

                mIoUtest = compute_mean_ioU(parsing_preds, scales, centers, args.num_classes, args.data_dir, input_size, 'trainTest')

                print('For trainTest set', mIoUtest)
                writer.add_scalars('mIoUtest', mIoUtest, epoch)

    end = timeit.default_timer()
    print(end - start, 'seconds')
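
# NOTE (sketch): decode_parsing() is used throughout these scripts to colorize label
# maps for TensorBoard, but its body is not included in this listing. A minimal
# stand-in that maps each class index to a fixed RGB color; the palette and the
# argmax handling of predictions are assumptions:
import torch

def decode_parsing_sketch(labels, num_images=1, num_classes=8, is_pred=False):
    """Turn (N, H, W) labels, or (N, C, H, W) logits when is_pred=True,
    into a (num_images, 3, H, W) uint8 color tensor."""
    if is_pred:
        labels = labels.argmax(dim=1)          # logits -> class indices
    labels = labels[:num_images].detach().cpu().long()
    # simple deterministic palette: one RGB triple per class
    palette = torch.tensor(
        [[(37 * c) % 256, (97 * c) % 256, (157 * c) % 256] for c in range(num_classes)],
        dtype=torch.uint8)
    colored = palette[labels]                  # (num_images, H, W, 3)
    return colored.permute(0, 3, 1, 2)         # (num_images, 3, H, W)
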
Example #18
File: train.py    Project: chjr8/CorrPM
def main():
    """start multiprocessing method"""
    try:
        mp.set_start_method('spawn')
    except RuntimeError:
        pass
    """Create the model and start the training."""
    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    gpus = [int(i) for i in args.gpu.split(',')]
    if not args.gpu == 'None':
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    h, w = map(int, args.input_size.split(','))
    input_size = [h, w]

    cudnn.enabled = True
    # cudnn related setting
    cudnn.benchmark = True
    torch.backends.cudnn.deterministic = True  #False
    torch.backends.cudnn.enabled = True
    torch.cuda.empty_cache()

    deeplab = CorrPM_Model(num_classes=args.num_classes)
    saved_state_dict = torch.load(args.restore_from)
    new_params = deeplab.state_dict().copy()
    print("Loading the pre-trained ResNet-101 weights...")
    for i in saved_state_dict:
        i_parts = i.split('.')
        if not i_parts[0] == 'fc':
            new_params['.'.join(i_parts[0:])] = saved_state_dict[i]

    deeplab.load_state_dict(new_params)
    criterion = CriterionPoseEdge()
    criterion = DataParallelCriterion(criterion)
    criterion.cuda()

    snapshot_fname = osp.join(args.snapshot_dir, 'LIP_epoch_')
    snapshot_best_fname = osp.join(args.snapshot_dir, 'LIP_best.pth')

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    dataset_lip = LIPDataSet(args.data_dir,
                             args.pose_anno_file,
                             args.dataset,
                             crop_size=input_size,
                             dataset_list=args.dataset_list,
                             transform=transform)
    trainloader = data.DataLoader(dataset_lip,
                                  batch_size=args.batch_size * len(gpus),
                                  shuffle=True,
                                  num_workers=1,
                                  pin_memory=True)
    lip_dataset = LIPDataSet(args.data_dir,
                             VAL_ANNO_FILE,
                             'val',
                             crop_size=input_size,
                             dataset_list=args.dataset_list,
                             transform=transform)
    num_samples = len(lip_dataset)
    valloader = data.DataLoader(lip_dataset,
                                batch_size=args.batch_size * len(gpus),
                                shuffle=False,
                                num_workers=0,
                                pin_memory=True)

    optimizer = optim.SGD(deeplab.parameters(),
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    model = DataParallelModel(deeplab)
    model.cuda()

    optimizer.zero_grad()

    total_iters = args.epochs * len(trainloader)
    total_iter_per_batch = len(trainloader)
    print("total iters:", total_iters)

    best_iou = 0
    i_iter = 0
    temp = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        model.train()
        for i_iter, batch in enumerate(trainloader):
            iter_lr = i_iter + epoch * len(trainloader)
            lr = adjust_learning_rate(optimizer, iter_lr, total_iters)
            images, labels, pose, edge, _ = batch
            labels = labels.long().cuda(non_blocking=True)
            edge = edge.long().cuda(non_blocking=True)
            pose = pose.float().cuda(non_blocking=True)

            preds = model(images)
            loss = criterion(preds, [labels, edge, pose])
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if i_iter % 500 == 0:
                tim = time.time()
                print('iter:{}/{},loss:{:.3f},lr:{:.3e},time:{:.1f}'.format(
                    i_iter, total_iter_per_batch,
                    loss.data.cpu().numpy(), lr, tim - temp))
                temp = tim

        h = time.time()
        if epoch % 5 == 0:
            print("----->Epoch:", epoch)
            parsing_preds, scales, centers = valid(model, valloader,
                                                   input_size, num_samples,
                                                   len(gpus), criterion, args)
            if args.dataset_list == '_id.txt':
                mIoU = compute_mean_ioU(parsing_preds, scales, centers,
                                        args.num_classes, args.data_dir,
                                        input_size)
            miou = mIoU['Mean IU']
            is_best_iou = miou > best_iou
            best_iou = max(miou, best_iou)
            torch.save(model.state_dict(), snapshot_fname + '.pth')
            if is_best_iou:
                print("Best iou epoch: ", epoch)
                shutil.copyfile(snapshot_fname + '.pth', snapshot_best_fname)

    end = datetime.datetime.now()
    print(end - start, 'seconds')
    print(end)
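
# NOTE (sketch): compute_mean_ioU() is called above but not defined in this listing.
# Its core is a per-class IoU computed from a confusion matrix; the sketch below
# ignores the scale/center un-cropping done by the real helper, and the 'Mean IU'
# key only mirrors how the result is consumed above:
import numpy as np

def mean_iou_from_confusion(confusion):
    """confusion: (C, C) array with rows = ground truth, columns = prediction."""
    tp = np.diag(confusion).astype(np.float64)
    fp = confusion.sum(axis=0) - tp
    fn = confusion.sum(axis=1) - tp
    denom = tp + fp + fn
    iou = tp / np.maximum(denom, 1)            # classes absent everywhere get IoU 0
    present = denom > 0
    return {'Mean IU': float(iou[present].mean()) if present.any() else 0.0}
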
Example #19
def main():
    """Create the model and start the training."""
    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)
    writer = SummaryWriter(args.snapshot_dir)
    gpus = [int(i) for i in args.gpu.split(',')]
    if not args.gpu == 'None':
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    h, w = [int(i) for i in args.input_size.split(',')]
    input_size = [h, w]
    cudnn.enabled = True
    # cudnn related setting
    cudnn.benchmark = False
    torch.backends.cudnn.deterministic = False  # why are non-deterministic convolutions used here?
    torch.backends.cudnn.enabled = True
    NUM_CLASSES = 7  # parsing
    NUM_HEATMAP = 15  # pose
    NUM_PAFS = 28  # pafs
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])
    # load dataset
    num_samples = 0
    trainloader = data.DataLoader(VOCSegmentation(args.data_dir,
                                                  args.dataset,
                                                  crop_size=input_size,
                                                  stride=args.stride,
                                                  transform=transform),
                                  batch_size=args.batch_size * len(gpus),
                                  shuffle=True,
                                  num_workers=2,
                                  pin_memory=True)

    valloader = None
    if args.print_val != 0:
        valdataset = VOCSegmentation(args.data_dir,
                                     'val',
                                     crop_size=input_size,
                                     transform=transform)
        num_samples = len(valdataset)
        valloader = data.DataLoader(
            valdataset,
            batch_size=8 * len(gpus),  # batchsize
            shuffle=False,
            pin_memory=True)

    parsingnet = ParsingNet(num_classes=NUM_CLASSES,
                            num_heatmaps=NUM_HEATMAP,
                            num_pafs=NUM_PAFS)
    criterion_parsing = Criterion()
    criterion_parsing = DataParallelCriterion(criterion_parsing)
    criterion_parsing.cuda()

    optimizer_parsing = optim.SGD(parsingnet.parameters(),
                                  lr=args.learning_rate,
                                  momentum=args.momentum,
                                  weight_decay=args.weight_decay)

    optimizer_parsing.zero_grad()
    # Load the pre-trained parameters
    print(args.train_continue)
    if not args.train_continue:
        checkpoint = torch.load(RESNET_IMAGENET)
        load_state(parsingnet, checkpoint)
    else:
        checkpoint = torch.load(args.restore_from_parsing)
        if 'current_epoch' in checkpoint:
            current_epoch = checkpoint['current_epoch']
            args.start_epoch = current_epoch

        if 'state_dict' in checkpoint:
            checkpoint = checkpoint['state_dict']

        load_state(parsingnet, checkpoint)

    parsingnet = DataParallelModel(parsingnet).cuda()
    total_iters = args.epochs * len(trainloader)
    for epoch in range(args.start_epoch, args.epochs):
        parsingnet.train()
        for i_iter, batch in enumerate(trainloader):
            i_iter += len(trainloader) * epoch
            lr = adjust_parsing_lr(optimizer_parsing, i_iter, total_iters)

            images, labels, edges, heatmap, pafs, heatmap_mask, pafs_mask, _ = batch
            images = images.cuda()
            labels = labels.long().cuda(non_blocking=True)
            edges = edges.long().cuda(non_blocking=True)
            heatmap = heatmap.cuda()
            pafs = pafs.cuda()
            heatmap_mask = heatmap_mask.cuda()
            pafs_mask = pafs_mask.cuda()

            preds = parsingnet(images)
            loss_parsing = criterion_parsing(
                preds, [labels, edges, heatmap, pafs, heatmap_mask, pafs_mask],
                writer, i_iter, total_iters)
            optimizer_parsing.zero_grad()
            loss_parsing.backward()
            optimizer_parsing.step()
            if i_iter % 100 == 0:
                writer.add_scalar('parsing_lr', lr, i_iter)
                writer.add_scalar('loss_total', loss_parsing.item(), i_iter)
            if i_iter % 500 == 0:

                if len(gpus) > 1:
                    preds = preds[0]

                images_inv = inv_preprocess(images, args.save_num_images)
                parsing_labels_c = decode_parsing(labels,
                                                  args.save_num_images,
                                                  is_pred=False)
                preds_colors = decode_parsing(preds[0][-1],
                                              args.save_num_images,
                                              is_pred=True)
                edges_colors = decode_parsing(edges,
                                              args.save_num_images,
                                              is_pred=False)
                pred_edges = decode_parsing(preds[1][-1],
                                            args.save_num_images,
                                            is_pred=True)

                img = vutils.make_grid(images_inv,
                                       normalize=False,
                                       scale_each=True)
                parsing_lab = vutils.make_grid(parsing_labels_c,
                                               normalize=False,
                                               scale_each=True)
                pred_v = vutils.make_grid(preds_colors,
                                          normalize=False,
                                          scale_each=True)
                edge = vutils.make_grid(edges_colors,
                                        normalize=False,
                                        scale_each=True)
                pred_edges = vutils.make_grid(pred_edges,
                                              normalize=False,
                                              scale_each=True)

                writer.add_image('Images/', img, i_iter)
                writer.add_image('Parsing_labels/', parsing_lab, i_iter)
                writer.add_image('Parsing_Preds/', pred_v, i_iter)

                writer.add_image('Edges/', edge, i_iter)
                writer.add_image('Edges_preds/', pred_edges, i_iter)

        if (epoch + 1) % 15 == 0:
            if args.print_val != 0:
                parsing_preds, scales, centers = valid(parsingnet, valloader,
                                                       input_size, num_samples,
                                                       gpus)
                mIoU = compute_mean_ioU(parsing_preds, scales, centers,
                                        NUM_CLASSES, args.data_dir, input_size)
                with open(os.path.join(args.snapshot_dir, "val_res.txt"), "a+") as f:
                    f.write(str(epoch) + str(mIoU) + '\n')
            snapshot_name_parsing = osp.join(
                args.snapshot_dir,
                'PASCAL_parsing_' + str(epoch) + '.pth')
            torch.save(
                {
                    'state_dict': parsingnet.state_dict(),
                    'optimizer': optimizer_parsing.state_dict(),
                    'current_epoch': epoch
                }, snapshot_name_parsing)

    end = timeit.default_timer()
    print(end - start, 'seconds')
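
# NOTE (sketch): load_state() is used above to load pre-trained weights but is not
# defined in this listing. A tolerant loader that copies only keys whose names and
# shapes match could look like this (name and behaviour are assumptions):
import torch

def load_state_sketch(model, checkpoint):
    """Copy matching parameters/buffers from checkpoint into model; report the rest."""
    own_state = model.state_dict()
    skipped = []
    with torch.no_grad():
        for name, param in checkpoint.items():
            if name in own_state and own_state[name].size() == param.size():
                own_state[name].copy_(param)
            else:
                skipped.append(name)
    if skipped:
        print('load_state_sketch: skipped {} key(s)'.format(len(skipped)))
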
Example #20
def main():
    """Create the model and start the training."""

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    writer = SummaryWriter(args.snapshot_dir)
    gpus = [int(i) for i in args.gpu.split(',')]
    if not args.gpu == 'None':
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    h, w = map(int, args.input_size.split(','))
    input_size = [h, w]

    cudnn.enabled = True
    # cudnn related setting
    cudnn.benchmark = True
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.enabled = True

    deeplab = Res_Deeplab(num_classes=args.num_classes)

    # dump_input = torch.rand((args.batch_size, 3, input_size[0], input_size[1]))
    # writer.add_graph(deeplab.cuda(), dump_input.cuda(), verbose=False)

    saved_state_dict = torch.load(args.restore_from)
    new_params = deeplab.state_dict().copy()
    for i in saved_state_dict:
        i_parts = i.split('.')
        # print(i_parts)
        if not i_parts[0] == 'fc':
            new_params['.'.join(i_parts[0:])] = saved_state_dict[i]

    deeplab.load_state_dict(new_params)

    model = DataParallelModel(deeplab)
    model.cuda()

    criterion = CriterionAll()
    criterion = DataParallelCriterion(criterion)
    criterion.cuda()

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    trainloader = data.DataLoader(LIPDataSet(args.data_dir,
                                             args.dataset,
                                             crop_size=input_size,
                                             transform=transform),
                                  batch_size=args.batch_size * len(gpus),
                                  shuffle=True,
                                  num_workers=2,
                                  pin_memory=True)
    #lip_dataset = LIPDataSet(args.data_dir, 'val', crop_size=input_size, transform=transform)
    #num_samples = len(lip_dataset)

    #valloader = data.DataLoader(lip_dataset, batch_size=args.batch_size * len(gpus),
    #                             shuffle=False, pin_memory=True)

    optimizer = optim.SGD(model.parameters(),
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    optimizer.zero_grad()

    total_iters = args.epochs * len(trainloader)
    for epoch in range(args.start_epoch, args.epochs):
        model.train()
        for i_iter, batch in enumerate(trainloader):
            i_iter += len(trainloader) * epoch
            lr = adjust_learning_rate(optimizer, i_iter, total_iters)

            images, labels, edges, _ = batch
            labels = labels.long().cuda(non_blocking=True)
            edges = edges.long().cuda(non_blocking=True)

            preds = model(images)

            loss = criterion(preds, [labels, edges])
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if i_iter % 100 == 0:
                writer.add_scalar('learning_rate', lr, i_iter)
                writer.add_scalar('loss', loss.data.cpu().numpy(), i_iter)

            if i_iter % 500 == 0:

                images_inv = inv_preprocess(images, args.save_num_images)
                labels_colors = decode_parsing(labels,
                                               args.save_num_images,
                                               args.num_classes,
                                               is_pred=False)
                edges_colors = decode_parsing(edges,
                                              args.save_num_images,
                                              2,
                                              is_pred=False)

                if isinstance(preds, list):
                    preds = preds[0]
                preds_colors = decode_parsing(preds[0][-1],
                                              args.save_num_images,
                                              args.num_classes,
                                              is_pred=True)
                pred_edges = decode_parsing(preds[1][-1],
                                            args.save_num_images,
                                            2,
                                            is_pred=True)

                img = vutils.make_grid(images_inv,
                                       normalize=False,
                                       scale_each=True)
                lab = vutils.make_grid(labels_colors,
                                       normalize=False,
                                       scale_each=True)
                pred = vutils.make_grid(preds_colors,
                                        normalize=False,
                                        scale_each=True)
                edge = vutils.make_grid(edges_colors,
                                        normalize=False,
                                        scale_each=True)
                pred_edge = vutils.make_grid(pred_edges,
                                             normalize=False,
                                             scale_each=True)

                writer.add_image('Images/', img, i_iter)
                writer.add_image('Labels/', lab, i_iter)
                writer.add_image('Preds/', pred, i_iter)
                writer.add_image('Edges/', edge, i_iter)
                writer.add_image('PredEdges/', pred_edge, i_iter)

            print('iter = {} of {} completed, loss = {}'.format(
                i_iter, total_iters,
                loss.data.cpu().numpy()))

        torch.save(
            model.state_dict(),
            osp.join(args.snapshot_dir, 'LIP_epoch_' + str(epoch) + '.pth'))

        #parsing_preds, scales, centers = valid(model, valloader, input_size,  num_samples, len(gpus))

        #mIoU = compute_mean_ioU(parsing_preds, scales, centers, args.num_classes, args.data_dir, input_size)

        #print(mIoU)
        #writer.add_scalars('mIoU', mIoU, epoch)

    end = timeit.default_timer()
    print(end - start, 'seconds')
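
# NOTE (sketch): inv_preprocess() reverses the ImageNet normalization so images can
# be displayed in TensorBoard; its definition is not included in this listing. A
# minimal sketch under that assumption:
import torch

def inv_preprocess_sketch(images, num_images):
    """Undo Normalize(mean, std) on the first num_images tensors for visualization."""
    mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
    std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)
    images = images[:num_images].detach().cpu()
    return (images * std + mean).clamp(0.0, 1.0)
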
def main():
    args = get_arguments()
    print(args)

    start_epoch = 0
    cycle_n = 0

    if not os.path.exists(args.log_dir):
        os.makedirs(args.log_dir)
    with open(os.path.join(args.log_dir, 'args.json'), 'w') as opt_file:
        json.dump(vars(args), opt_file)

    gpus = [int(i) for i in args.gpu.split(',')]
    if not args.gpu == 'None':
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    input_size = list(map(int, args.input_size.split(',')))

    cudnn.enabled = True
    cudnn.benchmark = True

    # Model Initialization
    AugmentCE2P = networks.init_model(args.arch,
                                      num_classes=args.num_classes,
                                      pretrained=args.imagenet_pretrain)
    model = DataParallelModel(AugmentCE2P)
    model.cuda()

    IMAGE_MEAN = AugmentCE2P.mean
    IMAGE_STD = AugmentCE2P.std
    INPUT_SPACE = AugmentCE2P.input_space
    print('image mean: {}'.format(IMAGE_MEAN))
    print('image std: {}'.format(IMAGE_STD))
    print('input space:{}'.format(INPUT_SPACE))

    restore_from = args.model_restore
    if os.path.exists(restore_from):
        print('Resume training from {}'.format(restore_from))
        checkpoint = torch.load(restore_from)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']

    SCHP_AugmentCE2P = networks.init_model(args.arch,
                                           num_classes=args.num_classes,
                                           pretrained=args.imagenet_pretrain)
    schp_model = DataParallelModel(SCHP_AugmentCE2P)
    schp_model.cuda()

    if os.path.exists(args.schp_restore):
        print('Resuming schp checkpoint from {}'.format(args.schp_restore))
        schp_checkpoint = torch.load(args.schp_restore)
        schp_model_state_dict = schp_checkpoint['state_dict']
        cycle_n = schp_checkpoint['cycle_n']
        schp_model.load_state_dict(schp_model_state_dict)

    # Loss Function
    criterion = CriterionAll(lambda_1=args.lambda_s,
                             lambda_2=args.lambda_e,
                             lambda_3=args.lambda_c,
                             num_classes=args.num_classes)
    criterion = DataParallelCriterion(criterion)
    criterion.cuda()

    # Data Loader
    if INPUT_SPACE == 'BGR':
        print('BGR Transformation')
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=IMAGE_MEAN, std=IMAGE_STD),
        ])

    elif INPUT_SPACE == 'RGB':
        print('RGB Transformation')
        transform = transforms.Compose([
            transforms.ToTensor(),
            BGR2RGB_transform(),
            transforms.Normalize(mean=IMAGE_MEAN, std=IMAGE_STD),
        ])

    train_dataset = LIPDataSet(args.data_dir,
                               args.split_name,
                               crop_size=input_size,
                               transform=transform)
    train_loader = data.DataLoader(train_dataset,
                                   batch_size=args.batch_size * len(gpus),
                                   num_workers=16,
                                   shuffle=True,
                                   pin_memory=True,
                                   drop_last=True)
    print('Total training samples: {}'.format(len(train_dataset)))

    # Optimizer Initialization
    optimizer = optim.SGD(model.parameters(),
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    lr_scheduler = SGDRScheduler(optimizer,
                                 total_epoch=args.epochs,
                                 eta_min=args.learning_rate / 100,
                                 warmup_epoch=10,
                                 start_cyclical=args.schp_start,
                                 cyclical_base_lr=args.learning_rate / 2,
                                 cyclical_epoch=args.cycle_epochs)

    total_iters = args.epochs * len(train_loader)
    start = timeit.default_timer()
    for epoch in range(start_epoch, args.epochs):
        lr_scheduler.step(epoch=epoch)
        lr = lr_scheduler.get_lr()[0]

        model.train()
        for i_iter, batch in enumerate(train_loader):
            i_iter += len(train_loader) * epoch

            images, labels, _ = batch
            labels = labels.cuda(non_blocking=True)

            edges = generate_edge_tensor(labels)
            labels = labels.type(torch.cuda.LongTensor)
            edges = edges.type(torch.cuda.LongTensor)

            preds = model(images)

            # Online Self Correction Cycle with Label Refinement
            if cycle_n >= 1:
                with torch.no_grad():
                    soft_preds = schp_model(images)
                    soft_parsing = []
                    soft_edge = []
                    for soft_pred in soft_preds:
                        soft_parsing.append(soft_pred[0][-1])
                        soft_edge.append(soft_pred[1][-1])
                    soft_preds = torch.cat(soft_parsing, dim=0)
                    soft_edges = torch.cat(soft_edge, dim=0)
            else:
                soft_preds = None
                soft_edges = None

            loss = criterion(preds, [labels, edges, soft_preds, soft_edges],
                             cycle_n)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if i_iter % 100 == 0:
                print('iter = {} of {} completed, lr = {}, loss = {}'.format(
                    i_iter, total_iters, lr,
                    loss.data.cpu().numpy()))
        if (epoch + 1) % (args.eval_epochs) == 0:
            schp.save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                },
                False,
                args.log_dir,
                filename='checkpoint_{}.pth.tar'.format(epoch + 1))

        # Self Correction Cycle with Model Aggregation
        if (epoch + 1) >= args.schp_start and (
                epoch + 1 - args.schp_start) % args.cycle_epochs == 0:
            print('Self-correction cycle number {}'.format(cycle_n))
            schp.moving_average(schp_model, model, 1.0 / (cycle_n + 1))
            cycle_n += 1
            schp.bn_re_estimate(train_loader, schp_model)
            schp.save_schp_checkpoint(
                {
                    'state_dict': schp_model.state_dict(),
                    'cycle_n': cycle_n,
                },
                False,
                args.log_dir,
                filename='schp_{}_checkpoint.pth.tar'.format(cycle_n))

        torch.cuda.empty_cache()
        end = timeit.default_timer()
        print('epoch = {} of {} completed using {} s'.format(
            epoch, args.epochs, (end - start) / (epoch - start_epoch + 1)))

    end = timeit.default_timer()
    print('Training Finished in {} seconds'.format(end - start))
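
# NOTE (sketch): schp.moving_average() aggregates the online model into the
# self-correction model, but the helper itself is not shown here. A sketch of such a
# parameter-wise running average with weight alpha (the signature is an assumption;
# BN statistics are refreshed separately above via schp.bn_re_estimate()):
import torch

def moving_average_sketch(target_model, source_model, alpha):
    """target <- (1 - alpha) * target + alpha * source, parameter by parameter."""
    with torch.no_grad():
        for p_tgt, p_src in zip(target_model.parameters(), source_model.parameters()):
            p_tgt.mul_(1.0 - alpha).add_(p_src, alpha=alpha)
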