Пример #1
0
def _main():
    """Generate outputs with a trained model and optionally score them.

    Loads the generation config, restores the model checkpoint, runs
    generation over the requested input, writes the results to
    ``<output_path>/result.txt`` and, when a reference file is given,
    evaluates the output against it.
    """
    args = get_generate_config()
    # Expose the special-token indices on the args namespace so downstream
    # helpers (vocab, model, generator) can read them uniformly.
    setattr(args, 'PAD_index', constants.PAD_index)
    setattr(args, 'BOS_index', constants.BOS_index)
    setattr(args, 'EOS_index', constants.EOS_index)
    setattr(args, 'rank', 0)
    # Exactly one of the tokenized/raw input files must be provided.
    assert (args.file is None) ^ (args.raw_file is None)
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(args.cuda_num)
    model_state_dict, model_config = load_model(args.model_path)
    # Merge the persisted model config into the runtime args.
    for key, value in model_config.items():
        setattr(args, key, value)
    print(args)
    get_vocab_info(args)
    model = make_model(args, model_state_dict, 0, False)
    setattr(args, 'model', model)
    get_dataloader(args)
    output = generate(args)
    output = restore_rank(output)
    # exist_ok avoids the check-then-create race of exists()+makedirs().
    os.makedirs(args.output_path, exist_ok=True)
    save_file = os.path.join(args.output_path, 'result.txt')
    save2file(output, save_file)

    if args.ref_file is not None:
        # Renamed from `eval` to avoid shadowing the builtin.
        evaluator = Eval(reference_file=args.ref_file)
        evaluator(save_file)
Пример #2
0
 def __init__(self, args):
     """Set up logging, the evaluator and the DeepLab model.

     Args:
         args: parsed configuration namespace; must provide num_classes,
             output_stride, imagenet_pretrained, pretrained_ckpt_file,
             bn_momentum and freeze_bn.
     """
     self.args = args
     self.logger = logging.getLogger()
     self.logger.setLevel(logging.INFO)
     self.Eval = Eval(self.args.num_classes)
     # Use ImageNet weights only when no task-specific checkpoint is given
     # (PEP 8: compare with None via `is`, not `==`).
     self.model = DeepLab(output_stride=self.args.output_stride,
                          class_num=self.args.num_classes,
                          pretrained=self.args.imagenet_pretrained
                          and self.args.pretrained_ckpt_file is None,
                          bn_momentum=self.args.bn_momentum,
                          freeze_bn=self.args.freeze_bn)
     if self.args.pretrained_ckpt_file:
         self.load_checkpoint(self.args.pretrained_ckpt_file)
Пример #3
0
    def __init__(self, args, cuda=None, train_id="None", logger=None):
        """Trainer setup: device, metrics, loss, model, optimizer and the
        source-dataset dataloader.

        Args:
            args: parsed configuration namespace (gpu, num_classes,
                checkpoint_dir, optim, momentum, weight_decay, dataset,
                iter_max, iter_stop, ...).
            cuda: enable CUDA when truthy and a GPU is available.
            train_id: identifier used to tag this training run.
            logger: externally created logger instance.
        """
        self.args = args
        # Must be set before any CUDA context is created.
        os.environ["CUDA_VISIBLE_DEVICES"] = self.args.gpu
        self.cuda = cuda and torch.cuda.is_available()
        self.device = torch.device('cuda' if self.cuda else 'cpu')
        self.train_id = train_id
        self.logger = logger

        # Progress/metric bookkeeping.
        self.current_MIoU = 0
        self.best_MIou = 0
        self.best_source_MIou = 0
        self.current_epoch = 0
        self.current_iter = 0
        self.second_best_MIou = 0

        # set TensorboardX
        self.writer = SummaryWriter(self.args.checkpoint_dir)

        # Metric definition
        self.Eval = Eval(self.args.num_classes)

        # loss definition
        self.loss = nn.CrossEntropyLoss(weight=None, ignore_index= -1)
        self.loss.to(self.device)

        # model
        self.model, params = get_model(self.args)
        self.model = nn.DataParallel(self.model, device_ids=[0])
        self.model.to(self.device)

        # NOTE(review): no `lr` is passed to SGD — presumably `params` from
        # get_model() contains per-group learning rates; confirm.
        if self.args.optim == "SGD":
            self.optimizer = torch.optim.SGD(
                params=params,
                momentum=self.args.momentum,
                weight_decay=self.args.weight_decay
            )
        elif self.args.optim == "Adam":
            self.optimizer = torch.optim.Adam(params, betas=(0.9, 0.99), weight_decay=self.args.weight_decay)
        # dataloader
        # Select the source-domain loader by name; SYNTHIA is the fallback.
        if self.args.dataset=="cityscapes":
            self.dataloader = City_DataLoader(self.args)  
        elif self.args.dataset=="gta5":
            self.dataloader = GTA5_DataLoader(self.args)
        else:
            self.dataloader = SYNTHIA_DataLoader(self.args)
        # Cap iterations per epoch at the module-level ITER_MAX bound.
        self.dataloader.num_iterations = min(self.dataloader.num_iterations, ITER_MAX)
        print(self.args.iter_max, self.dataloader.num_iterations)
        # iter_stop (when set) overrides iter_max for the epoch budget.
        self.epoch_num = ceil(self.args.iter_max / self.dataloader.num_iterations) if self.args.iter_stop is None else \
                            ceil(self.args.iter_stop / self.dataloader.num_iterations)
Пример #4
0
    def __init__(self, args, cuda=None, train_id=None, logger=None):
        """Cross-city trainer setup: device, metric, loss, model, optional
        checkpoint restore, and train/validation dataloaders.

        Args:
            args: parsed configuration namespace.
            cuda: enable CUDA when truthy and a GPU is available.
            train_id: identifier used to locate a previously saved checkpoint.
            logger: externally created logger instance.
        """
        self.args = args
        # Must be set before any CUDA context is created.
        os.environ["CUDA_VISIBLE_DEVICES"] = self.args.gpu
        self.cuda = cuda and torch.cuda.is_available()
        self.device = torch.device('cuda' if self.cuda else 'cpu')

        # Progress/metric bookkeeping.
        self.current_MIoU = 0
        self.best_MIou = 0
        self.current_epoch = 0
        self.current_iter = 0
        self.train_id = train_id
        self.logger = logger

        # set TensorboardX
        self.writer = SummaryWriter(self.args.checkpoint_dir)

        # Metric definition
        self.Eval = Eval(self.args.num_classes)

        # loss definition
        self.loss = nn.CrossEntropyLoss(ignore_index=-1)
        self.loss.to(self.device)

        # model
        self.model, params = get_model(self.args)
        self.model = nn.DataParallel(self.model, device_ids=[0])
        self.model.to(self.device)

        # load pretrained checkpoint
        if self.args.pretrained_ckpt_file is not None:
            # Prefer a '<train_id>best.pth' next to checkpoint_dir; fall back
            # to the explicitly supplied checkpoint path.
            path1 = os.path.join(*self.args.checkpoint_dir.split('/')[:-1],
                                 self.train_id + 'best.pth')
            path2 = self.args.pretrained_ckpt_file
            if os.path.exists(path1):
                pretrained_ckpt_file = path1
            elif os.path.exists(path2):
                pretrained_ckpt_file = path2
            else:
                raise AssertionError("no pretrained_ckpt_file")
            self.load_checkpoint(pretrained_ckpt_file)

        # dataloader
        self.dataloader = City_DataLoader(
            self.args
        ) if self.args.dataset == "cityscapes" else GTA5_DataLoader(self.args)
        if self.args.city_name != "None":
            # Validate on the cross-city target split instead of the source
            # validation set.
            target_data_set = CrossCity_Dataset(
                self.args,
                data_root_path=self.args.data_root_path,
                list_path=self.args.list_path,
                split='val',
                base_size=self.args.target_base_size,
                crop_size=self.args.target_crop_size,
                class_13=self.args.class_13)
            self.target_val_dataloader = data.DataLoader(
                target_data_set,
                batch_size=self.args.batch_size,
                shuffle=False,
                num_workers=self.args.data_loader_workers,
                pin_memory=self.args.pin_memory,
                drop_last=True)
            self.dataloader.val_loader = self.target_val_dataloader
            # NOTE(review): intended as ceil(len/batch), but this overcounts
            # by one batch when len is divisible by batch_size — confirm.
            self.dataloader.valid_iterations = (
                len(target_data_set) +
                self.args.batch_size) // self.args.batch_size
        else:
            # No target city: validate on the training loader, capped at 500.
            self.dataloader.val_loader = self.dataloader.data_loader
            self.dataloader.valid_iterations = min(
                self.dataloader.num_iterations, 500)
        self.epoch_num = ceil(self.args.iter_max /
                              self.dataloader.num_iterations)
    def __init__(self, args, config, cuda=None):
        """VOC DeepLab trainer setup: paths, metric, (optionally weighted)
        loss, model, two-group SGD optimizer and dataloader.

        Args:
            args: command-line namespace (gpu, data_root_path, lr,
                loss_weight, output_stride, ...).
            config: experiment config (num_classes, epoch_num, momentum,
                weight_decay, classes_weight, ...).
            cuda: enable CUDA when truthy and a GPU is available.
        """
        self.args = args
        # Must be set before any CUDA context is created.
        os.environ["CUDA_VISIBLE_DEVICES"] = self.args.gpu
        self.config = config
        self.cuda = cuda and torch.cuda.is_available()
        self.device = torch.device('cuda' if self.cuda else 'cpu')

        self.best_MIou = 0
        self.current_epoch = 0
        self.epoch_num = self.config.epoch_num
        self.current_iter = 0

        # No log_dir given: TensorBoard writes to its default directory.
        self.writer = SummaryWriter()

        # path definition
        self.val_list_filepath = os.path.join(
            args.data_root_path, 'VOC2012/ImageSets/Segmentation/val.txt')
        self.gt_filepath = os.path.join(args.data_root_path,
                                        'VOC2012/SegmentationClass/')
        self.pre_filepath = os.path.join(args.data_root_path,
                                         'VOC2012/JPEGImages/')

        # Metric definition
        self.Eval = Eval(self.config.num_classes)

        # loss definition
        if args.loss_weight:
            classes_weights_path = os.path.join(
                self.config.classes_weight,
                self.args.dataset + 'classes_weights_log.npy')
            print(classes_weights_path)
            # Compute the class weights on first use; they are cached on disk.
            if not os.path.isfile(classes_weights_path):
                logger.info('calculating class weights...')
                calculate_weigths_labels(self.config)
            class_weights = np.load(classes_weights_path)
            pprint.pprint(class_weights)
            weight = torch.from_numpy(class_weights.astype(np.float32))
            logger.info('loading class weights successfully!')
        else:
            weight = None

        # ignore_index=255: VOC masks use 255 for 'void' pixels.
        self.loss = nn.CrossEntropyLoss(weight=weight, ignore_index=255)
        self.loss.to(self.device)

        # model
        self.model = DeepLab(output_stride=self.args.output_stride,
                             class_num=self.config.num_classes,
                             pretrained=self.args.imagenet_pretrained,
                             bn_momentum=self.args.bn_momentum,
                             freeze_bn=self.args.freeze_bn)
        # NOTE(review): hard-codes 4 GPUs — confirm this matches args.gpu.
        self.model = nn.DataParallel(self.model, device_ids=range(4))
        patch_replication_callback(self.model)
        self.model.to(self.device)

        # Two parameter groups: backbone at base lr, head at 10x lr.
        self.optimizer = torch.optim.SGD(
            params=[
                {
                    "params": self.get_params(self.model.module, key="1x"),
                    "lr": self.args.lr,
                },
                {
                    "params": self.get_params(self.model.module, key="10x"),
                    "lr": 10 * self.args.lr,
                },
            ],
            momentum=self.config.momentum,
            # dampening=self.config.dampening,
            weight_decay=self.config.weight_decay,
            # nesterov=self.config.nesterov
        )
        # dataloader
        self.dataloader = VOCDataLoader(self.args, self.config)
    def __init__(self, args, cuda=None):
        """U-Net-decoder trainer setup on VOC: device, metric, (optionally
        class-weighted) loss, model, two-group SGD optimizer and dataloader.

        Args:
            args: parsed configuration namespace (gpu, num_classes, lr,
                momentum, weight_decay, iter_max, loss_weight_file, ...).
            cuda: enable CUDA when truthy and a GPU is available.
        """
        self.args = args
        # Must be set before any CUDA context is created.
        os.environ["CUDA_VISIBLE_DEVICES"] = self.args.gpu
        self.cuda = cuda and torch.cuda.is_available()
        self.device = torch.device('cuda' if self.cuda else 'cpu')

        # Progress/metric bookkeeping.
        self.current_MIoU = 0
        self.best_MIou = 0
        self.current_epoch = 0
        self.current_iter = 0

        # set TensorboardX
        self.writer = SummaryWriter(log_dir=self.args.run_name)

        # Metric definition
        self.Eval = Eval(self.args.num_classes)

        # loss definition
        if self.args.loss_weight_file is not None:
            classes_weights_path = os.path.join(self.args.loss_weights_dir,
                                                self.args.loss_weight_file)
            print(classes_weights_path)
            # Compute the class weights on first use; they are cached on disk.
            if not os.path.isfile(classes_weights_path):
                logger.info('calculating class weights...')
                calculate_weigths_labels(self.args)
            class_weights = np.load(classes_weights_path)
            pprint.pprint(class_weights)
            weight = torch.from_numpy(class_weights.astype(np.float32))
            logger.info('loading class weights successfully!')
        else:
            weight = None

        # ignore_index=255: VOC masks use 255 for 'void' pixels.
        self.loss = nn.CrossEntropyLoss(weight=weight, ignore_index=255)
        self.loss.to(self.device)

        # model
        # Use ImageNet weights only when no task checkpoint is supplied
        # (fixed: compare with None via `is`, not `==` — PEP 8).
        self.model = Unet_decoder(output_stride=self.args.output_stride,
                                  class_num=self.args.num_classes,
                                  pretrained=self.args.imagenet_pretrained
                                  and self.args.pretrained_ckpt_file is None,
                                  bn_momentum=self.args.bn_momentum,
                                  freeze_bn=self.args.freeze_bn)
        # NOTE(review): assumes args.gpu is a comma-separated id string like
        # '0,1', so ceil(len/2) counts the devices — confirm.
        self.model = nn.DataParallel(self.model,
                                     device_ids=range(
                                         ceil(len(self.args.gpu) / 2)))
        patch_replication_callback(self.model)
        self.model.to(self.device)

        # Two parameter groups: backbone at base lr, head at 10x lr.
        self.optimizer = torch.optim.SGD(
            params=[
                {
                    "params": self.get_params(self.model.module, key="1x"),
                    "lr": self.args.lr,
                },
                {
                    "params": self.get_params(self.model.module, key="10x"),
                    "lr": 10 * self.args.lr,
                },
            ],
            momentum=self.args.momentum,
            # dampening=self.args.dampening,
            weight_decay=self.args.weight_decay,
            # nesterov=self.args.nesterov
        )
        # dataloader
        self.dataloader = VOCDataLoader(self.args)
        self.epoch_num = ceil(self.args.iter_max /
                              self.dataloader.train_iterations)
Пример #7
0
        lr=config.learning_rate,
        betas=(config.beta_1, config.beta_2),
        eps=config.eps,
        weight_decay=config.weight_decay),
                      d_model=config.transformer_embedding_dim,
                      warmup_steps=config.warmup_steps,
                      factor=config.factor)

    criterion_para = CopyLoss(ignore_index=Constants.PAD,
                              reduction='none').to(device_para)
    criterion_back = LabelSmoothing(smoothing=config.smoothing,
                                    ignore_index=Constants.PAD).to(device_back)

    generator = generator(test_data=test_dataloader,
                          source_path=args.test_source[0],
                          eval=Eval(args.test_source[0], args.test_target[0]),
                          word2index=word2index,
                          index2word=index2word,
                          UNK_WORD=Constants.UNK_WORD,
                          PAD=Constants.PAD)

    fit = Fit(train_data=train_dataloader,
              para_model=para_model,
              back_model=back_model,
              optim=optim,
              criterion_para=criterion_para,
              criterion_back=criterion_back,
              generator=generator,
              num_rounds=args.num_rounds,
              epoch=args.epoch,
              device_para=device_para,
Пример #8
0
    def __init__(self, args, cuda=None, train_id="None", logger=None):
        """UDA trainer setup: device, metric, loss, model, optimizer and
        dataloaders; validation is overridden to the target Cityscapes
        'test' split.

        Args:
            args: parsed configuration namespace.
            cuda: enable CUDA when truthy and a GPU is available.
            train_id: identifier used to tag this training run.
            logger: externally created logger instance.
        """
        self.args = args
        self.cuda = cuda and torch.cuda.is_available()
        self.device = torch.device('cuda' if self.cuda else 'cpu')
        self.train_id = train_id
        self.logger = logger

        # Progress/metric bookkeeping.
        self.current_MIoU = 0
        self.best_MIou = 0
        self.best_source_MIou = 0
        self.current_epoch = 0
        self.current_iter = 0
        self.second_best_MIou = 0

        # set TensorboardX
        self.writer = SummaryWriter(self.args.checkpoint_dir)

        # Metric definition
        self.Eval = Eval(self.args.num_classes)

        # loss definition
        self.loss = nn.CrossEntropyLoss(weight=None, ignore_index= -1)
        self.loss.to(self.device)

        # model
        self.model, params = get_model(self.args)
        self.model = nn.DataParallel(self.model)
        self.model.to(self.device)

        # support for FCN8s
        # Force Adam for the FCN8s backbone (mutates args.optim in place).
        if self.args.backbone == "fcn8s_vgg" and self.args.optim == "SGD":
            self.args.optim = "Adam"
            print('WARNING: FCN8s requires Adam optimizer, but SGD was set. Switching to Adam.')

        # NOTE(review): no `lr` is passed to SGD — presumably `params` from
        # get_model() contains per-group learning rates; confirm.
        if self.args.optim == "SGD":
            self.optimizer = torch.optim.SGD(
                params=params,
                momentum=self.args.momentum,
                weight_decay=self.args.weight_decay
            )
        elif self.args.optim == "Adam":
            self.optimizer = torch.optim.Adam(params, betas=(0.9, 0.99), weight_decay=self.args.weight_decay)
        # dataloader
        if DEBUG: print('DEBUG: Loading training and validation datasets (for UDA only val one is used, but it is overwritten)')
        # Select the source-domain loader by name; SYNTHIA is the fallback.
        if self.args.dataset=="cityscapes":
            self.dataloader = City_DataLoader(self.args)  
        elif self.args.dataset=="gta5":
            self.dataloader = GTA5_DataLoader(self.args)
        else:
            self.dataloader = SYNTHIA_DataLoader(self.args)

        ###
        # Replace the source validation loader with the target-domain one.
        use_target_val = True
        if use_target_val:
            if DEBUG: print('DEBUG: Overwriting validation set, using target set instead of source one')
            target_data_set = City_Dataset(args,
                                           data_root_path=datasets_path['cityscapes']['data_root_path'],
                                           list_path=datasets_path['cityscapes']['list_path'],
                                           split='test',
                                           base_size=args.target_base_size,
                                           crop_size=args.target_crop_size,
                                           class_16=args.class_16)
            self.target_val_dataloader = data.DataLoader(target_data_set,
                                                         batch_size=self.args.batch_size,
                                                         shuffle=False,
                                                         num_workers=self.args.data_loader_workers,
                                                         pin_memory=self.args.pin_memory,
                                                         drop_last=True)
            self.dataloader.val_loader = self.target_val_dataloader
        ###


        # Cap iterations per epoch at the module-level ITER_MAX bound.
        self.dataloader.num_iterations = min(self.dataloader.num_iterations, ITER_MAX)
        # iter_stop (when set) overrides iter_max for the epoch budget.
        self.epoch_num = ceil(self.args.iter_max / self.dataloader.num_iterations) if self.args.iter_stop is None else \
                            ceil(self.args.iter_stop / self.dataloader.num_iterations)
Пример #9
0
    def __init__(self, cfg, logger, writer):
        """Domain-adaptation trainer setup from a structured config:
        model plus EMA copy, optimizer, and source/target dataloaders.

        Args:
            cfg: nested config object (cfg.data, cfg.opt, cfg.ema_decay,
                cfg.lam_aug, ...).
            logger: externally created logger instance.
            writer: TensorBoard summary writer.
        """

        # Args
        self.cfg = cfg
        # CUDA is assumed available; no CPU fallback here.
        self.device = torch.device('cuda')
        self.logger = logger
        self.writer = writer

        # Counters
        self.epoch = 0
        self.iter = 0
        self.current_MIoU = 0
        self.best_MIou = 0
        self.best_source_MIou = 0

        # Metrics
        self.evaluator = Eval(self.cfg.data.num_classes)

        # Loss
        # Label value -1 is excluded from the cross-entropy computation.
        self.ignore_index = -1
        self.loss = nn.CrossEntropyLoss(ignore_index=self.ignore_index)

        # Model
        self.model, params = get_model(self.cfg)
        # self.model = nn.DataParallel(self.model, device_ids=[0])  # TODO: test multi-gpu
        self.model.to(self.device)

        # EMA
        # Exponential-moving-average copy of the model weights.
        self.ema = EMA(self.model, self.cfg.ema_decay)

        # Optimizer
        # NOTE(review): no `lr` is passed — presumably `params` from
        # get_model() contains per-group learning rates; confirm.
        if self.cfg.opt.kind == "SGD":
            self.optimizer = torch.optim.SGD(
                params,
                momentum=self.cfg.opt.momentum,
                weight_decay=self.cfg.opt.weight_decay)
        elif self.cfg.opt.kind == "Adam":
            self.optimizer = torch.optim.Adam(
                params,
                betas=(0.9, 0.99),
                weight_decay=self.cfg.opt.weight_decay)
        else:
            raise NotImplementedError()
        # NOTE(review): presumably a head-lr multiplier consumed elsewhere
        # (e.g. by an LR scheduler) — confirm.
        self.lr_factor = 10

        # Source
        if self.cfg.data.source.dataset == 'synthia':
            source_train_dataset = SYNTHIA_Dataset(
                split='train', **self.cfg.data.source.kwargs)
            source_val_dataset = SYNTHIA_Dataset(split='val',
                                                 **self.cfg.data.source.kwargs)
        elif self.cfg.data.source.dataset == 'gta5':
            source_train_dataset = GTA5_Dataset(split='train',
                                                **self.cfg.data.source.kwargs)
            source_val_dataset = GTA5_Dataset(split='val',
                                              **self.cfg.data.source.kwargs)
        else:
            raise NotImplementedError()
        self.source_dataloader = DataLoader(source_train_dataset,
                                            shuffle=True,
                                            drop_last=True,
                                            **self.cfg.data.loader.kwargs)
        self.source_val_dataloader = DataLoader(source_val_dataset,
                                                shuffle=False,
                                                drop_last=False,
                                                **self.cfg.data.loader.kwargs)

        # Target
        if self.cfg.data.target.dataset == 'cityscapes':
            target_train_dataset = City_Dataset(split='train',
                                                **self.cfg.data.target.kwargs)
            target_val_dataset = City_Dataset(split='val',
                                              **self.cfg.data.target.kwargs)
        else:
            raise NotImplementedError()
        self.target_dataloader = DataLoader(target_train_dataset,
                                            shuffle=True,
                                            drop_last=True,
                                            **self.cfg.data.loader.kwargs)
        self.target_val_dataloader = DataLoader(target_val_dataset,
                                                shuffle=False,
                                                drop_last=False,
                                                **self.cfg.data.loader.kwargs)

        # Perturbations
        # Augmentations are only instantiated when their loss weight is on.
        if self.cfg.lam_aug > 0:
            self.aug = get_augmentation()
Пример #10
0
    from utils.eval import Eval

    word2index, index2word = load_vocab(args.vocab_path)
    source = lang(filelist=args.source,
                  word2index=word2index,
                  PAD=Constants.PAD_WORD)

    dataloader = get_dataloader(source=source,
                                batch_size=args.batch_size,
                                shuffle=False)

    model = get_vae(vocab_size=len(word2index),
                    device=device,
                    checkpoint_path=args.model_path)

    eval = Eval(args.source[0],
                args.target[0]) if args.target is not None else None

    generator = generator(test_data=dataloader,
                          eval=eval,
                          word2index=word2index,
                          index2word=index2word,
                          source_path=args.source[0],
                          UNK_WORD=Constants.UNK_WORD,
                          PAD=Constants.PAD)

    generator(model=model,
              max_length=args.max_length,
              num_rounds=args.num_rounds,
              device=device,
              save_path=args.save_path,
              save_info=args.save_path,
Пример #11
0
 for epoch in range(1, cfg.num_epoch):
     # 根据训练的epoch进度调节lr
     if epoch in [20,30]:
         lr *= 0.1
     optimizer = torch.optim.Adam(model.parameters(), lr, weight_decay=5e-4)
     model.train()
     for inp, hm, true_mask, ind, wh, offset in tqdm(train_loader):
         inp, hm, true_mask, ind, wh, offset = inp.cuda(), hm.cuda(), true_mask.cuda(), ind.cuda(), wh.cuda(), offset.cuda()
         outputs = model(inp)
         total_loss, hm_loss, wh_loss, off_loss = loss_func(outputs, hm, true_mask, ind, wh, offset)
         total_loss.backward()
         optimizer.step()
         optimizer.zero_grad()
     # save_model(os.path.join(os.path.dirname(__file__), 'weights', '_{}.pth'.format(epoch)), epoch, model,
     #            optimizer)
     eval_result = Eval(model=model, test_loader=val_loader)
     ap_table = [["Index", "Class name", "Precision", "Recall", "AP", "F1-score"]]
     for p, r, ap, f1, cls_id in zip(*eval_result):
         ap_table += [[cls_id, cfg.class_name[cls_id], "%.3f" % p, "%.3f" % r, "%.3f" % ap, "%.3f" % f1]]
     print('\n' + AsciiTable(ap_table).table)
     eval_map = round(eval_result[2].mean(), 4)
     print("Epoch %d/%d ---- mAP:%.4f Loss:%.4f" % (epoch, cfg.num_epoch, eval_map, total_loss.item()))
     vis.line(X=np.array([epoch]), Y=np.array([hm_loss.item()]), win='hm', update=None if epoch == 1 else 'append',
              opts={'title': 'hm'})
     vis.line(X=np.array([epoch]), Y=np.array([wh_loss.item()]), win='wh', update=None if epoch == 1 else 'append',
              opts={'title': 'wh'})
     vis.line(X=np.array([epoch]), Y=np.array([off_loss.item()]), win='offset', update=None if epoch == 1 else 'append',
              opts={'title': 'offset'})
     vis.line(X=np.array([epoch]), Y=np.array([eval_map]), win='map', update=None if epoch == 1 else 'append',
              opts={'title': 'map'})
     if eval_map > mAP: