def create_optimization(self):
        if self.args.loss_function == 'FocalLoss':
            self.loss = FocalLoss(gamma=self.args.gamma)
        else:
            self.loss = nn.CrossEntropyLoss()

        if self.args.cuda:
            self.loss.cuda()

        if self.args.classify:
            # plain classification: optimize the backbone only
            self.optimizer = RMSprop(self.model.parameters(),
                                     self.args.learning_rate,
                                     momentum=self.args.momentum,
                                     weight_decay=self.args.weight_decay)
        else:
            # metric learning: add the ArcFace margin head and optimize both parameter groups
            self.metric_fc = ArcMarginModel(self.args)
            self.optimizer = RMSprop([{
                'params': self.model.parameters()
            }, {
                'params': self.metric_fc.parameters()
            }],
                                     self.args.learning_rate,
                                     momentum=self.args.momentum,
                                     weight_decay=self.args.weight_decay)
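The FocalLoss class referenced throughout these examples is defined separately in each project; for reference, a minimal self-contained sketch of the standard multi-class formulation FL(p_t) = -(1 - p_t)^gamma * log(p_t) (an assumed generic implementation, not any specific repository's) could look like this:

import torch
import torch.nn as nn
import torch.nn.functional as F

class SimpleFocalLoss(nn.Module):
    """Standard multi-class focal loss: FL(p_t) = -(1 - p_t)^gamma * log(p_t)."""
    def __init__(self, gamma=2.0, reduction='mean'):
        super().__init__()
        self.gamma = gamma
        self.reduction = reduction

    def forward(self, logits, target):
        # log-probability of the true class for each sample
        log_pt = F.log_softmax(logits, dim=-1).gather(1, target.unsqueeze(1)).squeeze(1)
        pt = log_pt.exp()
        loss = -((1 - pt) ** self.gamma) * log_pt
        if self.reduction == 'mean':
            return loss.mean()
        if self.reduction == 'sum':
            return loss.sum()
        return loss

# quick sanity check on random data
logits = torch.randn(4, 3)
target = torch.tensor([0, 2, 1, 2])
print(SimpleFocalLoss(gamma=2.0)(logits, target))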
Example #2
    def compute_loss(self, start_logits, end_logits, start_labels, end_labels, label_mask):
        """compute loss on squad task."""
        if len(start_labels.size()) > 1:
            start_labels = start_labels.squeeze(-1)
        if len(end_labels.size()) > 1:
            end_labels = end_labels.squeeze(-1)

        # sometimes the start/end positions are outside our model inputs, we ignore these terms
        batch_size, ignored_index = start_logits.shape # ignored_index: seq_len
        start_labels.clamp_(0, ignored_index)
        end_labels.clamp_(0, ignored_index)

        if self.loss_type != "ce":
            # start_labels/end_labels: position index of answer starts/ends among the document.
            # F.one_hot will map the position index to a sequence of 0/1 labels.
            start_labels = F.one_hot(start_labels, num_classes=ignored_index)
            end_labels = F.one_hot(end_labels, num_classes=ignored_index)

        if self.loss_type == "ce":
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_labels)
            end_loss = loss_fct(end_logits, end_labels)
        elif self.loss_type == "bce":
            start_loss = F.binary_cross_entropy_with_logits(start_logits.view(-1), start_labels.view(-1).float(), reduction="none")
            end_loss = F.binary_cross_entropy_with_logits(end_logits.view(-1), end_labels.view(-1).float(), reduction="none")

            start_loss = (start_loss * label_mask.view(-1)).sum() / label_mask.sum()
            end_loss = (end_loss * label_mask.view(-1)).sum() / label_mask.sum()
        elif self.loss_type == "focal":
            loss_fct = FocalLoss(gamma=self.args.focal_gamma, reduction="none")
            start_loss = loss_fct(FocalLoss.convert_binary_pred_to_two_dimension(start_logits.view(-1)),
                                         start_labels.view(-1))
            end_loss = loss_fct(FocalLoss.convert_binary_pred_to_two_dimension(end_logits.view(-1)),
                                       end_labels.view(-1))
            start_loss = (start_loss * label_mask.view(-1)).sum() / label_mask.sum()
            end_loss = (end_loss * label_mask.view(-1)).sum() / label_mask.sum()

        elif self.loss_type in ["dice", "adaptive_dice"]:
            loss_fct = DiceLoss(with_logits=True, smooth=self.args.dice_smooth, ohem_ratio=self.args.dice_ohem,
                                      alpha=self.args.dice_alpha, square_denominator=self.args.dice_square)
            # add to test
            # start_logits, end_logits = start_logits.view(batch_size, -1), end_logits.view(batch_size, -1)
            # start_labels, end_labels = start_labels.view(batch_size, -1), end_labels.view(batch_size, -1)
            start_logits, end_logits = start_logits.view(-1, 1), end_logits.view(-1, 1)
            start_labels, end_labels = start_labels.view(-1, 1), end_labels.view(-1, 1)
            # label_mask = label_mask.view(batch_size, -1)
            label_mask = label_mask.view(-1, 1)
            start_loss = loss_fct(start_logits, start_labels, mask=label_mask)
            end_loss = loss_fct(end_logits, end_labels, mask=label_mask)
        else:
            raise ValueError("This type of loss func donot exists.")

        total_loss = (start_loss + end_loss) / 2

        return total_loss, start_loss, end_loss
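A toy, self-contained illustration of the masked BCE branch above, with made-up shapes (batch of 2 sequences of length 5): the per-token losses are averaged only over non-padding positions indicated by label_mask:

import torch
import torch.nn.functional as F

start_logits = torch.randn(2, 5)
start_labels = F.one_hot(torch.tensor([1, 3]), num_classes=5)   # answer starts at positions 1 and 3
label_mask = torch.tensor([[1., 1., 1., 0., 0.],
                           [1., 1., 1., 1., 1.]])

per_token = F.binary_cross_entropy_with_logits(
    start_logits.view(-1), start_labels.view(-1).float(), reduction="none")
# average only over real (non-padding) positions
start_loss = (per_token * label_mask.view(-1)).sum() / label_mask.sum()
print(start_loss)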
Example #3
def train(total_epochs=1, interval=100, resume=False, ckpt_path=''):
    print("Loading training dataset...")
    train_dset = OpenImagesDataset(root='./data/train',
                            list_file ='./data/tmp/train_images_bbox.csv',
                            transform=transform, train=True, input_size=600)

    train_loader = data.DataLoader(train_dset, batch_size=4, shuffle=True, num_workers=4, collate_fn=train_dset.collate_fn)
    
    print("Loading completed.")

    #val_dset = OpenImagesDataset(root='./data/train',
    #                  list_file='./data/tmp/train_images_bbox.csv', train=False, transform=transform, input_size=600)
    #val_loader = torch.utils.data.DataLoader(val_dset, batch_size=1, shuffle=False, num_workers=4, collate_fn=val_dset.collate_fn)

    net = RetinaNet()
    net.load_state_dict(torch.load('./model/net.pth'))

    criterion = FocalLoss()
    
    net.cuda()
    criterion.cuda()
    optimizer = optim.SGD(net.parameters(), lr=1e-3, momentum=0.9, weight_decay=1e-4)
    best_val_loss = 1000

    start_epoch=0

    if resume:
        if os.path.isfile(ckpt_path):
            print(f'Loading from the checkpoint {ckpt_path}')
            checkpoint = torch.load(ckpt_path)
            start_epoch = checkpoint['epoch']
            best_val_loss = checkpoint['best_val_loss']
            net.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print(f'Loaded checkpoint {ckpt_path}, epoch : {start_epoch}')
        else:
            print(f'No check point found at the path {ckpt_path}')

    

    for epoch in range(start_epoch, total_epochs):
        train_one_epoch(train_loader, net, criterion, optimizer, epoch, interval)
        val_loss = 0
        #val_loss = validate(val_loader, net, criterion, interval)

        if val_loss < best_val_loss:
            best_val_loss = val_loss
            save_checkpoint({
                'epoch': epoch+1,
                'state_dict': net.state_dict(),
                'best_val_loss': best_val_loss,
                'optimizer' : optimizer.state_dict()
            }, is_best=True)
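save_checkpoint is defined elsewhere in the original project; a minimal hypothetical sketch of the usual PyTorch pattern it presumably follows (filenames assumed):

import shutil
import torch

def save_checkpoint(state, is_best, filename='./checkpoint.pth'):
    # hypothetical helper: persist the state dict and keep a copy of the best model
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, './model_best.pth')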
Example #4
def val(model, dataloader, data_len):
    # switch the model to evaluation mode
    criterion = FocalLoss(2)
    model.train(False)
    running_loss = 0
    running_corrects = 0
    confusion_matrix = meter.ConfusionMeter(2)
    for ii, data in enumerate(
            tqdm(dataloader, desc='Val On Anti-spoofing', unit='batch')):
        input, label = data
        with torch.no_grad():
            val_input = Variable(input)
            val_label = Variable(label)
        if opt.use_gpu:
            val_input = val_input.cuda()
            val_label = val_label.cuda()
        score = model(val_input)
        _, preds = torch.max(score, 1)
        loss = criterion(score, val_label)
        confusion_matrix.add(score.data.squeeze(), val_label)
        running_loss += loss.item() * val_input.size(0)
        running_corrects += torch.sum(preds == val_label.data)
    # switch the model back to training mode
    model.train(True)

    cm_value = confusion_matrix.value()
    val_loss = running_loss / data_len
    val_accuracy = running_corrects.double() / float(data_len)
    return confusion_matrix, val_loss, val_accuracy
Example #5
def main():
    print('==> Choosing data..')
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    ])
    # Model
    net = RetinaNet()
    criterion = FocalLoss()
    #     optimizer = optim.Adam(net.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-4)
    optimizer = optim.SGD(net.parameters(),
                          lr=0.001,
                          momentum=0.9,
                          weight_decay=1e-4)

    load_model_epoch = args.load_model_epoch
    checkpoint = torch.load(
        './checkpoint/{}_ckpt.pth'.format(load_model_epoch))  # max_epoch
    net = torch.nn.DataParallel(net,
                                device_ids=range(torch.cuda.device_count()))
    net.cuda()
    net.load_state_dict(checkpoint['net'])

    optimizer.load_state_dict(checkpoint['optimizer'])
    start_epoch = checkpoint['epoch']

    test(start_epoch, transform, net, criterion, optimizer)
Example #6
    def __init__(self, model_dir, args):
        self.args = args
        self.label_num = args.num_labels
        self.num_labels = args.num_labels

        self.config_class, _, config_model = MODEL_CLASSES[args.model_type]
        bert_config = self.config_class.from_pretrained(args.model_name_or_path)
        super(LanguageSoftmaxForNer, self).__init__(bert_config)

        self.device1 = "cuda" if torch.cuda.is_available() else "cpu"
        self.bert = config_model.from_pretrained(args.model_name_or_path, config=bert_config)  # Load pretrained bert
        # for param in self.bert.parameters():
        #     param.requires_grad = True

        # self.dropout = nn.Dropout(self.args.dropout_rate)
        # self.classifier = nn.Linear(bert_config.hidden_size, self.label_num)
        self.loss_type = self.args.loss_type

        assert self.loss_type in ["lsr", 'focal', 'ce', 'bce', 'bce_with_log']

        if self.loss_type in ["lsr", 'focal', 'ce']:
            self.classifier = FCLayer1(bert_config.hidden_size, self.label_num)
        else:
            self.classifier = FCLayer(bert_config.hidden_size, self.label_num)

        if self.loss_type == 'lsr':
            self.loss_fct = LabelSmoothingCrossEntropy(ignore_index=0)
        elif self.loss_type == 'focal':
            self.loss_fct = FocalLoss(ignore_index=0)
        elif self.loss_type == 'bce':
            self.loss_fct = nn.BCELoss()
        elif self.loss_type == 'bce_with_log':
            self.loss_fct = nn.BCEWithLogitsLoss()
        else:
            self.loss_fct = CrossEntropyLoss(ignore_index=0)
Example #7
def run_train(args):
    assert torch.cuda.is_available(), 'Error: CUDA not found!'
    best_loss = float('inf')  # best test loss
    start_epoch = 0  # start from epoch 0 or last epoch

    # Data
    print('==> Preparing data..')

    trainloader = get_train_loader(img_dir=settings.IMG_DIR,
                                   batch_size=batch_size)
    #trainloader = get_small_train_loader()
    print(trainloader.num)
    #testloader = get_train_loader(img_dir=settings.IMG_DIR)

    # Model
    net = RetinaNet()
    #net.load_state_dict(torch.load('./model/net.pth'))
    net.load_state_dict(torch.load('./ckps/best_0.pth'))
    net = torch.nn.DataParallel(net,
                                device_ids=range(torch.cuda.device_count()))
    net.cuda()

    criterion = FocalLoss()
    #optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4)
    optimizer = optim.Adam(net.parameters(), lr=args.lr)

    iter_save = 200
    bgtime = time.time()
    # Training
    for epoch in range(start_epoch, start_epoch + 100):
        print('\nEpoch: %d' % epoch)
        net.train()
        #net.module.freeze_bn()
        train_loss = 0
        for batch_idx, (inputs, loc_targets,
                        cls_targets) in enumerate(trainloader):
            inputs = Variable(inputs.cuda())
            loc_targets = Variable(loc_targets.cuda())
            cls_targets = Variable(cls_targets.cuda())

            optimizer.zero_grad()
            loc_preds, cls_preds = net(inputs)
            loss = criterion(loc_preds, loc_targets, cls_preds, cls_targets)
            loss.backward()
            optimizer.step()

            #train_loss += loss.item()
            sample_num = (batch_idx + 1) * batch_size
            avg_loss = running_loss(loss.item())
            print(
                'Epoch: {}, num: {}/{} train_loss: {:.3f} | run_loss: {:.3f} min: {:.1f}'
                .format(epoch, sample_num, trainloader.num, loss.item(),
                        avg_loss, (time.time() - bgtime) / 60),
                end='\r')

            if batch_idx % iter_save == 0:
                torch.save(
                    net.module.state_dict(),
                    './ckps/best_{}.pth'.format(batch_idx // iter_save % 5))
                log.info('batch: {}, loss: {:.4f}'.format(batch_idx, avg_loss))
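running_loss is also defined elsewhere in the original project; a hypothetical sketch, assuming it tracks an exponential moving average of the per-batch loss:

_ema_loss = None

def running_loss(value, momentum=0.9):
    # hypothetical helper: exponential moving average of the per-batch loss
    global _ema_loss
    _ema_loss = value if _ema_loss is None else momentum * _ema_loss + (1 - momentum) * value
    return _ema_loss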
Example #8
    def __init__(self, num_classes, ndf=64):
        super(FCDiscriminator, self).__init__()

        self.conv1 = nn.Conv2d(num_classes,
                               ndf,
                               kernel_size=4,
                               stride=2,
                               padding=1)
        self.conv2 = nn.Conv2d(ndf,
                               ndf * 2,
                               kernel_size=4,
                               stride=2,
                               padding=1)
        self.conv3 = nn.Conv2d(ndf * 2,
                               ndf * 4,
                               kernel_size=4,
                               stride=2,
                               padding=1)
        self.conv4 = nn.Conv2d(ndf * 4,
                               ndf * 8,
                               kernel_size=4,
                               stride=2,
                               padding=1)
        self.classifier = nn.Conv2d(ndf * 8,
                                    1,
                                    kernel_size=4,
                                    stride=2,
                                    padding=1)

        self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        self.bce_loss = nn.BCEWithLogitsLoss()
        self.focal_loss = FocalLoss()
Example #9
def main():
    # assert torch.cuda.is_available(), 'Error: CUDA not found!'
    best_loss = float('inf')  # best test loss
    start_epoch = 0  # start from epoch 0 or last epoch
    save_model_path = args.model

    # Data
    print('==> Preparing data..')
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    ])

    # Model
    net = RetinaNet()
    net.load_state_dict(torch.load('./model/net.pth'))

    net = torch.nn.DataParallel(net,
                                device_ids=range(torch.cuda.device_count()))
    net.cuda()

    criterion = FocalLoss()
    #     optimizer = optim.Adam(net.parameters(), lr=args.lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-4)
    optimizer = optim.SGD(net.parameters(),
                          lr=args.lr,
                          momentum=0.9,
                          weight_decay=1e-4)

    for epoch in range(start_epoch, start_epoch + args.train_epoch):
        train(epoch + 1, transform, net, optimizer, criterion)
        save_model(epoch + 1, save_model_path, net, optimizer)
Example #10
def train():
    model = AttentionUNet()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)
    criterion = FocalLoss()

    trained_model = train_and_test(model, dataloaders, optimizer, criterion, num_epochs=epochs)

    return trained_model
Example #11
    def __init__(self, exp, env):
        super().__init__()
        self._mode = 'test'

        # check exp for errors
        check_exp(exp)
        self._k = 0
        self.visu_forward = False

        self.track = exp['model_mode'] == 'test' and \
            exp['d_test']['batch_list_cfg']['seq_length'] != 1 and \
            exp['d_test']['batch_list_cfg']['mode'].find('dense') == -1

        # logging h-params
        exp_config_flatten = flatten_dict(copy.deepcopy(exp))
        for k in exp_config_flatten.keys():
            if exp_config_flatten[k] is None:
                exp_config_flatten[k] = 'is None'

        self.hparams = exp_config_flatten
        self.hparams['lr'] = exp['lr']
        self.env, self.exp = env, exp

        for i in range(0, int(torch.cuda.device_count())):
            print(f'GPU {i} Type {torch.cuda.get_device_name(i)}')

        self.pixelwise_refiner = EfficientDisparity( **exp['efficient_disp_cfg'] )

        self.criterion_adds = AddSLoss(sym_list=exp['d_train']['obj_list_sym'])
        coe = exp['loss'].get('coefficents',[0.0005,0.001,0.005,0.01,0.02,0.08,1])
        self.criterion_focal = FocalLoss() 
        s = self.pixelwise_refiner.size
        self.criterion_flow = FlowLoss(s, s, coe)
       
        self.best_validation = 999
        self.visualizer = None
        self._dict_track = {}
        self.counter_images_logged = 0
        self.test_size = 0.1
        self.init_train_vali_split = False

        if self.exp.get('visu', {}).get('log_to_file', False):
            mp = exp['model_path']
            sys.stdout = Logger2(f'{mp}/Live_Logger_Lightning.log')
            console = logging.StreamHandler()
            console.setLevel(logging.DEBUG)
            logging.getLogger().addHandler(console)
            logging.getLogger("lightning").addHandler(console)
            sys.stderr = sys.stdout

        self.adds_mets = ['init','gt_flow__gt_label', 'pred_flow__gt_label','pred_flow__flow_mask','pred_flow__pred_label']
        lis = []
        for i in range(0, 6):
            lis.append( f'L2_{i}')
        self.df = pd.DataFrame(columns= self.adds_mets + lis )

        self.start = time.time()
Example #12
 def __init__(self, config, num_labels=2, output_attentions=False, keep_multihead_output=False):
     super(F1_BertForSequenceClassification, self).__init__(config)
     self.output_attentions = output_attentions
     self.num_labels = num_labels
     self.bert = BertModel(config, output_attentions=output_attentions,
                           keep_multihead_output=keep_multihead_output)
     self.dropout = nn.Dropout(config.hidden_dropout_prob)
     self.classifier = nn.Linear(config.hidden_size, num_labels)
     self.apply(self.init_bert_weights)
     self.focal_loss = FocalLoss(num_labels)
Example #13
 def criterion(self, logit, truth):
     """Define the (customized) loss function here."""
     Loss_FUNC = FocalLoss()
     #Loss_FUNC = nn.BCEWithLogitsLoss()#nn.MultiLabelSoftMarginLoss()
     if self.training and self.aux_logits:
         logit, aux = logit[0], logit[1]
         return Loss_FUNC(logit, truth) + 0.3 * Loss_FUNC(
             aux, truth)  #according to paper, aux_loss_weight=0.3
     loss = Loss_FUNC(logit, truth)
     return loss
Example #14
 def criterion(self, logit_clf, truth, logit_mask=None, mask=None):
     """Define the (customized) loss function here."""
     ## 1. classification loss
     Loss_FUNC = FocalLoss()
     #Loss_FUNC = nn.BCEWithLogitsLoss()#nn.MultiLabelSoftMarginLoss()
     loss_clf = Loss_FUNC(logit_clf, truth)
     if logit_mask is not None:
         ## 2. segmentation mask loss
         loss_mask = L.lovasz_hinge(logit_mask, mask, ignore=255)
         return loss_clf, loss_mask
     else:
         return loss_clf
def main():

    print('model settings:\n', args)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('training with device:', device)

    # 1. load the datasets
    train_loader, val_loader, test_loader = dataloaders()
    #show number of positive attributes
    print(positive_attributes(train_loader.dataset))
    print(positive_attributes(val_loader.dataset))
    print(positive_attributes(test_loader.dataset))

    # 2. retrieve the pretrained model
    model = load_model()
    #if resume is true, load the previously saved checkpoint
    if args.resume:
        print('resume from last checkpoint')
        state_dict = torch.load(args.root + 'checkpoints/' + args.checkpoint)
        model.load_state_dict(state_dict)
    model.to(device)

    #freeze conv layer parameters if args.train_conv is false, otherwise set requires_grad=True
    for params in model.parameters():
        params.requires_grad = args.train_conv

    if args.train_conv:
        parameters = model.parameters()
    else:
        parameters = model.fc.parameters()

    # 3. train and validate the model
    if args.loss == 'bce':
        criterion = nn.BCEWithLogitsLoss()
    elif args.loss == 'focal':
        criterion = FocalLoss(
            alpha=args.alpha, gamma=args.gamma
        )  # alpha=1 puts no extra weight on either class; a smaller gamma down-weights easy examples less
    optimizer = optim.SGD(parameters, lr=args.lr, momentum=0.9)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.5)
    print('model training starts:')

    # 4. test model accuracy on test set
    if args.test_mode:
        test(model, test_loader, criterion, device)
    else:
        model = train_validate(model, criterion, optimizer, scheduler,
                               train_loader, val_loader, device)

        test(model, test_loader, criterion, device)
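A tiny numeric check of the gamma comment above (probabilities chosen purely for illustration): the (1 - p_t)^gamma factor shrinks the loss of well-classified examples far more than that of hard ones, and a smaller gamma weakens that effect:

import torch

pt = torch.tensor([0.95, 0.6, 0.1])  # hypothetical true-class probabilities, easy -> hard
for gamma in (0.0, 0.5, 2.0):
    fl = (1 - pt) ** gamma * (-pt.log())  # gamma=0 recovers plain cross-entropy
    print(gamma, fl.tolist())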
Example #16
    def __init__(self, model_dir, args):

        self.args = args
        self.num_labels = args.num_labels

        self.config_class, _, config_model = MODEL_CLASSES[args.model_type]
        bert_config = self.config_class.from_pretrained(args.model_name_or_path)
        super(LanguageSpanForNer, self).__init__(bert_config)

        self.bert = config_model.from_pretrained(args.model_name_or_path, config=bert_config)  # Load pretrained bert

        self.soft_label = True
        # self.num_labels = config.num_labels
        self.loss_type = self.args.loss_type

        # self.bert = BertModel(config)

        self.device1 = "cuda" if torch.cuda.is_available() else "cpu"

        self.dropout = nn.Dropout(self.args.dropout_rate)

        assert self.loss_type in ["lsr", 'focal', 'ce', 'bce', 'bce_with_log']

        if self.loss_type in ["lsr", 'focal', 'ce']:

            self.start_fc = PoolerStartLogits1(bert_config.hidden_size, self.num_labels)
            if self.soft_label:
                self.end_fc = PoolerEndLogits1(bert_config.hidden_size + self.num_labels, self.num_labels)
            else:
                self.end_fc = PoolerEndLogits1(bert_config.hidden_size + 1, self.num_labels)
        else:
            self.start_fc = PoolerStartLogits(bert_config.hidden_size, self.num_labels)
            if self.soft_label:
                self.end_fc = PoolerEndLogits(bert_config.hidden_size + self.num_labels, self.num_labels)
            else:
                self.end_fc = PoolerEndLogits(bert_config.hidden_size + 1, self.num_labels)

        if self.loss_type == 'lsr':
            self.loss_fct = LabelSmoothingCrossEntropy(ignore_index=0)
        elif self.loss_type == 'focal':
            self.loss_fct = FocalLoss(ignore_index=0)
        elif self.loss_type == 'bce':
            self.loss_fct = nn.BCELoss()
        elif self.loss_type == 'bce_with_log':
            self.loss_fct = nn.BCEWithLogitsLoss()
        else:
            self.loss_fct = CrossEntropyLoss(ignore_index=0)

        self.init_weights()
def model_evaluate(config, model, data_iter, test=False):
    model.eval()
    loss_total = 0
    predict_all = np.array([], dtype=int)
    labels_all = np.array([], dtype=int)
    label_map = {i: label for i, label in enumerate(config.class_list)}
    criterion = FocalLoss(gamma=2, alpha=1)
    with torch.no_grad():
        for i, (input_ids, attention_mask, token_type_ids,
                labels) in enumerate(data_iter):

            input_ids = torch.tensor(input_ids).type(torch.LongTensor).to(
                config.device)
            attention_mask = torch.tensor(attention_mask).type(
                torch.LongTensor).to(config.device)
            token_type_ids = torch.tensor(token_type_ids).type(
                torch.LongTensor).to(config.device)
            labels = torch.tensor(labels).type(torch.LongTensor).to(
                config.device)

            outputs = model(input_ids, attention_mask, token_type_ids)

            active_loss = attention_mask.view(-1) == 1
            active_logits = outputs.view(-1, config.num_labels)[active_loss]
            active_labels = labels.view(-1)[active_loss]

            #loss = F.cross_entropy(active_logits, active_labels)
            loss = criterion(active_logits, active_labels)
            loss_total += loss
            active_labels = active_labels.data.cpu().numpy()
            predic = torch.max(active_logits.data, 1)[1].cpu().numpy()

            labels_all = np.append(labels_all, active_labels)
            predict_all = np.append(predict_all, predic)

    true_label = [label_map[key] for key in labels_all]
    predict_label = [label_map[key] for key in predict_all]

    acc = metrics.accuracy_score(labels_all, predict_all)
    precision = precision_score(true_label, predict_label)
    recall = recall_score(true_label, predict_label)
    f1 = f1_score(true_label, predict_label)
    if test:
        report = classification_report(true_label, predict_label, digits=4)
        confusion = metrics.confusion_matrix(true_label, predict_label)
        return acc, precision, recall, f1, loss_total / len(
            data_iter), report, confusion
    return acc, precision, recall, f1, loss_total / len(data_iter)
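A self-contained toy version of the active-token masking used above (shapes made up): only positions where attention_mask == 1 contribute to the loss and the metrics:

import torch

num_labels = 3
logits = torch.randn(2, 4, num_labels)          # (batch, seq_len, num_labels)
labels = torch.randint(0, num_labels, (2, 4))
attention_mask = torch.tensor([[1, 1, 1, 0],
                               [1, 1, 0, 0]])

active = attention_mask.view(-1) == 1
active_logits = logits.view(-1, num_labels)[active]
active_labels = labels.view(-1)[active]
print(active_logits.shape, active_labels.shape)  # torch.Size([5, 3]) torch.Size([5])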
Example #18
    def forward(self,
                input_ids,
                token_type_ids=None,
                attention_mask=None,
                labels=None):
        last_output, pooled_output = self.bert(input_ids,
                                               token_type_ids,
                                               attention_mask,
                                               output_all_encoded_layers=False)
        # last_output = torch.cuda.FloatTensor(last_output)
        # attention_mask = torch.cuda.FloatTensor(attention_mask)
        pooled_output = torch.sum(
            last_output * attention_mask.float().unsqueeze(2),
            dim=1) / torch.sum(attention_mask.float(), dim=1, keepdim=True)
        '''
        batch_size = input_ids.size(0)
        caps_output = self.caps(last_output)  # (batch_size, num_capsule, dim_capsule)
        caps_output = caps_output.view(batch_size, -1)  # (batch_size, num_capsule*dim_capsule)
        caps_dropout = self.dropout(caps_output)
        logits = self.dense(caps_dropout)
        '''

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        if labels is not None:
            # loss_fct = BCEWithLogitsLoss()
            # loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1, self.num_labels))
            alpha = 0.75
            gamma = 3

            # focal loss

            x = logits.view(-1, self.num_labels)
            t = labels.view(-1, self.num_labels)
            '''
            p = x.sigmoid()
            pt = p*t + (1-p)*(1-t)
            w = alpha*t + (1-alpha)*(1-t)
            w = w*(1-pt).pow(gamma)
            # return F.binary_cross_entropy_with_logits(x, t, w, size_average=False)
            return binary_cross_entropy(x, t, weight=w, smooth_eps=0.1, from_logits=True)
            '''
            loss_fct = FocalLoss(logits=True)
            loss = loss_fct(x, t)
            return loss
        else:
            return logits
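The commented-out block above spells out a binary focal-loss weighting; a self-contained sketch of that formula (not necessarily the project's actual FocalLoss(logits=True) implementation), using the alpha and gamma values defined above:

import torch
import torch.nn as nn
import torch.nn.functional as F

class BinaryFocalLossWithLogits(nn.Module):
    # sketch of the commented weighting: w = (alpha*t + (1-alpha)*(1-t)) * (1 - p_t)^gamma
    def __init__(self, alpha=0.75, gamma=3.0):
        super().__init__()
        self.alpha = alpha
        self.gamma = gamma

    def forward(self, x, t):
        p = torch.sigmoid(x)
        pt = p * t + (1 - p) * (1 - t)                   # probability of the true class
        w = self.alpha * t + (1 - self.alpha) * (1 - t)  # class-balancing term
        w = w * (1 - pt).pow(self.gamma)                 # down-weight easy examples
        return F.binary_cross_entropy_with_logits(x, t, weight=w, reduction='sum')

x = torch.randn(6)
t = torch.randint(0, 2, (6,)).float()
print(BinaryFocalLossWithLogits()(x, t))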
def setup_and_train(params):
    model = Net(params).cuda() if params.cuda else Net(params)

    image_size = model.image_size()
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(image_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(), normalize
    ])

    valid_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(image_size),
        transforms.ToTensor(), normalize
    ])

    loss_fn = FocalLoss()

    # Observe that all parameters are being optimized
    # optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    optimizer = optim.SGD([{
        'params': model.base_parameters
    }, {
        'params': model.last_parameters,
        'lr': 1e-2
    }],
                          lr=1e-3,
                          momentum=0.9)
    # optimizer = optim.Adam(model.parameters(), lr=params.learning_rate)

    # Decay LR by a factor of 0.1 every 7 epochs
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer,
                                           step_size=params.step_size,
                                           gamma=params.gama)

    dataloaders = get_dateloaders(params,
                                  train_transform=train_transform,
                                  valid_transform=valid_transform)

    train_and_evaluate(model=model,
                       dataloaders=dataloaders,
                       optimizer=optimizer,
                       loss_fn=loss_fn,
                       scheduler=exp_lr_scheduler,
                       params=params)
Example #20
    def __init__(
        self,
        classes=80,
        state_dict_path='/Users/nick/.cache/torch/checkpoints/resnet50-19c8e357.pth',
        strides=(8, 16, 32, 64, 128),
        regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512), (512,
                                                                      INF))):
        super(FCOS, self).__init__()
        self.backbone = ResNet50FPN(state_dict_path=state_dict_path)
        self.name = 'RetinaNet'
        self.classes = classes
        self.strides = strides
        self.regress_ranges = regress_ranges

        self.threshold = 0.05
        self.top_n = 1000
        self.nms = 0.5
        self.detections = 100

        def make_head():
            layers = []
            for _ in range(4):
                layers += [
                    nn.Conv2d(256, 256, 3, padding=1, bias=False),
                    nn.GroupNorm(32, 256),
                    nn.ReLU(inplace=True)
                ]
            return nn.Sequential(*layers)

        self.cls_convs = make_head()
        self.reg_convs = make_head()

        self.fcos_cls = nn.Conv2d(256, 80, kernel_size=3, padding=1)
        self.fcos_reg = nn.Conv2d(256, 4, kernel_size=3, padding=1)
        self.fcos_centerness = nn.Conv2d(256, 1, kernel_size=3, padding=1)

        self.initialize()

        self.cls_criterion = FocalLoss()
        self.box_criterion = IoULoss()
        self.centerness_criterion = nn.BCEWithLogitsLoss(reduction='none')
 def configure_loss_function(loss_function):
     # loss function
     if loss_function == 'CrossEntropyLoss':
         loss_func = torch.nn.CrossEntropyLoss()
     elif loss_function == 'LabelSmoothCrossEntropyLoss':
         loss_func = LabelSmoothCrossEntropyLoss(smoothing=0.2)
     elif loss_function == 'BiTemperedLogisticLoss':
         loss_func = BiTemperedLogisticLoss(reduction='mean',
                                            t1=0.8,
                                            t2=1.4,
                                            label_smoothing=0.2,
                                            OHEM=0.7)
     elif loss_function == 'FocalLoss':
         loss_func = FocalLoss(reduction='sum',
                               alpha=0.25,
                               gamma=2,
                               smooth_eps=0.2,
                               class_num=5)
     else:
         raise ValueError(loss_function)
     return loss_func
Example #22
def main():
    print('model settings:\n', args)

    # 1. load the datasets
    train_loader, val_loader, test_loader = dataloaders()
    #show number of positive attributes
    print(positive_attributes(train_loader.dataset))
    print(positive_attributes(val_loader.dataset))
    print(positive_attributes(test_loader.dataset))

    # 2. retrieve the pretrained model
    model = load_model()
    #if resume is true, load the previously saved checkpoint
    if args.resume:
        state_dict = torch.load(args.root + 'checkpoints/checkpoint.pth')
        model.load_state_dict(state_dict)
    model.to(device)

    if args.train_conv:
        parameters = model.parameters()
    else:
        parameters = model.fc.parameters()

    # 3. train and validate the model
    # criterion = nn.BCEWithLogitsLoss()
    from loss import FocalLoss
    criterion = FocalLoss(alpha=1, gamma=1)

    optimizer = optim.SGD(parameters, lr=args.lr, momentum=0.9)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.5)

    # 4. test model accuracy on test set

    if args.test_mode:
        test(model, test_loader, criterion, device)
    else:
        model = train_validate(model, criterion, optimizer, scheduler,
                               train_loader, val_loader, device)

        test(model, test_loader, criterion, device)
Example #23
    def __init__(
        self,
        classes=80,
        stride=128,
        state_dict_path='/Users/nick/.cache/torch/checkpoints/resnet50-19c8e357.pth'
    ):
        super(RetinaNet, self).__init__()
        self.backbone = ResNet50FPN(state_dict_path=state_dict_path,
                                    stride=stride)
        self.name = 'RetinaNet'
        self.ratios = [1.0, 2.0, 0.5]
        self.scales = [4 * 2**(i / 3) for i in range(3)]
        self.anchors = {}
        self.classes = classes

        self.threshold = 0.05
        self.top_n = 1000
        self.nms = 0.5
        self.detections = 100

        self.stride = self.backbone.stride

        def make_head(out_size):
            layers = []
            for _ in range(4):
                layers += [
                    nn.Conv2d(256, 256, 3, padding=1),
                    nn.ReLU(inplace=True)
                ]
            layers += [nn.Conv2d(256, out_size, 3, padding=1)]
            return nn.Sequential(*layers)

        num_anchors = len(self.ratios) * len(self.scales)
        self.cls_head = make_head(classes * num_anchors)
        self.box_head = make_head(4 * num_anchors)
        self.initialize()

        self.cls_criterion = FocalLoss()
        self.box_criterion = SmoothL1Loss()
Example #24
 def __init__(self,
              num_classes=81,
              in_channels=256,
              stacked_convs=4,
              anchor_ratios=(0.5, 1, 2),
              anchor_strides=(8, 16, 32, 64, 128)):
     super(RetinaHead, self).__init__()
     self.num_classes = num_classes
     self.in_channels = in_channels
     self.stacked_convs = stacked_convs
     self.anchor_scales = np.array([2 ** (i / 3) for i in range(3)]) * 4
     self.cls_out_channels = num_classes - 1
     self.anchor_ratios = anchor_ratios
     self.anchor_strides = anchor_strides
     self.anchor_base_sizes = list(anchor_strides)
     self.loss_cls = FocalLoss()
     self.loss_bbox = SmoothL1Loss()
     self.anchor_generators = []
     for anchor_base in self.anchor_base_sizes:
         self.anchor_generators.append(AnchorGenerator(anchor_base, self.anchor_scales, anchor_ratios))
     self.num_anchors = len(self.anchor_ratios) * len(self.anchor_scales)
     self._init_layers()
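For reference, the anchor_scales expression above evaluates to roughly [4.0, 5.04, 6.35] (three scales per octave), so with three ratios there are 9 anchors per spatial location:

import numpy as np

anchor_scales = np.array([2 ** (i / 3) for i in range(3)]) * 4
print(anchor_scales)            # ~[4.   5.04 6.35]
print(3 * len(anchor_scales))   # 9 anchors per spatial location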
Example #25
    def train_obj():

        # Model
        net = RetinaNet()
        net = torch.nn.DataParallel(net,
                                    device_ids=range(
                                        torch.cuda.device_count()))
        net.cuda()

        criterion = FocalLoss()
        optimizer = optim.SGD(net.parameters(),
                              lr=args.lr,
                              momentum=0.9,
                              weight_decay=1e-4)

        scheduler_obj = torch.optim.lr_scheduler.LambdaLR(
            optimizer,
            lr_lambda=lambda x: (1 - x / (len(trainloader) * epochs))**0.9)

        obj_trainer = ObjDetTrainer(net, criterion, optimizer, scheduler_obj,
                                    trainloader, valloader, device)
        obj_trainer.train(epochs, True)
Example #26
    def __init__(self, backbones='ResNet50FPN', classes=80, config={}):
        super().__init__()

        if not isinstance(backbones, list):
            backbones = [backbones]

        self.backbones = nn.ModuleDict(
            {b: getattr(backbones_mod, b)()
             for b in backbones})
        self.name = 'RetinaNet'
        self.exporting = False

        self.ratios = [1.0, 2.0, 0.5]
        self.scales = [4 * 2**(i / 3) for i in range(3)]
        self.anchors = {}
        self.classes = classes

        self.threshold = config.get('threshold', 0.05)
        self.top_n = config.get('top_n', 1000)
        self.nms = config.get('nms', 0.5)
        self.detections = config.get('detections', 100)

        self.stride = max([b.stride for _, b in self.backbones.items()])

        # classification and box regression heads
        def make_head(out_size):
            layers = []
            for _ in range(4):
                layers += [nn.Conv2d(256, 256, 3, padding=1), nn.ReLU()]
            layers += [nn.Conv2d(256, out_size, 3, padding=1)]
            return nn.Sequential(*layers)

        anchors = len(self.ratios) * len(self.scales)
        self.cls_head = make_head(classes * anchors)
        self.box_head = make_head(4 * anchors)

        self.cls_criterion = FocalLoss()
        self.box_criterion = SmoothL1Loss(beta=0.11)
Example #27
def val(model, dataloader, data_len):
    # switch the model to evaluation mode
    criterion = FocalLoss(2)
    model.train(False)
    running_loss = 0
    running_corrects = 0
    confusion_matrix = meter.ConfusionMeter(2)
    result_list = []

    label_list = []
    for ii, data in enumerate(tqdm(dataloader, desc='Val %s On Anti-spoofing' % (opt.model), unit='batch')):
        input, label = data
        with torch.no_grad():
            val_input = Variable(input)
            val_label = Variable(label)
        if opt.use_gpu:
            val_input = val_input.cuda()
            val_label = val_label.cuda()
        score = model(val_input)
        _, preds = torch.max(score, 1)
        loss = criterion(score, val_label)
        # confusion_matrix.add(score.data.squeeze(), val_label)
        running_loss += loss.item() * val_input.size(0)
        running_corrects += torch.sum(preds == val_label.data)

        outputs = torch.softmax(score, dim=-1)
        preds = outputs.to('cpu').detach().numpy()
        for i_batch in range(preds.shape[0]):
            result_list.append(preds[i_batch, 1])
            label_list.append(label[i_batch])
    # switch the model back to training mode
    model.train(True)

    metric = roc.cal_metric(label_list, result_list)
    # cm_value = confusion_matrix.value()
    val_loss = running_loss / data_len
    val_accuracy = running_corrects.double() / float(data_len)
    return val_loss, val_accuracy, metric
Example #28
def criterion(prediction, mask, regr, size_average=True):
    # Binary mask loss
    pred_mask = torch.sigmoid(prediction[:, 0])

    #     mask_loss = mask * (1 - pred_mask)**2 * torch.log(pred_mask + 1e-12) + (1 - mask) * pred_mask**2 * torch.log(1 - pred_mask + 1e-12)
    # mask_loss = mask * torch.log(pred_mask + 1e-12) + (1 - mask) * torch.log(1 - pred_mask + 1e-12)
    # mask_loss = -mask_loss.mean(0).sum()

    # focal loss
    mask_criterion = FocalLoss(alpha=0.5)
    mask_loss = mask_criterion(pred_mask, mask)

    # Regression L1 loss
    pred_regr = prediction[:, 1:]
    regr_loss = (torch.abs(pred_regr - regr).sum(1) *
                 mask).sum(1).sum(1) / mask.sum(1).sum(1)
    regr_loss = regr_loss.mean(0)

    # Sum
    loss = Config.MASK_WEIGHT * mask_loss + regr_loss
    if not size_average:
        loss *= prediction.shape[0]
    return loss
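A toy check of the per-image masked L1 regression term above, with assumed shapes of (batch, channels, H, W) for the regression maps and (batch, H, W) for the binary center mask; the sum is normalized by the number of positive cells in each image:

import torch

pred_regr = torch.randn(2, 7, 4, 4)
regr = torch.randn(2, 7, 4, 4)
mask = torch.zeros(2, 4, 4)
mask[0, 1, 2] = 1.0   # one positive cell per image so the denominator is non-zero
mask[1, 0, 3] = 1.0

regr_loss = (torch.abs(pred_regr - regr).sum(1) * mask).sum(1).sum(1) / mask.sum(1).sum(1)
print(regr_loss.mean(0))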
Example #29
def criterion(prediction,
              mask,
              regr,
              uncertain_loss,
              batch_idx,
              size_average=True):
    # Binary mask loss
    pred_mask = torch.sigmoid(prediction[:, 0])

    #     mask_loss = mask * (1 - pred_mask)**2 * torch.log(pred_mask + 1e-12) + (1 - mask) * pred_mask**2 * torch.log(1 - pred_mask + 1e-12)
    # mask_loss = mask * torch.log(pred_mask + 1e-12) + (1 - mask) * torch.log(1 - pred_mask + 1e-12)
    # mask_loss = -mask_loss.mean(0).sum()

    # focal loss
    mask_criterion = FocalLoss(alpha=Config.FOCAL_ALPHA)
    mask_loss = mask_criterion(pred_mask, mask)

    # Regression L1 loss
    pred_regr = prediction[:, 1:]
    regr_loss = (torch.abs(pred_regr - regr).sum(1) *
                 mask).sum(1).sum(1) / mask.sum(1).sum(1)
    regr_loss = regr_loss.mean(0)

    if batch_idx % 500 == 0:
        print("mask loss: {}".format(mask_loss))
        print("regr loss: {}".format(regr_loss))

    # Sum

    if not Config.USE_UNCERTAIN_LOSS:
        loss = Config.MASK_WEIGHT * mask_loss + regr_loss
    else:
        loss = uncertain_loss(Config.MASK_WEIGHT * mask_loss, regr_loss)

    if not size_average:
        loss *= prediction.shape[0]
    return loss
def main():
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--name', type=str)
    arg('--jaccard-weight', default=0.25, type=float)
    arg('--device-ids',
        type=str,
        default='0',
        help='For example 0,1 to run on two GPUs')
    arg('--fold', type=int, help='fold', default=0)
    arg('--output-dir', default='../data/runs', help='checkpoint root')
    arg('--batch-size', type=int, default=32)
    arg('--iter-size', type=int, default=1)
    arg('--n-epochs', type=int, default=100)
    arg('--lr', type=float, default=0.0001)
    arg('--workers', type=int, default=4)
    arg('--seed', type=int, default=0)
    arg('--model', type=str, default=models.archs[0], choices=models.archs)
    arg('--loss',
        type=str,
        default='focal',
        choices=[
            'focal', 'lovasz', 'bjd', 'bce_jaccard', 'bce_dice', 'cos_dice',
            'hinge'
        ])
    arg('--focal-gamma', type=float, default=.5)
    arg('--num-channels', type=int, default=3)
    arg('--weighted-sampler', action="store_true")
    arg('--ignore-empty-masks', action='store_true')
    arg('--remove-suspicious', action='store_true')
    arg('--resume', action="store_true")
    args = parser.parse_args()

    random.seed(args.seed)
    torch.manual_seed(args.seed)

    if not args.name:
        experiment = uuid.uuid4().hex
    else:
        experiment = args.name

    output_dir = Path(args.output_dir) / experiment
    output_dir.mkdir(exist_ok=True, parents=True)
    output_dir.joinpath('params.json').write_text(
        json.dumps(vars(args), indent=True, sort_keys=True))

    # in case --resume is provided it will be loaded later
    model = models.get_model(None, args.model)
    # model = models.get_model(f"../data/runs/exp81/model_{args.fold}.pth", args.model)

    if torch.cuda.is_available():
        if args.device_ids:
            device_ids = list(map(int, args.device_ids.split(',')))
        else:
            device_ids = None
        model = nn.DataParallel(model, device_ids=device_ids).cuda()

    train_ids, val_ids = dataset.get_split(args.fold)

    cudnn.benchmark = True

    train_loader = dataset.make_loader(
        train_ids,
        num_channels=args.num_channels,
        transform=dataset.train_transform(),
        shuffle=True,
        weighted_sampling=args.weighted_sampler,
        ignore_empty_masks=args.ignore_empty_masks,
        remove_suspicious=args.remove_suspicious,
        batch_size=args.batch_size,
        workers=args.workers)

    valid_loader = dataset.make_loader(
        val_ids,
        num_channels=args.num_channels,
        transform=dataset.val_transform(),
        shuffle=False,
        #batch_size=len(device_ids),
        batch_size=args.batch_size,  # len(device_ids),
        workers=args.workers)

    # optimizer = Adam([p for p in model.parameters() if p.requires_grad], lr=args.lr)
    optimizer = Adam(model.parameters(), lr=args.lr)

    # loss = LossBinary(jaccard_weight=args.jaccard_weight)
    # loss = LossBinaryMixedDiceBCE(dice_weight=0.5, bce_weight=0.5)
    if args.loss == 'focal':
        loss = FocalLoss(args.focal_gamma)
    elif args.loss == 'lovasz':
        loss = LossLovasz()
    elif args.loss == 'bjd':
        loss = BCEDiceJaccardLoss({'bce': 0.25, 'jaccard': None, 'dice': 0.75})
    elif args.loss == 'bce_jaccard':
        loss = LossBinary(args.jaccard_weight)
    elif args.loss == 'bce_dice':
        import loss2
        bce_weight = 1
        dice_weight = 2
        loss = loss2.make_loss(bce_weight, dice_weight)
    elif args.loss == 'cos_dice':
        import loss2
        loss = loss2.make_cos_dice_loss()
    elif args.loss == 'hinge':
        loss = LossHinge()

    else:
        raise NotImplementedError

    validation = validation_binary
    scheduler = ReduceLROnPlateau(optimizer,
                                  verbose=True,
                                  min_lr=1e-7,
                                  factor=0.5)
    snapshot = utils.fold_snapshot(output_dir,
                                   args.fold) if args.resume else None

    utils.train(experiment=experiment,
                output_dir=output_dir,
                optimizer=optimizer,
                args=args,
                model=model,
                criterion=loss,
                scheduler=scheduler,
                train_loader=train_loader,
                valid_loader=valid_loader,
                validation=validation,
                fold=args.fold,
                batch_size=args.batch_size,
                n_epochs=args.n_epochs,
                snapshot=snapshot,
                iter_size=args.iter_size)