Example #1
def feature_set2():
    # Build SVM-light style feature lines from set-overlap statistics for each word pair.
    for w1, w2, tag in pairs:
        s1, s2 = words[w1], words[w2]
        num_inter = len(s1.intersection(s2))
        num_adiffb = len(s1.difference(s2))
        num_bdiffa = len(s2.difference(s1))
        ji = metrics.jaccard_index(s1, s2)
        ida = metrics.intersect_divide_a(s1, s2)
        idb = metrics.intersect_divide_b(s1, s2)
        dadu = metrics.diff_a_divide_union(s1, s2)
        dbdu = metrics.diff_b_divide_union(s1, s2)

        if tag == 'Yes':
            tag = 1
        else:
            tag = -1

        #out = [num_inter, num_adiffb, num_bdiffa, ji, ida, idb, dadu, dbdu, tag]
        #sent = "{},{},{},{},{},{},{},{}'{}'".format(*out)

        out = [
            tag, num_inter, num_adiffb, num_bdiffa, ji, ida, idb, dadu, dbdu
        ]
        sent = "{} 1:{} 2:{} 3:{} 4:{} 5:{} 6:{} 7:{} 8:{}".format(*out)

        data.append(sent)

    print_features2_svm_format(data)
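
For reference, here is a minimal sketch of what the `metrics` helpers used above most likely compute on two sets; the names and formulas are assumptions inferred from the call sites, not the repo's actual implementations.

# Assumed definitions of the set-overlap helpers; the real `metrics` module may differ.
def jaccard_index(a, b):
    # |A ∩ B| / |A ∪ B|
    union = a | b
    return len(a & b) / len(union) if union else 0.0

def intersect_divide_a(a, b):
    # assumed: |A ∩ B| / |A|
    return len(a & b) / len(a) if a else 0.0

def intersect_divide_b(a, b):
    # assumed: |A ∩ B| / |B|
    return len(a & b) / len(b) if b else 0.0

def diff_a_divide_union(a, b):
    # assumed: |A \ B| / |A ∪ B|
    union = a | b
    return len(a - b) / len(union) if union else 0.0

def diff_b_divide_union(a, b):
    # assumed: |B \ A| / |A ∪ B|
    union = a | b
    return len(b - a) / len(union) if union else 0.0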
Example #2
def feature_set2():
    for w1, w2, tag in pairs:
        s1, s2 = words[w1], words[w2]
        num_inter = len(s1.intersection(s2))
        num_adiffb = len(s1.difference(s2))
        num_bdiffa = len(s2.difference(s1))
        ji = metrics.jaccard_index(s1, s2)
        ida = metrics.intersect_divide_a(s1, s2)
        idb = metrics.intersect_divide_b(s1, s2)
        dadu = metrics.diff_a_divide_union(s1, s2)
        dbdu = metrics.diff_b_divide_union(s1, s2)

        if tag == 'Yes':
            tag = 1
        else:
            tag = -1

        #out = [num_inter, num_adiffb, num_bdiffa, ji, ida, idb, dadu, dbdu, tag]
        #sent = "{},{},{},{},{},{},{},{}'{}'".format(*out)
        
        out = [tag, num_inter, num_adiffb, num_bdiffa, ji, ida, idb, dadu, dbdu]
        sent = "{} 1:{} 2:{} 3:{} 4:{} 5:{} 6:{} 7:{} 8:{}".format(*out)
        
        data.append(sent)

    print_features2_svm_format(data)
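
Each formatted line follows the sparse SVM-light / LIBSVM convention of a label followed by index:value pairs; with made-up counts (|A ∩ B| = 3, |A \ B| = 2, |B \ A| = 4) a line would look like:

1 1:3 2:2 3:4 4:0.3333 5:0.6 6:0.4286 7:0.2222 8:0.4444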
Example #3
def make_train_step(idx, data, model, optimizer, criterion, meters):
    """Run one optimization step on a single batch and update the running meters."""

    # get the inputs and wrap in Variable
    if torch.cuda.is_available():
        inputs = Variable(data['sat_img'].cuda())
        labels = Variable(data['map_img'].cuda())
    else:
        inputs = Variable(data['sat_img'])
        labels = Variable(data['map_img'])

    # zero the parameter gradients
    optimizer.zero_grad()

    # forward
    # prob_map = model(inputs) # last activation was a sigmoid
    # outputs = (prob_map > 0.3).float()
    outputs = model(inputs)

    # note: the weighted loss expects logits, not probabilities
    if args.lovasz_loss:
        loss, BCE_loss, DICE_loss = criterion(outputs, labels)
        outputs = torch.nn.functional.sigmoid(outputs)
    else:
        outputs = torch.nn.functional.sigmoid(outputs)
        loss, BCE_loss, DICE_loss = criterion(outputs, labels)

    # backward
    loss.backward()
    # https://github.com/asanakoy/kaggle_carvana_segmentation/blob/master/albu/src/train.py
    # torch.nn.utils.clip_grad_norm(model.parameters(), 1.)
    optimizer.step()

    meters["train_acc"].update(metrics.dice_coeff(outputs, labels),
                               outputs.size(0))
    meters["train_loss"].update(loss.data[0], outputs.size(0))
    meters["train_IoU"].update(metrics.jaccard_index(outputs, labels),
                               outputs.size(0))
    meters["train_BCE"].update(BCE_loss.data[0], outputs.size(0))
    meters["train_DICE"].update(DICE_loss.data[0], outputs.size(0))
    meters["outputs"] = outputs
    return meters
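
A rough sketch of how make_train_step might be driven from an epoch loop; train_loader and the meter names are assumptions based on this snippet and the validation function below, not the repo's actual training script.

# Hypothetical driver loop (train_loader is assumed); MetricTracker comes from the
# same `metrics` module used in validation() below.
meters = {name: metrics.MetricTracker()
          for name in ("train_acc", "train_loss", "train_IoU", "train_BCE", "train_DICE")}
for idx, batch in enumerate(tqdm(train_loader, desc='training')):
    meters = make_train_step(idx, batch, model, optimizer, criterion, meters)
print('Training Loss: {:.4f} IoU: {:.4f}'.format(
    meters["train_loss"].avg, meters["train_IoU"].avg))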
Example #4
def compile_model(model, num_classes, metrics, loss, lr):
    """Compile a Keras model, resolving string loss and metric names to callables."""
    from keras.losses import binary_crossentropy
    from keras.losses import categorical_crossentropy

    from keras.metrics import binary_accuracy
    from keras.metrics import categorical_accuracy

    from keras.optimizers import Adam

    from metrics import dice_coeff
    from metrics import jaccard_index
    from metrics import class_jaccard_index
    from metrics import pixelwise_precision
    from metrics import pixelwise_sensitivity
    from metrics import pixelwise_specificity
    from metrics import pixelwise_recall

    from losses import focal_loss

    if isinstance(loss, str):
        if loss in {'ce', 'crossentropy'}:
            if num_classes == 1:
                loss = binary_crossentropy
            else:
                loss = categorical_crossentropy
        elif loss in {'focal', 'focal_loss'}:
            loss = focal_loss(num_classes)
        else:
            raise ValueError('unknown loss %s' % loss)

    if isinstance(metrics, str):
        metrics = [metrics, ]

    for i, metric in enumerate(metrics):
        if not isinstance(metric, str):
            continue
        elif metric == 'acc':
            metrics[i] = binary_accuracy if num_classes == 1 else categorical_accuracy
        elif metric == 'jaccard_index':
            metrics[i] = jaccard_index(num_classes)
        elif metric == 'jaccard_index0':
            metrics[i] = class_jaccard_index(0)
        elif metric == 'jaccard_index1':
            metrics[i] = class_jaccard_index(1)
        elif metric == 'jaccard_index2':
            metrics[i] = class_jaccard_index(2)
        elif metric == 'jaccard_index3':
            metrics[i] = class_jaccard_index(3)
        elif metric == 'jaccard_index4':
            metrics[i] = class_jaccard_index(4)
        elif metric == 'jaccard_index5':
            metrics[i] = class_jaccard_index(5)
        elif metric == 'dice_coeff':
            metrics[i] = dice_coeff(num_classes)
        elif metric == 'pixelwise_precision':
            metrics[i] = pixelwise_precision(num_classes)
        elif metric == 'pixelwise_sensitivity':
            metrics[i] = pixelwise_sensitivity(num_classes)
        elif metric == 'pixelwise_specificity':
            metrics[i] = pixelwise_specificity(num_classes)
        elif metric == 'pixelwise_recall':
            metrics[i] = pixelwise_recall(num_classes)
        else:
            raise ValueError('metric %s not recognized' % metric)

    model.compile(optimizer=Adam(lr=lr),
                  loss=loss,
                  metrics=metrics)
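
A hedged usage sketch of compile_model; the model object, class count, metric list, and learning rate here are placeholders, not values from the repo.

# Hypothetical call; `model` is any already-built Keras model instance.
compile_model(model,
              num_classes=2,
              metrics=['acc', 'jaccard_index', 'dice_coeff'],
              loss='ce',
              lr=1e-4)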
Example #5
def test_jaccard_index(box1, box2, gt_iou):
    iou = jaccard_index(box1, box2)
    print("Is my jaccard index computation correct?",
          np.array_equal(iou, gt_iou))
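
The test above only checks jaccard_index against a known ground-truth IoU; below is a minimal sketch of an IoU computation for axis-aligned boxes in [x1, y1, x2, y2] form, where the box format is an assumption rather than something stated in the repo.

def box_iou(box1, box2):
    # Intersection-over-union of two [x1, y1, x2, y2] boxes (assumed format).
    ix1, iy1 = max(box1[0], box2[0]), max(box1[1], box2[1])
    ix2, iy2 = min(box1[2], box2[2]), min(box1[3], box2[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    area1 = (box1[2] - box1[0]) * (box1[3] - box1[1])
    area2 = (box2[2] - box2[0]) * (box2[3] - box2[1])
    union = area1 + area2 - inter
    return inter / union if union > 0 else 0.0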
Example #6
def validation(valid_loader, model, criterion, logger, epoch_num):
    """

    Args:
        train_loader:
        model:
        criterion:
        optimizer:
        epoch:

    Returns:

    """
    # logging accuracy and loss
    valid_acc = metrics.MetricTracker()
    valid_loss = metrics.MetricTracker()
    valid_IoU = metrics.MetricTracker()
    valid_BCE = metrics.MetricTracker()
    valid_DICE = metrics.MetricTracker()

    log_iter = len(valid_loader) // logger.print_freq

    # switch to evaluate mode
    model.eval()

    # Iterate over data.
    for idx, data in enumerate(tqdm(valid_loader, desc='validation')):

        # get the inputs and wrap in Variable
        if torch.cuda.is_available():
            inputs = Variable(data['sat_img'].cuda(), volatile=True)
            labels = Variable(data['map_img'].cuda(), volatile=True)
        else:
            inputs = Variable(data['sat_img'], volatile=True)
            labels = Variable(data['map_img'], volatile=True)

        # forward
        # prob_map = model(inputs) # last activation was a sigmoid
        # outputs = (prob_map > 0.3).float()
        outputs = model(inputs)

        # note: the weighted loss expects logits, not probabilities
        if args.lovasz_loss:
            loss, BCE_loss, DICE_loss = criterion(outputs, labels)
            outputs = torch.nn.functional.sigmoid(outputs)
        else:
            outputs = torch.nn.functional.sigmoid(outputs)
            loss, BCE_loss, DICE_loss = criterion(outputs, labels)

        valid_acc.update(metrics.dice_coeff(outputs, labels), outputs.size(0))
        valid_loss.update(loss.data[0], outputs.size(0))
        valid_IoU.update(metrics.jaccard_index(outputs, labels),
                         outputs.size(0))
        valid_BCE.update(BCE_loss.data[0], outputs.size(0))
        valid_DICE.update(DICE_loss.data[0], outputs.size(0))

        # tensorboard logging
        if idx % log_iter == 0:

            step = (epoch_num * logger.print_freq) + (idx / log_iter)

            # log accuracy and loss
            info = {
                'loss': valid_loss.avg,
                'accuracy': valid_acc.avg,
                'IoU': valid_IoU.avg
            }

            for tag, value in info.items():
                logger.scalar_summary(tag, value, step)

            # log the sample images
            log_img = [
                data_utils.show_tensorboard_image(data['sat_img'],
                                                  data['map_img'],
                                                  outputs,
                                                  as_numpy=True),
            ]
            logger.image_summary('valid_images', log_img, step)

    print(
        'Validation Loss: {:.4f} BCE: {:.4f} DICE: {:.4f} Acc: {:.4f} IoU: {:.4f}'
        .format(valid_loss.avg, valid_BCE.avg, valid_DICE.avg, valid_acc.avg,
                valid_IoU.avg))
    print()

    return {
        'valid_loss': valid_loss.avg,
        'valid_acc': valid_acc.avg,
        'valid_IoU': valid_IoU.avg,
        'valid_BCE': valid_BCE.avg,
        'valid_DICE': valid_DICE.avg
    }
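
Both make_train_step and validation target the pre-0.4 PyTorch API (Variable, volatile=True, loss.data[0], torch.nn.functional.sigmoid). On PyTorch 0.4 and later the evaluation loop would be wrapped roughly like the sketch below; it mirrors only the non-lovasz_loss branch and reuses the trackers defined in validation().

# Sketch of the PyTorch >= 0.4 idiom: tensors move to a device directly, gradients
# are disabled with torch.no_grad(), and scalars are read with .item().
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.eval()
with torch.no_grad():
    for data in valid_loader:
        inputs = data['sat_img'].to(device)
        labels = data['map_img'].to(device)
        outputs = torch.sigmoid(model(inputs))
        loss, BCE_loss, DICE_loss = criterion(outputs, labels)
        valid_loss.update(loss.item(), outputs.size(0))
        valid_IoU.update(metrics.jaccard_index(outputs, labels), outputs.size(0))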