# --- Example 1 ---
    def update_pbar(self, masks_predictions, masks_targets, pbar, average_meter, pbar_description):
        """Record batch IoU/mAP into the running meter and refresh the progress bar.

        Predictions are binarized at a 0.5 threshold and compared against the
        byte-cast targets; the bar description is rebuilt from every metric the
        meter currently tracks, then the bar advances by one step.
        """
        binarized = masks_predictions > 0.5
        targets = masks_targets.byte()
        average_meter.add('iou', iou(binarized, targets))
        average_meter.add('mAP', mAP(binarized, targets))

        metric_text = ''.join(
            ' {}:{:6.4f}'.format(name, value)
            for name, value in average_meter.get_all().items()
        )
        pbar.set_description(pbar_description + metric_text)
        pbar.update()
def validation(valid_loader, model, criterion, num_classes, batch_size, classifier):
    """Run one validation pass and print/return loss plus segmentation metrics.

    Args:
        valid_loader: validation data loader
        model: model to validate
        criterion: loss criterion
        num_classes: number of classes
        batch_size: number of samples to process simultaneously
        classifier: True if doing a classification task, False if doing semantic segmentation

    Returns:
        Dict of metric accumulators (at least 'loss', 'iou', 'precision',
        'recall', 'fscore'), each exposing an ``avg`` attribute.
    """
    valid_metrics = create_metrics_dict(num_classes)
    model.eval()
    # Loop-invariant: decide the device once instead of querying CUDA per batch.
    use_cuda = torch.cuda.is_available()

    for index, data in enumerate(valid_loader):
        with torch.no_grad():
            if classifier:
                inputs, labels = data
                if use_cuda:
                    inputs = inputs.cuda()
                    labels = labels.cuda()

                outputs = model(inputs)
                outputs_flatten = outputs
            else:
                # Segmentation batches arrive as a dict of image / label tensors.
                inputs = data['sat_img']
                labels = flatten_labels(data['map_img'])
                if use_cuda:
                    inputs = inputs.cuda()
                    labels = labels.cuda()

                outputs = model(inputs)
                outputs_flatten = flatten_outputs(outputs, num_classes)

            loss = criterion(outputs_flatten, labels)
            valid_metrics['loss'].update(loss.item(), batch_size)

            # Compute metrics every 2 batches. Time consuming.
            if index % 2 == 0:
                # Only the argmax (predicted class) is needed; max values are discarded.
                _, segmentation = torch.max(outputs_flatten, dim=1)
                valid_metrics = report_classification(segmentation, labels, batch_size, valid_metrics)
                valid_metrics = iou(segmentation, labels, batch_size, valid_metrics)

    print('Validation Loss: {:.4f}'.format(valid_metrics['loss'].avg))
    print('Validation iou: {:.4f}'.format(valid_metrics['iou'].avg))
    print('Validation precision: {:.4f}'.format(valid_metrics['precision'].avg))
    print('Validation recall: {:.4f}'.format(valid_metrics['recall'].avg))
    print('Validation f1-score: {:.4f}'.format(valid_metrics['fscore'].avg))

    return valid_metrics
# --- Example 3 ---
def compile_model(model, num_classes, metrics, loss, lr):
    if isinstance(loss, str):
        if loss in {'ce', 'crossentropy'}:
            loss = binary_crossentropy if num_classes == 1 else categorical_crossentropy
        elif loss in {'iou', 'jaccard_loss'}:
            loss = jaccard_loss
        elif loss in {'dice_loss', 'Dice'}:
            loss = dice_loss
        elif loss in {'ce_dice_loss'}:
            loss = ce_dice_loss
        elif loss in {'ce_jaccard_loss'}:
            loss = ce_jaccard_loss
        elif loss in {'tversky_loss'}:
            loss = tversky_loss
        else:
            raise ValueError('unknown loss %s' % loss)

    if isinstance(metrics, str):
        metrics = [
            metrics,
        ]

    for i, metric in enumerate(metrics):
        if not isinstance(metric, str):
            continue
        elif metric == 'acc':
            metrics[
                i] = binary_accuracy if num_classes == 1 else categorical_accuracy
        elif metric == 'iou':
            metrics[i] = iou(num_classes)
        elif metric == 'dice':
            metrics[i] = dice(num_classes)
        elif metric == 'pw_precesion':
            metrics[i] = pw_precesion(num_classes)
        elif metric == 'pw_prec':  # pixelwise
            metrics[i] = pw_prec(num_classes)
        elif metric == 'pw_sens':
            metrics[i] = pw_sens(num_classes)
        elif metric == 'pw_spec':
            metrics[i] = pw_spec(num_classes)
        elif metric == 'pw_recall':
            metrics[i] = pw_recall(num_classes)
        else:
            raise ValueError('metric %s not recognized' % metric)

    model.compile(optimizer=Adam(lr=lr), loss=loss, metrics=metrics)
# --- Example 4 ---
def test_on_mask_data(model, devkit_path):
    """Evaluate *model* on the devkit mask data and return the aggregate IoU.

    Args:
        model: callable segmentation model exposing an ``image_size`` attribute;
            invoked as ``model(image, scale_image=False)`` and expected to
            return ``(prediction, _, _)``.
        devkit_path: path handed to ``examples`` to enumerate image and
            ground-truth file names.

    Returns:
        The value of ``iou(predictions, gts)`` over the whole dataset.
    """
    print(" [*] Starting Evaluation", flush=True)
    print(" [**] Reading Data", flush=True)
    image_fns, gt_fns = examples(devkit_path)
    predictions = []
    gts = []
    # Iterate image / ground-truth paths in lockstep instead of indexing.
    for img_fn, gt_fn in zip(image_fns, gt_fns):
        image = read_image(img_fn)
        gt = read_groundtruth(gt_fn)
        # Keep only label 26 as the binary foreground — presumably the target
        # class id; confirm against the dataset's label map.
        gt = (gt == 26).astype(np.float32)

        # Nearest-neighbor for the mask (preserve labels), area interpolation
        # for the image (better downsampling quality).
        gt = aspect_preserving_resize(gt,
                                      model.image_size,
                                      resize_method=cv2.INTER_NEAREST)
        image = aspect_preserving_resize(image,
                                         model.image_size,
                                         resize_method=cv2.INTER_AREA)

        prediction, _, _ = model(image, scale_image=False)
        predictions.append(prediction)
        gts.append(gt)
    return iou(predictions, gts)
# --- Example 5 ---
    def fit(self, X, y, step, stop_gradients_for_convs=False):
        """Run one training step on the batch (X, y) and return batch stats.

        Returns:
            Tuple ``(loss, predictions, accuracy, iou)`` computed on this batch.
        """
        feed = {
            self.X: X,
            self.y: y,
            self.is_training: True,
            self.keep_prob: self.S.keep_prob,
            self.step: step,
            self.stop_gradients_for_convs: stop_gradients_for_convs,
        }
        fetches = [self.train_step, self.loss,
                   self.predictions, self.merged_summary]

        started = timer()
        _, loss, predictions, summary = self.session.run(fetches, feed)
        elapsed = timer() - started

        accuracy = metrics.accuracy(y, predictions)
        iou = metrics.iou(y, predictions, self.S.num_classes)

        # Log timing and flush summaries only every 10th step to limit overhead.
        if step % 10 == 0:
            logging.info("fit took %.6f sec" % (elapsed, ))
            self.summary_writer.add_summary(summary, step)

        return (loss, predictions, accuracy, iou)
# --- Example 6 ---
                # Simplify the contour, take its convex hull, and rasterize the
                # polygon into this instance's mask.
                vertices = cv2.approxPolyDP(contour, epsilon, True)
                vertices = cv2.convexHull(vertices, clockwise=True)
                cv2.fillPoly(predicted_mask, [vertices], 1)

            predictions.append(predicted_mask)
            gts.append(gt)
            # OR the per-instance masks into the full-image prediction and
            # ground-truth masks at this instance's ROI.
            # NOTE(review): np.bool / np.float are removed in NumPy >= 1.24;
            # these should become bool / float (or np.bool_ / np.float64).
            results[y1:y2,
                    x1:x2] = np.logical_or(results[y1:y2, x1:x2],
                                           predicted_mask.astype(np.bool))
            gt_masks[y1:y2, x1:x2] = np.logical_or(gt_masks[y1:y2, x1:x2],
                                                   gt.astype(np.bool))

        # Blend the predicted mask over the image (c presumably RGB given the
        # PIL round-trip — confirm channel order) and save the visualization.
        c = np.array([220.0, 50.0, 50.0])
        results = results.astype(np.float)
        colored_mask = (np.expand_dims(results, axis=-1) * c).astype(np.uint8)
        masked_image = cv2.addWeighted(image, 1.0, colored_mask, .8, gamma=0.0)
        masked_image = Image.fromarray(masked_image)
        masked_image.save(
            os.path.join("coco_results",
                         str(counter) + "mask.png"))

        # Same overlay, this time for the ground-truth mask.
        gt_masks = gt_masks.astype(np.float)
        colored_mask = (np.expand_dims(gt_masks, axis=-1) * c).astype(np.uint8)
        masked_image = cv2.addWeighted(image, 1.0, colored_mask, .8, gamma=0.0)
        masked_image = Image.fromarray(masked_image)
        masked_image.save(os.path.join("coco_results",
                                       str(counter) + "gt.png"))
        counter += 1

    # Aggregate IoU over every collected prediction / ground-truth pair.
    print(iou(predictions, gts))
# --- Example 7 ---
def run_test(weights='weights/yolo.h5',
             trt_engine='weights/engines/model_trained_yolo.fp16.engine',
             iou_threshold=0.5,
             confidence_threshold=0.8,
             trt=False,
             show=True):
    """Evaluate the YOLO detector on the test set and print IoU/accuracy stats.

    Args:
        weights: Keras weight file, used when ``trt`` is False.
        trt_engine: serialized TensorRT engine, used when ``trt`` is True.
        iou_threshold: minimum IoU for a detection to count as a true positive.
        confidence_threshold: confidence cut-off for anchor-to-bbox conversion.
        trt: run inference through TensorRT instead of the Keras model.
        show: kept for API compatibility; visualization is currently disabled.
    """
    if trt:
        engine = load_engine(trt_engine)
        inputs, outputs, bindings, stream = allocate_buffers(engine)
        context = engine.create_execution_context()
    else:
        # create the model
        model = yolo_model()
        model.load_weights(weights)

    # model.summary()

    # test
    list_test_images = load_test_images()
    test_set_size = len(list_test_images)
    print('Test_set_size : ', test_set_size)

    iou_list = []
    pr_list = []
    gt_list = []

    for i in range(test_set_size):
        print(i)
        image_name = list_test_images[i]
        preprocess = get_test_image(image_name)
        if trt:
            # BUGFIX: the copy into the TensorRT host buffer belongs on the
            # TRT path only — `inputs` does not exist when trt is False, so
            # the original raised NameError on the Keras path.
            np.copyto(inputs[0].host, preprocess.ravel())
            yolo_out = np.array([
                do_inference(context,
                             bindings=bindings,
                             inputs=inputs,
                             outputs=outputs,
                             stream=stream)
            ]).reshape((1, 7, 7, 5))
            yolo_output = yolo_out[0]
        else:
            yolo_output = model.predict(preprocess)[0]

        pr_bbox = convert_anchor_to_bbox(yolo_output,
                                         threshold=confidence_threshold,
                                         width=f.target_size,
                                         height=f.target_size)
        gt_bbox = get_test_bbox(image_name)

        # True negative: nothing predicted, nothing expected; IoU not counted.
        if gt_bbox is None and pr_bbox is None:
            pr_list.append(0)
            gt_list.append(0)
            tmp_iou = -1

        # False positive: prediction with no ground truth.
        elif gt_bbox is None and pr_bbox is not None:
            pr_list.append(1)
            gt_list.append(0)
            iou_list.append(0)
            tmp_iou = 0

        # False negative: ground truth missed entirely.
        elif gt_bbox is not None and pr_bbox is None:
            pr_list.append(0)
            gt_list.append(1)
            iou_list.append(0)
            tmp_iou = 0
        # Both present: score the overlap against the IoU threshold.
        elif gt_bbox is not None and pr_bbox is not None:
            gt_list.append(1)
            tmp_iou = iou(gt_bbox, pr_bbox)

            if tmp_iou > iou_threshold:
                pr_list.append(1)
            else:
                pr_list.append(0)

            iou_list.append(tmp_iou)

        #if show:
        #    show_result(preprocess[0], pr_bbox, gt_bbox, tmp_iou)
        #    if cv2.waitKey(60) & 0xff == 27:
        #        cv2.destroyAllWindows()
        #        break

    # Guard against an empty IoU list (e.g. every image was a true negative),
    # which would otherwise raise ZeroDivisionError.
    avg_iou = sum(iou_list) / len(iou_list) if iou_list else 0.0
    acc, recall, precision, _ = get_stat(gt_list, pr_list)

    print('Avg iou   : {:.2f}'.format(avg_iou * 100))
    print('Accuracy  : {:.2f} %'.format(acc * 100))
    print('Recall    : {:.2f} %'.format(recall * 100))
    print('Precision : {:.2f} %'.format(precision * 100))
# --- Example 8 ---
def train(
    epochs: int,
    models_dir: Path,
    x_cities: List[CityData],
    y_city: List[CityData],
    mask_dir: Path,
):
    """Train a UNet11 on ``x_cities`` and validate on ``y_city`` each epoch.

    Uses a 0.75 * BCE-with-logits + 0.25 * dice loss, Adam at lr 3e-4, and a
    ReduceLROnPlateau scheduler driven by the test loss. The best model
    (lowest test loss so far) is checkpointed under
    ``models_dir / y_city[0].name``. Requires CUDA.

    Args:
        epochs: number of training epochs.
        models_dir: directory checkpoints are saved under.
        x_cities: training cities (see ``TrainDataset``).
        y_city: held-out city/cities used as the test set; y_city[0].name
            also names the checkpoint subdirectory.
        mask_dir: directory containing the target masks.
    """
    model = UNet11().cuda()
    optimizer = Adam(model.parameters(), lr=3e-4)
    # Quarter the lr after 4 epochs without test-loss improvement.
    scheduler = ReduceLROnPlateau(optimizer, patience=4, factor=0.25)
    min_loss = sys.maxsize
    criterion = nn.BCEWithLogitsLoss()
    train_data = DataLoader(TrainDataset(x_cities, mask_dir),
                            batch_size=4,
                            num_workers=4,
                            shuffle=True)
    test_data = DataLoader(TestDataset(y_city, mask_dir),
                           batch_size=6,
                           num_workers=4)

    for epoch in range(epochs):
        print(f'Epoch {epoch}, lr {optimizer.param_groups[0]["lr"]}')
        print(f"    Training")

        losses = []
        ious = []
        jaccs = []

        batch_iterator = enumerate(train_data)

        model = model.train().cuda()
        for i, (x, y) in tqdm(batch_iterator):
            optimizer.zero_grad()
            x = x.cuda()
            y = y.cuda()

            # Targets and sigmoid probabilities are flattened to 1-D so the
            # pixelwise loss/metrics see matching shapes.
            y_real = y.view(-1).float()
            y_pred = model(x)
            y_pred_probs = torch.sigmoid(y_pred).view(-1)
            # BCE takes raw logits; dice takes probabilities.
            loss = 0.75 * criterion(y_pred.view(
                -1), y_real) + 0.25 * dice_loss(y_pred_probs, y_real)

            iou_ = iou(y_pred_probs.float(), y_real.byte())
            jacc_ = jaccard(y_pred_probs.float(), y_real)
            ious.append(iou_.item())
            losses.append(loss.item())
            jaccs.append(jacc_.item())

            loss.backward()
            optimizer.step()

        print(
            f"Loss: {np.mean(losses)}, IOU: {np.mean(ious)}, jacc: {np.mean(jaccs)}"
        )

        # Evaluation pass: same loss/metrics, no gradients.
        model = model.eval().cuda()
        losses = []
        ious = []
        jaccs = []

        with torch.no_grad():
            batch_iterator = enumerate(test_data)
            for i, (x, y) in tqdm(batch_iterator):
                x = x.cuda()
                y = y.cuda()
                y_real = y.view(-1).float()
                y_pred = model(x)
                y_pred_probs = torch.sigmoid(y_pred).view(-1)
                loss = 0.75 * criterion(y_pred.view(
                    -1), y_real) + 0.25 * dice_loss(y_pred_probs, y_real)

                iou_ = iou(y_pred_probs.float(), y_real.byte())
                jacc_ = jaccard(y_pred_probs.float(), y_real)
                ious.append(iou_.item())
                losses.append(loss.item())
                jaccs.append(jacc_.item())
            test_loss = np.mean(losses)
            print(
                f"Loss: {np.mean(losses)}, IOU: {np.mean(ious)}, jacc: {np.mean(jaccs)}"
            )

        # Scheduler and checkpointing are both driven by the mean test loss.
        scheduler.step(test_loss)
        if test_loss < min_loss:
            min_loss = test_loss
            save_model(model, epoch, models_dir / y_city[0].name)