Example No. 1
def img_to_cut(img):
    # Pipeline: contour -> base points -> cut points -> cut indices
    # -> rotation parameters -> cropped, rotated cut image.
    contour = get_contour(img)
    base_a, base_b = get_base(contour)
    cut_a, cut_b = get_cut(contour, base_a, base_b)
    cut_indices = get_cut_indices(contour, base_a, base_b, cut_a, cut_b)
    rotation_angle, base_center = calc_rotation_parameters(
        contour, base_a, base_b, cut_a)
    return get_cut_img(img, contour, cut_indices, rotation_angle, base_center)
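The helpers used above (get_contour, get_base, get_cut, get_cut_indices, calc_rotation_parameters, get_cut_img) are not shown in this example. As a rough sketch only, assuming OpenCV 4 and an 8-bit grayscale input, get_contour might extract the largest external contour like this; the name get_contour_sketch and the Otsu thresholding are assumptions, not the original implementation:

import cv2

def get_contour_sketch(img):
    # Hypothetical stand-in for get_contour(): largest external contour
    # of an Otsu-thresholded grayscale image, returned as an (N, 2) array.
    _, binary = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    return max(contours, key=cv2.contourArea).squeeze(1)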
Example No. 2
                                         step3=16)
        best_iou = (val_iou['Cerebrospinal fluid'] + val_iou['Gray matter'] +
                    val_iou['White matter']) / 3
        print('loss = {} avg_iou = {}'.format(val_loss, best_iou))
        print(
            'Cerebrospinal fluid:{:.3f} Gray matter:{:.3f} White matter:{:.3f}'
            .format(val_iou['Cerebrospinal fluid'], val_iou['Gray matter'],
                    val_iou['White matter']))
    save_path = './weight/densenet9_v4_batch3.pth'

    for epoch in range(EPOCHES):
        train_loss = 0
        train_ious = {}
        model.train()
        for i, (img, gt_img) in enumerate(train_loader):
            contour = get_contour(gt_img)
            contour = contour.cuda()
            img = img.cuda()
            gt_img = gt_img.cuda()

            optimizer.zero_grad()
            pred, contour_pred = model(img)

            if opt.contour_reg_flag == 1:
                loss = loss_fn([pred, contour_pred], [gt_img, contour])
            else:
                loss = loss_fn(pred, gt_img)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
            iou = calculate_dc(torch.argmax(pred, dim=1), gt_img)
            for k, v in iou.items():
                if k in train_ious:
                    train_ious[k] += v
                else:
                    train_ious[k] = v
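calculate_dc is not defined in this snippet; judging by the keys used above, it returns a per-class overlap score. The sketch below is an assumed per-class Dice coefficient over label volumes; the name calculate_dc_sketch, the class list, and the background-is-0 convention are guesses, not the original code:

import torch

def calculate_dc_sketch(pred_labels, gt_labels, eps=1e-6,
                        class_names=('Cerebrospinal fluid', 'Gray matter',
                                     'White matter')):
    # Assumed label convention: 0 is background, 1..3 are the listed tissues.
    scores = {}
    for idx, name in enumerate(class_names, start=1):
        pred_mask = (pred_labels == idx).float()
        gt_mask = (gt_labels == idx).float()
        intersection = (pred_mask * gt_mask).sum()
        scores[name] = (2 * intersection /
                        (pred_mask.sum() + gt_mask.sum() + eps)).item()
    return scores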
Example No. 3
def evaluate_avg(model, val_loader, loss_fn, step1=32, step2=32, step3=32):
    FRAMES = opt.frames
    HEIGHT = opt.cut_height
    WIDTH = opt.cut_width
    EPOCHES = opt.epoches

    with torch.no_grad():
        model.eval()
        losses = 0
        ious = {}
        itr = 0
        for idx, (img, gt_img) in enumerate(val_loader):
            contour = get_contour(gt_img)
            contour = contour.cuda()
            img = img.cuda()
            gt_img = gt_img.cuda()
            D, H, W = img.shape[-3], img.shape[-2], img.shape[-1]
            # Accumulators for overlapping patch predictions plus a per-voxel
            # counter used to average them after the sliding-window pass.
            pred_full = torch.zeros((1, 4, D, H, W),
                                    dtype=torch.float32).cuda()
            contour_pred_full = torch.zeros((1, 1, D, H, W),
                                            dtype=torch.float32).cuda()
            pred_cnt = torch.zeros((D, H, W), dtype=torch.float32).cuda()
            for i in range(0, D, step1):
                for j in range(0, H, step2):
                    for k in range(0, W, step3):
                        ti, tj, tk = i, j, k
                        # Clamp each window start so the final patch in every
                        # axis ends exactly at the volume border.
                        if i + FRAMES > D:
                            ti = D - FRAMES
                        if j + HEIGHT > H:
                            tj = H - HEIGHT
                        if k + WIDTH > W:
                            tk = W - WIDTH
                        imgp = img[:, :, ti:ti + FRAMES, tj:tj + HEIGHT,
                                   tk:tk + WIDTH]
                        pred_imgp, contour_pred_imgp = model(imgp)
                        pred_full[:, :, ti:ti + FRAMES, tj:tj + HEIGHT,
                                  tk:tk + WIDTH] += pred_imgp
                        contour_pred_full[:, :, ti:ti + FRAMES, tj:tj + HEIGHT,
                                          tk:tk + WIDTH] += contour_pred_imgp
                        pred_cnt[ti:ti + FRAMES, tj:tj + HEIGHT,
                                 tk:tk + WIDTH] += 1

            # print((pred_cnt == 0).nonzero())
            # Average the overlapping predictions per voxel.
            pred_full /= pred_cnt
            contour_pred_full /= pred_cnt

            if opt.contour_reg_flag == 1:
                loss = loss_fn([pred_full, contour_pred_full],
                               [gt_img, contour])
            else:
                loss = loss_fn(pred_full, gt_img)

            losses += loss
            iou = calculate_dc(torch.argmax(pred_full, dim=1), gt_img)

            for k, v in iou.items():
                if k in ious:
                    ious[k] += v
                else:
                    ious[k] = v
        losses = losses / len(val_loader)
        for k in ious:
            ious[k] = ious[k] / len(val_loader)
        return losses, ious
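evaluate_avg tiles the volume with overlapping patches, clamps the last window in each axis so it ends exactly at the border, accumulates the patch predictions, and divides by how many windows covered each voxel. A minimal sketch of the start-index clamping on a single axis, with hypothetical sizes:

D, FRAMES, step = 70, 32, 32  # assumed depth, patch size, and stride
starts = []
for i in range(0, D, step):
    ti = i if i + FRAMES <= D else D - FRAMES  # clamp the last window
    starts.append(ti)
print(starts)  # [0, 32, 38]: windows [0, 32), [32, 64), [38, 70) cover the full depth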
Example No. 4
IMG = wwf.open_image(IMG_NAME)
IMG = cf.crop_image(IMG)
AX_MASK = cf.color_mask(IMG, COL_VALUES.WHITE.AXE)
ANGLE, CENTER, SCALE = axes.get_rotation_angle(AX_MASK)
CENTER = [CENTER[1], CENTER[0]]
ROTATED_IMG = cf.rotate_image(IMG, ANGLE)
"""При повороте изображения значения пикселей немного меняются,
и створки уже не попадают в маску. Поэтому поворачиваем маску
оригинального изображения"""

WDG_MASK = cf.rotate_image(cf.color_mask(IMG, COL_VALUES.WHITE.WEDGE), ANGLE)

WEDGE_X, WEDGE_Y = wdg.get_wedges(WDG_MASK)

CONTOUR_MASK = cf.rotate_image(cf.color_mask(IMG, COL_VALUES.WHITE.CONTOUR),
                               ANGLE)
ROTATED_AXE_MASK = cf.rotate_image(AX_MASK, ANGLE)
CONTOUR_MASK = cnt.draw_lines(CONTOUR_MASK, WEDGE_X, WEDGE_Y,
                              int(round(SCALE)))
# now returns not a mask but [200, 200, 200] values
CONTOUR_MASK = cnt.fix_contour1(CONTOUR_MASK)
CONTOUR_MASK = cnt.inverse_fill(cnt.simple_fill(CONTOUR_MASK)).astype(
    "int32")  # возвращает уже маску
CONTOUR_POINTS = cnt.get_contour(CONTOUR_MASK)
CONTOUR_POINTS = cnt.rearrange(CONTOUR_POINTS, CENTER)

# Copy-pasted from the previous version; correctness not guaranteed.
X, Y = fin.transform_coords(CONTOUR_POINTS, CENTER, SCALE)
wwf.writeinfile(X, Y, CENTER, IMG_NAME)
# wwf.generate_txt(WEDGE_X,WEDGE_Y,X,Y,CENTER)
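cf.rotate_image is not shown here. As a hedged illustration of the note above (rotating the mask built from the original image rather than re-masking the rotated image), a rotation helper for a binary mask could look like the sketch below, assuming OpenCV; the function name and signature are assumptions:

import cv2
import numpy as np

def rotate_mask_sketch(mask, angle_deg, center=None):
    # Hypothetical counterpart of cf.rotate_image() applied to a binary mask.
    h, w = mask.shape[:2]
    if center is None:
        center = (w / 2.0, h / 2.0)
    rot = cv2.getRotationMatrix2D(center, angle_deg, 1.0)
    # INTER_NEAREST keeps the mask strictly binary; interpolation would smear
    # edge values, which is the problem the note above describes.
    return cv2.warpAffine(mask.astype(np.uint8), rot, (w, h), flags=cv2.INTER_NEAREST)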