Example #1
def demo_eDP():
    I = imread('images/barbara.png')
    sigma = 25.0 / 255.0
    PRNG = np.random.RandomState(0)
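    # Add i.i.d. Gaussian noise with standard deviation sigma (25/255, i.e. noise level 25 on an 8-bit scale);
    # the fixed RandomState seed keeps the result reproducible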
    y = I + sigma * PRNG.randn(I.shape[0], I.shape[1])
    gridModel = DPGridModel('models/DP')
    x, PSNR = gridModel.denoise(y, sigma, I)
    imwrite(x, 'eDP_results.png')
Example #2
def demo_inpainting():
    inFileName = 'images/new_original.png'
    y = imread(inFileName, outputFormat='YCbCr')
    maskFileName = 'images/new_mask.png'
    mask = np.array(imread(maskFileName), dtype=bool)
    gridModel = HDPGridModel('models/HDP')
    x = gridModel.inpaint(y, mask)
    imwrite(x, 'HDP_inpainting_results.png')
Example #3
def detect_with_network(network, imgs, masks, threshold=0.5, fold=-1):
    #imgs = np.array([[imgs[0]]])
    length = len(imgs)
    outs = network.network.predict(imgs)
    print('Prob map output {}'.format(outs.shape))
    util.imwrite('data/probmap_{}.jpg'.format(fold), outs[0, 0, :, :])

    side = outs[0][0].shape[0]
    scale_factor = int(512 / side)
    min_distance = int(side / 71.68)
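    # The probability map is lower-resolution than the 512x512 input, so peak coordinates
    # are scaled back up by scale_factor; min_distance enforces a minimum peak separation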
    print('img size {}, prob size {}, min_distance {}, scale_factor {}'.format(
        512, side, min_distance, scale_factor))

    blob_set = []
    prob_set = []
    for i in range(length):
        prob_map = outs[i][0]
        coords = peak_local_max(prob_map, min_distance)

        blobs = []
        for coord in coords:
            blobs.append(
                [coord[0] * scale_factor, coord[1] * scale_factor, 25])

        blobs = filter_by_margin(
            filter_by_size(filter_by_masks(blobs, masks[i])), masks[i])

        print('# coords {}'.format(len(coords)))
        print('# blobs before threshold {}'.format(len(blobs)))
        # Replace the `threshold` argument with the mean peak probability over all detected coordinates
        threshold = 0
        for blob in blobs:
            threshold += prob_map[blob[0] // scale_factor,
                                  blob[1] // scale_factor]
        threshold /= len(coords)

        tmp = []
        for blob in blobs:
            if prob_map[blob[0] // scale_factor,
                        blob[1] // scale_factor] > threshold:
                tmp.append(blob)
        blobs = np.array(tmp)

        print('# blobs after threshold {}'.format(len(blobs)))
        if len(blobs) == 0:
            raise Exception("No blobs found at {}".format(i))
        blob_set.append(blobs)

        prob_set.append([])
        for blob in blobs:
            prob_set[-1].append(prob_map[blob[0] // scale_factor,
                                         blob[1] // scale_factor])
        prob_set[-1] = np.array(prob_set[-1])

    return np.array(blob_set), np.array(prob_set)
Example #4
def test_canvas(model_name, image_name, weight, prob_thresh=0.2, nms_thresh=0.4, mode=1, use_gpu=True):
    print('load weight')
    model = load_model(model_name, weight, mode, use_gpu)
    if use_gpu:
        model.to(gpudevice)
    print("detecting")
    image = cv2.imread(image_name)
    img = prep_image(image, inp_size)
    im_dim = image.shape[1], image.shape[0]  # w,h
    im_dim = np.array(im_dim)

    output = []
    if use_gpu:
        # im_dim = im_dim.cuda()
        img = img.to(gpudevice)
    with torch.no_grad():
        pred = model(img)
    if use_gpu:
        pred = pred.cpu()
    probs = np.zeros((side * side * num, classes))
    boxes = np.zeros((side * side * num, 4))
    get_detection_boxes(pred, prob_thresh, nms_thresh, boxes, probs)
	
    # im_dim = torch.FloatTensor(im_dim).repeat(1, 2).numpy()
    scaling_factor = np.min(inp_size / im_dim)
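    # The input is letterboxed to inp_size x inp_size; the loop below removes the padding
    # offset, rescales each box back to the original image size, and clips it to the image bounds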
	
    for i in range(probs.shape[0]):
        cls = np.argmax(probs[i])
        prob = probs[i][cls]
        if prob > 0:
            out = np.zeros(6)
            out[:4] = correct_box(boxes[i], inp_size, inp_size)
            out[[0, 2]] -= (inp_size - scaling_factor * im_dim[0]) / 2
            out[[1, 3]] -= (inp_size - scaling_factor * im_dim[1]) / 2

            out[:4] = out[:4] / scaling_factor
            out[[0, 2]] = np.clip(out[[0, 2]], 0.0, im_dim[0])
            out[[1, 3]] = np.clip(out[[1, 3]], 0.0, im_dim[1])

            out[4] = prob
            out[5] = cls
            output.append(out)

    for item in output:
        # item = output[i]
        cls = int(item[-1])
        prob = float(item[-2])
        box = item[:4]
        image = imwrite(image, box, voc_class_names[cls], cls, prob)

    cv2.imshow("{}".format(image_name), image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #5
def save_rois(args):
    imgs_tr, blobs_tr = lidc.load(pts=False)
    pred_blobs_tr = detect.read_blobs('data/sbf-aam-lidc-pred-blobs.pkl')
    masks_tr = np.load('data/aam-lidc-pred-masks.npy')

    imgs_te, blobs_te = jsrt.load(set_name='jsrt140p')
    pred_blobs_te = detect.read_blobs('data/sbf-aam-jsrt140p-pred-blobs.pkl')
    masks_te = np.load('data/aam-jsrt140p-pred-masks.npy')

    rois_tr = create_rois(imgs_tr,
                          masks_tr,
                          pred_blobs_tr,
                          args,
                          real_blobs=blobs_tr)
    rois_te = create_rois(imgs_te,
                          masks_te,
                          pred_blobs_te,
                          args,
                          real_blobs=blobs_te)
    X_tr, Y_tr, X_te, Y_te = neural.create_train_test_sets(
        blobs_tr, pred_blobs_tr, rois_tr, blobs_te, pred_blobs_te, rois_te)
    X_tr, Y_tr = util.split_data_pos_neg(X_tr, Y_tr)
    X_te, Y_te = util.split_data_pos_neg(X_te, Y_te)

    X_pos = X_tr[0]
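    # Balance classes: sample (with replacement) as many negative ROIs as there are positives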
    idx = np.random.randint(0, len(X_tr[1]), len(X_pos))
    X_neg = X_tr[1][idx]

    print(len(X_pos), len(X_neg))
    for i in range(len(X_pos)):
        util.imwrite('data/lidc/roi{}p.jpg'.format(i), X_pos[i][0])
        np.save('data/lidc/roi{}p.npy'.format(i), X_pos[i])
        util.imwrite('data/lidc/roi{}n.jpg'.format(i), X_neg[i][0])
        np.save('data/lidc/roi{}n.npy'.format(i), X_neg[i])

    X_pos = X_te[0]
    idx = np.random.randint(0, len(X_te[1]), len(X_pos))
    X_neg = X_te[1][idx]

    print(len(X_pos), len(X_neg))
    for i in range(len(X_pos)):
        util.imwrite('data/jsrt140/roi{}p.jpg'.format(i), X_pos[i][0])
        np.save('data/jsrt140/roi{}p.npy'.format(i), X_pos[i])
        util.imwrite('data/jsrt140/roi{}n.jpg'.format(i), X_neg[i][0])
        np.save('data/jsrt140/roi{}n.npy'.format(i), X_neg[i])
Example #6
def test(model_name, image_name, weight, prob_thresh=0.2, nms_thresh=0.4, mode=1, use_gpu=True):
    result = get_test_result(model_name, image_name, weight, prob_thresh, nms_thresh, mode, use_gpu)
    image = cv2.imread(image_name)
    print('got %d detections' % len(result))
    for item in result:
        if len(item) == 6:
            box = item[:4]
            prob = float(item[4])
            cls_ind = int(item[5])
            cls_names = voc_class_names[cls_ind]
        elif len(item) == 3:
            box = item[0]
            prob = item[1]
            cls_ind = item[2]
            cls_names = [voc_class_names[i] for i in cls_ind]
            prob = [p for p in prob]
            cls_ind = cls_ind[0]
        image = imwrite(image, box, cls_names, cls_ind, prob)

    # cv2.imshow("{}".format(image_name), image)
    # cv2.waitKey(0)
    cv2.imwrite('bbox_%s.png' % image_name.split('/')[-1].split('.')[0], image)
Example #7
            os.mkdir('local')
        vout.open('./local/output.avi', fourcc, fps, sz, True)

    frames = 0
    start = time.time()

    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            output = predictv1(frame, model, CUDA=CUDA, nms_thresh=nms_thresh, prob_thresh=confidence)
            for item in output:
                # item = output[i]
                cls = int(item[-1])
                prob = float(item[-2])
                box = item[:4]
                frame = imwrite(frame, box, class_names[cls], cls)

            frames += 1
            fps = frames / (time.time()-start)
            label = 'fps: {:.3f}'.format(fps)
            # t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]
            if dv:
                vout.write(frame)
            cv2.putText(frame, label, (1, 10), cv2.FONT_HERSHEY_PLAIN, 1, [0, 255, 255], 1)
            cv2.imshow("frame", frame)

            key = cv2.waitKey(1)
            if key & 0xFF == ord('q'):
                break

            print(time.time()-start)
Example #8
        sample = voc_dataset.get_item(idx)
        # except Exception as e:
        #     print(e, idx)
        # continue
        image = sample["image"]
        boxes = sample["bbox"]
        classes = sample["classes"]
        h, w, _ = image.shape
        dir = 'gt'
        if not os.path.exists(dir):
            os.mkdir(dir)

        for j in range(len(boxes)):
            cls_ind = classes[j].item()
            # print(cls_ind)
            image = imwrite(image, convert_box(boxes[j], h, w, 1),
                            voc_class_names[cls_ind], cls_ind)

        #plt.figure()

        # plt.title("pic{}".format(i+1))
        # plt.imshow(image)
        # cv2.imwrite("data/temp/pic{}.jpg".format(i+1),image)
        # plt.pause(1)
        cv2.imshow('pic', image)
        key = cv2.waitKey(0)
        if key & 0xFF == ord('q'):
            cv2.destroyWindow('pic')
            break
        elif key & 0xFF == ord('s'):
            cv2.imwrite('{}/pic_gt{}.png'.format(dir, idx), image)
        time.sleep(1)
Example #9
    idx = np.array(range(len(Y_aug)))
    np.random.shuffle(idx)

    return X_aug[idx], Y_aug[idx]


# Test
if __name__ == '__main__':
    fname = 'grid2.jpg'

    import cv2
    img = cv2.imread(fname)
    print(img.shape)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = np.array([img])
    util.imwrite('or_{}'.format(fname), img[0])
    augment_params = {
        'output_shape': (img.shape[1] * (1.0 / factor),
                         img.shape[1] * (1.0 / factor)),
        'ratio': 1,
        'batch_size': 32,
        'rotation_range': (-18, 18),
        'translation_range': (-0.12, 0.12),
        'flip': True,
        'intensity_shift_std': 0.2,
        'mode': 'balance_batch',
Example #10
def evaluate(real, predicted, data=None, sample=False, criteria='25mm'):
    assert len(real) == len(predicted)
    num_imgs = len(real)
    sensitivity = 0
    fppi = []
    iou = []
    iou_pos = []
    tp = 0
    p = 0
    MAX_DIST = 35.7142  # 25 mm
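    # _dist appears to return a squared distance, hence the comparison against MAX_DIST * MAX_DIST below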

    for i in range(num_imgs):
        if sample:
            img, mask = data.get(i)
            sampled, lce, norm = preprocess(img, mask)
            img = lce

        found = False
        found_blob_idx = -1
        overlap = -1e10
        for j in range(len(real[i])):
            if real[i][j][0] == -1:
                continue

            p += 1
            for k in range(len(predicted[i])):
                #overlap = _iou_circle(real[i][j], predicted[i][k])
                dist = _dist(real[i][j], predicted[i][k])
                iou.append(dist)
                if dist < MAX_DIST * MAX_DIST:
                    iou_pos.append(overlap)
                    found = True
                    found_blob_idx = k
                    break
            if found:
                break

        #print "real blob {}".format(real[i][0])
        # Assuming that we just have one true object per image at most
        if found:
            fppi.append(len(predicted[i]) - 1)
            tp += 1
            if sample:
                for k in range(len(predicted[i])):
                    if k != found_blob_idx:
                        util.save_blob(
                            'data/fp/{}_{}_{}.jpg'.format(
                                i, k, data.img_paths[i].split('/')[-1]), img,
                            predicted[i][k])
                        _, masks = segment(img, [predicted[i][k]])
                        rmask = cv2.resize(masks[0], (128, 128),
                                           interpolation=cv2.INTER_CUBIC)
                        util.imwrite(
                            'data/fp/{}_{}_{}_mask.jpg'.format(
                                i, k, data.img_paths[i].split('/')[-1]), rmask)
                    else:
                        print('real, predicted ->', real[i][0], predicted[i][k])
                        util.save_blob(
                            'data/tp/real_{}_{}.jpg'.format(
                                i, data.img_paths[i].split('/')[-1]), img,
                            real[i][0])
                        util.save_blob(
                            'data/tp/{}_{}_{}.jpg'.format(
                                i, k, data.img_paths[i].split('/')[-1]), img,
                            predicted[i][k])
        else:
            fppi.append(len(predicted[i]))
            if sample:
                if real[i][0][0] != -1:
                    util.save_blob(
                        'data/fn/{}_{}.jpg'.format(
                            i, data.img_paths[i].split('/')[-1]), img,
                        real[i][0])
                for k in range(len(predicted[i])):
                    util.save_blob(
                        'data/fp/{}_{}_{}.jpg'.format(
                            i, k, data.img_paths[i].split('/')[-1]), img,
                        predicted[i][k])
                    _, masks = segment(img, [predicted[i][k]])
                    rmask = cv2.resize(masks[0], (128, 128),
                                       interpolation=cv2.INTER_CUBIC)
                    util.imwrite(
                        'data/fp/{}_{}_{}_mask.jpg'.format(
                            i, k, data.img_paths[i].split('/')[-1]), rmask)

        #print "found {}, overlap {}".format(found, overlap)

        #if paths != None:
        #   util.show_blobs_real_predicted(paths[i], [real[i]], predicted[i])

    fppi = np.array(fppi)
    iou = np.array(iou)
    iou_pos = np.array(iou_pos)

    sensitivity = tp * 1.0 / p
    #return sensitivity, np.mean(fppi), np.std(fppi), np.mean(iou), np.std(iou), np.mean(iou_pos), np.std(iou_pos)
    return sensitivity, np.mean(fppi), np.std(fppi)
Example #11
    def save_result(self, img, path):
        fn = join(self.results_dir, path)
        directory = os.path.dirname(fn)
        if not os.path.exists(directory):
            os.mkdir(directory)
        util.imwrite(img, fn)
Example #12
def test_many(model_name, test_file, weight, prob_thresh=0.1, nms_thresh=0.5, mode=1, pd=True, use_gpu=True):
    print('load weight')
    model = load_model(model_name, weight, mode, use_gpu)
    if use_gpu:
        model.to(gpudevice)
    print("detecting")
    images = []
    try:
        images = [os.path.join(test_file, img) for img in os.listdir(test_file) if img.endswith('.jpg') or img.endswith('.png')]
    except NotADirectoryError:
        try:
            with open(test_file) as f:
                for l in f:
                    images.append(l.strip())
        except Exception as e:
            print(e)
            sys.exit(1)
    except FileNotFoundError:
        print("No file or directory with the name {}".format(test_file))
        sys.exit(1)
    if not os.path.exists('det'):
        os.mkdir('det')
    random.shuffle(images)
    since = time.time()
    for imp in images:
        print(imp)
        image = cv2.imread(imp)
        h, w, _ = image.shape
        pred = get_pred(image,model,use_gpu)
        probs = np.zeros((side * side * num, classes))
        boxes = np.zeros((side * side * num, 4))
        get_detection_boxes(pred, prob_thresh, nms_thresh, boxes, probs)

        maxclsind = np.argmax(probs, 1)
        maxprob = np.max(probs, 1)
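        # Keep only predictions whose best class probability survived thresholding/NMS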
        mask = maxprob > 0
        if np.sum(mask) == 0:
            continue

        maskbox = boxes[mask]
        maskprob = maxprob[mask]
        maskind = maxclsind[mask]
        if pd:
            output = postdeal(maskbox, maskprob, maskind, h, w)
            for it in output:
                box = it[0]
                prob = it[1]
                cls_ind = it[2]
                cls_names = [voc_class_names[i] for i in cls_ind]
                prob = [p for p in prob]
                cls_ind = cls_ind[0]
                image = imwrite(image, box, cls_names, cls_ind, prob)
        else:
            maskbox = correct_boxes(maskbox, h, w)
            for i in range(maskbox.shape[0]):
                image = imwrite(image, maskbox[i], voc_class_names[maskind[i]], maskind[i], maskprob[i])

        # boxes, probs, cls_indices = get_detection_boxes_1(pred, prob_thresh, nms_thresh, True)
        # for i, box in enumerate(boxes):
        #     if probs[i] == 0:
        #         continue
        #     box = convert_box(box,h,w)
        #     cls_index = int(cls_indices[i])
        #     # print(cls_index)
        #     prob = float(probs[i])
        #     image = imwrite(image, box, voc_class_names[cls_index], cls_index, prob)

        cv2.imshow("img", image)
        key = cv2.waitKey(0)
        if key & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
        elif key & 0xFF == ord('s'):
            cv2.imwrite('det/bbox_%s.png' % imp.split('/')[-1].split('.')[0], image)
        # time.sleep(2)
    print('{:.3f}s per image'.format((time.time()-since)/len(images)))
Example #13
File: video.py  Project: swehrwein/pipi
def save_frames(filepattern, video):
    for i in range(video.shape[-1]):
        util.imwrite(video[..., i], filepattern % i)