Example #1
def instances2dict_with_polygons(imageFileList, verbose=False):
    """Build a per-image dict of instance annotations with contour polygons.

    Args:
        imageFileList: a single path or a list of paths to instance-id
            label images (pixel values >= 1000 encode individual instances,
            Cityscapes-style).
        verbose: when True, print progress to stdout.

    Returns:
        dict mapping absolute image path -> {label name: [instance dicts]};
        instances whose label has ``hasInstances`` set also carry a
        'contours' key with flattened polygon coordinate lists.
    """
    if not isinstance(imageFileList, list):
        imageFileList = [imageFileList]

    if verbose:
        print("Processing {} images...".format(len(imageFileList)))

    instanceDict = {}
    imgCount = 0

    for imageFileName in imageFileList:
        # Read the label image straight into a numpy array.
        imgNp = np.array(Image.open(imageFileName))

        # One (possibly empty) bucket per known label name.
        instances = {label.name: [] for label in labels}

        # Every unique pixel value >= 1000 denotes one instance.
        for instanceId in np.unique(imgNp):
            if instanceId < 1000:
                continue

            instanceObj = Instance(imgNp, instanceId)
            instanceObj_dict = instanceObj.toDict()
            labelInfo = id2label[instanceObj.labelID]

            if labelInfo.hasInstances:
                # Outer contours of this instance's binary mask.
                mask = (imgNp == instanceId).astype(np.uint8)
                contour, hier = cv2_util.findContours(mask.copy(),
                                                      cv2.RETR_EXTERNAL,
                                                      cv2.CHAIN_APPROX_NONE)
                instanceObj_dict['contours'] = [
                    c.reshape(-1).tolist() for c in contour
                ]

            instances[labelInfo.name].append(instanceObj_dict)

        instanceDict[os.path.abspath(imageFileName)] = instances
        imgCount += 1

        if verbose:
            print("\rImages Processed: {}".format(imgCount), end=' ')
            sys.stdout.flush()

    if verbose:
        print("")

    return instanceDict
def PredictImg(image, model, device):
    """Run instance-segmentation inference on an image file and display it.

    Args:
        image: path to the image file on disk.
        model: torchvision-style detection model whose output is a list of
            dicts with 'boxes', 'labels', 'scores' and 'masks'.
        device: torch device the model lives on.
    """
    img = cv2.imread(image)
    result = img.copy()
    dst = img.copy()
    img = toTensor(img)

    names = {'0': 'background', '1': 'car'}
    # put the model in evaluation mode
    model.eval()
    with torch.no_grad():
        prediction = model([img.to(device)])

    boxes = prediction[0]['boxes']
    labels = prediction[0]['labels']
    scores = prediction[0]['scores']
    masks = prediction[0]['masks']

    m_bOK = False
    dst1 = None
    for idx in range(boxes.shape[0]):
        # Keep only confident detections.
        if scores[idx] >= 0.8:
            m_bOK = True
            color = random_color()
            mask = masks[idx, 0].mul(255).byte().cpu().numpy()
            contours, hierarchy = cv2_util.findContours(
                mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
            cv2.drawContours(dst, contours, -1, color, -1)

            # OpenCV drawing routines require int pixel coordinates;
            # the box tensor holds floats, so convert explicitly.
            x1, y1, x2, y2 = (int(v) for v in boxes[idx].tolist())
            label_key = str(labels[idx].item())
            # Fall back to the raw label id for classes missing from `names`,
            # so cv2.putText never receives None.
            name = names.get(label_key, label_key)
            cv2.rectangle(result, (x1, y1), (x2, y2), color, thickness=2)
            cv2.putText(result,
                        text=name,
                        org=(x1, y1 + 10),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=0.5,
                        thickness=1,
                        lineType=cv2.LINE_AA,
                        color=color)

            dst1 = cv2.addWeighted(result, 0.7, dst, 0.3, 0)

    # Only open a window when at least one detection was actually drawn.
    if m_bOK and dst1 is not None:
        cv2.imshow('result', dst1)
        cv2.waitKey()
        cv2.destroyAllWindows()
Example #3
def predict(image, model, mode='img', show_cv=False):
    """Run instance-segmentation inference and return the annotated image.

    Args:
        image: file path when ``mode == 'img'``, otherwise an already-loaded
            BGR image array.
        model: torchvision-style detection model whose output is a list of
            dicts with 'boxes', 'labels', 'scores' and 'masks'.
        mode: 'img' to read `image` from disk; any other value uses it as-is.
        show_cv: when True, additionally display the result in an OpenCV window.

    Returns:
        The blended annotated image, or None when no detection passed the
        0.9 confidence threshold.
    """
    img = cv2.imread(image) if mode == 'img' else image
    result = img.copy()
    dst = img.copy()
    img = to_tensor(img)

    # Class-id -> display name (typo 'backgroud' fixed to 'background').
    names = {
        '0': 'background',
        '1': 'person',
        '2': 'rider',
        '3': 'car',
        '4': 'bus'
    }
    # Class-id -> BGR drawing color.
    colors = {
        '0': (0, 0, 0),
        '1': (255, 0, 0),
        '2': (255, 255, 0),
        '3': (95, 158, 160),
        '4': (148, 0, 211)
    }
    # put the model in evaluation mode
    model.eval()
    with torch.no_grad():
        prediction = model([img.to(DEVICE)])
    boxes = prediction[0]['boxes']
    labels = prediction[0]['labels']
    scores = prediction[0]['scores']
    masks = prediction[0]['masks']

    dst1 = None
    for idx in range(boxes.shape[0]):
        # Keep only confident detections.
        if scores[idx] >= 0.9:
            key = str(labels[idx].item())
            # Fall back to white / the raw id for classes missing from the
            # tables, so cv2 never receives None.
            color = colors.get(key, (255, 255, 255))
            name = names.get(key, key)

            # OpenCV drawing routines require int pixel coordinates;
            # the box tensor holds floats, so convert explicitly.
            x1, y1, x2, y2 = (int(v) for v in boxes[idx].tolist())
            cv2.rectangle(result, (x1, y1), (x2, y2), color, thickness=2)
            cv2.putText(result,
                        text=' ' + name,
                        org=(x1, y1 + 10),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=0.5,
                        thickness=1,
                        lineType=cv2.LINE_AA,
                        color=color)

            dst1 = cv2.addWeighted(result, 0.7, dst, 0.3, 0)

    # `dst1` doubles as the "anything drawn?" flag (replaces the redundant
    # m_bOK / has_dst pair).
    if show_cv and dst1 is not None:
        cv2.imshow('sample', dst1)
        cv2.waitKey()
        cv2.destroyAllWindows()
    return dst1