Example #1
def main(argv):
    # Argument Parsing
    if len(argv) < 1:
        raise Exception(
            "Please indicate the index of file you want to observe")
    index = int(argv[0])

    # List the metadata files and load the one selected by the index
    fileList = parse.listFileFromDir('meta')
    data = np.load(fileList[index])

    # Manually label the pedestrian: frame index and bounding box
    frameLabels = selectPedestrian(data, fast_forward=10)
    print(frameLabels)

    # Load the matching processed video (note: the video list index is
    # offset by one relative to the metadata index) and run the tracker
    videoFileList = parse.listFileFromDir('video-data/processed')
    video_filepath = videoFileList[index + 1]
    tracking(video_filepath, frameLabels[0], frameLabels[1])
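The helper parse.listFileFromDir is used throughout these examples but is not shown here. A minimal sketch of what such a helper could look like, assuming it simply returns the sorted paths of the files in a directory (the actual project implementation may differ):

import os

def listFileFromDir(directory):
    # Hypothetical stand-in for parse.listFileFromDir: return the sorted
    # full paths of the regular files directly under `directory`.
    return sorted(
        os.path.join(directory, name)
        for name in os.listdir(directory)
        if os.path.isfile(os.path.join(directory, name)))

With a helper like this, fileList[index] in Example #1 resolves to the index-th metadata file under 'meta', and the same call on 'video-data/processed' yields the corresponding processed videos.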
Example #2
def main(argv):
    # Argument Parsing
    if len(argv) < 1 or int(argv[0]) > 5 or int(argv[0]) < 0:
        printIndexInfo()
        raise Exception("Please indicate the index of file you want to observe")
    index = int(argv[0])

    filepath = parse.listFileFromDir('meta')[index]
    data = np.load(filepath)

    refPt = crop_image(np.copy(data))
    # print refPt

    # Assume start at frame = 0
    # feature, out_frame = simulateSample(data[0], refPt[0])
    out_frame = simulateSample(data[0], refPt[0])
    trackNextFrame(data[1], out_frame)
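In every example, main receives the command-line arguments with the script name already stripped, so argv[0] is the index of the file to observe. A minimal entry point, assuming the usual sys.argv convention (not shown in the originals):

import sys

if __name__ == '__main__':
    # sys.argv[0] is the script name, so pass only the real arguments;
    # argv[0] inside main is then the index of the file to observe.
    main(sys.argv[1:])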
Example #3
def main(argv):

    # Argument Parsing
    if len(argv) < 1 or int(argv[0]) > 5 or int(argv[0]) < 0:
        printIndexInfo()
        raise Exception(
            "Please indicate the index of file you want to observe")
    index = int(argv[0])

    # List all files in the directory and load the data as specified
    fileList = parse.listFileFromDir('meta')
    data = np.load(fileList[index])

    # Get the frame, top-left corner, and bottom-right corner from manual labelling
    frame, refPt = selectPedestrian(np.copy(data), fast_forward=10)
    # print refPt
    # print keyPt

    # Perform tracking
    tracking(data, frame, refPt)
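The manual labelling helpers selectPedestrian and crop_image are project-specific and not shown. As a rough stand-in, a single bounding box can be drawn on one frame with OpenCV's built-in ROI selector; the sketch below only approximates that labelling step (the function name and return format are assumptions, not the project's API):

import cv2

def select_box(frame):
    # Hypothetical labelling helper: let the user draw a box on `frame`
    # and return it as (topLeft, bottomRight) pixel coordinates.
    x, y, w, h = cv2.selectROI("Label", frame, showCrosshair=False)
    cv2.destroyWindow("Label")
    return (int(x), int(y)), (int(x + w), int(y + h))

The (topLeft, bottomRight) pair returned here plays the same role as refPt[0] in Example #2 and Example #4 below.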
Example #4
def main(argv):
    # Argument Parsing
    if len(argv) < 1 or int(argv[0]) > 5 or int(argv[0]) < 0:
        printIndexInfo()
        raise Exception(
            "Please indicate the index of file you want to observe")
    index = int(argv[0])

    filepath = parse.listFileFromDir('meta')[index]
    data = np.load(filepath)

    refPt = crop_image(np.copy(data))
    topLeft, bottomRight = np.array(refPt[0][0]), np.array(refPt[0][1])
    imageWidth = abs(bottomRight[0] - topLeft[0])
    imageHeight = abs(bottomRight[1] - topLeft[1])
    extension = 15
    numSample = 2000
    cv2.namedWindow("NewFrame")

    # 1. Construct positive and negative bags (all positive instances go into one bag; each negative bag contains only one sample)
    positiveBags, negativeBags, positiveMaps, negativeMaps, labels = bagConstruction(
        data[0],
        topLeft,
        bottomRight,
        imageWidth,
        imageHeight,
        extension=extension,
        numSample=numSample)
    # print len(positiveBags[0]), len(negativeBags)

    # 2. Train the MI classifier (MISVM); `classifier` is assumed to be
    #    constructed elsewhere in the module (see the sketch after this example)
    classifier.fit(positiveBags + negativeBags, labels)

    # 3. For loop on each frame
    for frame in range(1, data.shape[0], 3):
        # 3.1 Crop a set of images and compute feature vectors based on the MIL appearance model
        bags, bagMaps = cropImage(data[frame],
                                  np.array(topLeft),
                                  np.array(bottomRight),
                                  imageWidth,
                                  imageHeight,
                                  extension=extension,
                                  numSample=numSample)
        # 3.2 Use MI Classifier to estimate p(y=1|x) => apply MILBoost
        predict = classifier.predict(bags)
        maxPoint = bagMaps[predict.argsort()[-1]]
        topLeft = tuple(
            [int((maxPoint[0] - topLeft[0]) * 0.85 + topLeft[0]), topLeft[1]])
        # topLeft = tuple([int(maxPoint[0] - imageWidth/2), topLeft[1]])#int(maxPoint[1] - imageHeight/2)])
        bottomRight = tuple(
            [int(maxPoint[0] + imageWidth / 2),
             bottomRight[1]])  #int(maxPoint[1] + imageHeight/2)])
        print "After update"
        print topLeft, bottomRight

        # 3.3 Update the location of the image
        cv2.rectangle(data[frame], topLeft, bottomRight, (0, 255, 0), 1)
        # 3.4 Crop out two sets of images: one positive and the other negative
        positiveBags, negativeBags, positiveMaps, negativeMaps, labels = bagConstruction(
            data[frame],
            np.array(topLeft),
            np.array(bottomRight),
            imageWidth,
            imageHeight,
            extension=extension,
            numSample=numSample)
        # 3.5 Update MIL appearance model
        classifier.fit(positiveBags + negativeBags, labels)

        # Show the updated frame and wait for the 'n' key to advance
        while True:
            cv2.imshow("NewFrame", data[frame])
            key = cv2.waitKey(1) & 0xFF
            if key == ord("n"):
                break
    cv2.destroyAllWindows()
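Example #4 calls classifier.fit and classifier.predict but never constructs classifier, so it is presumably created at module level. The step-2 comment names MISVM; a minimal sketch of such a setup, assuming the misvm package (https://github.com/garydoranjr/misvm), where each bag is an (n_instances, n_features) array and bag labels are +1/-1:

import numpy as np
import misvm

# Hypothetical module-level classifier, as assumed by Example #4.
# MISVM labels a bag positive if at least one instance in it is positive.
classifier = misvm.MISVM(kernel='linear', C=1.0, max_iters=20)

# Tiny smoke test with random bags: two positive, two negative.
bags = [np.random.rand(4, 8) + 1.0, np.random.rand(4, 8) + 1.0,
        np.random.rand(4, 8) - 1.0, np.random.rand(4, 8) - 1.0]
labels = np.array([1.0, 1.0, -1.0, -1.0])
classifier.fit(bags, labels)
print(classifier.predict(bags))  # sign of each value is the predicted bag label

This mirrors how Example #4 uses the classifier: fit on positiveBags + negativeBags with their labels, then predict on candidate bags and take the argsort of the real-valued outputs to pick the most confident location.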