def meanshiftUsingIntensityAndLocation(path):
    im = cv.LoadImageM(path, cv.CV_LOAD_IMAGE_GRAYSCALE)
    # Create a mat holding the pixel intensity and its (row, column) location
    mat = cv.LoadImageM(path)
    for i in range(0, im.height):
        for j in range(0, im.width):
            value = (im[i, j], i, j)
            mat[i, j] = value


    (segmentedImage, labelsImage, numberRegions) = pms.segmentMeanShift(mat)

    clusters = {}
    for i in range(0, labelsImage.height):
        for j in range(0, labelsImage.width):
            v = labelsImage[i, j]
            if v in clusters:
                clusters[v].append(im[i, j])
            else:
                clusters[v] = [im[i, j]]

    means = {}
    for c in clusters:
        means[c] = sum(clusters[c]) / len(clusters[c])

    for i in range(0, im.height):
        for j in range(0, im.width):
            lbl = labelsImage[i, j]
            im[i, j] = means[lbl]

    print("number of region", numberRegions)
    return im
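# The cluster/means/repaint loops above can be exercised in isolation. A minimal
# sketch using only NumPy: group pixel values by an integer label map, average
# each group, and paint every pixel with its cluster mean (the toy image and
# label map below are made up, not output of pms.segmentMeanShift).
import numpy as np

def repaint_with_cluster_means(gray, labels):
    # gray and labels are 2-D arrays of identical shape
    out = np.empty_like(gray, dtype=float)
    for lbl in np.unique(labels):
        mask = labels == lbl
        out[mask] = gray[mask].mean()   # mean intensity of the cluster
    return out

toy_gray = np.array([[10, 12, 200], [11, 13, 210]], dtype=float)
toy_labels = np.array([[0, 0, 1], [0, 0, 1]])
print(repaint_with_cluster_means(toy_gray, toy_labels))
# cluster 0 -> 11.5, cluster 1 -> 205.0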
    def __init__(self, video, csv_file, background=None, xls_sheet=None):
        self.sample_name = video.split("/")[-1]
        self.capture = cv2.VideoCapture(video)
        self.numframes = self.capture.get(cv.CV_CAP_PROP_FRAME_COUNT)
        print >> sys.stderr, self.numframes
        self.fps = self.capture.get(cv.CV_CAP_PROP_FPS)
        print >> sys.stderr, self.fps
        # Frame step derived from the frame rate (never less than 1)
        self.increaser = int(self.fps / 5)
        if self.increaser == 0:
            self.increaser = 1
        self.length_cali = 250
        self.width = self.capture.get(cv.CV_CAP_PROP_FRAME_WIDTH)
        self.height = self.capture.get(cv.CV_CAP_PROP_FRAME_HEIGHT)
        if xls_sheet:
            self.xls_sheet = xls_sheet
        if background:
            self.background = cv.LoadImageM(background)
        else:
            self.background = self.get_background()
        self.clip = me.VideoFileClip(video)
        #clip = (VideoFileClip("./frozen_trailer.mp4")
        #    .subclip((1,22.65),(1,23.2))
        #    .resize(0.3))
        self.report = report.Report(csv_file)
        self.report.read_csv()
        self.offset = 0
        self.get_offset()
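# For reference, the same video probing done in __init__ above, written with the
# OpenCV 3+ constant names (cv2.CAP_PROP_*). A minimal, standalone sketch that
# assumes a hypothetical local file "sample.avi"; it is independent of the class.
import cv2

cap = cv2.VideoCapture("sample.avi")     # hypothetical path
if cap.isOpened():
    n_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    fps = cap.get(cv2.CAP_PROP_FPS)
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    print("%d frames at %.2f fps, %dx%d" % (n_frames, fps, width, height))
cap.release()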
def meanshiftUsingILM(path):
    # Load original image given the image path
    im = cv.LoadImageM(path)
    # Load bank of filters
    filterBank = lmfilters.loadLMFilters()
    # Resize image to decrease dimensions during clustering
    resize_factor = 1
    thumbnail = cv.CreateMat(im.height / resize_factor,
                             im.width / resize_factor, cv.CV_8UC3)
    cv.Resize(im, thumbnail)
    # now work with resized thumbnail image
    response = np.zeros(shape=((thumbnail.height) * (thumbnail.width), 4),
                        dtype=float)
    for f in range(0, 48):
        filter = filterBank[f]
        # Resize the filter with the same factor for the resized image
        dst = cv.CreateImage(cv.GetSize(thumbnail), cv.IPL_DEPTH_32F, 3)
        resizedFilter = cv.CreateMat(filter.height / resize_factor,
                                     filter.width / resize_factor,
                                     filter.type)
        cv.Resize(filter, resizedFilter)
        # Apply the current filter
        cv.Filter2D(thumbnail, dst, resizedFilter)
        featureIndex = getFilterTypeIndex(f)
        for j in range(0, thumbnail.height):
            for i in range(0, thumbnail.width):
                # Select the max. along the three channels
                maxRes = max(dst[j, i])
                if math.isnan(maxRes):
                    maxRes = 0.0
                if maxRes > response[thumbnail.width * j + i, featureIndex]:
                    # Store the max. response for the given feature index
                    response[thumbnail.width * j + i, featureIndex] = maxRes

    # Create new mean shift instance
    ms = MeanShift(bandwidth=10, bin_seeding=True)
    # Apply the mean shift clustering algorithm
    ms.fit(response)
    labels = ms.labels_
    n_clusters_ = np.unique(labels)
    print("Number of clusters: ", len(n_clusters_))
    repaintImage(thumbnail, labels)
    cv.Resize(thumbnail, im)
    return im
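# The clustering step above (one row of filter responses per pixel, handed to
# scikit-learn's MeanShift) can be tried on synthetic data. A minimal sketch,
# unrelated to the LM filter bank:
import numpy as np
from sklearn.cluster import MeanShift

rng = np.random.RandomState(0)
# two well-separated blobs standing in for per-pixel feature vectors
X = np.vstack([rng.normal(0, 1, (50, 4)), rng.normal(20, 1, (50, 4))])
ms = MeanShift(bandwidth=10, bin_seeding=True)
ms.fit(X)
print("clusters found:", len(np.unique(ms.labels_)))   # expected: 2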
def meanshiftUsingYUV(path):
    im = cv.LoadImageM(path)
    cv.CvtColor(im, im, cv.CV_BGR2YCrCb)
    (segmentedImage, labelsImage, numberRegions) = pms.segmentMeanShift(im)
    print("number of regions:", numberRegions)
    return segmentedImage
def meanshiftUsingRGB(path):
    im = cv.LoadImageM(path)
    (segmentedImage, labelsImage, numberRegions) = pms.segmentMeanShift(im)
    print("number of regions:", numberRegions)
    return segmentedImage
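# pms.segmentMeanShift is not shown in this file; it presumably wraps the
# pymeanshift package, whose public entry point is pms.segment. A minimal sketch
# of that call (the parameter values and the input path are illustrative, not
# taken from this project):
import cv2
import pymeanshift as pms

img = cv2.imread("input.png")            # hypothetical path
(segmented, labels, n_regions) = pms.segment(img,
                                             spatial_radius=6,
                                             range_radius=4.5,
                                             min_density=50)
print("number of regions:", n_regions)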
def meanshiftUsingPCA(path):
    # Load original image given the image path
    im = cv.LoadImageM(path)
    # Convert the image to the YUV (YCrCb) color space
    cv.CvtColor(im, im, cv.CV_BGR2YCrCb)
    # Load bank of filters
    filterBank = lmfilters.loadLMFilters()
    # Resize image to decrease dimensions during clustering
    resize_factor = 1
    thumbnail = cv.CreateMat(im.height / resize_factor,
                             im.width / resize_factor, cv.CV_8UC3)
    cv.Resize(im, thumbnail)
    # now work with resized thumbnail image
    response = np.zeros(shape=((thumbnail.height) * (thumbnail.width), 51),
                        dtype=float)
    for f in range(0, 48):
        filter = filterBank[f]
        # Resize the filter with the same factor for the resized image
        dst = cv.CreateImage(cv.GetSize(thumbnail), cv.IPL_DEPTH_32F, 3)
        resizedFilter = cv.CreateMat(filter.height / resize_factor,
                                     filter.width / resize_factor,
                                     filter.type)
        cv.Resize(filter, resizedFilter)
        # Apply the current filter
        cv.Filter2D(thumbnail, dst, resizedFilter)
        for j in range(0, thumbnail.height):
            for i in range(0, thumbnail.width):
                # Select the max. along the three channels
                maxRes = max(dst[j, i])
                if math.isnan(maxRes):
                    maxRes = 0.0
                if maxRes > response[thumbnail.width * j + i, f]:
                    # Store the max. response for the given feature index
                    response[thumbnail.width * j + i, f] = maxRes

    #YUV features
    count = 0
    for j in range(0, thumbnail.height):
        for i in range(0, thumbnail.width):
            response[count, 48] = thumbnail[j, i][0]
            response[count, 49] = thumbnail[j, i][1]
            response[count, 50] = thumbnail[j, i][2]
            count += 1

    # Get the first 4 principal components using PCA
    pca = PCA(response)
    pcaResponse = np.zeros([thumbnail.height * thumbnail.width, 4])

    for i in range(0, thumbnail.height * thumbnail.width):
        pcaResponse[i] = pca.getPCA(response[i], 4)

    # Create new mean shift instance
    ms = MeanShift(bandwidth=10, bin_seeding=True)
    # Apply the mean shift clustering algorithm
    ms.fit(pcaResponse)
    labels = ms.labels_
    n_clusters_ = np.unique(labels)
    print("Number of clusters: ", len(n_clusters_))
    repaintImage(thumbnail, labels)
    cv.Resize(thumbnail, im)
    return im
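# The PCA class and its getPCA method are not shown in this file; the projection
# onto the first 4 principal components can be reproduced with plain NumPy. A
# minimal sketch, assuming the rows of `response` are the per-pixel feature
# vectors (the demo matrix below is random stand-in data):
import numpy as np

def project_first_components(data, n_components=4):
    # centre the data, then project onto the top right-singular vectors
    centred = data - data.mean(axis=0)
    _, _, vt = np.linalg.svd(centred, full_matrices=False)
    return centred.dot(vt[:n_components].T)

demo = np.random.rand(100, 51)                 # stand-in for the 51-D response matrix
print(project_first_components(demo).shape)    # (100, 4)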
def meanshiftUsingIntensity(path):
    im = cv.LoadImageM(path, cv.CV_LOAD_IMAGE_GRAYSCALE)
    (segmentedImage, labelsImage, numberRegions) = pms.segmentMeanShift(im)
    print("number of regions:", numberRegions)
    return segmentedImage
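# A small driver for the segmentation variants above; a sketch assuming the same
# legacy cv module used throughout this file and a hypothetical input image path.
if __name__ == "__main__":
    path = "input.png"                      # hypothetical path
    cv.SaveImage("seg_intensity.png", meanshiftUsingIntensity(path))
    cv.SaveImage("seg_rgb.png", meanshiftUsingRGB(path))
    cv.SaveImage("seg_yuv.png", meanshiftUsingYUV(path))
    cv.SaveImage("seg_ilm.png", meanshiftUsingILM(path))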