Example #1
def measure_focused_roi(im, roi, area, focus_points, debug=False):  
    g = Grid(cv.GetSize(im))      
    canvas = image.new_from(im)
    cv.Set(canvas, 0)
    focus_in_roi = image.And(focus_points, roi)
    if debug:
        image.show(focus_in_roi, "ROI + Focused Points")
        
    densities = []
    points = convert_to_points(focus_in_roi)
    groups = form_groups(points, estimated_size=24, iter=5)
    for group in groups:
        ch = ConvexHull(map(lambda x: (x[0], x[1]), group))
        
        ppp = ch.points_per_pixel()
        a = int(ppp * 255)
        ch.draw_filled_hull(canvas, rgb=(a,a,a))
    if debug:
        image.show(canvas, "Focused Regions in ROI")

    quadrants = g.split_in_four(canvas)
    sums = []
    for i,quad in enumerate(quadrants):
        sums.append(cv.Sum(quad)[0] / float(area/4))
    arr = array(sums)
    print arr.mean(), arr.std()
    diff = max(sums) - min(sums)
    
    return diff, arr.std()
Example #2
def draw_mask(img, cx, cy, r, out=0):
    ss = np.shape(img)
    x = np.arange(ss[0])
    x = np.transpose(np.tile(x, [ss[1], 1]))
    y = np.arange(ss[1])
    y = np.tile(y, [ss[0], 1])
    x = x - cx
    y = y - cy
    nr = np.size(r)
    if nr == 2:
        rr = x * x / r[0]**2 + y * y / r[1]**2
        r1 = 1

    else:
        rr = x * x + y * y
        r1 = r**2

    pp = np.where(rr < r1)
    img1 = img.copy()
    if out == 1:
        img1[pp] = 0
    else:
        img1[pp] = 1
    image.show(img1)
    return img1
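For reference, a minimal self-contained NumPy sketch of the same masking idea as draw_mask above; the function name draw_mask_np is illustrative, and the image.show call is omitted because it depends on the snippet's local image module:

import numpy as np

def draw_mask_np(img, cx, cy, r, out=0):
    # Same row/column convention as draw_mask: cx indexes axis 0, cy indexes axis 1.
    x, y = np.ogrid[:img.shape[0], :img.shape[1]]
    if np.size(r) == 2:
        # Elliptical mask (cast to float so the division stays fractional).
        rr = (x - cx) ** 2 / float(r[0] ** 2) + (y - cy) ** 2 / float(r[1] ** 2)
        r1 = 1
    else:
        # Circular mask.
        rr = (x - cx) ** 2 + (y - cy) ** 2
        r1 = r ** 2
    img1 = img.copy()
    img1[rr < r1] = 0 if out == 1 else 1   # zero out the inside, or paint it
    return img1

mask = draw_mask_np(np.zeros((64, 64)), cx=32, cy=32, r=10)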
def measure(im, debug=False):
    im2 = image.max_size(im, (800, 600))
    
    b,g,r = image.split(im2)
    #cv.EqualizeHist(r,r)
    ##cv.EqualizeHist(g,g)
    ##cv.EqualizeHist(b,b)
    im2 = image.merge(b,g,r)
    
    eyes = 0
    #objs = []
    #for cascade in eye_cascades:
    #    print cascade
    #    cascade = cv.Load(cascade)
    #    objs = filter_overlap(detect(im, cascade))
    #    draw_objects(im, objs, color=cv.RGB(0,255,0))
    #    eyes += len(objs)
    #faces = 0
    if debug:
        im3 = cv.CloneImage(im2)
    faces = []
    for cascade in face_cascades:#(face_cascades[0],face_cascades[-1]):
        cascade = cv.Load(cascade)
        detected_faces = detect(im2, cascade)
        faces += detected_faces
        if debug:
            for i,rect in enumerate(faces):
                rect.draw(im3, color=cv.RGB(255,16*i,16*i))
    if debug:
        image.show(im3, "Faces + Repeats")
    faces = filter_overlap(faces)
    #print (objs[1], objs[6])
    #draw_objects(im2, map(tuple, faces.keys()))
    for rect, count in faces.iteritems():
        rect.draw(im2, color=cv.RGB(255,0,0))
    #print (objs[3],objs[13])
    #draw_objects(im, filter_overlap((objs[3],objs[13])))
    
    #objs = []
    #for cascade in body_cascades:
    #    print cascade
    #    cascade = cv.Load(cascade)
    #    objs += detect(im, cascade)
    #draw_objects(im, filter_overlap(objs), color=cv.RGB(0,0,255))

    #objs = []
    #for cascade in mouth_cascades:
    #    print cascade
    #    cascade = cv.Load(cascade)
    #    objs += detect(im, cascade)
    #draw_objects(im,  filter_overlap(objs), color=cv.RGB(255,0,255))
    
    score = 0
    for face_rect, count in faces.iteritems():
        score += count * 0.25 + 0.15
    print faces

    if debug:
        image.show(im2, "Faces")
    return (im2, faces), score
Example #4
def add(request):
    data = request.GET['data']
    image=qrcode.make(data)



    return HttpResponse(image.show())
Example #5
def main():
    parser = argparse.ArgumentParser(description='Decrypts and authenticates binary content hidden in the lowest bits of a bitmap')
    parser.add_argument('-k', '--password', type=str, required=True, metavar='<password>', help='The password used for decryption')
    parser.add_argument('-m', '--macpassword', type=str, required=True, metavar='<macpassword>', help='The password used for authenticity and integrity')
    parser.add_argument('bitmap', type=file, metavar='<bitmap>', help='The bitmap file to extract the binary file from')
    args = parser.parse_args()
    
    password = args.password
    macpassword = args.macpassword
    bitmap = args.bitmap

    key = int(hashlib.sha256(password).hexdigest(), 16) >> 128
    
    ciphertext = image.show(bitmap)
    
    iv = binary.pack(slice(ciphertext, 8))
    binary_size = binary.pack(slice(ciphertext, 2))
    encrypted = slice(ciphertext, binary_size)
    
    decrypted = bytearray(cfb.decrypt(xtea.encrypt, key, iv, encrypted))
    mac, decrypted = binary.pack(decrypted[:32]), decrypted[32:]
    
    check = int(hmac.new(macpassword, str(decrypted), hashlib.sha256).hexdigest(), 16)

    if mac == check:
        for d in decrypted:
            sys.stdout.write(chr(d))
    else:
        raise Exception('Data has been altered')
Example #6
def measure(im, debug=False):
    gray = image.rgb2gray(im)
    size = cv.GetSize(im)
    total = float(size[0] * size[1])
    edges = image.auto_edges(im)

    hue, sat, val = tuple(
        map(image.equalize_hist, image.split(image.rgb2hsv(im))))
    l, u, v = tuple(map(image.equalize_hist, image.split(image.rgb2luv(im))))

    values = []
    if debug:
        image.show(l, "L")
        image.show(val, "Value")
    sat = image.threshold(val, 255 - 32)  #image.And(val, sat)
    if debug:
        image.show(sat, "Thresh")
    #cv.And(val, l, val)
    cv.Sub(l, sat, l)
    cv.Set(l, 0, image.dilate(edges, iterations=3))
    if debug:
        image.show(l, "L - Value")
    val = l
    g = Grid(cv.GetSize(val))
    images = g.split_into(val, 16)
    arr = image.cv2array(val)
    avgmean, avgstd = arr.mean(), arr.std()
    for i in images:
        a = image.cv2array(i)
        mean, std = abs(a.mean() - avgmean), max(a.std(), 0)
        values.append((mean + std))

    if debug:
        print values
        print "AVG", avgmean, avgstd
        image.show(val, "Result")

    return val, (avgmean, avgstd, len([v for v in values if v > avgstd * 2]))
Example #7
def measure(im, debug=False):

    gray = image.rgb2gray(im)
    size = cv.GetSize(im)
    total = float(size[0] * size[1])
    edges = image.auto_edges(im)

    _, sat, val = image.split(image.rgb2hsv(im))
    edges = image.auto_edges(im)
    l, u, v = tuple(map(image.equalize_hist, image.split(image.rgb2luv(im))))
    u, v = tuple(map(image.gaussian, (u, v)))
    if debug:
        image.show(l, "1. L")
        image.show(u, "1. U")
        image.show(v, "1. V")
    la, ua, va, uva = tuple(map(image.cv2array, (l, u, v, image.And(l, u, v))))
    test = image.new_from(gray)
    test2 = image.new_from(gray)
    cv.Xor(u, v, test)
    #cv.AbsDiff(u,v, test2)
    if debug:
        #cv.Threshold(test, test, 32, 255, cv.CV_THRESH_BINARY)
        image.show(test, "2. U Xor V")
        #image.show(test2, "TEST 2")
        #test = image.dilate(test)
    cv.Set(test, 0, image.dilate(edges))
    #cv.Set(test, 0, image.invert(image.threshold(sat, threshold=8)))
    uv_score = cv.CountNonZero(test) / total
    if debug:
        image.show(
            test, "3. U Xor V - dilate(Edges) - invert(threshold(Saturation))")

    arr = image.cv2array(test)
    avg_mean, avg_std = arr.mean(), arr.std()

    score = uv_score, avg_std

    return test, score
Example #8
def measure(im, debug=False):       
    gray = image.rgb2gray(im)
    size = cv.GetSize(im)
    total = float(size[0] * size[1])
    edges = image.auto_edges(im)

    hue, sat, val = tuple(map(image.equalize_hist, image.split(image.rgb2hsv(im)) ))
    l,u,v = tuple(map(image.equalize_hist, image.split(image.rgb2luv(im))))
    
    values = []
    if debug:
        image.show(l, "L")
        image.show(val, "Value")
    sat = image.threshold(val,255-32)#image.And(val, sat)
    if debug:
        image.show(sat, "Thresh")
    #cv.And(val, l, val)
    cv.Sub(l, sat, l)
    cv.Set(l, 0, image.dilate(edges, iterations=3))
    if debug:
        image.show(l, "L - Value")
    val = l
    g = Grid(cv.GetSize(val))
    images = g.split_into(val, 16)
    arr = image.cv2array(val)
    avgmean, avgstd = arr.mean(), arr.std()
    for i in images:
        a = image.cv2array(i)
        mean, std = abs(a.mean() - avgmean), max(a.std(), 0)
        values.append((mean+std))
    
    if debug:
        print values
        print "AVG", avgmean, avgstd
        image.show(val, "Result")
        
    return val, (avgmean, avgstd, len([v for v in values if v > avgstd*2]))
Example #9
def measure(im, debug=False):
    
    gray = image.rgb2gray(im)
    size = cv.GetSize(im)
    total = float(size[0] * size[1])
    edges = image.auto_edges(im)

    _, sat, val = image.split(image.rgb2hsv(im))
    edges = image.auto_edges(im)
    l,u,v = tuple(map(image.equalize_hist, image.split(image.rgb2luv(im))))
    u,v = tuple(map(image.gaussian, (u,v)))
    if debug:
        image.show(l, "1. L")
        image.show(u, "1. U")
        image.show(v, "1. V")
    la,ua,va,uva = tuple(map(image.cv2array, (l,u,v, image.And(l,u,v))))
    test = image.new_from(gray)
    test2 = image.new_from(gray)
    cv.Xor(u,v,test)
    #cv.AbsDiff(u,v, test2)
    if debug:
        #cv.Threshold(test, test, 32, 255, cv.CV_THRESH_BINARY)
        image.show(test, "2. U Xor V")
        #image.show(test2, "TEST 2")
        #test = image.dilate(test)
    cv.Set(test, 0, image.dilate(edges))
    #cv.Set(test, 0, image.invert(image.threshold(sat, threshold=8)))
    uv_score = cv.CountNonZero(test) / total
    if debug:
        image.show(test, "3. U Xor V - dilate(Edges) - invert(threshold(Saturation))")
    
    arr = image.cv2array(test)
    avg_mean, avg_std = arr.mean(), arr.std()
    
    score = uv_score, avg_std
    
    return test, score
Example #10
def detect_skin(im, debug=False):
    hsv = image.rgb2hsv(im)
    
    if debug:
        image.show(hsv, 'hsv')
    h,s,v = image.split(hsv)
    
    if cv.CountNonZero(h) == cv.CountNonZero(s) == 0:
        white = image.new_from(im)
        cv.Set(white, 255)
        return white
    
    if debug:
        image.show(h, "Hue")
        image.show(s,"sat1")
    
    h_rng = 0, 46
    s_rng = 48, 178
    
    h = image.threshold(image.gaussian(h, 5), threshold=h_rng[1], type=cv.CV_THRESH_TOZERO_INV)
    h = image.threshold(h, threshold=h_rng[0], type=cv.CV_THRESH_TOZERO)
    h = image.threshold(h, threshold=1)
    
    s = image.threshold(image.gaussian(s, 5), threshold=s_rng[1], type=cv.CV_THRESH_TOZERO_INV)
    s = image.threshold(s, threshold=s_rng[0], type=cv.CV_THRESH_TOZERO)
    if debug:
        image.show(s,"sat2")
    s = image.threshold(s, threshold=1)
    
    v = image.dilate(image.erode(image.And(s, h)))
    
    
    #im = image.hsv2rgb(image.merge(h,s,v))
    if debug:
        image.show(v, "Human")
    return image.threshold(v, threshold=1)
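The same band-pass idea in plain NumPy, without the Gaussian smoothing and the erode/dilate cleanup used above; the function name skin_mask, the HxWx3 HSV array layout, and the 0-255 channel scale are illustrative assumptions:

import numpy as np

def skin_mask(hsv, h_rng=(0, 46), s_rng=(48, 178)):
    # Keep pixels whose hue and saturation both fall inside the given ranges.
    h = hsv[..., 0].astype(int)
    s = hsv[..., 1].astype(int)
    mask = ((h >= h_rng[0]) & (h <= h_rng[1]) &
            (s >= s_rng[0]) & (s <= s_rng[1]))
    return mask.astype(np.uint8) * 255

hsv = np.random.randint(0, 256, (32, 32, 3))
mask = skin_mask(hsv)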
Example #11
def measure(im, debug=False):
    gray = image.rgb2gray(im)
    _, s, v = image.split(image.rgb2hsv(im))
    h = GrayscaleHist(bins=64).use_image(v)
    s = GrayscaleHist(bins=64).use_image(s)
    scores = [score_hist(h)]

    if debug:
        image.show(v, "1. Value")
        image.show(h.to_img(), "2. Value Histogram")
        image.show(s.to_img(), "2. Saturation Histogram")
        print score_hist(s)

    return (None, scores[0])  # h.to_img(),
Example #12
def measure(im, blur, noise=None, debug=False):
    focus_points = blur[0]
    #is_noisy = noise[2]

    size = cv.GetSize(im)
    npixels = size[0] * size[1]
    
    #if focused_regions is None:
    #    focused_regions = image.new_from(im)
    #    cv.Set(focused_regions, 0)
    #    groups = form_groups(focus_points,
    #        estimated_size=min(max(int(len(npixels) / 1000), 2), 15))
    #    #groups = form_groups(points, threshold=max(cv.GetSize(im))/16)
    #    #print 'groups', len(groups)
    #    draw_groups(groups, focused_regions)
    
    im2 = cv.CloneImage(im)
    g = Grid(cv.GetSize(im2))
    if debug:
        image.show(g.draw(im2), "Image with Grid + ROI")
    
    roi = image.new_from(im, nChannels=1)
    cv.Set(roi, 0)
    #g.draw_lines(roi, thickness=int(max(min((size[0] + size[1]) * 1/100.0, 255), 1)))
    g.draw_regions(roi)
    area = cv.Sum(roi)[0]
    
    (_, face_rects), face_score = faces.measure(im)
    face_block = image.new_from(im, nChannels=1)
    cv.Set(face_block, 0)
    for r in face_rects:
        r.draw(face_block, color=cv.RGB(255,255,255), thickness=cv.CV_FILLED)
    
    if debug:
        face_roi = cv.CloneImage(im)
        cv.Set(face_roi, 0, image.invert(roi))
        cv.Set(face_roi, 0, image.invert(image.threshold(face_block, threshold=1)))
        
        image.show(face_block, "Faces in Binary")
        image.show(g.draw(face_roi), "Face + ROI")
        
    return (im, (
         measure_focused_roi(im, roi, area, focus_points, debug),
         #measure_color_roi(im, roi, area, focus_points, debug),
         measure_contrast(im, debug),
         measure_saturation(im, debug),
         faces.measure(im, debug)[1],
    ))
Example #13
def measure(im, debug=False):
    gray = image.rgb2gray(im)
    _, s, v = image.split(image.rgb2hsv(im))
    h = GrayscaleHist(bins=64).use_image(v)
    s = GrayscaleHist(bins=64).use_image(s)
    scores = [score_hist(h)]

    if debug:
        image.show(v, "1. Value")
        image.show(h.to_img(), "2. Value Histogram")
        image.show(s.to_img(), "2. Saturation Histogram")
        print score_hist(s)

    return (
        None,  #h.to_img(),
        scores[0],
    )
Example #14
def get_focus_points(im,debug=False):
    edges = image.dilate(image.auto_edges(im))
    #d = 1
    #sobel = image.sobel(im, xorder=d, yorder=d)
    sobel = image.laplace(im)
    hsv = image.rgb2hsv(im)

    focused = image.And(sobel, edges)
    if im.nChannels == 3:
        hue, saturation, value = image.split(hsv)
        saturation = image.dilate(saturation)
        focused = image.And(focused, saturation)
    focused = image.threshold(image.dilate(focused), threshold=32)
    
    if debug:
        image.show(edges, "1. Edges")
        image.show(sobel, "2. Sobel")
        if im.nChannels == 3:
            image.show(saturation, "3. Saturation")
    return focused
Example #15
def get_focus_points(im, debug=False):
    edges = image.dilate(image.auto_edges(im))
    #d = 1
    #sobel = image.sobel(im, xorder=d, yorder=d)
    sobel = image.laplace(im)
    hsv = image.rgb2hsv(im)

    focused = image.And(sobel, edges)
    if im.nChannels == 3:
        hue, saturation, value = image.split(hsv)
        saturation = image.dilate(saturation)
        focused = image.And(focused, saturation)
    focused = image.threshold(image.dilate(focused), threshold=32)

    if debug:
        image.show(edges, "1. Edges")
        image.show(sobel, "2. Sobel")
        if im.nChannels == 3:
            image.show(saturation, "3. Saturation")
    return focused
Example #16
def main(progname, *args):

    parser = OptionParser()
    parser.add_option("-f", "--file", dest="filename", default=None,
        help="analyze a given FILE ending in .jpg or .jpeg", metavar="FILE")
    parser.add_option("-i", "--imageset", dest="imageset", default=None,
        help="Runs on a predefined set of algorithms (li,chow,china,custom)")
    parser.add_option("-d", "--debug", dest="debug", action="store_true", default=False,
        help="Enable visual debugging.")
    parser.add_option("-t", "--type", dest="type", default="all",
        help="Specifies the type of feature to debug. Defaults to all.")

    (options, args) = parser.parse_args(list(args))
    
    if options.imageset:
        if options.imageset == 'li':
            process(li_dir)
            return 0
        elif options.imageset == 'chow':
            process(chow_dir)
            return 0
        elif options.imageset == 'china':
            process(china_dir)
            return 0
        elif options.imageset == 'custom':
            process()
            return 0
            
    if not options.filename:
        print "Please specify a file (--file) or image set (--imageset)."
        return 1
    
    if not options.debug:
        process([load_image(options.filename)])
        return 0
        
    if options.filename.startswith('data/'):
        options.filename = options.filename[len('data/'):]
    
    tdata = load_image(options.filename)
    kind = options.type.lower()
    
    size = None #(320,240,'crop') # (0.5, 0.5, 'resize-p')
    if size is None:
        im = tdata.load()
    elif size[-1] == 'crop':
        im = image.random_cropped_region(tdata.load(), size[:2])
    elif size[-1] == 'resize':
        im = tdata.load(size[:2])
    elif size[-1] == 'resize-p':
        im = image.resize(tdata.load(), by_percent=size[:2])
    else:
        raise TypeError, "Invalid image sizing type."
        
    image.show(im, "Image")
    #l,u,v = image.split(image.rgb2luv(im))
    ##cv.Set(l, 128)
    ##cv.EqualizeHist(l, l)
    ##cv.EqualizeHist(u, u)
    ##image.show(image.luv2rgb(image.merge(l,u,v)), "test")
    #s = cv.GetSize(im)
    #t = image.absDiff(u,v)
    #image.show(t, "test")
    #print "Test Score:", cv.CountNonZero(t) / float(s[0] * s[1])
    ##image.show(image.threshold(image.And(u,v), threshold=1), "LUV")

    # noise
    if kind in ('all','noise'):
        noise_img, score = noise.measure(im, debug=True)
        #image.show(noise_img, "Noise Result")
        print 'Noise Score:', score, noise.boolean(score)
    
    # contrast
    if kind in ('all','contrast'):
        contrast_img, score = contrast.measure(im, debug=True)
        #image.show(contrast_img, "Contrast Result")
        print 'Contrast Score:', score, contrast.boolean(score)
    
    # blur
    if kind in ('all','blur','composition'): 
        focused, score = blur.measure(im, debug=kind in ('all','blur'))
        #image.show(focused,  "Blur Result")
        print 'Blur Score:', score, blur.boolean(score)
    
    # composition
    if kind in ('all','composition'):
        composition_img, score = composition.measure(im,
            (focused,score, blur.boolean(score)), debug=True)
        print 'Composition Score:', score, composition.boolean(score)
        
    if kind in ('faces',):
        result, score = faces.measure(im,debug=True)
        print "Face Score:", score, faces.boolean(faces)
    
    #win = CornerTweaker(im)
    #win.show()
    
    #_, sat, _ = image.split(image.rgb2hsv(im))
    #arr = image.cv2array(sat)
    #print arr.mean(), arr.std()
    
    # faces
    #im, score = faces.measure(im, debug=True)
    #print score, faces.boolean(score)
    
    # composition
    #noise_img, score = noise.measure(im, debug=False)
    ##n = (noise_img, score, noise.boolean(score))
    #hulls, score = blur.measure(im, debug=False)
    #b = (hulls, score, blur.boolean(score))
    #cimg, score = composition.measure(im, b, debug=True)
    #print score, composition.boolean(score)
    
    # BLUR
    #from time import time
    #start = time()
    ##im2 = image.threshold(image.laplace(im), threshold=75, type=cv.CV_THRESH_TOZERO)
    #hulls, score = blur.measure(im, debug=True)
    ##blur_img, score = blur.measure(im, debug=True)
    #end = time()
    #print "Time:", (end - start), "seconds"
    #image.show(im,  "image")
    ##image.show(noise_img, "Noise Image")
    #print score, blur.boolean(score)
    
    
    #CONTRAST
    
    #_, score = contrast.measure(im, debug=True)
    #image.show(im, "Image")
    #print score, contrast.boolean(score)

    """
    
    #BLUR
    
    #im2 = image.threshold(image.laplace(im), threshold=75, type=cv.CV_THRESH_TOZERO)
    im3, score = blur.measure(im, debug=True)
    image.show(im,  "image")
    image.show(im3, "Focus Mask")
    print score, blur.boolean(score)
    #plt.show()
    """

    
    #NOISE
    
    #noise_img, score = noise.measure(im, debug=True)
    #image.show(noise_img, "Noise")
    #print score, noise.boolean(score)
    
    
    """
    #hwin = ColorHistograms(im)
    #hwin.show()
    hwin = HistogramWindow(image.rgb2gray(im))
    hwin.show()
    
    print cv.GetSize(im), cv.GetSize(im2)
    print 'blur', papers.blurry_histogram(im)
    #print papers.blurry_histogram(im2)
    
    wind = DerivativeTweaker(im, title="image derivative")
    wind.show()
    
    win = EdgeThresholdTweaker(im, title="image edges")
    win.show(50)#edge_threshold(im))
    
    #win2 = EdgeThresholdTweaker(im2, title="image resized edges")
    #win2.show(edge_threshold(im2))
    """
    cv.WaitKey()
    cv.DestroyAllWindows()
    return 0
Example #17
            saturation = image_name.saturation()
            exposedness = image_name.exposedness()
            weight = (contrast**w_c) * (saturation**w_s) * (exposedness**
                                                            w_e) + 1e-12
            #            weight = ndimage.gaussian_filter(weight, sigma=(3, 3), order=0)
            self.weights.append(weight)
            sums = sums + weight
        for index in range(self.num_images):
            self.weights[index] = self.weights[index] / sums
        return self.weights

    def result_exposure(self, w_c=1, w_s=1, w_e=1):
        "Return the Exposure Fusion image with Naive method"
        self.get_weights_map(w_c, w_s, w_e)
        self.result_image = np.zeros(self.shape)
        for canal in range(3):
            for index in range(self.num_images):
                self.result_image[:, :, canal] += self.weights[
                    index] * self.images[index].array[:, :, canal]
        self.result_image[self.result_image < 0] = 0
        self.result_image[self.result_image > 1] = 1
        return self.result_image


if __name__ == "__main__":
    names = [line.rstrip('\n') for line in open('list_jpeg_test.txt')]
    W = WeightsMap("mask", names)
    im = W.result_exposure(1, 1, 1)
    image.show(im)
    misc.imsave("res/mask_naive.jpg", im)
Example #18
                    dest='w_s',
                    type=float,
                    default=1.0,
                    help='Exponent of the saturation')
parser.add_argument('-we',
                    dest='w_e',
                    type=float,
                    default=1.0,
                    help='Exponent of the exposedness')
args = parser.parse_args()
params = vars(args)  # convert to ordinary dict

names = [line.rstrip('\n') for line in open(params['names'])]
folder = params['folder']
height_pyr = params['height_pyr']
w_c = params['w_c']
w_s = params['w_s']
w_e = params['w_e']

# Naive Fusion

W = naivefusion.WeightsMap(folder, names)
res_naive = W.result_exposure(w_c, w_s, w_e)
image.show(res_naive)

# Laplacian Fusion

lap = laplacianfusion.LaplacianMap(folder, names, n=height_pyr)
res_lap = lap.result_exposure(w_c, w_s, w_e)
image.show(res_lap)
Example #19
        print "laplacian pyramid"
        self.get_laplacian_pyramid_images()
        result_pyramid = []
        for floor in range(self.height_pyr):
            print 'floor ', floor
            result_floor = np.zeros(self.laplacian_pyramid[0][floor].shape)
            for index in range(self.num_images):
                print 'image ', index
                for canal in range(3):
                    result_floor[:, :, canal] += self.laplacian_pyramid[index][
                        floor][:, :,
                               canal] * self.weights_pyramid[index][floor]
            result_pyramid.append(result_floor)
        # Get the image from the Laplacian pyramid
        self.result_image = result_pyramid[-1]
        for floor in range(self.height_pyr - 2, -1, -1):
            print 'floor ', floor
            self.result_image = result_pyramid[floor] + utils.Expand(
                self.result_image, 1)
        self.result_image[self.result_image < 0] = 0
        self.result_image[self.result_image > 1] = 1
        return self.result_image


if __name__ == "__main__":
    names = [line.rstrip('\n') for line in open('list_images.txt')]
    lap = LaplacianMap('arno', names, n=6)
    res = lap.result_exposure(1, 1, 1)
    image.show(res)
    misc.imsave("res/arno_3.jpg", res)
Example #20
def measure_color_roi(im, roi, area, focused_regions, debug=False):
    im = cv.CloneImage(im)
    g = Grid(cv.GetSize(im))
    
    
    """
    contours = Contours(image.threshold(focused_regions, threshold=1)).approx_poly()
    if debug:
        test = image.new_from(im)
        cv.Set(test, 0)
        for c in contours:
            i = 1
            while c:
                cv.FillPoly(test, [[c[x] for x in range(len(c))]], cv.RGB(0,64*i,0))
                c = c.v_next()
                i += 1
        #contours.draw(test, levels=9)
        image.show(test, "Test")
    """    
    #mask = image.And(image.threshold(focused_regions, threshold=1), roi)
    #
    #canvas = image.new_from(im, nChannels=1)
    #cv.Set(canvas, 0)
    #if cv.CountNonZero(mask) <= 1:
    #    return 0, 0
    #contours = Contours(image.dilate(mask)).approx_poly()
    #for c in contours:
    #    i = 1
    #    while c:
    #        cv.FillPoly(canvas, [[c[x] for x in range(len(c))]], 255)
    #        c = c.v_next()
    #        i += 1
    #mask = image.Or(mask, canvas)
    #if debug:
    #    image.show(mask, "MASK")
    #        
    #cv.Set(im, 0, image.invert(mask))
    cv.Set(im, 0, image.invert(roi))
    
    #area = cv.CountNonZero(image.threshold(im, threshold=1))
    
    if debug:
        image.show(g.draw(im,thickness=2), "Image + ROI + Focus point mask")

    scores = []
    im = image.rgb2gray(im)
    #canvas = image.And(plane, roi)
    quadrants = g.split_in_four(im)
    hist = []
    for q,quad in enumerate(quadrants):
        #scores.append(cv.Sum(quad)[0] / float(area/4))
        h = GrayscaleHist(value_range=(1,255)).use_image(quad)
        #image.show(h.to_img(), ['gray', 'red','green','blue'][i] + ' in ' + str(q))
        hist.append(h.to_array())
    scores = []
    excluded_points = set([(2, 1), (3, 0)])
    for i,h1 in enumerate(hist):
        for j,h2 in enumerate(hist):
            if i <= j or (i,j) in excluded_points:
                continue
            h = abs(h2-h1)
            ht = GrayscaleHist(value_range=(0,255)).use_array_as_hist(h)
            scores.append((h[5:].mean(), h[5:].std()))
    means = max([x[0] for x in scores])        
    stddevs = max([x[1] for x in scores])
    return means/255.0, stddevs/255.0
Example #21
def main(progname, *args):

    parser = OptionParser()
    parser.add_option("-f",
                      "--file",
                      dest="filename",
                      default=None,
                      help="analyze a given FILE ending in .jpg or .jpeg",
                      metavar="FILE")
    parser.add_option(
        "-i",
        "--imageset",
        dest="imageset",
        default=None,
        help="Runs on a predefined set of algorithms (li,chow,china,custom)")
    parser.add_option("-d",
                      "--debug",
                      dest="debug",
                      action="store_true",
                      default=False,
                      help="Enable visual debugging.")
    parser.add_option(
        "-t",
        "--type",
        dest="type",
        default="all",
        help="Specifies the type of feature to debug. Defaults to all.")

    (options, args) = parser.parse_args(list(args))

    if options.imageset:
        if options.imageset == 'li':
            process(li_dir)
            return 0
        elif options.imageset == 'chow':
            process(chow_dir)
            return 0
        elif options.imageset == 'china':
            process(china_dir)
            return 0
        elif options.imageset == 'custom':
            process()
            return 0

    if not options.filename:
        print "Please specify a file (--file) or image set (--imageset)."
        return 1

    if not options.debug:
        process([load_image(options.filename)])
        return 0

    if options.filename.startswith('data/'):
        options.filename = options.filename[len('data/'):]

    tdata = load_image(options.filename)
    kind = options.type.lower()

    size = None  #(320,240,'crop') # (0.5, 0.5, 'resize-p')
    if size is None:
        im = tdata.load()
    elif size[-1] == 'crop':
        im = image.random_cropped_region(tdata.load(), size[:2])
    elif size[-1] == 'resize':
        im = tdata.load(size[:2])
    elif size[-1] == 'resize-p':
        im = image.resize(tdata.load(), by_percent=size[:2])
    else:
        raise TypeError, "Invalid image sizing type."

    image.show(im, "Image")
    #l,u,v = image.split(image.rgb2luv(im))
    ##cv.Set(l, 128)
    ##cv.EqualizeHist(l, l)
    ##cv.EqualizeHist(u, u)
    ##image.show(image.luv2rgb(image.merge(l,u,v)), "test")
    #s = cv.GetSize(im)
    #t = image.absDiff(u,v)
    #image.show(t, "test")
    #print "Test Score:", cv.CountNonZero(t) / float(s[0] * s[1])
    ##image.show(image.threshold(image.And(u,v), threshold=1), "LUV")

    # noise
    if kind in ('all', 'noise'):
        noise_img, score = noise.measure(im, debug=True)
        #image.show(noise_img, "Noise Result")
        print 'Noise Score:', score, noise.boolean(score)

    # contrast
    if kind in ('all', 'contrast'):
        contrast_img, score = contrast.measure(im, debug=True)
        #image.show(contrast_img, "Contrast Result")
        print 'Contrast Score:', score, contrast.boolean(score)

    # blur
    if kind in ('all', 'blur', 'composition'):
        focused, score = blur.measure(im, debug=kind in ('all', 'blur'))
        #image.show(focused,  "Blur Result")
        print 'Blur Score:', score, blur.boolean(score)

    # composition
    if kind in ('all', 'composition'):
        composition_img, score = composition.measure(
            im, (focused, score, blur.boolean(score)), debug=True)
        print 'Composition Score:', score, composition.boolean(score)

    if kind in ('faces', ):
        result, score = faces.measure(im, debug=True)
        print "Face Score:", score, faces.boolean(faces)

    #win = CornerTweaker(im)
    #win.show()

    #_, sat, _ = image.split(image.rgb2hsv(im))
    #arr = image.cv2array(sat)
    #print arr.mean(), arr.std()

    # faces
    #im, score = faces.measure(im, debug=True)
    #print score, faces.boolean(score)

    # composition
    #noise_img, score = noise.measure(im, debug=False)
    ##n = (noise_img, score, noise.boolean(score))
    #hulls, score = blur.measure(im, debug=False)
    #b = (hulls, score, blur.boolean(score))
    #cimg, score = composition.measure(im, b, debug=True)
    #print score, composition.boolean(score)

    # BLUR
    #from time import time
    #start = time()
    ##im2 = image.threshold(image.laplace(im), threshold=75, type=cv.CV_THRESH_TOZERO)
    #hulls, score = blur.measure(im, debug=True)
    ##blur_img, score = blur.measure(im, debug=True)
    #end = time()
    #print "Time:", (end - start), "seconds"
    #image.show(im,  "image")
    ##image.show(noise_img, "Noise Image")
    #print score, blur.boolean(score)

    #CONTRAST

    #_, score = contrast.measure(im, debug=True)
    #image.show(im, "Image")
    #print score, contrast.boolean(score)
    """
    
    #BLUR
    
    #im2 = image.threshold(image.laplace(im), threshold=75, type=cv.CV_THRESH_TOZERO)
    im3, score = blur.measure(im, debug=True)
    image.show(im,  "image")
    image.show(im3, "Focus Mask")
    print score, blur.boolean(score)
    #plt.show()
    """

    #NOISE

    #noise_img, score = noise.measure(im, debug=True)
    #image.show(noise_img, "Noise")
    #print score, noise.boolean(score)
    """
    #hwin = ColorHistograms(im)
    #hwin.show()
    hwin = HistogramWindow(image.rgb2gray(im))
    hwin.show()
    
    print cv.GetSize(im), cv.GetSize(im2)
    print 'blur', papers.blurry_histogram(im)
    #print papers.blurry_histogram(im2)
    
    wind = DerivativeTweaker(im, title="image derivative")
    wind.show()
    
    win = EdgeThresholdTweaker(im, title="image edges")
    win.show(50)#edge_threshold(im))
    
    #win2 = EdgeThresholdTweaker(im2, title="image resized edges")
    #win2.show(edge_threshold(im2))
    """
    cv.WaitKey()
    cv.DestroyAllWindows()
    return 0
Example #22
def measure(im, debug=False):
    gray = image.rgb2gray(im)
    size = cv.GetSize(im)
    total = float(size[0] * size[1])
    l = image.sub(gray, image.gaussian(gray, 5))
    l2 = image.sub(gray, image.gaussian(gray, 9))
    edges = image.dilate(image.auto_edges(im, percentage=0.2))
    if debug:
        image.show(image.threshold(l, threshold=1), "Before Edge Removal (kernel=5)")
        image.show(image.threshold(l2, threshold=1), "Before Edge Removal (kernel=9)")
    cv.Set(l, 0, image.threshold(edges, threshold=1))
    cv.Set(l2, 0, image.threshold(edges, threshold=1))
    
    l = image.threshold(l, threshold=1)
    l2 = image.threshold(l2, threshold=1)
    
    
    
    if debug:
        image.show(image.threshold(edges, threshold=1), "Edges")
        image.show(l, "After Edge Removal (kernel=5)")
        image.show(l2, "After Edge Removal (kernel=9)")
        
    noise2 = image.new_from(gray)
    cv.EqualizeHist(gray, noise2)
    cv.AbsDiff(noise2, gray, noise2)
    cv.Set(noise2, 0, image.threshold(image.sobel(im, xorder=2, yorder=2), threshold=4))
    diff = image.cv2array(noise2)
    if debug:
        image.show(noise2, "DIFF")
        print "M", diff.mean(), "S", diff.std()
    diff_stat = (diff.mean(), diff.std())
    percent_noise = cv.CountNonZero(noise2) / total
    if debug:
        image.show(noise2, "NOISE2")
        


    # magical, I don't understand how this works
    _, sat, _ = image.split(image.rgb2hsv(im))
    edges = image.auto_edges(im)
    l,u,v = tuple(map(image.equalize_hist, image.split(image.rgb2luv(im))))
    u,v = tuple(map(image.gaussian, (u,v)))
    if debug:
        image.show(l, "1. L")
        image.show(u, "1. U")
        image.show(v, "1. V")
    la,ua,va,uva = tuple(map(image.cv2array, (l,u,v, image.And(l,u,v))))
    test = image.new_from(gray)
    test2 = image.new_from(gray)
    cv.Xor(u,v,test)
    if debug:
        image.show(test, "2. U Xor V")
    cv.Set(test, 0, image.dilate(edges))
    #cv.Set(test, 0, image.invert(image.threshold(sat, threshold=8)))
    uv_score = cv.CountNonZero(test) / total
    if debug:
        image.show(test, "3. U Xor V - dilate(Edges) - invert(threshold(Saturation))")

    g = Grid(size)
    images = map(image.cv2array, g.split_into(test, 6))
    arr = image.cv2array(test)
    avg_mean, avg_std = arr.mean(), arr.std()


    #ms = [(a.mean(), a.std()) for a in images]
    #min_mean = min_std = 255
    #max_mean = max_std = 0
    #for m,s in ms:
    #    min_mean = min(min_mean, m)
    #    min_std = min(min_std, s)
    #    max_mean = max(max_mean, m)
    #    max_std = max(max_std, s)
    #if debug:
    #    print min_mean, min_std
    #    print avg_mean, avg_std
    #    print max_mean, max_std
    #
    #score = uv_score, min_mean, avg_mean, avg_std, max_mean
    uv_score = uv_score, avg_std

    score = cv.CountNonZero(l) / total,  cv.CountNonZero(l2) / total, \
        diff_stat[0], diff_stat[1], uv_score
    
    return l, score
Example #23
def measure(im, debug=False):
    size = cv.GetSize(im)
    npixels = size[0] * size[1]
    #print 'np', npixels

    focused = get_focus_points(im, debug)
    points = convert_to_points(focused)

    if debug:
        print "\t" + str(
            len(points)), '/', npixels, '=', len(points) / float(npixels)
        print "\tlen(points) =", len(points)
        image.show(focused, "4. Focused Points")

    saturation_score = 0
    if not image.is_grayscale(im):
        edges = image.auto_edges(im)
        _, saturation, _ = image.split(image.rgb2hsv(im))
        if debug:
            image.show(saturation, "5. Saturation")
        #saturation = image.laplace(image.gaussian(saturation, 3))
        saturation = image.invert(saturation)
        mask = image.invert(image.threshold(im, threshold=16))
        if debug:
            image.show(saturation, "5.3. Laplace of Saturation")
        cv.Set(saturation, 0, mask)
        cv.Set(saturation, 0, focused)
        if debug:
            image.show(mask,
                       "5.6. Mask(focused AND invert(threshold(im, 16)))")
            image.show(saturation, "6. Set(<5.3>, 0, <5.6>)")

        saturation_score = cv.Sum(saturation)[0] / float(npixels * 255)
        print "\tSaturation Score:", saturation_score

    # light exposure
    h, s, v = image.split(image.rgb2hsv(im))
    if debug:
        image.show(h, "7. Hue")
        image.show(s, "7. Saturation")
        image.show(v, "7. Value")
    diff = cv.CloneImage(v)
    cv.Set(diff, 0, image.threshold(s, threshold=16))
    diff = image.dilate(diff, iterations=10)
    if debug:
        thres_s = image.threshold(s, threshold=16)
        image.show(thres_s, "8.3. Mask(threshold(<7.Saturation>, 16))")
        image.show(diff, "8.6. Dilate(Set(<7.Value>, 0, <8.3>), 10)")

    cdiff = cv.CountNonZero(diff)
    if cdiff > 0 and cdiff / float(npixels) > 0.01:
        test = cv.CloneImage(v)
        cv.Set(test, 0, image.invert(diff))
        s = cv.Sum(test)[0] / float(cdiff * 255)
        if debug:
            print '\tLight Exposure Score:', s
    else:
        s = 0

    if image.is_grayscale(im):
        return focused, (1, 1, 1, saturation_score, s)

    # we want to short circuit ASAP to avoid doing KMeans on 50% of the image's pixels
    if len(points) > npixels / 2:
        return focused, (1, 1, 1, saturation_score, s)

    # we're so blurry we don't have any points!
    if len(points) < 1:
        return focused, (0, 0, 0, saturation_score, s)

    if debug:
        im2 = cv.CloneImage(im)
    focused_regions = image.new_from(im)
    cv.Set(focused_regions, 0)

    r = lambda x: random.randrange(1, x)
    groups = form_groups(points,
                         estimated_size=min(max(int(len(points) / 1000), 2),
                                            15))
    #groups = form_groups(points, threshold=max(cv.GetSize(im))/16)
    #print 'groups', len(groups)
    hulls = draw_groups(groups, focused_regions)
    focused_regions = image.threshold(focused_regions,
                                      threshold=32,
                                      type=cv.CV_THRESH_TOZERO)
    min_area = npixels * 0.0005
    densities = [h.points_per_pixel() for h in hulls if h.area() >= min_area]

    if debug:
        #image.show(focused, "Focused Points")
        image.show(focused_regions, "9. Focused Regions from <4>")
        cv.Sub(
            im2,
            image.gray2rgb(
                image.invert(image.threshold(focused_regions, threshold=1))),
            im2)
        image.show(im2, "10. threshold(<9>)")

    focused_regions = image.rgb2gray(focused_regions)

    densities = array(densities)
    c = cv.CountNonZero(focused_regions)
    c /= float(npixels)

    score = (c, densities.mean(), densities.std(), saturation_score, s)

    return focused, score
Example #24
def gcs_matching(L, R, Seeds=None, tau=TAU, mu=MU, show=True):

    # NEW: Add padding...

    if (ROLL == False):
        L = zero_pad(L, DISPARITY)
        R = zero_pad(R, DISPARITY)

    # ...

    (height, width) = shape = L.shape

    # ...

    auxilary = numpy.nan * numpy.ones([height, width, DISPARITY_SPAN])

    Tau = Tau_initialize(shape)
    Tau_count = 0

    # Best costs
    CC = 0 - numpy.inf * numpy.ones(shape)
    CC_prime = 0 - numpy.inf * numpy.ones(shape)

    # Best disparities
    DD = 0 - (1 + DISPARITY) * numpy.ones(shape)
    DD_prime = 0 - (1 + DISPARITY) * numpy.ones(shape)

    # Handle preemptive matching
    if Seeds is None:
        Seeds = preemptive_match(auxilary, L, R)

    # Setup visuals
    if show:
        import matplotlib.pyplot as pyplot
        figure, axis = pyplot.subplots()
        pyplot.plot()
        pyplot.hold(True)

    print("GCS Algorithm...")

    while True:

        sys.stdout.write('\r')
        sys.stdout.write("[S Size]: %d" % Seeds.qsize())
        sys.stdout.write('\t')
        sys.stdout.write("[Tau Size]: %d" % Tau_count)
        sys.stdout.flush()

        if Seeds.empty(): break

        (_, ss) = Seeds.get()

        for qq in get_best_neighbours(auxilary, L, R, ss):

            (vv, uu, uu_prime) = qq

            cc = get_similarity(auxilary, L, R, qq)

            if (cc < tau): continue
            if Tau_contains(Tau, qq): continue
            if (cc + mu < numpy.min([CC[vv][uu], CC_prime[vv][uu_prime]])):
                continue

            Tau_insert(Tau, qq)
            Tau_count += 1

            Seeds.put((0 - cc, qq))

            if (CC[vv][uu] < cc):
                CC[vv][uu] = cc
                DD[vv][uu] = get_disparity(qq)

            if (CC_prime[vv][uu_prime] < cc):
                CC_prime[vv][uu_prime] = cc
                DD_prime[vv][uu_prime] = get_disparity(qq)

            if show and (Tau_count % 1000) == 0:
                image.show(DD, block=False)
                figure.canvas.draw()
                axis.cla()

    sys.stdout.write('\n')

    return (DD, DD_prime, CC, CC_prime)
Example #25
			#numpy.random.shuffle(dd_range) # Unnecessary
			cc = [ get_similarity(L_image, R_image, (yy_range[ii], xx_range[ii], xx_range[ii]+dd), auxilary) for dd in dd_range ]
			kk = numpy.argmax(cc)
			Seeds.put( (0-cc[kk], (yy_range[ii], xx_range[ii], xx_range[ii]+dd_range[kk]) ) )
			print(Seeds.qsize(), Seeds_count)

		# ...

		(DD, DD_prime, CC, CC_prime) = gcs_matching(L_image, R_image, Seeds, auxilary)

		pickle.dump(DD, open("DD", "wb"))
		pickle.dump(DD_prime, open("DD_prime", "wb"))
		
		pickle.dump(CC, open("CC", "wb"))
		pickle.dump(CC_prime, open("CC_prime", "wb"))

		exit()

	else:

		DD = pickle.load(open("DD", "rb"))
		DD_prime = pickle.load(open("DD_prime", "rb"))

		CC = pickle.load(open("CC", "rb"))
		CC_prime = pickle.load(open("CC_prime", "rb"))

		(height, width) = shape = DD.shape

	image.show(DD)
	image.show(DD_prime)
Example #26
#
"""
RGB color image
"""
from scipy import misc
import numpy as np
import image as img


class rgb_image(img.image):
    """
    """
    def __init__(self, w=0, h=0, fileName=None):
        """
        Construct an image of width x height pixels containing three channels: R, G and B
        """
        if None == fileName:
            img.image.__init__(self, w, h, 3)
        else:
            data = misc.imread(fileName)
            img.image.__init__(self, data.shape[0], data.shape[1], 3)
            self.__pixels__ = data[:, :, 0:3] / 255.


if __name__ == '__main__':
    img = rgb_image(fileName='lena_rgb.png')
    img.pixels = np.ones((img.height, img.width, 3), np.double) - img.pixels
    img.show()
    img.save("inverse_rgb_lena.png")
Example #27
l = cv.CreateImage((256,256), cv.IPL_DEPTH_8U, 1)
cv.Set(l, 255)
u = image.new_from(l)
v = image.new_from(l)
cv.Set(u, 0)
cv.Set(v, 0)

size = cv.GetSize(l)
print size

for x in range(256):
    for y in range(size[1]):
        cv.Set2D(u, y, x, x)
        cv.Set2D(v, 255-x, min(y, 255), x)
        
image.show(u, "U")
image.show(v, "V")

rgb = image.luv2rgb(image.merge(l,u,v))
r,g,b = image.split(rgb)
#xor = image.threshold(image.Xor(u,v), 0, cv.CV_THRESH_BINARY)
xor = image.Xor(u,v)
cv.Threshold(xor, xor, 16, 255, cv.CV_THRESH_TOZERO)
image.show(rgb, "RGB")
image.show(xor, "Xor")

#cv.Sub(rgb, image.gray2rgb(image.invert(xor)), rgb)
_, sat, _ = image.split(image.rgb2hsv(rgb))
image.show(sat, 'Saturation')
#cv.Set(xor, 0, image.invert(image.threshold(sat, threshold=4)))
Example #28
		aspect = float(MAX_WIDTH)/width
		L = arrays.resample(L, [ int(aspect*x) for x in L.shape ])
		R = arrays.resample(R, [ int(aspect*x) for x in R.shape ])

	print("Left shape: " + str(L.shape))
	print("Right shape: " + str(R.shape))

	print("Compute disparity map with LEFT image as reference")
	
	if (os.path.exists(L_disparity_map_path)):
		L_disparity_map = pickle.load(open(L_disparity_map_path, "rb"))
	else:
		L_disparity_map = block_matching(L, R)
		pickle.dump(L_disparity_map, open(L_disparity_map_path, "wb"))

	image.show(L_disparity_map, title = "Left Disparity Map")

	print("Compute disparity map with RIGHT image as reference")

	if (os.path.exists(R_disparity_map_path)):
		R_disparity_map = pickle.load(open(R_disparity_map_path, "rb"))
	else:
		R_disparity_map = block_matching(R, L)
		pickle.dump(R_disparity_map, open(R_disparity_map_path, "wb"))

	image.show(R_disparity_map, title = "Right Disparity Map")

	print("Remove any inconsistency between both images")
	
	depth_map = remove_inconsistency(L_disparity_map, R_disparity_map, empty_value = 8)
	depth_map = 0-depth_map
Example #29
l = cv.CreateImage((256, 256), cv.IPL_DEPTH_8U, 1)
cv.Set(l, 255)
u = image.new_from(l)
v = image.new_from(l)
cv.Set(u, 0)
cv.Set(v, 0)

size = cv.GetSize(l)
print size

for x in range(256):
    for y in range(size[1]):
        cv.Set2D(u, y, x, x)
        cv.Set2D(v, 255 - x, min(y, 255), x)

image.show(u, "U")
image.show(v, "V")

rgb = image.luv2rgb(image.merge(l, u, v))
r, g, b = image.split(rgb)
#xor = image.threshold(image.Xor(u,v), 0, cv.CV_THRESH_BINARY)
xor = image.Xor(u, v)
cv.Threshold(xor, xor, 16, 255, cv.CV_THRESH_TOZERO)
image.show(rgb, "RGB")
image.show(xor, "Xor")

#cv.Sub(rgb, image.gray2rgb(image.invert(xor)), rgb)
_, sat, _ = image.split(image.rgb2hsv(rgb))
image.show(sat, 'Saturation')
#cv.Set(xor, 0, image.invert(image.threshold(sat, threshold=4)))
Example #30
def block_matching(
	L_image,
	R_image,
	disparity = DISPARITY,
	window = WINDOW,
	roll = True,
	cost_default = numpy.inf,
	cost_function = costs.ssd,
	censor = True, 
	censor_threshold = 2.0,
	show = True):

	# Brute-force matching algorithm!
	
	L_image = arrays.as_bytes(L_image)
	R_image = arrays.as_bytes(R_image)

	# ...

	shape = (height, width) = L_image.shape

	window_span		= 1 + 2*window
	disparity_span	= 1 + 2*disparity
	disparity_range	= (0-disparity) + numpy.array(range(disparity_span))
	disparity_map	= (0-disparity) * numpy.ones(shape)

	if roll:
		yy_range = xrange(height)
		xx_range = xrange(width)
	else:
		yy_range = xrange(window, height-window)
		xx_range = xrange(window+disparity, width-window-disparity)

	if show:
		figure, axis = pyplot.subplots()
		pyplot.plot()
		pyplot.hold(True)

	for yy in yy_range:

		percent_complete = numpy.floor(100*(float(1+yy)/height))
		sys.stdout.write("\rProgress: %d%%" % percent_complete)
		sys.stdout.flush()

		if show:
			image.show(disparity_map, block = False)
			figure.canvas.draw()
			axis.cla()

		for xx in xx_range:

			L_window = image.neighbours(L_image, yy, xx, size = window, roll = roll)

			if (censor): # Censorship: Remove areas of low texture...

				# Censor variance along the horizontal scanline, try: { 2, 8, 16, 32, 48 }

				# IMPORTANT:
				# Rows (xx) is axis=1, ie. numpy.sum(W, axis=1) <=> W[:][0]+W[:][1]+W[:][2]
				# Columns (yy) is axis=0, ie. numpy.sum(W, axis=0) <=> W[0][:]+W[1][:]+W[2][:]
				
				scanline_mean = numpy.mean(L_window, axis = 0) # Horizontal scanline mean
				scanline_variance = numpy.mean((L_window - scanline_mean)**2) # Horizontal scanline variance

				if (scanline_variance < censor_threshold):
					continue # Ensure texture quality

			# Find the best disparity match...

			''' #This one-liner doesn't seem to speed things up...
			disparity_map[yy][xx] = (0-disparity) + numpy.array([
				cost_function(L_window, image.neighbours(R, yy, xx+dd, size = window, roll = roll))
				for dd in disparity_range
			]).argmin()
			'''

			cc_best = cost_default
			
			for dd in disparity_range:

				R_window = image.neighbours(R_image, yy, xx+dd, size = window, roll = roll)

				cc = cost_function(L_window, R_window)

				if (cc < cc_best):
					cc_best = cc
					disparity_map[yy][xx] = dd
	
	sys.stdout.write("\r")

	return disparity_map
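For reference, the two per-window computations used inside the loops above (the SSD cost and the low-texture censor) as a small self-contained NumPy sketch; the window extraction itself is done by the local image.neighbours helper in the original and is replaced here by random test windows:

import numpy as np

def ssd(a, b):
    # Sum of squared differences between two equally sized windows.
    d = a.astype(float) - b.astype(float)
    return np.sum(d * d)

def low_texture(window, threshold=2.0):
    # Variance around the per-column scanline mean, as in the censor step above.
    scanline_mean = np.mean(window, axis=0)
    return np.mean((window - scanline_mean) ** 2) < threshold

L_window = np.random.randint(0, 256, (7, 7))
R_window = np.random.randint(0, 256, (7, 7))
if not low_texture(L_window):
    cost = ssd(L_window, R_window)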
Example #31
def measure(im, debug=False):
    size = cv.GetSize(im)
    npixels = size[0] * size[1]
    #print 'np', npixels
    
    
    focused = get_focus_points(im, debug)
    points = convert_to_points(focused)
    
    if debug:
        print "\t"+str(len(points)), '/', npixels, '=', len(points) / float(npixels)
        print "\tlen(points) =", len(points)
        image.show(focused, "4. Focused Points")

    saturation_score = 0
    if not image.is_grayscale(im):
        edges = image.auto_edges(im)
        _, saturation, _ = image.split(image.rgb2hsv(im))
        if debug:
            image.show(saturation, "5. Saturation")
        #saturation = image.laplace(image.gaussian(saturation, 3))
        saturation = image.invert(saturation)
        mask = image.invert(image.threshold(im, threshold=16))
        if debug:
            image.show(saturation, "5.3. Laplace of Saturation")
        cv.Set(saturation, 0, mask)
        cv.Set(saturation, 0, focused)
        if debug:
            image.show(mask, "5.6. Mask(focused AND invert(threshold(im, 16)))")
            image.show(saturation, "6. Set(<5.3>, 0, <5.6>)")

        saturation_score = cv.Sum(saturation)[0] / float(npixels * 255)
        print "\tSaturation Score:", saturation_score
        
    # light exposure
    h,s,v = image.split(image.rgb2hsv(im))
    if debug:
        image.show(h, "7. Hue")
        image.show(s, "7. Saturation")
        image.show(v, "7. Value")
    diff = cv.CloneImage(v)
    cv.Set(diff, 0, image.threshold(s, threshold=16))
    diff = image.dilate(diff, iterations=10)
    if debug:
        thres_s = image.threshold(s, threshold=16)
        image.show(thres_s, "8.3. Mask(threshold(<7.Saturation>, 16))")
        image.show(diff, "8.6. Dilate(Set(<7.Value>, 0, <8.3>), 10)")

    cdiff = cv.CountNonZero(diff)
    if cdiff > 0 and cdiff / float(npixels) > 0.01:
        test = cv.CloneImage(v)
        cv.Set(test, 0, image.invert(diff))
        s = cv.Sum(test)[0] / float(cdiff * 255)
        if debug:
            print '\tLight Exposure Score:', s
    else:
        s = 0
        
    if image.is_grayscale(im):
        return focused, (1, 1, 1, saturation_score, s)
    
    # we want to short circuit ASAP to avoid doing KMeans on 50% of the image's pixels
    if len(points) > npixels/2:
        return focused, (1, 1, 1, saturation_score, s)

    # we're so blurry we don't have any points!
    if len(points) < 1:
        return focused, (0, 0, 0, saturation_score, s)
    
    if debug:
        im2 = cv.CloneImage(im)
    focused_regions = image.new_from(im)
    cv.Set(focused_regions, 0)
    
    r = lambda x: random.randrange(1, x)
    groups = form_groups(points,
        estimated_size=min(max(int(len(points) / 1000), 2), 15))
    #groups = form_groups(points, threshold=max(cv.GetSize(im))/16)
    #print 'groups', len(groups)
    hulls = draw_groups(groups, focused_regions)
    focused_regions = image.threshold(focused_regions, threshold=32, type=cv.CV_THRESH_TOZERO)
    min_area = npixels * 0.0005
    densities = [h.points_per_pixel() for h in hulls if h.area() >= min_area]
    
    if debug:    
        #image.show(focused, "Focused Points")
        image.show(focused_regions, "9. Focused Regions from <4>")
        cv.Sub(im2, image.gray2rgb(image.invert(image.threshold(focused_regions, threshold=1))), im2)
        image.show(im2, "10. threshold(<9>)")
    
    
    focused_regions = image.rgb2gray(focused_regions)
    
    densities = array(densities)
    c = cv.CountNonZero(focused_regions)
    c /= float(npixels)
    
    score = (c, densities.mean(), densities.std(), saturation_score, s)
    
    return focused, score
Example #32
        aspect = float(MAX_WIDTH) / width
        L = arrays.resample(L, [int(aspect * x) for x in L.shape])
        R = arrays.resample(R, [int(aspect * x) for x in R.shape])

    print("Left shape: " + str(L.shape))
    print("Right shape: " + str(R.shape))

    print("Compute disparity map with LEFT image as reference")

    if (os.path.exists(L_disparity_map_path)):
        L_disparity_map = pickle.load(open(L_disparity_map_path, "rb"))
    else:
        L_disparity_map = block_matching(L, R)
        pickle.dump(L_disparity_map, open(L_disparity_map_path, "wb"))

    image.show(L_disparity_map, title="Left Disparity Map")

    print("Compute disparity map with RIGHT image as reference")

    if (os.path.exists(R_disparity_map_path)):
        R_disparity_map = pickle.load(open(R_disparity_map_path, "rb"))
    else:
        R_disparity_map = block_matching(R, L)
        pickle.dump(R_disparity_map, open(R_disparity_map_path, "wb"))

    image.show(R_disparity_map, title="Right Disparity Map")

    print("Remove any inconsistency between both images")

    depth_map = remove_inconsistency(L_disparity_map,
                                     R_disparity_map,
Example #33
from flask import Flask, render_template, request
import requests
from bs4 import BeautifulSoup
import datetime
import selenium
from selenium import webdriver
import image
#https://www.weather.go.kr/mini/marine/wavemodel_c.jsp?prefix=kim_cww3_%5BAREA%5D_wdpr_&area=jeju&tm=2020.04.28.09&ftm=s000&newTm=2020.04.28.09&x=4&y=10

a = input("date")

url = "https://www.weather.go.kr/mini/marine/wavemodel_c.jsp?prefix=kim_cww3_%5BAREA%5D_wdpr_&area=jeju&tm=" + a

driver = webdriver.Chrome()
driver.implicitly_wait(3)
driver.get(url)

soup = BeautifulSoup(driver.page_source, "html.parser")

for i in soup.select("#chart_image"):
    src = i.find("img")['src']

image = image.open(src)
image.show()
Example #34
def block_matching(L_image,
                   R_image,
                   disparity=DISPARITY,
                   window=WINDOW,
                   roll=True,
                   cost_default=numpy.inf,
                   cost_function=costs.ssd,
                   censor=True,
                   censor_threshold=2.0,
                   show=True):

    # Brute-force matching algorithm!

    L_image = arrays.as_bytes(L_image)
    R_image = arrays.as_bytes(R_image)

    # ...

    shape = (height, width) = L_image.shape

    window_span = 1 + 2 * window
    disparity_span = 1 + 2 * disparity
    disparity_range = (0 - disparity) + numpy.array(range(disparity_span))
    disparity_map = (0 - disparity) * numpy.ones(shape)

    if roll:
        yy_range = xrange(height)
        xx_range = xrange(width)
    else:
        yy_range = xrange(window, height - window)
        xx_range = xrange(window + disparity, width - window - disparity)

    if show:
        figure, axis = pyplot.subplots()
        pyplot.plot()
        pyplot.hold(True)

    for yy in yy_range:

        percent_complete = numpy.floor(100 * (float(1 + yy) / height))
        sys.stdout.write("\rProgress: %d%%" % percent_complete)
        sys.stdout.flush()

        if show:
            image.show(disparity_map, block=False)
            figure.canvas.draw()
            axis.cla()

        for xx in xx_range:

            L_window = image.neighbours(L_image,
                                        yy,
                                        xx,
                                        size=window,
                                        roll=roll)

            if (censor):  # Censorship: Remove areas of low texture...

                # Censor variance along the horizontal scanline, try: { 2, 8, 16, 32, 48 }

                # IMPORTANT:
                # Rows (xx) is axis=1, ie. numpy.sum(W, axis=1) <=> W[:][0]+W[:][1]+W[:][2]
                # Columns (yy) is axis=0, ie. numpy.sum(W, axis=0) <=> W[0][:]+W[1][:]+W[2][:]

                scanline_mean = numpy.mean(L_window,
                                           axis=0)  # Horizontal scanline mean
                scanline_variance = numpy.mean(
                    (L_window - scanline_mean)**2)  # Horizontal scanline variance

                if (scanline_variance < censor_threshold):
                    continue  # Ensure texture quality

            # Find the best disparity match...
            ''' #This one-liner doesn't seem to speed things up...
			disparity_map[yy][xx] = (0-disparity) + numpy.array([
				cost_function(L_window, image.neighbours(R, yy, xx+dd, size = window, roll = roll))
				for dd in disparity_range
			]).argmin()
			'''

            cc_best = cost_default

            for dd in disparity_range:

                R_window = image.neighbours(R_image,
                                            yy,
                                            xx + dd,
                                            size=window,
                                            roll=roll)

                cc = cost_function(L_window, R_window)

                if (cc < cc_best):
                    cc_best = cc
                    disparity_map[yy][xx] = dd

    sys.stdout.write("\r")

    return disparity_map
Example #35
    _transformation = seismic.parse_transformation(_seismic, 15 * 1000,
                                                   int(2.5 * 1000), 10)  # 2.5

    #test shit begin
    test_data, test_match = test_trapeziums()
    _geo = Geo(Well(test_data[0], test_data[2]),
               Well(test_data[1], test_data[3]), _transformation.width,
               _transformation.left_well_intent,
               _transformation.right_well_intent)
    _match = Match(
        [match[1][0] for match in reversed(test_match)],
        [match[0][0] + 1 for match in reversed(test_match)],
        [match[1][1] for match in reversed(test_match)],
        [match[0][1] + 1 for match in reversed(test_match)],
    )
    # test data end
    _image = image.create(_geo.height, _geo.width)

    # paint.lines(_image, _geo, _match)
    paint.fill(_image, _geo, _match)
    paint.wells(_image, _geo)

    _base_image = image.create(_geo.height, _geo.width)
    paint.wells(_base_image, _geo)
    paint.lines(_base_image, _geo, _match)

    _result = image.create(_transformation.height, _transformation.width)
    seismic.paint_transformation(_result, _transformation, _image)

    image.show(_image, _base_image, _result, paint.bined(_result))
Example #36
0
def measure(im, debug=False):
    gray = image.rgb2gray(im)
    size = cv.GetSize(im)
    total = float(size[0] * size[1])
    l = image.sub(gray, image.gaussian(gray, 5))
    l2 = image.sub(gray, image.gaussian(gray, 9))
    edges = image.dilate(image.auto_edges(im, percentage=0.2))
    if debug:
        image.show(image.threshold(l, threshold=1),
                   "Before Edge Removal (kernel=5)")
        image.show(image.threshold(l2, threshold=1),
                   "Before Edge Removal (kernel=9)")
    cv.Set(l, 0, image.threshold(edges, threshold=1))
    cv.Set(l2, 0, image.threshold(edges, threshold=1))

    l = image.threshold(l, threshold=1)
    l2 = image.threshold(l2, threshold=1)

    if debug:
        image.show(image.threshold(edges, threshold=1), "Edges")
        image.show(l, "After Edge Removal (kernel=5)")
        image.show(l2, "After Edge Removal (kernel=9)")

    noise2 = image.new_from(gray)
    cv.EqualizeHist(gray, noise2)
    cv.AbsDiff(noise2, gray, noise2)
    cv.Set(noise2, 0,
           image.threshold(image.sobel(im, xorder=2, yorder=2), threshold=4))
    diff = image.cv2array(noise2)
    if debug:
        image.show(noise2, "DIFF")
        print "M", diff.mean(), "S", diff.std()
    diff_stat = (diff.mean(), diff.std())
    percent_noise = cv.CountNonZero(noise2) / total
    if debug:
        image.show(noise2, "NOISE2")

    # magical, I don't understand how this works
    _, sat, _ = image.split(image.rgb2hsv(im))
    edges = image.auto_edges(im)
    l, u, v = tuple(map(image.equalize_hist, image.split(image.rgb2luv(im))))
    u, v = tuple(map(image.gaussian, (u, v)))
    if debug:
        image.show(l, "1. L")
        image.show(u, "1. U")
        image.show(v, "1. V")
    la, ua, va, uva = tuple(map(image.cv2array, (l, u, v, image.And(l, u, v))))
    test = image.new_from(gray)
    test2 = image.new_from(gray)
    cv.Xor(u, v, test)
    if debug:
        image.show(test, "2. U Xor V")
    cv.Set(test, 0, image.dilate(edges))
    #cv.Set(test, 0, image.invert(image.threshold(sat, threshold=8)))
    uv_score = cv.CountNonZero(test) / total
    if debug:
        image.show(
            test, "3. U Xor V - dilate(Edges) - invert(threshold(Saturation))")

    g = Grid(size)
    images = map(image.cv2array, g.split_into(test, 6))
    arr = image.cv2array(test)
    avg_mean, avg_std = arr.mean(), arr.std()

    #ms = [(a.mean(), a.std()) for a in images]
    #min_mean = min_std = 255
    #max_mean = max_std = 0
    #for m,s in ms:
    #    min_mean = min(min_mean, m)
    #    min_std = min(min_std, s)
    #    max_mean = max(max_mean, m)
    #    max_std = max(max_std, s)
    #if debug:
    #    print min_mean, min_std
    #    print avg_mean, avg_std
    #    print max_mean, max_std
    #
    #score = uv_score, min_mean, avg_mean, avg_std, max_mean
    uv_score = uv_score, avg_std

    score = cv.CountNonZero(l) / total,  cv.CountNonZero(l2) / total, \
        diff_stat[0], diff_stat[1], uv_score

    return l, score
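
The score returned here is a nested tuple: two edge-coverage ratios (kernel sizes 5 and 9), the noise mean and standard deviation, and the (uv_score, avg_std) pair. A hypothetical caller (the file name and the unpacking below are assumptions, not part of the source) might consume it like this:

im = image.load("photo.jpg")  # hypothetical input image
mask, (edge5, edge9, noise_mean, noise_std, (uv, uv_std)) = measure(im, debug=False)
print "edge coverage (kernel 5 / 9):", edge5, edge9
print "noise estimate (mean, std):", noise_mean, noise_std
print "chroma structure (ratio, std):", uv, uv_std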
Example #37
0
import seismic
from data import *

if __name__ == '__main__':
    _seismic = image.load("SeismicScaled.jpg")
    _transformation = seismic.parse_transformation(_seismic, 15 * 1000, int(2.5 * 1000), 10)

    _geo = Geo(Well.generate_left(), Well.generate_right(), _transformation.width, _transformation.left_well_intent, _transformation.right_well_intent)
    _match = Match.generate()
    _image = image.create(_geo.height, _geo.width)

    # image.show(_seismic)

    paint.wells(_image, _geo)
    paint.lines(_image, _geo, _match)
    paint.fill(_image, _geo, _match)

    # _image1 = _image.copy()
    # paint.fill(_image1, _geo, _match)
    # paint.wells(_image1, _geo)
    #
    # _, _image2 = cv2.threshold(_image1, 127, 255, cv2.THRESH_BINARY)
    #
    # print("Showing ...")
    # image.show(_image, _image1, _image2)

    _result = image.create(_transformation.height, _transformation.width)
    seismic.paint_transformation(_result, _transformation, _image)

    image.show(_image, paint.bined(_result))
Example #38
0
def gcs_matching(L, R, Seeds, auxilary, tau = TAU, mu = MU, show = True):

	# tau is the first-pass screening threshold, i.e. "we require a 90% positive match"
	# mu is an exploration threshold, i.e. "we only keep exploring if there is a significant improvement"

	(height, width) = shape = L.shape

	# ...

	Tau	= Tau_initialize(shape)
	Tau_count = 0

	# Best costs
	CC			= 0-numpy.inf*numpy.ones(shape)
	CC_prime	= 0-numpy.inf*numpy.ones(shape)

	# Best disparities
	DD			= 0-(1+DISPARITY)*numpy.ones(shape)
	DD_prime	= 0-(1+DISPARITY)*numpy.ones(shape)

	if show:
		import matplotlib.pyplot as pyplot
		figure, axis = pyplot.subplots()
		pyplot.plot()
		pyplot.hold(True)

	while not Seeds.empty():

		(_, ss) = Seeds.get()

		for qq in get_best_neighbours(L, R, ss, auxilary):

			(vv, uu, uu_prime) = qq

			cc = get_similarity(L, R, qq, auxilary)

			#T = 0.9 - float(Tau_count) / (height*width)
			
			if (cc < tau): continue
			if Tau_contains(Tau, qq): continue
			if (cc+mu < numpy.min([ CC[vv][uu], CC_prime[vv][uu_prime] ])): continue

			Tau_insert(Tau, qq)
			Tau_count += 1

			Seeds.put( (0-cc, qq) )
			
			if (CC[vv][uu] < cc):
				CC[vv][uu] = cc
				DD[vv][uu] = get_disparity(qq)

			if (CC_prime[vv][uu_prime] < cc):
				CC_prime[vv][uu_prime] = cc
				DD_prime[vv][uu_prime] = get_disparity(qq)

			if show and (Tau_count % 500) == 0:
				image.show(DD, block = False)
				figure.canvas.draw()
				axis.cla()

	return (DD, DD_prime, CC, CC_prime)
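
gcs_matching relies on three helpers that are not shown here: Tau_initialize, Tau_contains and Tau_insert. Their only job is to remember which correspondences qq = (vv, uu, uu_prime) have already been accepted, so a minimal sketch (an assumption about their implementation, using a plain Python set) could be:

def Tau_initialize(shape):
    # The image shape is accepted for interface compatibility; a set
    # keyed by the correspondence tuple is enough to record visited seeds.
    return set()

def Tau_contains(Tau, qq):
    # True if the correspondence (vv, uu, uu_prime) was already accepted.
    return tuple(qq) in Tau

def Tau_insert(Tau, qq):
    # Mark the correspondence as accepted.
    Tau.add(tuple(qq))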
Example #39
0
        height = numpy.min([L.shape[0], R.shape[0]])
        width = numpy.min([L.shape[1], R.shape[1]])

        L = L[:height, :width]
        R = R[:height, :width]

        # TODO: Scale rectify_mask?

        # Scale both dimensions down so the width does not exceed MAX_WIDTH

        if (MAX_WIDTH < width):
            aspect = float(MAX_WIDTH) / width
            L = resample(L, [int(aspect * x) for x in L.shape])
            R = resample(R, [int(aspect * x) for x in R.shape])

        print("Left shape: " + str(L.shape))
        print("Right shape: " + str(R.shape))

        (L_disparity_map, R_disparity_map, L_cost_map,
         R_cost_map) = gcs_matching(L, R)

        pickle.dump(L_disparity_map, open(L_disparity_map_path, "wb"))
        pickle.dump(R_disparity_map, open(R_disparity_map_path, "wb"))

    image.show(L_disparity_map, title="Left Disparity Map")
    image.show(R_disparity_map, title="Right Disparity Map")

    image.write("disparity_1.png", L_disparity_map)
    image.write("disparity_2.png", R_disparity_map)
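
The disparity maps produced by gcs_matching can contain negative values (down to -(1 + DISPARITY)), which do not map cleanly onto an 8-bit PNG. If image.write expects a displayable array, a small normalization step such as the sketch below (an assumption; normalize_for_png is not part of the source) could be applied before the writes above:

import numpy

def normalize_for_png(disparity_map):
    # Linearly map the (possibly negative) disparity range onto 0..255
    # so that image.write() receives a standard 8-bit grayscale array.
    dd = numpy.asarray(disparity_map, dtype=float)
    lo, hi = dd.min(), dd.max()
    if hi > lo:
        dd = (dd - lo) / (hi - lo)
    else:
        dd = numpy.zeros_like(dd)
    return (255 * dd).astype(numpy.uint8)

# Example usage (would replace the direct writes above):
# image.write("disparity_1.png", normalize_for_png(L_disparity_map))
# image.write("disparity_2.png", normalize_for_png(R_disparity_map))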