Example 1
#!/usr/bin/env python

# Python 2/3 compatibility
from __future__ import print_function

import cv2

if __name__ == '__main__':
    import sys
    try:
        fn = sys.argv[1]
    except IndexError:
        fn = '../data/fruits.jpg'

    img = cv2.imread(fn)
    if img is None:
        print('Failed to load image file:', fn)
        sys.exit(1)

    img2 = cv2.logPolar(img, (img.shape[0]/2, img.shape[1]/2), 40, cv2.WARP_FILL_OUTLIERS)
    img3 = cv2.linearPolar(img, (img.shape[0]/2, img.shape[1]/2), 40, cv2.WARP_FILL_OUTLIERS)

    cv2.imshow('before', img)
    cv2.imshow('logpolar', img2)
    cv2.imshow('linearpolar', img3)

    cv2.waitKey(0)
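    # A hedged extra step, not part of the original sample: the magnitude 40 above only
    # unrolls a small disc, so compute a radius that covers the whole frame and map back
    # with WARP_INVERSE_MAP (note the centre is given in (x, y) order).
    h, w = img.shape[:2]
    center = (w / 2, h / 2)
    max_radius = ((w / 2) ** 2 + (h / 2) ** 2) ** 0.5
    polar = cv2.linearPolar(img, center, max_radius, cv2.WARP_FILL_OUTLIERS)
    restored = cv2.linearPolar(polar, center, max_radius, cv2.WARP_INVERSE_MAP)
    cv2.imshow('fullpolar', polar)
    cv2.imshow('restored', restored)
    cv2.waitKey(0)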
Example 2
 def converter(self, filereader, output):
     cv2.linearPolar(
         self.intermediate_input,
         (Constants.CENTER_POINT_x,
          Constants.CENTER_POINT_z),
         Constants.SCAN_CONVERTER_SCALE,
         cv2.INTER_CUBIC + cv2.WARP_INVERSE_MAP,
         self.output)
     cv2.imwrite(output, self.output)
Example 3
    def convertImageToInput(self, im, theta=0):
        x,y = im.shape[1]/2,im.shape[0]/2
        im = cv.linearPolar(im, (x,y), y, cv.WARP_FILL_OUTLIERS)
        im = cv.resize(im, (256,160))

        im2 = np.zeros((256,256,3), dtype="uint8")
        im2[:160] = im
        im2[160:] = im[:256-160]
        return im2
Example 4
def main(argv):  
			
	im = imread('C:\\Users\\Franz\\Documents\\MATLAB\\dering\\test2.tif')
	
	# Get original size:
	origsize = im.shape

	# Up-scaling:
	im = cv2.resize(im, None, fx=1.0, fy=1.0, interpolation=cv2.INTER_CUBIC)
	rows, cols = im.shape
	cen_x = im.shape[1] / 2
	cen_y = im.shape[0] / 2

	# To polar:
	im = cv2.linearPolar(im, (cen_x, cen_y), amax([rows,cols]), cv2.INTER_CUBIC)

	# Padding:
	#cropsize = im.shape
	#im = pad(im, ((origsize[0] / 4, origsize[0] / 4), (origsize[1] / 2, 0)), 'symmetric')    
	#imsave('C:\\Temp\\padded.tif', im)

	# Filter:
	#im = homomorphic(im, "0.95;0.15")

	im = raven(im, "5;0.95")

	# Crop:
	#im = im[origsize[0] / 4:origsize[0] / 4 + cropsize[0],origsize[1] / 2:origsize[1] / 2 + cropsize[1]]
	#imsave('C:\\Temp\\cropped.tif', im)

	# Back to cartesian:
	im = cv2.linearPolar(im, (cen_x, cen_y), amax([rows,cols]), cv2.INTER_CUBIC + cv2.WARP_INVERSE_MAP)
	imsave('C:\\Temp\\p2c.tif', im)	

	# Down-scaling to original size:
	im = cv2.resize(im, (origsize[1], origsize[0]), interpolation = cv2.INTER_CUBIC)  # dsize is (width, height)
	imsave('C:\\Temp\\output.tif', im)
Example 5
def correct_limb_darkening(image, center, radius):
    # Fit limb darkening.
    # This assumes a square image in the polar transform

    # I(theta) = I(0) * ( 1 - ux)
    # I(theta) = I(0) * ( 1 - u(1-cos(theta)) )
    # cos(theta) = sqrt(radius**2 - r**2)/radius

    # Image dimensions
    naxis1 = image.shape[0]
    naxis2 = image.shape[1]
    # Coordinate of disk center
    xc = center[0]
    yc = center[1]

    imageP = cv2.linearPolar(image, center, naxis1, cv2.INTER_LANCZOS4 + cv2.WARP_FILL_OUTLIERS)
    # Take the median over theta (azimuthal average)
    Iaz_avg = np.median(imageP, 0)

    a = np.round(radius)
    r = np.arange(a - 10)

    Itheta = Iaz_avg[0:len(r)]

    x = 1 - np.sqrt(a ** 2 - r ** 2) / a
    y = Itheta / Itheta[0]

    fit = np.polyfit(x, y, 1)
    fit_fn = np.poly1d(fit)

    limb_factor = fit[0] * x + fit[1]

    # Define grid of cos(theta)
    xgrid1, ygrid1 = np.meshgrid(np.arange(naxis1), np.arange(naxis2))
    pixel_r = np.sqrt((xgrid1 - xc) ** 2 + (ygrid1 - yc) ** 2)

    cos_theta = np.sqrt(a ** 2 - pixel_r ** 2) / a

    x2 = 1 - cos_theta
    limb_factor2D = fit[0] * x2 + fit[1]

    mask_limb = pixel_r > a
    limb_factor2D[mask_limb] = 1

    image_limb = image / limb_factor2D

    return image_limb, fit, x, fit_fn(x)
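# A hedged usage sketch for the function above, on a synthetic limb-darkened disk
# (the size, radius and coefficient below are illustrative, not from the source).
import numpy as np
import cv2

n, radius, u = 512, 200.0, 0.6
yy, xx = np.mgrid[0:n, 0:n]
rr = np.sqrt((xx - n / 2) ** 2 + (yy - n / 2) ** 2)
cos_theta = np.sqrt(np.clip(radius ** 2 - rr ** 2, 0, None)) / radius
disk = np.where(rr <= radius, 1000.0 * (1 - u * (1 - cos_theta)), 0.0).astype(np.float32)

# Pixels outside the disk trigger a harmless sqrt-of-negative warning inside the function.
flattened, fit, x, model = correct_limb_darkening(disk, (n / 2, n / 2), radius)
print('fitted limb-darkening slope (~ -u):', fit[0])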
Example 6
def main():
    import sys
    try:
        fn = sys.argv[1]
    except IndexError:
        fn = 'fruits.jpg'

    img = cv.imread(cv.samples.findFile(fn))
    if img is None:
        print('Failed to load image file:', fn)
        sys.exit(1)

    img2 = cv.logPolar(img, (img.shape[0]/2, img.shape[1]/2), 40, cv.WARP_FILL_OUTLIERS)
    img3 = cv.linearPolar(img, (img.shape[0]/2, img.shape[1]/2), 40, cv.WARP_FILL_OUTLIERS)

    cv.imshow('before', img)
    cv.imshow('logpolar', img2)
    cv.imshow('linearpolar', img3)

    cv.waitKey(0)
    print('Done')
Example 7
def add_chromatic(im: np.ndarray, strength: float = 1,
                  radial_blur: bool = True) -> np.ndarray:
    """Splits <im> into red, green, and blue channels, then performs a
    1D Vertical Gaussian blur through a polar representation to create
    radial blur. Finally, it expands the green and blue channels slightly.
    Args:
        im
        strength: determines the amount of expansion and blurring.
        radial_blur: enable the radial blur if True
    """

    dims = get_polar_dimensions(im)
    if radial_blur:
        ima = im.astype(np.float64)

        if warppolar_available:
            flags = cv2.WARP_FILL_OUTLIERS + cv2.WARP_POLAR_LINEAR  # + cv2.INTER_CUBIC  # WARP_POLAR_LOG
            poles = cv2.warpPolar(ima, (dims["d"], dims["p"]), dims["c"], dims["d"], flags)
        else:
            flags = cv2.WARP_FILL_OUTLIERS  # + cv2.INTER_CUBIC
            poles = cv2.linearPolar(ima, dims["c"], dims["d"], flags)

        poles = np.rot90(poles, k=-1, axes=(0, 1))
        pchannels = split_channels(poles)
        bpolar, gpolar, rpolar = pchannels[0], pchannels[1], pchannels[2]

        bluramount = (dims["h"] + dims["w"] - 2) / 100 * strength
        if round(bluramount) > 0:
            rpolar = vertical_gaussian(rpolar, round(bluramount))
            gpolar = vertical_gaussian(gpolar, round(bluramount * 1.2))
            bpolar = vertical_gaussian(bpolar, round(bluramount * 1.4))

        rgbpolar = merge_channels([bpolar, gpolar, rpolar])
        rgbpolar = np.rot90(rgbpolar, k=1, axes=(0, 1))

        if warppolar_available:
            flags_inv = cv2.WARP_FILL_OUTLIERS + cv2.WARP_POLAR_LINEAR + cv2.WARP_INVERSE_MAP
            # + cv2.INTER_CUBIC # WARP_POLAR_LOG
            cartes = cv2.warpPolar(rgbpolar, (dims["w"], dims["h"]), dims["c"], dims["d"], flags_inv)
        else:
            flags_inv = cv2.WARP_INVERSE_MAP  # + cv2.INTER_CUBIC
            cartes = cv2.linearPolar(rgbpolar, dims["c"], dims["d"], flags_inv)

        cchannels = split_channels(cartes)
        bfinal, gfinal, rfinal = np.uint8(cchannels[0]), np.uint8(cchannels[1]), np.uint8(cchannels[2])
    else:
        # channels remain unchanged
        channels = split_channels(im)
        bfinal, gfinal, rfinal = channels[0], channels[1], channels[2]

    # enlarge the green and blue channels slightly, blue being the most enlarged
    gfinal = cv2.resize(
        gfinal,
        (round((1 + 0.018 * strength) * dims["w"]),
         round((1 + 0.018 * strength) * dims["h"])),
        interpolation=cv2.INTER_CUBIC)
    bfinal = cv2.resize(
        bfinal,
        (round((1 + 0.044 * strength) * dims["w"]),
         round((1 + 0.044 * strength) * dims["h"])),
        interpolation=cv2.INTER_CUBIC)

    # # center and merge the channels
    # rfinal = center_crop(rfinal, bfinal.shape[0:2])
    # gfinal = center_crop(gfinal, bfinal.shape[0:2])
    # imfinal = merge_channels([bfinal, gfinal, rfinal])

    # TODO: check
    # center and merge the channels
    sb = bfinal.shape[0:2]
    sg = gfinal.shape[0:2]
    sr = rfinal.shape[0:2]
    oh = min(sb[0], sg[0], sr[0])
    ow = min(sb[1], sg[1], sr[1])
    bfinal = center_crop(bfinal, (ow, oh))
    gfinal = center_crop(gfinal, (ow, oh))
    rfinal = center_crop(rfinal, (ow, oh))
    imfinal = merge_channels([bfinal, gfinal, rfinal])

    # crop the image to the original image dimensions
    return center_crop(imfinal, (dims["h"], dims["w"]))
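# The snippet above leans on helpers that are not shown (get_polar_dimensions,
# warppolar_available, split_channels, merge_channels, vertical_gaussian, center_crop).
# A minimal sketch of what the geometry helper might look like, inferred from the call
# sites; this is an assumption, not the project's actual implementation.
import numpy as np
import cv2

warppolar_available = hasattr(cv2, "warpPolar")   # warpPolar only exists in newer OpenCV builds

def get_polar_dimensions(im: np.ndarray) -> dict:
    h, w = im.shape[:2]
    center = (w / 2, h / 2)                            # "c": centre in (x, y)
    diag = int(np.ceil(np.sqrt(h ** 2 + w ** 2) / 2))  # "d": radius reaching the corners
    perim = int(np.ceil(2 * np.pi * diag))             # "p": angular rows of the polar image
    return {"h": h, "w": w, "c": center, "d": diag, "p": perim}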
Example 8
def polarConv(image, kernel, center):
    edge = cv2.Canny(image, 100, 200)
    edge = cv2.dilate(edge, kernel, iterations=1)
    polar = cv2.linearPolar(edge, center, 40, cv2.WARP_FILL_OUTLIERS)
    return polar
Example 9
def stateGenerator():
    global velocityPub, lastSegments, lastPose, markersPub, lastMap, lastCostmap, transformListener

    robotPathMap = np.array([[255] * 200] * 200, dtype=np.uint8)
    plannedGoal = None
    lastLastPose = None
    subPixelDiffX = 0
    subPixelDiffY = 0

    while True:
        #wait for voronoi
        while len(lastMap) == 0 or not lastPose:
            print("Waiting for map/pose")
            yield

        (trans,
         rot) = transformListener.lookupTransform(lastCostmap.header.frame_id,
                                                  "base_link", rospy.Time(0))
        rotationAngle = tf.transformations.euler_from_quaternion(rot)[2]
        #shift robotPathMap
        lock.acquire()

        if lastLastPose is not None:
            rollX = int((lastLastPose.x - lastPose.x) /
                        lastCostmap.info.resolution + subPixelDiffX)
            subPixelDiffX = (
                lastLastPose.x - lastPose.x
            ) / lastCostmap.info.resolution + subPixelDiffX - rollX
            rollY = int((lastLastPose.y - lastPose.y) /
                        lastCostmap.info.resolution + subPixelDiffY)
            subPixelDiffY = (
                lastLastPose.y - lastPose.y
            ) / lastCostmap.info.resolution + subPixelDiffY - rollY

            robotPathMap = np.roll(robotPathMap, rollX, axis=1)
            robotPathMap = np.roll(robotPathMap, -rollY, axis=0)
        lastLastPose = lastPose
        lock.release()
        binaryMap = np.array(np.where(lastMap == 0, 255, 0), dtype=np.uint8)
        binaryMap = np.minimum(binaryMap, robotPathMap)
        #pdb.set_trace()
        # binaryMap = np.array(np.where(lastMap == 0, 0, 255),dtype = np.uint8)
        #distMap = skeletonize(binaryMap)
        #distMap = skimage.morphology.skeletonize(binaryMap)

        distMap = np.uint8(cv2.distanceTransform(binaryMap, cv2.DIST_L2, 3))
        distMap = cv2.GaussianBlur(distMap, (3, 3), 0)
        threshMap = np.where(distMap > DIST_THRESHOLD, distMap, 0)

        polarMap = cv2.linearPolar(
            threshMap, (threshMap.shape[1] / 2, threshMap.shape[0] / 2),
            threshMap.shape[0], cv2.WARP_FILL_OUTLIERS)
        polarMap = np.roll(
            polarMap,
            -int(polarMap.shape[0] / 2 - rotationAngle * polarMap.shape[0] /
                 (2 * math.pi)), 0)

        #find histogram of distances around the robot
        histogram = polarMap[:, DIST_IN_POLAR]
        #print(histogram)
        #find local maxima -> candidate directions
        candidates = localmax(histogram)
        markerArray = MarkerArray()

        #if directionChange < minAngleDist and directionChange > -math.pi * 3/4:
        #                minVertex = vertex
        #                minAngleDist = abs(directionChange)
        minAngleDist = 999999
        minAngle = None
        isFirst = True
        for candidate in candidates:
            angle = (candidate[0] + candidate[1]
                     ) / 2 * math.pi * 2 / histogram.shape[0] - math.pi
            print("angle=%lf" % (angle))
            #if angle < minAngleDist and angle > -math.pi * 3/4:
            size = float(histogram[candidate[0]]) / 10
            pointA = Object()
            lock.acquire()
            pointA.x = lastPose.x + math.cos(angle - lastPose.heading) * size
            pointA.y = lastPose.y - math.sin(angle - lastPose.heading) * size
            if angle > -math.pi * 0.75 and isFirst:  #candidate[0] == candidates[0][0]:
                #it is the first candidate
                color = [1.0, 0, 0]
                isFirst = False
                minAngle = angle
            else:
                color = [0, 1.0, 0]

            marker = getMarkerForLine(pointA, lastPose, color)
            lock.release()
            markerArray.markers.append(marker)

        if minAngle is not None:
            cmd_vel = Twist()
            if abs(minAngle) > SPOT_TURNING_THRESHOLD:
                cmd_vel.linear.x = 0
            else:
                cmd_vel.linear.x = SPEED
            angSpeed = -minAngle * ANG_FACTOR
            cmd_vel.angular.z = angSpeed
            velocityPub.publish(cmd_vel)

        #mark own path
        robotPathMap[robotPathMap.shape[0] / 2][robotPathMap.shape[0] / 2] = 0

        print("-----")
        markersPub.publish(markerArray)

        FACTOR = 5
        cv2.circle(threshMap, (100, 100), DIST_IN_POLAR, (255))
        #cv2.imshow('distMap',cv2.resize(cv2.applyColorMap(threshMap * FACTOR, cv2.COLORMAP_JET),(300,300)))
        cv2.imshow('distMap', cv2.resize(threshMap * FACTOR, (800, 800)))
        cv2.imshow('robotPathMap', cv2.resize(robotPathMap * FACTOR,
                                              (400, 400)))
        cv2.imshow('binaryMap', cv2.resize(binaryMap * FACTOR, (400, 400)))
        cv2.imshow('polar', cv2.resize(polarMap * FACTOR, (300, 300)))
        cv2.imshow('histogram', cv2.resize(histogram * FACTOR, (30, 300)))
        cv2.waitKey(1)
        yield
        continue

        while not len(lastSegments) or not lastMap or not lastPose:
            cmd_vel = Twist()
            cmd_vel.angular.z = SPEED
            velocityPub.publish(cmd_vel)
            print("waiting for segments")
            yield

        #if there is no plan, go to the nearest vertex
        if not len(lastSegments):
            print("No last segments")
            continue
        if not plannedGoal:
            print("Searching nearest vertex")
            nearest = findNearestVertex(lastSegments, lastPose)
            if not nearest:
                continue
            plannedGoal = Object()
            plannedGoal.x = nearest.x
            plannedGoal.y = nearest.y

        lock.acquire()
        if robomath.distPose(plannedGoal, lastPose) <= GOAL_REACHED_THRESHOLD:
            #goal is reached
            #TODO plan to the next goal
            print("goal reached")
            #find left most vertex
            minAngleDist = 99999
            minVertex = None
            for vertex in lastSegments:
                dist = robomath.distPose(vertex, lastPose)
                if CURRENT_VERTEX_THRESHOLD < dist < NEAR_VERTEX_THRESHOLD:
                    directionChange = robomath.normalizeAngle(
                        math.atan2(vertex.y - lastPose.y, vertex.x -
                                   lastPose.x) - lastPose.heading)
                    if directionChange < minAngleDist and directionChange > -math.pi * 3 / 4:
                        minVertex = vertex
                        minAngleDist = abs(directionChange)

            lock.release()
            print("minAngleDist=%f" % (minAngleDist))
            plannedGoal = minVertex
            yield
        else:
            #go to the goal
            print("go to goal")
            markerArray = MarkerArray()
            marker = getMarkerForPose(plannedGoal)
            for vertex in lastSegments:
                markerArray.markers.append(
                    getMarkerForVertex(vertex, vertex.id, 0.1))
            markerArray.markers.append(marker)
            markerArray.markers.append(
                getMarkerForVertex(lastSegments[0], "first vertex", 0.5))
            markersPub.publish(markerArray)
            #try:
            #pdb.set_trace()
            directionChange = robomath.normalizeAngle(
                math.atan2(plannedGoal.y - lastPose.y, plannedGoal.x -
                           lastPose.x) - lastPose.heading)  # - math.pi/2)
            lock.release()
            #except:
            #    print(plannedGoal)
            #    print("lastPose %f,%f,%f" % (lastPose.x,lastPose.y, lastPose.heading))
            #    #pdb.set_trace()
            #print("go2waypoint:directionOfWaypoint:",directionChange)
            cmd_vel = Twist()
            if abs(directionChange) > SPOT_TURNING_THRESHOLD:
                cmd_vel.linear.x = 0
            else:
                cmd_vel.linear.x = SPEED
            angSpeed = directionChange * ANG_FACTOR
            cmd_vel.angular.z = angSpeed
            velocityPub.publish(cmd_vel)
            print("go to goal finished")
            yield
Example 10
root.withdraw()
filename = askopenfilename()
print(filename)

pInputImage = io.imread(filename,as_grey=True)

nX = np.size(pInputImage,0)
nY = np.size(pInputImage,1)

nMaxRadius = np.sqrt(nX**2+nY**2)/2

#-----------------------------------------------------------------
# Transform image into polar co-ordinates
#-----------------------------------------------------------------

pTransformImg = linearPolar(pInputImage,(nX/2,nY/2),nMaxRadius, flags=WARP_FILL_OUTLIERS)
pTransformImg = pTransformImg.T

#-----------------------------------------------------------------
# Apply CSP to segment fetal skull
#-----------------------------------------------------------------

pTransformImg = CSP(pTransformImg)

#-----------------------------------------------------------------
# Transform back to cartesian co-ordinates
#-----------------------------------------------------------------

pTransformImg = linearPolar(pTransformImg.T,(nX/2,nY/2),nMaxRadius, flags=WARP_INVERSE_MAP)

#-----------------------------------------------------------------
Example 11
def verify():
    tkMessageBox.showinfo("Image", "Select")
    filename = askopenfilename()
    im = cv2.imread(filename)
    img = cv2.medianBlur(im, 5)

    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # detect circles
    gray = cv2.medianBlur(cv2.cvtColor(img, cv2.COLOR_RGB2GRAY), 5)
    circles = cv2.HoughCircles(gray,
                               cv2.HOUGH_GRADIENT,
                               1,
                               30.0,
                               param1=50,
                               param2=110,
                               minRadius=0,
                               maxRadius=0)
    circles = np.uint16(np.around(circles))

    # draw mask
    mask = np.full((img.shape[0], img.shape[1]), 0,
                   dtype=np.uint8)  # mask is only
    for i in circles[0, :]:
        cv2.circle(mask, (i[0], i[1]), i[2], (255, 255, 255), -1)

    # get first masked value (foreground)
    fg = cv2.bitwise_and(img, img, mask=mask)

    #cv2.imshow("godady",fg)
    cv2.imwrite("p.png", fg)
    cv2.waitKey(0)

    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # detect circles
    gray = cv2.medianBlur(cv2.cvtColor(img, cv2.COLOR_RGB2GRAY), 5)
    circles = cv2.HoughCircles(gray,
                               cv2.HOUGH_GRADIENT,
                               2,
                               100.0,
                               param1=30,
                               param2=150,
                               minRadius=90,
                               maxRadius=140)
    circles = np.uint16(np.around(circles))

    # draw mask
    mask = np.full((img.shape[0], img.shape[1]), 0,
                   dtype=np.uint8)  # mask is only
    for i in circles[0, :]:
        cv2.circle(mask, (i[0], i[1]), i[2], (255, 255, 255), -1)

    # get first masked value (foreground)
    fg = cv2.bitwise_and(img, img, mask=mask)

    #cv2.imshow("masked",fg)
    cv2.imwrite("m.png", fg)

    pic1 = cv2.imread('p.png')
    pic2 = cv2.imread('m.png')
    mask_ab = cv2.bitwise_xor(pic2, pic1)

    #cv2.imshow('abc',mask_ab)
    cv2.imwrite('final.png', mask_ab)

    cv2.waitKey(0)

    img = cv2.imread('final.png')

    #img2 = cv2.logPolar(img, (img.shape[0]/2, img.shape[1]/2), 40, cv2.WARP_FILL_OUTLIERS)
    img3 = cv2.linearPolar(img, (img.shape[0] / 2, img.shape[1] / 2), 40,
                           cv2.WARP_FILL_OUTLIERS)

    #cv2.imshow('before', img)
    #cv2.imshow('logpolar', img2)
    #cv2.imshow('linearpolar', img3)

    cv2.imwrite('polar.png', img3)

    cv2.waitKey(0)

    def build_filters():
        filters = []
        ksize = 31
        for theta in np.arange(0, np.pi, np.pi / 16):
            kern = cv2.getGaborKernel((ksize, ksize),
                                      4.0,
                                      theta,
                                      10.0,
                                      0.5,
                                      0,
                                      ktype=cv2.CV_32F)
            kern /= 1.5 * kern.sum()
            filters.append(kern)
        return filters

    def process(img, filters):
        accum = np.zeros_like(img)
        for kern in filters:
            fimg = cv2.filter2D(img, cv2.CV_8UC3, kern)
            np.maximum(accum, fimg, accum)
        return accum

    if __name__ == '__main__':
        img = cv2.imread('polar.png')

        filters = build_filters()

        res1 = process(img, filters)
        cv2.imshow('result', res1)
        cv2.imwrite("gh.png", res1)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

        import image_slicer
        xy = image_slicer.slice('polar.png', 16)
        yz = image_slicer.slice('gh.png', 16)

        cv2.waitKey(0)

    import sys

    img = cv2.imread('polar_01_01.png')
    px1 = img[64, 32]

    img = cv2.imread('gh_01_01.png')
    px2 = img[64, 32]

    if (px1[1] >= px2[1]):
        sys.stdout = open("test.txt", "w")
        print("1")

    else:
        sys.stdout = open("test.txt", "w")
        print("0")

    img = cv2.imread('polar_01_02.png')
    px3 = img[64, 32]

    img = cv2.imread('gh_01_02.png')
    px4 = img[64, 32]

    if (px3[1] >= px4[1]):
        sys.stdout = open("test.txt", "a")
        print("1")
    else:
        sys.stdout = open("test.txt", "a")
        print("0")

    img = cv2.imread('polar_01_03.png')
    px5 = img[64, 32]

    img = cv2.imread('gh_01_03.png')
    px6 = img[64, 32]

    if (px5[1] >= px6[1]):
        sys.stdout = open("test.txt", "a")
        print("1")
    else:
        sys.stdout = open("test.txt", "a")
        print("0")

    img = cv2.imread('polar_01_04.png')
    px7 = img[64, 32]

    img = cv2.imread('gh_01_04.png')
    px8 = img[64, 32]

    if (px7[1] >= px8[1]):
        sys.stdout = open("test.txt", "a")
        print("1")
    else:
        sys.stdout = open("test.txt", "a")
        print("0")

    img = cv2.imread('gh_02_01.png')
    px9 = img[64, 32]

    img = cv2.imread('polar_02_01.png')
    px10 = img[64, 32]

    if (px9[1] >= px10[1]):
        sys.stdout = open("test.txt", "a")
        print("1")
    else:
        sys.stdout = open("test.txt", "a")
        print("0")

    img = cv2.imread('gh_02_02.png')
    px11 = img[64, 32]

    img = cv2.imread('polar_02_02.png')
    px12 = img[64, 32]

    if (px11[1] >= px12[1]):
        sys.stdout = open("test.txt", "a")
        print("1")
    else:
        sys.stdout = open("test.txt", "a")
        print("0")

    img = cv2.imread('gh_02_03.png')
    px13 = img[64, 32]

    img = cv2.imread('polar_02_03.png')
    px14 = img[64, 32]

    if (px13[1] >= px14[1]):
        sys.stdout = open("test.txt", "a")
        print("1")
    else:
        sys.stdout = open("test.txt", "a")
        print("0")
    img = cv2.imread('polar_02_04.png')
    px15 = img[64, 32]

    img = cv2.imread('gh_02_04.png')
    px16 = img[64, 32]

    if (px15[1] >= px16[1]):
        sys.stdout = open("test.txt", "a")
        print("1")
    else:
        sys.stdout = open("test.txt", "a")
        print("0")

    img = cv2.imread('gh_03_01.png')
    px17 = img[64, 32]

    img = cv2.imread('polar_03_01.png')
    px18 = img[64, 32]

    if (px17[1] >= px18[1]):
        sys.stdout = open("test.txt", "a")
        print("1")
    else:
        sys.stdout = open("test.txt", "a")
        print("0")

    img = cv2.imread('polar_03_02.png')
    px19 = img[64, 32]

    img = cv2.imread('gh_03_02.png')
    px20 = img[64, 32]

    if (px19[1] >= px20[1]):
        sys.stdout = open("test.txt", "a")
        print("1")
    else:
        sys.stdout = open("test.txt", "a")
        print("0")

    img = cv2.imread('gh_03_03.png')
    px21 = img[64, 32]

    img = cv2.imread('polar_03_03.png')
    px22 = img[64, 32]

    if (px21[1] >= px22[1]):
        sys.stdout = open("test.txt", "a")
        print("1")
    else:
        sys.stdout = open("test.txt", "a")
        print("0")

    img = cv2.imread('polar_03_04.png')
    px23 = img[64, 32]

    img = cv2.imread('gh_03_04.png')
    px24 = img[64, 32]

    if (px23[1] >= px24[1]):
        sys.stdout = open("test.txt", "a")
        print("1")
    else:
        sys.stdout = open("test.txt", "a")
        print("0")

    img = cv2.imread('gh_04_01.png')
    px25 = img[64, 32]

    img = cv2.imread('polar_04_01.png')
    px26 = img[64, 32]

    if (px25[1] >= px26[1]):
        sys.stdout = open("test.txt", "a")
        print("1")
    else:
        sys.stdout = open("test.txt", "a")
        print("0")

    img = cv2.imread('gh_04_02.png')
    px27 = img[64, 32]

    img = cv2.imread('polar_04_02.png')
    px28 = img[64, 32]

    if (px27[1] >= px28[1]):
        sys.stdout = open("test.txt", "a")
        print("1")
    else:
        sys.stdout = open("test.txt", "a")
        print("0")

    img = cv2.imread('polar_04_03.png')
    px29 = img[64, 32]

    img = cv2.imread('gh_04_03.png')
    px30 = img[64, 32]

    if (px29[1] >= px30[1]):
        sys.stdout = open("test.txt", "a")
        print("1")
    else:
        sys.stdout = open("test.txt", "a")
        print("0")

    img = cv2.imread('gh_04_04.png')
    px31 = img[64, 32]

    img = cv2.imread('polar_04_04.png')
    px32 = img[64, 32]

    if (px31[1] >= px32[1]):
        sys.stdout = open("test.txt", "a")
        print("1")
    else:
        sys.stdout = open("test.txt", "a")
        print("0")

    import filecmp

    filecmp.cmp("test.txt", "match1.txt")
Example 12
        #Costmatrix = COSTMtrix.matrix_cal_corre_full_version(steam)

        #cv2.imshow('initial',frame)
        #cv2.imshow('gray',gray)
        #cv2.imshow('slice_vertical',crop_V_test)
        #cv2.imshow('slice_horizental',crop_H_test)
        #cv2.imshow('steamfilter',stea_filter.astype(np.uint8))
        #cv2.imshow('ffilter',filter_img)
        #cv2.imshow('costmatrix',Costmatrix.astype(np.uint8))
        new_frame2 = cv2.rotate(gray, rotateCode=2)
        new_frame3 = new_frame2.astype(float)
        H, W = new_frame2.shape
        circular3 = np.ones((H, W))
        circular2 = circular3.astype(float)
        circular = circular2 * 2
        circular = cv2.linearPolar(new_frame3, (int(W / 2), int(H / 2)), 400,
                                   cv2.WARP_INVERSE_MAP)
        circular = circular.astype(np.uint8)

        #cv2.imwrite(savedir_matrix  + str(save_sequence_num) +".jpg", Costmatrix)
        cv2.imwrite(savedir_original + str(save_sequence_num) + ".jpg",
                    crop_H_test)
        cv2.imwrite(savedir_filtered_OCT + str(save_sequence_num) + ".jpg",
                    filter_img)
        cv2.imwrite(
            savedir_original_circular + str(save_sequence_num) + ".jpg",
            circular)

        save_sequence_num += 1
        print("[%s]   is processed." % (save_sequence_num))

    # Break the loop
Example 13
def main():
    """Create the model and start the evaluation process."""

    args = get_arguments()

    gpu0 = args.gpu

    if not os.path.exists(args.save):
        os.makedirs(args.save)

    model = UNet(3, n_classes=args.num_classes)


    saved_state_dict = torch.load(args.restore_from)
    model.load_state_dict(saved_state_dict)

    model.cuda(gpu0)
    model.train()

    testloader = data.DataLoader(REFUGE(False, domain='REFUGE_TEST', is_transform=True),
                                    batch_size=args.batch_size, shuffle=False, pin_memory=True)


    if version.parse(torch.__version__) >= version.parse('0.4.0'):
        interp = nn.Upsample(size=(460, 460), mode='bilinear', align_corners=True)
    else:
        interp = nn.Upsample(size=(460, 460), mode='bilinear')

    for index, batch in enumerate(testloader):
        if index % 100 == 0:
            print('%d processed' % index)
        image, label, _, _, name = batch
        if args.model == 'Unet':
            _,_,_,_, output2  = model(Variable(image, volatile=True).cuda(gpu0))

            output = interp(output2).cpu().data.numpy()


        for idx, one_name in enumerate(name):
            pred = output[idx]
            pred = pred.transpose(1,2,0)
            pred = np.asarray(np.argmax(pred, axis=2), dtype=np.uint8)
            output_col = colorize_mask(pred)

            if is_polar:

                # plt.imshow(output_col)
                # plt.show()


                output_col = np.array(output_col)
                output_col[output_col == 0] = 0
                output_col[output_col == 1] = 128
                output_col[output_col == 2] = 255

                # plt.imshow(output_col)
                # plt.show()

                output_col = cv2.linearPolar(rotate(output_col, 90), (args.ROI_size / 2, args.ROI_size / 2),
                                             args.ROI_size / 2, cv2.WARP_FILL_OUTLIERS + cv2.WARP_INVERSE_MAP)

                # plt.imshow(output_col)
                # plt.show()

                output_col = np.array(output_col * 255, dtype=np.uint8)
                output_col[output_col > 200] = 210
                output_col[output_col == 0] = 255
                output_col[output_col == 210] = 0
                output_col[(output_col > 0) & (output_col < 255)] = 128

                output_col = Image.fromarray(output_col)

                # plt.imshow(output_col)
                # plt.show()

            one_name = one_name.split('/')[-1]
            if len(one_name.split('_'))>1:
                one_name = one_name[:-4]
            #pred.save('%s/%s.bmp' % (args.save, one_name))
            output_col = output_col.convert('L')

            print(output_col.size)
            output_col.save('%s/%s.bmp' % (args.save, one_name.split('.')[0]))
Example 14
 def _cv_linear_polar(self, image, flags):
     h, w = image.shape[:2]
     r = math.sqrt(w**2 + h**2) / 2
     return cv2.linearPolar(image, (w / 2, h / 2), r, flags)
Example 15
def unfold_gauge(image, center=(img_size/2, img_size/2)):
    return cv2.linearPolar(image, center, img_size/2, cv2.INTER_LINEAR+cv2.WARP_FILL_OUTLIERS)
Example 16
    #frame = cv2.imread('/Users/geewiz/Desktop/2017-01-18-lachine/larry/100.png')
    frame = cv2.imread(filename)

    # get half image
    print(frame.shape)
    height, width = frame.shape[:2]
    top = frame[0:height, 0:width / 2]
    bot = frame[0:height, width / 2:width]

    #### for top half
    # unwarp
    small = cv2.pyrDown(top)
    small2 = cv2.pyrDown(small)
    h, w = small.shape[:2]
    unwarp = cv2.linearPolar(small, (w / 2, h / 2), h / 2,
                             cv2.WARP_FILL_OUTLIERS)
    # rotate
    rot_mat = cv2.getRotationMatrix2D((w / 2, h / 2), -90, 1.0)
    rotate = cv2.warpAffine(unwarp, rot_mat, (w, h), flags=cv2.INTER_LINEAR)
    #resize
    pano_top = cv2.resize(rotate,
                          (w * 2, h / 2))  #, interpolation=cv2.CV_INTER_CUBIC)

    #### for bot half
    small = cv2.pyrDown(bot)
    small2 = cv2.pyrDown(small)
    h, w = small.shape[:2]
    # unwarp
    unwarp = cv2.linearPolar(small, (w / 2, h / 2), h / 2,
                             cv2.WARP_FILL_OUTLIERS)
    # rotate
Example 17
    del train_onecell_reflab
    del train_onecell_testlab
    del test_onecell_reflab
    del test_onecell_testlab

    # convert input images to polar coordinates
    # shape = (im, row, col, chan)
    # row = Y, col = X
    center = ((train_onecell_im.shape[2] - 1) / 2.0,
              (train_onecell_im.shape[1] - 1) / 2.0)
    maxRadius = np.sqrt(train_onecell_im.shape[2]**2 +
                        train_onecell_im.shape[1]**2) / 2
    for i in range(train_onecell_im.shape[0]):
        train_onecell_im[i, :, :, :] = cv2.linearPolar(
            train_onecell_im[i, :, :, :],
            center=center,
            maxRadius=maxRadius,
            flags=cv2.INTER_LINEAR + cv2.WARP_FILL_OUTLIERS)

    for i in range(test_onecell_im.shape[0]):
        test_onecell_im[i, :, :, :] = cv2.linearPolar(
            test_onecell_im[i, :, :, :],
            center=center,
            maxRadius=maxRadius,
            flags=cv2.INTER_LINEAR + cv2.WARP_FILL_OUTLIERS)
    '''Neural network training
    '''

    # list all CPUs and GPUs
    device_list = K.get_session().list_devices()
Example 18
# Python 2/3 compatibility
from __future__ import print_function

import cv2

if __name__ == '__main__':
    print(__doc__)

    import sys
    try:
        fn = sys.argv[1]
    except IndexError:
        fn = '../data/fruits.jpg'

    img = cv2.imread(fn)
    if img is None:
        print('Failed to load image file:', fn)
        sys.exit(1)

    img2 = cv2.logPolar(img, (img.shape[0] / 2, img.shape[1] / 2), 40,
                        cv2.WARP_FILL_OUTLIERS)
    img3 = cv2.linearPolar(img, (img.shape[0] / 2, img.shape[1] / 2), 40,
                           cv2.WARP_FILL_OUTLIERS)

    cv2.imshow('before', img)
    cv2.imshow('logpolar', img2)
    cv2.imshow('linearpolar', img3)

    cv2.waitKey(0)
Example 19
def clockDepolarize(image, size = 720):
    radius = size //2
    image = cv2.linearPolar(image, (radius, radius), radius, cv2.WARP_FILL_OUTLIERS)
    cv2.imwrite('./static/depolarizedClock{}.bmp'.format(iterations), image)
    return image
Example 20
i = 92
crop_im = get_cropped_circle(circles[i][0], circles[i][1], circles[i][2], 1.1,
                             binim)
crop_im2 = get_cropped_circle(circles[i][0], circles[i][1], circles[i][2], 1.1,
                              im2)

# Do the wacky polar transformation!
#--- ensure image is of the type float ---
img = crop_im.astype(np.float32)

#--- the following holds the square root of the sum of squares of the image dimensions ---
#--- this is done so that the entire width/height of the original image is used to express the complete circular range of the resulting polar image ---
value = np.sqrt(((img.shape[1] / 2.0)**2.0) + ((img.shape[0] / 2.0)**2.0))

polar_image = cv2.linearPolar(img, (img.shape[1] / 2, img.shape[0] / 2), value,
                              cv2.WARP_FILL_OUTLIERS)
polar_image = polar_image.astype(np.uint8)

# Get leftmost pixel from edge!
top_pixels = np.zeros(polar_image.shape)
crop_hgt = polar_image.shape[0]
#pxl_loc = np.zeros(crop_hgt) # gaps/zeros potential problem?

for i in range(crop_hgt):
    for j, pxl in enumerate(polar_image[i, :]):
        if pxl == 255:
            top_pixels[i][j] = 255
            #            pxl_loc[j] = i
            break
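
# An equivalent vectorized form of the nested loop above (a sketch; top_pixels_vec is a
# new name): argmax on the boolean mask gives the first matching column of each row.
hits = polar_image == 255
first_col = hits.argmax(axis=1)                # first 255 per row (0 when the row has none)
valid_rows = np.where(hits.any(axis=1))[0]     # keep only rows that contain an edge pixel
top_pixels_vec = np.zeros_like(polar_image)
top_pixels_vec[valid_rows, first_col[valid_rows]] = 255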

# Dilate that shiz
Example 21
            # plt.title('ROI_mask_result')
            # plt.show()

            ROI_mask_result = (255.0 / ROI_mask_result.max() * (ROI_mask_result - ROI_mask_result.min())).astype(np.uint8)
            ROI_mask_result[ROI_mask_result == 255] = 200
            ROI_mask_result[ROI_mask_result == 0] = 255
            ROI_mask_result[ROI_mask_result == 200] = 0
            ROI_mask_result[(ROI_mask_result > 0) & (ROI_mask_result < 255)] = 128
            # plt.imshow(ROI_mask_result)
            # plt.title('ROI_mask_result')
            # plt.show()

        if is_polar_coordinate:

            if is_only_image == False:
                ROI_mask_result = rotate(cv2.linearPolar(ROI_mask_result, (DiscROI_size / 2, DiscROI_size / 2), DiscROI_size / 2,
                                            cv2.INTER_NEAREST + cv2.WARP_FILL_OUTLIERS), -90)
                # plt.imshow(ROI_mask_result)
                # plt.title('ROI_mask_result')
                # plt.show()


            ROI_img_result = rotate(cv2.linearPolar(org_img_disc_region, (DiscROI_size / 2, DiscROI_size / 2), DiscROI_size / 2,
                                            cv2.INTER_NEAREST + cv2.WARP_FILL_OUTLIERS), -90)
        else:
            ROI_img_result = org_img_disc_region


        filename_ROI_Img = '{}_{}{}'.format(temp_txt[0][:nameLen - 4], DiscROI_size, train_data_type)
        imsave(Image_save_path + filename_ROI_Img, ROI_img_result)
        if is_only_image == False:
            filename_ROI_Mask = '{}_{}{}'.format(temp_txt[0][:nameLen - 4], DiscROI_size, mask_data_type)
Example 22
    org_img = np.asarray(image.load_img(test_data_path + temp_txt))
    # Disc region detection by U-Net
    temp_img = resize(org_img, (DiscSeg_size, DiscSeg_size, 3)) * 255
    temp_img = np.reshape(temp_img, (1, ) + temp_img.shape)
    disc_map = DiscSeg_model.predict([temp_img])
    disc_map = BW_img(np.reshape(disc_map, (DiscSeg_size, DiscSeg_size)), 0.5)

    regions = regionprops(label(disc_map))
    C_x = int(regions[0].centroid[0] * org_img.shape[0] / DiscSeg_size)
    C_y = int(regions[0].centroid[1] * org_img.shape[1] / DiscSeg_size)
    disc_region, err_xy, crop_xy = disc_crop(org_img, DiscROI_size, C_x, C_y)

    # Disc and Cup segmentation by M-Net
    run_start = time()
    Disc_flat = rotate(
        cv2.linearPolar(disc_region, (DiscROI_size / 2, DiscROI_size / 2),
                        DiscROI_size / 2, cv2.WARP_FILL_OUTLIERS), -90)

    temp_img = pro_process(Disc_flat, CDRSeg_size)
    temp_img = np.reshape(temp_img, (1, ) + temp_img.shape)
    [_, _, _, _, prob_10] = CDRSeg_model.predict(temp_img)
    run_end = time()

    # Extract mask
    prob_map = np.reshape(
        prob_10, (prob_10.shape[1], prob_10.shape[2], prob_10.shape[3]))
    disc_map = scipy.misc.imresize(prob_map[:, :, 0],
                                   (DiscROI_size, DiscROI_size))
    cup_map = scipy.misc.imresize(prob_map[:, :, 1],
                                  (DiscROI_size, DiscROI_size))
    disc_map[-round(DiscROI_size / 3):, :] = 0
    cup_map[-round(DiscROI_size / 2):, :] = 0
Example 23
            tr = img[ny][min(63, nx + 1)]
            bl = img[min(63, ny + 1)][nx]
            br = img[min(63, ny + 1)][min(63, nx + 1)]

            pl = tl * (1 - oy) + bl * oy
            pr = tr * (1 - oy) + br * oy
            p = pl * (1 - ox) + pr * ox
            op[line][px] = p
    return op


img = cv2.imread('test.bmp', 1)
# smi = os.open("/dev/smi", os.O_RDWR)

while 1:
    polar_image = cv2.linearPolar(img, (img.shape[0] / 2, img.shape[1] / 2),
                                  img.shape[0] / 2, cv2.WARP_FILL_OUTLIERS)
    polar_image = cv2.warpPolar(img, (img.shape[0] / 2, 500),
                                (img.shape[0] / 2, img.shape[1] / 2),
                                img.shape[0] / 2, cv2.WARP_FILL_OUTLIERS)
    # polar_image = cv2.remap(img, map1, None, cv2.INTER_NEAREST)
    polar_image = make_polar(img)

    cv2.imshow("Polar Image", polar_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    output = [
        bytearray(l)
        for l in cv2.remap(img, map1, None, cv2.INTER_NEAREST).reshape((
            500, img.shape[0] * 3 / 2))
    ]
Example 24
img2 = cv2.resize(img2, (1024, 1024))

cx, cy = img1.shape
M = cv2.getRotationMatrix2D((cx / 2, cy / 2), 30, 1)
# img2 = cv2.warpAffine(img1, M, (cx, cy))

hanw = cv2.createHanningWindow((cx, cy), cv2.CV_64F)
img1 = img1 * hanw
f1 = np.fft.fft2(img1)
fshift1 = np.fft.fftshift(f1)
magnitude_spectrum1 = 10 * np.log(np.abs(fshift1) + 1)
cv2.imwrite('log_FFT1.png', magnitude_spectrum1)

M90 = cv2.getRotationMatrix2D((cx / 2, cy / 2), 90, 1)
polar_map1 = cv2.linearPolar(magnitude_spectrum1, (cx / 2, cy / 2),
                             cx / np.log(cx),
                             flags=cv2.INTER_LINEAR + cv2.WARP_FILL_OUTLIERS)
p1 = cv2.warpAffine(polar_map1, M90, (cx, cy))
cv2.imwrite('polar1.png', p1)

img2 = img2 * hanw
f2 = np.fft.fft2(img2)
fshift2 = np.fft.fftshift(f2)
magnitude_spectrum2 = 10 * np.log(np.abs(fshift2) + 1)
cv2.imwrite('log_FFT2.png', magnitude_spectrum2)
polar_map2 = cv2.linearPolar(magnitude_spectrum2, (cx / 2, cy / 2),
                             cx / np.log(cx),
                             flags=cv2.INTER_LINEAR + cv2.WARP_FILL_OUTLIERS)
p2 = cv2.warpAffine(polar_map2, M90, (cx, cy))
cv2.imwrite('polar2.png', p2)
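
# A hedged follow-up sketch (assuming polar_map1 and polar_map2 have the same size): in
# linearPolar output the angle runs along the rows, so the rotation between the two
# spectra shows up as a vertical shift that cv2.phaseCorrelate can measure (the sign
# depends on the rotation direction).
(shift_x, shift_y), response = cv2.phaseCorrelate(polar_map1, polar_map2)
rows = polar_map1.shape[0]
angle_deg = 360.0 * shift_y / rows
print('estimated rotation: %.2f deg (response %.3f)' % (angle_deg, response))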
Example 25
# -*- coding: utf-8 -*-
import cv2
import sys
# main function
if __name__ == "__main__":
    if len(sys.argv) > 1:
        src = cv2.imread(sys.argv[1], cv2.IMREAD_ANYCOLOR)
    else:
        print("Usage: python linearPolar.py image")
        sys.exit(1)
    # show the original image
    cv2.imshow("src", src)
    # polar transform of the image
    dst = cv2.linearPolar(src, (508, 503), 550, cv2.INTER_LINEAR)
    # show the result of the polar transform
    cv2.imshow("dst", dst)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example 26
    images[:,:,i] = hdu[0].data
# Take the mean of the image series
image = np.mean(images, 2)

# Coordinate of disk center
xc = naxis1/2 - 0.5
yc = naxis2/2 - 0.5

# Calculate azimuthal average
# Take the polar transform of the image; knowing the disk center it is rather straightforward
# rho = (image width / max_radius) * sqrt(x**2 + y**2), phi = atan2(y, x)
center = (xc, yc)
# Set max_radius to image width = naxis1 so rho = 1.0 * sqrt(x**2 + y**2)
max_radius = naxis1

imageP = cv2.linearPolar(image, center, max_radius, cv2.INTER_LANCZOS4 + cv2.WARP_FILL_OUTLIERS)
# Invert the polar transform
# cv2.linearPolar(image, center, max_radius, cv2.INTER_LANCZOS4 + cv2.WARP_FILL_OUTLIERS)

# Take the median over theta (azimuthal average)
Iaz_avg = np.median(imageP, 0)
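
# A hedged sketch reusing the names above: the 1D azimuthal profile can be turned back
# into a 2D radially-symmetric model by tiling it over every angle and applying the
# inverse polar mapping (model is a new, illustrative name).
profile_2d = np.tile(Iaz_avg, (imageP.shape[0], 1)).astype(np.float32)
model = cv2.linearPolar(profile_2d, center, max_radius,
                        cv2.INTER_LANCZOS4 + cv2.WARP_FILL_OUTLIERS + cv2.WARP_INVERSE_MAP)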


xgrid1, ygrid1 = np.meshgrid(np.arange(naxis1), np.arange(naxis2))
# Distance of each pixel to disk center
pixel_r = np.sqrt((xgrid1 - xc)**2 + (ygrid1 - yc)**2)



# apply a radius-based scaling to boost the limb
radius = header['SOLAR_R']
Example 27
import cv2
import numpy as np
dim = (250, 250)
source = cv2.imread("pol2.jpg", 0)
source = cv2.resize(source, dim, interpolation=cv2.INTER_AREA)
img64_float = source.astype(np.float64)

Mvalue = np.sqrt(((img64_float.shape[0] / 2)**2.0) +
                 ((img64_float.shape[1] / 2)**2.0))

ploar_image = cv2.linearPolar(
    img64_float, (img64_float.shape[0] / 2, img64_float.shape[1] / 2), Mvalue,
    cv2.WARP_FILL_OUTLIERS)
#p2=cv2.LogPolar(img64_float, (img64_float.shape[0]/2, img64_float.shape[1]/2),(40,50) , Mvalue,cv2.INTER_LINEAR+cv2.WARP_FILL_OUTLIERS)
cartisian_image = cv2.linearPolar(
    ploar_image, (img64_float.shape[0] / 2, img64_float.shape[1] / 2), Mvalue,
    cv2.WARP_INVERSE_MAP)

cartisian_image = cartisian_image / 200
ploar_image = ploar_image / 255

ploar_image = cv2.resize(ploar_image, dim, interpolation=cv2.INTER_AREA)
cv2.imwrite("ploarimg.jpg", ploar_image)
pol = cv2.imread("ploarimg.jpg")
blur = cv2.GaussianBlur(pol, (51, 51), 0)
#cv2.imshow('blur',blur)
edges = cv2.Canny(blur, 4, 0)
'''
lines = cv2.HoughLines(edges,1,np.pi/90,1)
for rho,theta in lines[0]:
    a = np.cos(theta)
Example 28
#!/usr/bin/python

# import numpy as np
import cv2

logo = cv2.imread('data/circle.png')

warped = cv2.linearPolar(logo,(128,128),120,flags=cv2.INTER_LINEAR+cv2.WARP_FILL_OUTLIERS)

# grab the dimensions of the image and calculate the center
# of the image
(h, w) = warped.shape[:2]
center = (w / 2, h / 2)

# rotate the image by 180 degrees
M = cv2.getRotationMatrix2D(center, 90, 1.0)
rotated = cv2.warpAffine(warped, M, (w, h))


# perform the actual resizing of the image and show it
dim = (1280,320)
resized = cv2.resize(rotated, dim, interpolation = cv2.INTER_AREA)

cv2.imshow("logo",logo)
cv2.imshow("rotated",resized)
cv2.waitKey(0)
Example 29
def get_rect(path):
    e, img = detect(path)
    img = cv2.linearPolar(img.T, (e.center[0], e.center[1]), 80, cv2.WARP_FILL_OUTLIERS).T
    imshow(img)
    img = img[0:23, :]
    return img
Example 30
        #img_sq = np.ndarray(shape = (width, width, 3))
        diff = int((w - h) / 2)
        # pad
        frame_pad = cv2.copyMakeBorder(frame_resize, diff, diff, 0, 0,
                                       cv2.BORDER_CONSTANT)
        # crop
        #        img_sq = img[0:h, diff:(diff+h)] # crop only the middle
        h, w = frame_pad.shape[:2]
        #print ('image is now squared of size ', (w, h) )
    else:
        frame_pad = frame_resize
        h, w = frame_pad.shape[:2]

    # fisheye to rectlinear
    #frame_polar= cv2.logPolar(frame, (frame_resize.shape[0]/2, frame_resize.shape[1]/2), 80, cv2.WARP_FILL_OUTLIERS)      # [0]: height; [1]: width
    frame_polar = cv2.linearPolar(frame_pad, (w / 2, h / 2), h / 2,
                                  cv2.WARP_FILL_OUTLIERS)
    h, w = frame_polar.shape[:2]
    #print(h, w)

    # rotate 90
    rot_mat = cv2.getRotationMatrix2D((w / 2, h / 2), -90,
                                      1.0)  # center,  angle, scale
    frame_rot = cv2.warpAffine(frame_polar,
                               rot_mat, (w, h),
                               flags=cv2.INTER_LINEAR)

    # resize to pano view
    frame_pano = cv2.resize(
        frame_rot, (w * 2, h / 2))  #, interpolation=cv2.CV_INTER_CUBIC)

    # option 1: play back
Example 31
for i in range(nexp):
    temp = np.fliplr(imagesN[i, :, :, :].reshape(-1, 3)).reshape(imagesN[i, :, :, :].shape)
    images_expN_RGB[i, :, :, :] = temp / max16



# Gray scaled image
gimages = imagesN.mean(axis=3)
# Coordinate of the center
center = (2120, 1361)
radius = 641/2

pimages = np.zeros([nexp, ny, nx])
for i in range(0,3):
    # Azimuthal average
    pimages[i,:,:] = cv2.linearPolar(gimages[i,:,:], center, gimages.shape[2], cv2.INTER_LANCZOS4 + cv2.WARP_FILL_OUTLIERS)
    # Take the median over theta (azimuthal average)

ymin = 1100
ymax = 1301
# Take the median over this selection on the y-axis.
az_avgs = np.median(pimages[:,ymin:ymax, :], 1)
zeroloc1 = np.where(pimages[2, ymin, :] == 0)[0].min()
zeroloc2 = np.where(pimages[2, ymax, :] == 0)[0].min()
zeroloc = np.min([zeroloc1, zeroloc2])
az_avgs[:, zeroloc:] = 0

ny, nx = gimages[0,...].shape


plt.figure(1, figsize=(19,10))
Example 32
 def converter(self, filereader, output):
     cv2.linearPolar(self.intermediate_input,
                     (Constants.CENTER_POINT_x, Constants.CENTER_POINT_z),
                     Constants.SCAN_CONVERTER_SCALE,
                     cv2.INTER_CUBIC + cv2.WARP_INVERSE_MAP, self.output)
     cv2.imwrite(output, self.output)
Example 33
plt.imshow(images_expf_masked[0,...])
plt.subplot(235)
plt.imshow(images_expf_masked[1,...])
plt.subplot(236)
plt.imshow(images_expf_masked[2,...])
plt.tight_layout()

plt.savefig('/Users/rattie/Data/Eclipse/figures/imagesRGB_and_exp_normalized.png')


pimages0_rgb = np.zeros([nexp, ny, nx, 3])
pimages_rgb = np.zeros([nexp, ny, nx, 3])
for i in range(0,3):
    for k in range(0,3):
        # Azimuthal average
        pimages0_rgb[i, :, :, k] = cv2.linearPolar(imagesf[i, :, :, k], center, nx, cv2.INTER_LANCZOS4 + cv2.WARP_FILL_OUTLIERS)
        pimages_rgb[i,:,:,k] = cv2.linearPolar(images_exp[i,:,:,k], center, nx, cv2.INTER_LANCZOS4 + cv2.WARP_FILL_OUTLIERS)

pimages0_rgb = np.clip(pimages0_rgb, 0,1)
pimages_rgbf = np.clip(pimages_rgb / maxval, 0, 1)


# Take the median over a selection on the y-axis.
#ymin = 1100
ymin = 990
ymax = 1100 # 1301

ymin2 = 970
ymax2 = 1040

zeroloc1 = np.where(pimages_rgb[2, ymin, :, 0] == 0)[0].min()
Example 34
def to_polar(image,s0,s1,value):
     polar_image = cv2.linearPolar(image,(s1, s0), value, cv2.WARP_FILL_OUTLIERS)
     polar_image=cv2.resize(polar_image, (500, 200), interpolation = cv2.INTER_AREA)
     return polar_image
Example 35
    disc_map = DiscSeg_model.predict([temp_img])

    disc_map = BW_img(np.reshape(disc_map, (DiscSeg_size, DiscSeg_size)), 0.5)

    regions = regionprops(label(disc_map))
    C_x = int(regions[0].centroid[0] * org_img.shape[0] / DiscSeg_size)
    C_y = int(regions[0].centroid[1] * org_img.shape[1] / DiscSeg_size)

    for disc_idx in range(len(disc_list)):
        DiscROI_size = disc_list[disc_idx]
        disc_region, err_coord, crop_coord = disc_crop(org_img, DiscROI_size,
                                                       C_x, C_y)
        label_region, _, _ = disc_crop(new_label, DiscROI_size, C_x, C_y)
        Disc_flat = rotate(
            cv2.linearPolar(disc_region, (DiscROI_size / 2, DiscROI_size / 2),
                            DiscROI_size / 2,
                            cv2.INTER_NEAREST + cv2.WARP_FILL_OUTLIERS), -90)
        Label_flat = rotate(
            cv2.linearPolar(label_region, (DiscROI_size / 2, DiscROI_size / 2),
                            DiscROI_size / 2,
                            cv2.INTER_NEAREST + cv2.WARP_FILL_OUTLIERS), -90)

        disc_result = Image.fromarray((Disc_flat * 255).astype(np.uint8))
        disc_result.save(data_save_path + temp_txt[:-4] + '_' +
                         str(DiscROI_size) + '.png')
        label_result = Image.fromarray((Label_flat * 255).astype(np.uint8))
        label_result.save(label_save_path + temp_txt[:-4] + '_' +
                          str(DiscROI_size) + '.png')

plt.imshow(Disc_flat)
plt.show()
Example 36
def train():
    global max_OD_Dice, max_OD_Dice_cup_Dice, max_cup_Dice, max_cup_Dice_OD_Dice
    for epoch in range(cur_epoch, args['epochs']):
        # train network
        net.train()
        train_loss = 0
        progress_bar = tqdm(train_loader)
        for batch_idx, (inputs, targets) in enumerate(progress_bar):
            progress_bar.set_description('Epoch {}/{}'.format(epoch + 1, args['epochs']))

            inputs = Variable(inputs.cuda().detach())
            targets = Variable(targets.long().cuda().detach())

            optimizer.zero_grad()
            outputs = net(inputs)

            loss_ = []
            for criterion, output, loss_weight in zip(criterions, outputs, loss_weights):
                loss_od = criterion(output[:, (0, 1), :, :], targets[:, 1, :, :].reshape(targets.shape[0], targets.shape[2], targets.shape[3]))
                loss_cup = criterion(output[:, (0, 2), :, :], targets[:, 2, :, :].reshape(targets.shape[0], targets.shape[2], targets.shape[3]))
                loss_.append(loss_weight * (0.5 * loss_od + 0.5 * loss_cup))

            loss = sum(loss_)
            loss.backward()
            optimizer.step()

            train_loss += loss.item()
            progress_bar.set_postfix(loss='%.3f' % (train_loss / (batch_idx + 1)))

        net.eval()
        with torch.no_grad():
            OD_Dices = []
            cup_Dices = []
            for batch_idx, (test_input, test_target, test_disk_center, test_crop_point, test_gt_point) in enumerate(test_loader):
                test_input = test_input.detach().cuda()
                test_outputs = net(test_input)
                disc_predict = torch.nn.functional.softmax(test_outputs[-1][:, (0, 1), :, :], dim=1).cpu().data.numpy()[0]
                disc_map = (resize(disc_predict[1, :, :], (args['test_crop_size'], args['test_crop_size']), mode='constant') * 255).astype(np.uint8)
                cup_predict = torch.nn.functional.softmax(test_outputs[-1][:, (0, 2), :, :], dim=1).cpu().data.numpy()[0]
                cup_map = (resize(cup_predict[1, :, :], (args['test_crop_size'], args['test_crop_size']), mode='constant') * 255).astype(np.uint8)
                if args['data_polar'] is True:
                    disc_map[-round(args['test_crop_size'] / 3):, :] = 0
                    cup_map[-round(args['test_crop_size'] / 2):, :] = 0
                    disc_map = cv2.linearPolar(rotate(disc_map, 90), (args['test_crop_size'] / 2, args['test_crop_size'] / 2),
                                               args['test_crop_size'] / 2, cv2.WARP_FILL_OUTLIERS + cv2.WARP_INVERSE_MAP)
                    cup_map = cv2.linearPolar(rotate(cup_map, 90), (args['test_crop_size'] / 2, args['test_crop_size'] / 2),
                                              args['test_crop_size'] / 2, cv2.WARP_FILL_OUTLIERS + cv2.WARP_INVERSE_MAP)

                disc_map = np.array(BW_img(disc_map, threshold_confusion), dtype=int)
                cup_map = np.array(BW_img(cup_map, threshold_confusion), dtype=int)
                test_target = test_target[0].data.numpy()
                img_result = np.zeros(test_target.shape, dtype=np.int8)
                img_result[1, test_gt_point[0][0]:test_gt_point[0][1], test_gt_point[0][2]:test_gt_point[0][3]] = \
                    disc_map[test_crop_point[0][0]:test_crop_point[0][1], test_crop_point[0][2]:test_crop_point[0][3]]
                img_result[2, test_gt_point[0][0]:test_gt_point[0][1], test_gt_point[0][2]:test_gt_point[0][3]] = \
                    cup_map[test_crop_point[0][0]:test_crop_point[0][1], test_crop_point[0][2]:test_crop_point[0][3]]

                OD_Dice, cup_Dice = calc_dice(img_result, test_target)
                OD_Dices.append(OD_Dice)
                cup_Dices.append(cup_Dice)

            OD_Dice = np.mean(OD_Dices)
            cup_Dice = np.mean(cup_Dices)

            if max_OD_Dice < OD_Dice:
                max_OD_Dice = OD_Dice
                max_OD_Dice_cup_Dice = cup_Dice
            if max_cup_Dice < cup_Dice:
                max_cup_Dice_OD_Dice = OD_Dice
                max_cup_Dice = cup_Dice

            logs.append([epoch, OD_Dice, max_OD_Dice, max_OD_Dice_cup_Dice, cup_Dice, max_cup_Dice, max_cup_Dice_OD_Dice])
            state = {
                'net': net.state_dict(),
                'max_OD_Dice': max_OD_Dice,
                'max_OD_Dice_cup_Dice': max_OD_Dice_cup_Dice,
                'max_cup_Dice': max_cup_Dice,
                'max_cup_Dice_OD_Dice': max_cup_Dice_OD_Dice,
                'epoch': epoch,
                'logs': logs,
                'args': args
            }

            torch.save(state, basic_path + 'checkpoints/periods/{}.pt'.format(epoch))
            torch.save(state, basic_path + 'checkpoints/last.pt')

            lr = get_lr(optimizer)
            ts_writer.add_scalar('train/loss', train_loss / len(train_loader), epoch)
            ts_writer.add_scalar('train/lr', lr, epoch)
            ts_writer.add_scalar('test/OD_Dice', OD_Dice, epoch)
            ts_writer.add_scalar('test/cup_Dice', cup_Dice, epoch)
            tqdm.write('     OD_Dice: {:.4f},     cup_Dice: {:.4f}'.format(OD_Dice, cup_Dice))
            tqdm.write(' max_OD_Dice: {:.4f},     cup_Dice: {:.4f}'.format(max_OD_Dice, max_OD_Dice_cup_Dice))
            tqdm.write('     OD_Dice: {:.4f}, max_cup_Dice: {:.4f}'.format(max_cup_Dice_OD_Dice, max_cup_Dice))
            tqdm.write('learning rate: {:.4f}'.format(lr))
Example 37
    i = i+1

fileWrite = []
i=0
while i<len(fileRead) :
    fileWrite.append(fileRead[i][len(readPath):len(fileRead[i])-3])
    fileWrite[i] = writePath + fileWrite[i] + "txt"
    i = i+1

print "----File Write----"
print fileWrite
k = 0
while k<len(fileRead) :
    img1 = cv2.imread(fileRead[0],1)
    img2 = cv2.resize(img1, (360,360)) 
    img3 = cv2.linearPolar(img2, (img2.shape[0]/2, img2.shape[1]/2), 180, cv2.WARP_FILL_OUTLIERS)
    img4 = cv2.resize(img3, (300,300))

    #cv2.imshow('before',img1)
    #cv2.imshow('resized', img2)
    #cv2.imshow('linear-polar', img3)

    #Creates a 12*360 = 4320 element list of each LED (GRB) for each degree (360)
    img5 = []

    j = 0
    while j < img3.shape[0] :
        i = 0
        while i < 360-18-6 :
            img5.append(img3[j,6+i])
            i=i+28
Example 38
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 mapping='translation',
                 kernel_type='linear',
                 learnable_kernel=False,
                 kernel_regularizer=False,
                 alpha=0.03,
                 balance=1,
                 power=3,
                 sigma=2,
                 gamma=1):
        super(Kerv2d, self).__init__(in_channels, out_channels, kernel_size,
                                     stride, padding, dilation, groups, bias)
        self.mapping, self.kernel_type = mapping, kernel_type
        self.learnable_kernel, self.kernel_regularizer = learnable_kernel, kernel_regularizer
        self.alpha, self.balance, self.power, self.sigma, self.gamma = alpha, balance, power, sigma, gamma

        # parameter for kernel type
        if learnable_kernel == True:
            self.alpha = nn.Parameter(torch.cuda.FloatTensor([alpha]),
                                      requires_grad=True)
            self.balance = nn.Parameter(torch.cuda.FloatTensor([balance]),
                                        requires_grad=True)
            self.sigma = nn.Parameter(torch.cuda.FloatTensor([sigma]),
                                      requires_grad=True)
            self.gamma = nn.Parameter(torch.cuda.FloatTensor([gamma]),
                                      requires_grad=True)
        if kernel_type == 'gaussian' or kernel_type == 'cauchy':
            self.weight_ones = Variable(torch.cuda.FloatTensor(
                self.weight.size()).fill_(1 / self.weight.numel()),
                                        requires_grad=False)

        # mapping functions
        if mapping == 'translation':
            return

        index_all = np.reshape(np.arange(self.weight.nelement()),
                               (out_channels * in_channels, kernel_size**2))
        if mapping == 'polar':
            import cv2
            center = (kernel_size / 2, kernel_size / 2)
            radius = (kernel_size + 1) / 2.0
            index = np.reshape(np.arange(kernel_size**2),
                               (kernel_size, kernel_size))
            maps = np.reshape(
                cv2.linearPolar(index, center, radius,
                                cv2.WARP_FILL_OUTLIERS).astype(int),
                (kernel_size**2))
            index_all[:, :] = index_all[:, maps]
        elif mapping == 'logpolar':
            import cv2
            center = (kernel_size / 2, kernel_size / 2)
            radius = (kernel_size + 1) / 2.0
            M = kernel_size / np.log(radius)
            index = np.reshape(np.arange(kernel_size**2),
                               (kernel_size, kernel_size))
            maps = np.reshape(
                cv2.logPolar(index, center, M,
                             cv2.WARP_FILL_OUTLIERS).astype(int),
                (kernel_size**2))
            index_all[:, :] = index_all[:, maps]
        elif mapping == 'random':
            for i in range(out_channels * in_channels):
                index_all[i, :] = index_all[
                    i,
                    np.random.
                    randint(low=0, high=kernel_size**2, size=kernel_size**2)]
        else:
            raise NotImplementedError()
        self.mapping_index = torch.cuda.LongTensor(index_all).view(-1)
Example 39
 def converter(self, filereader):
     cv2.linearPolar(self.intermediate_input, (constants.CENTER_POINT_x,constants.CENTER_POINT_z), constants.SCAN_CONVERTER_SCALE, cv2.INTER_CUBIC + cv2.WARP_INVERSE_MAP, self.output)
     cv2.imwrite('color_img.bmp', self.output)
     cv2.imshow('image',self.output)
     cv2.waitKey(0)
Example 40
    def load(self, xs):
        if self.filter_classes:
            if not any([c in xs[-1] for c in self.filter_classes]):
                # print('removing ' + xs[-1])
                return None

        fname = xs[-1]

        if self.filter_ids or self.filter_classes:
            match = re.match('(.*)_(\d\d\d\d).*', Path(fname).name)
            if match:
                cls, id = match.groups()
            else:
                print('Warning! could not match class and id!')
                cls, id = self.filter_classes[0], 0

        if self.filter_ids:
            if int(id) > self.filter_ids[cls]:
                return None

        label = xs[-2]
        if self.label_to0idx:
            label -= 1
        # adjust label
        if self.filter_classes:
            label = self.filter_classes.index(cls)

        # this is when we have n encoded views
        # xs = np.stack([cv2.imdecode(x, cv2.IMREAD_GRAYSCALE)
        #                for x in xs[:-2]])
        # and this when we have a single encoding for all views (faster)
        flag = cv2.IMREAD_COLOR if self.rgb else cv2.IMREAD_GRAYSCALE
        x = cv2.imdecode(xs[0], flag)
        xs = np.split(x, x.shape[1] // x.shape[0], axis=1)

        if self.filter_views is not None:
            if len(xs) == len(self.filter_views):
                # upright datasets correspond to views given by this constant
                xs = [xs[i] for i in cts.upright_to_homogeneous[len(xs)]]
            else:
                xs = [xs[i] for i in self.filter_views]

        if self.autocrop:
            xs = [autocrop(x, self.keep_aspect_ratio) for x in xs]

        if self.force_res > 0:
            out = []
            res = self.force_res
            for x in xs:
                h, w = x.shape
                if (h != res or w != res):
                    x = cv2.resize(x, (res, res))
                out.append(x)
            xs = out

        # if we want to augment individual views, add here
        if self.polarmode == 'logpolar':
            w = xs[0].shape[0]
            # this will create some dead pixels
            m = w / np.log(w * np.sqrt(2) / 2)
            # but this may crop parts of the original img:
            # m = w/np.log(w/2)
            xs = [cv2.logPolar(x, ((w - 1.) / 2., (w - 1.) / 2.), m,
                               cv2.INTER_LINEAR + cv2.WARP_FILL_OUTLIERS)
                  for x in xs]
        elif self.polarmode == 'polar':
            w = xs[0].shape[0]
            m = w * np.sqrt(2) / 2
            xs = [cv2.linearPolar(x, ((w - 1.) / 2., (w - 1.) / 2.), m,
                                  cv2.INTER_LINEAR + cv2.WARP_FILL_OUTLIERS)
                  for x in xs]

        xs = np.stack(xs, axis=0)

        return [xs, label, fname]