Code Example #1
File: detect_blobs.py Project: fvegar/blades
def showImageWithCircles(img, circles):
    
    for index, c in circles.iterrows():   
        cv2.circle(img, (int(c.x), int(c.y)), 5, (255, 255, 255), -1)
        cv2.putText(img, "centroid", (int(c.x) - 25, int(c.y) - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
    
    showImage(img)
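
The function above assumes `circles` is a pandas DataFrame with numeric `x` and `y` columns (the format built by the detection functions elsewhere in this file). A minimal, hypothetical call might look like this; the sample values are illustrative:

import numpy as np
import pandas as pd

frame = np.zeros((480, 640, 3), dtype=np.uint8)                     # blank test image
circles = pd.DataFrame({'x': [120.0, 340.5], 'y': [88.0, 210.25]})  # fake detections
showImageWithCircles(frame, circles)                                # draws and labels both centroids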
Code Example #2
    def birdsEye(self, img, offset=(48, 88), margin=(200, 40), debug=False):
        h, w = img.shape[0], img.shape[1]
        cx, cy = int(w / 2), int(h / 2)
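
        # src below is a trapezoid around the lane: short top edge offset from
        # the image center, long bottom edge near the image borders; dst then
        # straightens those same points into a full-height rectangle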

        top_left, top_right = [cx - offset[0], cy + offset[1]
                               ], [cx + offset[0], cy + offset[1]]
        bottom_left, bottom_right = [margin[0], h - margin[1]
                                     ], [w - margin[0], h - margin[1]]
        src_point = np.array([top_left, top_right, bottom_right, bottom_left],
                             np.float32)

        top_left[0] = bottom_left[0] = int((top_left[0] + bottom_left[0]) / 2)
        top_right[0] = bottom_right[0] = int(
            (top_right[0] + bottom_right[0]) / 2)
        top_left[1] = top_right[1] = 0
        bottom_left[1] = bottom_right[1] = h
        dst_point = np.array([top_left, top_right, bottom_right, bottom_left],
                             np.float32)

        self.lastBirdEyePoints = {'src': src_point, 'dst': dst_point}
        M = cv2.getPerspectiveTransform(src_point, dst_point)
        warped = cv2.warpPerspective(img, M, (w, h), flags=cv2.INTER_LINEAR)

        debug_image = None
        if debug:
            debug_image = warped.copy() * 255
            debug_image = cv2.cvtColor(debug_image, cv2.COLOR_GRAY2RGB)
            cv2.polylines(debug_image, np.array([dst_point], np.int32), True,
                          (0, 255, 0))
            utils.showImage(debug_image)

        return warped, debug_image
Code Example #3
    def evaluate(self):
        def show(img):
            npimg = img.numpy()
            plt.imshow(np.transpose(npimg, (1, 2, 0)), interpolation='nearest')

        # load the last iteration if training was started but not finished
        if len(listdir("saves7")) > 0:
            # the checkpoint number (27 here) comes from the save-file name format
            self.loadModel(27)
        else:
            self.StyleGan.init_weights()
            self.checkpoint = 0

        if np.random.random() < self.mixed_probability:
            style_noise = utils.createStyleMixedNoiseList(
                self.batch_size, self.latent_dim, self.num_layers,
                self.StyleGan.styleNetwork, self.device)
        else:
            style_noise = utils.createStyleNoiseList(
                self.batch_size, self.latent_dim, self.num_layers,
                self.StyleGan.styleNetwork, self.device)
        image_noise = utils.create_image_noise(self.batch_size,
                                               self.image_size, self.device)
        generated_images = self.StyleGan.generator(style_noise, image_noise)
        for x in range(generated_images.shape[0]):
            utils.showImage(generated_images[x])
Code Example #4
    def colorFilter(self, img, img_):
        # convert to a PIL image to collect color statistics
        tmp = img.copy()
        PILImg = Image.fromarray(cv2.cvtColor(tmp, cv2.COLOR_BGR2RGB))
        _, colorDic = self.get_dominant_color(PILImg)

        # remove red (with infill)
        HSV = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2HSV)
        H, S, V = cv2.split(HSV)
        H = np.array(H).flatten()
        S = np.array(S).flatten()
        V = np.array(V).flatten()
        mask_R = utils.color_(img_.copy(), 1)
        img_R = cv2.bitwise_and(img, img, mask=mask_R)
        img_R = cv2.bitwise_not(img_R)
        img -= img_R
        # utils.showImage(img, "red")

        # keep blue
        HSV = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2HSV)
        H, S, V = cv2.split(HSV)
        rangeColor = 20
        for item in colorDic:
            color = item[1]
            Hue = colorsys.rgb_to_hsv(color[0] / 255.0, color[1] / 255.0,
                                      color[2] / 255.0)[0] * 180
            if (abs(Hue - 112) < rangeColor):
                break
        LowerBlue = np.array([Hue - rangeColor, 43, 46])
        UpperBlue = np.array([Hue + rangeColor, 255, 255])
        mask_B = cv2.inRange(HSV, LowerBlue, UpperBlue)
        img_B = cv2.bitwise_and(img, img, mask=mask_B)
        utils.showImage(img_B, "with blue")
        img = img_B
        return img
Code Example #5
    def laneSearchHistogram(self,
                            binary_image,
                            slice_count=2,
                            slice_num=0,
                            offset=0,
                            debug=False):
        h, w = binary_image.shape[0], binary_image.shape[1]
        slice_h = int(h / slice_count)

        max_h = h - slice_h * slice_num
        min_h = h - slice_h * (slice_num + 1)

        slice = binary_image[min_h:max_h, :]
        histogram = np.sum(slice, axis=0)
        if debug:
            utils.showImage(slice, cmap='gray')
            print('histogram', histogram.shape)
            plt.plot(histogram)
            plt.show()

        midpoint = int((len(histogram) / 2) + offset)

        left_peak = np.argmax(histogram[:midpoint])
        right_peak = np.argmax(histogram[midpoint:]) + midpoint

        return left_peak, right_peak
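
A hedged usage sketch for the histogram search: given a synthetic binary image with two vertical lines, the two peaks recover the line columns (`detector` stands in for whatever object owns this method):

import numpy as np

binary = np.zeros((720, 1280), dtype=np.uint8)
binary[:, 300] = 1    # fake left lane line
binary[:, 980] = 1    # fake right lane line
left, right = detector.laneSearchHistogram(binary, slice_count=2, slice_num=0)
# left == 300, right == 980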
Code Example #6
    def refresh_cookie(self):
        url = "http://www.shangxueba365.com/"
        try:
            rep = self.session.get(url, headers=self.base_headers, timeout=self.timeout)
            rep.raise_for_status()
            img_base64 = re.findall(
                r'<img class="verifyimg" alt="verify_img" src="data:image/bmp;base64,(.*?)"/>', rep.text)[0]
            img_bytes = base64.b64decode(img_base64)
            img_name = "verify.jpg"
            saveImage(img_bytes, img_name)
            showImage(img_name)
            time.sleep(1)
            verify_code = input("输入验证码:")  # prompt: "Enter the verification code:"
            removeImage(img_name)
            # The site's JavaScript that the loop below mirrors (for digit
            # characters, str(ord(c) - 18) reproduces charCodeAt(i).toString(16)):
            # ==============
            # var val = "";
            #         for (var i = 0; i < str.length; i++) {
            #             if (val == "")
            #                 val = str.charCodeAt(i).toString(16);
            #             else
            #                 val += str.charCodeAt(i).toString(16);
            #         }
            # ==============
            啥 = ""
            for item in verify_code:
                啥 += str(ord(item) - 18)
            # ==============
            rep = self.session.get(url + "/?security_verify_img=" + 啥, headers=self.base_headers, timeout=self.timeout)
            cookie_dict = requests.utils.dict_from_cookiejar(self.session.cookies)
            with open("cookie.json", "w") as f:
                json.dump(cookie_dict, f)
        except Exception:
            raise RefreshException("cookie获取失败")  # "failed to fetch the cookie"
Code Example #7
def cornerFinder(input_shape,
                 name='corner_cnn',
                 load_weights=None,
                 debug=False):

    model = Sequential(name=name)
    model.add(Lambda(lambda x: x / 255, input_shape=input_shape))  # normalize
    model.add(Conv2D(8, kernel_size=(5, 5), activation="relu", strides=(2, 2)))
    model.add(Conv2D(16, kernel_size=(5, 5), activation="relu",
                     strides=(2, 2)))
    model.add(Conv2D(32, kernel_size=(5, 5), activation="relu",
                     strides=(2, 2)))
    model.add(Flatten())
    model.add(Dense(100))
    model.add(Dropout(0.5))
    model.add(Dense(100))
    model.add(Dropout(0.5))
    model.add(Dense(8, activation="sigmoid"))

    if load_weights is not None:
        print('Loading weights', load_weights)
        model.load_weights(load_weights)
    else:
        print('No weights loaded; load_weights was not provided')

    if debug:
        model_img_path = output_images + model.name + '.png'
        plot_model(model, to_file=model_img_path, show_shapes=True)
        utils.showImage(model_img_path)

    return model
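
The final Dense(8, activation="sigmoid") layer emits eight values in [0, 1]; Code Example #16 below pairs them into four (x, y) corner coordinates. A hypothetical invocation, assuming a raw uint8 image `img` matching input_shape (the leading Lambda layer handles the /255 normalization):

model = cornerFinder((128, 128, 3))                     # input_shape is illustrative
p = model.predict(img[None, :, :, :], batch_size=1)[0]  # eight values in [0, 1]
corners = [(p[0], p[1]), (p[2], p[3]), (p[4], p[5]), (p[6], p[7])]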
Code Example #8
def processFrame(im):
    utils.preprocessing(im)
    # To see in the right way
    im = cv2.flip(im, 1)
    im_height, im_width = im.shape
    global TPL_RECT
    global TPL_CENTER
    global FILTER_INIT
    global FILTER_NUM
    global FILTER_DEN
    global COS_WINDOW
    global PSR_THRES

    psr_test = False
    if TPL_RECT is not None:  # '!= None' broadcasts elementwise on ndarrays and breaks the truth test
        tplc = TPL_RECT.astype(np.int64)
        patch = im[tplc[0, 0] : tplc[1, 0], tplc[0, 1] : tplc[1, 1]]
        if not FILTER_INIT:
            initFilter(patch)
            print(tplc.shape)
        else:
            if patch.shape == COS_WINDOW.shape:
                height, width = patch.shape
                #                ptpl = patch * COS_WINDOW
                #                height, width = ptpl.shape
                #                output = utils.genGaussianMatrix(width, height, (width/2, height/2), 2.0)
                #                ftpl = np.fft.fft2(ptpl)
                #                foutput = np.fft.fft2(output)
                #                n = foutput #* np.conj(ftpl)
                #                d = ftpl #* np.conj(ftpl)
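                # MOSSE-style correlation: apply the learned filter
                # (FILTER_NUM / FILTER_DEN) to the cosine-windowed patch in
                # the Fourier domain; the peak of the real response g gives
                # the target's displacement within the patch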
                G = np.conj(FILTER_NUM / FILTER_DEN) * np.conj(np.fft.fft2(patch * COS_WINDOW))
                g = np.real(np.fft.ifft2(G))
                utils.showImage("output", g)
                utils.showImage("filter", np.real(np.fft.fftshift(np.fft.ifft2(np.conj(FILTER_NUM / FILTER_DEN)))))
                psr = utils.computePSR(g)
                psr_test = psr > PSR_THRES
                if True:
                    peak_pos = np.argmax(g)
                    dy = peak_pos // width - height // 2
                    dx = peak_pos % width - width // 2
                    if tplc[0, 0] - dy < 0:
                        dy = tplc[0, 0]
                    if tplc[1, 0] - dy >= im_height:
                        dy = tplc[1, 0] - im_height
                    if tplc[0, 1] - dx < 0:
                        dx = tplc[0, 1]
                    if tplc[1, 1] - dx >= im_width:
                        dx = tplc[1, 1] - im_width
                    tplc[:, 0] -= dy
                    tplc[:, 1] -= dx
                    TPL_RECT[:, 0] -= dy
                    TPL_RECT[:, 1] -= dx
                    new_patch = im[tplc[0, 0] : tplc[1, 0], tplc[0, 1] : tplc[1, 1]]
                    updateFilter(new_patch)
                # print psr

        utils.drawRectangle(im, tplc, not psr_test)

    return im
Code Example #9
File: main.py Project: tjtanaa/comp4421
def assignment3(input):
    img = cv2.imread(input)
    markedImg, digits = extractDigits(img)

    labels = adaboostClassification(digits)
    utils.showDigits(digits, labels, labels.shape[0], title_text="label = {}", random=False)
    utils.showImage(markedImg)
    return markedImg
Code Example #10
def main():
    img = utils.readImage()
    img = smooth(img)

    # note: both names alias the same array, so both passes annotate `img` in place
    img_red, img_blue = img, img

    processImage(img_red, 'Red')
    processImage(img_blue, 'Blue')

    utils.showImage(img, 'Final Classification')
Code Example #11
File: detect_blobs.py Project: fvegar/blades
def alternative_detectCirclesImage(img, frame_number=0, display_intermediate_steps=False, meanRadius=30, thresh=20, opening_kernel=5,
                                   ROI_center=[656,395], ROI_radius=408):
    """ Produces a biased position when particles are not uniform (letters)
        but it is way more robust to changes in ilumination """
    # FOR CAMERA VERY HIGH, FIRST CREATE CIRCULAR MASK
    mask = createCircularMask(800, 1280, center=ROI_center, radius=ROI_radius)
    img = maskImage(img, mask)

    if display_intermediate_steps==True:
        showImage(img, name='Original')
    bw = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, binarized = cv2.threshold(bw, thresh, 255.0, cv2.THRESH_BINARY)
    if display_intermediate_steps==True:
        showImage(binarized, name='Binary')
        print('Thresh: '+str(thresh))
    opened = morphOperation(binarized, operation='opening', times=1, kernel_size=opening_kernel)
    if display_intermediate_steps==True:
        showImage(opened, name='Opened')
        print('Opening Kernel size: '+str(opening_kernel))
    closed = morphOperation(opened, operation='closing', times=1, kernel_size=6)
    if display_intermediate_steps==True:
        showImage(closed, name='Closed')
    eroded = morphOperation(closed, operation='erosion', times=1, kernel_size=opening_kernel+38)
    if display_intermediate_steps==True:
        showImage(eroded, name='Erode')
# =============================================================================
#     eroded = morphOperation(opened, operation='gradient', times=1, kernel_size=12)
#     if display_intermediate_steps==True:
#         showImage(eroded, name='Erode')
#         showImage(closed-eroded, name='Erode')
# =============================================================================


    # Find contours in the binary image:
    contours, hierarchy = cv2.findContours(eroded, cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    # Detect the centroid of each contour and store it on a list
    centroids = findCentroids(contours)
    N = centroids.shape[0]

    # We first prepare the structure to store detected circles in a format that
    # trackpy can easily work with (for the linking)   
    A = pd.DataFrame(np.zeros((N, 2), dtype=np.float64), columns=('x', 'y'))
    B = pd.DataFrame(np.full((N, 1), frame_number, dtype=np.int64), columns=('frame',))
    C = pd.DataFrame(np.full((N, 1), meanRadius, dtype=np.float64), columns=('size',))
    circles_tp = pd.concat((A, C, B), axis=1)
    # Fill that structure:
    circles_tp.x, circles_tp.y = centroids[:,0], centroids[:,1]
    
    if display_intermediate_steps==True:
        showImageWithCircles(img, circles_tp)
    
    return circles_tp
Code Example #12
File: IOTool.py Project: AI-Force/PanoAnnotator
def showLayoutMaps(scene, color=None):

    edgeMap, _ = utils.genLayoutEdgeMap(scene, pm.layoutMapSize)
    if color is not None:
        color = utils.imageResize(color, [512, 1024])
        edgeMap = edgeMap * 0.5 + color * 0.5
    utils.showImage(edgeMap)

    obj2dMap = utils.genLayoutObj2dMap(scene, pm.layoutMapSize)
    if color is not None:  # guard: blending with color would fail when it is None
        obj2dMap = obj2dMap * 0.7 + color * 0.3
    utils.showImage(obj2dMap)
Code Example #13
def processVideo(self, path, function, live=False, debug=False):
    if not live:
        strdate = '_' + utils.standardDatetime()
        output_video = video_output_path + utils.filenameAppend(path, strdate)
        video = VideoFileClip(path)
        video_clip = video.fl_image(function)
        # presumably the intended final step; the original snippet computed
        # output_video but never wrote it
        video_clip.write_videofile(output_video, audio=False)
    else:
        vidcap = cv2.VideoCapture(path)
        while True:
            success, image = vidcap.read()
            if not success:
                break
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            final_image = self.pipeline(image, debug=debug)
            utils.showImage(final_image)
Code Example #14
File: detect_blobs.py Project: fvegar/blades
def detectCirclesImage(img, frame_number=0, display_intermediate_steps=False, meanRadius=30):

    if display_intermediate_steps==True:
        showImage(img, name='Original')
    
    added = eraseText(img, k1=3, k2=5, k3=3, k4=5, display_intermediate_steps=display_intermediate_steps)
    
    if display_intermediate_steps==True:
        showImage(added)
        
    # Now I binarize the image to get only b/w, the erosion so blobs are not
    # connected and can later be detected as separate contours
    _, binarized = cv2.threshold(added, 20.0, 255.0, cv2.THRESH_BINARY)
    if display_intermediate_steps==True:
        showImage(binarized)
    # The kernel must be of a size similar (better higher) to that of the feature we want to detect
# =============================================================================
#     kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (29, 29))
#     eroded = cv2.erode(binarized[1], kernel , iterations=1)
# =============================================================================
    eroded = morphOperation(binarized, operation='erosion', times=1, kernel_size=36)
    if display_intermediate_steps==True:
        showImage(eroded)  
        
    # Find contours in the binary image:
    contours, hierarchy = cv2.findContours(eroded,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    # Detect the centroid of each contour and store it on a list
    centroids = []
    for c in contours:
       # calculate moments for each contour
       M = cv2.moments(c)     
       # calculate x,y coordinate of center
       if M["m00"] != 0:
           cX = M["m10"] / M["m00"]
           cY = M["m01"] / M["m00"]
           centroids.append([cX, cY])
       else:
           cX, cY = 0, 0
    
    # Now we get those circles' positions and store them in an appropriate format
    centroids = np.array(centroids)
    N = centroids.shape[0]

    # We first prepare the structure to store detected circles in a format that
    # trackpy can easily work with (for the linking)   
    A = pd.DataFrame(np.zeros((N, 2), dtype=np.float64), columns=('x', 'y'))
    B = pd.DataFrame(np.full((N, 1), frame_number, dtype=np.int64), columns=('frame',))
    C = pd.DataFrame(np.full((N, 1), meanRadius, dtype=np.float64), columns=('size',))
    circles_tp = pd.concat((A, C, B), axis=1)
    # Fill that structure:
    circles_tp.x, circles_tp.y = centroids[:,0], centroids[:,1]
    
    if display_intermediate_steps==True:
        showImageWithCircles(img, circles_tp)
    
    return circles_tp
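
Each call returns one frame's detections in the column layout trackpy expects ('x', 'y', 'size', 'frame'), so a hedged sketch of the downstream linking step might look like this (`frames` and the search_range value are assumptions, not part of this file):

import pandas as pd
import trackpy as tp

features = pd.concat(detectCirclesImage(f, frame_number=i) for i, f in enumerate(frames))
trajectories = tp.link(features, search_range=15)  # max displacement between frames, in pixels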
Code Example #15
    def __init__(self):
        print("****** Start CV Part ******")
        img = utils.readImage(str(input("Please Input the ImgPath: ")))
        img_ = img.copy()
        # back-up;
        img = utils.shift_demo(img, 10, 50)
        img = self.colorFilter(img, img_)
        img = utils.threshold(img)

        img1 = img_.copy()
        region = self.roi_solve(img)
        for i in range(len(region)):
            rect2 = region[i]
            w1, w2 = rect2[0], rect2[0] + rect2[2]
            h1, h2 = rect2[1], rect2[1] + rect2[3]
            box = [[w1, h2], [w1, h1], [w2, h1], [w2, h2]]
            cv2.drawContours(img1, np.array([box]), 0, (0, 0, 255), 1)
            # self.saveImage(img_, box, i)
        utils.showImage(img1, "Result")
Code Example #16
def main(status):

    if status == 'generate':
        generateDatasets(dataset_name, n_train, n_valid, n_test)
        main('train')
    elif status == 'train':
        model_name = md.train(dataset_name, batch_size=512, debug=False)
        main('evaluate')
        pass
    elif status == 'evaluate':
        model = md.loadModel(model_path)
        dataset = ds.loadDataset(dataset_path + dataset_name + '/' + test_file)
        for path, y in zip(dataset['path'], dataset['y']):
            img = utils.loadImage(path)
            p = model.predict(img[None, :, :, :], batch_size=1)
            print(p, y)
            p = p[0]
            y_pred = [[p[0], p[1]], [p[2], p[3]], [p[4], p[5]], [p[6], p[7]]]
            img_dbg = drawPoints(img, y_pred, colors)
            utils.showImage(img_dbg)
    pass
Code Example #17
    # result = np.argmax(result, axis = 2)
    # result = result.astype('int')

    return result


model_name = 'U-Net'
ignoreWarnings()
useDevice('CPU')

# time_stamp = input('enter time stamp: ')
# time_stamp = '13-29-56'
time_stamp = '08-48-09'

model = LoadModel(time_stamp, model_name, weights=True, IMG_SIZE=IMG_SIZE)

impath = 'data/RGB/top_potsdam_5_12_RGB.tif'
label_impath = 'raw_data/Label/top_potsdam_3_10_label.tif.npy'

# X_test = np.load(impath)
# y_test = np.load(label_impath)
X_test = readImage(impath, IMG_SIZE)

result = predict(model, X_test)

del X_test, model

result = InvertLabel(result)

showImage(result, waitKey=0)
Code Example #18
File: homo.py Project: dashingstag/tehOpeng
    
	p1 = np.array([k.pt for k in new_left_kp])
	p2 = np.array([k.pt for k in new_right_kp])
	
	H_final, status_final = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
	#print '%d / %d  inliers/matched' % (np.sum(status), len(status))

	final_frame=stitch(left_small_image,right_small_image,H_final,status_final)
	#video.write(final_frame)
	
	cv2.imwrite("final"+str(iterate)+".jpg", final_frame)
	finalheight=len(final_frame)
	finalwidth=len(final_frame[0])
	print "height", len(final_frame)
	print "width", len(final_frame[0])
	utils.showImage(final_frame, scale=(0.5, 0.5), timeout=0)
		
	cv2.waitKey(1)
	if cv2.waitKey(1) & 0xFF == ord('q'):
		break
	
	#detector=cv2.SIFT()
	if cv2.waitKey(1) & 0xFF == ord('q'):
		break
		
		
video  =cv2.VideoWriter("Stitched_video3.avi", -1, 24, (finalwidth,finalheight))
left_cap = cv2.VideoCapture("football_left.mp4")
mid_cap = cv2.VideoCapture("football_mid.mp4")
right_cap = cv2.VideoCapture("football_right.mp4")
for iterate in range (0,7200):
Code Example #19
File: touchDriver.py Project: ChapResearch/ChapR-FCS
    def showImage(self, image, position, fade=0):
        utils.showImage(image, position, fade)
Code Example #20
from const import WINDOW_LOC
import pyscreenshot as ImageGrab
from utils import pil2cv


def getGameImage():
    im = ImageGrab.grab(bbox=WINDOW_LOC['bbox'])
    return pil2cv(im)


if __name__ == '__main__':
    from utils import showImage
    img = getGameImage()
    showImage('', img)
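
pil2cv is imported from the project's utils and not shown; a common implementation converts a PIL RGB image into OpenCV's BGR ndarray. The sketch below is an assumption about that helper, not the project's actual code:

import cv2
import numpy as np
from PIL import Image

def pil2cv(pil_image):
    # PIL stores channels as RGB; OpenCV expects BGR order
    return cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)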
Code Example #21
                                             feed_dict={
                                                 x: img_x,
                                                 y: img_y
                                             })

            print("total_loss:", total_loss)  # total_loss: 0.9782068
            print("each_loss:", each_loss
                  )  # each_loss: [0.9740695  0.97524875 0.98075956 0.98274946]

    # ================================================================================
    img1 = cv2.imread("data/splice1.png")
    img2 = cv2.imread("data/splice2.png")
    img3 = cv2.imread("data/splice3.png")
    img4 = cv2.imread("data/splice4.png")
    images = [img1, img2, img3, img4]
    showImage(img1, img2, img3, img4)

    dsts = []
    for i in range(len(images)):
        img = images[i] / 255.0
        # INTER_CUBIC = cv2.resize(_img, (_resize_cols, _resize_rows), interpolation=cv2.INTER_CUBIC)
        images[i] = cv2.resize(img, (256, 256), interpolation=cv2.INTER_CUBIC)
        dst = cv2.GaussianBlur(images[i].copy(), (5, 5), 0)
        dsts.append(dst)

    images = np.array(images)
    dsts = np.array(dsts)

    ssimTest(images, dsts)

    # ssim loss: 0.9724881069549962
Code Example #22
File: HDMIdriver.py Project: ChapResearch/ChapR-FCS
    def showImage(self, image, position, fade=0):
        if self.role == "server":
            self._command("SI", image, position, fade)
        else:
            utils.showImage(image, position, fade)
Code Example #23
File: wahab.py Project: doc22940/scriptsdump.gut
            result[y:y+w, x:x+w] = utils.convolve(image, kernel, (y, x), (w, w))
            result[y:y+w, x:x+w] /= np.sum(kernel)

    return result


if __name__ == '__main__':
    np.set_printoptions(
            threshold=np.inf,
            precision=4,
            suppress=True)

    print("Reading image")
    image = ndimage.imread(sourceImage, mode="L").astype("float64")
    if options.images > 0:
        utils.showImage(image, "original", vmax=255.0)

    print("Normalizing")
    image = utils.normalize(image)
    if options.images > 1:
        utils.showImage(image, "normalized")

    print("Finding mask")
    mask = utils.findMask(image)
    if options.images > 1:
        utils.showImage(mask, "mask")

    print("Applying local normalization")
    image = np.where(mask == 1.0, utils.localNormalize(image), image)
    if options.images > 1:
        utils.showImage(image, "locally normalized")
Code Example #24
File: detect_blobs.py Project: fvegar/blades
def eraseText(img, k1=3, k2=5, k3=3, k4=5, display_intermediate_steps=False):
    """ Tries to delete text combining thresholds and other morphological operations
        k1, k2, k3 and k4 are parameters that define the kernel size for
        the diferent morph ops """
    
    # ---------------- Fran's Code ---------------------
    # Grayscale and Gaussian Blur
    bw = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    bw = cv2.GaussianBlur(bw, (5,5), cv2.BORDER_DEFAULT)
    if display_intermediate_steps==True:
        showImage(bw)
    
    # apply grad morph so that I get thick borders. use a circle kernel
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (k1, k1))
    grad = cv2.morphologyEx(bw, cv2.MORPH_GRADIENT, kernel)
    if display_intermediate_steps==True:
        showImage(grad)
    
    # binarize intelligently, by combining Otsu's and simple binarization
    _, binary_bw = cv2.threshold(grad, 0.0, 255.0, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    if display_intermediate_steps==True:
        showImage(binary_bw)
        
    # apply a closing morph over a rectangular kernel, so that regions inside
    # thick borders are redrawn in white, but only those containing straight lines
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (k2, k2))
    connected = cv2.morphologyEx(binary_bw, cv2.MORPH_CLOSE, kernel)
    if display_intermediate_steps==True:
        showImage(connected)
    
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (k3, k3))
    erosion = cv2.erode(binary_bw, kernel, iterations=2)
    if display_intermediate_steps==True:
        showImage(erosion)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (k4, k4))
    dilation = cv2.dilate(erosion, kernel)
    
    if display_intermediate_steps==True:
        showImage(dilation)
    difference = cv2.absdiff(connected, dilation)
    if display_intermediate_steps==True:
        showImage(difference)
    # add the whitened thick borders to the b/w image so that only text/symbols with straight lines are erased
    added = cv2.add(difference, bw)
    return added
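
A hypothetical stand-alone use, assuming a BGR frame loaded with OpenCV (the file name is illustrative):

frame = cv2.imread('frame_0001.png')
cleaned = eraseText(frame, k1=3, k2=5, k3=3, k4=5, display_intermediate_steps=True)
showImage(cleaned)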
Code Example #25
File: json2fp.py Project: zhigangjiang/LED2-Net
import sys
import os
import argparse

import objs
import utils


def json2fp(json, size, ratio):

    scene = objs.Scene()
    utils.loadLabelByJson(json, scene)
    floorMap = utils.genLayoutFloorMap(scene, size, ratio)

    return floorMap


if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    parser.add_argument('--i', required=True)
    args = parser.parse_args()

    labelPath = args.i
    outputPath = os.path.dirname(args.i)

    floorMap = json2fp(labelPath, [1000, 1000], 0.02)
    #utils.saveImage(floorMap, os.path.join(outputPath, 'fp_full.png'))
    utils.showImage(floorMap)
Code Example #26
File: main.py Project: jpcosec/ACGAN_cifar10
#dataset = torch.utils.data.ConcatDataset([trainset, testset])

trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=128,
                                          shuffle=True)

#print(len(dataset))
#print(dataset[0][0].size())
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
           'ship', 'truck', 'fake')

dataiter = iter(trainloader)
images, labels = next(dataiter)  # Python 3 / newer PyTorch iterator protocol
print(images.size())
showImage(make_grid(images[0:64]))

# custom weights initialization called on netG and netD

gen = Generator().to(device)
gen.apply(weights_init)

disc = Discriminator().to(device)
disc.apply(weights_init)

paramsG = list(gen.parameters())
print(len(paramsG))

paramsD = list(disc.parameters())
print(len(paramsD))
Code Example #27
File: homo.py Project: dashtagger/stitcher
    p1 = np.array([k.pt for k in new_left_kp])
    p2 = np.array([k.pt for k in new_right_kp])

    H_final, status_final = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
    #print '%d / %d  inliers/matched' % (np.sum(status), len(status))

    final_frame = stitch(left_small_image, right_small_image, H_final,
                         status_final)
    #video.write(final_frame)

    cv2.imwrite("final" + str(iterate) + ".jpg", final_frame)
    finalheight = len(final_frame)
    finalwidth = len(final_frame[0])
    print "height", len(final_frame)
    print "width", len(final_frame[0])
    utils.showImage(final_frame, scale=(0.5, 0.5), timeout=0)

    cv2.waitKey(1)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

    #detector=cv2.SIFT()
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video = cv2.VideoWriter("Stitched_video3.avi", -1, 24,
                        (finalwidth, finalheight))
left_cap = cv2.VideoCapture("football_left.mp4")
mid_cap = cv2.VideoCapture("football_mid.mp4")
right_cap = cv2.VideoCapture("football_right.mp4")
for iterate in range(0, 7200):
Code Example #28
    #Solve system for the 'displacement vector', i.e. the pixel values
    d = np.linalg.solve(K, f)

    for k in range(0, len(siteindex)):
        x, y = siteindex[k][0], siteindex[k][1]
        newimage[y][x] = d[k]

    return newimage


restoredimageR = FEMLaplace(image[:, :, 0], mask)
restoredimageG = FEMLaplace(image[:, :, 1], mask)
restoredimageB = FEMLaplace(image[:, :, 2], mask)

restoredimage = np.dstack([restoredimageR, restoredimageG, restoredimageB])

showImage(
    np.concatenate([
        originalimage,
        np.dstack([mask, mask, mask]).astype(np.uint8), restoredimage
    ],
                   axis=1))

scoreR = discrepancyScore(originalimage[:, :, 0], restoredimageR, mask[1])
scoreG = discrepancyScore(originalimage[:, :, 1], restoredimageG, mask[1])
scoreB = discrepancyScore(originalimage[:, :, 2], restoredimageB, mask[1])

scoreTot = (scoreR + scoreG + scoreB) / 3
print("Average discrepancy score = " + str(scoreTot))
Code Example #29
sourceImage = args[0]
if len(args) == 1:
    destinationImage = args[0]
else:
    destinationImage = args[1]

if __name__ == '__main__':
    np.set_printoptions(
            threshold=np.inf,
            precision=4,
            suppress=True)

    print("Reading image")
    image = ndimage.imread(sourceImage, mode="L").astype("float64")
    #if options.images > 0:
    utils.showImage(image, "original", vmax=255.0)

    print("Finding mask")
    mask = utils.findMask(image, threshold=0.09)
    #if options.images > 1:
    utils.showImage(mask, "mask")

    print("Estimating orientations")
    start = timer()
    orientations = np.where(mask == 1.0, utils.estimateOrientations(image, w=10, interpolate=False), -1.0)  # w is the block size used when estimating orientations
    end = timer()
    #if options.images > 0:
    utils.showOrientations(image, orientations, "orientations", 10)

    # if options.binarize:
    #     print("Binarizing")
Code Example #30
testMask = utl.makeMask("masks/128/circles.png")

testImage = utl.loadImage("pictures/128/005.jpeg")
image = np.copy(testImage)
testN = utl.findNeighbours(testImage, testMask[1])
from SOR import SOR, RestoreIndex
testR = SOR(testMask[1], testN, 30, 1.9)  # the last two arguments are the number of iterations and the relaxation constant
#print(len(testR))
#print(testN)
#print(len(testMask[1]))
#print(len(testN))
#print(testR)

FixedImg = np.copy(RestoreIndex(image, testR, testMask[1]))
utl.showImage(FixedImg)
utl.showImage(image)
DS = utl.discrepancyScore(image, FixedImg, testMask[1])
print(DS)


#%%
# Upscale example
from PIL import Image, ImageOps

orgImage = utl.loadImage("pictures/128/002.jpeg")

upImage = Image.fromarray(orgImage.astype(np.uint8))
upImage = upImage.resize((252, 252))
upImagePad = ImageOps.expand(upImage, border=3, fill ="black")
upScaled = np.copy(np.array(upImagePad))
Code Example #31
img2 = cv2.imread('images/L3.jpg', 0)
#labels_dfs = connectedComponentsDfs(getColorClasses(binary_image), 2000)

# for homography transformation
max_rows = img2.shape[0]
max_cols = img2.shape[1]
src_pts = np.float32([[5, 3], [998, 131], [925, 691], [66, 476]])
dst_pts = np.float32([[0, 0], [max_cols - 1, 0], [max_cols - 1, max_rows - 1],
                      [0, max_rows - 1]])
h_matrix, ret = cv2.findHomography(src_pts, dst_pts)
fv_image = cv2.warpPerspective(src=img2,
                               M=h_matrix,
                               dsize=(max_cols, max_rows))

print(
    "Enter '1' to answer the first part, or '2' to answer the second part\n Enter 0 to exit"
)
t = int(input())  # int() is safer than eval() on user input
if (t == 1):
    image = binary_image
else:
    image = fv_image

print("Enter '1' for the Iterative algorithim, or '2' for the Dfs algorithim")
t = int(input())
if (t == 1):
    result_image = ConnectedComponents('Iter', image)
else:
    result_image = ConnectedComponents('Dfs', image)
showImage("Connected Components on Binary Image", result_image)
Code Example #32
    for i in range(1, 5):
        xyz = (float(data[i][0]), 0, -float(data[i][1]))
        print(xyz)
        scene.layoutPoints.append(objs.GeoPoint(scene, None, xyz))

    scene.genLayoutWallsByPoints(scene.layoutPoints)
    scene.updateLayoutGeometry()

    return scene


if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    parser.add_argument('--i', required=True)
    parser.add_argument('--gt', required=True)
    args = parser.parse_args()

    data_path = args.i

    scene_pred = lnet2scene(data_path)
    fp_pred = utils.genLayoutFloorMap(scene_pred, (512, 512), 20 / 512)

    gt_path = args.gt
    scene_gt = objs.Scene()
    utils.loadLabelByJson(gt_path, scene_gt)
    scene_gt.normalize(cameraH=1.6)
    fp_gt = utils.genLayoutFloorMap(scene_gt, (512, 512), 20 / 512)

    utils.showImage([fp_pred, fp_gt])
Code Example #33
    def pipeline(self, image, debug=False, dump_partials=False):
        original = image.copy()
        image_unsidtort = self.unsidtort(image)
        if debug: utils.showImage(image_unsidtort)

        image_thresh = self.threasholdLaneLines(image_unsidtort, debug=debug)
        if debug: utils.showImage(image_thresh)

        image_roi = self.ROI(image_thresh)
        if debug: utils.showImage(image_roi)

        image_bird, image_bird_debug = self.birdsEye(image_roi, debug=debug)
        if debug:
            utils.showImage(image_bird_debug)
            utils.showImage(image_bird)

        image_lanes, lane = self.laneSearch(image_bird, debug=debug)
        if debug: utils.showImage(image_lanes)

        final_image = self.HUD(original, debug=debug)
        if debug: utils.showImage(final_image)

        if dump_partials:
            path_tpl = output_images_pipeline + 'pipeline_partials_{:02d}.jpg'
            images = [
                original, image_unsidtort, image_thresh, image_roi, image_bird,
                image_lanes, final_image
            ]

            for i, img in enumerate(images):
                path = path_tpl.format(i)
                img = utils.normalizeImage(img)
                cv2.imwrite(path, img)

        return final_image
Code Example #34
test_images_path = './test_images/*.jpg'
video_frame_path = './frames/*_1000_*.jpg'

debug = False
test_pipeline = False
test_pipeline_path = test_images_path
#test_pipeline_path = video_frame_path

image_paths = glob.glob(calibration_path + '*.jpg')
camera = Camera()
camera.calibrate(image_paths)

if test_pipeline:
    images = utils.loadImages(test_pipeline_path, cv2.COLOR_BGR2RGB)
    hud = camera.pipeline(images[0], debug=debug, dump_partials=False)
    utils.showImage(hud)
else:
    camera.processVideo('project_video.mp4', debug=debug, live=False)
'''
test_images = list(map(lambda image_path:cv2.imread(image_path),test_images_paths))
test_images = list(map(lambda image:cv2.cvtColor(image,cv2.COLOR_BGR2RGB),test_images))
test_images_grid = list(map(lambda image:utils.drawGrid(image),test_images))

test_images_undist =  list(map(lambda img: camera.unsidtort(img),test_images_grid))

interlaved = []
for i in range(len(test_images)):
    interlaved.append(test_images_grid[i])
    interlaved.append(test_images_undist[i])

utils.showImages(interlaved)