Example #1
def Point_differnce(image, step, points, state):
    height, width = image.shape[:2]
    if state == 1:                          # compare in the vertical direction
        diff_list = []
        for i in range(len(points)):
            point = points[i]
            x, y = int(point[0]), int(point[1])
            # check bounds *before* any unsigned cast; casting a negative
            # coordinate to np.uint0 wraps around and the test always passes
            if y - step >= 0 and y + step <= height - 1:
                # cast to int so the uint8 subtraction cannot wrap around
                diff = abs(int(image[y - step][x]) - int(image[y + step][x]))
                diff_list.append(diff)
        arr_var = np.var(diff_list)           # compute the variance
        return arr_var

    if state == 0:                         # compare in the horizontal direction
        diff_list = []
        for i in range(len(points)):
            point = points[i]
            x, y = int(point[0]), int(point[1])
            if x - step >= 0 and x + step <= width - 1:
                diff = abs(int(image[y][x - step]) - int(image[y][x + step]))
                diff_list.append(diff)
        arr_var = np.var(diff_list)           # compute the variance
        return arr_var
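
A hedged usage sketch on a random grayscale image, sampling points along a horizontal line the same way Line_Difference_v2 below does:

import numpy as np
img = np.random.randint(0, 256, (100, 100), dtype=np.uint8)
pts = np.uint0(np.linspace((1, 50), (98, 50), 10))        # ten (x, y) points on the line y = 50
print(Point_differnce(img, step=2, points=pts, state=1))  # variance of the vertical differences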
def image_to_saliency_map_mdf(image, mean, seg_param_path, fuseweights, sess,
                              s3cnn, sp_in, nn_in, pic_in):
    t_preprocess = 0
    eps = sys.float_info.epsilon
    t_seg = time.time()
    segdata = mult_seg(image, seg_param_path)
    t_seg = time.time() - t_seg
    print('segmentation duration: %f' % t_seg)

    salmap_temp = np.zeros(image.shape[0:2])
    t_net = 0
    for i in range(len(segdata)):
        t_preprocess_temp = time.time()
        temp = np.zeros(image.shape[0:2])
        seg = segdata[str(i)]
        sp, nn, pic = im2mdfin2(image, mean, seg['segmap'], seg['seglist'],
                                seg['neighbour_mat'])
        t_preprocess = t_preprocess + time.time() - t_preprocess_temp
        #sp = np.reshape(np.ravel(mdfin[0:mdfin.__len__():3]),[np.uint16(mdfin.__len__()/3),227,227,3])
        #nn = np.reshape(np.ravel(mdfin[1:mdfin.__len__():3]),[np.uint16(mdfin.__len__()/3),227,227,3])
        #pic = np.reshape(np.ravel(mdfin[2:mdfin.__len__():3]),[np.uint16(mdfin.__len__()/3),227,227,3])
        t_net_temp = time.time()
        labels = np.uint0([])

        prob = np.float32([])

        for j in range(
                np.uint16(1 + len(seg['seglist']) / mdf.MAX_BATCH_SIZE)):
            if j * mdf.MAX_BATCH_SIZE == len(seg['seglist']):
                continue
            tensors = [s3cnn.nnout]
            xdim = (227, 227, 3)
            feed_dict = {
                sp_in: sp[j * mdf.MAX_BATCH_SIZE:(1 + j) * mdf.MAX_BATCH_SIZE],
                nn_in: nn[j * mdf.MAX_BATCH_SIZE:(1 + j) * mdf.MAX_BATCH_SIZE],
                pic_in:
                pic[j * mdf.MAX_BATCH_SIZE:(1 + j) * mdf.MAX_BATCH_SIZE]
            }

            with tf.device('/gpu:0'):

                up = sess.run(tensors, feed_dict=feed_dict)
                labels_temp = np.uint0(np.argmax(up[0], 1))
                labels = np.concatenate((labels, labels_temp))
                prob_temp = np.float32(np.max(up[0], 1))
                prob = np.concatenate((prob, prob_temp))
        t_net = t_net + time.time() - t_net_temp
        for j in range(len(seg['seglist'])):
            if labels[j] == 1:
                prob[j] = 1 - prob[j]
            temp = temp + (prob[j]) * (seg['segmap'] == seg['seglist'][j])

        salmap_temp = fuseweights[i] * temp + salmap_temp
    salmap = np.uint8((salmap_temp - np.min(salmap_temp)) /
                      (np.max(salmap_temp) - np.min(salmap_temp) + eps) * 255)
    return t_preprocess, t_net, salmap
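
The batching loop above walks the superpixel inputs in slices of mdf.MAX_BATCH_SIZE; a small self-contained sketch of the same indexing (with a stand-in batch size) shows why the continue guard is needed when the segment count is an exact multiple of the batch size:

MAX_BATCH_SIZE = 4                       # stand-in for mdf.MAX_BATCH_SIZE
data = list(range(10))                   # pretend these are 10 superpixel inputs
for j in range(int(1 + len(data) / MAX_BATCH_SIZE)):
    if j * MAX_BATCH_SIZE == len(data):  # exact multiple: the last slice would be empty
        continue
    batch = data[j * MAX_BATCH_SIZE:(1 + j) * MAX_BATCH_SIZE]
    print(j, batch)                      # batches of 4, 4 and 2 items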
def Line_Difference_v2(image, step, interal, point_number, state):
    height, width = image.shape[:]
    if state == 1:
        sum_line_difference = []
        for i in range(int(height / 2) // 2):
            if (i + 1) * interal < int(height / 2):
                Pt1 = (1, interal * (i + 1))
                Pt2 = (width - 1, interal * (i + 1))
                Pt3 = (1, height - 1 - interal * (i + 1))
                Pt4 = (width - 1, height - 1 - interal * (i + 1))
                Points12 = np.uint0(np.linspace(Pt1, Pt2, point_number))
                Points34 = np.uint0(np.linspace(Pt3, Pt4, point_number))
                diff_var12 = Point_differnce(image,
                                             step=step,
                                             points=Points12,
                                             state=state)
                diff_var34 = Point_differnce(image,
                                             step=step,
                                             points=Points34,
                                             state=state)
                diff_var = (diff_var12 + diff_var34) / 2
                sum_line_difference.append([diff_var, Pt1, Pt2, Pt3, Pt4])
        sum_diff = [x[0] for x in sum_line_difference]
        max_index = sum_diff.index(max(sum_diff))
        return [sum_line_difference[max_index][1], sum_line_difference[max_index][2], \
               sum_line_difference[max_index][3], sum_line_difference[max_index][4]]

    if state == 0:
        sum_line_difference = []
        for i in range(int(width / 2) // 2):
            if (i + 1) * interal < int(width / 2):
                Pt1 = (interal * (i + 1), 1)
                Pt2 = (interal * (i + 1), height - 1)
                Pt3 = (width - 1 - interal * (i + 1), 1)
                Pt4 = (width - 1 - interal * (i + 1), height - 1)
                Points12 = np.uint0(np.linspace(Pt1, Pt2, point_number))
                Points34 = np.uint0(np.linspace(Pt3, Pt4, point_number))
                diff_var12 = Point_differnce(image,
                                             step=step,
                                             points=Points12,
                                             state=state)
                diff_var34 = Point_differnce(image,
                                             step=step,
                                             points=Points34,
                                             state=state)
                diff_var = (diff_var12 + diff_var34) / 2
                sum_line_difference.append([diff_var, Pt1, Pt2, Pt3, Pt4])
        sum_diff = [x[0] for x in sum_line_difference]
        max_index = sum_diff.index(max(sum_diff))
        return [sum_line_difference[max_index][1], sum_line_difference[max_index][2], \
               sum_line_difference[max_index][3], sum_line_difference[max_index][4]]
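
A hedged usage sketch on a toy edge mask (Box_inside_detect further down calls it the same way):

import numpy as np
# toy 100x120 edge mask: a partial horizontal line at y=20 and a full line at y=79
mask = np.zeros((100, 120), dtype=np.uint8)
mask[20, :60] = 255
mask[79, :] = 255
pts = Line_Difference_v2(mask, step=2, interal=5, point_number=10, state=1)
print(pts)  # [Pt1, Pt2, Pt3, Pt4]: endpoints of the scan-line pair with the largest variance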
def save_SLIC_segmentations_MSRA(images, in_dir, out_dir, NSP):
    if 'SLIC_Segs' not in os.listdir(out_dir):
        os.mkdir(out_dir + '/SLIC_Segs')
    if str(NSP) not in os.listdir(out_dir + '/SLIC_Segs'):
        os.mkdir(out_dir + '/SLIC_Segs/' + str(NSP))
    for fimg in images:
        img = sio.imread(in_dir + fimg)
        gt = sio.imread(in_dir + fimg[0:-3] + 'png') / 255
        SLIC_seg = np.uint16(
            slic_wrap(img, NSP, 10, sigma=1, enforce_connectivity=True))
        saliency = []
        segments = []

        segments_temp = np.unique(SLIC_seg)
        for segment in segments_temp:
            sal_temp = calc_saliency_score(segment, SLIC_seg, gt)
            if sal_temp >= 0:
                segments.append(segment)
                saliency.append(np.uint0(sal_temp))
        fslic = open(
            out_dir + '/SLIC_Segs/' + str(NSP) + '/' + fimg[0:-4] + '.slic',
            'wb')
        dill.dump(SLIC_seg, fslic)
        dill.dump(segments, fslic)
        dill.dump(saliency, fslic)
        fslic.close()
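
The three dill.dump calls store the objects sequentially, so a reader must call dill.load in the same order; a minimal sketch of the matching reader (the path below is hypothetical):

import dill
with open('out/SLIC_Segs/200/image001.slic', 'rb') as fslic:  # hypothetical file
    SLIC_seg = dill.load(fslic)
    segments = dill.load(fslic)
    saliency = dill.load(fslic)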
def save_fseg_segmentations_MSRA(images,
                                 in_dir,
                                 out_dir,
                                 param_path,
                                 train=False):
    fparams = np.load(param_path, allow_pickle=True).item()
    if 'f_Segs' not in os.listdir(out_dir):
        os.mkdir(out_dir + '/f_Segs')

    for fimg in images:
        img = sio.imread(in_dir + fimg)
        gt = sio.imread(in_dir + fimg[0:-3] + 'png') / 255
        segs = {}
        for g in range(0, 15):

            f_seg = np.zeros(img.shape[0:2], dtype=np.int32)
            felseg(img, f_seg, fparams['sigma'][g],
                   float(fparams['scale'][g]),
                   int(fparams['min_size'][g]))
            saliency = []
            segments = []
            f_seg += 1
            segments_temp = np.unique(f_seg)
            for segment in segments_temp:
                sal_temp = calc_saliency_score(segment, f_seg, gt)
                if not train or sal_temp >= 0:
                    segments.append(segment)
                    saliency.append(np.uint0(sal_temp))
            segs[str(g)] = {}
            segs[str(g)]['segmap'] = f_seg
            segs[str(g)]['seglist'] = segments
            segs[str(g)]['labels'] = saliency
        np.save(out_dir + '/f_Segs/' + fimg[0:-4], segs)
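
np.save on a dict pickles it into a 0-d object array, which is why the function reads fparams back with np.load(...).item(); on NumPy 1.16.3 and later the load needs allow_pickle=True. A sketch of reading one saved segmentation back (hypothetical path):

import numpy as np
segs = np.load('out/f_Segs/image001.npy', allow_pickle=True).item()
f_seg = segs['0']['segmap']     # int32 segmentation map
labels = segs['0']['labels']    # per-segment saliency labels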
Example #8
def gen_random_locations(size):
    ''' generate random locations that are not over dark parts of the screen '''
    im = np.mean(pyautogui.screenshot(), axis=2)
    sample = np.uint0(np.random.rand(size * 2, 2) * SIZE)
    try:
        return sample[im[tuple(sample.T[::-1])] > 3][:size]
    except IndexError:
        # if there are a lot of black pixels try with larger sample
        return gen_random_locations(size * 2)[:size]
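
im[tuple(sample.T[::-1])] picks one pixel per (x, y) row: transposing yields one array of xs and one of ys, and reversing swaps them into (row, col) order for fancy indexing. A small self-contained demonstration:

import numpy as np
im = np.arange(12).reshape(3, 4)      # rows are y, columns are x
sample = np.array([[1, 0], [3, 2]])   # two (x, y) points
print(im[tuple(sample.T[::-1])])      # [ 1 11] == [im[0, 1], im[2, 3]]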
Example #9
def generate(self):
    yield 1
    yield 2
    yield 10
    yield 100
    if USE_NUMPY:
        yield np.int16(10)
        yield np.int8(4)
        yield np.int64(5)
        yield np.uint0(1)
def calc_saliency_score(segment, slic, gt):
    mask = np.uint0(slic == segment)
    pixels = np.sum(mask)
    sal = -1  # -1 marks an ambiguous segment (overlap between 0.3 and 0.7)
    sal_temp = np.sum(mask * gt) / pixels
    if sal_temp > 0.7:
        sal = 1
    elif sal_temp < 0.3:
        sal = 0
    return sal
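
A quick check on a toy segmentation, showing two of the three possible return values (1 salient, 0 background, -1 ambiguous):

import numpy as np
slic = np.array([[1, 1], [2, 2]])
gt = np.array([[1, 1], [0, 0]])            # binary ground truth
print(calc_saliency_score(1, slic, gt))    # overlap 1.0 -> 1
print(calc_saliency_score(2, slic, gt))    # overlap 0.0 -> 0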
def Box_inside_detect(image, top_left_x, bottom_right_x):  # the input image must be a single-channel grayscale image
    roi = image[top_left_x[1]:bottom_right_x[1],
                top_left_x[0]:bottom_right_x[0]]
    h, w = roi.shape[:]

    # Sobel edge detection
    x = cv2.Sobel(roi, cv2.CV_16S, 1, 0)
    y = cv2.Sobel(roi, cv2.CV_16S, 0, 1)
    absX = cv2.convertScaleAbs(x)  # convert back to uint8
    absY = cv2.convertScaleAbs(y)
    mask = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)
    ret, mask = cv2.threshold(mask, 50, 250, cv2.THRESH_BINARY)

    # dilate the Sobel result (the 2x2 kernel thickens the edges)
    kernel = np.ones((2, 2), np.uint8)
    mask = cv2.dilate(mask, kernel)

    new_Pt_1 = Line_Difference_v2(mask,
                                  step=2,
                                  interal=5,
                                  point_number=10,
                                  state=1)
    new_Pt_2 = Line_Difference_v2(mask,
                                  step=1,
                                  interal=5,
                                  point_number=10,
                                  state=0)

    height_pt = abs(new_Pt_1[0][1] - new_Pt_1[2][1])
    width_pt = abs(new_Pt_2[0][0] - new_Pt_2[2][0])

    # area ratio of the detected box, as a percentage
    area = (height_pt * width_pt) / (w * h) * 100

    rect_corner = []
    if area > 50.0:
        for i in range(len(new_Pt_1) // 2):
            j = i * 2
            line1 = (new_Pt_1[j][0], new_Pt_1[j][1], new_Pt_1[j + 1][0],
                     new_Pt_1[j + 1][1])
            line2 = (new_Pt_2[j][0], new_Pt_2[j][1], new_Pt_2[j + 1][0],
                     new_Pt_2[j + 1][1])
            point_corner = np.uint0(cross_point(line1, line2))
            rect_corner.append(point_corner)

    if len(rect_corner) == 2:
        #cv2.rectangle(roi, (rect_corner[0][0], rect_corner[0][1]), (rect_corner[1][0], rect_corner[1][1]), (0, 0, 255),2)
        rect_corner[0][0] += top_left_x[0]
        rect_corner[0][1] += top_left_x[1]
        rect_corner[1][0] += top_left_x[0]
        rect_corner[1][1] += top_left_x[1]
        return rect_corner
    else:
        return None
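
cross_point is not shown in this listing; judging from the call site it takes two lines given as (x1, y1, x2, y2) tuples and returns their intersection point. A minimal sketch under that assumption:

def cross_point(line1, line2):
    # hypothetical stand-in: intersection of two infinite lines,
    # each defined by two points packed as (x1, y1, x2, y2)
    x1, y1, x2, y2 = line1
    x3, y3, x4, y4 = line2
    denom = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
    if denom == 0:
        return None  # parallel lines
    det1 = x1 * y2 - y1 * x2
    det2 = x3 * y4 - y3 * x4
    px = (det1 * (x3 - x4) - (x1 - x2) * det2) / denom
    py = (det1 * (y3 - y4) - (y1 - y2) * det2) / denom
    return (px, py)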
Example #12
def generate(self):
    yield -100
    yield -1
    yield 0
    yield 1
    yield 100
    if USE_NUMPY:
        yield np.int16(-10)
        yield np.int8(-1)
        yield np.int64(0)
        yield np.uint0(1)
Example #15
def shi_tomasi(image):
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)

    corners = cv.goodFeaturesToTrack(gray, 25, 0.01, 10)
    corners = np.uint0(corners)
    print(corners.shape)

    for corner in corners:
        x, y = corner.ravel()
        cv.circle(image, (x, y), 3, (0, 0, 255), -1)

    cv.imshow("corner", image)
Example #16
def _getLineImg(length, direction='h', lineWidth=5):
    """Crea una imagen con una linea, para realizar la convolución
	es más una función interna del script que otra cosa.
	input args:
	length   : entero, largo de la imagen en la dirección que se extiende la linea
	direction: char 'v' o 'h' dirección vertical u horizontal respectivamente.
	linewidth: ancho de la linea que creará"""
    cover = 3  # ancho alrededor de linea
    cen = np.uint0(cover + lineWidth / 2)
    if direction == 'h':
        imageShape = [lineWidth + 2 * cover, length]
        args = [(0, cen), (imageShape[1], cen)]

    if direction == 'v':
        imageShape = [length, lineWidth + 2 * cover]
        args = [(cen, 0), (cen, imageShape[0])]

    blackImage = np.zeros(imageShape)

    return cv2.line(blackImage, *args, 255, lineWidth)
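
The docstring says the line image is meant for convolution; a hedged usage sketch with cv2.filter2D (the input image here is a random stand-in):

import cv2
import numpy as np
kernel = _getLineImg(31, direction='h', lineWidth=5)
kernel = np.float32(kernel) / kernel.sum()            # normalize the kernel
img = np.random.randint(0, 256, (100, 100), dtype=np.uint8)
response = cv2.filter2D(np.float32(img), -1, kernel)  # strong response along horizontal lines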
Example #18
def deal_contours_image(image_path):
    # image = cv2.imread(image_path, 1)
    image = cv2.imdecode(np.fromfile(image_path, dtype=np.uint8),
                         cv2.IMREAD_UNCHANGED)
    if opt.show_image:
        cv2.imshow('crc', image)
    # cv2.imshow("ori", image)

    # keep a copy of the original image for display
    img = image.copy()

    # keep a copy of the original image for cropping patches
    img1 = image.copy()

    image = image[:, :, 2]
    image_name = os.path.split(image_path)[-1]
    # thresholded image
    # thresh_image = threshTwoPeaks1(image)
    # cv2.imshow('thresh_image',thresh_image)
    print(image_name)
    # Gaussian smoothing
    image = cv2.GaussianBlur(image, (3, 3), 0.5)
    # dilation to filter out thin vertical edge lines
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 1))
    image = cv2.dilate(image, kernel)
    # cv2.imshow("dilate", image)
    # edge detection to find the prominent contours
    image = cv2.Canny(image, 60, 80)
    if opt.show_image:
        cv2.namedWindow("Canny", flags=1)
        cv2.imshow("Canny", image)
    # dilation merges adjacent edge lines, for the external-contour search below
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (20, 20))
    image = cv2.dilate(image, kernel)
    # cv2.imshow('canny', image)
    # find the external contours
    hc = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = hc[1]
    #print('number of contours:', len(contours))
    error_image_list = []
    total_num = 0
    for i in range(len(contours)):
        # draw the contour
        # cv2.drawContours(img, contours, i, 255, 2)
        # get the minimal rotated rectangle
        # points = cv2.minAreaRect(contours[i])
        # # compute the 4 vertex coordinates of the rotated rectangle
        # rect = cv2.boxPoints(points)

        # compute the minimal upright bounding rectangle
        rect = cv2.boundingRect(contours[i])

        # data type conversion
        rect = np.uint0(rect)
        # draw the rectangle
        # 4 vertices of the rotated rectangle
        # cv2.drawContours(img, [rect], 0, (0, 255, 0), 2)
        # diagonal corners of the upright rectangle
        x1 = rect[0]
        x2 = rect[0] + rect[2]
        y1 = rect[1]
        y2 = rect[1] + rect[3]
        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 1)
        global num
        num = num + 1
        if rect[3] > 600:
            continue
        flag = do_onnx_predict(img1[y1:y2, x1:x2])
        if flag == 1:
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 1)
            error_image_list.append(img1[y1:y2, x1:x2])
        total_num += 1

    if opt.show_image:
        show_image(image_name, img)
    error_num = len(error_image_list)
    print("出错区域数量:{}/{}".format(error_num, total_num))
    if error_num > 0:
        cv2.imshow("原图", img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
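
contours = hc[1] matches the 3-tuple (image, contours, hierarchy) that findContours returns in OpenCV 3.x; OpenCV 4.x returns only (contours, hierarchy), which would make hc[1] the hierarchy. A version-agnostic slice works on both:

hc = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours, hierarchy = hc[-2:]   # last two elements in both OpenCV 3.x and 4.x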
Example #19
reveal_type(np.object0())  # E: numpy.object_
reveal_type(np.void0(0))  # E: numpy.void

reveal_type(np.byte())  # E: {byte}
reveal_type(np.short())  # E: {short}
reveal_type(np.intc())  # E: {intc}
reveal_type(np.intp())  # E: {intp}
reveal_type(np.int0())  # E: {intp}
reveal_type(np.int_())  # E: {int_}
reveal_type(np.longlong())  # E: {longlong}

reveal_type(np.ubyte())  # E: {ubyte}
reveal_type(np.ushort())  # E: {ushort}
reveal_type(np.uintc())  # E: {uintc}
reveal_type(np.uintp())  # E: {uintp}
reveal_type(np.uint0())  # E: {uintp}
reveal_type(np.uint())  # E: {uint}
reveal_type(np.ulonglong())  # E: {ulonglong}

reveal_type(np.half())  # E: {half}
reveal_type(np.single())  # E: {single}
reveal_type(np.double())  # E: {double}
reveal_type(np.float_())  # E: {double}
reveal_type(np.longdouble())  # E: {longdouble}
reveal_type(np.longfloat())  # E: {longdouble}

reveal_type(np.csingle())  # E: {csingle}
reveal_type(np.singlecomplex())  # E: {csingle}
reveal_type(np.cdouble())  # E: {cdouble}
reveal_type(np.complex_())  # E: {cdouble}
reveal_type(np.cfloat())  # E: {cdouble}
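
These expectations document that np.uint0 and np.int0 are plain aliases of np.uintp and np.intp. The 0-suffixed scalar aliases (np.object0, np.void0, np.int0, np.uint0, np.str0, ...) were deprecated in NumPy 1.24 and removed in NumPy 2.0, so new code should name the target types directly:

import numpy as np
i = np.intp(1)    # instead of np.int0(1)
u = np.uintp(1)   # instead of np.uint0(1)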
Example #20
                          return_schema=True)

    assert actual == {"a": [1, 4], "b": ([2, 5], [3, 6])}

    assert schema == {"a": None, "b": (None, None)}


@pytest.mark.parametrize(
    "value",
    [
        np.int0(1),
        np.int8(-2),
        np.int16(-5),
        np.int32(+32),
        np.int64(120),
        np.uint0(1),
        np.uint8(2),
        np.uint16(5),
        np.uint32(+32),
        np.uint64(120),
        np.float16(-2.0),
        np.float32(-2.0),
        np.float64(-2.0),
        np.float128(-2.0),
        np.array(2.0),
        np.array([1, 2, 3]),
        np.array([[1], [2], [3]]),
    ],
)
def test_json_nump_default__roundtrip(value):
    actual = json.loads(json.dumps(value, default=json_numpy_default))
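
json_numpy_default comes from the module under test and is not shown here; a minimal sketch of such a default hook, assuming it maps NumPy scalars and arrays onto plain Python values:

import numpy as np

def json_numpy_default(obj):
    if isinstance(obj, np.ndarray):
        return obj.tolist()    # arrays become (nested) lists
    if isinstance(obj, np.generic):
        return obj.item()      # NumPy scalars become Python scalars
    raise TypeError('%r is not JSON serializable' % type(obj))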
Example #21
np.object0()
np.void0(0)

np.byte()
np.short()
np.intc()
np.intp()
np.int0()
np.int_()
np.longlong()

np.ubyte()
np.ushort()
np.uintc()
np.uintp()
np.uint0()
np.uint()
np.ulonglong()

np.half()
np.single()
np.double()
np.float_()
np.longdouble()
np.longfloat()

np.csingle()
np.singlecomplex()
np.cdouble()
np.complex_()
np.cfloat()
Example #22
reveal_type(np.unicode_())  # E: numpy.str_
reveal_type(np.str0())  # E: numpy.str_

reveal_type(np.byte())  # E: numpy.signedinteger[numpy.typing._
reveal_type(np.short())  # E: numpy.signedinteger[numpy.typing._
reveal_type(np.intc())  # E: numpy.signedinteger[numpy.typing._
reveal_type(np.intp())  # E: numpy.signedinteger[numpy.typing._
reveal_type(np.int0())  # E: numpy.signedinteger[numpy.typing._
reveal_type(np.int_())  # E: numpy.signedinteger[numpy.typing._
reveal_type(np.longlong())  # E: numpy.signedinteger[numpy.typing._

reveal_type(np.ubyte())  # E: numpy.unsignedinteger[numpy.typing._
reveal_type(np.ushort())  # E: numpy.unsignedinteger[numpy.typing._
reveal_type(np.uintc())  # E: numpy.unsignedinteger[numpy.typing._
reveal_type(np.uintp())  # E: numpy.unsignedinteger[numpy.typing._
reveal_type(np.uint0())  # E: numpy.unsignedinteger[numpy.typing._
reveal_type(np.uint())  # E: numpy.unsignedinteger[numpy.typing._
reveal_type(np.ulonglong())  # E: numpy.unsignedinteger[numpy.typing._

reveal_type(np.half())  # E: numpy.floating[numpy.typing._
reveal_type(np.single())  # E: numpy.floating[numpy.typing._
reveal_type(np.double())  # E: numpy.floating[numpy.typing._
reveal_type(np.float_())  # E: numpy.floating[numpy.typing._
reveal_type(np.longdouble())  # E: numpy.floating[numpy.typing._
reveal_type(np.longfloat())  # E: numpy.floating[numpy.typing._

reveal_type(np.csingle())  # E: numpy.complexfloating[numpy.typing._
reveal_type(np.singlecomplex())  # E: numpy.complexfloating[numpy.typing._
reveal_type(np.cdouble())  # E: numpy.complexfloating[numpy.typing._
reveal_type(np.complex_())  # E: numpy.complexfloating[numpy.typing._
reveal_type(np.cfloat())  # E: numpy.complexfloating[numpy.typing._
def im2mdfin(img, mean, segmap, segments):

    result = MDFInData()
    mean_image = sp.misc.imresize(mean, img.shape)

    #Superpixel segmentation - to be replaced by other segmentation if necessary
    #SLIC_seg = slic_wrap(img, nsp, 10, sigma=1, enforce_connectivity=True)
    #segments = np.unique(SLIC_seg)
    #numSP = 0

    for SPi in range(len(segments)):
        pair = MDFInRecord()
        curr_sp = segments[SPi]
        sp_mask = np.uint0(segmap == curr_sp)
        indices = np.where((segmap == curr_sp) != 0)
        bb = np.array([[np.min(indices[0]),
                        np.max(indices[0])],
                       [np.min(indices[1]),
                        np.max(indices[1])]])
        #extracting only the superpixel
        seg_img = np.copy(img[bb[0, 0]:bb[0, 1], bb[1, 0]:bb[1, 1]])
        mean_seg = np.copy(mean_image[bb[0, 0]:bb[0, 1], bb[1, 0]:bb[1, 1]])
        local_seg = segmap[bb[0, 0]:bb[0, 1], bb[1, 0]:bb[1, 1]]
        #zeroing area around superpixel
        seg_img[local_seg != curr_sp, :] = 0
        mean_seg[local_seg != curr_sp, :] = 0
        #num_pixels = np.sum(local_seg == curr_sp)
        seg_img = seg_img - mean_seg
        #GT_label = np.copy(gt[bb[0,0]:bb[0,1],bb[1,0]:bb[1,1]])
        #GT_label[local_seg != curr_sp]=0
        #saliency_score = np.sum(GT_label/255)/num_pixels
        #Saliency score is deemed reliable so we can add it here
        #if saliency_score > 0.7 or saliency_score < 0.3:
        #numSP = numSP+1
        #finding the neighbor segments
        neighbors = np.unique(local_seg)
        #extracting locations of neighbor segments in image
        ix = np.where(np.in1d(segmap.ravel(), neighbors).reshape(segmap.shape))
        #calculating a bounding box over neighbor superpixels
        bb_mid = np.array([[np.min(ix[0]), np.max(ix[0])],
                           [np.min(ix[1]), np.max(ix[1])]])
        #cropping the bounding box - this is the input to the 2nd mini CNN
        bounding_box_second = np.copy(img[bb_mid[0, 0]:bb_mid[0, 1],
                                          bb_mid[1, 0]:bb_mid[1, 1]])
        #mean subtraction on region B
        bounding_box_second = bounding_box_second - mean_image[bb_mid[
            0, 0]:bb_mid[0, 1], bb_mid[1, 0]:bb_mid[1, 1]]
        #resizing superpixel to net input size
        pair.SP_Region = np.array(sp.misc.imresize(
            seg_img, [227, 227, 3]))  #,dtype = np.uint8)
        #resizing neighborhood to net input size
        pair.SP_Neighbor = np.array(
            sp.misc.imresize(bounding_box_second,
                             [227, 227, 3]))  #,dtype = np.uint8)
        #picture with segment masked
        picture = np.copy(img) - mean_image
        picture[segmap == curr_sp, :] = 0
        pair.Pic = np.array(sp.misc.imresize(
            picture, [227, 227, 3]))  #,dtype = np.uint8)
        #pair.saliency = round(saliency_score)
        pair.SP_mask = sp.misc.imresize(sp_mask, [227, 227, 3])
        result.segments.append(pair)
    return result
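
scipy.misc.imresize (used throughout im2mdfin) was deprecated in SciPy 1.0 and removed in SciPy 1.3, so this function only runs on old SciPy installs. A replacement sketch in the spirit of SciPy's own deprecation notice, using Pillow:

import numpy as np
from PIL import Image

def imresize(arr, size):
    # size is given as [height, width, channels] in the calls above;
    # PIL's resize expects (width, height)
    return np.array(Image.fromarray(np.uint8(arr)).resize((size[1], size[0])))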
import cv2
import numpy as np 

img = cv2.imread('pic1.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
corners = cv2.goodFeaturesToTrack(gray, 25, 0.01, 10) #(img, max_no_of_Corners, quality_level, min_distance)
corners = np.uint0(corners)               # cast to np.uintp (an unsigned integer type), not int64

for i in corners:
    x, y = i.ravel()
    cv2.circle(img, (x,y), 3, 255, -1)

cv2.imshow('dst', img)

if cv2.waitKey(0) & 0xFF == 27:
    cv2.destroyAllWindows()
Example #25
def deal_contours_image(image_path):
    # image = cv2.imread(image_path, 1)
    image = cv2.imdecode(np.fromfile(image_path, dtype=np.uint8),
                         cv2.IMREAD_UNCHANGED)
    if opt.show_image:
        cv2.imshow('crc', image)
    # cv2.imshow("ori", image)

    # keep a copy of the original image for display
    img = image.copy()

    # keep a copy of the original image for cropping patches
    img1 = image.copy()

    image = image[:, :, 2]
    image_name = os.path.split(image_path)[-1]
    # thresholded image
    # thresh_image = threshTwoPeaks1(image)
    # cv2.imshow('thresh_image',thresh_image)
    print(image_name)
    # Gaussian smoothing
    image = cv2.GaussianBlur(image, (3, 3), 0.5)
    # median filtering would remove salt-and-pepper noise, but it is slow
    # image = mediaBlur(image,(3,3))
    # dilation to filter out thin vertical edge lines
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 1))
    image = cv2.dilate(image, kernel)
    # cv2.imshow("dilate", image)
    # edge detection to find the prominent contours
    image = cv2.Canny(image, 60, 80)
    if opt.show_image:
        cv2.namedWindow("Canny", flags=1)
        cv2.imshow("Canny", image)
    # dilation merges adjacent edge lines, for the external-contour search below
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (20, 20))
    image = cv2.dilate(image, kernel)
    # cv2.imshow('canny', image)
    # find the external contours
    hc = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = hc[1]
    print('number of contours:', len(contours))
    for i in range(len(contours)):
        # draw the contour
        # cv2.drawContours(img, contours, i, 255, 2)
        # get the minimal rotated rectangle
        # points = cv2.minAreaRect(contours[i])
        # # compute the 4 vertex coordinates of the rotated rectangle
        # rect = cv2.boxPoints(points)

        # compute the minimal upright bounding rectangle
        rect = cv2.boundingRect(contours[i])

        print(rect)
        # data type conversion
        rect = np.uint0(rect)
        # draw the rectangle
        # 4 vertices of the rotated rectangle
        # cv2.drawContours(img, [rect], 0, (0, 255, 0), 2)
        # diagonal corners of the upright rectangle
        x1 = rect[0]
        x2 = rect[0] + rect[2]
        y1 = rect[1]
        y2 = rect[1] + rect[3]
        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 1)
        global num
        num = num + 1
        if rect[3] > 600:
            continue
        cv2.imwrite('./datas/{}_A.jpg'.format(num),
                    img1[y1:y2, x1:x2])  ###### change the image save path here
    if opt.show_image:
        show_image(image_name, img)
Example #26
img_contour, contours, hierarchys = cv2.findContours(
    img_dilate, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # find the contours
"""================ contour processing: take each contour's bounding rectangle and, subject to an aspect-ratio constraint, keep the largest rectangle area as the QR code region ================"""
Area = 0
for i in range(len(contours)):
    x, y, w, h = cv2.boundingRect(contours[i])  # bounding rectangle of the contour
    if ((w / h > 0.8) & (h / w > 0.8)):  # subject to the aspect-ratio constraint, keep the largest area
        if (Area < w * h):
            Area = w * h
            index = i
"""绘制了二维码的外包矩形以及最小外包矩形"""
x, y, w, h = cv2.boundingRect(contours[index])  #轮廓外包正矩形
cv2.rectangle(img_color, (x, y), (x + w, y + h), (0, 0, 255), 2)
rect = cv2.minAreaRect(contours[index])  # minimal (rotated) bounding rectangle of the contour
box = cv2.boxPoints(rect)  # convert the rectangle description into its four vertices
box = np.uint0(box)
cv2.drawContours(img_color, [box], 0, (255, 0, 0), 2)
"""创建只有二维码区域的mask,然后在mask再进行一次查找轮廓,通过层级关系定位出三个定位点,并绘制矩形和连接三个定位点"""
img_mask = np.zeros(img_ostu.shape, np.uint8)
img_mask[y:y + h, x:x + w] = img_ostu[y:y + h, x:x + w]
mask_contour, contours_mask, hierarchys_mask = cv2.findContours(
    img_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
hierarchys_mask = hierarchys_mask[0]
# select the contours that contain a child hierarchy level
found = []
for i in range(len(contours_mask)):
    k = i
    count = 0
    while hierarchys_mask[k][2] != -1:
        k = hierarchys_mask[k][2]
        count += 1