Ejemplo n.º 1
0
def get_lines(img):
  """Detect line segments in a BGR image.

  The image is downscaled so its height becomes SCALE_SIZE (module-level
  constant), converted to grayscale, edge-detected with Canny, and fed to
  the probabilistic Hough transform.

  Returns the line-segment array from cv2.HoughLinesP (None when no lines
  are found).
  """
  height, width, depth = img.shape
  scale = SCALE_SIZE / height
  # cv2.resize takes a (width, height) size and *returns* the resized image;
  # the original passed a preallocated destination, which is the OpenCV 1.x
  # calling convention and does not work with the cv2 API.
  small_img = cv2.resize(img, (int(width * scale), int(height * scale)))
  small_img_gray = cv2.cvtColor(small_img, cv2.COLOR_BGR2GRAY)
  # cv2.Canny (capital C) returns the edge map; the lowercase in-place form
  # used originally does not exist in cv2.
  edges = cv2.Canny(small_img_gray, 50, 200, apertureSize=3)
  # Probabilistic Hough: rho = 1 px, theta = pi/2 (axis-aligned lines only),
  # threshold = 50, minLineLength = 32, maxLineGap = 16. The original passed
  # the Ruby constant OpenCV::CV_HOUGH_PROBABILISTIC, which is not Python.
  lines = cv2.HoughLinesP(edges, 1, np.pi / 2, 50,
                          minLineLength=32, maxLineGap=16)
  return lines
Ejemplo n.º 2
0
			print('Cant load Cascade')

	for(x1,y1,x2,y2) in pallet:
		#draw rectangle aroud detected pallet
		cv2.rectangle(img, (x1,y1), (x1+x2, y1+y2), (0,255,0), 5)
		font = cv2.FONT_HERSHEY_SIMPLEX
		cv2.putText(img, 'Pallet', (x2+10, y2+10), font, 1, (0,255,0), 3, cv2.LINE_AA
		imshow('Obj Det', img)

	# pallet is found
	if len(pallet) != 0:
		#cascade can be release somehow
	#perhaps first do canny then get the roi with the next good lines to the detected objects
		palletAndTruckROI
		# do canny detection to get some clean pics 
		canny = cv2.canny(blankImage, 100, 200)
		cv2.contours
		#here we have to get the pixel value of the upper line of the Truck so we can get the length from the ground to the pallet height
		getHeightOfPallet()
		#getForkToTheRightHeight
		driveTheWayToPallet()
		#set up the motor for the right time
		#release everything not needed
		cap.release()
		break

	# if no pallet  is found
	else:
		#drive a bit forward and try again to recognize it
		#octosonarDontCrash
		BP.set_motor_power(BP.PORT_B, 20)
Ejemplo n.º 3
0
def canny(img, low_threshold, high_threshold):
    """Run Canny edge detection on *img* and return the edge map.

    Fix: the original called ``cv2.canny(low_threshold, high_threshold)`` —
    the API name is ``cv2.Canny`` (capital C) and the image argument was
    missing entirely, so *img* was never used.
    """
    return cv2.Canny(img, low_threshold, high_threshold)
Ejemplo n.º 4
0
    '''
    takes inputs from josm, runs edge detection and 
    outputs ways back to josm
    '''


# grab inputs from josm
lat = argv[1]
long = argv[2]  # NOTE(review): "lon" would be clearer; name kept for compatibility
zoom = argv[3]

# grab sat view from provider (likely bing maps)
bing_api_key = ""

# construct request, per the Bing static-imagery REST pattern:
# https://dev.virtualearth.net/REST/v1/Imagery/Map/Aerial/lat,long/zoom?mapSize=500,500&key=bing_api_key
# (the original called requests.get() with no URL, which raises TypeError)
url = ('https://dev.virtualearth.net/REST/v1/Imagery/Map/Aerial/'
       '%s,%s/%s?mapSize=500,500&key=%s' % (lat, long, zoom, bing_api_key))
img = requests.get(url)
# Reads image from specified path
# NOTE(review): "file" is undefined here — presumably the downloaded response
# should be written to disk (or decoded from img.content) first; confirm.
img = cv2.imread(
    file, 0)  # 0 arg imports image as gray scale as needed for canny function
# cv2.imshow()
# run canny edge detection algorithm
# cv2.Canny is the correct (capitalized) API name; min_val/max_val must be
# defined earlier or by the caller — TODO confirm.
edges = cv2.Canny(img, min_val,
                  max_val)  # image, min, max. aperture, bool edge grad.
# post process edges to remove extras
# filter out areas too small
# filter incomplete ways
# filter shadows
# orthagonalize
# convert to correct format for osm
# send back to josm
Ejemplo n.º 5
0
import sys

# Video source: first command-line argument, falling back to the built-in
# synthetic "chess" preset when no argument is supplied.
try:
    fn = sys.argv[1]
except IndexError:  # bare except would also hide unrelated bugs
    fn = video.presets['chess']


def nothing(*_ignored):
    """No-op callback for cv2.createTrackbar: accepts anything, does nothing."""
    return None


# Interactive Canny demo: two trackbars control the hysteresis thresholds.
cv2.namedWindow('edge')
cv2.createTrackbar('thrs1', 'edge', 2000, 5000, nothing)
cv2.createTrackbar('thrs2', 'edge', 4000, 5000, nothing)

cap = video.create_capture(fn)
while True:
    flag, img = cap.read()
    if not flag:  # grab failure / end of stream — the original ignored it
        break
    # cv.CV_BGR2GRAY is the removed OpenCV 1.x constant; cv2.COLOR_BGR2GRAY
    # is the cv2 equivalent (and avoids depending on the legacy cv module).
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    thrs1 = cv2.getTrackbarPos('thrs1', 'edge')
    thrs2 = cv2.getTrackbarPos('thrs2', 'edge')
    # cv2.Canny (capital C) is the correct API name.
    edge = cv2.Canny(gray, thrs1, thrs2, apertureSize=5)
    vis = img.copy()
    # darken the frame; "vis /= 2" is an error on uint8 arrays under modern
    # NumPy (true division cannot be performed in place on integer dtypes)
    vis //= 2
    vis[edge != 0] = (0, 255, 0)  # overlay edges in green
    cv2.imshow('edge', vis)
    ch = cv2.waitKey(5)
    if ch == 27:  # Esc quits
        break
Ejemplo n.º 6
0
 def EdgeDetection(img):
     """Resize *img* to 400x400 and return its Canny edge map.

     Fixes: the original discarded cv2.resize's return value (resize is not
     in-place), ignored the computed ``dim`` in favour of a hard-coded
     (800, 800), and called the non-existent lowercase ``cv2.canny``.
     """
     width = 400
     height = 400
     dim = (width, height)
     resized = cv2.resize(img, dim)
     return cv2.Canny(resized, 30, 50)
Ejemplo n.º 7
0
def bgcalc():
    """Compute and return the Canny edge map of the module-level ``origin`` image.

    Fixes: the original called ``cv2.canny(300, 600, 5, true)`` — wrong
    (lowercase) function name, no source image argument, and ``true`` is not
    a Python literal — and then discarded the result without returning it.
    """
    im = origin
    # Canny(image, threshold1, threshold2, apertureSize=..., L2gradient=...)
    im = cv2.Canny(im, 300, 600, apertureSize=5, L2gradient=True)
    return im
Ejemplo n.º 8
0
import cv2, cv
import video
import sys

# Use the first CLI argument as the video source; default to the synthetic
# "chess" preset when none is given.
try:
    fn = sys.argv[1]
except IndexError:  # only a missing argument should trigger the fallback
    fn = video.presets['chess']

def nothing(*_unused):
    """Do-nothing trackbar callback required by cv2.createTrackbar."""
    pass

# Interactive Canny demo: trackbars select the two hysteresis thresholds.
cv2.namedWindow('edge')
cv2.createTrackbar('thrs1', 'edge', 2000, 5000, nothing)
cv2.createTrackbar('thrs2', 'edge', 4000, 5000, nothing)

cap = video.create_capture(fn)
while True:
    flag, img = cap.read()
    if not flag:  # end of stream / grab failure — previously ignored
        break
    # cv.CV_BGR2GRAY belongs to the legacy OpenCV 1.x "cv" module; the cv2
    # equivalent is cv2.COLOR_BGR2GRAY.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    thrs1 = cv2.getTrackbarPos('thrs1', 'edge')
    thrs2 = cv2.getTrackbarPos('thrs2', 'edge')
    # cv2.Canny (capital C) is the correct API name.
    edge = cv2.Canny(gray, thrs1, thrs2, apertureSize=5)
    vis = img.copy()
    # halve the brightness; "/=" raises on uint8 arrays under modern NumPy
    # (in-place true division is not allowed for integer dtypes)
    vis //= 2
    vis[edge != 0] = (0, 255, 0)  # paint edge pixels green
    cv2.imshow('edge', vis)
    ch = cv2.waitKey(5)
    if ch == 27:  # Esc exits
        break

Ejemplo n.º 9
0
    object_detector = ObjectDetector(net, detector, transform, num_classes, args.cuda)

    # results file (hard-coded Windows path from the original project)
    txt = open("F:/数据备份/大四上(电脑)/大创1/task_xuedi(pytorch测试用)/android_result.txt", "w")

    image_save = []
    '''for image_id in range(len(demo_set)):
        image_per_starttime = time.time()
        print('##### Start display #####')
        # image_file = os.path.join(args.save_folder, demo_set[image_id])
        image_file = os.path.join('F:/数据备份/大四上(电脑)/大创1/task_xuedi(pytorch测试用)/test_inter/', demo_set[image_id])
        print(image_file)
        image = cv2.imdecode(np.fromfile(image_file, dtype=np.uint8), cv2.IMREAD_COLOR)'''
    # imdecode(np.fromfile(...)) is used instead of cv2.imread so the
    # non-ASCII path loads correctly on Windows
    image = cv2.imdecode(np.fromfile('F:/数据备份/大四上(电脑)/大创1/task_xuedi(pytorch测试用)/test_inter/test.jpg', dtype=np.uint8), cv2.IMREAD_COLOR)
    detect_bboxes = object_detector.predict(image)
    image = cv2.GaussianBlur(image, (3, 3), 0)
    # cv2.Canny (capital C) is the correct API name; lowercase does not exist
    canny = cv2.Canny(image, 60, 100)
    ret, thresh = cv2.threshold(canny, 128, 1, cv2.THRESH_BINARY)
    summ = thresh.sum(axis = 1)  # per-row count of edge pixels
    def smooth(a,WSZ):
        """Moving-average smoothing of a 1-D array, matching MATLAB's smooth().

        a: NumPy 1-D array containing the data to be smoothed
           (must be 1-D; use np.ravel() or np.squeeze() to flatten first)
        WSZ: smoothing window size; must be an odd number,
             as in the original MATLAB implementation
        Returns an array the same length as *a*.
        """
        # central part: full WSZ-wide moving average
        out0 = np.convolve(a,np.ones(WSZ,dtype=int),'valid')/WSZ
        # growing window sizes 1, 3, 5, ... used to average the edges
        r = np.arange(1,WSZ-1,2)
        # leading edge: cumulative sums at odd offsets / window sizes
        start = np.cumsum(a[:WSZ-1])[::2]/r
        # trailing edge: same computation on the reversed tail, flipped back
        stop = (np.cumsum(a[:-WSZ:-1])[::2]/r)[::-1]
        return np.concatenate((  start , out0, stop  ))

    sm = smooth(summ, 19)  # 19-row moving average of the per-row edge counts
    # negate so that scipy's find_peaks locates the *valleys* of sm,
    # keeping detections at least 80 rows apart
    peaks, _ = signal.find_peaks(-sm, distance=80)