Code Example #1
File: train.py  Project: abner0908/cloud_robot
# imports: LinearSVC and joblib are standard; HOG and dataset are
# project-local helper modules (import paths assumed from how they are used)
from sklearn.svm import LinearSVC
import joblib

from hog import HOG
import dataset

dataset_path = "data/digits.csv"
models_path = "models/svm.cpickle_01"

# load the dataset and initialize the data matrix
(digits, target) = dataset.load_digits(dataset_path)
data = []

# initialize the HOG descriptor
hog = HOG(orientations=18, pixelsPerCell=(10, 10),
          cellsPerBlock=(1, 1), normalize=True)

# loop over the images
for image in digits:
    # deskew the image, center it
    image = dataset.deskew(image, 20)
    image = dataset.center_extent(image, (20, 20))

    # describe the image and update the data matrix
    hist = hog.describe(image)
    data.append(hist)

# train the model
model = LinearSVC(random_state=42)
model.fit(data, target)

# dump the model to file
joblib.dump(model, models_path)
print "create model done..."
Code Example #2
	(x, y, w, h) = cv2.boundingRect(c)

	# if the width is at least 7 pixels and the height
	# is at least 20 pixels, the contour is likely a digit
	if w >= 7 and h >= 20:
		# crop the ROI and then threshold the grayscale
		# ROI to reveal the digit
		roi = gray[y:y + h, x:x + w]
		thresh = roi.copy()
		T = mahotas.thresholding.otsu(roi)
		thresh[thresh > T] = 255
		thresh = cv2.bitwise_not(thresh)

		# deskew the image and center its extent
		thresh = dataset.deskew(thresh, 20)
		thresh = dataset.center_extent(thresh, (20, 20))

		cv2.imshow("thresh", thresh)

		# extract features from the image and classify it
		hist = hog.describe(thresh)
		digit = model.predict([hist])[0]
		print("I think that number is: {}".format(digit))

		# draw a rectangle around the digit, then show what the
		# digit was classified as
		cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 1)
		cv2.putText(image, str(digit), (x - 10, y - 10),
			cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 255, 0), 2)
		cv2.imshow("image", image)
		cv2.waitKey(0)
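
The loop body above assumes an outer `for c in cnts:` iterating over contours found on an edge map. A minimal sketch of that preamble, following the usual OpenCV pattern; the Canny thresholds and the OpenCV 4.x findContours signature are assumptions, not taken from this snippet:

gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(blurred, 30, 150)       # edge thresholds assumed

# OpenCV 4.x returns (contours, hierarchy)
(cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                             cv2.CHAIN_APPROX_SIMPLE)

# sort contours left-to-right so the digits are read in order
cnts = sorted(cnts, key=lambda c: cv2.boundingRect(c)[0])

for c in cnts:
    (x, y, w, h) = cv2.boundingRect(c)
    # ... the body shown in Code Example #2 runs here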
Code Example #3
        # crop the ROI and then threshold the grayscale
        # ROI to reveal the digit (Otsu's method as the filter)
        roi = gray[y:y + h, x:x + w]
        thresh = roi.copy()
        T = mahotas.thresholding.otsu(roi)
        thresh[thresh > T] = 255
        thresh = cv2.bitwise_not(thresh)
        
        # deskew the image and center its extent
        thresh = dataset.deskew(thresh, 20)
        thresh = dataset.center_extent(thresh, (20, 20))
                
        cv2.imshow("thresh", thresh)
        
        # extract features from the image and classify it
        hist = hog.describe(thresh)
        digit = model.predict([hist])[0]
        print("I think the number is: %d" % digit)
        numeros.append(digit)
        
        # draw a rectangle around the digit, then show what the
        # digit was classified as
        cv2.rectangle(image, (x, y), (x + w, y + h), 255, 1)
        cv2.putText(image, str(digit), (x - 5, y - 5),
            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
        cv2.imshow("image", image)
        # press a key to continue
        cv2.waitKey(0)

print(numeros)
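
A side note on the `[hist]` wrapping used above: scikit-learn estimators expect a 2D array of shape (n_samples, n_features), so a single HOG descriptor must be wrapped or reshaped before prediction. A minimal illustration, reusing `hist` and `model` from the example:

import numpy as np

hist = np.asarray(hist)                          # single descriptor, shape (n_features,)
digit = model.predict(hist.reshape(1, -1))[0]    # same effect as model.predict([hist])[0]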
Code Example #4
import argparse

from sklearn.svm import LinearSVC
import joblib

# project-local helpers, as in Code Example #1 (import paths assumed)
from hog import HOG
import dataset

# the snippet begins mid-call; the argument parser is reconstructed
# from how args["dataset"] and args["model"] are used below
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True,
                help="path to the dataset file")
ap.add_argument("-m", "--model", required=True,
                help="path to where the model will be stored")
args = vars(ap.parse_args())

# load the dataset and initialize the data matrix
(digits, target) = dataset.load_digits(args["dataset"])
data = []

# initialize the HOG descriptor
hog = HOG(orientations=18,
          pixelsPerCell=(10, 10),
          cellsPerBlock=(1, 1),
          transform=True)

# loop over the images
for image in digits:
    # deskew the image, center it
    image = dataset.deskew(image, 20)
    image = dataset.center_extent(image, (20, 20))

    # describe the image and update the data matrix
    hist = hog.describe(image)
    data.append(hist)

# train the model
model = LinearSVC(random_state=42)
model.fit(data, target)

# dump the model to file
joblib.dump(model, args["model"])
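
Under the reconstructed flags above, the script would be invoked along these lines (the paths are illustrative, borrowed from Code Example #1):

python train.py --dataset data/digits.csv --model models/svm.cpickle_01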
Code Example #5
while True:
    # grab the current frame
    (grabbed, image) = camera.read()

    # if we are viewing a video and we did not grab a
    # frame, then we have reached the end of the video
    if args.get("video") and not grabbed:
        break

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # blur the image before describing it with HOG
    blurred = cv2.GaussianBlur(gray, (3, 3), 0)

    # extract features from the image and classify it
    hist = hog.describe(blurred)
    direccion = le.inverse_transform(model.predict([hist]))[0]
    print("Please: %s" % direccion)

    cv2.putText(image, str(direccion), (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.2,
                (255, 0, 0), 2)
    cv2.imshow("image", image)

    thresh = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    T = mahotas.thresholding.otsu(thresh)
    thresh[thresh > T] = 255
    thresh[thresh < T] = 0
    thresh = cv2.bitwise_not(thresh)

    # show our threshold image
    cv2.imshow("thresh", thresh)