Example #1
# import the necessary packages
from sklearn.externals import joblib
from pyimagesearch.hog import HOG
import argparse
import imutils
import mahotas
import cv2

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required = True,
	help = "path to where the model will be stored")
ap.add_argument("-i", "--image", required = True,
	help = "path to the image file")
args = vars(ap.parse_args())

# load the model
model = joblib.load(args["model"])

# initialize the HOG descriptor
hog = HOG(orientations = 18, pixelsPerCell = (10, 10),
	cellsPerBlock = (1, 1), transform = True, block_norm="L2-Hys")

# load the image and convert it to grayscale
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# blur the image, find edges, and then find contours along
# the edged regions
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(blurred, 30, 150)
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)

# sort the contours by their x-axis position, ensuring
# that we read the numbers from left to right
cnts = sorted([(c, cv2.boundingRect(c)[0]) for c in cnts], key = lambda x: x[1])
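Example #1 stops right after sorting the contours. Below is a minimal continuation sketch, not part of the original snippet: it assumes `from pyimagesearch import dataset` is available as in Example #2, and the size filter and Otsu threshold are illustrative choices.

# NOTE: assumed continuation, not in the original snippet
for (c, _) in cnts:
    # compute the bounding box and skip regions too small to be digits
    # (the 7x20 minimum is a heuristic, not taken from the snippet above)
    (x, y, w, h) = cv2.boundingRect(c)
    if w < 7 or h < 20:
        continue

    # extract the ROI, threshold it with Otsu, and invert so the digit is white
    roi = gray[y:y + h, x:x + w]
    thresh = roi.copy()
    T = mahotas.thresholding.otsu(roi)
    thresh[thresh > T] = 255
    thresh = cv2.bitwise_not(thresh)

    # deskew and center the digit (assumes the dataset helpers from Example #2)
    thresh = dataset.deskew(thresh, 20)
    thresh = dataset.center_extent(thresh, (20, 20))

    # describe the digit with HOG and classify it with the loaded model
    hist = hog.describe(thresh)
    digit = model.predict([hist])[0]
    print("Predicted digit: %s" % digit)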
Example #2
# import the necessary packages
from sklearn.externals import joblib
from sklearn.svm import LinearSVC
from pyimagesearch.hog import HOG
from pyimagesearch import dataset
import argparse

dataset_path = "data/digits.csv"
models_path = "models/svm.cpickle_01"

# load the dataset and initialize the data matrix
(digits, target) = dataset.load_digits(dataset_path)
data = []

# initialize the HOG descriptor
hog = HOG(orientations=18, pixelsPerCell=(10, 10),
          cellsPerBlock=(1, 1), normalize=True)

# loop over the images
for image in digits:
    # deskew the image, center it
    image = dataset.deskew(image, 20)
    image = dataset.center_extent(image, (20, 20))

    # describe the image and update the data matrix
    hist = hog.describe(image)
    data.append(hist)

# train the model
model = LinearSVC(random_state=42)
model.fit(data, target)
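The `models_path` defined at the top of Example #2 is never used in the visible code. A one-line sketch of the presumed final step, persisting the fitted classifier with the same `joblib` import:

# NOTE: assumed final step - save the trained LinearSVC to the path defined above
joblib.dump(model, models_path)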
Example #3
                "--dataset",
                required=True,
                help="path to the dataset file")
ap.add_argument("-m",
                "--model",
                required=True,
                help="path to where the model will be stored")
args = vars(ap.parse_args())

# load the dataset and initialize the data matrix
(digits, target) = dataset.load_digits(args["dataset"])
data = []

# initialize the HOG descriptor
hog = HOG(orientations=18,
          pixelsPerCell=(10, 10),
          cellsPerBlock=(1, 1),
          transform=True)

# loop over the images
for image in digits:
    # deskew the image, center it
    image = dataset.deskew(image, 20)
    image = dataset.center_extent(image, (20, 20))

    # describe the image and update the data matrix
    hist = hog.describe(image)
    data.append(hist)

# train the model
model = LinearSVC(random_state=42)
model.fit(data, target)
    frame = imutils.resize(frame, width = 500)
    frameClone = frame.copy()
    mask = np.zeros(frame.shape[:2], dtype = "uint8")
    (cX, cY) = (frame.shape[1] // 2, frame.shape[0] // 2)
    cv2.rectangle(frameClone, (cX - 134, cY - 74), (cX + 102, cY - 40), (0, 0, 255), 1)
    cv2.imshow("Toma 1", frameClone)
    cv2.imwrite("images/newimage.png", frame)

    if cv2.waitKey(1) == ord("q"):
        break
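# NOTE: assumed cleanup, not in the original snippet - the capture loop above
# exits on "q" without releasing resources; `camera` refers to the
# cv2.VideoCapture object created in code not shown here (the name is hypothetical)
camera.release()
cv2.destroyAllWindows()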
# load the model
model = open(args["model"], "rb").read()
model = cPickle.loads(model)

# initialize the HOG descriptor
hog = HOG(orientations = 18, pixelsPerCell = (9, 9),
          cellsPerBlock = (1, 1), normalize = True)

# load the captured image
image = cv2.imread("images/newimage.png")

# apply filters (add, subtract, bitwise NOT)
M = np.ones(image.shape, dtype = "uint8") * 10
added = cv2.add(image, M)
M = np.ones(image.shape, dtype = "uint8") * 20
subtracted = cv2.subtract(image, M)
cv2.imshow("Subtracted", subtracted)
gray = cv2.cvtColor(subtracted, cv2.COLOR_BGR2GRAY)

bitwise_not_result = cv2.bitwise_not(added, subtracted)
cv2.imshow("not", bitwise_not_result)
# loop over the training image paths
for imagen_entrenamiento in imagenes_entrenamiento:
    # load the image and extract its label from the filename
    image = cv2.imread(imagen_entrenamiento)
    target.append(imagen_entrenamiento.split("_")[-2])
    #print imagen_entrenamiento.split("_")[-2]
    #print "."

le = LabelEncoder()
target = le.fit_transform(target)

#################################################################################

# initialize the HOG descriptor
hog = HOG(orientations=18,
          pixelsPerCell=(10, 10),
          cellsPerBlock=(1, 1),
          normalize=True)
"""
# load the image and convert it to grayscale
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# blur the image, find edges, and then find contours along
# the edged regions
blurred = cv2.GaussianBlur(gray, (3, 3), 0)

# extract features from the image and classify it
hist = hog.describe(blurred)
direccion = le.inverse_transform(model.predict([hist]))[0]
#le.inverse_transform(model.predict(features))[0]
print(" Por favor: %s" % (direccion))
Example #6
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])

y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

saver = tf.train.Saver()
saver.restore(sess, "/Users/ryanzotti/Documents/repos/OpenCvDigits/tf_model/model.ckpt")

# load the model
model = joblib.load(args["model"])

# initialize the HOG descriptor
hog = HOG(orientations = 18, pixelsPerCell = (10, 10),
    cellsPerBlock = (1, 1), normalize = True)

# load the image and convert it to grayscale
image = cv2.imread(args["image"])

scale_factor = 1
if image.shape[1] // 600 > 0:
    scale_factor = image.shape[1] // 600
newx, newy = int(image.shape[1] / scale_factor), int(image.shape[0] / scale_factor)
image = cv2.resize(image, (newx, newy))

gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# blur the image, find edges, and then find contours along
# the edged regions
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
ap.add_argument("-m",
                "--model",
                required=True,
                help="path to where the model will be stored")
ap.add_argument("-i", "--image", required=True, help="path to the image file")
args = vars(ap.parse_args())
numeros = []

# load the model
model = open(args["model"]).read()
model = cPickle.loads(model)
print model

# initialize the HOG descriptor
hog = HOG(orientations=18,
          pixelsPerCell=(10, 10),
          cellsPerBlock=(1, 1),
          transform_sqrt=True)

# load the image
image = cv2.imread(args["image"])

# apply filters (Added, subtracted, bitwise Or)
M = np.ones(image.shape, dtype="uint8") * 5
added = cv2.add(image, M)
#cv2.imshow('add', added)
#cv2.waitKey(0)
M = np.ones(image.shape, dtype="uint8") * 20
subtracted = cv2.subtract(image, M)
#cv2.imshow("Subtracted", subtracted)
#cv2.waitKey(0)
gray = cv2.cvtColor(subtracted, cv2.COLOR_BGR2GRAY)