Example #1
def test_one_picture(file_path='./datas/img_person/0.png',
                     save_path='./test/test_100/0.png',
                     show_org=False,
                     model_path=None):
    """
    只测试一张图片
    :param file_path: 输入单张图片完成的绝对路径[str]
    :param save_path: 保存单张图片的绝对路径[str]
    :param show_org: 是否展示原始人工评语[boolean]
    :param model_path: 测试模型路径[str]
    :return:
    """
    st = time.time()
    img_data = generator.load_one_img(
        file_path=file_path,
        image_size=Params.image_size,
        preprocess_method=Params.preprocess_method)
    predict_model = rebuild_decoder(model_path=model_path)
    predict_text = predict(image_input=img_data, predict_model=predict_model)

    sh.show_image(file_path=file_path,
                  predict_text=predict_text,
                  save_path=save_path,
                  show_org=show_org)

    en = time.time()
    print(predict_text)
    print(en - st)
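A minimal invocation sketch for the function above; the image and save paths are the defaults from the signature, while the model file name is a hypothetical placeholder:

# Hypothetical usage: caption one image with a previously trained decoder.
test_one_picture(file_path='./datas/img_person/0.png',
                 save_path='./test/test_100/0.png',
                 show_org=False,
                 model_path='./models/decoder.h5')  # placeholder path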
Example #2
def test_model(test_info=None,
               show_org=True,
               save_root_path='./test/test_50/',
               model_path=None):
    """
    :param test_info: 测试数据, e.g (test_lst, test_texts, test_img) [tuple]
    :param show_org: 是否展示人工评语[boolean]
    :param save_root_path: 保存测试结果的根路径[str]
    :param model_path: 测试所需要的模型[str]
    :return:
    """
    names, texts, images = test_info
    predict_model = rebuild_decoder(model_path=model_path)

    for name, text, image in zip(names, texts, images):
        file_path = os.path.join(Params.img_data_path, str(name) + '.png')
        save_path = os.path.join(save_root_path, str(name) + '.png')
        predict_text = predict(image_input=image, predict_model=predict_model)
        print("图片标号:" + str(name))
        original_text = "人工描述:" + text
        print(original_text)

        sh.show_image(file_path=file_path,
                      original_text=original_text,
                      predict_text=predict_text,
                      save_path=save_path,
                      show_org=show_org)

        print(predict_text)
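A minimal driver sketch for test_model; how the (names, texts, images) tuple is produced is not shown in the example, so the loading helper and the model path below are hypothetical:

# Hypothetical driver: test_info matches the tuple described in the docstring.
test_lst, test_texts, test_img = load_test_split()  # hypothetical helper
test_model(test_info=(test_lst, test_texts, test_img),
           show_org=True,
           save_root_path='./test/test_50/',
           model_path='./models/decoder.h5')  # placeholder path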
Example #3
def hack(img_name):
    '''
    HACK Recognize a CAPTCHA image
      Inputs:
          img_name: filename of image
      Outputs:
          digits: 1x5 matrix, 5 digits in the input CAPTCHA image.
    '''
    data = np.load('hack_data.npz')

    # YOUR CODE HERE (you can delete the following code as you wish)
    x_train = data['x_train']
    y_train = data['y_train']

    # begin answer
    image = extract_image.extract_image(img_name)
    digits = knn.knn(image, x_train, y_train, 5)
    show_image.show_image(image)
    # end answer

    return digits
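A short usage sketch, assuming hack_data.npz exists and a CAPTCHA image file is on disk; the file name is a placeholder:

# Hypothetical call: recognize the five digits in one CAPTCHA image.
digits = hack('captcha_test/0.jpg')  # placeholder file name
print(digits)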
Example #4
        # face detecting
        facerect = cascade.detectMultiScale(frame_gray,
                                            scaleFactor=1.2,
                                            minNeighbors=3,
                                            minSize=(10, 10))
        if len(facerect) > 0:
            print('face detected')
            color = (255, 255, 255)
            for rect in facerect:
                x, y = rect[0:2]
                width, height = rect[2:4]
                image = frame[y - 10:y + height, x:x + width]

                result = model.predict(image)
                if result == 0:  # owner
                    print('Owner detected!')
                    show_image()
                else:  # not owner
                    print('You are not the owner.')

        # wait for a key press
        k = cv2.waitKey(100)
        # Press Esc to exit...
        if k == 27:
            break

    # Release
    cap.release()
    cv2.destroyAllWindows()
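The excerpt above relies on names defined earlier in the same script (cascade, cap, frame, frame_gray, model). A minimal sketch of the OpenCV portion of that setup, assuming the standard opencv-python package; the owner-classifier model is not reconstructed here:

# Possible setup for the names the face-detection loop assumes.
# The cascade file is OpenCV's bundled frontal-face model; capture index 0
# (the default webcam) is an assumption.
import cv2

cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)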
Example #5
import argparse
import datetime
import zipfile
import time

from show_image import show_image

parser = argparse.ArgumentParser(description="Webcam zipped image viewer.")
parser.add_argument("input_file",
                    help="log .zip file containing the webcam images")
parser.add_argument("--start",
                    default="2016-01-01 00:00:00",
                    help="(YYYY-MM-DD HH:MM:SS) start time")
args = parser.parse_args()

filename = args.input_file
time_start = datetime.datetime.strptime(args.start, "%Y-%m-%d %H:%M:%S")

zf = zipfile.ZipFile(filename)
time_current = time_start
real_time_current = datetime.datetime.now()
while True:
    try:
        show_image(zf, time_current)
    except Exception:
        print("Can't show image at time {}.".format(time_current))
    while datetime.datetime.now().second == real_time_current.second:
        time.sleep(0.01)
    real_time_current = datetime.datetime.now()
    time_current += datetime.timedelta(seconds=1.0)
Example #6
from show_image import show_image
from video_demo import video_demo
from split import split
from logic import logic
from equal_hist import equal_hist
from image_enhancing import image_enhancement
# Testing OpenCV
'''
show_image()
video_demo()
split()
logic()
equal_hist()
'''

# Experiment: Image Enhancement
image_enhancement()
Example #7
from extract_image import extract_image
from show_image import show_image
import numpy as np

x_train = np.zeros((5 * 100, 140))
y_train = np.zeros((5 * 100, ))

for i in range(20):
    filename = 'captcha_train_data/' + str(i) + '.jpg'
    x_train[5 * i:5 * i + 5] = extract_image(filename)
    show_image(x_train[5 * i:5 * i + 5])
    label = input('label ' + str(i))
    for j in range(5):
        y_train[5 * i + j] = int(label[j])
    print(y_train[5 * i:5 * i + 5])

np.savez('hack_data.npz', x_train=x_train, y_train=y_train)
Example #8
def onclick(event):
    if event.xdata:
        time = pltdates.num2date(event.xdata)
        print("Retrieve image from {}...".format(time))
        show_image(zf, time)
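The handler above only takes effect once it is registered on a figure; a minimal sketch of that hook-up using matplotlib's standard event API (the figure and the plt.show() call are assumptions, not part of the original snippet):

# Hypothetical registration: invoke onclick on every mouse click in the plot.
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
fig.canvas.mpl_connect('button_press_event', onclick)
plt.show()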
Example #9
            clasf_name = clasf[u'name']
            if u'Triage' in clasf_name:
                print('The classifier is Remote Triage')
                for item in clasf[u'classes']:
                    if u'Prone' in item[u'class']:
                        situation.append('Prone person probability (%0.0f%%)' %
                                         (item['score'] * 100.0))
                    else:
                        print(item[u'class'])
                        print(item[u'score'])
        # Find the right face information
        for face_im in faces[u'images']:
            if filename in face_im[u'image']:
                for face in face_im[u'faces']:
                    face_type.append(
                        '%s (age %d-%d)' %
                        (face[u'gender'][u'gender'], face[u'age'][u'min'],
                         face[u'age'][u'max']))
                    pos = (face[u'face_location'][u'left'],
                           face[u'face_location'][u'top'],
                           face[u'face_location'][u'width'],
                           face[u'face_location'][u'height'])
                    face_pos.append(pos)

        show_image(filename, 'LieOrStand2/' + filename, situation, face_type,
                   face_pos)

# Debug variable content so Watson is not queried on every development iteration
#    results = {u'images': [{u'image': u'LieOrStand2.zip/A.jpg', u'classifiers': [{u'classes': [{u'score': 0.889, u'class': u'Erect'}], u'classifier_id': u'RemoteTriage_482484271', u'name': u'Remote Triage'}, {u'classes': [{u'score': 0.798, u'class': u'person'}, {u'score': 0.903, u'class': u'Parkour', u'type_hierarchy': u'/sport/Parkour'}, {u'score': 0.922, u'class': u'sport'}, {u'score': 0.5, u'class': u'skateboarding', u'type_hierarchy': u'/sport/skating/skateboarding'}, {u'score': 0.502, u'class': u'skating'}, {u'score': 0.794, u'class': u'clothing'}, {u'score': 0.964, u'class': u'gray color'}, {u'score': 0.793, u'class': u'charcoal color'}], u'classifier_id': u'default', u'name': u'default'}]}, {u'image': u'LieOrStand2.zip/D.jpg', u'classifiers': [{u'classes': [{u'score': 0.907, u'class': u'Prone'}], u'classifier_id': u'RemoteTriage_482484271', u'name': u'Remote Triage'}, {u'classes': [{u'score': 0.64, u'class': u'stocking', u'type_hierarchy': u'/hosiery/stocking'}, {u'score': 0.725, u'class': u'hosiery'}, {u'score': 0.561, u'class': u'boothose', u'type_hierarchy': u'/hosiery/boothose'}, {u'score': 0.535, u'class': u'pogo stick'}, {u'score': 0.5, u'class': u'belt', u'type_hierarchy': u'/accessory/belt'}, {u'score': 0.501, u'class': u'accessory'}, {u'score': 0.599, u'class': u'sport'}, {u'score': 0.599, u'class': u'knot'}, {u'score': 0.601, u'class': u'clothing'}, {u'score': 0.633, u'class': u'maroon color'}, {u'score': 0.512, u'class': u'purple color'}], u'classifier_id': u'default', u'name': u'default'}]}, {u'image': u'LieOrStand2.zip/C.jpg', u'classifiers': [{u'classes': [{u'score': 0.907, u'class': u'Prone'}], u'classifier_id': u'RemoteTriage_482484271', u'name': u'Remote Triage'}, {u'classes': [{u'score': 0.851, u'class': u'Parkour', u'type_hierarchy': u'/sport/Parkour'}, {u'score': 0.851, u'class': u'sport'}, {u'score': 0.5, u'class': u'nylons', u'type_hierarchy': u'/hosiery/nylons'}, {u'score': 0.508, u'class': u'hosiery'}, {u'score': 0.793, u'class': u'footwear'}, {u'score': 0.8, u'class': u'clothing'}, {u'score': 0.923, u'class': u'charcoal color'}, {u'score': 0.909, u'class': u'gray color'}], u'classifier_id': u'default', u'name': u'default'}]}, {u'image': u'LieOrStand2.zip/B.jpg', u'classifiers': [{u'classes': [{u'score': 0.88, u'class': u'Prone'}], u'classifier_id': u'RemoteTriage_482484271', u'name': u'Remote Triage'}, {u'classes': [{u'score': 0.597, u'class': u'ramp', u'type_hierarchy': u'/mechanical device/machine/inclined plane/ramp'}, {u'score': 0.597, u'class': u'inclined plane'}, {u'score': 0.598, u'class': u'machine'}, {u'score': 0.602, u'class': u'mechanical device'}, {u'score': 0.544, u'class': u'khukuri (knife)', u'type_hierarchy': u'/weapon/knife/khukuri (knife)'}, {u'score': 0.545, u'class': u'knife'}, {u'score': 0.556, u'class': u'weapon'}, {u'score': 0.511, u'class': u'Repairing'}, {u'score': 0.51, u'class': u'Earthquake'}, {u'score': 0.5, u'class': u'stoop', u'type_hierarchy': u'/porch/stoop'}, {u'score': 0.5, u'class': u'porch'}, {u'score': 0.597, u'class': u'lever'}, {u'score': 0.98, u'class': u'gray color'}], u'classifier_id': u'default', u'name': u'default'}]}, {u'image': u'LieOrStand2.zip/IMG_4132.jpg', u'classifiers': [{u'classes': [{u'score': 0.784, u'class': u'Erect'}], u'classifier_id': u'RemoteTriage_482484271', u'name': u'Remote Triage'}, {u'classes': [{u'score': 0.57, u'class': u'lever', u'type_hierarchy': u'/bar/lever'}, {u'score': 0.659, u'class': u'bar'}, {u'score': 0.538, u'class': u'handlebar', u'type_hierarchy': u'/bar/handlebar'}, 
{u'score': 0.531, u'class': u'skateboarding', u'type_hierarchy': u'/sport/skating/skateboarding'}, {u'score': 0.533, u'class': u'skating'}, {u'score': 0.534, u'class': u'sport'}, {u'score': 0.5, u'class': u'stabilizer bar'}, {u'score': 0.599, u'class': u'weapon'}, {u'score': 0.599, u'class': u'sports equipment'}, {u'score': 0.893, u'class': u'gray color'}, {u'score': 0.581, u'class': u'ash grey color'}], u'classifier_id': u'default', u'name': u'default'}]}], u'custom_classes': 3, u'images_processed': 5}
#    faces = {u'images': [{u'image': u'LieOrStand2.zip/B.jpg', u'faces': [{u'gender': {u'gender': u'MALE', u'score': 0.9846315}, u'age': {u'max': 28, u'score': 0.83206326, u'min': 24}, u'face_location': {u'width': 136, u'top': 757, u'left': 1296, u'height': 146}}]}, {u'image': u'LieOrStand2.zip/A.jpg', u'faces': [{u'gender': {u'gender': u'MALE', u'score': 0.99685293}, u'age': {u'max': 40, u'score': 0.2921676, u'min': 32}, u'face_location': {u'width': 135, u'top': 747, u'left': 1730, u'height': 137}}]}, {u'image': u'LieOrStand2.zip/D.jpg', u'faces': [{u'gender': {u'gender': u'MALE', u'score': 0.9732676}, u'age': {u'max': 28, u'score': 0.99969834, u'min': 25}, u'face_location': {u'width': 175, u'top': 644, u'left': 1170, u'height': 205}}]}, {u'image': u'LieOrStand2.zip/IMG_4132.jpg', u'faces': []}, {u'image': u'LieOrStand2.zip/C.jpg', u'faces': [{u'gender': {u'gender': u'MALE', u'score': 0.8604275}, u'age': {u'max': 41, u'score': 0.23711067, u'min': 32}, u'face_location': {u'width': 179, u'top': 794, u'left': 724, u'height': 190}}]}], u'images_processed': 5}
Example #10
from hack import hack
from extract_image import extract_image
from show_image import show_image
import scipy.misc
import numpy as np

N_train = 100
y = []
for i in range(N_train):
    path = './checkcode/' + str(i) + '.aspx'
    m = scipy.misc.imread(path, mode='L')
    d1 = m[4:18, 4:14].reshape(140)
    d2 = m[4:18, 13:23].reshape(140)
    d3 = m[4:18, 22:32].reshape(140)
    d4 = m[4:18, 31:41].reshape(140)
    d5 = m[4:18, 40:50].reshape(140)
    d = np.vstack((d1, d2, d3, d4, d5))  # d.shape = (5, 140)
    show_image(d)
    if (i == 0):
        x = np.copy(d)
    else:
        x = np.vstack((x, d))
    label = list(input("Enter 5 labels here: "))
    y.append(label)
y = np.array(y)
y = y.reshape(y.shape[0] * y.shape[1])
print(x.shape, y.shape)
np.savez('hack_data.npz', x_train=x, y_train=y)
print('save done')
Example #11
def onclick(event):
    if event.xdata:
        time = pltdates.num2date(event.xdata)
        print "Retrieve image from {}...".format(time)
        show_image(zf, time)
Example #12
import argparse
import datetime
import zipfile
import time

from show_image import show_image

parser = argparse.ArgumentParser(description="Webcam zipped image viewer.")
parser.add_argument("input_file", help="log .zip file containing the webcam images")
parser.add_argument("--start", default="2016-01-01 00:00:00", help="(YYYY-MM-DD HH:MM:SS) start time")
args = parser.parse_args()

filename = args.input_file
time_start = datetime.datetime.strptime(args.start, "%Y-%m-%d %H:%M:%S")


zf = zipfile.ZipFile(filename)
time_current = time_start
real_time_current = datetime.datetime.now()
while True:
    try:
        show_image(zf, time_current)
    except Exception:
        print("Can't show image at time {}.".format(time_current))
    while datetime.datetime.now().second == real_time_current.second:
        time.sleep(0.01)
    real_time_current = datetime.datetime.now()
    time_current += datetime.timedelta(seconds=1.0)

Example #13
              "score": 0.98, 
              "class": "gray color"
            }
          ], 
          "classifier_id": "default", 
          "name": "default"
        }
      ]
    }
  ], 
  "custom_classes": 3, 
  "images_processed": 5
}


for im in results['images']:
    print(' ')
    fileparts = os.path.split(im['image'])
    filename = fileparts[1]
    print(filename)
    overlay = ''
    for clasf in im['classifiers']:
        clasf_name = clasf['name']
        if clasf_name == 'Remote Triage':
            for item in clasf['classes']:
                if item['class'] == 'Prone':
                    overlay = 'Prone person probability (%f%%)' % (item['score'] * 100.0)
                    print('Prone person probability ({}%)'.format(item['score'] * 100.0))
    show_image('LieOrStand2/' + filename, overlay)
            
Example #14
import os
import numpy as np
from extract_image import extract_image
from show_image import show_image

path = "./download/res"
path_test = "./download/res/0.gif"
test = extract_image(path_test)
num_of_image, num_of_input = test.shape
features = np.zeros((num_of_image * 1000, num_of_input))
label = np.zeros((num_of_image * 1000, 1))
i = 0
for file in os.listdir(path):
    full_path = os.path.join(path, file)
    s_x = extract_image(full_path)
    show_image(s_x)
    s_number = input("Please input the label\n")
    if s_number[0] == 'q':
        break
    features[i:i + 5, :] = s_x
    for j in range(5):
        label[i + j, 0] = int(s_number[j])
    i = i + 5

np.savez("hack_data.npz", x_train=features, y_train=label)