Example #1
def get_image():
    cap = cv2.VideoCapture(0)
    fgbg = cv2.createBackgroundSubtractorKNN(history=24)
    i = 0
    for i in range(10):
        ret, frame2 = cap.read()
    while (True):
        i += 1
        # Capture frame-by-frame
        ret, frame2 = cap.read()
        frame = frame2[250:340, :]
        # Our operations on the frame come here
        fgmask = fgbg.apply(frame)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        im = pi.mask_handle(fgmask, frame)
        # Find the moving-object contours in the foreground mask
        im2, contours, hier = cv2.findContours(fgmask, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)
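        # NOTE: the three-value return from findContours assumes OpenCV 3.x;
        # OpenCV 4.x returns only (contours, hierarchy)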
        contour_sizes = [(cv2.contourArea(contour), contour)
                         for contour in contours]
        biggest_contours = sorted(contour_sizes, key=lambda x: x[0])
        if (len(biggest_contours) > 1):
            contour1 = biggest_contours[-1][1]
            contour1_size = biggest_contours[-1][0]
            if (contour1_size > 40):
                x, y, w, h = cv2.boundingRect(np.concatenate((contour1)))
                if (True):
                    frameArray = np.array(frame)
                    crop_img = frame[y:(int)(y + h), :]
                    cap.release()
                    cv2.destroyAllWindows()
                    return pi.mask_handle(fgmask, frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
Example #2
def save_worker(save_queue, data_queue, process_values, trigger, session_count, gui=None):
    """
    The thread worker used to save and analyze the pictures from CameraThread.
        :argument save_queue: The queue used to store all of the pictures received from XimeaController.
        :type save_queue: Queue.Queue
        :argument data_queue: The queue used to pass saved pictures on to the data worker.
        :type data_queue: Queue.Queue
        :argument process_values: All of the ProcessImage settings in ScarberrySettings.
        :type process_values: dict
        :argument trigger: The ThreadTrigger which contains all of the booleans for thread syncing.
        :type trigger: ThreadTrigger
        :argument session_count: The number of the current instance of the threads
        :type session_count: int
        :keyword gui: Optional interface used to print.
        :type gui: Interface.ScarberryGui
    """
    global abort
    pic_count = 0
    while trigger.get(name='runProcess') and not abort:
        while not save_queue.empty() and not abort:
            pic = save_queue.get()
            Interface.choose_print(gui, 'save', 'pic {} hex: {}'.format(pic_count,pic))
            formated_number = ProcessImage.format_number(pic_count,int(process_values.get("NumberPadding")))
            save_name = process_values.get("BaseName")
            if process_values.get("Count") > 0:
                save_name = '{}.{}'.format(save_name,session_count)
            ProcessImage.save_image(pic,
                                    formated_number,
                                    image_direcoty=process_values.get("ImageDirectory"),
                                    name=save_name,
                                    extention=process_values.get("FileExtension"))
            data_queue.put(pic)
            pic_count += 1
    trigger.set_off('runData')
    Interface.choose_print(gui, 'save', 'SaveImageThread: Finished')
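A minimal sketch of how save_worker might be started, assuming the project's Interface and ProcessImage modules are importable and the module-level abort flag is defined. SimpleTrigger is a hypothetical stand-in that only mimics the get/set_off calls the worker makes on ThreadTrigger, and the process_values entries are placeholder values for the ScarberrySettings keys read above.

import queue
import threading

class SimpleTrigger:
    # hypothetical stand-in for ThreadTrigger: a bag of named booleans
    def __init__(self, **flags):
        self.flags = dict(flags)

    def get(self, name):
        return self.flags.get(name, False)

    def set_off(self, name):
        self.flags[name] = False

save_queue = queue.Queue()
data_queue = queue.Queue()
trigger = SimpleTrigger(runProcess=True, runData=True)
process_values = {"NumberPadding": 4, "BaseName": "frame", "Count": 0,
                  "ImageDirectory": "images", "FileExtension": ".png"}

saver = threading.Thread(target=save_worker,
                         args=(save_queue, data_queue, process_values, trigger, 0))
saver.start()
# in the real application the camera thread fills save_queue and eventually
# switches the 'runProcess' flag off so the worker can shut down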
Example #3
def yuwangwen():
    # TODO: check or crop the image size?
    # the uploaded image is currently expected to be 178 x 220
    oridata = request.data
    if 'file' not in request.files:
        return 'Error!'
    # file is an object
    # for example
    # curl -X POST http://54.223.112.245:9527/ssdflow -H 'Content-Type: multipart/form-data' -F file=@/home/ruobo/Downloads/FireShot/flow_8.jpg
    # get a file
    file = request.files['file']
    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)
        file.save(
            os.path.join('/home/ubuntu/robbytest/dcgan-autoencoder/',
                         filename))
        ProcessImage.prcessing()  # create hdf5 file
    else:
        print "not a valid form!"
        pass
    # Infer file using loaded model and hdf5 file
    path = "/home/ubuntu/robbytest/dcgan-autoencoder/faces_single_test.hdf5"
    result_path = Model.infer(path, model)
    # return result
    imgname = result_path.split('/')[-1]
    return send_from_directory(app.static_folder, imgname)
Example #4
def query():
    import ProcessImage

    response.content_type = 'application/json'
    q = json.load(request.body)

    queryString = str(q['queryString'])
    queryString = queryString.replace(" ","").replace("\t","").strip()
    imageName = str(q['imgName'])
    lcImageName = str(q['lcImageName'])
    dir = q['dir']

    ProcessImage.process_image(imageName, lcImageName, queryString, dir)

    return {'Your query': queryString}
Example #5
def train_neigh_():
    t = train_neigh(80)
    im = image.ProcessImages('train')
    X, Y = im.make_all()
    t.train_first_two_features(X, Y)
    max = 0.0
    index = 3
    x_list = []
    y_list = []
    # t3 = train_neigh(4)
    # print('test: ', t3.test())

    for x in range(3, 100, 1):
        # for weight in ['uniform', 'distance']:
        print(' neighbours: ', 'distance', ' ', x)
        t2 = train_neigh(x, first_two=True, features=['x0', 'y0'])
        #t2 = train_neigh(x, first_two=True)
        t2.train(X, Y)
        # t2.load(x, weight)
        v = t2.valid()
        if (v > max):
            max = v
            index = x
            print(v)
        x_list.append(x)
        y_list.append(v)

    plt.plot(x_list, y_list)
    plt.show()

    print('maximum: ', max, ' neighbours: ', index)
Example #6
def getBestKValue(maxK, two_feature=False, features=['ecc', 'extent']):
    im = image.ProcessImages('train', two_value=two_feature, list_=features)
    X, Y = im.make_all()
    max = 0.0
    index = 3
    x_list = []
    y_list = []
    # iterate over candidate k values
    for x in range(3, maxK, 1):
        # for weight in ['uniform', 'distance']:
        print(' neighbours: ', 'distance', ' ', x)
        t2 = train_neigh(x, first_two=True, features=features)
        t2.train(X, Y)
        # t2.load(x, weight)
        v = t2.valid()
        # best result achieved on the validation set
        if (v > max):
            max = v
            index = x
            print(v)
        x_list.append(x)
        y_list.append(v)

    # display the results
    plt.plot(x_list, y_list)
    plt.show()

    print('maximum: ', max, ' neighbours: ', index)
    return (max, index)
Example #7
    def getCheckCode(self, net):
        # CAPTCHA image URL
        checkcode_url = "http://58.194.172.34/reader/yz.php"
        request = urllib.request.Request(checkcode_url, headers=self.headers)
        picture = self.opener.open(request).read()
        # write the CAPTCHA image to a local file
        local = open("checkcode.jpg", "wb")
        local.write(picture)
        local.close()
        # open the image with the system's default image viewer
        os.system("checkcode.jpg")
        img_list = ProcessImage.MainProcess()
        txt_check = ""
        for img in img_list:
            #img.show()
            img = np.reshape(img, (784, 1))
            img2 = np.zeros((784, 1))
            for i in range(784):
                img2[i][0] = float((255 - img[i][0]) / 255.0)
            #img=float(img/[[255.0] for i in range(784)])
            #print(img[0].shape)
            num = net.GetNumber(img2)
            print(num)
            txt_check = txt_check + str(num)
        print(txt_check)

        #txt_check = input(str("请输入验证码").encode(self.charaterset))
        return txt_check
Example #8
def upload():
    if request.method == 'POST':

        if 'file' not in request.files:
            return render_template('upload.html',
                                   lngs=Defines.lngs,
                                   msg='No file selected')

        filename = Upload.Upload(request.files['file'])

        if filename == '':
            return render_template('upload.html',
                                   lngs=Defines.lngs,
                                   msg='No file selected')

        extracted_text, summary, threshold = ProcessImage.ProcessImage(
            filename, request.form['lng'])

        render_text = '\n'
        for sentence, value in summary:
            # Format to be displayed in js on client
            render_text += '[`' + sentence + '`,' + str(value) + '],\n'

        return render_template('upload.html',
                               lngs=Defines.lngs,
                               msg='Successfully processed',
                               threshold=threshold,
                               extracted_text=render_text,
                               img_src=filename)

    elif request.method == 'GET':
        return render_template('upload.html', lngs=Defines.lngs)
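The view above is shown without its route decorator; in a Flask app like the one in Example #16 it would presumably be registered along these lines (the '/upload' URL rule is an assumption):

@app.route('/upload', methods=['GET', 'POST'])  # assumed URL rule
def upload():
    ...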
Example #9
def _train_neigh(value, feauters=['x0', 'y0']):
    # nearest neighbour classifier
    t = train_neigh(value, first_two=True, features=feauters)
    # training set
    im = image.ProcessImages('train', list_=feauters)
    trainX, trainY = im.make_all()
    im2 = image.ProcessImages('test', list_=feauters)
    testX, testY = im2.make_all()
    # display the test set on the training set
    t.test_train_two_features(testX, testY, trainX, trainY)
    predictY = []
    # values predicted by the k nearest neighbours
    predictY = t.predict(testX)

    # iterate over the images: show which ones were misclassified
    for i, j, z in zip(predictY, testY, testX):
        if i != j:
            print('coordinates: (', z[0], ' ', z[1], ') predicted:  ', i,
                  ' original ', j)
            im2.find_pictures(z)
    cv2.waitKey(0)
Example #10
def predict():
    if request.method == 'POST':
        try:
            data = request.get_json()
            # print(data["landmark_perk"])
            # print(data["landmark_neutral"])
            # print(data["image_data"])
            prediction = pl.getEmotionPredict(data["landmark_neutral"],
                                              data["landmark_perk"])
            subject_names = pi.getSubjectPredict(data["image_data"])
            prediction['subject_names'] = subject_names
            print(prediction)
        except ValueError:
            return jsonify("Input Error.")

        return jsonify(prediction)
Example #11
def data_worker(queue, process_values, trigger, session_count, gui=None):
    """
    The thread worker used to save and analyze the pictures from CameraThread.
        :argument queue: The queue used to store all of the pictures received from XimeaController.
        :type queue: Queue.Queue
        :argument process_values: All of the ProcessImage settings in ScarberrySettings.
        :type process_values: dict
        :argument trigger: The ThreadTrigger which contains all of the booleans for thread syncing.
        :type trigger: ThreadTrigger
        :argument session_count: The number of the current instance of the threads
        :type session_count: int
        :keyword gui: Optional interface used to print.
        :type gui: Interface.ScarberryGui
    """
    global abort
    pic_count = 0
    data_directory = process_values.get("ImageDirectory") + '\\data'
    if not os.path.exists(data_directory):
        os.makedirs(data_directory)
    while trigger.get(name='runData') and not abort:
        while not queue.empty() and not abort:
            pic = queue.get()
            Interface.choose_print(gui, 'data', 'pic {} hex: {}'.format(pic_count, pic))
            formated_number = ProcessImage.format_number(pic_count, int(process_values.get("NumberPadding")))
            save_name = process_values.get("BaseName")
            if process_values.get("Count") > 0:
                save_name = '{}.{}'.format(save_name, session_count)
            data_filename = '{}\\data-{}_{}{}'.format(data_directory,
                                                      save_name,
                                                      formated_number,
                                                      '.txt')
            if process_values.get("SaveDraw"):
                ProcessImage.draw_and_data(pic,
                                           '{}\\data\\data-{}_{}{}'.format(process_values.get("ImageDirectory"),
                                                                           save_name,
                                                                           formated_number,
                                                                           process_values.get("FileExtension")),
                                           data_filename,
                                           process_values.get("BlurValue"),
                                           process_values.get("ThreshLimit"),
                                           draw_rois=process_values.get("DrawROIs"),
                                           draw_centroid=process_values.get("DrawCentroid"),
                                           draw_colours=process_values.get("DrawColour"),
                                           draw_count=process_values.get("DrawCount"))
            else:
                data = ProcessImage.get_data(pic,
                                             process_values.get("BlurValue"),
                                             process_values.get("ThreshLimit"))
                ProcessImage.save_data(data, data_filename)
            pic_count += 1
    Interface.choose_print(gui, 'data', 'DataImageThread: Finished')
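data_worker consumes the same pictures after save_worker (Example #2) has saved and forwarded them, so the two functions form a two-stage pipeline over data_queue. Continuing the hypothetical sketch given after Example #2 (the BlurValue and ThreshLimit values are placeholders):

process_values.update({"BlurValue": 5, "ThreshLimit": 127, "SaveDraw": False})
analyser = threading.Thread(target=data_worker,
                            args=(data_queue, process_values, trigger, 0))
analyser.start()
# once 'runProcess' is switched off and save_queue is drained, save_worker
# calls trigger.set_off('runData'), which lets data_worker finish in turn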
Example #12
    def valid(self):
        im = image.ProcessImages('validation')
        X, Y = im.make_all()
        return self.clf.score(X, Y)
Example #13
    def test(self):
        im = image.ProcessImages('test')
        X, Y = im.make_all()
        return self.clf.score(X, Y)
Example #14
def main():

    #SVM:
    t = train()
    #im = image.ProcessImages('train')
    #X, Y = im.make_all()
    #t.train(X, Y)
    #rbf:

    print('SVM clf:')
    t.load(name='clf_rbf.pkl')
    print(t.predict(measure.process('test3.png')))
    print(t.test())

    #linear:
    print('SVM linear:')
    t.load(name='clf_linear.pkl')
    print(t.predict(measure.process('test3.png')))
    print(t.test())

    #k nearest neighbours:
    #on all features
    #getBestKValue(400)
    print('k neighbours on all features:')
    im = image.ProcessImages('train')
    X, Y = im.make_all()
    #result achieved on the training set:
    #maximum:  0.7032590051457976  neighbours:  183
    t2 = train_neigh(183)
    t2.train(X, Y)
    #result achieved on the test set:
    #result achieved:  0.5483870967741935
    print('result achieved: ', t2.test())

    #decision tree:
    print('decision tree: ')
    tree = DT()
    tree.train(X, Y)
    print('result achieved: ', tree.test())
    print(tree.valid())
    tree.show(X, Y)

    #ecc-extent:
    print('two features: ecc, extent:')
    #getBestKValue(100, two_feature=True, features=['ecc', 'extent'])
    t3 = train_neigh(80, first_two=True, features=['ecc', 'extent'])
    t3.train(X, Y)
    print('result achieved: ', t3.test())
    #result achieved:  0.4946236559139785
    t3.train_first_two_features(X, Y)
    #_train_neigh(80, feauters=['ecc', 'extent'])

    #x0-y0:
    print('two features: x0, y0:')
    #getBestKValue(100, two_feature=True, features=['x0', 'y0'])
    t4 = train_neigh(54, first_two=True, features=['x0', 'y0'])
    t4.train(X, Y)
    print('result achieved: ', t4.test())
    #result achieved:  0.3333333333333333
    t4.train_first_two_features(X, Y)
    _train_neigh(54, feauters=['x0', 'y0'])

    #train_neigh_()
    im2 = image.ProcessImages('test')
    _train_neigh(80, feauters=['ecc', 'extent'])
Example #15
__author__ = 'geoimages'

import sys

import ProcessImage
import Helpers

if __name__ == '__main__':


    testimage = r'C:\Users\geoimages\angular-seed\app\images\Alvin NE\Alvin NE_w012_h014.jpg'
    testLCimage = r'C:\Users\geoimages\angular-seed\app\images\Alvin NE\Alvin NE_w012_h014LC.jpg'
    directory = Helpers.getImageOutputLoc()
    ProcessImage.process_image(testimage, testLCimage, 'Area_35_500#Perimeter_50_300', directory)
Example #16
import os
import csv
import Upload
import Defines
import ProcessImage
from flask import Flask, render_template, request

app = Flask(__name__)
app.template_folder = os.path.join(app.instance_path, 'template')

ProcessImage.InitProcessImage(app.instance_path)
Upload.InitUpload(app.instance_path)


@app.route('/')
def home_page():
    return render_template('index.html')


@app.route('/search', methods=['GET', 'POST'])
def search():
    if request.method == 'POST':
        search_term = request.form['search']

        results = []
        with open(ProcessImage.CSV_File, "r+", encoding="utf-8") as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=Defines.csv_delimiter)
            for row in csv_reader:
                for token in row:
                    if search_term in token:
                        file = row[0]