Example #1
0
 def confirm(self, controller):
     """Run recognition for the entered fiscal code and route to the next page.

     Depending on self.role, verifies the patient (role == 0) or identifies
     a delegate, then either shows the UserPage populated with the matched
     data or the InformationPage with a rejection message.

     :param controller: the app controller holding `frames` and `show_frame`
     """
     # patient verification (role == 0)
     if self.role == 0:
         print("Verifica del paziente")
         # cf label text starts with a fixed 16-char prefix; slice it off
         patient, index, user = Recognition.recognize(
             self.cf.cget("text")[16:], self.photo)
     # delegate verification
     else:
         print("Verifica del delegato")
         patient, index, user = Recognition.identify(
             self.cf.cget("text")[16:], self.photo)
     print("Il paziente e':", patient, "L'utente riconosciuto e':", user)
     if user is not None:
         # NOTE(review): frames are looked up by position in the dict's
         # insertion order via pages.UserPage.value - 1 — assumes the
         # frames dict order matches the `pages` enum; confirm upstream
         list(controller.frames.values())[pages.UserPage.value - 1].reset()
         list(controller.frames.values())[pages.UserPage.value -
                                          1].update_data(
                                              index, user["User"],
                                              patient["User"],
                                              patient["Codice Fiscale"],
                                              patient["Delegati"],
                                              patient["Farmaci"],
                                              patient["Data"],
                                              self.panel.image)
         controller.show_frame(UserPage)
     else:
         # no match: show the rejection message page
         list(
             controller.frames.values())[pages.InformationPage.value -
                                         1].update_data(recognitionRejected)
         controller.show_frame(InformationPage)
def xzdingwei():
    """Locate the licence-plate region of `pic_path` and display the crop.

    Recognition.find_car_num_brod writes the cropped plate image to
    tmp/chepai_img1.jpg; this function loads it, scales it to 160x40 and
    shows it in the module-level `window`.
    """
    lable3 = tk.Label(window, text="形状定位如下", font=('Arial', 15))
    lable3.place(x=750, y=0)
    Recognition.find_car_num_brod(pic_path)
    img = Image.open("tmp/chepai_img1.jpg")
    # BUG FIX: the original created a PhotoImage from the full-size image
    # and immediately discarded it; resize first, build the PhotoImage once.
    # NOTE(review): Image.ANTIALIAS was removed in Pillow >= 10 — switch to
    # Image.LANCZOS when upgrading Pillow.
    img = img.resize((160, 40), Image.ANTIALIAS)
    photo = ImageTk.PhotoImage(img)
    label2 = tk.Label(window, anchor="ne")
    label2.config(image=photo)
    # keep a reference so Tk does not garbage-collect the image
    label2.image = photo
    label2.place(x=735, y=30)
Example #3
0
    def saveFrame(self, userId):
        """Write the current frame's detected face to disk and index it.

        Detects a face in ``self.currentFrame`` (converted to grayscale),
        resizes it to the database's 92x112 format, writes it under
        ``self.faceDir`` and appends a row to the index file.

        :param userId: identifier used in the file name and index row
        :return: (detection, newUser) — ``detection`` is truthy when a face
            was found; the second value is ``not writer.checkUser(userId)``
            on success (presumably True when the user was newly created —
            confirm against ReadWriteIndex.checkUser) and 0 otherwise.
        """
        # FIX: dropped the dead `imgBasePath = ""` assignment that was
        # immediately overwritten, and the stale commented-out detection code.
        imgBasePath = self.faceDir + "/" + "face_" + userId
        recognition = rec.Recognition(False)
        faceImg, detection, x, y, h, w = recognition.detectFace(
            cv2.cvtColor(self.currentFrame, cv2.COLOR_RGB2GRAY))

        if detection:
            writer = rwi.ReadWriteIndex()
            # normalize to the 92x112 size used by the face database
            faceImg = cv2.resize(faceImg, (92, 112))
            fileExtension = "." + self.config.get("Cam", "imgExtension")
            # file name: face_<userId>#<running count>.<ext>
            filePath = (imgBasePath + "#" +
                        str(writer.getCountUserElem(userId)) + fileExtension)
            cv2.imwrite(self.currentDir + filePath, faceImg)
            newUser = writer.checkUser(userId)
            writer.addRow(userId, filePath)  # add new line to file index
            return detection, not newUser

        return detection, 0
Example #4
0
def updateThreshold(new_user):
    """Recompute per-user verification thresholds after enrolling new_user.

    Loads the gallery arrays from npy_db/, compares every existing user's
    threshold against the newly enrolled user's templates, raises thresholds
    where needed, appends a threshold for the new user, and saves the
    updated list back to npy_db/gallery_thresholds.npy.

    :param new_user: identifier of the user just added to the gallery
    """
    gallery_threshold = np.load("npy_db/gallery_thresholds.npy").tolist()
    gallery_target = np.load("npy_db/gallery_target.npy")
    gallery_histograms = np.load("npy_db/histogram_gallery_data.npy")
    new_index = gallery_target.tolist().index(new_user)
    # FIX: renamed from `max`, which shadowed the builtin
    max_thd = -1
    # unique users; dict.fromkeys preserves insertion order, so this list
    # stays aligned with gallery_thresholds
    galley_users = list(dict.fromkeys(gallery_target))
    print("AGGIORNAMENTO DEI THRESHOLDS...")
    for user in galley_users:
        # compare each gallery user against the new user, never against itself
        if user != new_user:
            index = galley_users.index(user)
            # for every template of the newly enrolled user
            for i in range(n_photo_x_user):
                thd = Recognition.topMatch(user, gallery_target,
                                           gallery_histograms,
                                           gallery_histograms[new_index + i])
                # keep the highest threshold seen for this user
                if thd > gallery_threshold[index]:
                    gallery_threshold[index] = thd
                # track the overall maximum, rounded up to 2 decimals so the
                # stored threshold never undercuts the observed similarity
                if thd > max_thd:
                    if np.round(thd, 2) <= thd:
                        max_thd = np.round(thd, 2) + 0.01
                    else:
                        max_thd = np.round(thd, 2)
    # the final maximum belongs to the newly enrolled user: galley_users
    # preserves gallery_target's order, so appending keeps lists aligned
    gallery_threshold.append(max_thd)
    print("IL TUO THRESHOLD:", max_thd, "N. TOTALI DI HISTOGRAM:",
          len(gallery_threshold))
    np.save("npy_db/gallery_thresholds.npy", gallery_threshold)
    return
Example #5
0
    def __init__(self, video_src=None, modelType=None, threshold=None):
        """Build the face-authorization main window (PyQt4).

        :param video_src: webcam/video source handed to vs.VideoSampling
        :param modelType: model selector forwarded to rec.Recognition
        :param threshold: acceptance threshold forwarded to rec.Recognition
        """
        QtGui.QMainWindow.__init__(self)
        self.recognition = rec.Recognition(True, modelType, threshold)
        self.setWindowTitle("Authorization system - Face")
        cWidget = QtGui.QWidget(self)
        mainLayout = QtGui.QVBoxLayout()

        # Title
        # titleLabel = QtGui.QLabel("Show your credential!")
        # titleLabel.setAlignment(QtCore.Qt.AlignCenter)

        # Webcam preview label, fed by VideoSampling via update()
        self.imgLabel = QtGui.QLabel()
        self.webcamSampling = vs.VideoSampling(video_src)
        self.update()

        # Buttons wired with old-style PyQt4 signal/slot connections
        self.startButton = QtGui.QPushButton("Authenticate")
        self.cancelButton = QtGui.QPushButton("Cancel")
        self.connect(self.startButton, QtCore.SIGNAL("clicked()"),
                     self.startAuthentication)
        self.connect(self.cancelButton, QtCore.SIGNAL("clicked()"),
                     QtCore.SLOT("close()"))

        # mainLayout.addWidget(titleLabel)
        mainLayout.addWidget(self.imgLabel)
        mainLayout.addWidget(self.startButton)
        mainLayout.addWidget(self.cancelButton)

        mainLayout.setAlignment(QtCore.Qt.AlignCenter)
        cWidget.setLayout(mainLayout)
        self.setCentralWidget(cWidget)
        # center() is defined elsewhere on this class — positions the window
        self.center()
Example #6
0
    def adaptiveThresholds(self):
        """Compute one verification threshold per unique gallery user.

        For each user, every other user's template is compared against the
        user's own templates; the per-user threshold is the highest
        cross-user similarity observed, rounded up to 2 decimals so it never
        undercuts an impostor score.

        :return: list of thresholds, aligned with the unique-user order of
            ``self.gallery_target``
        """
        print("ADAPTIVE THRESHOLDS")
        thresholds = []
        # unique users; dict.fromkeys preserves insertion order
        galley_users = list(dict.fromkeys(self.gallery_target))
        for user in galley_users:
            max_thd = -1
            # FIX: hoisted out of the template loops — the first gallery
            # index of this user's templates is invariant per user, but the
            # original recomputed it for every probe template
            index = self.gallery_target.index(user)
            for i in range(len(self.gallery_data)):
                # skip the user's own templates (genuine comparisons)
                if user != self.gallery_target[i]:
                    new_thd = 0
                    hist_probe = self.histogram_gallery_data[i]
                    # compare the probe histogram with each of the user's
                    # templates; keep the highest similarity
                    for j in range(self.get_n_gallery_temp_x_user()):
                        hist_gallley = self.histogram_gallery_data[index + j]
                        diff = Recognition.compareHistogram(
                            hist_probe, hist_gallley)
                        if diff >= new_thd:
                            new_thd = diff
                    # update the user's running maximum, rounded upward
                    if new_thd > max_thd:
                        if np.round(new_thd, 2) <= new_thd:
                            max_thd = np.round(new_thd, 2) + 0.01
                        else:
                            max_thd = np.round(new_thd, 2)
            thresholds.append(max_thd)
            print("Threshold per l'utente", user, ":", max_thd)
        print("Thresholds:", thresholds)

        return thresholds
Example #7
0
def SendMouseClick(a):
    """Locate template image `a` on screen and click its coordinates.

    Does nothing when Recognition.getXYCoord reports the (-999, -999)
    not-found sentinel.
    """
    x, y = Recognition.getXYCoord(a)
    print(x, y)
    # BUG FIX: the original `x != -999 & y != -999` parsed as the chained
    # comparison `x != (-999 & y) != -999` because `&` binds tighter than
    # `!=`; the intended logical conjunction uses `and`.
    if x != -999 and y != -999:
        Cursor.MoveCursor(x, y)
        pyautogui.click(x, y)
        time.sleep(0.5)
def convert(*args):
  """Extract characters from the selected image, run them through the
  saved network, and display the recognized text in the UI.

  Reads the path from the module-level `imPath` entry; ignores *args
  (Tk callback signature).
  """
  source_image = PIL.Image.open(imPath.get())
  segments = extraction3.main3(source_image)
  network = Recognition.load('network_improved4.json')
  recognized_text = network.recognize(segments)
  t.set(recognized_text)
  ttk.Label(mainframe, textvariable=t).grid(column=4, row=3)
Example #9
0
def update_btn_text2():
    """Arm the system, run one recognition pass and e-mail the result.

    Side effects only: reads/updates the module-level widgets `butt1` /
    `butt2`, the StringVar `v1`, the `names` list, and sends mail via em2.
    """
    butt1.config(state=NORMAL)
    v1.set("Your system is armed")
    print(butt1['state'])
    print("rec wcalled")
    #while butt1['state']== 'normal':
    print("entered loop")
    # presumably eigenface recognition over the known names — confirm
    # against Recognition.eigrec
    x = Recognition.eigrec(names)
    print(x)
    # e-mail the recognition result
    em2.eml(x)
    print("lopoexit")
    butt2.config(state=DISABLED)
Example #10
0
def recognition(queue_images, queue_number):
    """
    Get the images stored in the queue, use the recognition algorithm
    established in the file "Recognition", then put the number in another
    queue.

    :param queue_images: queue of (images, sign_type) tuples from detection
    :param queue_number: queue receiving (number, sign_type) tuples
    """

    # Global loop
    while True:
        print("Queue size :", queue_images.qsize())
        # FIX: renamed from `type`, which shadowed the builtin
        images, sign_type = queue_images.get()
        if images is not None:
            for image in images:
                number = Recognition.detect_number(image)
                queue_number.put((number, sign_type))
imgTrafficLabel = tkinter.Label(canvasTraffic, image=imgTraffic)

windowSpeedLimit = tkinter.Tk()
windowSpeedLimit.title("Speed limitation")
canvasSpeedLimit = tkinter.Canvas(windowSpeedLimit)

img = ImageTk.PhotoImage(file="assets/blank.jpg")
sign = tkinter.Label(canvasSpeedLimit, image=img)
text = tkinter.Label(windowSpeedLimit, text="")
for elt in Images:
    imgs = Processing.pre_processing(elt)
    print("processed")
    img = ImageTk.PhotoImage(file="assets/blank.jpg")
    if imgs is not None:
        for image in imgs:
            number = Recognition.detect_number(image)
            if number is not None:
                if "11" in number or "10" in number:
                    print("@@@@ 110 @@@@")
                    img = ImageTk.PhotoImage(file="assets/ref110.jpg")
                elif "30" in number:
                    if "130" in number or "13" in number:
                        print("@@@@ 130 @@@@")
                        img = ImageTk.PhotoImage(file="assets/ref130.jpg")
                    else:
                        print("@@@@ 30 @@@@")
                        img = ImageTk.PhotoImage(file="assets/ref30.jpg")
                elif "5" in number:
                    print("@@@@ 50 @@@@")
                    img = ImageTk.PhotoImage(file="assets/ref50.jpg")
                elif "7" in number:
Example #12
0
import keyboard
from Frame import Framework
from ChessInformation import chessInformation
import json

from Recognition import *

# NOTE(review): "picutre" looks like a typo for "picture"; the name is kept
# as-is in case other modules reference it. The constant is unused here.
picutre_path = "D:/MyProject/python/testLOL/picture/"

frame = Framework()
imgHandle = chessInformation()
cnt = 0
with open(r'D:\MyProject\python\testLOL\project\scr\information.json',
          'r') as f:
    config = json.load(f)
    rec = Recognition(config)

    def Id2Name(hero_id):
        """Map a hero id to its display name, or "None" when unknown.

        FIX: parameter renamed from `id`, which shadowed the builtin.
        """
        if hero_id in config:
            return config[hero_id]["HeroName"]
        else:
            return "None"

    # wait for the F9 hotkey, then read and print the five board slots
    while True:
        # print("get " + str(cnt))
        keyboard.wait("f9")
        for i in range(5):
            img = imgHandle.getChessImg(i, frame)
            print(Id2Name(rec.get(img)))

    # time.sleep(20)
Example #13
0
USAGE
python main.py -i videos/sj.mp4 -d dataset
"""

import Recognition
import Encoder
import argparse

# read command-line arguments: dataset directory and input video path
ap = argparse.ArgumentParser()
ap.add_argument("-d",
                "--dataset",
                type=str,
                default="dataset",
                help="input face dataset path")
ap.add_argument("-i", "--input", type=str, help="path to input video")
args = vars(ap.parse_args())

# encode the face dataset; Encoder.encode() returns the encodings file path
print("Encoding data set...")
encoder = Encoder.Encoder(args['dataset'])
encode_file_dir = encoder.encode()
print("Completely encode the data set")

# run detection/recognition over the input video using the encodings
print("Detecting the face...")
input_video_dir = args['input']
recognition = Recognition.Recognition(encode_file_dir, input_video_dir)
mosaic_list = recognition.recognize()
print("Completely detected faces from input video")
Example #14
0
def parse(str):
    """Dispatch a command string to the matching emulator automation routine
    and return a status message ("" when no command matched).

    NOTE(review): the parameter name `str` shadows the builtin; renaming it
    would change the public signature, so it is only flagged here.
    """
    #start up the emulator
    if (str == "-init"):
        Start.start()
        Done = TerminateKeyInput("icon.png", "enter", "App opened.")
        if (Done != ""): return Done
        else: return "Error starting up the app."

    #return to home
    if (str == "-h"):
        Start.openApp()
        SendKeyInput("home.png", "h")

    #run dimension missions
    if (str == "-dm"):
        Start.openApp()
        SendMouseClick("home.png")
        SendMouseClick("enter.png")
        SendMouseClick("dm.png")
        SendMouseClick("ready.png")
        SendMouseClick("clear.png")
        SendMouseClick("cmax.png")
        SendMouseClick("home.png")

        # bail out early when the out-of-energy dialog is on screen
        noEnergyErr = TerminateKeyInput("noenergy.png", "n", "Out of energy. Returning to home.")
        if noEnergyErr != "":
            SendKeyInput("home.png", "h")
            return noEnergyErr
        time.sleep(4)
        SendMouseClick("close.png")
        Done = TerminateKeyInput("home.png", "h", "Done.")
        if Done != "":
            return "Done."
        else: return "Error."

    #collect antimatter from store
    if (str == "-lab col"):
        Start.openApp()
        SendMouseClick("home.png")
        SendMouseClick("list.png")
        SendMouseClick("lab.png")
        SendMouseClick("amcollect.png")
        time.sleep(0.5)
        Done = TerminateKeyInput("home.png", "h", "Antimatter collected. Returning to home.")
        if Done != "":
            return Done
        else:
            return "An error occurred."

    #Legendary battle: Loki Ragnarok
    if (str == "-lb"):
        Start.openApp()
        SendMouseClick("start.png")
        pyautogui.press("l")
        time.sleep(10)
        pyautogui.press("l")
        # spam the skill keys until the "repeat" button appears on screen
        while True:
            if (Recognition.locate("repeat.png")):
                pyautogui.press("l")
                time.sleep(4)
                return "Stage complete."
            pyautogui.press("l")
            pyautogui.press("l")
            time.sleep(0.5)
            pyautogui.press(";")
            pyautogui.press(";")
            time.sleep(0.5)
            pyautogui.press("'")
            pyautogui.press("'")
            time.sleep(0.5)
            pyautogui.press("[")
            pyautogui.press("[")
            time.sleep(0.5)
            pyautogui.press("p")
            pyautogui.press("p")
            time.sleep(0.5)

    return ""
Example #15
0
def TerminateKeyInput(a, b, c):
    """If template image `a` is located on screen, press key `b` and return
    the message `c`; otherwise return the empty string."""
    if Recognition.locate(a) != 1:
        return ""
    pyautogui.press(b)
    return c
Example #16
0
def SendKeyInput(a, b):
    """Press key `b` when template image `a` is located on screen."""
    located = Recognition.locate(a)
    if located == 1:
        pyautogui.press(b)
Example #17
0
    #model.plotResults()

"""print "body points"
print gmB1.b_points

print "gravity points"
print gmB1.gr_points

print "body sigma"
print gmB1.b_sigma

print "gravity sigma"
print gmB1.gr_points"""

# Build a recognizer over the pre-trained models and validate it on two
# sample files. NOTE: Python 2 syntax (print statements).
from Recognition import*
r =  Recognition(list_models)

#Calculate best weights
print "Calculating weigths"
r.calculate_Weights()

print "Validation"

# run recognition over each validation file
sfile = "validation/t1.txt"
r.recognition_from_files(sfile)


sfile = "validation/t2.txt"
r.recognition_from_files(sfile)

Example #18
0
import Calibration
import Recognition
import IOFunctions
import Sequence
import math

MIN_SEQ_LENGTH = 100
MAX_SEQ_LENGTH = 150
    

def initial():
    """Build the initial label dictionary from hand-picked sequences.

    NOTE(review): both "Rechts" and "Links" are built from dataL (the
    commented-out variant used dataR for "Rechts"), and dataR is loaded but
    unused — presumably intentional for this test setup; confirm.
    """
    dataL = IOFunctions.read('Data\\test2_B.csv')
    dataR = IOFunctions.read('Data\\test3_B.csv')
    #return {"Rechts" : [Sequence.Sequence(dataR, 1200, 100).getNormalized(), Sequence.Sequence(dataR, 1982, 100).getNormalized()],
    #        "Links" : [Sequence.Sequence(dataL, 2506, 100).getNormalized(), Sequence.Sequence(dataL, 2672, 100).getNormalized()]}
    return {"Rechts" : [Sequence.Sequence(dataL, 2672, 100).getNormalized(), Sequence.Sequence(dataL, 2506, 100).getNormalized()],
            "Links" : [Sequence.Sequence(dataL, 2506, 100).getNormalized(), Sequence.Sequence(dataL, 2672, 100).getNormalized()]}

def getAllLengths():
    """Return candidate sequence lengths from MIN_SEQ_LENGTH to
    MAX_SEQ_LENGTH (inclusive) in steps of ceil(3% of MAX_SEQ_LENGTH).

    :return: list of int lengths
    """
    step = math.ceil(MAX_SEQ_LENGTH * 0.03)
    # range() yields exactly the values the original append loop produced
    return list(range(MIN_SEQ_LENGTH, MAX_SEQ_LENGTH + 1, step))

if __name__ == '__main__':

    allLengths = getAllLengths()
    # calibrate labels for the two gestures over all candidate lengths
    labels = Calibration.run(["Rechts","Links"], allLengths)
    #labels = initial()
    Recognition.recognize(labels, MIN_SEQ_LENGTH, allLengths)
#!/usr/bin/env python
# coding: utf-8

# In[3]:

# Notebook-exported training script for the MNIST digit network.
import mnist_loader
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
# materialize the generator so the data can be reused across SGD runs
training_data = list(training_data)

# In[2]:

import Recognition

# In[6]:

# network with a 784-unit input layer, 100 hidden units, 10 outputs
net = Recognition.Network([784, 100, 10])
# presumably SGD(data, epochs, mini-batch size, learning rate) — confirm
# against Recognition.Network.SGD
net.SGD(training_data, 30, 10, 0.001, test_data=test_data)

# In[3]:

net = Recognition.Network([784, 30, 10])

# In[4]:

net.SGD(training_data, 30, 10, 3.0, test_data=test_data)

# In[4]:

import network2

# In[5]:
# Script for training and saving a neural network (Python 2 print syntax).
import Recognition
import Data

# get the data
train_data, test_data = Data.alpha_load()

# resume from the previously saved network
net = Recognition.load('network_improved3.json')

# presumably train(data, epochs, mini-batch size, learning rate) — confirm
# against Recognition's train signature
net.train(train_data, 5, 104, 0.1, reg_param=5.0, test_data=test_data)

net.save('network_improved4.json')

print "Network Saved"
Example #21
0
 def read(self):
     """Read a number from the getter's current value and print it."""
     print("hi")
     recognized = Recognition.readNumber(self.getter())
     print(recognized)