Example #1
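    # Tkinter video view: streams frames from `vs` on a background thread,
    # shows a "Ring the Bell!" button, and hooks window close to onClose().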
    def __init__(self, vs, width=320, height=450, framerate=32):
        self.vs = vs

        self.root = tki.Tk()

        self.framerate = framerate
        self.sleepduration = 1.0 / self.framerate
        self.frame = None
        self.thread = None
        self.stopEvent = None

        self.root.resizable(width=False, height=False)
        self.root.geometry('{}x{}'.format(width, height))

        self.panelWidth = width

        self.panel = None

        self.button = tki.Button(self.root,
                                 text="Ring the Bell!",
                                 command=self.ring)
        self.button.pack(side="bottom",
                         fill="both",
                         expand="yes",
                         padx=10,
                         pady=10)

        self.stopEvent = threading.Event()  # signals the video loop to stop
        self.thread = threading.Thread(target=self.videoLoop, args=())
        self.thread.start()

        self.root.wm_title("Hoosthere")
        self.root.wm_protocol("WM_DELETE_WINDOW", self.onClose)

        self.recognizer = Recognizer()
Example #2
    def __init__(self):
        # Load every reader, recognizer, and captioning model the project needs,
        # then open the project file.
        self.detection_reader = DetectionReader('detections.json')
        self.project_file_name = '/home/algernon/andro2'
        self.video_file_name = ''
        self.db_name = ''
        self.data_base = None
        self.video_maker = None
        self.db_user_name = 'root'
        self.db_user_pass = '******'
        self.db_host = 'localhost'
        self.commands = []
        self.output_video_file_name = 'output.mkv'
        self.video_reader = None
        self.video_writer = None
        self.emotion_detection_reader = DetectionReader('emotion_results/er.json')
        self.emotion_recognizer = EmotionRecognizer(self.EMOTION_PROB_THRESH)
        self.captioner = Captioner('/home/algernon/a-PyTorch-Tutorial-to-Image-Captioning/weights/BEST_checkpoint_coco_5_cap_per_img_5_min_word_freq.pth.tar',
                                   '/home/algernon/a-PyTorch-Tutorial-to-Image-Captioning/weights/WORDMAP_coco_5_cap_per_img_5_min_word_freq.json')
        self.segmentator = None
        self.clothes_detector = ClothesDetector("yolo/df2cfg/yolov3-df2.cfg", "yolo/weights/yolov3-df2_15000.weights", "yolo/df2cfg/df2.names")
        self.face_recognizer = FaceRecognizer()
        self.open_project()
        self.recognizer = Recognizer(
            '/home/algernon/PycharmProjects/AIVlog/mmdetection/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py',
            '/home/algernon/PycharmProjects/AIVlog/mmdetection/work_dirs/faster_rcnn_r50_fpn_1x_voc0712/epoch_10.pth')
Example #3
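    # Preprocess the loaded image, save the labeled result, run the disease
    # recognizer on it, and show the prediction in a message box.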
    def button_process_click(self):
        preprocessor = Preprocessor()
        labeled_img = preprocessor.process(self.image)
        cv2.imwrite('result_img.png', labeled_img)
        recognizer = Recognizer()
        disease = recognizer.recognize_disease('result_img.png')
        print(disease)
        QMessageBox.about(self.mainwindow, "Predicted Disease", disease)
Example #4
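	# Voice-command front end: merge CLI opts with the options file, pick a
	# Qt/GTK UI if one was requested, then start the speech recognizer.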
	def __init__(self, opts):
		#import the recognizer so Gst doesn't clobber our -h
		from Recognizer import Recognizer
		self.ui = None
		self.options = {}
		ui_continuous_listen = False
		self.continuous_listen = False

		self.commander = Command.Commander(command_file,strings_file)

		#load the options file
		self.load_options()

		#merge the opts
		for k,v in opts.__dict__.items():
			if (k not in self.options) or opts.override:
				self.options[k] = v

		if self.options['interface'] is not None:
			if self.options['interface'] == "q":
				from QtUI import UI
			elif self.options['interface'] == "g":
				from GtkUI import UI
			elif self.options['interface'] == "gt":
				from GtkTrayUI import UI
			else:
				print "no GUI defined"
				sys.exit()

			self.ui = UI(args, self.options['continuous'])
			self.ui.connect("command", self.process_command)
			#can we load the icon resource?
			icon = self.load_resource("icon.png")
			if icon:
				self.ui.set_icon_active_asset(icon)
			#can we load the icon_inactive resource?
			icon_inactive = self.load_resource("icon_inactive.png")
			if icon_inactive:
				self.ui.set_icon_inactive_asset(icon_inactive)

		if self.options['history']:
			self.history = []

		#create the recognizer
		try:
			self.recognizer = Recognizer(lang_file, dic_file, self.options['microphone'])
		except Exception as e:
			#no recognizer? bummer
			print(e)
			sys.exit()
Example #5
def Main():
    if Train:
        print("Loading CSV...")
        machine = MyMachine("./Data/labels.csv")
        print("CSV Loaded.")
        trainCoulumnName = "id"
        valuesColumnName = "breed"
        filesType = ".jpg"
        TrainingPicturesPathDirectory = "C:\\Users\\hosse\\Desktop\\Dog Breed Project\\Data\\train\\"
        TestingPicturesPathDirectoy= "./Data/test/"
        trainingImagesNo = 12000 # Should be more than 126 due to __show_25_images() u may get Index out of bound error
        callbackLogsPath = "C:\\Users\\hosse\\Desktop\\Dog Breed Project\\Logs\\"
        modelsSavingPath = "C:\\Users\\hosse\\Desktop\\Dog Breed Project\\Models\\"
        
    
    
        machine.initializer(trainCoulumnName,
                            valuesColumnName,
                            filesType,
                            TrainingPicturesPathDirectory,
                            TestingPicturesPathDirectoy,
                            trainingImagesNo,
                            callbackLogsPath,
                            modelsSavingPath)
        
        machine.Train()
    else:
        print("Recognizing...")
        imgPath = "7.jpg"
        # Predict an Image
        modelPath = "C:\\Users\\hosse\\Desktop\\Dog Breed Project\\Models\\ModelNO_10_LOSS_0.74_ACCURACY_0.82_IMG_NO_12000.h5" 
        CSVLabelsPath = "./Data/labels.csv"
        X_ColumnName = "id"
        Y_ColumnName = "breed"
        imagePath = "C:\\Users\\hosse\\Desktop\\Dog Breed Project\\ImagesToTest\\" + str(imgPath)
        trainPicturesPath = "C:\\Users\\hosse\\Desktop\\Dog Breed Project\\Data\\train\\"
        filesType = ".jpg"
        imgReco = Recognizer(modelPath, CSVLabelsPath, X_ColumnName, Y_ColumnName, trainPicturesPath, filesType)
        imgReco.predict(imagePath)
        
        
    return 0
Example #6
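    # Main window: train the recognizer, load the Qt UI, and move the camera
    # to a worker thread so frame grabbing does not block the GUI.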
    def __init__(self):
        super(MainWindow, self).__init__()
        self.recognizer = Recognizer()
        self.recognizer.learn()
        self.ui = loadUi(os.path.join(THIS_DIR, 'mainwindow.ui'), self)
        self.thread = QThread()
        try:
            self.camera = CameraDevice()
        except ValueError:
            self.ui.video.setText("Device not found!\n\nIs FFMPEG available?")
        else:
            self.camera.frame_ready.connect(self.update_video_label)
            self.ui.video.setMinimumSize(640 * 2, 480)
            self.camera.moveToThread(self.thread)

        self.ui.t_max.setValue(0)
        self.ui.t_min.setValue(255)
        self.ui.s_max.setValue(200)
        self.ui.s_min.setValue(3)
        self.update_values()
Example #7
    def __init__(self, opts):
        #import the recognizer so Gst doesn't clobber our -h
        from Recognizer import Recognizer
        self.ui = None
        #keep track of the opts
        self.opts = opts
        ui_continuous_listen = False
        self.continuous_listen = opts.continuous
        self.commands = {}
        self.read_commands()
        self.recognizer = Recognizer(lang_file, dic_file, opts.microphone)
        self.recognizer.connect('finished', self.recognizer_finished)
        self.matchTime = 0
        self.keywordTimeLimit = opts.keytime  #set to 0 to always speak the keyword

        self.commandFileTime = 0
        #updates language file and commands on start
        self.checkCommandFile()

        self.commandFileTime = os.path.getmtime(command_file)

        #read options
        if opts.interface is not None:
            if opts.interface == "q":
                #import the ui from qt
                from QtUI import UI
            elif opts.interface == "g":
                from GtkUI import UI
            else:
                print "no GUI defined"
                sys.exit()

            self.ui = UI(args, opts.continuous)
            self.ui.connect("command", self.process_command)
            #can we load the icon resource?
            icon = self.load_resource("icon.png")
            if icon:
                self.ui.set_icon(icon)

        if self.opts.history:
            self.history = []
Example #8
    def __init__(self, opts):

        # Initialize our ROS node:
        rospy.init_node('voice')

        # Define our publisher:
        self.voice_pub = rospy.Publisher('voice', String, queue_size=1)

        #import the recognizer so Gst doesn't clobber our -h
        from Recognizer import Recognizer
        self.ui = None
        #keep track of the opts
        self.opts = opts
        ui_continuous_listen = False
        self.continuous_listen = opts.continuous

        self.stringsFileTime = os.path.getmtime(strings_file)
        self.commands = {}
        self.read_commands()

        self.recognizer = Recognizer(lang_file, dic_file, opts.microphone)
        self.recognizer.connect('finished', self.recognizer_finished)
        self.matchTime = 0
        self.keywordTimeLimit = opts.keytime  #set to 0 to always speak the keyword

        # Update the Language File and Commands?
        self.commandFileTime = os.path.getmtime(command_file)
        if ((AUTO_UPDATE_CMD_FILE)
                or (self.commandFileTime > self.stringsFileTime)):
            # Trick the system by making it think we just created the command file:
            self.commandFileTime = time.time()
        self.checkCommandFile()

        #read options
        if self.opts.history:
            self.history = []
    "um": audio.wavread(wav_directory + 'um.wav')[0],
    "dois": audio.wavread(wav_directory + 'dois.wav')[0],
}
audio_base[3] = {
    "matrix": audio.wavread(wav_directory + 'matrix.wav')[0],
    "braveheart": audio.wavread(wav_directory + 'braveheart.wav')[0],
    "constantine": audio.wavread(wav_directory + 'constantine.wav')[0],
}
audio_base[4] = {
    "dinheiro": audio.wavread(wav_directory + 'dinheiro.wav')[0],
    "cartao": audio.wavread(wav_directory + 'cartao.wav')[0],
}
audio_base[5] = {
    "finalizar_compra":
    audio.wavread(wav_directory + 'finalizar_compra.wav')[0],
    "sair": audio.wavread(wav_directory + 'sair.wav')[0],
}

if __name__ == "__main__":
    for i in range(6):
        recorder = Recorder()
        recognizer = Recognizer()

        recorder.record(time_to_run=2)
        (input_signal1, sampling_rate1, bits1) = audio.wavread('record.wav')
        recognizer.test_audio(audio_base[i], input_signal1, wav_directory)

    fs = sampling_rate1
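    # 300-3400 Hz: the standard telephone voice band used for band-pass filtering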
    lowcut = 300
    highcut = 3400
Example #10
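# Automates an Android game over adb: restart_app() reopens the app and dismisses
# its splash screens; the main loop screenshots the board, recognizes it, and
# swipes along the computed path.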
def restart_app():
    mo.open("com.lx.jdhg", "com.lx.jdhg/com.ly.lxdr.AppActivity")
    time.sleep(4)
    mo.click(params['skip2_x'], params['skip2_y'])
    mo.click(params['skip_x'], params['skip_y'])
    mo.click(params['start_button_x'], params['start_button_y'])


while True:
    current = time.time() * 1000
    p = os.popen(
        'adb shell "dumpsys window | grep mCurrentFocus"')  # check we are on the correct page before starting
    result = str(p.read())
    if not result[:-1].endswith("com.lx.jdhg/com.ly.lxdr.AppActivity}"):
        restart_app()
        continue
    reco = Recognizer(mo.get_screen_shot())
    try:
        table = reco.find()
        path = table.find_path(True)
        step_time = time.time() * 1000
        index = 0
        while index < len(path) - 1:
            mo.swipe(path[index][0] + params['main_area_west'],
                     path[index][1] + params['main_area_north'],
                     path[index + 1][0] + params['main_area_west'],
                     path[index + 1][1] + params['main_area_north'])
            index += 1
        print("滑动耗时 %d ms" % (time.time() * 1000 - step_time, ))
        mo.click(params['collect_button_x'], params['collect_button_y'])
        time.sleep(0.5)
        mo.click(params['next_x'], params['next_y'])
Example #11
    def __init__(self, opts):
        #import the recognizer so Gst doesn't clobber our -h
        from Recognizer import Recognizer
        #set variables
        self.ui = None
        self.options = {}
        ui_continuous_listen = False
        self.continuous_listen = False
        self.commands = {}

        #read the commands
        self.load_commands_file()
        #load the options file
        print("load the options")
        self.load_options_file()

        #merge the opts
        for k, v in opts.__dict__.items():
            if (k not in self.options) or opts.override:
                self.options[k] = v

        # should we be updating?
        if self.options['update']:
            #make the sentences corpus
            self.generate_sentences_corpus()
            #run the update stuff
            UpdateLanguage.update_language()

        if self.options['interface'] is not None:
            if self.options['interface'] == "q":
                from QtUI import UI
            elif self.options['interface'] == "g":
                from GtkUI import UI
            elif self.options['interface'] == "gt":
                from GtkTrayUI import UI
            else:
                print("no GUI defined")
                sys.exit()

            self.ui = UI(args, self.options['continuous'])
            self.ui.connect("command", self.process_command)
            #can we load the icon resource?
            icon = self.load_resource("icon.png")
            if icon:
                self.ui.set_icon_active_asset(icon)
            #can we load the icon_inactive resource?
            icon_inactive = self.load_resource("icon_inactive.png")
            if icon_inactive:
                self.ui.set_icon_inactive_asset(icon_inactive)

        if self.options['history']:
            self.history = []

        #create the recognizer
        try:
            self.recognizer = Recognizer(lang_file, dic_file,
                                         self.options['microphone'])
        except Exception as e:
            print(e)
            #no recognizer? bummer
            sys.exit()

        self.recognizer.connect('finished', self.recognizer_finished)

        print("Using Options: ", self.options)
Example #12
    def __init__(self):
        '''
        Build the complete Tkinter interface: every label, button, list box,
        and text widget, plus the Recognizer and Talker back ends.
        '''
        self.root = Tk()
        self.welcome_label = Label(master=self.root, width=80, height=30)
        self.photo = PhotoImage(file='new_welc.png')
        self.back_right = Label(master=self.root,
                                width=50,
                                height=30,
                                bg='green')
        self.back_left = Label(master=self.root,
                               width=30,
                               height=30,
                               bg='blue')
        self.img_label = Label(master=self.welcome_label,
                               image=self.photo,
                               height=self.photo.height(),
                               width=self.photo.width())
        self.welc_over_but = Button(master=self.img_label,
                                    text='Get Started',
                                    width=20,
                                    height=1,
                                    command=self.come_in)
        self.mfcc_label = Label(master=self.back_right,
                                width=50,
                                height=16,
                                bg='yellow')
        self.word_label = Label(master=self.back_left,
                                width=30,
                                height=30,
                                bg='pink')
        self.word_list = Listbox(master=self.word_label, width=30, height=10)
        self.word_rec_label = Label(master=self.word_label,
                                    height=1,
                                    width=30,
                                    bg='red')
        self.word_rec_but = Button(master=self.word_rec_label,
                                   height=1,
                                   width=10,
                                   text='Listen to me',
                                   command=self.listen_word)
        self.auto_cover = IntVar()
        self.word_args = Label(master=self.word_label,
                               width=30,
                               height=6,
                               bg='blue')
        self.word_rec_time = Text(master=self.word_args, width=30, height=1)
        self.word_file = Text(master=self.word_args, width=30, height=1)
        self.word_listen_status = Label(master=self.word_args,
                                        width=30,
                                        height=4,
                                        bg='white')
        self.word_result = Listbox(master=self.word_label, height=13, width=30)
        self.mfcc_pic_label = Label(master=self.mfcc_label,
                                    height=16,
                                    width=50)
        self.mfcc_pic = None
        self.talk_label = Label(master=self.back_right,
                                width=50,
                                height=14,
                                bg='purple')
        self.talk_arg = Label(master=self.talk_label, width=50, height=2)
        self.lcy = Label(master=self.talk_arg, width=20, height=3)
        self.fjw = Label(master=self.talk_arg, width=30, height=3, bg='green')
        self.talk_rec_but = Button(master=self.lcy,
                                   width=10,
                                   height=2,
                                   command=self.listen_talk,
                                   text="Let's chat!")
        self.talk_auto_cover = IntVar()
        self.talk_rec_time = Text(master=self.fjw, width=30, height=1)
        self.talk_file = Text(master=self.fjw, width=30, height=1)
        self.talk_area = Label(master=self.talk_label, width=50, height=12)
        self.talk_history = Text(master=self.talk_area, width=50, height=12)
        self.speech_content = Text(master=self.talk_area, width=50, height=2)
        self.rc = Recognizer()
        self.talker = Talker()

        self.root.title('Speech Recognizer')
        self.welcome()
Example #13
from VizGen import *
from Recognizer import Recognizer

if __name__ == "__main__":
    alphabet = ['B', 'A', 'D', 'C']

    generated_string, characters_dict, original_image = generate_image(
        alphabet, 8)
    noised_image = noise_image(original_image, sigma=100)

    recognizer = Recognizer(original_image, noised_image, alphabet,
                            characters_dict)

    print(original_image.shape[1])
    print(noised_image.shape[1])

    recognized_string = recognizer.recognize()
    recognized_image = concatenate_images(recognized_string, characters_dict)

    show_triple_images(original_image, noised_image, recognized_image,
                       "original image", "noised image", "recognized image")
Example #14
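# Qt voice assistant "Gotti": listens via microphone or text field, then routes
# English or Urdu commands to the matching operations handler.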
class Controller:
    global language
    language = "eng"
    global recognizer
    recognizer = Recognizer()
    global engoperations
    engoperations = English_Operations()
    global uroperations
    uroperations = Urdu_Operations()

    def __init__(self):
        app = QtWidgets.QApplication(sys.argv)
        MainWindow = QtWidgets.QMainWindow()
        global ui
        ui = Ui_MainWindow()
        ui.setupUi(MainWindow)
        ui.microbutton.clicked.connect(lambda: self.callistner("button"))
        ui.querytextfield.returnPressed.connect(
            lambda: self.callistner("textfield"))
        ui.actionEnglish.triggered.connect(lambda: self.change_language("eng"))
        ui.actionUrdu.triggered.connect(lambda: self.change_language("ur"))
        ui.add_label("goti", "Hello! I am Gotti. How can I help you?")
        recognizer.talk("Hello! I am Gotti. How can I help you?", "en-uk")
        MainWindow.show()
        sys.exit(app.exec_())

    def change_language(self, lang):
        global language
        language = lang

    def makedecision(self, command):
        if language == 'eng':
            if 'search' in command:
                engoperations.OpenChrome(command)
            elif 'launch' in command:
                engoperations.LaunchApp(command)
            else:
                ui.add_label("goti", "Sorry can't understand your command")
                recognizer.talk("Sorry can't understand Your command", "en-uk")
        else:
            if 'تلاش' in command:
                uroperations.OpenChrome(command)
            elif 'کھولو' in command:
                uroperations.LaunchApp(command)
            else:
                ui.add_label("goti", "معاف کیجئے گا آپ کا حکم سمجھ نہیں آیا")

    def callistner(self, who):

        if ((who == "button") & (language == "eng")):
            ui.changetext("Gouti is Listening..")
            text = recognizer.myCommand('en-US')
            if text == -1:
                text = "your last command couldn\'t be heard.please speak again"
                ui.add_label("goti", text)
                recognizer.talk(text, "en-uk")
            else:
                ui.add_label("user", text)
                ui.changetext("")
                self.makedecision(text)

        elif ((who == "textfield") & (language == "eng")):
            text = ui.querytextfield.text()
            ui.add_label("user", text)
            ui.changetext("")
            self.makedecision(text)

        elif ((who == "button") & (language == "ur")):
            ui.changetext("گوٹی سن رہا ہے ...")
            text = recognizer.myCommand('ur-PK')
            if text == -1:
                text = "آپ کا آخری حکم نہیں سنا گیا۔ براہ کرم دوبارہ بولیں"
                ui.add_label("goti", text)
            else:
                ui.add_label("user", text)
                ui.changetext("")
                self.makedecision(text)
        elif ((who == "textfield") & (language == "ur")):
            text = ui.querytextfield.text()
            ui.add_label("user", text)
            ui.changetext("")
            self.makedecision(text)

        ui.scrolled(ui.scrollArea.verticalScrollBar().maximum())
Example #15
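# Load stroke data from ten users, extract exemplary points, and classify a
# single 2-D trace (the letter "O") with the trained Recognizer.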
from exemplary_points import filter_and_getExemplaries

sample_rate = 10

# load data
N_users = 10
for i in range(N_users):
    execfile("data/user" + str(i + 1) + ".py")
all_data = [data_user1, data_user2, data_user3, data_user4, data_user5, \
        data_user6, data_user7, data_user8, data_user9, data_user10]

# extract exemplary points
filter_and_getExemplaries(all_data, sample_rate)

# make prediction
recognizer = Recognizer(sample_rate)
# The data is from letter "O"
print(recognizer.predict_one([[0.00151, 0.01135], [0.01175, 0.02839],
                              [0.0243, -0.00781], [0.01606, -0.03011],
                              [-0.01251, -0.02651], [-0.08557, 0.02435],
                              [-0.05574, 0.02611], [0.00386, 0.01013],
                              [0.03279, 0.00049], [0.02381, -0.0027],
                              [-0.0126, -0.00321], [-0.06615, 0.01246],
                              [-0.06997, 0.00418], [-0.06926, -0.00977],
                              [-0.0444, -0.01925], [0.00515, -0.02594],
                              [0.01419, -0.02939], [0.00689, -0.01478],
                              [0.00534, -0.01004], [-0.00194, -0.01154],
                              [-0.01755, -0.0048], [-0.0367, -0.0101],
                              [-0.05585, 0.00492], [-0.03075, 0.01247],
                              [0.01603, -0.02082], [0.04772, -0.01042],
                              [0.05997, -0.0237], [0.06643, -0.01772],
Example #16
import argparse
import time

from imutils.video import VideoStream  # assumed source of VideoStream

from Network import Network

#Parse arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--picamera", type=int, default=1,
	help="Use Raspberry Camera")
ap.add_argument("-w", "--width", type=int, default=316,
	help="Witdh of the window")
ap.add_argument("-ht", "--height", type=int, default=450,
	help="Height of the window")
ap.add_argument("-fr", "--framerate", type=int, default=25,
	help="Frame rate of the camera")
opt = vars(ap.parse_args())


#recognizer = Recognizer()
recognizer = Recognizer(modelFile='model.mdl')
network = Network()
#network = Network(endpoint='http://localhost:8000/hoo/')
print('INFO: People: ')
print(recognizer.people)

print("INFO: Launching camera")
vs = VideoStream(usePiCamera=opt["picamera"] > 0).start()
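# give the camera sensor a couple of seconds to warm up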
time.sleep(2.0)
view = View(vs, recognizer, network, width=opt["width"], height=opt["height"], framerate=opt["framerate"])


print("INFO: Application started successfully.")
view.root.mainloop()
Example #17
    def __init__(self):
        super().__init__()
        self.initUI()
        self.pathList = []
        self.rgzr = Recognizer()