Example #1
class Parser(object):
    def __init__(self):
        self.init_commands()
        self.continuous_listen = True
        self.recognizer = Recognizer("conf/lang/lm", "conf/lang/dic")
        self.recognizer.connect('finished', self.parse_hyp)

    def init_commands(self):
        self.commands = {}
        for jsonfile in glob.iglob("conf/cmds/*.json"):
            with open(jsonfile) as fd:
                json_data = json.load(fd)
                self.commands.update(json_data)

    def parse_hyp(self, recognizer, hyp):
        hyp = hyp.split(' ')
        self.display_parse_data(recognizer, hyp)

    def display_parse_data(self, recognizer, hyp):
        print("-------\nHYPOTHESIS: %s\n-------" % hyp)
        for cmd in hyp:
            sys.stdout.write("COMMAND: %s" % cmd)
            if cmd in self.commands:
                print(" :: ACTION FOUND: %s" % ', '.join(self.commands[cmd]))
            else:
                print(" :: NO ACTION FOUND!!")
        print("-------")

    def run(self):
        self.recognizer.listen()
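For reference, init_commands merges every JSON file under conf/cmds/ into a single dictionary, and display_parse_data joins each value with ', ', so each file is assumed to map one spoken word to a list of action strings. A minimal, hypothetical conf/cmds/media.json consistent with that usage:

{
    "play": ["mpc play"],
    "pause": ["mpc pause"],
    "next": ["mpc next", "notify-send 'next track'"]
}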
Example #2
 def __init__(self, clarifai_token, telegram_token):
     self.recognizer = Recognizer(clarifai_token)
     self.bot = Bot(telegram_token)
     self.secure = Secure()
     self.users_status = dict()
     self.users_login = dict()
     self.users_message = dict()
Example #3
    def identify(self, event):
        """
			On-line face recognition
		"""
        rgb_img, depth_img, depth_mtx = kinect.get_sample()
        r_bw, r_depth, r_mtx = preprocessor.normalize_sample(
            rgb_img, depth_img, depth_mtx)
        if r_bw is None:
            event.Skip()
            self.Controller_statusbar.SetStatusText(
                "There were no faces detected")
            return
        #0: eigenfaces (1), 1: fisherfaces(2)
        if self.combo_box_2.GetSelection() == 0:
            model = 1
        else:
            model = 2
        #0: automatic("auto"), 1:"nbw", 2:"ndepth"
        if self.combo_box_2_copy.GetSelection() == 0:
            mode = "auto"
        elif self.combo_box_2_copy.GetSelection() == 1:
            mode = "nbw"
        else:
            mode = "ndepth"
        rec = Recognizer(model)
        rec.predict(mode, r_bw, r_depth)
        event.Skip()
Example #4
    def recognize_data(self, group, folder_name):
        classifier_path = f"classifiers/{self.photos_size}/{folder_name}"
        paths_to_recognize = []
        y_true = []
        photos_ids = []
        for photos_path in group:
            paths_to_recognize.append(photos_path)
            student_id = int(os.path.split(photos_path)[-1].split("-")[0])
            photo_id = int(
                os.path.split(photos_path)[-1].split("-")[1].split(".")[0])
            y_true.append(student_id)
            photos_ids.append(photo_id)

        recognizer = Recognizer(paths_to_recognize, classifier_path)

        eigenfaces_y_pred = recognizer.eigenfaces()
        eigenfaces_metrics = MetricsCalculator(y_true, photos_ids,
                                               eigenfaces_y_pred)
        eigenfaces_metrics.calculate_metrics()
        eigenfaces_metrics.print_metrics()
        self.eigenfaces_metrics.append(eigenfaces_metrics)

        fisherfaces_y_pred = recognizer.fisherfaces()
        fisherfaces_metrics = MetricsCalculator(y_true, photos_ids,
                                                fisherfaces_y_pred)
        fisherfaces_metrics.calculate_metrics()
        fisherfaces_metrics.print_metrics()
        self.fisherfaces_metrics.append(fisherfaces_metrics)

        lbph_y_pred = recognizer.lbph()
        lbph_metrics = MetricsCalculator(y_true, photos_ids, lbph_y_pred)
        lbph_metrics.calculate_metrics()
        lbph_metrics.print_metrics()
        self.lbph_metrics.append(lbph_metrics)
Example #5
def guard():
  """Function to continually accept input and determine when to unlock door."""

  # TODO: How can we verify that this is running? Should it periodically call home?

  LOGFORMAT = '%(asctime)-15s %(message)s'
  logging.basicConfig(filename='/var/log/latchburg.log', level=logging.DEBUG, format=LOGFORMAT)

  ver = Recognizer()
  interface = EntryAttemptInterface()
  with Latch() as latch:
    while True:
      try:
        attempt = interface.getAttempt()
        if attempt is None:
          break

        result = ver.check(attempt)
        if result is not None:
          latch.unlock(INTERVAL)
          logging.info('Allowed access for user %s', result)
        else:
          logging.warning('Unauthorized attempt: %s', attempt)
      except Exception as inst:
        logging.error(inst)
Example #6
    def __init__(self, threshold):
        self.Threshold = threshold
        self.SHORT_NORMALIZE = (1.0 / 32768.0)
        self.chunk = 128
        self.FORMAT = pyaudio.paInt16
        self.CHANNELS = 1
        self.RATE = 16000
        self.swidth = 2
        self.Max_Seconds = 5
        self.seconds_of_record = 0.5
        self.timeout_signal = ((self.RATE / self.chunk * self.Max_Seconds) + 2)
        self.silence = True
        self.filename = 'temp/test.wav'
        self.Time = 0
        self.p = pyaudio.PyAudio()

        self.stream = self.p.open(format=self.FORMAT,
                                  channels=self.CHANNELS,
                                  rate=self.RATE,
                                  input=True,
                                  output=True,
                                  frames_per_buffer=self.chunk,
                                  input_device_index=1)
        self.r = Recognizer()
        self.r.load_models()
Example #7
class Parser(object):

	def __init__(self):
		self.init_commands()
		self.continuous_listen = True
		self.recognizer = Recognizer("conf/lang/lm", "conf/lang/dic")
		self.recognizer.connect('finished', self.parse_hyp)

	def init_commands(self):
		self.commands = {}
		for jsonfile in glob.iglob("conf/cmds/*.json"):
			with open(jsonfile) as fd:
				json_data = json.load(fd)
				self.commands.update(json_data)

	def parse_hyp(self, recognizer, hyp):
		hyp = hyp.split(' ')
		self.display_parse_data(recognizer, hyp)

	def display_parse_data(self, recognizer, hyp):
		print("-------\nHYPOTHESIS: %s\n-------" % hyp)
		for cmd in hyp:
			sys.stdout.write("COMMAND: %s" % cmd)
			if cmd in self.commands:
				print(" :: ACTION FOUND: %s" % ', '.join(self.commands[cmd]))
			else:
				print(" :: NO ACTION FOUND!!")
		print("-------")		

	def run(self):
		self.recognizer.listen()
Example #8
    def __init__(self):
        """Initialize brain class of Sara."""
        self.log = logging.getLogger()
        self.log.info('initialize brain...')

        self.config = configs.default_obj()
        if os.path.exists('user.json'):
            self.config = configs.update(self.config, 'user.json')

        self.speech = Speech(self.config.speech)

        self.speech.speak("Load language: " +
                          self.config.language.name[self.config.language.code])
        self.phrases = configs.language_obj(self.config.language.languages_dir,
                                            self.config.language.code)
        self.help = Help(self.config.language, self.phrases)
        self.speech.speak(self.phrases.general.start)

        self.generals = {'quit': False, 'text': '', 'arg': None}

        self.bd = Databases(self.config.sqlite)
        self.assist = Assistant(self.speech, self.config, self.phrases,
                                self.bd)
        if True if "true" == self.config.general.setup else False:
            self.assist.setup()

        self.backups = Backups(self.config.backups, self.bd)
        self.hotkeys = Hotkeys(self.config.hotkeys, self.generals)
        self.recognizer = Recognizer(self.config.recognition,
                                     self.config.language.code, self.generals,
                                     self.speech, self.phrases.recognition)
Example #9
 def __init__(self, new=False):
     self.reg = None
     self.dir_yml = join("yml")
     self.dir_data = join("data")
     self.path = join(self.dir_data, "reg.pkl")
     self.rel2id = yaml.load(open(join(self.dir_yml, "rel2id.yml"),
                                  encoding="utf-8"),
                             Loader=yaml.SafeLoader)
     self.int2id = yaml.load(open(join(self.dir_yml, "int2id.yml"),
                                  encoding="utf-8"),
                             Loader=yaml.SafeLoader)
     if exists(self.path) and not new:
         with open(self.path, "rb") as f:
             self.reg = pickle.load(f)
     else:
         relations = yaml.load(open(join(self.dir_yml, "relation.yml"),
                                    encoding="utf-8"),
                               Loader=yaml.SafeLoader)
         ques_word = yaml.load(open(join(self.dir_yml, "quesword.yml"),
                                    encoding="utf-8"),
                               Loader=yaml.SafeLoader)
         wrong_word = yaml.load(open(join(self.dir_yml, "wrong_table.yml"),
                                     encoding="utf-8"),
                                Loader=yaml.SafeLoader)
         concepts = relations
         concepts.update(ques_word)
         concepts.update(wrong_word)
         self.reg = Recognizer(concepts)
         with open(self.path, "wb") as f:
             pickle.dump(self.reg, f)
Example #10
def process_folder(source_folder, collection):
    db = DBManager(collection)
    recognizer = Recognizer(db)
    source_folder = os.path.normpath(source_folder)
    assert os.path.isdir(source_folder), 'Folder does not exist'
    labels_folder = os.path.join(source_folder, 'labels')
    first_subfolders = [
        os.path.join(labels_folder, l) for l in os.listdir(labels_folder)
    ]
    for first_subfolder in first_subfolders:
        second_subfolders = [
            os.path.join(first_subfolder, l)
            for l in os.listdir(first_subfolder)
        ]
        for second_subfolder in second_subfolders:
            json_files = glob.glob(os.path.join(second_subfolder, '*.json'))
            for json_file in json_files:
                json_data = load_json(json_file)
                locations = get_locations(json_data, second_subfolder)
                if locations is None:
                    continue
                locations, img = locations
                ids = []
                for location in locations:
                    _id = recognizer.recognaze_face(img, location)
                    ids.append(_id)

                #face_encodings = get_face_encodings(json_data)
                new_json_data = update_ids(json_data, ids)
                save_json(json_file, new_json_data)
Example #11
def recognize_api():

    if 'file' not in request.files:
        return 'No file part'

    file = request.files['file']

    if file.filename == '':
        return 'No filename'

    time = datetime.datetime.now()
    try:
        with tempfile.NamedTemporaryFile() as temp_file:
            recognizer = Recognizer()
            filename = temp_file.name
            file.save(filename)
            text = recognizer.get_text(filename)
            used = datetime.datetime.now() - time
            usage = Usage()
            usage.log()
            return "Text reconegut: [{0}] - temps usat {1}".format(text, used)
    except ValueError as e:
        return (
            "No és un fitxer d'àudio en format PCM WAV, AIFF/AIFF-C o FLAC ",
            400)
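Assuming the view above is mounted on a Flask route that accepts multipart uploads (the URL and route path below are assumptions, not from the source), a minimal client sketch:

import requests

# Post an audio file to the hypothetical /recognize endpoint.
with open("sample.wav", "rb") as f:
    resp = requests.post("http://localhost:5000/recognize", files={"file": f})
print(resp.text)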
Example #12
	def __init__(self, cameras=[], port=9004, users_file="users.json", recognition_db="recognition.db"):

		Server.__init__(self, port=port, usessl=False)

		self.recognition_db = recognition_db

		self.last_user_uuid = ""
		self.last_len_persons_detected = -1
		self.last_len_users = -1

		self.camera_clients = []
		self.recognizer = Recognizer(users_file)

		self.cameras = cameras
		self.start()

		self.method_handlers = {}
		self.method_handlers["list_users"] = self.list_users
		self.method_handlers["select_camera"] = self.select_camera
		self.method_handlers["list_users_with_level"] = self.list_users_with_level
		self.method_handlers["add_association"] = self.add_association

		self.users_recognized = []

		asyncio.get_event_loop().create_task(self.poll())
Example #13
    def decrypt(self):
        if not self.fname:
            return

        self.recognizer_thread = Recognizer()
        self.recognizer_thread.finished.connect(self.do_decrypt)
        self.status_bar.showMessage("Say the password")
        self.recognizer_thread.start()
Example #14
def detect_faces_in_image(file_stream):
    result = {}
    face_recognizer = Recognizer("face_gallery", file_stream)
    locs, names = face_recognizer.startEngine()
    for loc, name in zip(locs, names):
        result[name] = loc

    return jsonify(result)
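For reference, the JSON above maps each recognized name to its face location. Assuming startEngine returns boxes in the common (top, right, bottom, left) convention (an assumption; the snippet does not define it), a response might look like:

{"alice": [54, 210, 160, 98], "unknown": [12, 400, 120, 300]}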
Example #15
class StartWindow(QDialog):
    # class StartWindow(QMainWindow):
    def __init__(self, camera=None):
        super().__init__()
        self.camera = camera

        self.central_widget = QWidget()

        self.win = pg.GraphicsLayoutWidget()
        self.view = self.win.addViewBox()
        self.img = pg.ImageItem()
        self.view.addItem(self.img)

        # testPushButton = QPushButton("This is a test")

        # cam_layout = QVBoxLayout(self.central_widget)
        cam_layout = QVBoxLayout()
        # cam_layout.addWidget(self.win)

        # Had this uncommented.
        # cam_layout.addWidget(testPushButton)
        cam_layout.addStretch(1)
        # self.setCentralWidget(self.central_widget)

        main_layout = QGridLayout()
        # main_layout.addLayout(cam_layout, 0, 0, 1, 1)
        main_layout.addWidget(self.win, 0, 0)
        main_layout.addLayout(cam_layout, 0, 1, 1, 1)

        # main_layout.setRowStretch(1, 1)
        # main_layout.setRowStretch(2, 1)

        # Had these uncommented.
        # main_layout.setColumnStretch(0, 2)
        # main_layout.setColumnStretch(1, 1)

        self.setLayout(main_layout)

        self.update_timer = QTimer()
        self.update_timer.timeout.connect(self.update)
        self.update_timer.start(10)

        self.recognizer = Recognizer()
        self.recognizer.load_models()

        self.setWindowTitle("Recognizer")

    def update(self):
        frame = self.camera.get_frame()
        encodings, landmarks = self.recognizer.find_faces(frame)
        for face in encodings.keys():
            frame = self.recognizer.draw_face(frame, face, landmarks[face])
            person = self.recognizer.recognize(encodings[face])
            print(person)
        # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # RGB conversion, despite the old 'gray' name
        # self.img.setImage(np.flip(rgb).T)
        self.img.setImage(np.rot90(rgb, 3))
Example #16
 def repeat(self, password):
     if not password:
         self.status_bar.showMessage("Error: empty password")
         return
     self.password = password
     self.recognizer_thread = Recognizer()
     self.recognizer_thread.finished.connect(self.do_encrypt)
     self.status_bar.showMessage("Your password is '" + password +
                                 "'. Repeat to confirm.")
     self.recognizer_thread.start()
Example #17
    def __init__(self, master=None):
        super().__init__(master)
        self.master = master

        self.speak_engine = SpeakEngine(self.speak_engine_did_finish_utterance)
        self.voices = self.speak_engine.get_voices()

        self.recognizer = Recognizer(self.speak_engine)

        self.pack()
        self.create_widgets()
Example #18
def adjust(request):
    recognizer = Recognizer()

    PATH = request.GET["canvasURL"]
    text = request.GET["text"]

    imgList = SymbolFinder.find(PATH)

    text = list(text)
    print(text)
    recognizer.adjust(imgList, text)
    return HttpResponse("Adjusting was successful")
Example #19
class RecognitionScreen(ScrollView):
    def __init__(self, filename, count, **kwargs):
        super().__init__(**kwargs)

        self.filename = filename
        self.count = count
        self.recog = Recognizer()
        
        # Recognition label element
        self.label_recognition = Label(text=f'Reconhecimento 1 de {self.count}', font_size=30)
        self.clear_widgets()
        self.layout = BoxLayout(orientation='vertical')

        # Start the recognition thread
        threading.Thread(target=self._recognition).start()

        self.layout.add_widget(self.label_recognition)
        self.add_widget(self.layout)

    def _recognition(self):
        # Recognized text, to be stored in the database
        text = []

        # One recognition per audio file
        for i in range(self.count):
            # Report which recognition is being performed
            self.label_recognition.text = f'Reconhecimento {i+1} de {self.count}'

            # Read the audio file
            audio = self.recog.read_file(f'{self.filename}-{i}.wav')

            # Run speech-to-text (STT)
            text_recognition = self.recog.recognize_speech(audio)
            text.append(text_recognition)

            sleep(0.1)

        self.label_recognition.text = "Reconhecimento concluído, salvando o arquivos"
        organizator = Source()
        organizator.delete()

        # Build the current date string
        now = datetime.now()
        dia_atual = f'{now.day}-{now.month}-{now.year}'

        # Derive the name
        name = self.filename.replace('tmp/', '').replace(dia_atual, '').replace('-', '')

        organizator.constructor(f'{dia_atual}-{str(now.hour)}-{str(now.minute)}', name, text)

        self.remove_widget(self.layout)
        self.add_widget(Initial())
Example #20
 def __init__(self, *args, accounts_title_re: str, accounts_rects: dict,
              **kwargs):
     super().__init__(*args, **kwargs)
     self.accounts_title_re = accounts_title_re
     self.accounts_rects = accounts_rects
     self.accounts_recognizer = Recognizer(name=self.name + '_accounts',
                                           in_width=16,
                                           in_height=16,
                                           n_units=100,
                                           n_out=128,
                                           filter_mode=self.filter_mode,
                                           chrome=self.chrome)
     self.accounts_handle = None
     self.account = new_account(self.name)
Example #21
    def __init__(self, size, devices, parent=None):
        super(MiniGameWidget, self).__init__(parent)
        self.width, self.height = size
        self.scores = []
        self.player, self.conductor, self.template = self.init_ui()
        rec_1 = Recognizer()
        rec_2 = Recognizer()
        rec_1.set_callback(self.on_result)
        rec_2.set_callback(self.on_result)

        self.template.template_selected.connect(
            self.player.on_template_selected)
        self.template.template_selected.connect(
            self.conductor.on_template_selected)
        self.player.finished_unistroke.connect(
            lambda points, name, t_name: self.on_rec(rec_1, points, name,
                                                     t_name))
        self.conductor.finished_unistroke.connect(
            lambda points, name, t_name: self.on_rec(rec_2, points, name,
                                                     t_name))

        self.devices = None
        if devices is not None and len(devices) > 0:
            self.connect_devices(devices, self.player, self.conductor)
            self.devices = devices
        self.template.show()
        self.player.show()
        self.conductor.show()
        self.show()
        self.setHidden(False)

        self.template.draw()
Example #22
def main(args):
    """Entry point."""
    # TODO Replace with argparse
    if len(args) > 1 and os.path.isfile(args[1]):
        with io.open(args[1], 'r', encoding='utf-8') as f:
            data = f.read()
    else:
        stream = io.open(sys.stdin.fileno(), 'r', encoding='utf-8')
        data = stream.read()
    # TODO add '--verbose' flag to disable logging
    logging.basicConfig(format='%(levelname)s %(name)s: %(message)s',
                        level=logging.DEBUG)
    recognizer = Recognizer()
    result = recognizer.recognize(data)
    print(','.join(result))
Example #23
    def __init__(self, recognizer_path, retina=False, on_gpu=False, emotions=False):
        self.on_gpu = on_gpu

        if retina:
            self.finder = FaceFinder(on_gpu=on_gpu)
        else:
            self.finder = None

        if emotions:
            self.emotions = Emanalisis(on_gpu=on_gpu, path_to_classifier="net_714.pth", finder=self.finder)
        else:
            self.emotions = None

        self.recog = Recognizer(finder=self.finder)
        self.recog.load_model(recognizer_path)
        self.clust = Clusterizer(samples=5)
Example #24
 def test_load(self):
     r = Recognizer()
     r.load()
     from dataset import DigitDataSet
     dataset = DigitDataSet()
     dataset.load(os.path.join('..', 'data', 'data.csv'))
     m, n = dataset.shape()
     err = 0
     for i in range(m):
         X = dataset.getData(i)
         y = dataset.getLabel(i)
         label = r.predict(X)
         print(y, label)
         if y != label:
             err += 1
     print(float(err) / m)
Example #25
class Artist:
    # <target> is from <folder>
    def __init__(self):
        # load resources via Recognizer
        self.recognizer = Recognizer(EIG_x_NPZ, K, loadPrior=True)

    def caricature(self, target, knownIndexes=[]):
        # enlarge top 3 coords
        # decrease bot 3 coords?
        targetCoords = self.recognizer.represent(target, makeFaceClass=True)

        # find top 3 coords
        idx1, coord1 = -1, 0
        idx2, coord2 = -1, 0
        idx3, coord3 = -1, 0
        numCoords = len(targetCoords)
        for i in range(numCoords):
            coord = targetCoords[i]
            if coord > coord1:
                # shift (1st, 2nd) to (2nd, 3rd)
                coord3 = coord2
                idx3 = idx2
                coord2 = coord1
                idx2 = idx1
                # new biggest coord
                coord1 = coord
                idx1 = i
            elif coord > coord2:
                # shift (2nd) to (3rd)
                coord3 = coord2
                idx3 = idx2
                # new 2nd biggest coord
                coord2 = coord
                idx2 = i
            elif coord > coord3:
                # new 3rd biggest coord
                coord3 = coord
                idx3 = i
        
        # magnify top 3 coords
        targetCoords[idx1] *= 2.
        targetCoords[idx2] *= 2.
        targetCoords[idx3] *= 2.
        

        self.recognizer.represent(target, coords=targetCoords)
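A note on the selection loop above: it tracks the top three coordinates with a hand-rolled three-way cascade. A sketch of an equivalent, more compact formulation, assuming targetCoords is a 1-D numpy array:

import numpy as np

# Indexes of the three largest coordinates, then magnify them in place.
top3 = np.argsort(targetCoords)[-3:]
targetCoords[top3] *= 2.0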
Example #26
    def __init__(self, filename, count, **kwargs):
        super().__init__(**kwargs)

        self.filename = filename
        self.count = count
        self.recog = Recognizer()
        
        # Recognition label element
        self.label_recognition = Label(text=f'Reconhecimento 1 de {self.count}', font_size=30)
        self.clear_widgets()
        self.layout = BoxLayout(orientation='vertical')

        # Start the recognition thread
        threading.Thread(target=self._recognition).start()

        self.layout.add_widget(self.label_recognition)
        self.add_widget(self.layout)
Example #27
 def test_train(self):
     return  # test disabled; remove this early return to run it
     r = Recognizer()
     path = os.path.join('..', 'data', 'sample')
     r.train(path)
     from dataset import DigitDataSet
     dataset = DigitDataSet()
     dataset.load(path)
     m, n = dataset.shape()
     err = 0
     for i in range(m):
         X = dataset.getData(i)
         y = dataset.getLabel(i)
         label = r.predict(X)
         print(y, label)
         if y != label:
             err += 1
     print(float(err) / m)
Example #28
    def __init__(self, camera=None):
        super().__init__()
        self.camera = camera

        self.central_widget = QWidget()

        self.win = pg.GraphicsLayoutWidget()
        self.view = self.win.addViewBox()
        self.img = pg.ImageItem()
        self.view.addItem(self.img)

        # testPushButton = QPushButton("This is a test")

        # cam_layout = QVBoxLayout(self.central_widget)
        cam_layout = QVBoxLayout()
        # cam_layout.addWidget(self.win)

        # Had this uncommented.
        # cam_layout.addWidget(testPushButton)
        cam_layout.addStretch(1)
        # self.setCentralWidget(self.central_widget)

        main_layout = QGridLayout()
        # main_layout.addLayout(cam_layout, 0, 0, 1, 1)
        main_layout.addWidget(self.win, 0, 0)
        main_layout.addLayout(cam_layout, 0, 1, 1, 1)

        # main_layout.setRowStretch(1, 1)
        # main_layout.setRowStretch(2, 1)

        # Had these uncommented.
        # main_layout.setColumnStretch(0, 2)
        # main_layout.setColumnStretch(1, 1)

        self.setLayout(main_layout)

        self.update_timer = QTimer()
        self.update_timer.timeout.connect(self.update)
        self.update_timer.start(10)

        self.recognizer = Recognizer()
        self.recognizer.load_models()

        self.setWindowTitle("Recognizer")
Example #29
 def __init__(self):
     self.recognizer = Recognizer()
     self.handler = TargetHandler()
     self.camera_input = CameraInput()
     self.preview_output = PreviewOutput()
     self.displacement_estimator = DisplacementEstimator()
     self.horizontal_field_of_view = HORIZONTAL_FIELD_OF_VIEW
     self.drone_control = DroneControl(self.handler)
     self.drone_control.startup_simulation(TAKEOFF_HEIGHT, MAX_FRAMES_PER_SECOND)
     self.simple_guidance = None
Example #30
 def __init__(self, agent):
     self.posestate = "rest"  # "rest" or "ready"
     self.controlstate = "controlled"  # "controlled" or "indep"
     self.agent = agent
     self.target = None
     self.image = None
     self.__recognizer = Recognizer()
     self.__recognizer.on_keyword = self.on_keyword
     self.__thread = Thread(target=self.__recognizer.run)
     self.__ran = 0
Example #31
    def __init__(self,
                 recognizer_path,
                 retina=False,
                 on_gpu=False,
                 emotions=False,
                 confidence_threshold=0.02,
                 top_k=5000,
                 nms_threshold=0.4,
                 keep_top_k=750,
                 vis_thres=0.6,
                 network='resnet50',
                 distance_threshold=0.4,
                 samples=5,
                 eps=0.3):
        self.on_gpu = on_gpu

        if retina:
            self.finder = FaceFinder(on_gpu=on_gpu,
                                     confidence_threshold=confidence_threshold,
                                     top_k=top_k,
                                     nms_threshold=nms_threshold,
                                     keep_top_k=keep_top_k,
                                     vis_thres=vis_thres,
                                     network=network)
        else:
            self.finder = None

        if emotions:
            self.emotions = Emanalisis(on_gpu=on_gpu,
                                       path_to_classifier="net_714.pth",
                                       finder=self.finder)
        else:
            self.emotions = None

        self.recognizer_retrained = True
        self.recog = Recognizer(finder=self.finder,
                                distance_threshold=distance_threshold)
        self.recog.load_model(recognizer_path)
        self.clust = Clusterizer(samples=samples, eps=eps)
        self.em_labels = [
            'ANGRY', 'DISGUST', 'FEAR', 'HAPPY', 'SAD', 'SURPRISE', 'NEUTRAL'
        ]
Example #32
class GaitameServer(CaptureServer):
    def __init__(self, *args, accounts_title_re: str, accounts_rects: dict,
                 **kwargs):
        super().__init__(*args, **kwargs)
        self.accounts_title_re = accounts_title_re
        self.accounts_rects = accounts_rects
        self.accounts_recognizer = Recognizer(name=self.name + '_accounts',
                                              in_width=16,
                                              in_height=16,
                                              n_units=100,
                                              n_out=128,
                                              filter_mode=self.filter_mode,
                                              chrome=self.chrome)
        self.accounts_handle = None
        self.account = new_account(self.name)

    def load_model(self):
        self.recognizer.load_model()
        self.accounts_recognizer.load_model()

    def split_image(self, img: Image) -> list:
        # stub: expected to split the cropped image into per-character tiles
        pass

    def capture_accounts(self):
        if not self.accounts_handle:
            self.accounts_handle = self.find_window(self.accounts_title_re)
        if not self.accounts_handle:
            return []
        img = self.capture_window(self.accounts_handle)
        results = []
        d = {}
        for k, rect in self.accounts_rects.items():
            cropped = img.crop(rect)
            s = ''
            for x in self.split_image(cropped):
                s += self.accounts_recognizer.recognize(x)
            d[k] = float(s)
        account = new_account(self.name, **d)
        return [account]

    def get_accounts(self, do_refresh: bool = False) -> List[dict]:
        return self.capture_accounts()
Example #33
class ItemMatcher:
    def __init__(self, new=False):
        self.reg = None
        self.dir_yml = join("yml")
        self.dir_data = join("data")
        self.path = join(self.dir_data, "reg.pkl")
        self.rel2id = yaml.load(open(join(self.dir_yml, "rel2id.yml"),
                                     encoding="utf-8"),
                                Loader=yaml.SafeLoader)
        self.int2id = yaml.load(open(join(self.dir_yml, "int2id.yml"),
                                     encoding="utf-8"),
                                Loader=yaml.SafeLoader)
        if exists(self.path) and not new:
            with open(self.path, "rb") as f:
                self.reg = pickle.load(f)
        else:
            relations = yaml.load(open(join(self.dir_yml, "relation.yml"),
                                       encoding="utf-8"),
                                  Loader=yaml.SafeLoader)
            ques_word = yaml.load(open(join(self.dir_yml, "quesword.yml"),
                                       encoding="utf-8"),
                                  Loader=yaml.SafeLoader)
            wrong_word = yaml.load(open(join(self.dir_yml, "wrong_table.yml"),
                                        encoding="utf-8"),
                                   Loader=yaml.SafeLoader)
            concepts = relations
            concepts.update(ques_word)
            concepts.update(wrong_word)
            self.reg = Recognizer(concepts)
            with open(self.path, "wb") as f:
                pickle.dump(self.reg, f)

    def matcher(self, q: str):
        res = {
            "relation": [],
            "intent": '0',
            "raw_query": q,
            "correct_query": None,
            "correct": []
        }
        for item in self.reg.query4type(q):
            if item["type"] in self.rel2id:
                item.update({"id": self.rel2id[item["type"]]})
                res["relation"].append(item)
            elif item["type"] in self.int2id:
                if res["intent"] != '0' and res["intent"] != self.int2id[
                        item["type"]]:
                    res["intent"] = '1'  # 冲突
                else:
                    res["intent"] = self.int2id[item["type"]]

            else:
                res["correct"].append(item)
                res["correct_query"] = q.replace(item["mention"], item["type"])
        return res
Example #34
    def __init__(self):
        # lower and upper bound for marker color
        self._lower_hue_0 = numpy.array([80, 90, 100])
        self._lower_hue_1 = numpy.array([120, 255, 255])
        self._upper_hue_0 = numpy.array([80, 90, 100])
        self._upper_hue_1 = numpy.array([120, 255, 255])

        # image processing kernels
        self._kernel_median_blur = 27
        self._kernel_dilate_mask = (9, 9)

        # marker properties
        self._x = -1
        self._y = -1
        self._dx = 0
        self._dy = 0
        self._vx = 0
        self._vy = 0
        self._histdx = []
        self._histdy = []
        self._points = []
        self._max_points = 50
        self._min_change = 10
        self._min_veloxy = 2.0
        self._marker_ctr = None
        self._marker_tip = None

        # frames per second
        self._fps = 20

        # render elements
        self._render_marker = True
        self._render_trails = True

        # recognizer
        self._recognizer = Recognizer()

        # opencv version
        self._opencv_version = int(cv2.__version__.split('.')[0])

        return
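A note on the hue bounds above: each (_*_hue_0, _*_hue_1) pair reads as one HSV range, suggesting a two-range mask so marker hues that straddle a range boundary are still caught. A minimal sketch of how such bounds are typically applied (an assumption; the masking code itself is not shown in this snippet):

hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask_a = cv2.inRange(hsv, self._lower_hue_0, self._lower_hue_1)
mask_b = cv2.inRange(hsv, self._upper_hue_0, self._upper_hue_1)
mask = cv2.bitwise_or(mask_a, mask_b)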
Example #35
 def selection_task(self, event):
     if self.task_cnt == len(self.cases) * len(self.recog_typelist):
         # clean from previous task
         self.clean_task()
         self.clean_session()
         self.rest_text = self.w.create_text(
             int(self.width / 2),
             int(self.height / 2),
             anchor='center',
             fill='orange',
             font=("Microsoft YaHei", 50),
             text='Remaining rest time {}s'.format(self.rest_cnt))
         self.rest_handles.append(self.root.after(1, self.rest))
     else:
         # clean from previous task
         self.clean_task()
         self.task_init()
         self.pats_status = [0] * self.n
         print(self.session_cnt, self.task_cnt, self.recog_type)
         # start new recognizer thread for the new task
         self.stop_event.clear()
         self.recog = Recognizer(self.stop_event, self.select_event,
                                 self.sig_queue, self.pat_queues,
                                 self.recog_type, self.n, self.interval,
                                 self.pats_selected, self.model_period,
                                 self.model_delay)
         self.recog.start()
         # draw the posters and dots
         self.display()
         # blink the dot according to pats
         for i, item in enumerate(self.w.find_withtag('dot')):
             # print(self.pats_selected[i], i, item)
             self.after_handles.append(
                 self.root.after(self.pats_selected[i][1], self.flash, item,
                                 i, 0))
         self.task_cnt += 1
         self.task_time = time.time()
         self.check_handles.append(self.root.after(1, self.target_check))
Example #36
    def __init__(self, **kwargs):
        if kwargs.get('cd'):
            print(os.getcwd())
            os.chdir('../')

        self.fingerprinter = Fingerprinter()
        self.logger = Logging()
        self.fetcher = Fetcher()
        self.parser = Parsers()
        self.config = Configs()
        creds = self.config.get_db_creds()
        self.recognizer = Recognizer()
        self.database = MySQLDatabases(hostname=creds['hostname'], username=creds['username'],
                                       password=creds['passwd'], database=creds['db_name'])
Example #37
    def __init__(self): 
        self.init = False

        #Are we paused
        self.pause = False
        #training mode enabled 
        self.train = False
        self.tracking = False
        self.useMog = True
        self.showBGDiff = False
        self.lastTimeWarningSent = 1
        self.alertInterval = 10000
        # separate bg weight ratios
        self.weightBG1 = 0.2
        self.weightBG2 = 0.6

        self.trained = {}
        self.counter = 0
        self.trainCouter = 0
        self.erodeIter = 1
        # Gaussian weight ratio
        self.blurKp = 1
        #cutoff threshold for BW image
        self.cutOffThresh = 30
        # what size to limit our bounding boxes to
        self.sizeL = 4000
        self.sizeM = 1500
        # kernel size for erode and dilate
        self.kernalH = 3
        self.kernalW = 3
        self.kernel = np.ones((self.kernalH,self.kernalW),'uint8')
        self.rec = Recognizer()
        self.currFrame = None
        self.currFrameOpt = None

        self.contours = []
        #tracking will start after this many frames
        self.start = 10
        #track interval don't do tracking every frame
        self.track_interval = 20

        #Use MOG BG extractor
        self.bgs = cv2.BackgroundSubtractorMOG(24 * 60, 1, 0.8, 0.5)
        self.svmTracker = SVM()
        self.svmReady = True
        self.twtr = TwitterAlert()
Example #38
    def __init__(self):
        super(MusicMakerApp, self).__init__()
        self.setMinimumHeight(500)
        self.setMinimumWidth(800)

        self.markerHelper = IrMarkerEventFilter(self)
        self.installEventFilter(self.markerHelper)

        self.recognizer = Recognizer()
        self.recognizer.addTemplate(template.Template(*template.circle))
        self.recognizer.addTemplate(template.Template(*template.delete))
        self.recognizer.addTemplate(template.Template(*template.rectangle))
        self.recognizer.addTemplate(template.Template(*template.caret))
        self.recognizer.addTemplate(template.Template(*template.zig_zag))
        self.recognizer.addTemplate(template.Template(*template.left_square_bracket))

        self.recognizeThread = RecognizeThread()
        self.recognizeThread.finished.connect(self.recognized)
        self.recognizeThread.start()

        self.head = Playhead(self, self.playheadMoved)
Example #39
class RecognitionServer(Server):

	def __init__(self, cameras=[], port=9004, users_file="users.json", recognition_db="recognition.db"):

		Server.__init__(self, port=port, usessl=False)

		self.recognition_db = recognition_db

		self.last_user_uuid = ""
		self.last_len_persons_detected = -1
		self.last_len_users = -1

		self.camera_clients = []
		self.recognizer = Recognizer(users_file)

		self.cameras = cameras
		self.start()

		self.method_handlers = {}
		self.method_handlers["list_users"] = self.list_users
		self.method_handlers["select_camera"] = self.select_camera
		self.method_handlers["list_users_with_level"] = self.list_users_with_level
		self.method_handlers["add_association"] = self.add_association

		self.users_recognized = []

		asyncio.get_event_loop().create_task(self.poll())


	def save_recognition_db(self):
		print("trying to save recognition db...")
		try:
			with open(self.recognition_db, "w+") as db:
				data = self.recognizer.serialize()

				if not data:
					print("Failed to serialize recognition database")
					return

				db.write(data)
		except:
			import sys, traceback
			exc_type, exc_value, exc_traceback = sys.exc_info()
			traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
			traceback.print_exception(exc_type, exc_value, exc_traceback,
						limit=6, file=sys.stdout)

	def load_recognition_db(self):
		try:
			with open(self.recognition_db, "r") as db:
				success = self.recognizer.deserialize(db.read())
		except:
			import sys, traceback
			exc_type, exc_value, exc_traceback = sys.exc_info()
			traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
			traceback.print_exception(exc_type, exc_value, exc_traceback,
						limit=6, file=sys.stdout)

	def reset_last_uuid(self):
		 self.last_user_uuid=""

	def send_all(self, msg):
		for client in self.camera_clients:
			client.sendMessage(msg, False)

	def face_detected(self, person):
		msg = Signals.face_detected(None)

		self.send_all(msg)

	def persons_detected(self, persons, users):
		msg = Signals.persons_detected(persons, users)
		self.send_all(msg)

	def face_recognized(self, user, img, confidence):
		msg = Signals.face_recognized(user, img, confidence)

		self.send_all(msg)

	def users_recognized(self, users):
		msg = Signals.users_recognized(users)

		self.send_all(msg)


	@asyncio.coroutine
	def poll(self):
		while True:
			try:
				persons = self.recognizer.detect_persons()

				self.process(persons)

			except:
				print("crashed while trying to poll recognizer...")
				import sys, traceback
				exc_type, exc_value, exc_traceback = sys.exc_info()
				traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
				traceback.print_exception(exc_type, exc_value, exc_traceback,
							limit=6, file=sys.stdout)
			yield asyncio.From(asyncio.sleep(1))


	def user_exists(self, userdata):
		for user in self.users_recognized:
			if user.uuid == userdata.uuid:
				return True

		return False

	def scrub_users(self):
		"removes users from list that haven't been seen in a 10 second span"
		for user in self.users_recognized:
			if (datetime.now() - user.last_seen).total_seconds() > 10:
				self.users_recognized.remove(user)

	def process(self, persons):

		for person in persons:
			try:
				user = self.recognizer.recognize(person.person_id.tracking_id)

				if not user or user.status > 1:
					if user:
						print(user.status_desc)
					return

				confidence = user.confidence
				uuid = user.recognition_id

				userdata = self.recognizer.user(uuid=uuid)

				if not userdata:
					continue

				userdata = AugmentedUser(userdata)
				userdata.last_seen = datetime.now()

				print("user recognized: {}".format(userdata.username))
				print("confidence: {}".format(confidence))

				if confidence > 50:
					print("confidence is good.  Sending face_recognized signal")
					if not self.user_exists(userdata):
						self.users_recognized.append(userdata)

			except:
				import sys, traceback
				exc_type, exc_value, exc_traceback = sys.exc_info()
				traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
				traceback.print_exception(exc_type, exc_value, exc_traceback,
							limit=6, file=sys.stdout)

		self.scrub_users()

		if len(persons) != self.last_len_persons_detected or self.last_len_users != len(self.users_recognized):

			self.last_len_persons_detected = len(persons)
			self.last_len_users = len(self.users_recognized)

			self.persons_detected(persons, self.users_recognized)

	def onMessage(self, msg, fromClient):
		print("message received!!!")

		try:
			msg = json.loads(msg)

			if "method" in msg.keys():
				self.hndl_method(msg, fromClient)
			else:
				print("unhandled message: {}".format(msg))

		except:
			print ("message: {}".format(msg))
			import sys, traceback
			exc_type, exc_value, exc_traceback = sys.exc_info()
			traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
			traceback.print_exception(exc_type, exc_value, exc_traceback,
							limit=6, file=sys.stdout)

	def hndl_method(self, msg, fromClient):
		method = msg["method"]

		if method in self.method_handlers:
			self.method_handlers[method](msg, fromClient)
		else:
			print("method not handled: {}".format(method))

	def select_camera(self, msg, fromClient):
		if not "camera" in msg:
			print("Error: invalid select_camera message")
			return

		self.camera_clients.append(fromClient)

	def list_users(self, msg, fromClient):
		filter=None
		if "filter" in msg:
			filter = msg["filter"]

		reply = Signals.list_users(self.recognizer.getUsers(filter))

		fromClient.sendMessage(reply, False)

	def list_users_with_level(self, msg, fromClient):
		level = msg["level"]
		users = self.recognizer.users

		reply_user_list = []

		for user in users:
			if user.level >= level:
				reply_user_list.append(user.to_json())

		print("replying to list_users_with level with ({}) users".format(len(reply_user_list)))

		reply = Signals.list_users_with_level(reply_user_list)

		fromClient.sendMessage(reply, False)

	def add_association(self, msg, fromClient):
		uuid = msg["uuid"]
		associate_uuid = msg["associate_uuid"]
		self.recognizer.associate(uuid, associate_uuid)
Example #40
class Nest:
    """
        - convert all mp3 sounds to wav sounds -> store in wavs folder
        - go to db get birdID & wavFile & fingerprint the wavfile
        - store birdID & hash in db
    """

    SOUNDS_DIR = 'BirdSounds/'
    WAV_SOUNDS_DIR = 'BirdSounds/wavSounds/'
    MAX_PROCS = 10

    def __init__(self, **kwargs):
        if kwargs.get('cd'):
            print(os.getcwd())
            os.chdir('../')

        self.fingerprinter = Fingerprinter()
        self.logger = Logging()
        self.fetcher = Fetcher()
        self.parser = Parsers()
        self.config = Configs()
        creds = self.config.get_db_creds()
        self.recognizer = Recognizer()
        self.database = MySQLDatabases(hostname=creds['hostname'], username=creds['username'],
                                       password=creds['passwd'], database=creds['db_name'])

    def mp3_to_wav(self, src_dir, extension_list=('*.mp4', '*.flv', '*.mp3')):
        os.chdir(src_dir)
        logs = ""
        for extension in extension_list:
            for media_file in glob.glob(extension):
                wav_file = "../" + Nest.WAV_SOUNDS_DIR + os.path.splitext(os.path.basename(media_file))[0] + '.wav'
                logs += "converting %s to %s\n" % (os.path.basename(media_file), wav_file)
                AudioSegment.from_file(media_file).export(wav_file, format='wav')
        os.chdir('../')
        print(logs)
        self.logger.write_log(log_file='fingerprint', log_tag='i', log_msg=logs)

    def reload_creds(self):
        self.database = None

        creds = self.config.get_db_creds()
        self.database = MySQLDatabases(hostname=creds['hostname'], username=creds['username'],
                                       password=creds['passwd'], database=creds['db_name'])

    def fetch_stuff(self):
        pass
        #self.parser.parse()
        #self.parser.threading_ops()

    def chunkify(self, lst, n):
        """
        split a list into n parts
        """
        return [lst[i::n] for i in range(n)]

    def fetch_images(self):
        """
        get all birds from db
            - get birdID & birdName
            - get image URLS from GAPI & store in DB
        """
        cursor = self.parser.database.get_all_birds()
        for row in cursor:
            self.parser.parse_GAPI(birdName=row['englishName'], birdID=row['birdID'])

    def fingerprint_sounds(self):
        """
            - go to db get birdID & wavFile & fingerprint the wavfile
            - store birdID & hash in db
        """
        cursor = self.database.get_sounds()
        threads = []
        sound_details = []

        count = 0
        print(len(cursor))
        for row in cursor:
            birdID = row['birdID']
            wavFile = "%s%s.wav" % (Nest.WAV_SOUNDS_DIR, row['wavFile'])
            sound_details.append((birdID, wavFile))

        shuffle(sound_details)
        split_details = self.chunkify(sound_details, Nest.MAX_PROCS)

        #split procs
        procs = []
        #for i in range(Nest.MAX_PROCS):
        #    #create separate/non-shared connections to db
        #    creds = Configs().get_db_creds()
        #    self.database = self.database = MySQLDatabases(hostname=creds['hostname'], username=creds['username'],
        #                                                   password=creds['passwd'], database=creds['db_name'])
        #
        #    #create procs & start
        #    proc = Process(target=self.fingerprint_worker, args=([split_details[i]]))
        #    proc.start()
        #    procs.append(proc)
        #
        ##wait for all procs to finish
        #for proc in procs:
        #    proc.join()

        self.fingerprint_worker(sound_details)

    def fingerprint_worker(self, sound_details):
        """
            fingerprint each song & store hash in db
        """

        for birdID, wavFile in sound_details:
            print "birdID: ", birdID, "wavFile: ", wavFile

            channels = self.fingerprinter.extract_channels(wavFile)
            for c in range(len(channels)):
                channel = channels[c]
                t_start = time()
                logs = "now fingerprinting channel %d of song %s. BirdID: %s" % (c + 1, wavFile, birdID)
                self.logger.write_log(log_file='fingerprint', log_tag='i', log_msg=logs)
                print(logs)
                self.fingerprinter.fingerprint(channel, birdID)
                logs = "time taken: %d seconds" % (time() - t_start)
                self.logger.write_log(log_file='fingerprint', log_tag='i', log_msg=logs)
                print(logs)

            #update song as fingerprinted
            self.database.update_fingerprinted_songs(birdID=birdID)

    def process_requests(self, request_id):
        """
            get wavfile from inbound request, match it, and record the outbound result
        """
        cursor = self.database.get_inbound_request(request_id)
        if cursor is None:
            print "cursor is None!"
            self.reload_creds()
            cursor = self.database.get_inbound_request(request_id)
        else:
            print "cursor is not None!"

        wavfile = cursor['wavFile']

        bird_details = self.recognizer.recognize_file(filename=wavfile, verbose=False)
        self.database.update_processed_requests(request_id)

        match_result = 0 if bird_details['bird_id'] == 0 else 1
        outbound_id = self.database.insert_outbound_match(request_id=request_id, birdID=bird_details['bird_id'],
                                                          matchResults=match_result)
        # print "outboundID: %s" % outbound_id
        return outbound_id

    def get_outbound_birdID(self, outboundID):
        """
            return birdID from outbound_matches tbl
        """
        cursor = self.database.get_outbound_bird_id(outboundID)
        return cursor['birdID']

    def get_match_results(self, outboundID):
        """
            return matchResults from outbound_matches tbl
        """
        cursor = self.database.get_match_results(outboundID)
        return cursor['matchResults']

    def add_request(self, wavfile, deviceID):
        """
            add new unmatched request in db
        """
        request_id = self.database.insert_inbound_request(wavfile, deviceID)
        return request_id

    def get_bird_details(self, birdID):
        """
            get bird details from db
        """
        cursor = self.database.get_bird_by_id(birdID)
        return cursor

    def get_sound_details(self, birdID):
        """
            get sounds from db for a given birdID
            birdID, soundType, wavFile, soundURL
        """
        cursor = self.database.get_sound_by_id(birdID)
        return {"soundType": cursor['soundType'], "soundURL": cursor['soundURL']}

    def get_thumbnail_pic(self, birdID):
        """
            get thumbnail img from db for a given birdID
        """
        cursor = self.database.get_thumbnail_pic(birdID)
        return cursor['imageURL']

    def get_images(self, birdID):
        """
            return a list of images from db for a given birdID
        """
        cursors = self.database.get_images(birdID)
        pics = []
        for cursor in cursors:
            img = {"imageURL": cursor['imageURL'], "siteURL": cursor['siteURL']}
            pics.append(img)
        return pics
Example #41
	def __init__(self):
		self.init_commands()
		self.continuous_listen = True
		self.recognizer = Recognizer("conf/lang/lm", "conf/lang/dic")
		self.recognizer.connect('finished', self.parse_hyp)
Example #42
class UmbrellaTracker:
    def __init__(self): 
        self.init = False

        #Are we paused
        self.pause = False
        #training mode enabled 
        self.train = False
        self.tracking = False
        self.useMog = True
        self.showBGDiff = False
        self.lastTimeWarningSent = 1
        self.alertInterval = 10000
        # separate bg weight ratios
        self.weightBG1 = 0.2
        self.weightBG2 = 0.6

        self.trained = {}
        self.counter = 0
        self.trainCouter = 0
        self.erodeIter = 1
        # Gaussian weight ratio
        self.blurKp = 1
        #cutoff threshold for BW image
        self.cutOffThresh = 30
        # what size to limit our bounding boxes to
        self.sizeL = 4000
        self.sizeM = 1500
        # kernel size for erode and dilate
        self.kernalH = 3
        self.kernalW = 3
        self.kernel = np.ones((self.kernalH,self.kernalW),'uint8')
        self.rec = Recognizer()
        self.currFrame = None
        self.currFrameOpt = None

        self.contours = []
        #tracking will start after this many frames
        self.start = 10
        #track interval don't do tracking every frame
        self.track_interval = 20

        #Use MOG BG extractor
        self.bgs = cv2.BackgroundSubtractorMOG(24 * 60, 1, 0.8, 0.5)
        self.svmTracker = SVM()
        self.svmReady = True
        
        # Disabling twitter for the time being - not sure how its configured.
        #self.twtr = TwitterAlert()

    def onMouseClick(self,event, x, y, flags, param ):
        #select which roi to train for features
        if event == cv.CV_EVENT_LBUTTONDOWN:
            if len(self.contours) > 0:
                for cont in self.contours:
                    if self.isWithinBB(x,y,cont):
                        #x,y,w,h = cv2.boundingRect(cont)
                        box = cv2.boundingRect(cont)
                        if self.train:
                            print "Trainig for umbrella"
                            name = "./train/pos/%s.png"%self.trainCouter
                            self.trained[name] = box
                            cv2.imwrite("./train/pos/%s.png"%self.trainCouter,self.currFrame)
                            #self.rec.createTargetFeatures(self.getROI(self.currFrame,box),box)
                            #self.svmTracker.train(self.getROI(self.currFrame,box),np.float32)
                            #self.svmReady = True
                        else:
                            print "Trainig for None umbrella"
                            name = "./train/neg/%s.png"%self.trainCouter
                            self.trained[name] = box
                            cv2.imwrite("./train/neg/%s.png"%self.trainCouter,self.currFrame)

                        self.trainCouter = self.trainCouter + 1 
                               
            else:
                print "wait for contours to get initialized"

    def isWithinBB(self,x,y,cont):
        # check whether a point clicked on screen is within a bounding box defined by contours
        xb,yb,wb,hb = cv2.boundingRect(cont)
        xbC = xb + wb
        ybC = yb + hb
        if x > xb and x <xbC:
            if y > yb and y < ybC:
                return True
        return False

    def getROI(self,image,box):
        # cut out and return the section of an image extracted from a contour
        if len(box) == 4:
            # make sure we have four corners; sometimes we don't (paranoid)
            x,y,w,h = box
            return image[y:y+h,x:x+w]
        return None

    # apply a Gaussian blur to a frame
    def addGausianBlur(self,f1,level):
        return cv2.GaussianBlur(f1,(level,level),1)

    #get diff 
    def subtractFrames(self,f1,f2):
        dif = cv2.absdiff(f1,f2)
        #dif = cv2.bitwise_and(f1,f2)
        return dif

    #get image threshold to remove some noise
    def getThreshold(self,f,cutOffThreshVal):
        return cv2.threshold(f,cutOffThreshVal,255,0)

    #find the contours in the image
    def findCountoursBW(self,f):
        conts,hier = cv2.findContours(f,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
        # check size of contours; ignore ones that are too small or large
        return conts

    def drawContours(self,image,conts,sizeL,sizeM,color):
        # draw contours on image
        for cnt in conts:   
            x,y,w,h = cv2.boundingRect(cnt)
            cv2.rectangle(image,(x,y),(x+w,y+h), color,2)
        return image

    def counterSizeFilter(self,contours,sizeL,sizeM):
        #filter out contours that fit a certain size
        filtered = []
        for cnt in contours:   
            x,y,w,h = cv2.boundingRect(cnt)
            area = float(w)*float(h)
            if area < sizeL and area > sizeM:
                filtered.append(cnt)
        return filtered

    def drawContour(self,image,cont,color):
        # TODO: create a version that works for single contours
        x,y,w,h = cv2.boundingRect(cont)
        cv2.rectangle(image,(x,y),(x+w,y+h), color,2)
        return image

    def drawContours2(self,image,conts,sizeL,sizeM,color):
        # draw contours on image, given an array of (x, y, w, h) boxes
        for cnt in conts:   
            x,y,w,h = cnt[0],cnt[1],cnt[2],cnt[3]
            #area = float(w)*float(h)
            #if area < sizeL and area > sizeM:
            cv2.rectangle(image,(x,y),(x+w,y+h), color,2)
        return image

    def drawBox(self,image,x,y,size,color):
        cv2.rectangle(image,(x,y),(x+size,y+size),color)
        return image
    
    #note: this conversion really slows things down!

    def makeBW(self,f):
        """
        Overview of cvtColor -  Converts an image from one color space to another.

            C++: void cvtColor(InputArray src, OutputArray dst, int code, int dstCn=0 )
            Python: cv2.cvtColor(src, code[, dst[, dstCn]]) -> dst
        """    
        # http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html#cvtcolor
        # OpenCV delivers captured frames in BGR order, so convert BGR -> grayscale
        bwImg = cv2.cvtColor(f,cv2.COLOR_BGR2GRAY)
        return bwImg
    
    def createTrainingOutput(self):
        #write cascade-training annotation files: positives to info.dat, negatives to bg.txt
        count = 1   #one object per image
        fpP = open("info.dat","w")
        fpN = open("bg.txt","w")
        for key in self.trained.keys():
            if key.find("neg") > 0:
                output = "%s \n"%(key)
                fpN.write(output)
            else:
                output = "%s  %s  %s %s %s %s \n"%(key,count,self.trained[key][0],self.trained[key][1],self.trained[key][2],self.trained[key][3])
                fpP.write(output)
        fpP.close()
        fpN.close()
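    #For reference: info.dat and bg.txt follow the annotation formats consumed
    #by OpenCV's opencv_createsamples/opencv_traincascade tools -- positives as
    #"path num_objects x y w h", negatives as one path per line. With one
    #object per image, lines would look like (illustrative values):
    #
    #   info.dat:  ./train/pos/12.png  1  34 50 120 120
    #   bg.txt:    ./train/neg/7.png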
    #Home-brew background removal
    def bgSmoothing(self,frameBG,arr1, arr2):

        frameBG = cv2.equalizeHist(frameBG)
        #blurring the input frame helps to normalize and smooth out noise
        frameBG = self.addGausianBlur(frameBG,self.blurKp)
        #accumulate two weighted running averages of the background at
        #different rates (in place, so the accumulators persist across frames)
        cv2.accumulateWeighted(frameBG,arr1,self.weightBG1)
        cv2.accumulateWeighted(frameBG,arr2,self.weightBG2)
        #convert the float32 accumulators back to displayable 8-bit images
        res1 = cv2.convertScaleAbs(arr1)
        res2 = cv2.convertScaleAbs(arr2)
        
        #res1 = self.makeBW(res1)
        #res2 = self.makeBW(res2)
        return res1,res2
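    #A condensed restatement of the running-average technique above, as a
    #standalone sketch (an illustrative helper, not part of the original
    #class): cv2.accumulateWeighted keeps an exponential moving average,
    #dst = (1 - alpha)*dst + alpha*src, so averaging the stream at two rates
    #and differencing the results leaves mostly the recent motion.
    def motionMaskSketch(self,gray,slowAvg,fastAvg,slowA=0.05,fastA=0.3,thresh=30):
        #slowAvg/fastAvg are float32 accumulators the caller keeps between frames
        cv2.accumulateWeighted(gray,slowAvg,slowA)
        cv2.accumulateWeighted(gray,fastAvg,fastA)
        diff = cv2.absdiff(cv2.convertScaleAbs(slowAvg),cv2.convertScaleAbs(fastAvg))
        _,mask = cv2.threshold(diff,thresh,255,cv2.THRESH_BINARY)
        return mask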

    def bgSmoothing2(self,frame):
        #OpenCV background removal using a MOG background subtractor
        #frameBW = self.makeBW(frame)
        frame = self.addGausianBlur(frame,self.blurKp)
        frame = cv2.equalizeHist(frame)
        fgMask = self.bgs.apply(frame)
        return fgMask
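    #Note: self.bgs is constructed elsewhere in the class (not shown in this
    #excerpt); presumably something like cv2.BackgroundSubtractorMOG() on
    #OpenCV 2.x or cv2.createBackgroundSubtractorMOG2() on 3.x/4.x -- an
    #assumption, since the constructor isn't visible here.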

    def track(self, frame_raw, video_capture):
        """
        Primary function for rain detection - the main loop that reads the
        video input, extracts frames, and processes them.
        """
        frame_black_white = self.makeBW(frame_raw)
        #create numpy arrays from image frames
        avg1 = np.float32(frame_black_white)
        avg2 = np.float32(frame_black_white)
        objs = []
        while 1:
            if not self.pause:
                #get frame from video
                _, frame_raw = video_capture.read()
                
                # Check to see if we have a valid frame so we
                # don't get a strange error from opencv. 
                # http://stackoverflow.com/questions/16703345/how-can-i-use-opencv-python-to-read-a-video-file-without-looping-mac-os
                if frame_raw is None:
                    break

                #convert to grayscale first
                frame_black_white = self.makeBW(frame_raw)
                res2 = frame_black_white.copy()

                if self.useMog:
                    mask = self.bgSmoothing2(res2)
                else:
                    res1,res2 = self.bgSmoothing(frame_black_white,avg1,avg2)
                    #get diff
                    mask = self.subtractFrames(res1,res2)
                    #threshold the difference image to suppress noise
                    _,mask = self.getThreshold(mask,self.cutOffThresh)

                #erode to remove small specks (a dilate pass is left commented out below)
                #res3 = cv2.dilate(res3,kernel,iterations=3)
                #res3 = cv2.erode(res3,kernel,iterations=2)
                res3 = cv2.erode(mask,self.kernel,iterations=self.erodeIter)
            
                #set this for later use
                self.currFrame = res2
                self.currFrameOpt = res3

                cimage = np.copy(res3)
                #find contours
                if self.counter > self.start:
                    self.contours  = self.findCountoursBW(cimage)
                    self.contours = self.counterSizeFilter(self.contours,self.sizeL,self.sizeM)

                #run the cascade detector on each contour's ROI;
                #don't search every frame -- only every track_interval frames
                if len(self.contours) > 0:
                    if self.tracking and self.counter % self.track_interval == 0:
                        objs = []
                        for cont in self.contours:
                            box = self.getROI(res2,cv2.boundingRect(cont))
                            #self.rec.findUmbrellas(box)
                            found = self.rec.detectUmbrellaCascade(box)
                            if len(found) > 0:
                                objs = found
                                umb = cont


                if len(objs) > 0:
                    #rate-limit alerts: only send one every alertInterval frames
                    if self.lastTimeWarningSent == 1 or self.counter - self.lastTimeWarningSent > self.alertInterval:
                        print "Umbrella detected!!! Run for cover!!!"
                        #self.twtr.sendAlert()
                        self.lastTimeWarningSent = self.counter
                    else:
                        print "Alert just sent; suppressing this one, but FYI there is still an umbrella"

                #make it color again so detections can be drawn in color
                res2 = cv2.cvtColor(res2,cv2.COLOR_GRAY2BGR)
                if len(objs) > 0:
                    res2 = self.drawContour(res2,umb,(255,0,0))

                res2 = self.drawContours(res2,self.contours,self.sizeL,self.sizeM,(255,255,0))
                #res3 = cv2.dilate(res3,kernel)
                self.counter = self.counter + 1
                #cv2.imwrite("./pngs/image-"+str(counter).zfill(5)+".png", mask)
                #self.rec.getFeatures(res2,2)
                #res1 = self.rec.drawFeatures(res2)

                if len(self.rec.toTrack) > 0 and self.svmReady:
                    for person in self.contours:
                        #masks = self.rec.flannMatching(cv2.boundingRect(person))
                        matches = self.rec.trackObjects(res2)
                        #res3 = self.drawBox()
            if self.showBGDiff:
                cv2.imshow('bg1',res3)
            cv2.imshow('bg2',res2)
            if not self.init:
                cv2.setMouseCallback('bg2',self.onMouseClick,None)
                self.init = True

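            #keyboard controls: t = toggle positive/negative training,
            #d = dump training files, r = reset recognizer, space = pause,
            #g = toggle tracking, b = toggle MOG background extraction,
            #x = toggle background-diff window, +/- = adjust erode iterations,
            #Esc = quit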
            #mask to the low byte so key codes compare portably across platforms
            k = cv2.waitKey(20) & 0xFF
            if k == ord('t'):
                self.train = not self.train
                if self.train:
                    print "training on positive samples"
                else:
                    print "training on negative samples"
            if k == ord('d'):
                if self.trained:
                    print "creating training output files..."
                    self.createTrainingOutput()
                    print "training output files ready"
                    self.trained = {}
                    self.trainCouter = 0
            if k == ord('r'):
                self.rec.reset()
            if k == ord(' '):
                self.pause = not self.pause
                print "paused: %s"%self.pause
            if k == ord('g'):
                self.tracking = not self.tracking
                print "Tracking:%s"%self.tracking
            if k == ord('b'):
                self.useMog = not self.useMog
                print "MOG bg extraction %s"%self.useMog
            if k == ord('x'):
                self.showBGDiff = not self.showBGDiff
                print "enable bg diff:%s"%self.showBGDiff
            if k == ord('+'):
                self.erodeIter = self.erodeIter + 1
                print "erode : %s"%self.erodeIter
            if k == ord('-') and self.erodeIter > 0:
                self.erodeIter = self.erodeIter -1
                print "erode : %s"%self.erodeIter
            if k == 27:
                break
Exemple #43
0
class MusicMakerApp(QWidget):
    TEMPLATEWIDGETFACTORIES = {
        "circle": lambda: PlayWidget("samples/clap.wav", "samples/cymbal.wav",
                                     lambda args: args[0].drawEllipse(*args[1:])),
        "rectangle": lambda: PlayWidget("samples/kick.wav", "samples/rs.wav", lambda args: args[0].drawRect(*args[1:])),
        "caret": lambda: PlayWidget("samples/hh.wav", "samples/ohh.wav", lambda args: DrawHelper.drawTriangle(*args)),
        "zig-zag": lambda: PlayWidget("samples/sd1.wav", "samples/sd2.wav", lambda args: DrawHelper.drawZig(*args)),
        "left_square_bracket": lambda: PlayWidget("samples/cb.wav", "samples/hc.wav",
                                                  lambda args: DrawHelper.drawBracket(*args)),
    }
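    # Each recognized template name maps to a factory that builds a PlayWidget
    # wired to two samples and a draw routine; resolveCommand() below consults
    # this table when a gesture is recognized.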

    def __init__(self):
        super(MusicMakerApp, self).__init__()
        self.setMinimumHeight(500)
        self.setMinimumWidth(800)

        self.markerHelper = IrMarkerEventFilter(self)
        self.installEventFilter(self.markerHelper)

        self.recognizer = Recognizer()
        self.recognizer.addTemplate(template.Template(*template.circle))
        self.recognizer.addTemplate(template.Template(*template.delete))
        self.recognizer.addTemplate(template.Template(*template.rectangle))
        self.recognizer.addTemplate(template.Template(*template.caret))
        self.recognizer.addTemplate(template.Template(*template.zig_zag))
        self.recognizer.addTemplate(template.Template(*template.left_square_bracket))

        self.recognizeThread = RecognizeThread()
        self.recognizeThread.finished.connect(self.recognized)
        self.recognizeThread.start()

        self.head = Playhead(self, self.playheadMoved)

    def setPointerDrawFilter(self, filter):
        self.pointerDrawFilter = filter
        self.pointerDrawFilter.setCompleteCallback(self.onPointerDrawComplete)

    def playheadMoved(self, xpos, stepping):
        # play every visible child whose left or right edge the playhead
        # crossed during this step
        cs = self.children()
        lower = xpos - stepping
        for c in cs:
            c = c  # type: QWidget
            r = c.geometry()  # type: QRect
            if c.isVisible() and ((lower < r.x() < xpos) or (lower < r.right() < xpos)):
                if hasattr(c, "play"):
                    c.play()

    def adjustSize(self):
        QWidget.adjustSize(self)
        self.head.adjustSize()

    def recognized(self, context):
        print("recognized")

        recognized = context.res
        if not recognized:
            return

        pointer = context.pointer
        points = context.points

        matched = recognized[0]  # type: template.Template
        if matched:
            if recognized[1] > 0.5:
                print(matched.name + " recognized: " + str(recognized[1]))
                command = self.resolveCommand(matched.name, points)
                if command:
                    pointer.undoStack().push(command)
            else:
                # TODO output some status
                pass

    def onPointerDrawComplete(self, pointer, points):
        if len(points) <= 2:
            return

        points = list(map(lambda p: (p.x(), p.y()), points))
        self.recognizeThread.recognize(RecognizeContext(self.recognizer, points, pointer))

    def paintEvent(self, ev):
        QWidget.paintEvent(self, ev)

        qp = QPainter()
        qp.begin(self)

        qp.setBrush(Qt.black)
        qp.drawRect(self.rect())

        self.drawStepping(qp, self.head.stepping)
        if self.markerHelper.markerMode:
            self.markerHelper.drawMarkers(qp)
        self.pointerDrawFilter.drawPoints(qp)

        qp.end()

    def drawStepping(self, qp, stepping):
        pos = 0
        qp.setBrush(Qt.yellow)
        pen = QPen()
        pen.setColor(Qt.darkGray)
        qp.setPen(pen)
        while pos < self.width():
            pos += stepping
            qp.drawLine(pos, 0, pos, self.height())

    def resolveCommand(self, templateName, points):

        if templateName == "delete":
            x, y = np.mean(points, 0)
            widget = self.childAt(int(x), int(y))
            if widget and widget is not self:
                return DeleteCommand(self, widget)

        widgetFactory = MusicMakerApp.TEMPLATEWIDGETFACTORIES.get(templateName, None)

        if not widgetFactory:
            return None

        widget = widgetFactory()

        self.setupChildWidget(widget, points)
        return AddCommand(self, widget)
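    # Note: QUndoStack.push() immediately calls the command's redo(), so an
    # AddCommand/DeleteCommand returned here takes effect as soon as
    # recognized() pushes it onto the pointer's undo stack.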

    def setupChildWidget(self, widget, points):
        widget.setFixedWidth(100)
        widget.setFixedHeight(100)

        x, y = np.mean(points, 0)
        x = int(x - widget.width() * 0.5)
        y = int(y - widget.height() * 0.5)
        widget.move(x, y)
        widget.setParent(self)
Exemple #44
0
 def __init__(self):
     # load resources via Recognizer
     self.recognizer = Recognizer(EIG_x_NPZ, K, loadPrior=True)
Exemple #45
0
        else:
            gestureMaskParentDirPath = None
        return gestureParentDirPath,gestureMaskParentDirPath,opts.trainmask
    else:
        return
            
#########################
### Main script entry ###
#########################
if __name__ == "__main__":
    opts,args = cmd_parser()
    inputMode,inTrainDirs = process_opts(opts)    
    vc = cv2.VideoCapture(0)
    try:
        if inputMode == "video":
            recognizer = Recognizer(vc=vc, opts=opts)
            score = recognizer.train_from_video()
            print "Training score = {0}".format(score)
            outTrainDir = get_new_directory(opts.num, opts.type)
            clf = pickle_files(outTrainDir, recognizer.trainer)
        elif inputMode == "descriptors":
            descList,trainLabels = get_relevant_objects(inputMode, inTrainDirs, opts)
            recognizer = Recognizer(vc=vc, opts=opts)
            score = recognizer.train_from_descriptors(descList, trainLabels)
            print "Training score = {0}".format(score)
            if len(inTrainDirs) == 1:
                outTrainDir = inTrainDirs[0]
            else:
                outTrainDir = get_new_directory(opts.num, opts.type)
            clf = pickle_files(outTrainDir, recognizer.trainer)
        elif inputMode == "images":
Exemple #46
0
def main(filename):
    r = Recognizer(filename)
    r.parse()
import wx
from recognizer import Recognizer
from template import *
import tuio
tracking = tuio.Tracking()

# Should run the frame as a separate thread
# http://stackoverflow.com/questions/12786471/invoking-a-wxpython-evt-button-event-programmatically
print "Initializing recognizer"
recognizer = Recognizer()
for template in templates:
	recognizer.addTemplate(template)
print "Initilized recognizer!"


class MyApp(wx.App):
	def OnInit(self):
		self.frame = MyFrame(None, "Sample")
		self.frame.Centre()
		self.frame.Show(True)
		return True


class MyFrame(wx.Frame):
	def __init__(self, parent, title):
		wx.Frame.__init__(self, parent, title=title)
		wx.StaticText(self, label='Detected shape:', pos=(10, 10))
		wx.StaticText(self, label='Detected score:', pos=(10, 30))
		self.detected_shape = wx.StaticText(self, label='', pos=(95, 10))
		self.detected_score = wx.StaticText(self, label='', pos=(93, 30))
		self.previous_points = []
Exemple #48
0
#!/usr/bin/env python

import trollius as asyncio
from recognizer import Recognizer
import rospy

if __name__ == "__main__":
	import argparse
	parser = argparse.ArgumentParser()
	parser.add_argument('-o', dest="output_file", help="output_file", default=None)
	parser.add_argument('-i', dest="input_file", help="input_file", default=None)
	args = parser.parse_args()

	rospy.init_node('dumper', disable_signals=False)

	serv = Recognizer()

	if args.output_file:
		with open(args.output_file, "w+") as db:
			data = serv.serialize()
			db.write(data)

	elif args.input_file:
		with open(args.input_file, "r") as db:
			success = serv.deserialize(db.read())
			print("success: {}".format(success))

	rospy.signal_shutdown("Done")